1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26#include <sys/types.h>
27#include <sys/cmn_err.h>
28#include <sys/errno.h>
29#include <sys/log.h>
30#include <sys/systm.h>
31#include <sys/modctl.h>
32#include <sys/errorq.h>
33#include <sys/controlregs.h>
34#include <sys/fm/util.h>
35#include <sys/fm/protocol.h>
36#include <sys/sysevent.h>
37#include <sys/pghw.h>
38#include <sys/cyclic.h>
39#include <sys/pci_cfgspace.h>
40#include <sys/mc_intel.h>
41#include <sys/smbios.h>
42#include "nb5000.h"
43#include "nb_log.h"
44#include "dimm_phys.h"
45
/*
 * Tunable: when non-zero (the default), the RECMEM/NRECMEM address logs
 * are decoded only if the corresponding valid bit is set in the
 * VALIDLOG register; set to 0 to decode the logs unconditionally.
 */
int nb_check_validlog = 1;

/* Running uncorrectable-error counts; indexed by branch — TODO confirm. */
static uint32_t uerrcnt[2];
/*
 * Running correctable-error counts ([2][2] — presumably branch/channel;
 * verify against the users later in this file).  a/b/c/d appear to be
 * separate counter banks.
 */
static uint32_t cerrcnta[2][2];
static uint32_t cerrcntb[2][2];
static uint32_t cerrcntc[2][2];
static uint32_t cerrcntd[2][2];
/* Scratch logout buffer used when capturing north-bridge error state. */
static nb_logout_t nb_log;
54
/*
 * One row of an error-decode table: maps a single bit in a hardware
 * fault (FERR/NERR) register to its number in the Intel Chipset Error
 * List and to the emask bit used to suppress further machine checks
 * for that error.
 */
struct mch_error_code {
	int intel_error_list;	/* error number in Chipset Error List */
	uint32_t emask;		/* mask for machine check */
	uint32_t error_bit;	/* error bit in fault register */
};
60
/* Decode table for fatal FB-DIMM errors (FERR_FAT_FBD bits). */
static struct mch_error_code fat_fbd_error_code[] = {
	{ 23, EMASK_FBD_M23, ERR_FAT_FBD_M23 },
	{ 3, EMASK_FBD_M3, ERR_FAT_FBD_M3 },
	{ 2, EMASK_FBD_M2, ERR_FAT_FBD_M2 },
	{ 1, EMASK_FBD_M1, ERR_FAT_FBD_M1 }
};
67
68static int
69intel_fat_fbd_err(uint32_t fat_fbd)
70{
71	int rt = -1;
72	int nerr = 0;
73	uint32_t emask_fbd = 0;
74	int i;
75	int sz;
76
77	sz = sizeof (fat_fbd_error_code) / sizeof (struct mch_error_code);
78
79	for (i = 0; i < sz; i++) {
80		if (fat_fbd & fat_fbd_error_code[i].error_bit) {
81			rt = fat_fbd_error_code[i].intel_error_list;
82			emask_fbd |= fat_fbd_error_code[i].emask;
83			nerr++;
84		}
85	}
86
87	if (emask_fbd)
88		nb_fbd_mask_mc(emask_fbd);
89	if (nerr > 1)
90		rt = -1;
91	return (rt);
92}
93
/*
 * Decode the captured fatal FB-DIMM error registers into the memory
 * scratchpad and return the ereport class suffix for the error.
 *
 * rp:	captured north-bridge registers (fat_fbd_regs set)
 * data:	an nb_scatchpad_t; its ms member receives the decoded
 *	branch/channel/rank/dimm/bank/ras/cas/pa/offset, each set to
 *	-1 when it cannot be determined.
 *
 * Returns the ereport name ("nb.unknown" when no recognizable first
 * error is latched).
 */
static char *
fat_memory_error(const nb_regs_t *rp, void *data)
{
	int channel;
	uint32_t ferr_fat_fbd, nrecmemb;
	uint32_t nrecmema;
	char *intr = "nb.unknown";
	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;

	ferr_fat_fbd = rp->nb.fat_fbd_regs.ferr_fat_fbd;
	if ((ferr_fat_fbd & ERR_FAT_FBD_MASK) == 0) {
		/*
		 * No first-error bit latched; classify from the "next"
		 * error register and report with no location information.
		 */
		sp->intel_error_list =
		    intel_fat_fbd_err(rp->nb.fat_fbd_regs.nerr_fat_fbd);
		sp->branch = -1;
		sp->channel = -1;
		sp->rank = -1;
		sp->dimm = -1;
		sp->bank = -1;
		sp->cas = -1;
		sp->ras = -1;
		sp->pa = -1LL;
		sp->offset = -1;
		return (intr);
	}
	sp->intel_error_list = intel_fat_fbd_err(ferr_fat_fbd);
	/* channel field of FERR_FAT_FBD; the branch is the upper bit */
	channel = (ferr_fat_fbd >> 28) & 3;
	sp->branch = channel >> 1;
	sp->channel = channel;
	if ((ferr_fat_fbd & (ERR_FAT_FBD_M2|ERR_FAT_FBD_M1)) != 0) {
		if ((ferr_fat_fbd & ERR_FAT_FBD_M1) != 0)
			intr = "nb.fbd.alert";	/* Alert on FB-DIMM M1 */
		else
			intr = "nb.fbd.crc";	/* CRC error FB_DIMM M2 */
		/* location comes from the non-recoverable memory logs */
		nrecmema = rp->nb.fat_fbd_regs.nrecmema;
		nrecmemb = rp->nb.fat_fbd_regs.nrecmemb;
		sp->rank = (nrecmema >> 8) & RANK_MASK;
		sp->dimm = sp->rank >> 1;	/* two ranks per dimm */
		sp->bank = (nrecmema >> 12) & BANK_MASK;
		sp->cas = (nrecmemb >> 16) & CAS_MASK;
		sp->ras = nrecmemb & RAS_MASK;
		/*
		 * If driver was built with closed tree present then we will
		 * have Intel proprietary code for finding physaddr
		 */
		if (&dimm_getphys) {	/* weak symbol: present only in closed build */
			sp->pa = dimm_getphys((uint16_t)sp->branch,
			    (uint16_t)sp->rank, (uint64_t)sp->bank,
			    (uint64_t)sp->ras, (uint64_t)sp->cas);
			if (sp->pa >= MAXPHYS_ADDR)
				sp->pa = -1ULL;
		} else {
			sp->pa = -1ULL;
		}
		/*
		 * If there is an offset decoder use it otherwise encode
		 * rank/bank/ras/cas
		 */
		if (&dimm_getoffset) {
			sp->offset = dimm_getoffset(sp->branch, sp->rank,
			    sp->bank, sp->ras, sp->cas);
		} else {
			sp->offset = TCODE_OFFSET(sp->rank, sp->bank, sp->ras,
			    sp->cas);
		}
	} else {
		if ((ferr_fat_fbd & ERR_FAT_FBD_M3) != 0)
			intr = "nb.fbd.otf";	/* thermal temp > Tmid M3 */
		else if ((ferr_fat_fbd & ERR_FAT_FBD_M23) != 0) {
			intr = "nb.fbd.reset_timeout";
			sp->channel = -1;
		}
		/* no rank/address information for these error types */
		sp->rank = -1;
		sp->dimm = -1;
		sp->bank = -1;
		sp->cas = -1;
		sp->ras = -1;
		sp->pa = -1LL;
		sp->offset = -1;
	}
	return (intr);
}
175
176
/* Decode table for non-fatal FB-DIMM errors (FERR_NF_FBD bits). */
static struct mch_error_code nf_fbd_error_code[] = {
	{ 29, EMASK_FBD_M29, ERR_NF_FBD_M29 },
	{ 28, EMASK_FBD_M28, ERR_NF_FBD_M28 },
	{ 27, EMASK_FBD_M27, ERR_NF_FBD_M27 },
	{ 26, EMASK_FBD_M26, ERR_NF_FBD_M26 },
	{ 25, EMASK_FBD_M25, ERR_NF_FBD_M25 },
	{ 24, EMASK_FBD_M24, ERR_NF_FBD_M24 },
	{ 22, EMASK_FBD_M22, ERR_NF_FBD_M22 },
	{ 21, EMASK_FBD_M21, ERR_NF_FBD_M21 },
	{ 20, EMASK_FBD_M20, ERR_NF_FBD_M20 },
	{ 19, EMASK_FBD_M19, ERR_NF_FBD_M19 },
	{ 18, EMASK_FBD_M18, ERR_NF_FBD_M18 },
	{ 17, EMASK_FBD_M17, ERR_NF_FBD_M17 },
	{ 16, EMASK_FBD_M16, ERR_NF_FBD_M16 },
	{ 15, EMASK_FBD_M15, ERR_NF_FBD_M15 },
	{ 14, EMASK_FBD_M14, ERR_NF_FBD_M14 },
	{ 13, EMASK_FBD_M13, ERR_NF_FBD_M13 },
	{ 12, EMASK_FBD_M12, ERR_NF_FBD_M12 },
	{ 11, EMASK_FBD_M11, ERR_NF_FBD_M11 },
	{ 10, EMASK_FBD_M10, ERR_NF_FBD_M10 },
	{ 9, EMASK_FBD_M9, ERR_NF_FBD_M9 },
	{ 8, EMASK_FBD_M8, ERR_NF_FBD_M8 },
	{ 7, EMASK_FBD_M7, ERR_NF_FBD_M7 },
	{ 6, EMASK_FBD_M6, ERR_NF_FBD_M6 },
	{ 5, EMASK_FBD_M5, ERR_NF_FBD_M5 },
	{ 4, EMASK_FBD_M4, ERR_NF_FBD_M4 }
};
204
205static int
206intel_nf_fbd_err(uint32_t nf_fbd)
207{
208	int rt = -1;
209	int nerr = 0;
210	uint32_t emask_fbd = 0;
211	int i;
212	int sz;
213
214	sz = sizeof (nf_fbd_error_code) / sizeof (struct mch_error_code);
215
216	for (i = 0; i < sz; i++) {
217		if (nf_fbd & nf_fbd_error_code[i].error_bit) {
218			rt = nf_fbd_error_code[i].intel_error_list;
219			emask_fbd |= nf_fbd_error_code[i].emask;
220			nerr++;
221		}
222	}
223	if (emask_fbd)
224		nb_fbd_mask_mc(emask_fbd);
225	if (nerr > 1)
226		rt = -1;
227	return (rt);
228}
229
230static char *
231nf_memory_error(const nb_regs_t *rp, void *data)
232{
233	uint32_t ferr_nf_fbd, recmemb, redmemb;
234	uint32_t recmema;
235	int branch, channel, ecc_locator;
236	char *intr = "nb.unknown";
237	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;
238
239	sp->rank = -1;
240	sp->dimm = -1;
241	sp->bank = -1;
242	sp->cas = -1;
243	sp->ras = -1LL;
244	sp->pa = -1LL;
245	sp->offset = -1;
246	ferr_nf_fbd = rp->nb.nf_fbd_regs.ferr_nf_fbd;
247	if ((ferr_nf_fbd & ERR_NF_FBD_MASK) == 0) {
248		/* unknown ereport if a recognizable error was not found */
249		sp->branch = -1;
250		sp->channel = -1;
251		sp->intel_error_list = -1;
252		return (intr);
253	}
254	sp->intel_error_list = intel_nf_fbd_err(ferr_nf_fbd);
255	channel = (ferr_nf_fbd >> ERR_FBD_CH_SHIFT) & 3;
256	branch = channel >> 1;
257	sp->branch = branch;
258	sp->channel = channel;
259	if (ferr_nf_fbd & ERR_NF_FBD_MASK) {
260		if (ferr_nf_fbd & ERR_NF_FBD_ECC_UE) {
261			/*
262			 * uncorrectable ECC M4 - M12
263			 * we can only isolate to pair of dimms
264			 * for single dimm configuration let eversholt
265			 * sort it out with out needing a special rule
266			 */
267			sp->channel = -1;
268			recmema = rp->nb.nf_fbd_regs.recmema;
269			recmemb = rp->nb.nf_fbd_regs.recmemb;
270			sp->rank = (recmema >> 8) & RANK_MASK;
271			sp->bank = (recmema >> 12) & BANK_MASK;
272			sp->cas = (recmemb >> 16) & CAS_MASK;
273			sp->ras = recmemb & RAS_MASK;
274			intr = "nb.mem_ue";
275		} else if (ferr_nf_fbd & ERR_NF_FBD_M13) {
276			/*
277			 * write error M13
278			 * we can only isolate to pair of dimms
279			 */
280			sp->channel = -1;
281			if (nb_mode != NB_MEMORY_MIRROR) {
282				recmema = rp->nb.nf_fbd_regs.recmema;
283				sp->rank = (recmema >> 8) & RANK_MASK;
284				sp->bank = (recmema >> 12) & BANK_MASK;
285				sp->cas = (recmemb >> 16) & CAS_MASK;
286				sp->ras = recmemb & RAS_MASK;
287			}
288			intr = "nb.fbd.ma"; /* memory alert */
289		} else if (ferr_nf_fbd & ERR_NF_FBD_MA) { /* M14, M15 and M21 */
290			intr = "nb.fbd.ch"; /* FBD on channel */
291		} else if ((ferr_nf_fbd & ERR_NF_FBD_ECC_CE) != 0) {
292			/* correctable ECC M17-M20 */
293			recmema = rp->nb.nf_fbd_regs.recmema;
294			recmemb = rp->nb.nf_fbd_regs.recmemb;
295			sp->rank = (recmema >> 8) & RANK_MASK;
296			redmemb = rp->nb.nf_fbd_regs.redmemb;
297			ecc_locator = redmemb & 0x3ffff;
298			if (ecc_locator & 0x1ff)
299				sp->channel = branch << 1;
300			else if (ecc_locator & 0x3fe00)
301				sp->channel = (branch << 1) + 1;
302			sp->dimm = sp->rank >> 1;
303			sp->bank = (recmema >> 12) & BANK_MASK;
304			sp->cas = (recmemb >> 16) & CAS_MASK;
305			sp->ras = recmemb & RAS_MASK;
306			intr = "nb.mem_ce";
307		} else if ((ferr_nf_fbd & ERR_NF_FBD_SPARE) != 0) {
308			/* spare dimm M27, M28 */
309			intr = "nb.mem_ds";
310			sp->channel = -1;
311			if (rp->nb.nf_fbd_regs.spcps & SPCPS_SPARE_DEPLOYED) {
312				sp->rank =
313				    SPCPS_FAILED_RANK(rp->nb.nf_fbd_regs.spcps);
314				nb_used_spare_rank(sp->branch, sp->rank);
315				nb_config_gen++;
316			}
317		} else if ((ferr_nf_fbd & ERR_NF_FBD_M22) != 0) {
318			intr = "nb.spd";	/* SPD protocol */
319		}
320	}
321	if (sp->ras != -1) {
322		/*
323		 * If driver was built with closed tree present then we will
324		 * have Intel proprietary code for finding physaddr
325		 */
326		if (&dimm_getphys) {
327			sp->pa = dimm_getphys((uint16_t)sp->branch,
328			    (uint16_t)sp->rank, (uint64_t)sp->bank,
329			    (uint64_t)sp->ras, (uint64_t)sp->cas);
330			if (sp->pa >= MAXPHYS_ADDR)
331				sp->pa = -1ULL;
332		} else {
333			sp->pa = -1ULL;
334		}
335		if (&dimm_getoffset) {
336			sp->offset = dimm_getoffset(sp->branch, sp->rank,
337			    sp->bank, sp->ras, sp->cas);
338		} else {
339			sp->offset = TCODE_OFFSET(sp->rank, sp->bank, sp->ras,
340			    sp->cas);
341		}
342	}
343	return (intr);
344}
345
/* Decode table for non-fatal DDR2 memory errors (FERR_NF_MEM bits, 5100). */
static struct mch_error_code nf_mem_error_code[] = {
	{ 21, EMASK_MEM_M21, ERR_NF_MEM_M21 },
	{ 20, EMASK_MEM_M20, ERR_NF_MEM_M20 },
	{ 18, EMASK_MEM_M18, ERR_NF_MEM_M18 },
	{ 16, EMASK_MEM_M16, ERR_NF_MEM_M16 },
	{ 15, EMASK_MEM_M15, ERR_NF_MEM_M15 },
	{ 14, EMASK_MEM_M14, ERR_NF_MEM_M14 },
	{ 12, EMASK_MEM_M12, ERR_NF_MEM_M12 },
	{ 11, EMASK_MEM_M11, ERR_NF_MEM_M11 },
	{ 10, EMASK_MEM_M10, ERR_NF_MEM_M10 },
	{ 6, EMASK_MEM_M6, ERR_NF_MEM_M6 },
	{ 5, EMASK_MEM_M5, ERR_NF_MEM_M5 },
	{ 4, EMASK_MEM_M4, ERR_NF_MEM_M4 },
	{ 1, EMASK_MEM_M1, ERR_NF_MEM_M1 }
};
361
362static int
363intel_nf_mem_err(uint32_t nf_mem)
364{
365	int rt = -1;
366	int nerr = 0;
367	uint32_t emask_mem = 0;
368	int i;
369	int sz;
370
371	sz = sizeof (nf_mem_error_code) / sizeof (struct mch_error_code);
372
373	for (i = 0; i < sz; i++) {
374		if (nf_mem & nf_mem_error_code[i].error_bit) {
375			rt = nf_mem_error_code[i].intel_error_list;
376			emask_mem |= nf_mem_error_code[i].emask;
377			nerr++;
378		}
379	}
380	if (emask_mem)
381		nb_mem_mask_mc(emask_mem);
382	if (nerr > 1)
383		rt = -1;
384	return (rt);
385}
386
/*
 * Decode the captured non-fatal DDR2 memory error registers (5100-style
 * branches, one channel per branch) into the memory scratchpad and
 * return the ereport class suffix for the error.
 *
 * rp:	captured north-bridge registers (nf_mem_regs set)
 * data:	an nb_scatchpad_t; its ms member receives the decoded
 *	location fields, each set to -1 when it cannot be determined.
 *
 * Address logs are only decoded when the VALIDLOG register marks them
 * valid, unless the nb_check_validlog tunable is cleared.
 */
static char *
nf_mem_error(const nb_regs_t *rp, void *data)
{
	uint32_t ferr_nf_mem, recmema, recmemb;
	uint32_t nrecmema, nrecmemb, validlog;
	int channel;
	char *intr = "nb.unknown";
	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;

	/* default all location fields to "unknown" */
	sp->rank = -1;
	sp->dimm = -1;
	sp->bank = -1;
	sp->cas = -1;
	sp->ras = -1LL;
	sp->pa = -1LL;
	sp->offset = -1;
	ferr_nf_mem = rp->nb.nf_mem_regs.ferr_nf_mem;
	if ((ferr_nf_mem & ERR_NF_MEM_MASK) == 0) {
		/* no first error found */
		sp->branch = -1;
		sp->channel = -1;
		sp->intel_error_list =
		    intel_nf_mem_err(rp->nb.nf_mem_regs.nerr_nf_mem);
		return (intr);
	}
	sp->intel_error_list = intel_nf_mem_err(ferr_nf_mem);

	channel = (ferr_nf_mem >> ERR_MEM_CH_SHIFT) & 0x1;
	sp->branch = channel;
	sp->channel = -1;
	if (ferr_nf_mem & ERR_NF_MEM_MASK) {
		if (ferr_nf_mem & ERR_NF_MEM_ECC_UE) {
			/*
			 * uncorrectable ECC M1,M4-M6,M10-M12
			 * There is only one channel per branch.
			 * Invalidate the channel number so the mem ereport
			 * has the same detector with existing 5000 ereports.
			 * so we can leverage the existing Eversholt rule.
			 */
			validlog = rp->nb.nf_mem_regs.validlog;
			if (ferr_nf_mem & ERR_NF_MEM_M1) {
				/* M1 logs to the non-recoverable registers */
				nrecmema = rp->nb.nf_mem_regs.nrecmema;
				nrecmemb = rp->nb.nf_mem_regs.nrecmemb;
				/* check if the nrecmem log is valid */
				if (validlog & 0x1 || nb_check_validlog == 0) {
					sp->rank = (nrecmema >> 8) & RANK_MASK;
					sp->bank = (nrecmema >> 12) & BANK_MASK;
					sp->cas = (nrecmemb >> 16) & CAS_MASK;
					sp->ras = nrecmemb & RAS_MASK;
				}
			} else {
				recmema = rp->nb.nf_mem_regs.recmema;
				recmemb = rp->nb.nf_mem_regs.recmemb;
				/* check if the recmem log is valid */
				if (validlog & 0x2 || nb_check_validlog == 0) {
					sp->rank = (recmema >> 8) & RANK_MASK;
					sp->bank = (recmema >> 12) & BANK_MASK;
					sp->cas = (recmemb >> 16) & CAS_MASK;
					sp->ras = recmemb & RAS_MASK;
				}
			}
			intr = "nb.ddr2_mem_ue";
		} else if ((ferr_nf_mem & ERR_NF_MEM_ECC_CE) != 0) {
			/* correctable ECC M14-M16 */
			recmema = rp->nb.nf_mem_regs.recmema;
			recmemb = rp->nb.nf_mem_regs.recmemb;
			validlog = rp->nb.nf_mem_regs.validlog;
			/* check if the recmem log is valid */
			if (validlog & 0x2 || nb_check_validlog == 0) {
				sp->channel = channel;
				sp->rank = (recmema >> 8) & RANK_MASK;
				sp->dimm = nb_rank2dimm(sp->channel, sp->rank);
				sp->bank = (recmema >> 12) & BANK_MASK;
				sp->cas = (recmemb >> 16) & CAS_MASK;
				sp->ras = recmemb & RAS_MASK;
			}
			intr = "nb.ddr2_mem_ce";
		} else if ((ferr_nf_mem & ERR_NF_MEM_SPARE) != 0) {
			/* spare dimm M20, M21 */
			intr = "nb.ddr2_mem_ds";

			/*
			 * The channel can be valid here.
			 * However, there is only one channel per branch and
			 * to leverage the eversholt rules of other chipsets,
			 * the channel is ignored and let the rule find it out
			 * from the topology.
			 */
			if (rp->nb.nf_mem_regs.spcps & SPCPS_SPARE_DEPLOYED) {
				sp->rank =
				    SPCPS_FAILED_RANK(rp->nb.nf_mem_regs.spcps);
				nb_used_spare_rank(sp->branch, sp->rank);
				nb_config_gen++;
			}
		} else if ((ferr_nf_mem & ERR_NF_MEM_M18) != 0) {
			sp->channel = channel;
			intr = "nb.ddr2_spd";	/* SPD protocol */

		}
	}
	if (sp->ras != -1) {
		/*
		 * If driver was built with closed tree present then we will
		 * have Intel proprietary code for finding physaddr
		 */
		if (&dimm_getphys) {
			sp->pa = dimm_getphys((uint16_t)sp->branch,
			    (uint16_t)sp->rank, (uint64_t)sp->bank,
			    (uint64_t)sp->ras, (uint64_t)sp->cas);
			if (sp->pa >= MAXPHYS_ADDR)
				sp->pa = -1ULL;
		} else {
			sp->pa = -1ULL;
		}
		if (&dimm_getoffset) {
			sp->offset = dimm_getoffset(sp->branch, sp->rank,
			    sp->bank, sp->ras, sp->cas);
		} else {
			sp->offset = TCODE_OFFSET(sp->rank, sp->bank, sp->ras,
			    sp->cas);
		}
	}
	return (intr);
}
511
/* Decode table for fatal internal (FERR_FAT_INT) errors. */
static struct mch_error_code fat_int_error_code[] = {
	{ 14, EMASK_INT_B14, ERR_FAT_INT_B14 },
	{ 12, EMASK_INT_B12, ERR_FAT_INT_B12 },
	{ 25, EMASK_INT_B25, ERR_FAT_INT_B25 },
	{ 23, EMASK_INT_B23, ERR_FAT_INT_B23 },
	{ 21, EMASK_INT_B21, ERR_FAT_INT_B21 },
	{ 7, EMASK_INT_B7, ERR_FAT_INT_B7 },
	{ 4, EMASK_INT_B4, ERR_FAT_INT_B4 },
	{ 3, EMASK_INT_B3, ERR_FAT_INT_B3 },
	{ 2, EMASK_INT_B2, ERR_FAT_INT_B2 },
	{ 1, EMASK_INT_B1, ERR_FAT_INT_B1 }
};

/*
 * Decode table for non-fatal internal (FERR_NF_INT) errors.  Entries
 * with a zero emask have no corresponding machine-check mask bit.
 */
static struct mch_error_code nf_int_error_code[] = {
	{ 27, 0, ERR_NF_INT_B27 },
	{ 24, 0, ERR_NF_INT_B24 },
	{ 22, EMASK_INT_B22, ERR_NF_INT_B22 },
	{ 20, EMASK_INT_B20, ERR_NF_INT_B20 },
	{ 19, EMASK_INT_B19, ERR_NF_INT_B19 },
	{ 18, 0, ERR_NF_INT_B18 },
	{ 17, 0, ERR_NF_INT_B17 },
	{ 16, 0, ERR_NF_INT_B16 },
	{ 11, EMASK_INT_B11, ERR_NF_INT_B11 },
	{ 10, EMASK_INT_B10, ERR_NF_INT_B10 },
	{ 9, EMASK_INT_B9, ERR_NF_INT_B9 },
	{ 8, EMASK_INT_B8, ERR_NF_INT_B8 },
	{ 6, EMASK_INT_B6, ERR_NF_INT_B6 },
	{ 5, EMASK_INT_B5, ERR_NF_INT_B5 }
};
541
/*
 * Classify the internal (fatal + non-fatal) error bits.
 *
 * Recognized errors accumulate their emask bits, which are then masked
 * off via nb_int_mask_mc().  On the 5400 chipset the B26 bit in the
 * non-fatal register is handled specially before the table scan.
 *
 * Returns the Intel Chipset Error List number when exactly one known
 * error was found, -1 when none or more than one matched.
 */
static int
intel_int_err(uint16_t err_fat_int, uint16_t err_nf_int)
{
	int rt = -1;
	int nerr = 0;
	uint32_t emask_int = 0;
	int i;
	int sz;

	sz = sizeof (fat_int_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_fat_int & fat_int_error_code[i].error_bit) {
			rt = fat_int_error_code[i].intel_error_list;
			emask_int |= fat_int_error_code[i].emask;
			nerr++;
		}
	}

	/* 5400 reports B26 via a NERR bit that has no table entry */
	if (nb_chipset == INTEL_NB_5400 &&
	    (err_nf_int & NERR_NF_5400_INT_B26) != 0) {
		err_nf_int &= ~NERR_NF_5400_INT_B26;
		rt = 26;
		nerr++;
	}

	/*
	 * NOTE(review): rt is -1 (non-zero) unless set above, so this
	 * test appears to be always true and B18 is unconditionally
	 * dropped from the non-fatal scan — confirm intent against the
	 * chipset error list.
	 */
	if (rt)
		err_nf_int &= ~ERR_NF_INT_B18;

	sz = sizeof (nf_int_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_nf_int & nf_int_error_code[i].error_bit) {
			rt = nf_int_error_code[i].intel_error_list;
			emask_int |= nf_int_error_code[i].emask;
			nerr++;
		}
	}

	if (emask_int)
		nb_int_mask_mc(emask_int);
	if (nerr > 1)
		rt = -1;
	return (rt);
}
587
/*
 * Capture the internal error registers into *rp and, unless a panic is
 * imminent, clear the latched hardware state so the next error can be
 * recorded.
 *
 * willpanic:	non-zero if the system will panic; registers are then
 *		left untouched for post-mortem inspection.
 * interpose:	set non-zero if any register read was interposed on by
 *		the error-injection framework.
 *
 * Returns 1 when the only error latched is the non-fatal B18 bit
 * (alone in FERR, or in NERR with FERR clear), otherwise 0.
 */
static int
log_int_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int t = 0;
	int rt = 0;

	rp->flag = NB_REG_LOG_INT;
	rp->nb.int_regs.ferr_fat_int = FERR_FAT_INT_RD(interpose);
	rp->nb.int_regs.ferr_nf_int = FERR_NF_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nerr_fat_int = NERR_FAT_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nerr_nf_int = NERR_NF_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nrecint = NRECINT_RD();
	rp->nb.int_regs.recint = RECINT_RD();
	rp->nb.int_regs.nrecsf = NRECSF_RD();
	rp->nb.int_regs.recsf = RECSF_RD();

	if (!willpanic) {
		/* write back each register to clear what we captured */
		if (rp->nb.int_regs.ferr_fat_int || *interpose)
			FERR_FAT_INT_WR(rp->nb.int_regs.ferr_fat_int)
			FERR_FAT_INT_WR(rp->nb.int_regs.ferr_fat_int);
		if (rp->nb.int_regs.ferr_nf_int || *interpose)
			FERR_NF_INT_WR(rp->nb.int_regs.ferr_nf_int);
		if (rp->nb.int_regs.nerr_fat_int)
			NERR_FAT_INT_WR(rp->nb.int_regs.nerr_fat_int);
		if (rp->nb.int_regs.nerr_nf_int)
			NERR_NF_INT_WR(rp->nb.int_regs.nerr_nf_int);
		/*
		 * if interpose write read-only registers to clear from pcii
		 * cache
		 */
		if (*interpose) {
			NRECINT_WR();
			RECINT_WR();
			NRECSF_WR();
			RECSF_WR();
		}
	}
	if (rp->nb.int_regs.ferr_fat_int == 0 &&
	    rp->nb.int_regs.nerr_fat_int == 0 &&
	    (rp->nb.int_regs.ferr_nf_int == ERR_NF_INT_B18 ||
	    (rp->nb.int_regs.ferr_nf_int == 0 &&
	    rp->nb.int_regs.nerr_nf_int == ERR_NF_INT_B18))) {
		rt = 1;
	}
	return (rt);
}
636
/*
 * Capture the thermal error registers into *rp and, unless a panic is
 * imminent, clear the latched hardware state.
 *
 * willpanic:	non-zero if the system will panic; registers are then
 *		left untouched for post-mortem inspection.
 * interpose:	set non-zero if any register read was interposed on by
 *		the error-injection framework.
 */
static void
log_thermal_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int t = 0;

	rp->flag = NB_REG_LOG_THR;
	rp->nb.thr_regs.ferr_fat_thr = FERR_FAT_THR_RD(interpose);
	rp->nb.thr_regs.nerr_fat_thr = NERR_FAT_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.ferr_nf_thr = FERR_NF_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.nerr_nf_thr = NERR_NF_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.ctsts = CTSTS_RD();
	rp->nb.thr_regs.thrtsts = THRTSTS_RD();

	if (!willpanic) {
		/* write back each register to clear what we captured */
		if (rp->nb.thr_regs.ferr_fat_thr || *interpose)
			FERR_FAT_THR_WR(rp->nb.thr_regs.ferr_fat_thr);
		if (rp->nb.thr_regs.nerr_fat_thr || *interpose)
			NERR_FAT_THR_WR(rp->nb.thr_regs.nerr_fat_thr);
		if (rp->nb.thr_regs.ferr_nf_thr || *interpose)
			FERR_NF_THR_WR(rp->nb.thr_regs.ferr_nf_thr);
		if (rp->nb.thr_regs.nerr_nf_thr || *interpose)
			NERR_NF_THR_WR(rp->nb.thr_regs.nerr_nf_thr);

		/* on interpose, clear status from the pcii cache too */
		if (*interpose) {
			CTSTS_WR(rp->nb.thr_regs.ctsts);
			THRTSTS_WR(rp->nb.thr_regs.thrtsts);
		}
	}
}
669
/*
 * Capture the DMA engine error status registers into *rp.
 *
 * interpose:	set non-zero if the PCISTS read was interposed on by the
 *		error-injection framework.
 */
static void
log_dma_err(nb_regs_t *rp, int *interpose)
{
	rp->flag = NB_REG_LOG_DMA;

	rp->nb.dma_regs.pcists = PCISTS_RD(interpose);
	rp->nb.dma_regs.pexdevsts = PCIDEVSTS_RD();
}
678
/* Decode table for fatal front-side bus errors (FERR_FAT_FSB bits). */
static struct mch_error_code fat_fsb_error_code[] = {
	{ 9, EMASK_FSB_F9, ERR_FAT_FSB_F9 },
	{ 2, EMASK_FSB_F2, ERR_FAT_FSB_F2 },
	{ 1, EMASK_FSB_F1, ERR_FAT_FSB_F1 }
};

/* Decode table for non-fatal front-side bus errors (FERR_NF_FSB bits). */
static struct mch_error_code nf_fsb_error_code[] = {
	{ 8, EMASK_FSB_F8, ERR_NF_FSB_F8 },
	{ 7, EMASK_FSB_F7, ERR_NF_FSB_F7 },
	{ 6, EMASK_FSB_F6, ERR_NF_FSB_F6 }
};
690
691static int
692intel_fsb_err(int fsb, uint8_t err_fat_fsb, uint8_t err_nf_fsb)
693{
694	int rt = -1;
695	int nerr = 0;
696	uint16_t emask_fsb = 0;
697	int i;
698	int sz;
699
700	sz = sizeof (fat_fsb_error_code) / sizeof (struct mch_error_code);
701
702	for (i = 0; i < sz; i++) {
703		if (err_fat_fsb & fat_fsb_error_code[i].error_bit) {
704			rt = fat_fsb_error_code[i].intel_error_list;
705			emask_fsb |= fat_fsb_error_code[i].emask;
706			nerr++;
707		}
708	}
709
710	sz = sizeof (nf_fsb_error_code) / sizeof (struct mch_error_code);
711
712	for (i = 0; i < sz; i++) {
713		if (err_nf_fsb & nf_fsb_error_code[i].error_bit) {
714			rt = nf_fsb_error_code[i].intel_error_list;
715			emask_fsb |= nf_fsb_error_code[i].emask;
716			nerr++;
717		}
718	}
719
720	if (emask_fsb)
721		nb_fsb_mask_mc(fsb, emask_fsb);
722	if (nerr > 1)
723		rt = -1;
724	return (rt);
725}
726
/*
 * Capture the front-side bus error registers for the FSB identified by
 * the global error register value into *rp and, unless a panic is
 * imminent, clear the latched hardware state.
 *
 * ferr:	global FERR value; the failing FSB number is extracted
 *		from it with GE_FERR_FSB().
 * willpanic:	non-zero if the system will panic; registers are then
 *		left untouched for post-mortem inspection.
 * interpose:	set non-zero if any register read was interposed on by
 *		the error-injection framework.
 */
static void
log_fsb_err(uint64_t ferr, nb_regs_t *rp, int willpanic, int *interpose)
{
	uint8_t fsb;
	int t = 0;

	fsb = GE_FERR_FSB(ferr);
	rp->flag = NB_REG_LOG_FSB;

	rp->nb.fsb_regs.fsb = fsb;
	rp->nb.fsb_regs.ferr_fat_fsb = FERR_FAT_FSB_RD(fsb, interpose);
	rp->nb.fsb_regs.ferr_nf_fsb = FERR_NF_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nerr_fat_fsb = NERR_FAT_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nerr_nf_fsb = NERR_NF_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nrecfsb = NRECFSB_RD(fsb);
	rp->nb.fsb_regs.nrecfsb_addr = NRECADDR_RD(fsb);
	rp->nb.fsb_regs.recfsb = RECFSB_RD(fsb);
	if (!willpanic) {
		/* Clear the fatal/non-fatal first/next FSB errors */
		if (rp->nb.fsb_regs.ferr_fat_fsb || *interpose)
			FERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.ferr_fat_fsb);
		if (rp->nb.fsb_regs.ferr_nf_fsb || *interpose)
			FERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.ferr_nf_fsb);
		if (rp->nb.fsb_regs.nerr_fat_fsb || *interpose)
			NERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.nerr_fat_fsb);
		if (rp->nb.fsb_regs.nerr_nf_fsb || *interpose)
			NERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.nerr_nf_fsb);

		/*
		 * if interpose write read-only registers to clear from pcii
		 * cache
		 */
		if (*interpose) {
			NRECFSB_WR(fsb);
			NRECADDR_WR(fsb);
			RECFSB_WR(fsb);
		}
	}
}
769
/* Decode table for fatal PCI Express errors (5000-series). */
static struct mch_error_code fat_pex_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_FAT_IO19 },
	{ 18, EMASK_UNCOR_PEX_IO18, PEX_FAT_IO18 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_FAT_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_FAT_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_FAT_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_FAT_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_FAT_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_FAT_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_FAT_IO4 },
	{ 3, EMASK_UNCOR_PEX_IO3, PEX_FAT_IO3 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_FAT_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_FAT_IO0 }
};

/* Decode table for fatal PCI Express "unit" errors (5400 only). */
static struct mch_error_code fat_unit_pex_5400_error_code[] = {
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_FAT_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_FAT_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_FAT_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_FAT_IO29 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_FAT_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_FAT_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_FAT_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_FAT_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_FAT_IO23 },
	{ 22, EMASK_UNIT_PEX_IO22, PEX_5400_FAT_IO22 },
};

/* Decode table for fatal uncorrectable PCI Express errors (5400 only). */
static struct mch_error_code fat_pex_5400_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_FAT_IO19 },
	{ 18, EMASK_UNCOR_PEX_IO18, PEX_5400_FAT_IO18 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_FAT_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_FAT_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_FAT_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_FAT_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_FAT_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_FAT_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_FAT_IO4 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_FAT_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_FAT_IO0 }
};

/* Decode table for fatal root-port errors (5400 only). */
static struct mch_error_code fat_rp_5400_error_code[] = {
	{ 1, EMASK_RP_PEX_IO1, PEX_5400_FAT_IO1 }
};

/* Decode table for fatal root-port errors (5000-series). */
static struct mch_error_code fat_rp_error_code[] = {
	{ 1, EMASK_RP_PEX_IO1, PEX_FAT_IO1 }
};

/* Decode table for non-fatal uncorrectable PCI Express errors. */
static struct mch_error_code uncor_pex_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_NF_IO19 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_NF_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_NF_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_NF_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_NF_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_NF_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_NF_IO4 },
	{ 3, EMASK_UNCOR_PEX_IO3, PEX_NF_IO3 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_NF_IO0 }
};

/* Decode table for non-fatal "unit" PCI Express errors (5400 only). */
static struct mch_error_code uncor_pex_5400_error_code[] = {
	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 },
};

/* Decode table for correctable PCI Express errors. */
static struct mch_error_code cor_pex_error_code[] = {
	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
	{ 16, EMASK_COR_PEX_IO16, PEX_NF_IO16 },
	{ 15, EMASK_COR_PEX_IO15, PEX_NF_IO15 },
	{ 14, EMASK_COR_PEX_IO14, PEX_NF_IO14 },
	{ 13, EMASK_COR_PEX_IO13, PEX_NF_IO13 },
	{ 12, EMASK_COR_PEX_IO12, PEX_NF_IO12 },
	{ 10, 0, PEX_NF_IO10 },
	{ 2, 0, PEX_NF_IO2 }
};

/* Decode table for non-fatal root-port errors (5400 only). */
static struct mch_error_code rp_pex_5400_error_code[] = {
	{ 17, EMASK_RP_PEX_IO17, PEX_5400_NF_IO17 },
	{ 11, EMASK_RP_PEX_IO11, PEX_5400_NF_IO11 }
};

/* Decode table for non-fatal uncorrectable PCI Express errors (5400). */
static struct mch_error_code cor_pex_5400_error_code1[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_NF_IO19 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_NF_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_NF_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_NF_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_NF_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_NF_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_NF_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_NF_IO4 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_NF_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_NF_IO0 }
};

/* Decode table for correctable PCI Express errors (5400). */
static struct mch_error_code cor_pex_5400_error_code2[] = {
	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
	{ 16, EMASK_COR_PEX_IO16, PEX_5400_NF_IO16 },
	{ 15, EMASK_COR_PEX_IO15, PEX_5400_NF_IO15 },
	{ 14, EMASK_COR_PEX_IO14, PEX_5400_NF_IO14 },
	{ 13, EMASK_COR_PEX_IO13, PEX_5400_NF_IO13 },
	{ 12, EMASK_COR_PEX_IO12, PEX_5400_NF_IO12 }
};

/* Decode table for non-fatal "unit" PCI Express errors (5400). */
static struct mch_error_code cor_pex_5400_error_code3[] = {
	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 }
};

/* Decode table for non-fatal root-port errors (5000-series). */
static struct mch_error_code rp_pex_error_code[] = {
	{ 17, EMASK_RP_PEX_IO17, PEX_NF_IO17 },
	{ 11, EMASK_RP_PEX_IO11, PEX_NF_IO11 },
};
902
903static int
904intel_pex_err(uint32_t pex_fat, uint32_t pex_nf_cor)
905{
906	int rt = -1;
907	int nerr = 0;
908	int i;
909	int sz;
910
911	sz = sizeof (fat_pex_error_code) / sizeof (struct mch_error_code);
912
913	for (i = 0; i < sz; i++) {
914		if (pex_fat & fat_pex_error_code[i].error_bit) {
915			rt = fat_pex_error_code[i].intel_error_list;
916			nerr++;
917		}
918	}
919	sz = sizeof (fat_rp_error_code) / sizeof (struct mch_error_code);
920
921	for (i = 0; i < sz; i++) {
922		if (pex_fat & fat_rp_error_code[i].error_bit) {
923			rt = fat_rp_error_code[i].intel_error_list;
924			nerr++;
925		}
926	}
927	sz = sizeof (uncor_pex_error_code) / sizeof (struct mch_error_code);
928
929	for (i = 0; i < sz; i++) {
930		if (pex_nf_cor & uncor_pex_error_code[i].error_bit) {
931			rt = uncor_pex_error_code[i].intel_error_list;
932			nerr++;
933		}
934	}
935
936	sz = sizeof (cor_pex_error_code) / sizeof (struct mch_error_code);
937
938	for (i = 0; i < sz; i++) {
939		if (pex_nf_cor & cor_pex_error_code[i].error_bit) {
940			rt = cor_pex_error_code[i].intel_error_list;
941			nerr++;
942		}
943	}
944	sz = sizeof (rp_pex_error_code) / sizeof (struct mch_error_code);
945
946	for (i = 0; i < sz; i++) {
947		if (pex_nf_cor & rp_pex_error_code[i].error_bit) {
948			rt = rp_pex_error_code[i].intel_error_list;
949			nerr++;
950		}
951	}
952
953	if (nerr > 1)
954		rt = -1;
955	return (rt);
956}
957
/* Decode table for fatal thermal errors (FERR_FAT_THR bits). */
static struct mch_error_code fat_thr_error_code[] = {
	{ 2, EMASK_THR_F2, ERR_FAT_THR_F2 },
	{ 1, EMASK_THR_F1, ERR_FAT_THR_F1 }
};

/* Decode table for non-fatal thermal errors (FERR_NF_THR bits). */
static struct mch_error_code nf_thr_error_code[] = {
	{ 5, EMASK_THR_F5, ERR_NF_THR_F5 },
	{ 4, EMASK_THR_F4, ERR_NF_THR_F4 },
	{ 3, EMASK_THR_F3, ERR_NF_THR_F3 }
};
968
969static int
970intel_thr_err(uint8_t err_fat_thr, uint8_t err_nf_thr)
971{
972	int rt = -1;
973	int nerr = 0;
974	uint16_t emask_thr = 0;
975	int i;
976	int sz;
977
978	sz = sizeof (fat_thr_error_code) / sizeof (struct mch_error_code);
979
980	for (i = 0; i < sz; i++) {
981		if (err_fat_thr & fat_thr_error_code[i].error_bit) {
982			rt = fat_thr_error_code[i].intel_error_list;
983			emask_thr |= fat_thr_error_code[i].emask;
984			nerr++;
985		}
986	}
987
988	sz = sizeof (nf_thr_error_code) / sizeof (struct mch_error_code);
989
990	for (i = 0; i < sz; i++) {
991		if (err_nf_thr & nf_thr_error_code[i].error_bit) {
992			rt = nf_thr_error_code[i].intel_error_list;
993			emask_thr |= nf_thr_error_code[i].emask;
994			nerr++;
995		}
996	}
997
998	if (emask_thr)
999		nb_thr_mask_mc(emask_thr);
1000	if (nerr > 1)
1001		rt = -1;
1002	return (rt);
1003}
1004
1005static int
1006intel_pex_5400_err(uint32_t pex_fat, uint32_t pex_nf_cor)
1007{
1008	int rt = -1;
1009	int nerr = 0;
1010	int i;
1011	int sz;
1012
1013	sz = sizeof (fat_pex_5400_error_code) / sizeof (struct mch_error_code);
1014
1015	for (i = 0; i < sz; i++) {
1016		if (pex_fat & fat_pex_5400_error_code[i].error_bit) {
1017			rt = fat_pex_5400_error_code[i].intel_error_list;
1018			nerr++;
1019		}
1020	}
1021	sz = sizeof (fat_rp_5400_error_code) / sizeof (struct mch_error_code);
1022
1023	for (i = 0; i < sz; i++) {
1024		if (pex_fat & fat_rp_5400_error_code[i].error_bit) {
1025			rt = fat_rp_5400_error_code[i].intel_error_list;
1026			nerr++;
1027		}
1028	}
1029	sz = sizeof (fat_unit_pex_5400_error_code) /
1030	    sizeof (struct mch_error_code);
1031
1032	for (i = 0; i < sz; i++) {
1033		if (pex_fat &
1034		    fat_unit_pex_5400_error_code[i].error_bit) {
1035			rt = fat_unit_pex_5400_error_code[i].intel_error_list;
1036			nerr++;
1037		}
1038	}
1039	sz = sizeof (uncor_pex_5400_error_code) /
1040	    sizeof (struct mch_error_code);
1041
1042	for (i = 0; i < sz; i++) {
1043		if (pex_fat & uncor_pex_5400_error_code[i].error_bit) {
1044			rt = uncor_pex_5400_error_code[i].intel_error_list;
1045			nerr++;
1046		}
1047	}
1048
1049	sz = sizeof (rp_pex_5400_error_code) / sizeof (struct mch_error_code);
1050
1051	for (i = 0; i < sz; i++) {
1052		if (pex_nf_cor & rp_pex_5400_error_code[i].error_bit) {
1053			rt = rp_pex_5400_error_code[i].intel_error_list;
1054			nerr++;
1055		}
1056	}
1057
1058	sz = sizeof (cor_pex_5400_error_code1) / sizeof (struct mch_error_code);
1059
1060	for (i = 0; i < sz; i++) {
1061		if (pex_nf_cor & cor_pex_5400_error_code1[i].error_bit) {
1062			rt = cor_pex_5400_error_code1[i].intel_error_list;
1063			nerr++;
1064		}
1065	}
1066
1067	sz = sizeof (cor_pex_5400_error_code2) / sizeof (struct mch_error_code);
1068
1069	for (i = 0; i < sz; i++) {
1070		if (pex_nf_cor & cor_pex_5400_error_code2[i].error_bit) {
1071			rt = cor_pex_5400_error_code2[i].intel_error_list;
1072			nerr++;
1073		}
1074	}
1075
1076	sz = sizeof (cor_pex_5400_error_code3) / sizeof (struct mch_error_code);
1077
1078	for (i = 0; i < sz; i++) {
1079		if (pex_nf_cor & cor_pex_5400_error_code3[i].error_bit) {
1080			rt = cor_pex_5400_error_code3[i].intel_error_list;
1081			nerr++;
1082		}
1083	}
1084
1085	if (nerr > 1)
1086		rt = -1;
1087	return (rt);
1088}
1089
/*
 * Capture the PCI Express error registers for the port implicated by
 * the global FERR value into the logout area.
 *
 * Returns 0 when none of the four fatal/non-fatal error registers have
 * any bits set (spurious - nothing worth reporting), 1 otherwise.
 * *interpose is set non-zero if any of the reads was satisfied by an
 * error-injection interposition.
 */
static int
log_pex_err(uint64_t ferr, nb_regs_t *rp, int willpanic, int *interpose)
{
	uint8_t pex = (uint8_t)-1;
	int t = 0;

	rp->flag = NB_REG_LOG_PEX;
	pex = GE_ERR_PEX(ferr);		/* which PCI-E port faulted */

	rp->nb.pex_regs.pex = pex;
	rp->nb.pex_regs.pex_fat_ferr =  PEX_FAT_FERR_RD(pex, interpose);
	rp->nb.pex_regs.pex_fat_nerr = PEX_FAT_NERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.pex_nf_corr_ferr = PEX_NF_FERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.pex_nf_corr_nerr = PEX_NF_NERR_RD(pex, &t);
	*interpose |= t;
	/* no error bits anywhere: treat as spurious and bail out early */
	if (rp->nb.pex_regs.pex_fat_ferr == 0 &&
	    rp->nb.pex_regs.pex_fat_nerr == 0 &&
	    rp->nb.pex_regs.pex_nf_corr_ferr == 0 &&
	    rp->nb.pex_regs.pex_nf_corr_nerr == 0)
		return (0);
	rp->nb.pex_regs.uncerrsev = UNCERRSEV_RD(pex);
	rp->nb.pex_regs.rperrsts = RPERRSTS_RD(pex);
	rp->nb.pex_regs.rperrsid = RPERRSID_RD(pex);
	/*
	 * NOTE(review): pex is unconditionally overwritten by GE_ERR_PEX()
	 * above, so this guard only matters if that macro can yield 0xff --
	 * confirm; the neighboring reads above are not guarded the same way.
	 */
	if (pex != (uint8_t)-1)
		rp->nb.pex_regs.uncerrsts = UNCERRSTS_RD(pex);
	else
		rp->nb.pex_regs.uncerrsts = 0;
	rp->nb.pex_regs.aerrcapctrl = AERRCAPCTRL_RD(pex);
	rp->nb.pex_regs.corerrsts = CORERRSTS_RD(pex);
	rp->nb.pex_regs.pexdevsts = PEXDEVSTS_RD(pex);

	/*
	 * Write back (clear) the logged error state only when we are not
	 * about to panic -- presumably so the registers remain intact for
	 * post-mortem inspection; confirm against the panic flow.
	 */
	if (!willpanic) {
		if (rp->nb.pex_regs.pex_fat_ferr || *interpose)
			PEX_FAT_FERR_WR(pex, rp->nb.pex_regs.pex_fat_ferr);
		if (rp->nb.pex_regs.pex_fat_nerr)
			PEX_FAT_NERR_WR(pex, rp->nb.pex_regs.pex_fat_nerr);
		if (rp->nb.pex_regs.pex_nf_corr_ferr || *interpose)
			PEX_NF_FERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_ferr);
		if (rp->nb.pex_regs.pex_nf_corr_nerr)
			PEX_NF_NERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_nerr);
		if (*interpose)
			UNCERRSTS_WR(pex, rp->nb.pex_regs.uncerrsts);
		if (*interpose)
			RPERRSTS_WR(pex, rp->nb.pex_regs.rperrsts);
		if (*interpose)
			PEXDEVSTS_WR(pex, 0);
	}
	return (1);
}
1141
/*
 * Capture the fatal FB-DIMM error registers for the faulting branch
 * into the logout area.  The channel is taken from bits 29:28 of
 * FERR_FAT_FBD and the branch is channel/2.  A per-branch running
 * count of uncorrectable errors is maintained in uerrcnt[] so the
 * previous value can be reported alongside the current one.
 * *interpose is set non-zero if any read was satisfied by an
 * error-injection interposition.
 */
static void
log_fat_fbd_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int channel, branch;
	int t = 0;

	rp->flag = NB_REG_LOG_FAT_FBD;
	rp->nb.fat_fbd_regs.ferr_fat_fbd = FERR_FAT_FBD_RD(interpose);
	channel = (rp->nb.fat_fbd_regs.ferr_fat_fbd >> 28) & 3;	/* bits 29:28 */
	branch = channel >> 1;		/* branch = channel / 2 */
	rp->nb.fat_fbd_regs.nerr_fat_fbd = NERR_FAT_FBD_RD(&t);
	*interpose |= t;
	rp->nb.fat_fbd_regs.nrecmema = NRECMEMA_RD(branch);
	rp->nb.fat_fbd_regs.nrecmemb = NRECMEMB_RD(branch);
	rp->nb.fat_fbd_regs.nrecfglog = NRECFGLOG_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbda = NRECFBDA_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdb = NRECFBDB_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdc = NRECFBDC_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdd = NRECFBDD_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbde = NRECFBDE_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdf = NRECFBDF_RD(branch);
	rp->nb.fat_fbd_regs.spcps = SPCPS_RD(branch);
	rp->nb.fat_fbd_regs.spcpc = SPCPC_RD(branch);
	/* snapshot previous uncorrectable count, then remember the new one */
	rp->nb.fat_fbd_regs.uerrcnt = UERRCNT_RD(branch);
	rp->nb.fat_fbd_regs.uerrcnt_last = uerrcnt[branch];
	uerrcnt[branch] = rp->nb.fat_fbd_regs.uerrcnt;
	rp->nb.fat_fbd_regs.badrama = BADRAMA_RD(branch);
	rp->nb.fat_fbd_regs.badramb = BADRAMB_RD(branch);
	rp->nb.fat_fbd_regs.badcnt = BADCNT_RD(branch);
	/* leave the registers untouched if we are going to panic */
	if (!willpanic) {
		if (rp->nb.fat_fbd_regs.ferr_fat_fbd || *interpose)
			FERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.ferr_fat_fbd);
		if (rp->nb.fat_fbd_regs.nerr_fat_fbd)
			NERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.nerr_fat_fbd);
		/*
		 * if interpose write read-only registers to clear from pcii
		 * cache
		 */
		if (*interpose) {
			NRECMEMA_WR(branch);
			NRECMEMB_WR(branch);
			NRECFGLOG_WR(branch);
			NRECFBDA_WR(branch);
			NRECFBDB_WR(branch);
			NRECFBDC_WR(branch);
			NRECFBDD_WR(branch);
			NRECFBDE_WR(branch);
			NRECFBDF_WR(branch);
		}
	}
}
1193
/*
 * Capture the non-fatal FB-DIMM error registers for the faulting
 * channel/branch into the logout area.  On the 7300 and 5400 chipsets
 * four correctable-error counter registers are read per channel; on
 * other chipsets a single CERRCNT per branch is used and the remaining
 * slots are zeroed.  Previous counter values are kept in the
 * cerrcnt[a-d][][] arrays so deltas can be reported.  *interpose is
 * set non-zero if any read was satisfied by an error-injection
 * interposition.
 */
static void
log_nf_fbd_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int channel, branch;
	int t = 0;

	rp->flag = NB_REG_LOG_NF_FBD;
	rp->nb.nf_fbd_regs.ferr_nf_fbd = FERR_NF_FBD_RD(interpose);
	channel = (rp->nb.nf_fbd_regs.ferr_nf_fbd >> 28) & 3;	/* bits 29:28 */
	branch = channel >> 1;		/* branch = channel / 2 */
	rp->nb.nf_fbd_regs.nerr_nf_fbd = NERR_NF_FBD_RD(&t);
	*interpose |= t;
	rp->nb.nf_fbd_regs.redmemb = REDMEMB_RD();
	rp->nb.nf_fbd_regs.recmema = RECMEMA_RD(branch);
	rp->nb.nf_fbd_regs.recmemb = RECMEMB_RD(branch);
	rp->nb.nf_fbd_regs.recfglog = RECFGLOG_RD(branch);
	rp->nb.nf_fbd_regs.recfbda = RECFBDA_RD(branch);
	rp->nb.nf_fbd_regs.recfbdb = RECFBDB_RD(branch);
	rp->nb.nf_fbd_regs.recfbdc = RECFBDC_RD(branch);
	rp->nb.nf_fbd_regs.recfbdd = RECFBDD_RD(branch);
	rp->nb.nf_fbd_regs.recfbde = RECFBDE_RD(branch);
	rp->nb.nf_fbd_regs.recfbdf = RECFBDF_RD(branch);
	rp->nb.nf_fbd_regs.spcps = SPCPS_RD(branch);
	rp->nb.nf_fbd_regs.spcpc = SPCPC_RD(branch);
	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
		rp->nb.nf_fbd_regs.cerrcnta = CERRCNTA_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntb = CERRCNTB_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntc = CERRCNTC_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntd = CERRCNTD_RD(branch, channel);
	} else {
		rp->nb.nf_fbd_regs.cerrcnta = CERRCNT_RD(branch);
		rp->nb.nf_fbd_regs.cerrcntb = 0;
		rp->nb.nf_fbd_regs.cerrcntc = 0;
		rp->nb.nf_fbd_regs.cerrcntd = 0;
	}
	/* snapshot previous correctable counts, then remember the new ones */
	rp->nb.nf_fbd_regs.cerrcnta_last = cerrcnta[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntb_last = cerrcntb[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntc_last = cerrcntc[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntd_last = cerrcntd[branch][channel & 1];
	cerrcnta[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcnta;
	cerrcntb[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntb;
	cerrcntc[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntc;
	cerrcntd[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntd;
	rp->nb.nf_fbd_regs.badrama = BADRAMA_RD(branch);
	rp->nb.nf_fbd_regs.badramb = BADRAMB_RD(branch);
	rp->nb.nf_fbd_regs.badcnt = BADCNT_RD(branch);
	/* leave the registers untouched if we are going to panic */
	if (!willpanic) {
		if (rp->nb.nf_fbd_regs.ferr_nf_fbd || *interpose)
			FERR_NF_FBD_WR(rp->nb.nf_fbd_regs.ferr_nf_fbd);
		if (rp->nb.nf_fbd_regs.nerr_nf_fbd)
			NERR_NF_FBD_WR(rp->nb.nf_fbd_regs.nerr_nf_fbd);
		/*
		 * if interpose write read-only registers to clear from pcii
		 * cache
		 */
		if (*interpose) {
			RECMEMA_WR(branch);
			RECMEMB_WR(branch);
			RECFGLOG_WR(branch);
			RECFBDA_WR(branch);
			RECFBDB_WR(branch);
			RECFBDC_WR(branch);
			RECFBDD_WR(branch);
			RECFBDE_WR(branch);
			RECFBDF_WR(branch);
			SPCPS_WR(branch);
		}
	}
}
1263
/*
 * Capture the non-fatal memory error registers for the faulting
 * channel into the logout area.  The channel is taken from bit 28 of
 * FERR_NF_MEM and doubles as the branch index.  Previous correctable
 * counter values are kept in cerrcnta/cerrcntb so deltas can be
 * reported.  Returns 1 when the error should be discarded as spurious
 * (non-zero channel in single-channel mode), 0 otherwise.  *interpose
 * is set non-zero if any read was satisfied by an error-injection
 * interposition.
 */
static int
log_nf_mem_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int channel, branch;
	int t = 0;
	int rt = 0;

	rp->flag = NB_REG_LOG_NF_MEM;

	/* Memory err registers */
	rp->nb.nf_mem_regs.ferr_nf_mem = FERR_NF_MEM_RD(interpose);
	channel = (rp->nb.nf_mem_regs.ferr_nf_mem >> 28) & 0x1;	/* bit 28 */
	branch = channel;
	rp->nb.nf_mem_regs.nerr_nf_mem = NERR_NF_MEM_RD(&t);
	*interpose |= t;
	rp->nb.nf_mem_regs.redmema = MEM_REDMEMA_RD(branch);
	rp->nb.nf_mem_regs.redmemb = MEM_REDMEMB_RD(branch);
	rp->nb.nf_mem_regs.recmema = MEM_RECMEMA_RD(branch);
	rp->nb.nf_mem_regs.recmemb = MEM_RECMEMB_RD(branch);
	rp->nb.nf_mem_regs.nrecmema = MEM_NRECMEMA_RD(branch);
	rp->nb.nf_mem_regs.nrecmemb = MEM_NRECMEMB_RD(branch);

	/* spare rank */
	rp->nb.nf_mem_regs.spcps = SPCPS_RD(branch);
	rp->nb.nf_mem_regs.spcpc = SPCPC_RD(branch);

	/* RAS registers */
	rp->nb.nf_mem_regs.cerrcnt = MEM_CERRCNT_RD(branch);
	rp->nb.nf_mem_regs.cerrcnt_ext = (uint32_t)MEM_CERRCNT_EXT_RD(branch);
	/* snapshot previous correctable counts, then remember the new ones */
	rp->nb.nf_mem_regs.cerrcnt_last = cerrcnta[branch][channel & 1];
	rp->nb.nf_mem_regs.cerrcnt_ext_last = cerrcntb[branch][channel & 1];
	cerrcnta[branch][channel & 1] = rp->nb.nf_mem_regs.cerrcnt;
	cerrcntb[branch][channel & 1] = rp->nb.nf_mem_regs.cerrcnt_ext;
	rp->nb.nf_mem_regs.badram = BADRAMA_RD(branch);
	rp->nb.nf_mem_regs.badcnt = BADCNT_RD(branch);
	rp->nb.nf_mem_regs.validlog = VALIDLOG_RD(branch);

	/* leave the registers untouched if we are going to panic */
	if (!willpanic) {
		if (rp->nb.nf_mem_regs.ferr_nf_mem || *interpose)
			FERR_NF_MEM_WR(rp->nb.nf_mem_regs.ferr_nf_mem);
		if (rp->nb.nf_mem_regs.nerr_nf_mem)
			NERR_NF_MEM_WR(rp->nb.nf_mem_regs.nerr_nf_mem);
		/*
		 * if interpose, write read-only registers to clear from pci
		 * cache
		 */
		if (*interpose) {
			MEM_NRECMEMA_WR(branch);
			MEM_NRECMEMB_WR(branch);
			MEM_REDMEMA_WR(branch);
			MEM_REDMEMB_WR(branch);
			MEM_RECMEMA_WR(branch);
			MEM_RECMEMB_WR(branch);
			SPCPS_WR(branch);
		}
	}
	if (nb_mode == NB_MEMORY_SINGLE_CHANNEL && channel != 0) {
		/*
		 * In the single channel mode, all dimms are on the channel 0.
		 * Invalidate this error if the channel number is invalid.
		 */
		rt = 1;
	}
	return (rt);
}
1329
1330static void
1331log_ferr(uint64_t ferr, uint32_t *nerrp, nb_logout_t *log, int willpanic)
1332{
1333	nb_regs_t *rp = &log->nb_regs;
1334	uint32_t nerr = *nerrp;
1335	int interpose = 0;
1336	int spurious = 0;
1337
1338	log->acl_timestamp = gethrtime_waitfree();
1339	if ((ferr & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
1340		*nerrp = nerr & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
1341		if (log_pex_err(ferr, rp, willpanic, &interpose) == 0)
1342			return;
1343	} else if ((ferr & GE_FBD_FATAL) != 0) {
1344		log_fat_fbd_err(rp, willpanic, &interpose);
1345		*nerrp = nerr & ~GE_NERR_FBD_FATAL;
1346	} else if ((ferr & GE_FBD_NF) != 0) {
1347		log_nf_fbd_err(rp, willpanic, &interpose);
1348		*nerrp = nerr & ~GE_NERR_FBD_NF;
1349	} else if ((ferr & GE_MEM_NF) != 0) {
1350		spurious = log_nf_mem_err(rp, willpanic, &interpose);
1351		*nerrp = nerr & ~GE_NERR_MEM_NF;
1352	} else if ((ferr & (GE_FERR_FSB_FATAL | GE_FERR_FSB_NF)) != 0) {
1353		log_fsb_err(ferr, rp, willpanic, &interpose);
1354		*nerrp = nerr & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
1355	} else if ((ferr & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
1356		log_dma_err(rp, &interpose);
1357		*nerrp = nerr & ~(GE_DMA_FATAL | GE_DMA_NF);
1358	} else if ((ferr & (GE_INT_FATAL | GE_INT_NF)) != 0) {
1359		spurious = log_int_err(rp, willpanic, &interpose);
1360		*nerrp = nerr & ~(GE_INT_FATAL | GE_INT_NF);
1361	} else if (nb_chipset == INTEL_NB_5400 &&
1362	    (ferr & (GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF)) != 0) {
1363		log_thermal_err(rp, willpanic, &interpose);
1364		*nerrp = nerr & ~(GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF);
1365	}
1366	if (interpose)
1367		log->type = "inject";
1368	else
1369		log->type = "error";
1370	if (!spurious) {
1371		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
1372		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
1373	}
1374}
1375
1376static void
1377log_nerr(uint32_t *errp, nb_logout_t *log, int willpanic)
1378{
1379	uint32_t err;
1380	nb_regs_t *rp = &log->nb_regs;
1381	int interpose = 0;
1382	int spurious = 0;
1383
1384	err = *errp;
1385	log->acl_timestamp = gethrtime_waitfree();
1386	if ((err & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
1387		*errp = err & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
1388		if (log_pex_err(err, rp, willpanic, &interpose) == 0)
1389			return;
1390	} else if ((err & GE_NERR_FBD_FATAL) != 0) {
1391		log_fat_fbd_err(rp, willpanic, &interpose);
1392		*errp = err & ~GE_NERR_FBD_FATAL;
1393	} else if ((err & GE_NERR_FBD_NF) != 0) {
1394		log_nf_fbd_err(rp, willpanic, &interpose);
1395		*errp = err & ~GE_NERR_FBD_NF;
1396	} else if ((err & GE_NERR_MEM_NF) != 0) {
1397		spurious = log_nf_mem_err(rp, willpanic, &interpose);
1398		*errp = err & ~GE_NERR_MEM_NF;
1399	} else if ((err & (GE_NERR_FSB_FATAL | GE_NERR_FSB_NF)) != 0) {
1400		log_fsb_err(GE_NERR_TO_FERR_FSB(err), rp, willpanic,
1401		    &interpose);
1402		*errp = err & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
1403	} else if ((err & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
1404		log_dma_err(rp, &interpose);
1405		*errp = err & ~(GE_DMA_FATAL | GE_DMA_NF);
1406	} else if ((err & (GE_INT_FATAL | GE_INT_NF)) != 0) {
1407		spurious = log_int_err(rp, willpanic, &interpose);
1408		*errp = err & ~(GE_INT_FATAL | GE_INT_NF);
1409	}
1410	if (interpose)
1411		log->type = "inject";
1412	else
1413		log->type = "error";
1414	if (!spurious) {
1415		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
1416		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
1417	}
1418}
1419
1420/*ARGSUSED*/
1421void
1422nb_error_trap(cmi_hdl_t hdl, boolean_t ismc, boolean_t willpanic)
1423{
1424	uint64_t ferr;
1425	uint32_t nerr, err;
1426	int nmc = 0;
1427	int i;
1428
1429	if (mutex_tryenter(&nb_mutex) == 0)
1430		return;
1431
1432	nerr = NERR_GLOBAL_RD();
1433	err = nerr;
1434	for (i = 0; i < NB_MAX_ERRORS; i++) {
1435		ferr = FERR_GLOBAL_RD();
1436		nb_log.nb_regs.chipset = nb_chipset;
1437		nb_log.nb_regs.ferr = ferr;
1438		nb_log.nb_regs.nerr = nerr;
1439		if (ferr) {
1440			log_ferr(ferr, &err, &nb_log, willpanic);
1441			FERR_GLOBAL_WR(ferr);
1442			nmc++;
1443		} else if (err) {
1444			log_nerr(&err, &nb_log, willpanic);
1445			nmc++;
1446		}
1447	}
1448	if (nerr) {
1449		NERR_GLOBAL_WR(nerr);
1450	}
1451	if (nmc == 0 && nb_mask_mc_set)
1452		nb_mask_mc_reset();
1453	mutex_exit(&nb_mutex);
1454}
1455
1456static void
1457nb_fsb_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1458    nb_scatchpad_t *data)
1459{
1460	int intel_error_list;
1461	char buf[32];
1462
1463	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FSB,
1464	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.fsb, NULL);
1465	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FSB,
1466	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_fat_fsb, NULL);
1467	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FSB,
1468	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_fat_fsb, NULL);
1469	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FSB,
1470	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_nf_fsb, NULL);
1471	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FSB,
1472	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_nf_fsb, NULL);
1473	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB,
1474	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.nrecfsb, NULL);
1475	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB_ADDR,
1476	    DATA_TYPE_UINT64, nb_regs->nb.fsb_regs.nrecfsb_addr, NULL);
1477	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFSB,
1478	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.recfsb, NULL);
1479	intel_error_list = data->intel_error_list;
1480	if (intel_error_list >= 0)
1481		(void) snprintf(buf, sizeof (buf), "F%d", intel_error_list);
1482	else
1483		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1484	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1485	    DATA_TYPE_STRING, buf, NULL);
1486}
1487
1488static void
1489nb_pex_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1490    nb_scatchpad_t *data)
1491{
1492	int intel_error_list;
1493	char buf[32];
1494
1495	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX,
1496	    DATA_TYPE_UINT8, nb_regs->nb.pex_regs.pex, NULL);
1497	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_FERR,
1498	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_ferr, NULL);
1499	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_NERR,
1500	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_nerr, NULL);
1501	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_FERR,
1502	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_ferr, NULL);
1503	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_NERR,
1504	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_nerr, NULL);
1505	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSEV,
1506	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsev, NULL);
1507	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSTS,
1508	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsts, NULL);
1509	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSID,
1510	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsid, NULL);
1511	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSTS,
1512	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsts, NULL);
1513	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AERRCAPCTRL,
1514	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.aerrcapctrl, NULL);
1515	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CORERRSTS,
1516	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.corerrsts, NULL);
1517	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
1518	    DATA_TYPE_UINT16, nb_regs->nb.pex_regs.pexdevsts, NULL);
1519	intel_error_list = data->intel_error_list;
1520	if (intel_error_list >= 0)
1521		(void) snprintf(buf, sizeof (buf), "IO%d", intel_error_list);
1522	else
1523		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1524	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1525	    DATA_TYPE_STRING, buf, NULL);
1526}
1527
1528static void
1529nb_int_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1530    nb_scatchpad_t *data)
1531{
1532	int intel_error_list;
1533	char buf[32];
1534
1535	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_INT,
1536	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_fat_int, NULL);
1537	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_INT,
1538	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_nf_int, NULL);
1539	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_INT,
1540	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_fat_int, NULL);
1541	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_INT,
1542	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_nf_int, NULL);
1543	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECINT,
1544	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.nrecint, NULL);
1545	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECINT,
1546	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.recint, NULL);
1547	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECSF,
1548	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.nrecsf, NULL);
1549	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECSF,
1550	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.recsf, NULL);
1551	intel_error_list = data->intel_error_list;
1552	if (intel_error_list >= 0)
1553		(void) snprintf(buf, sizeof (buf), "B%d", intel_error_list);
1554	else
1555		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1556	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1557	    DATA_TYPE_STRING, buf, NULL);
1558}
1559
/*
 * Populate an ereport payload with the captured fatal FB-DIMM error
 * registers.  When the scratchpad decode produced a valid RAS, the
 * bank/cas/ras (and, when known, memory offset and physical address)
 * are included as well.  The Intel error-list entry is emitted as
 * "Mn", or a generic string when the error could not be pinned down.
 */
static void
nb_fat_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &((nb_scatchpad_t *)data)->ms;

	/* sp->ras == -1 means the address could not be decoded */
	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.ferr_fat_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nerr_fat_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFGLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfglog, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbda, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDB,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDC,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDE,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbde, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDF,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.fat_fbd_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.spcpc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT_LAST,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt_last, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badrama, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
	    DATA_TYPE_UINT16, nb_regs->nb.fat_fbd_regs.badramb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badcnt, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}
1629
/*
 * Populate an ereport payload with the captured non-fatal FB-DIMM
 * error registers.  Rank is included when a DIMM could not be decoded
 * but the rank could; bank/cas/ras (and offset/physical address when
 * known) are included when the address decoded.  On the 7300 and 5400
 * chipsets the per-channel correctable counters are emitted (the 7300
 * additionally has the C/D counters); other chipsets get the single
 * CERRCNT.  The Intel error-list entry is emitted as "Mn", or a
 * generic string when the error could not be pinned down.
 */
static void
nb_nf_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm == -1 && sp->rank != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
		    DATA_TYPE_INT32, sp->rank, NULL);
	}
	/* sp->ras == -1 means the address could not be decoded */
	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.ferr_nf_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.nerr_nf_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFGLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfglog, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbda, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDE,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbde, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDF,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.nf_fbd_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.spcpc, NULL);
	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb, NULL);
		if (nb_chipset == INTEL_NB_7300) {
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC,
			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntc,
			    NULL);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD,
			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntd,
			    NULL);
		}
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
		    NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb_last,
		    NULL);
		if (nb_chipset == INTEL_NB_7300) {
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC_LAST,
			    DATA_TYPE_UINT32,
			    nb_regs->nb.nf_fbd_regs.cerrcntc_last, NULL);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD_LAST,
			    DATA_TYPE_UINT32,
			    nb_regs->nb.nf_fbd_regs.cerrcntd_last, NULL);
		}
	} else {
		/* other chipsets only captured the single counter (slot a) */
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
		    NULL);
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badrama, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
	    DATA_TYPE_UINT16, nb_regs->nb.nf_fbd_regs.badramb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badcnt, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}
1737
/*
 * Populate an ereport payload with the captured non-fatal memory
 * error registers.  Rank is included when a DIMM could not be decoded
 * but the rank could; bank/cas/ras (and offset/physical address when
 * known) are included when the address decoded.  The Intel error-list
 * entry is emitted as "Mn", or a generic string when the error could
 * not be pinned down.
 */
static void
nb_nf_mem_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &((nb_scatchpad_t *)data)->ms;

	if (sp->dimm == -1 && sp->rank != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
		    DATA_TYPE_INT32, sp->rank, NULL);
	}
	/* sp->ras == -1 means the address could not be decoded */
	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_MEM,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.ferr_nf_mem, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_MEM,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nerr_nf_mem, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.recmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.recmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_REDMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.redmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_REDMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.redmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nrecmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nrecmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.nf_mem_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.spcpc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_last, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_EXT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_ext, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_EXT_LAST,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_ext_last, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAM,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.badram, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.badcnt, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_VALIDLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.validlog, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}
1809
1810static void
1811nb_dma_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload)
1812{
1813	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PCISTS,
1814	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pcists, NULL);
1815	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
1816	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pexdevsts, NULL);
1817}
1818
1819static void
1820nb_thr_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1821    nb_scatchpad_t *data)
1822{
1823	char buf[32];
1824
1825	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_THR,
1826	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_fat_thr, NULL);
1827	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_THR,
1828	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_fat_thr, NULL);
1829	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_THR,
1830	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_nf_thr, NULL);
1831	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_THR,
1832	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_nf_thr, NULL);
1833	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CTSTS,
1834	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ctsts, NULL);
1835	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_THRTSTS,
1836	    DATA_TYPE_UINT16, nb_regs->nb.thr_regs.thrtsts, NULL);
1837	if (data->intel_error_list >= 0) {
1838		(void) snprintf(buf, sizeof (buf), "TH%d",
1839		    data->intel_error_list);
1840	} else {
1841		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1842	}
1843	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1844	    DATA_TYPE_STRING, buf, NULL);
1845}
1846
1847static void
1848nb_ereport_add_logout(nvlist_t *payload, const nb_logout_t *acl,
1849    nb_scatchpad_t *data)
1850{
1851	const nb_regs_t *nb_regs = &acl->nb_regs;
1852
1853	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_MC_TYPE,
1854	    DATA_TYPE_STRING, acl->type, NULL);
1855	switch (nb_regs->flag) {
1856	case NB_REG_LOG_FSB:
1857		nb_fsb_err_payload(nb_regs, payload, data);
1858		break;
1859	case NB_REG_LOG_PEX:
1860		nb_pex_err_payload(nb_regs, payload, data);
1861		break;
1862	case NB_REG_LOG_INT:
1863		nb_int_err_payload(nb_regs, payload, data);
1864		break;
1865	case NB_REG_LOG_FAT_FBD:
1866		nb_fat_fbd_err_payload(nb_regs, payload, data);
1867		break;
1868	case NB_REG_LOG_NF_FBD:
1869		nb_nf_fbd_err_payload(nb_regs, payload, data);
1870		break;
1871	case NB_REG_LOG_DMA:
1872		nb_dma_err_payload(nb_regs, payload);
1873		break;
1874	case NB_REG_LOG_THR:
1875		nb_thr_err_payload(nb_regs, payload, data);
1876		break;
1877	case NB_REG_LOG_NF_MEM:
1878		nb_nf_mem_err_payload(nb_regs, payload, data);
1879		break;
1880	default:
1881		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_GLOBAL,
1882		    DATA_TYPE_UINT64, nb_regs->ferr, NULL);
1883		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_GLOBAL,
1884		    DATA_TYPE_UINT32, nb_regs->nerr, NULL);
1885		break;
1886	}
1887}
1888
1889void
1890nb_fsb_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1891    nb_scatchpad_t *data)
1892{
1893	int chip;
1894
1895	if (nb_chipset == INTEL_NB_7300)
1896		chip = nb_regs->nb.fsb_regs.fsb * 2;
1897	else
1898		chip = nb_regs->nb.fsb_regs.fsb;
1899	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1900	    "motherboard", 0, "chip", chip);
1901
1902	if (nb_regs->nb.fsb_regs.ferr_fat_fsb == 0 &&
1903	    nb_regs->nb.fsb_regs.ferr_nf_fsb == 0) {
1904		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
1905		    nb_regs->nb.fsb_regs.nerr_fat_fsb,
1906		    nb_regs->nb.fsb_regs.nerr_nf_fsb);
1907	} else {
1908		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
1909		    nb_regs->nb.fsb_regs.ferr_fat_fsb,
1910		    nb_regs->nb.fsb_regs.ferr_nf_fsb);
1911	}
1912	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1913	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "fsb");
1914}
1915
1916void
1917nb_pex_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1918    nb_scatchpad_t *data)
1919{
1920	int hostbridge;
1921
1922	if (nb_regs->nb.pex_regs.pex == 0) {
1923		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1924		    "motherboard", 0);
1925	} else {
1926		hostbridge = nb_regs->nb.pex_regs.pex - 1;
1927		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1928		    "motherboard", 0,
1929		    "hostbridge", hostbridge);
1930	}
1931
1932	if (nb_regs->nb.pex_regs.pex_fat_ferr == 0 &&
1933	    nb_regs->nb.pex_regs.pex_nf_corr_ferr == 0) {
1934		if (nb_chipset == INTEL_NB_5400) {
1935			data->intel_error_list =
1936			    intel_pex_5400_err(
1937			    nb_regs->nb.pex_regs.pex_fat_nerr,
1938			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
1939		} else {
1940			data->intel_error_list =
1941			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_nerr,
1942			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
1943		}
1944	} else {
1945		if (nb_chipset == INTEL_NB_5400) {
1946			data->intel_error_list =
1947			    intel_pex_5400_err(
1948			    nb_regs->nb.pex_regs.pex_fat_ferr,
1949			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
1950		} else {
1951			data->intel_error_list =
1952			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_ferr,
1953			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
1954		}
1955	}
1956
1957	if (nb_regs->nb.pex_regs.pex == 0) {
1958		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1959		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "esi");
1960	} else {
1961		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1962		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "pex");
1963	}
1964}
1965
1966void
1967nb_int_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1968    void *data)
1969{
1970	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1971	    "motherboard", 0);
1972
1973	if (nb_regs->nb.int_regs.ferr_fat_int == 0 &&
1974	    nb_regs->nb.int_regs.ferr_nf_int == 0) {
1975		((nb_scatchpad_t *)data)->intel_error_list =
1976		    intel_int_err(nb_regs->nb.int_regs.nerr_fat_int,
1977		    nb_regs->nb.int_regs.nerr_nf_int);
1978	} else {
1979		((nb_scatchpad_t *)data)->intel_error_list =
1980		    intel_int_err(nb_regs->nb.int_regs.ferr_fat_int,
1981		    nb_regs->nb.int_regs.ferr_nf_int);
1982	}
1983	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1984	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "ie");
1985}
1986
1987void
1988nb_fat_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1989    void *data)
1990{
1991	char *intr;
1992	nb_mem_scatchpad_t *sp;
1993
1994	intr = fat_memory_error(nb_regs, data);
1995	sp = &((nb_scatchpad_t *)data)->ms;
1996
1997	if (sp->dimm != -1) {
1998		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
1999		    "motherboard", 0,
2000		    "memory-controller", sp->branch,
2001		    "dram-channel", sp->channel,
2002		    "dimm", sp->dimm,
2003		    "rank", sp->rank);
2004	} else if (sp->channel != -1) {
2005		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
2006		    "motherboard", 0,
2007		    "memory-controller", sp->branch,
2008		    "dram-channel", sp->channel);
2009	} else if (sp->branch != -1) {
2010		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
2011		    "motherboard", 0,
2012		    "memory-controller", sp->branch);
2013	} else {
2014		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2015		    "motherboard", 0);
2016	}
2017
2018	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
2019	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
2020}
2021
2022void
2023nb_nf_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
2024    void *data)
2025{
2026	char *intr;
2027	nb_mem_scatchpad_t *sp;
2028
2029	intr = nf_memory_error(nb_regs, data);
2030	sp = &((nb_scatchpad_t *)data)->ms;
2031
2032	if (sp->dimm != -1) {
2033		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
2034		    "motherboard", 0,
2035		    "memory-controller", sp->branch,
2036		    "dram-channel", sp->channel,
2037		    "dimm", sp->dimm,
2038		    "rank", sp->rank);
2039	} else if (sp->channel != -1) {
2040		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
2041		    "motherboard", 0,
2042		    "memory-controller", sp->branch,
2043		    "dram-channel", sp->channel);
2044	} else if (sp->branch != -1) {
2045		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
2046		    "motherboard", 0,
2047		    "memory-controller", sp->branch);
2048	} else {
2049		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2050		    "motherboard", 0);
2051	}
2052
2053	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
2054	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
2055}
2056
2057void
2058nb_dma_report(char *class, nvlist_t *detector)
2059{
2060	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2061	    "motherboard", 0);
2062
2063	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
2064	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "dma");
2065}
2066
2067void
2068nb_thr_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
2069    void *data)
2070{
2071	((nb_scatchpad_t *)data)->intel_error_list =
2072	    intel_thr_err(nb_regs->nb.thr_regs.ferr_fat_thr,
2073	    nb_regs->nb.thr_regs.ferr_nf_thr);
2074	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2075	    "motherboard", 0);
2076
2077	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
2078	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "otf");
2079}
2080
2081void
2082nb_nf_mem_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
2083    void *data)
2084{
2085	char *intr;
2086	nb_mem_scatchpad_t *sp;
2087
2088	intr = nf_mem_error(nb_regs, data);
2089	sp = &((nb_scatchpad_t *)data)->ms;
2090
2091	if (sp->dimm != -1) {
2092		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
2093		    "motherboard", 0,
2094		    "memory-controller", sp->branch,
2095		    "dram-channel", sp->channel,
2096		    "dimm", sp->dimm,
2097		    "rank", sp->rank);
2098	} else if (sp->channel != -1) {
2099		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
2100		    "motherboard", 0,
2101		    "memory-controller", sp->branch,
2102		    "dram-channel", sp->channel);
2103	} else if (sp->branch != -1) {
2104		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
2105		    "motherboard", 0,
2106		    "memory-controller", sp->branch);
2107	} else {
2108		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2109		    "motherboard", 0);
2110	}
2111
2112	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
2113	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
2114}
2115
2116
2117nvlist_t *
2118nb_report(const nb_regs_t *nb_regs, char *class, nv_alloc_t *nva, void *scratch)
2119{
2120	nvlist_t *detector = fm_nvlist_create(nva);
2121
2122	switch (nb_regs->flag) {
2123	case NB_REG_LOG_FSB:
2124		nb_fsb_report(nb_regs, class, detector, scratch);
2125		break;
2126	case NB_REG_LOG_PEX:
2127		nb_pex_report(nb_regs, class, detector, scratch);
2128		break;
2129	case NB_REG_LOG_INT:
2130		nb_int_report(nb_regs, class, detector, scratch);
2131		break;
2132	case NB_REG_LOG_FAT_FBD:
2133		nb_fat_fbd_report(nb_regs, class, detector, scratch);
2134		break;
2135	case NB_REG_LOG_NF_FBD:
2136		nb_nf_fbd_report(nb_regs, class, detector, scratch);
2137		break;
2138	case NB_REG_LOG_DMA:
2139		nb_dma_report(class, detector);
2140		break;
2141	case NB_REG_LOG_THR:
2142		nb_thr_report(nb_regs, class, detector, scratch);
2143		break;
2144	case NB_REG_LOG_NF_MEM:
2145		nb_nf_mem_report(nb_regs, class, detector, scratch);
2146		break;
2147	default:
2148		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2149		    "motherboard", 0);
2150
2151		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
2152		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "unknown");
2153	}
2154	return (detector);
2155}
2156
2157/*ARGSUSED*/
2158void
2159nb_drain(void *ignored, const void *data, const errorq_elem_t *eqe)
2160{
2161	nb_logout_t *acl = (nb_logout_t *)data;
2162	errorq_elem_t *eqep, *scr_eqep;
2163	nvlist_t *ereport, *detector;
2164	nv_alloc_t *nva = NULL;
2165	char buf[FM_MAX_CLASS];
2166	nb_scatchpad_t nb_scatchpad;
2167
2168	if (panicstr) {
2169		if ((eqep = errorq_reserve(ereport_errorq)) == NULL)
2170			return;
2171		ereport = errorq_elem_nvl(ereport_errorq, eqep);
2172		/*
2173		 * Now try to allocate another element for scratch space and
2174		 * use that for further scratch space (eg for constructing
2175		 * nvlists to add the main ereport).  If we can't reserve
2176		 * a scratch element just fallback to working within the
2177		 * element we already have, and hope for the best.  All this
2178		 * is necessary because the fixed buffer nv allocator does
2179		 * not reclaim freed space and nvlist construction is
2180		 * expensive.
2181		 */
2182		if ((scr_eqep = errorq_reserve(ereport_errorq)) != NULL)
2183			nva = errorq_elem_nva(ereport_errorq, scr_eqep);
2184		else
2185			nva = errorq_elem_nva(ereport_errorq, eqep);
2186	} else {
2187		ereport = fm_nvlist_create(NULL);
2188	}
2189	detector = nb_report(&acl->nb_regs, buf, nva, &nb_scatchpad);
2190	if (detector == NULL)
2191		return;
2192	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
2193	    fm_ena_generate(acl->acl_timestamp, FM_ENA_FMT1), detector, NULL);
2194	/*
2195	 * We're done with 'detector' so reclaim the scratch space.
2196	 */
2197	if (panicstr) {
2198		fm_nvlist_destroy(detector, FM_NVA_RETAIN);
2199		nv_alloc_reset(nva);
2200	} else {
2201		fm_nvlist_destroy(detector, FM_NVA_FREE);
2202	}
2203
2204	/*
2205	 * Encode the error-specific data that was saved in the logout area.
2206	 */
2207	nb_ereport_add_logout(ereport, acl, &nb_scatchpad);
2208
2209	if (panicstr) {
2210		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
2211		if (scr_eqep)
2212			errorq_cancel(ereport_errorq, scr_eqep);
2213	} else {
2214		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
2215		fm_nvlist_destroy(ereport, FM_NVA_FREE);
2216	}
2217}
2218