1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include <sys/pci.h>
44 #include <sys/pcie.h>
45 #include "nb5000.h"
46 #include "nb_log.h"
47 #include "dimm_phys.h"
48 #include "rank.h"
49 
/* Non-zero: enable the chipset's hardware patrol/demand scrubbers. */
int nb_hw_memory_scrub_enable = 1;
/* Bumped once the software memory scrubber has been turned off. */
static int nb_sw_scrub_disabled = 0;

int nb_5000_memory_controller = 0;
int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
int nb_channels_per_branch = NB_MAX_CHANNELS_PER_BRANCH;
int nb_dimms_per_channel = 0;

nb_dimm_t **nb_dimms;		/* per-slot dimm state, nb_dimm_slots entries */
int nb_ndimm;			/* count of dimms actually present */
uint32_t nb_chipset;		/* INTEL_NB_* chipset identifier */
enum nb_memory_mode nb_mode;	/* normal/mirror/single-channel/spare-rank */
bank_select_t nb_banks[NB_MAX_MEM_BRANCH_SELECT];
rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MAX_MEM_RANK_SELECT];
uint32_t top_of_low_memory;	/* base of the PCI hole below 4GB */
uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];

extern int nb_no_smbios;

errorq_t *nb_queue;
kmutex_t nb_mutex;

static int nb_dimm_slots;	/* total dimm slots across all channels */

/*
 * BIOS-programmed error-detect/mask register values saved at init time
 * and restored by the corresponding *_fini() routines.
 */
static uint32_t nb_err0_int;
static uint32_t nb_err1_int;
static uint32_t nb_err2_int;
static uint32_t nb_mcerr_int;
static uint32_t nb_emask_int;

static uint32_t nb_err0_fbd;
static uint32_t nb_err1_fbd;
static uint32_t nb_err2_fbd;
static uint32_t nb_mcerr_fbd;
static uint32_t nb_emask_fbd;

static uint32_t nb_err0_mem;
static uint32_t nb_err1_mem;
static uint32_t nb_err2_mem;
static uint32_t nb_mcerr_mem;
static uint32_t nb_emask_mem;

static uint16_t nb_err0_fsb;
static uint16_t nb_err1_fsb;
static uint16_t nb_err2_fsb;
static uint16_t nb_mcerr_fsb;
static uint16_t nb_emask_fsb;

static uint16_t nb_err0_thr;
static uint16_t nb_err1_thr;
static uint16_t nb_err2_thr;
static uint16_t nb_mcerr_thr;
static uint16_t nb_emask_thr;

/* Saved PCI-Express error mask/severity state, per PCI device. */
static uint32_t	emask_uncor_pex[NB_PCI_DEV];
static uint32_t emask_cor_pex[NB_PCI_DEV];
static uint32_t emask_rp_pex[NB_PCI_DEV];
static uint32_t docmd_pex[NB_PCI_DEV];
static uint32_t uncerrsev[NB_PCI_DEV];

/* Last MCERR masks we programmed (used when re-arming after an error). */
static uint32_t l_mcerr_int;
static uint32_t l_mcerr_fbd;
static uint32_t l_mcerr_mem;
static uint16_t l_mcerr_fsb;
static uint16_t l_mcerr_thr;

/*
 * Tunables: per-chipset error-mask policy. The *_mask_bios_* values
 * cover fatal errors left for BIOS/SMM handling; the *_mask_poll_*
 * values cover non-fatal errors handled by polling.
 */
uint_t nb5000_emask_fbd = EMASK_5000_FBD_RES;
uint_t nb5400_emask_fbd = 0;
int nb5000_reset_emask_fbd = 1;
uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;
uint_t nb5400_mask_poll_fbd = EMASK_5400_FBD_NF;
uint_t nb5400_mask_bios_fbd = EMASK_5400_FBD_FATAL;
uint_t nb7300_mask_poll_fbd = EMASK_7300_FBD_NF;
uint_t nb7300_mask_bios_fbd = EMASK_7300_FBD_FATAL;

int nb5100_reset_emask_mem = 1;
uint_t nb5100_mask_poll_mem = EMASK_MEM_NF;

uint_t nb5000_emask_fsb = 0;
int nb5000_reset_emask_fsb = 1;
uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;

uint_t nb5100_emask_int = EMASK_INT_5100;
uint_t nb5400_emask_int = EMASK_INT_5400;

uint_t nb7300_emask_int = EMASK_INT_7300;
uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
uint_t nb5000_emask_int = EMASK_INT_5000;
int nb5000_reset_emask_int = 1;
uint_t nb5000_mask_poll_int = EMASK_INT_NF;
uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;
uint_t nb5100_mask_poll_int = EMASK_INT_5100_NF;
uint_t nb5100_mask_bios_int = EMASK_INT_5100_FATAL;

uint_t nb_mask_poll_thr = EMASK_THR_NF;
uint_t nb_mask_bios_thr = EMASK_THR_FATAL;

int nb5000_reset_uncor_pex = 0;
uint_t nb5000_mask_uncor_pex = 0;
int nb5000_reset_cor_pex = 0;
uint_t nb5000_mask_cor_pex = 0xffffffff;
uint32_t nb5000_rp_pex = 0x1;

/* Set when an error handler has widened an MCERR mask (see *_mask_mc). */
int nb_mask_mc_set;
156 
/*
 * Optional platform-specific dimm labeler. A platform is selected by
 * prefix-matching the SMBIOS system vendor/product strings in
 * platform_label[] below; the function maps a linear dimm index to the
 * label silk-screened on the board.
 */
typedef struct find_dimm_label {
	void (*label_function)(int, char *, int);
} find_dimm_label_t;

static void x8450_dimm_label(int, char *, int);
static void cp3250_dimm_label(int, char *, int);

static struct platform_label {
	const char *sys_vendor;		/* SMB_TYPE_SYSTEM vendor prefix */
	const char *sys_product;	/* SMB_TYPE_SYSTEM product prefix */
	find_dimm_label_t dimm_label;
	int dimms_per_channel;		/* 0: probe channels at runtime */
} platform_label[] = {
	{ "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
	    x8450_dimm_label, 8 },
	{ "MiTAC,Shunde", "CP3250", cp3250_dimm_label, 0 },
	{ NULL, NULL, NULL, 0 }		/* terminator */
};
175 
/*
 * Read the SPD status register for the given SPD bus; the bus number
 * encodes the branch in its upper bits and the channel in bit 0.
 */
static unsigned short
read_spd(int bus)
{
	int br = bus >> 1;
	int ch = bus & 1;

	return (SPD_RD(br, ch));
}
187 
188 static void
189 write_spdcmd(int bus, uint32_t val)
190 {
191 	int branch = bus >> 1;
192 	int channel = bus & 1;
193 	SPDCMD_WR(branch, channel, val);
194 }
195 
/*
 * Read one byte from a dimm's SPD EEPROM via the memory controller's
 * SMBus interface.
 *
 * bus:   SPD bus number (branch/channel encoded, see read_spd())
 * slave: SMBus slave address of the dimm's EEPROM
 * addr:  byte offset within the EEPROM
 *
 * Returns the byte value (0-255), or -1 on timeout or persistent bus
 * error. Retries the whole transaction up to 4 times on bus errors.
 */
static int
read_spd_eeprom(int bus, int slave, int addr)
{
	int retry = 4;
	int wait;
	int spd;
	uint32_t cmd;

	for (;;) {
		/* wait for any in-flight transaction to drain */
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0)
				return (-1);
			drv_usecwait(10);
		}
		/* issue the read of (slave, addr) */
		cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
		write_spdcmd(bus, cmd);
		/* wait for the command to complete */
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0) {
				/* treat a timeout as a bus error and retry */
				spd = SPD_BUS_ERROR;
				break;
			}
			drv_usecwait(10);
		}
		/* spin until data valid, a bus error, or the budget runs out */
		while ((spd & SPD_BUS_ERROR) == 0 &&
		    (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
		    SPD_READ_DATA_VALID) {
			spd = read_spd(bus);
			if (--wait == 0)
				return (-1);
		}
		if ((spd & SPD_BUS_ERROR) == 0)
			break;
		if (--retry == 0)
			return (-1);
	}
	return (spd & 0xff);
}
241 
242 static void
243 nb_fini()
244 {
245 	int i, j;
246 	int nchannels = nb_number_memory_controllers * nb_channels_per_branch;
247 	nb_dimm_t **dimmpp;
248 	nb_dimm_t *dimmp;
249 
250 	dimmpp = nb_dimms;
251 	for (i = 0; i < nchannels; i++) {
252 		for (j = 0; j < nb_dimms_per_channel; j++) {
253 			dimmp = *dimmpp;
254 			if (dimmp) {
255 				kmem_free(dimmp, sizeof (nb_dimm_t));
256 				*dimmpp = NULL;
257 			}
258 			dimmp++;
259 		}
260 	}
261 	kmem_free(nb_dimms, sizeof (nb_dimm_t *) * nb_dimm_slots);
262 	nb_dimms = NULL;
263 	dimm_fini();
264 }
265 
266 void
267 nb_scrubber_enable()
268 {
269 	uint32_t mc;
270 
271 	if (!nb_hw_memory_scrub_enable)
272 		return;
273 
274 	mc = MC_RD();
275 	if ((mc & MC_MIRROR) != 0) /* mirror mode */
276 		mc |= MC_PATROL_SCRUB;
277 	else
278 		mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
279 	MC_WR(mc);
280 
281 	if (nb_sw_scrub_disabled++)
282 		cmi_mc_sw_memscrub_disable();
283 }
284 
285 static void
286 fbd_eeprom(int channel, int dimm, nb_dimm_t *dp)
287 {
288 	int i, t;
289 	int spd_sz;
290 
291 	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
292 	if (t == 1)
293 		spd_sz = 128;
294 	else if (t == 2)
295 		spd_sz = 176;
296 	else
297 		spd_sz = 256;
298 	dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
299 	    (read_spd_eeprom(channel, dimm, 118) << 8);
300 	dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
301 	dp->serial_number =
302 	    (read_spd_eeprom(channel, dimm, 122) << 24) |
303 	    (read_spd_eeprom(channel, dimm, 123) << 16) |
304 	    (read_spd_eeprom(channel, dimm, 124) << 8) |
305 	    read_spd_eeprom(channel, dimm, 125);
306 	t = read_spd_eeprom(channel, dimm, 121);
307 	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
308 	dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
309 	if (spd_sz > 128) {
310 		for (i = 0; i < sizeof (dp->part_number); i++) {
311 			dp->part_number[i] =
312 			    read_spd_eeprom(channel, dimm, 128 + i);
313 		}
314 		for (i = 0; i < sizeof (dp->revision); i++) {
315 			dp->revision[i] =
316 			    read_spd_eeprom(channel, dimm, 146 + i);
317 		}
318 	}
319 }
320 
321 /* read the manR of the DDR2 dimm */
322 static void
323 ddr2_eeprom(int channel, int dimm, nb_dimm_t *dp)
324 {
325 	int i, t;
326 	int slave;
327 
328 	slave = channel & 0x1 ? dimm + 4 : dimm;
329 
330 	/* byte[3]: number of row addresses */
331 	dp->nrow = read_spd_eeprom(channel, slave, 3) & 0x1f;
332 
333 	/* byte[4]: number of column addresses */
334 	dp->ncolumn = read_spd_eeprom(channel, slave, 4) & 0xf;
335 
336 	/* byte[5]: numranks; 0 means one rank */
337 	dp->nranks = (read_spd_eeprom(channel, slave, 5) & 0x3) + 1;
338 
339 	/* byte[6]: data width */
340 	dp->width = (read_spd_eeprom(channel, slave, 6) >> 5) << 2;
341 
342 	/* byte[17]: number of banks */
343 	dp->nbanks = read_spd_eeprom(channel, slave, 17);
344 
345 	dp->dimm_size = DIMMSIZE(dp->nrow, dp->ncolumn, dp->nranks, dp->nbanks,
346 	    dp->width);
347 
348 	/* manufacture-id - byte[64-65] */
349 	dp->manufacture_id = read_spd_eeprom(channel, slave, 64) |
350 	    (read_spd_eeprom(channel, dimm, 65) << 8);
351 
352 	/* location - byte[72] */
353 	dp->manufacture_location = read_spd_eeprom(channel, slave, 72);
354 
355 	/* serial number - byte[95-98] */
356 	dp->serial_number =
357 	    (read_spd_eeprom(channel, slave, 98) << 24) |
358 	    (read_spd_eeprom(channel, slave, 97) << 16) |
359 	    (read_spd_eeprom(channel, slave, 96) << 8) |
360 	    read_spd_eeprom(channel, slave, 95);
361 
362 	/* week - byte[94] */
363 	t = read_spd_eeprom(channel, slave, 94);
364 	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
365 	/* week - byte[93] */
366 	t = read_spd_eeprom(channel, slave, 93);
367 	dp->manufacture_year = (t >> 4) * 10 + (t & 0xf) + 2000;
368 
369 	/* part number - byte[73-81] */
370 	for (i = 0; i < 8; i++) {
371 		dp->part_number[i] = read_spd_eeprom(channel, slave, 73 + i);
372 	}
373 
374 	/* revision - byte[91-92] */
375 	for (i = 0; i < 2; i++) {
376 		dp->revision[i] = read_spd_eeprom(channel, slave, 91 + i);
377 	}
378 }
379 
380 static boolean_t
381 nb_dimm_present(int channel, int dimm)
382 {
383 	boolean_t rc = B_FALSE;
384 
385 	if (nb_chipset == INTEL_NB_5100) {
386 		int t, slave;
387 		slave = channel & 0x1 ? dimm + 4 : dimm;
388 		/* read the type field from the dimm and check for DDR2 type */
389 		if ((t = read_spd_eeprom(channel, slave, SPD_MEM_TYPE)) == -1)
390 			return (B_FALSE);
391 		rc = (t & 0xf) == SPD_DDR2;
392 	} else {
393 		rc = MTR_PRESENT(MTR_RD(channel, dimm)) != 0;
394 	}
395 
396 	return (rc);
397 }
398 
399 static nb_dimm_t *
400 nb_ddr2_dimm_init(int channel, int dimm, int start_rank)
401 {
402 	nb_dimm_t *dp;
403 
404 	if (nb_dimm_present(channel, dimm) == B_FALSE)
405 		return (NULL);
406 
407 	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
408 
409 	ddr2_eeprom(channel, dimm, dp);
410 
411 	/* The 1st rank of the dimm takes on this value */
412 	dp->start_rank = (uint8_t)start_rank;
413 
414 	dp->mtr_present = 1;
415 
416 	return (dp);
417 }
418 
419 static nb_dimm_t *
420 nb_fbd_dimm_init(int channel, int dimm, uint16_t mtr)
421 {
422 	nb_dimm_t *dp;
423 	int t;
424 
425 	if (MTR_PRESENT(mtr) == 0)
426 		return (NULL);
427 	t = read_spd_eeprom(channel, dimm, SPD_MEM_TYPE) & 0xf;
428 
429 	/* check for the dimm type */
430 	if (t != SPD_FBDIMM)
431 		return (NULL);
432 
433 	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
434 
435 	fbd_eeprom(channel, dimm, dp);
436 
437 	dp->mtr_present = MTR_PRESENT(mtr);
438 	dp->start_rank = dimm << 1;
439 	dp->nranks = MTR_NUMRANK(mtr);
440 	dp->nbanks = MTR_NUMBANK(mtr);
441 	dp->ncolumn = MTR_NUMCOL(mtr);
442 	dp->nrow = MTR_NUMROW(mtr);
443 	dp->width = MTR_WIDTH(mtr);
444 	dp->dimm_size = MTR_DIMMSIZE(mtr);
445 
446 	return (dp);
447 }
448 
449 static uint64_t
450 mc_range(int controller, uint64_t base)
451 {
452 	int i;
453 	uint64_t limit = 0;
454 
455 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
456 		if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
457 		    base < nb_banks[i].limit) {
458 			limit = nb_banks[i].limit;
459 			if (base <= top_of_low_memory &&
460 			    limit > top_of_low_memory) {
461 				limit -= TLOW_MAX - top_of_low_memory;
462 			}
463 			if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
464 			    nb_mode != NB_MEMORY_MIRROR) {
465 				limit = limit / 2;
466 			}
467 		}
468 	}
469 	return (limit);
470 }
471 
472 void
473 nb_mc_init()
474 {
475 	uint16_t tolm;
476 	uint16_t mir;
477 	uint32_t hole_base;
478 	uint32_t hole_size;
479 	uint32_t dmir;
480 	uint64_t base;
481 	uint64_t limit;
482 	uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
483 	int i, j, k;
484 	uint8_t interleave;
485 
486 	base = 0;
487 	tolm = TOLM_RD();
488 	top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
489 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
490 		mir = MIR_RD(i);
491 		limit = (uint64_t)(mir >> 4) << 28;
492 		way0 = mir & 1;
493 		way1 = (mir >> 1) & 1;
494 		if (way0 == 0 && way1 == 0) {
495 			way0 = 1;
496 			way1 = 1;
497 		}
498 		if (limit > top_of_low_memory)
499 			limit += TLOW_MAX - top_of_low_memory;
500 		nb_banks[i].base = base;
501 		nb_banks[i].limit = limit;
502 		nb_banks[i].way[0] = way0;
503 		nb_banks[i].way[1] = way1;
504 		base = limit;
505 	}
506 	for (i = 0; i < nb_number_memory_controllers; i++) {
507 		base = 0;
508 
509 		for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
510 			dmir = DMIR_RD(i, j);
511 			limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
512 			if (limit == 0) {
513 				limit = mc_range(i, base);
514 			}
515 			branch_interleave = 0;
516 			hole_base = 0;
517 			hole_size = 0;
518 			DMIR_RANKS(dmir, rank0, rank1, rank2, rank3);
519 			if (rank0 == rank1)
520 				interleave = 1;
521 			else if (rank0 == rank2)
522 				interleave = 2;
523 			else
524 				interleave = 4;
525 			if (nb_mode != NB_MEMORY_MIRROR &&
526 			    nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
527 				for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
528 					if (base >= nb_banks[k].base &&
529 					    base < nb_banks[k].limit) {
530 						if (nb_banks[i].way[0] &&
531 						    nb_banks[i].way[1]) {
532 							interleave *= 2;
533 							limit *= 2;
534 							branch_interleave = 1;
535 						}
536 						break;
537 					}
538 				}
539 			}
540 			if (base < top_of_low_memory &&
541 			    limit > top_of_low_memory) {
542 				hole_base = top_of_low_memory;
543 				hole_size = TLOW_MAX - top_of_low_memory;
544 				limit += hole_size;
545 			} else if (base > top_of_low_memory) {
546 				limit += TLOW_MAX - top_of_low_memory;
547 			}
548 			nb_ranks[i][j].base = base;
549 			nb_ranks[i][j].limit = limit;
550 			nb_ranks[i][j].rank[0] = rank0;
551 			nb_ranks[i][j].rank[1] = rank1;
552 			nb_ranks[i][j].rank[2] = rank2;
553 			nb_ranks[i][j].rank[3] = rank3;
554 			nb_ranks[i][j].interleave = interleave;
555 			nb_ranks[i][j].branch_interleave = branch_interleave;
556 			nb_ranks[i][j].hole_base = hole_base;
557 			nb_ranks[i][j].hole_size = hole_size;
558 			if (limit > base) {
559 				if (rank0 != rank1) {
560 					dimm_add_rank(i, rank1,
561 					    branch_interleave, 1, base,
562 					    hole_base, hole_size, interleave,
563 					    limit);
564 					if (rank0 != rank2) {
565 						dimm_add_rank(i, rank2,
566 						    branch_interleave, 2, base,
567 						    hole_base, hole_size,
568 						    interleave, limit);
569 						dimm_add_rank(i, rank3,
570 						    branch_interleave, 3, base,
571 						    hole_base, hole_size,
572 						    interleave, limit);
573 					}
574 				}
575 			}
576 			base = limit;
577 		}
578 	}
579 }
580 
581 void
582 nb_used_spare_rank(int branch, int bad_rank)
583 {
584 	int i;
585 	int j;
586 
587 	for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
588 		for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
589 			if (nb_ranks[branch][i].rank[j] == bad_rank) {
590 				nb_ranks[branch][i].rank[j] =
591 				    spare_rank[branch];
592 				i = NB_MEM_RANK_SELECT;
593 				break;
594 			}
595 		}
596 	}
597 }
598 
/*
 * Determine nb_dimms_per_channel and select a platform dimm labeler.
 *
 * First try to recognize the platform from the SMBIOS system record;
 * a match may also fix the dimm count. If the count is still unknown,
 * probe every channel and take the largest populated slot index.
 *
 * Returns a pointer into platform_label[] for a recognized platform,
 * or NULL (caller then falls back to SMBIOS labels).
 */
find_dimm_label_t *
find_dimms_per_channel()
{
	struct platform_label *pl;
	smbios_info_t si;
	smbios_system_t sy;
	id_t id;
	int i, j;
	find_dimm_label_t *rt = NULL;

	if (ksmbios != NULL && nb_no_smbios == 0) {
		if ((id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
		    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {
			/* prefix-match vendor and product strings */
			for (pl = platform_label; pl->sys_vendor; pl++) {
				if (strncmp(pl->sys_vendor,
				    si.smbi_manufacturer,
				    strlen(pl->sys_vendor)) == 0 &&
				    strncmp(pl->sys_product, si.smbi_product,
				    strlen(pl->sys_product)) == 0) {
					nb_dimms_per_channel =
					    pl->dimms_per_channel;
					rt = &pl->dimm_label;
					break;
				}
			}
		}
	}
	if (nb_dimms_per_channel == 0) {
		/*
		 * Scan all memory channels if we find a channel which has more
		 * dimms then we have seen before set nb_dimms_per_channel to
		 * the number of dimms on the channel
		 */
		for (i = 0; i < nb_number_memory_controllers; i++) {
			for (j = nb_dimms_per_channel;
			    j < NB_MAX_DIMMS_PER_CHANNEL; j++) {
				if (nb_dimm_present(i, j))
					nb_dimms_per_channel = j + 1;
			}
		}
	}
	return (rt);
}
642 
/* Shared state for the SMBIOS memory-device iteration callbacks. */
struct smb_dimm_rec {
	int dimms;		/* physical dimms found by probing */
	int slots;		/* memory-device records seen in SMBIOS */
	int populated;		/* SMBIOS records with non-zero size */
	nb_dimm_t **dimmpp;	/* walking cursor into nb_dimms[] */
};
649 
/*
 * smbios_iter() callback: copy the SMBIOS device-locator string of each
 * populated memory device into the label of the corresponding probed
 * dimm, advancing the cursor rp->dimmpp through nb_dimms[] in step with
 * the SMBIOS records. Returns -1 to stop iteration once the slot array
 * is exhausted, 0 otherwise.
 */
static int
dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
	nb_dimm_t ***dimmpp;
	nb_dimm_t *dimmp;
	smbios_memdevice_t md;

	dimmpp = &rp->dimmpp;
	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
		if (*dimmpp >= &nb_dimms[nb_dimm_slots])
			return (-1);
		dimmp = **dimmpp;
		if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0 &&
		    md.smbmd_dloc != NULL) {
			if (md.smbmd_size) {
				/*
				 * SMBIOS and probe disagree on this slot;
				 * when the counts line up overall, just
				 * skip this record.
				 */
				if (dimmp == NULL &&
				    (rp->slots == nb_dimm_slots ||
				    rp->dimms < rp->populated)) {
					(*dimmpp)++;
					return (0);
				}
				/*
				 * if there is no physical dimm for this smbios
				 * record it is because this system has less
				 * physical slots than the controller supports
				 * so skip empty slots to find the slot this
				 * smbios record belongs too
				 */
				while (dimmp == NULL) {
					(*dimmpp)++;
					if (*dimmpp >= &nb_dimms[nb_dimm_slots])
						return (-1);
					dimmp = **dimmpp;
				}
				(void) snprintf(dimmp->label,
				    sizeof (dimmp->label), "%s", md.smbmd_dloc);
				(*dimmpp)++;
			}
		}
	}
	return (0);
}
693 
694 static int
695 check_memdevice(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
696 {
697 	struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
698 	smbios_memdevice_t md;
699 
700 	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
701 		if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0) {
702 			rp->slots++;
703 			if (md.smbmd_size) {
704 				rp->populated++;
705 			}
706 		}
707 	}
708 	return (0);
709 }
710 
711 void
712 nb_smbios()
713 {
714 	struct smb_dimm_rec r;
715 	int i;
716 
717 	if (ksmbios != NULL && nb_no_smbios == 0) {
718 		r.dimms = 0;
719 		r.slots = 0;
720 		r.populated = 0;
721 		r.dimmpp = nb_dimms;
722 		for (i = 0; i < nb_dimm_slots; i++) {
723 			if (nb_dimms[i] != NULL)
724 				r.dimms++;
725 		}
726 		(void) smbios_iter(ksmbios, check_memdevice, &r);
727 		(void) smbios_iter(ksmbios, dimm_label, &r);
728 	}
729 }
730 
/*
 * Sun Blade X8450: 8 dimms per channel; the board numbers its sockets
 * so that D<slot * 4 + channel> holds (channel, slot).
 */
static void
x8450_dimm_label(int dimm, char *label, int label_sz)
{
	int channel = dimm >> 3;
	int slot = dimm & 0x7;

	(void) snprintf(label, label_sz, "D%d", slot * 4 + channel);
}
739 
740 /*
741  * CP3250 DIMM labels
742  * Channel   Dimm   Label
743  *       0      0      A0
744  *       1      0      B0
745  *       0      1      A1
746  *       1      1      B1
747  *       0      2      A2
748  *       1      2      B2
749  */
750 static void
751 cp3250_dimm_label(int dimm, char *label, int label_sz)
752 {
753 	int channel = dimm / nb_dimms_per_channel;
754 
755 	dimm = dimm % nb_dimms_per_channel;
756 	(void) snprintf(label, label_sz, "%c%d", channel == 0 ? 'A' : 'B',
757 	    dimm);
758 }
759 
760 /*
761  * Map the rank id to dimm id of a channel
762  * For the 5100 chipset, walk through the dimm list of channel the check if
763  * the given rank id is within the rank range assigned to the dimm.
764  * For other chipsets, the dimm is rank/2.
765  */
766 int
767 nb_rank2dimm(int channel, int rank)
768 {
769 	int i;
770 	nb_dimm_t **dimmpp = nb_dimms;
771 
772 	if (nb_chipset != INTEL_NB_5100)
773 		return (rank >> 1);
774 
775 	dimmpp += channel * nb_dimms_per_channel;
776 	for (i = 0; i < nb_dimms_per_channel; i++) {
777 		if ((rank >= dimmpp[i]->start_rank) &&
778 		    (rank < dimmpp[i]->start_rank + dimmpp[i]->nranks)) {
779 			return (i);
780 		}
781 	}
782 	return (-1);
783 }
784 
/*
 * Probe and initialize all DDR2 dimms (5100 chipset), allocating the
 * nb_dimms[] slot array, assigning start-rank numbers, applying the
 * platform labeler (if any), and determining the memory mode
 * (normal / spare-rank / single-channel).
 */
static void
nb_ddr2_dimms_init(find_dimm_label_t *label_function)
{
	int i, j;
	int start_rank;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	nb_dimm_slots = nb_number_memory_controllers * nb_channels_per_branch *
	    nb_dimms_per_channel;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_dimm_slots, KM_SLEEP);
	dimmpp = nb_dimms;
	nb_mode = NB_MEMORY_NORMAL;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		/* once spare-rank mode is seen, stop re-checking */
		if (nb_mode == NB_MEMORY_NORMAL) {
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}

		/* The 1st dimm of a channel starts at rank 0 */
		start_rank = 0;

		for (j = 0; j < nb_dimms_per_channel; j++) {
			dimmpp[j] = nb_ddr2_dimm_init(i, j, start_rank);
			if (dimmpp[j]) {
				nb_ndimm ++;
				if (label_function) {
					label_function->label_function(
					    (i * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
				start_rank += dimmpp[j]->nranks;
				/*
				 * add an extra rank because
				 * single-ranked dimm still takes on two ranks.
				 */
				if (dimmpp[j]->nranks & 0x1)
					start_rank++;
			}
		}
		dimmpp += nb_dimms_per_channel;
	}

	/*
	 * single channel is supported.
	 */
	if (nb_ndimm > 0 && nb_ndimm <= nb_dimms_per_channel) {
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	}
}
842 
/*
 * Probe and initialize all FB-DIMMs. Determines the memory mode from
 * the MCA/MC registers (single-channel / mirror / normal, possibly
 * upgraded to spare-rank), allocates nb_dimms[] (two channels per
 * branch), and initializes both channels' dimms for each branch.
 */
static void
nb_fbd_dimms_init(find_dimm_label_t *label_function)
{
	int i, j, k, l;
	uint16_t mtr;
	uint32_t mc, mca;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	mca = MCA_RD();
	mc = MC_RD();
	if (mca & MCA_SCHDIMM)  /* single-channel mode */
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	else if ((mc & MC_MIRROR) != 0) /* mirror mode */
		nb_mode = NB_MEMORY_MIRROR;
	else
		nb_mode = NB_MEMORY_NORMAL;
	/* two channels per branch */
	nb_dimm_slots = nb_number_memory_controllers * 2 * nb_dimms_per_channel;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_dimm_slots, KM_SLEEP);
	dimmpp = nb_dimms;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		/* once spare-rank mode is seen, stop re-checking */
		if (nb_mode == NB_MEMORY_NORMAL) {
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}
		for (j = 0; j < nb_dimms_per_channel; j++) {
			/* one MTR covers the same slot on both channels */
			mtr = MTR_RD(i, j);
			k = i * 2;	/* first channel of this branch */
			dimmpp[j] = nb_fbd_dimm_init(k, j, mtr);
			if (dimmpp[j]) {
				nb_ndimm ++;
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
			}
			/* second channel lands in the next slot group */
			dimmpp[j + nb_dimms_per_channel] =
			    nb_fbd_dimm_init(k + 1, j, mtr);
			l = j + nb_dimms_per_channel;
			if (dimmpp[l]) {
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + l,
					    dimmpp[l]->label,
					    sizeof (dimmpp[l]->label));
				}
				nb_ndimm ++;
			}
		}
		dimmpp += nb_dimms_per_channel * 2;
	}
}
903 
904 static void
905 nb_dimms_init(find_dimm_label_t *label_function)
906 {
907 	if (nb_chipset == INTEL_NB_5100)
908 		nb_ddr2_dimms_init(label_function);
909 	else
910 		nb_fbd_dimms_init(label_function);
911 
912 	if (label_function == NULL)
913 		nb_smbios();
914 }
915 
/*
 * Setup the ESI port registers to enable SERR for southbridge.
 * Saves the BIOS-programmed PCI-Express error masks (restored by
 * nb_pex_fini()), optionally overrides them, and arms SERR reporting.
 */
static void
nb_pex_init()
{
	int i = 0; /* ESI port */
	uint16_t regw;

	/* save BIOS state for nb_pex_fini() */
	emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
	emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
	emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
	docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
	uncerrsev[i] = UNCERRSEV_RD(i);

	/* apply tunable overrides, if requested */
	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
	if (nb_chipset == INTEL_NB_5400) {
		/* disable masking of ERR pins used by DOCMD */
		PEX_ERR_PIN_MASK_WR(i, 0x10);
	}

	/* RP error message (CE/NFE/FE) detect mask */
	EMASK_RP_PEX_WR(i, nb5000_rp_pex);

	/* Command Register - Enable SERR */
	regw = nb_pci_getw(0, i, 0, PCI_CONF_COMM, 0);
	nb_pci_putw(0, i, 0, PCI_CONF_COMM,
	    regw | PCI_COMM_SERR_ENABLE);

	/* Root Control Register - SERR on NFE/FE */
	PEXROOTCTL_WR(i, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN);

	/* AER UE Mask - Mask UR */
	UNCERRMSK_WR(i, PCIE_AER_UCE_UR);
}
953 
/*
 * Restore the ESI port PCI-Express error masks saved by nb_pex_init().
 */
static void
nb_pex_fini()
{
	int i = 0; /* ESI port */

	EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
	EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
	EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
	PEX_ERR_DOCMD_WR(i, docmd_pex[i]);

	/* tunable overrides remain in force across fini */
	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
}
969 
/*
 * Program the chipset's INT (internal) error-detect registers: save
 * the BIOS values (restored by nb_int_fini()), clear all latched
 * status, then set up which errors are fatal (left to BIOS, routed to
 * MCERR) and which are polled.
 */
void
nb_int_init()
{
	uint32_t err0_int;
	uint32_t err1_int;
	uint32_t err2_int;
	uint32_t mcerr_int;
	uint32_t emask_int;
	/* locals select the per-chipset mask policy */
	uint32_t nb_mask_bios_int;
	uint32_t nb_mask_poll_int;
	uint16_t stepping;

	if (nb_chipset == INTEL_NB_5100) {
		nb_mask_bios_int = nb5100_mask_bios_int;
		nb_mask_poll_int = nb5100_mask_poll_int;
	} else {
		nb_mask_bios_int = nb5000_mask_bios_int;
		nb_mask_poll_int = nb5000_mask_poll_int;
	}
	err0_int = ERR0_INT_RD();
	err1_int = ERR1_INT_RD();
	err2_int = ERR2_INT_RD();
	mcerr_int = MCERR_INT_RD();
	emask_int = EMASK_INT_RD();

	/* save BIOS state for nb_int_fini() */
	nb_err0_int = err0_int;
	nb_err1_int = err1_int;
	nb_err2_int = err2_int;
	nb_mcerr_int = mcerr_int;
	nb_emask_int = emask_int;

	/* write-one-to-clear all latched error status */
	ERR0_INT_WR(ERR_INT_ALL);
	ERR1_INT_WR(ERR_INT_ALL);
	ERR2_INT_WR(ERR_INT_ALL);
	MCERR_INT_WR(ERR_INT_ALL);
	EMASK_INT_WR(ERR_INT_ALL);

	/*
	 * Route BIOS-handled fatal errors to MCERR unless BIOS already
	 * enabled them in an ERR* register; add the polled set to MCERR
	 * and all three ERR registers.
	 */
	mcerr_int &= ~nb_mask_bios_int;
	mcerr_int |= nb_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
	mcerr_int |= nb_mask_poll_int;
	err0_int |= nb_mask_poll_int;
	err1_int |= nb_mask_poll_int;
	err2_int |= nb_mask_poll_int;

	l_mcerr_int = mcerr_int;
	ERR0_INT_WR(err0_int);
	ERR1_INT_WR(err1_int);
	ERR2_INT_WR(err2_int);
	MCERR_INT_WR(mcerr_int);
	if (nb5000_reset_emask_int) {
		/* per-chipset (and 7300 stepping 0) error-mask values */
		if (nb_chipset == INTEL_NB_7300) {
			stepping = NB5000_STEPPING();
			if (stepping == 0)
				EMASK_5000_INT_WR(nb7300_emask_int_step0);
			else
				EMASK_5000_INT_WR(nb7300_emask_int);
		} else if (nb_chipset == INTEL_NB_5400) {
			EMASK_5400_INT_WR(nb5400_emask_int |
			    (emask_int & EMASK_INT_RES));
		} else if (nb_chipset == INTEL_NB_5100) {
			EMASK_5000_INT_WR(nb5100_emask_int);
		} else {
			EMASK_5000_INT_WR(nb5000_emask_int);
		}
	} else {
		EMASK_INT_WR(nb_emask_int);
	}
}
1038 
/*
 * Restore the INT error-detect registers saved by nb_int_init():
 * clear all latched status first, then put the BIOS values back.
 */
void
nb_int_fini()
{
	ERR0_INT_WR(ERR_INT_ALL);
	ERR1_INT_WR(ERR_INT_ALL);
	ERR2_INT_WR(ERR_INT_ALL);
	MCERR_INT_WR(ERR_INT_ALL);
	EMASK_INT_WR(ERR_INT_ALL);

	ERR0_INT_WR(nb_err0_int);
	ERR1_INT_WR(nb_err1_int);
	ERR2_INT_WR(nb_err2_int);
	MCERR_INT_WR(nb_mcerr_int);
	EMASK_INT_WR(nb_emask_int);
}
1054 
1055 void
1056 nb_int_mask_mc(uint32_t mc_mask_int)
1057 {
1058 	uint32_t emask_int;
1059 
1060 	emask_int = MCERR_INT_RD();
1061 	if ((emask_int & mc_mask_int) != mc_mask_int) {
1062 		MCERR_INT_WR(emask_int|mc_mask_int);
1063 		nb_mask_mc_set = 1;
1064 	}
1065 }
1066 
/*
 * Program the FB-DIMM error-detect registers: save BIOS values
 * (restored by nb_fbd_fini()), clear latched status, then set up the
 * per-chipset fatal (BIOS/MCERR) and polled error sets.
 */
static void
nb_fbd_init()
{
	uint32_t err0_fbd;
	uint32_t err1_fbd;
	uint32_t err2_fbd;
	uint32_t mcerr_fbd;
	uint32_t emask_fbd;
	uint32_t emask_bios_fbd;
	uint32_t emask_poll_fbd;

	err0_fbd = ERR0_FBD_RD();
	err1_fbd = ERR1_FBD_RD();
	err2_fbd = ERR2_FBD_RD();
	mcerr_fbd = MCERR_FBD_RD();
	emask_fbd = EMASK_FBD_RD();

	/* save BIOS state for nb_fbd_fini() */
	nb_err0_fbd = err0_fbd;
	nb_err1_fbd = err1_fbd;
	nb_err2_fbd = err2_fbd;
	nb_mcerr_fbd = mcerr_fbd;
	nb_emask_fbd = emask_fbd;

	/* write-one-to-clear all latched error status */
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	/* select the per-chipset mask policy */
	if (nb_chipset == INTEL_NB_7300) {
		if (nb_mode == NB_MEMORY_MIRROR) {
			/* MCH 7300 errata 34 */
			emask_bios_fbd = nb7300_mask_bios_fbd & ~EMASK_FBD_M23;
			emask_poll_fbd = nb7300_mask_poll_fbd;
			mcerr_fbd |= EMASK_FBD_M23;
		} else {
			emask_bios_fbd = nb7300_mask_bios_fbd;
			emask_poll_fbd = nb7300_mask_poll_fbd;
		}
	} else if (nb_chipset == INTEL_NB_5400) {
		emask_bios_fbd = nb5400_mask_bios_fbd;
		emask_poll_fbd = nb5400_mask_poll_fbd;
	} else {
		emask_bios_fbd = nb5000_mask_bios_fbd;
		emask_poll_fbd = nb5000_mask_poll_fbd;
	}
	/*
	 * Route BIOS-handled fatal errors to MCERR unless BIOS already
	 * enabled them in an ERR* register; add the polled set to MCERR
	 * and all three ERR registers.
	 */
	mcerr_fbd &= ~emask_bios_fbd;
	mcerr_fbd |= emask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
	mcerr_fbd |= emask_poll_fbd;
	err0_fbd |= emask_poll_fbd;
	err1_fbd |= emask_poll_fbd;
	err2_fbd |= emask_poll_fbd;

	l_mcerr_fbd = mcerr_fbd;
	ERR0_FBD_WR(err0_fbd);
	ERR1_FBD_WR(err1_fbd);
	ERR2_FBD_WR(err2_fbd);
	MCERR_FBD_WR(mcerr_fbd);
	if (nb5000_reset_emask_fbd) {
		if (nb_chipset == INTEL_NB_5400)
			EMASK_FBD_WR(nb5400_emask_fbd);
		else
			EMASK_FBD_WR(nb5000_emask_fbd);
	} else {
		EMASK_FBD_WR(nb_emask_fbd);
	}
}
1134 
1135 void
1136 nb_fbd_mask_mc(uint32_t mc_mask_fbd)
1137 {
1138 	uint32_t emask_fbd;
1139 
1140 	emask_fbd = MCERR_FBD_RD();
1141 	if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
1142 		MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
1143 		nb_mask_mc_set = 1;
1144 	}
1145 }
1146 
/*
 * Restore the FB-DIMM error registers to the BIOS-programmed values
 * saved by nb_fbd_init(), masking everything first.
 */
static void
nb_fbd_fini()
{
	/* mask all FB-DIMM error detection/reporting */
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	/* put back the values saved at init time */
	ERR0_FBD_WR(nb_err0_fbd);
	ERR1_FBD_WR(nb_err1_fbd);
	ERR2_FBD_WR(nb_err2_fbd);
	MCERR_FBD_WR(nb_mcerr_fbd);
	EMASK_FBD_WR(nb_emask_fbd);
}
1162 
/*
 * Initialize the 5100 MEM error detection/reporting registers: save the
 * BIOS-programmed contents (restored by nb_mem_fini()), mask everything
 * while reprogramming, then enable detection for the poll-mode errors.
 */
static void
nb_mem_init()
{
	uint32_t err0_mem;
	uint32_t err1_mem;
	uint32_t err2_mem;
	uint32_t mcerr_mem;
	uint32_t emask_mem;
	uint32_t emask_poll_mem;

	/* capture the register contents as programmed by the BIOS */
	err0_mem = ERR0_MEM_RD();
	err1_mem = ERR1_MEM_RD();
	err2_mem = ERR2_MEM_RD();
	mcerr_mem = MCERR_MEM_RD();
	emask_mem = EMASK_MEM_RD();

	/* save them so nb_mem_fini() can restore the BIOS state */
	nb_err0_mem = err0_mem;
	nb_err1_mem = err1_mem;
	nb_err2_mem = err2_mem;
	nb_mcerr_mem = mcerr_mem;
	nb_emask_mem = emask_mem;

	/* mask everything off while the registers are reprogrammed */
	ERR0_MEM_WR(0xffffffff);
	ERR1_MEM_WR(0xffffffff);
	ERR2_MEM_WR(0xffffffff);
	MCERR_MEM_WR(0xffffffff);
	EMASK_MEM_WR(0xffffffff);

	/* set the poll-mode bits in MCERR and all three err registers */
	emask_poll_mem = nb5100_mask_poll_mem;
	mcerr_mem |= emask_poll_mem;
	err0_mem |= emask_poll_mem;
	err1_mem |= emask_poll_mem;
	err2_mem |= emask_poll_mem;

	/* remember the computed MCERR value for nb_mask_mc_reset() */
	l_mcerr_mem = mcerr_mem;
	ERR0_MEM_WR(err0_mem);
	ERR1_MEM_WR(err1_mem);
	ERR2_MEM_WR(err2_mem);
	MCERR_MEM_WR(mcerr_mem);
	if (nb5100_reset_emask_mem) {
		/* clear exactly the poll-mode bits in EMASK */
		EMASK_MEM_WR(~nb5100_mask_poll_mem);
	} else {
		/* keep the EMASK value the BIOS programmed */
		EMASK_MEM_WR(nb_emask_mem);
	}
}
1208 
1209 void
1210 nb_mem_mask_mc(uint32_t mc_mask_mem)
1211 {
1212 	uint32_t emask_mem;
1213 
1214 	emask_mem = MCERR_MEM_RD();
1215 	if ((emask_mem & mc_mask_mem) != mc_mask_mem) {
1216 		MCERR_MEM_WR(emask_mem|mc_mask_mem);
1217 		nb_mask_mc_set = 1;
1218 	}
1219 }
1220 
/*
 * Restore the 5100 MEM error registers to the BIOS-programmed values
 * saved by nb_mem_init(), masking everything first.
 */
static void
nb_mem_fini()
{
	/* mask all MEM error detection/reporting */
	ERR0_MEM_WR(0xffffffff);
	ERR1_MEM_WR(0xffffffff);
	ERR2_MEM_WR(0xffffffff);
	MCERR_MEM_WR(0xffffffff);
	EMASK_MEM_WR(0xffffffff);

	/* put back the values saved at init time */
	ERR0_MEM_WR(nb_err0_mem);
	ERR1_MEM_WR(nb_err1_mem);
	ERR2_MEM_WR(nb_err2_mem);
	MCERR_MEM_WR(nb_mcerr_mem);
	EMASK_MEM_WR(nb_emask_mem);
}
1236 
1237 static void
1238 nb_fsb_init()
1239 {
1240 	uint16_t err0_fsb;
1241 	uint16_t err1_fsb;
1242 	uint16_t err2_fsb;
1243 	uint16_t mcerr_fsb;
1244 	uint16_t emask_fsb;
1245 
1246 	err0_fsb = ERR0_FSB_RD(0);
1247 	err1_fsb = ERR1_FSB_RD(0);
1248 	err2_fsb = ERR2_FSB_RD(0);
1249 	mcerr_fsb = MCERR_FSB_RD(0);
1250 	emask_fsb = EMASK_FSB_RD(0);
1251 
1252 	ERR0_FSB_WR(0, 0xffff);
1253 	ERR1_FSB_WR(0, 0xffff);
1254 	ERR2_FSB_WR(0, 0xffff);
1255 	MCERR_FSB_WR(0, 0xffff);
1256 	EMASK_FSB_WR(0, 0xffff);
1257 
1258 	ERR0_FSB_WR(1, 0xffff);
1259 	ERR1_FSB_WR(1, 0xffff);
1260 	ERR2_FSB_WR(1, 0xffff);
1261 	MCERR_FSB_WR(1, 0xffff);
1262 	EMASK_FSB_WR(1, 0xffff);
1263 
1264 	nb_err0_fsb = err0_fsb;
1265 	nb_err1_fsb = err1_fsb;
1266 	nb_err2_fsb = err2_fsb;
1267 	nb_mcerr_fsb = mcerr_fsb;
1268 	nb_emask_fsb = emask_fsb;
1269 
1270 	mcerr_fsb &= ~nb5000_mask_bios_fsb;
1271 	mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
1272 	mcerr_fsb |= nb5000_mask_poll_fsb;
1273 	err0_fsb |= nb5000_mask_poll_fsb;
1274 	err1_fsb |= nb5000_mask_poll_fsb;
1275 	err2_fsb |= nb5000_mask_poll_fsb;
1276 
1277 	l_mcerr_fsb = mcerr_fsb;
1278 	ERR0_FSB_WR(0, err0_fsb);
1279 	ERR1_FSB_WR(0, err1_fsb);
1280 	ERR2_FSB_WR(0, err2_fsb);
1281 	MCERR_FSB_WR(0, mcerr_fsb);
1282 	if (nb5000_reset_emask_fsb) {
1283 		EMASK_FSB_WR(0, nb5000_emask_fsb);
1284 	} else {
1285 		EMASK_FSB_WR(0, nb_emask_fsb);
1286 	}
1287 
1288 	ERR0_FSB_WR(1, err0_fsb);
1289 	ERR1_FSB_WR(1, err1_fsb);
1290 	ERR2_FSB_WR(1, err2_fsb);
1291 	MCERR_FSB_WR(1, mcerr_fsb);
1292 	if (nb5000_reset_emask_fsb) {
1293 		EMASK_FSB_WR(1, nb5000_emask_fsb);
1294 	} else {
1295 		EMASK_FSB_WR(1, nb_emask_fsb);
1296 	}
1297 
1298 	if (nb_chipset == INTEL_NB_7300) {
1299 		ERR0_FSB_WR(2, 0xffff);
1300 		ERR1_FSB_WR(2, 0xffff);
1301 		ERR2_FSB_WR(2, 0xffff);
1302 		MCERR_FSB_WR(2, 0xffff);
1303 		EMASK_FSB_WR(2, 0xffff);
1304 
1305 		ERR0_FSB_WR(3, 0xffff);
1306 		ERR1_FSB_WR(3, 0xffff);
1307 		ERR2_FSB_WR(3, 0xffff);
1308 		MCERR_FSB_WR(3, 0xffff);
1309 		EMASK_FSB_WR(3, 0xffff);
1310 
1311 		ERR0_FSB_WR(2, err0_fsb);
1312 		ERR1_FSB_WR(2, err1_fsb);
1313 		ERR2_FSB_WR(2, err2_fsb);
1314 		MCERR_FSB_WR(2, mcerr_fsb);
1315 		if (nb5000_reset_emask_fsb) {
1316 			EMASK_FSB_WR(2, nb5000_emask_fsb);
1317 		} else {
1318 			EMASK_FSB_WR(2, nb_emask_fsb);
1319 		}
1320 
1321 		ERR0_FSB_WR(3, err0_fsb);
1322 		ERR1_FSB_WR(3, err1_fsb);
1323 		ERR2_FSB_WR(3, err2_fsb);
1324 		MCERR_FSB_WR(3, mcerr_fsb);
1325 		if (nb5000_reset_emask_fsb) {
1326 			EMASK_FSB_WR(3, nb5000_emask_fsb);
1327 		} else {
1328 			EMASK_FSB_WR(3, nb_emask_fsb);
1329 		}
1330 	}
1331 }
1332 
1333 static void
1334 nb_fsb_fini() {
1335 	ERR0_FSB_WR(0, 0xffff);
1336 	ERR1_FSB_WR(0, 0xffff);
1337 	ERR2_FSB_WR(0, 0xffff);
1338 	MCERR_FSB_WR(0, 0xffff);
1339 	EMASK_FSB_WR(0, 0xffff);
1340 
1341 	ERR0_FSB_WR(0, nb_err0_fsb);
1342 	ERR1_FSB_WR(0, nb_err1_fsb);
1343 	ERR2_FSB_WR(0, nb_err2_fsb);
1344 	MCERR_FSB_WR(0, nb_mcerr_fsb);
1345 	EMASK_FSB_WR(0, nb_emask_fsb);
1346 
1347 	ERR0_FSB_WR(1, 0xffff);
1348 	ERR1_FSB_WR(1, 0xffff);
1349 	ERR2_FSB_WR(1, 0xffff);
1350 	MCERR_FSB_WR(1, 0xffff);
1351 	EMASK_FSB_WR(1, 0xffff);
1352 
1353 	ERR0_FSB_WR(1, nb_err0_fsb);
1354 	ERR1_FSB_WR(1, nb_err1_fsb);
1355 	ERR2_FSB_WR(1, nb_err2_fsb);
1356 	MCERR_FSB_WR(1, nb_mcerr_fsb);
1357 	EMASK_FSB_WR(1, nb_emask_fsb);
1358 
1359 	if (nb_chipset == INTEL_NB_7300) {
1360 		ERR0_FSB_WR(2, 0xffff);
1361 		ERR1_FSB_WR(2, 0xffff);
1362 		ERR2_FSB_WR(2, 0xffff);
1363 		MCERR_FSB_WR(2, 0xffff);
1364 		EMASK_FSB_WR(2, 0xffff);
1365 
1366 		ERR0_FSB_WR(2, nb_err0_fsb);
1367 		ERR1_FSB_WR(2, nb_err1_fsb);
1368 		ERR2_FSB_WR(2, nb_err2_fsb);
1369 		MCERR_FSB_WR(2, nb_mcerr_fsb);
1370 		EMASK_FSB_WR(2, nb_emask_fsb);
1371 
1372 		ERR0_FSB_WR(3, 0xffff);
1373 		ERR1_FSB_WR(3, 0xffff);
1374 		ERR2_FSB_WR(3, 0xffff);
1375 		MCERR_FSB_WR(3, 0xffff);
1376 		EMASK_FSB_WR(3, 0xffff);
1377 
1378 		ERR0_FSB_WR(3, nb_err0_fsb);
1379 		ERR1_FSB_WR(3, nb_err1_fsb);
1380 		ERR2_FSB_WR(3, nb_err2_fsb);
1381 		MCERR_FSB_WR(3, nb_mcerr_fsb);
1382 		EMASK_FSB_WR(3, nb_emask_fsb);
1383 	}
1384 }
1385 
1386 void
1387 nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
1388 {
1389 	uint16_t emask_fsb;
1390 
1391 	emask_fsb = MCERR_FSB_RD(fsb);
1392 	if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
1393 		MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
1394 		nb_mask_mc_set = 1;
1395 	}
1396 }
1397 
1398 static void
1399 nb_thr_init()
1400 {
1401 	uint16_t err0_thr;
1402 	uint16_t err1_thr;
1403 	uint16_t err2_thr;
1404 	uint16_t mcerr_thr;
1405 	uint16_t emask_thr;
1406 
1407 	if (nb_chipset == INTEL_NB_5400) {
1408 		err0_thr = ERR0_THR_RD(0);
1409 		err1_thr = ERR1_THR_RD(0);
1410 		err2_thr = ERR2_THR_RD(0);
1411 		mcerr_thr = MCERR_THR_RD(0);
1412 		emask_thr = EMASK_THR_RD(0);
1413 
1414 		ERR0_THR_WR(0xffff);
1415 		ERR1_THR_WR(0xffff);
1416 		ERR2_THR_WR(0xffff);
1417 		MCERR_THR_WR(0xffff);
1418 		EMASK_THR_WR(0xffff);
1419 
1420 		nb_err0_thr = err0_thr;
1421 		nb_err1_thr = err1_thr;
1422 		nb_err2_thr = err2_thr;
1423 		nb_mcerr_thr = mcerr_thr;
1424 		nb_emask_thr = emask_thr;
1425 
1426 		mcerr_thr &= ~nb_mask_bios_thr;
1427 		mcerr_thr |= nb_mask_bios_thr &
1428 		    (~err2_thr | ~err1_thr | ~err0_thr);
1429 		mcerr_thr |= nb_mask_poll_thr;
1430 		err0_thr |= nb_mask_poll_thr;
1431 		err1_thr |= nb_mask_poll_thr;
1432 		err2_thr |= nb_mask_poll_thr;
1433 
1434 		l_mcerr_thr = mcerr_thr;
1435 		ERR0_THR_WR(err0_thr);
1436 		ERR1_THR_WR(err1_thr);
1437 		ERR2_THR_WR(err2_thr);
1438 		MCERR_THR_WR(mcerr_thr);
1439 		EMASK_THR_WR(nb_emask_thr);
1440 	}
1441 }
1442 
1443 static void
1444 nb_thr_fini()
1445 {
1446 	if (nb_chipset == INTEL_NB_5400) {
1447 		ERR0_THR_WR(0xffff);
1448 		ERR1_THR_WR(0xffff);
1449 		ERR2_THR_WR(0xffff);
1450 		MCERR_THR_WR(0xffff);
1451 		EMASK_THR_WR(0xffff);
1452 
1453 		ERR0_THR_WR(nb_err0_thr);
1454 		ERR1_THR_WR(nb_err1_thr);
1455 		ERR2_THR_WR(nb_err2_thr);
1456 		MCERR_THR_WR(nb_mcerr_thr);
1457 		EMASK_THR_WR(nb_emask_thr);
1458 	}
1459 }
1460 
1461 void
1462 nb_thr_mask_mc(uint16_t mc_mask_thr)
1463 {
1464 	uint16_t emask_thr;
1465 
1466 	emask_thr = MCERR_THR_RD(0);
1467 	if ((emask_thr & mc_mask_thr) != mc_mask_thr) {
1468 		MCERR_THR_WR(emask_thr|mc_mask_thr);
1469 		nb_mask_mc_set = 1;
1470 	}
1471 }
1472 
/*
 * Restore all MCERR (fatal error reporting) registers to the values
 * last computed by the *_init() routines, undoing any masking applied
 * through the nb_*_mask_mc() functions.
 */
void
nb_mask_mc_reset()
{
	/* the 5100 uses the MEM register set; all others the FB-DIMM set */
	if (nb_chipset == INTEL_NB_5100)
		MCERR_MEM_WR(l_mcerr_mem);
	else
		MCERR_FBD_WR(l_mcerr_fbd);
	MCERR_INT_WR(l_mcerr_int);
	/* FSBs 0 and 1 are always present */
	MCERR_FSB_WR(0, l_mcerr_fsb);
	MCERR_FSB_WR(1, l_mcerr_fsb);
	if (nb_chipset == INTEL_NB_7300) {
		/* the 7300 has two additional front side buses */
		MCERR_FSB_WR(2, l_mcerr_fsb);
		MCERR_FSB_WR(3, l_mcerr_fsb);
	}
	if (nb_chipset == INTEL_NB_5400) {
		/* thermal error registers exist only on the 5400 */
		MCERR_THR_WR(l_mcerr_thr);
	}
}
1491 
/*
 * One-time device initialization: create the error queue and logout
 * mutex, then bring up each error-detection subsystem in order and
 * enable the memory scrubber.  Returns 0 on success or EAGAIN if the
 * error queue cannot be created.
 */
int
nb_dev_init()
{
	find_dimm_label_t *label_function_p;

	label_function_p = find_dimms_per_channel();
	mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
	nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
	    sizeof (nb_logout_t), 1, ERRORQ_VITAL);
	if (nb_queue == NULL) {
		/* back out the mutex; caller treats EAGAIN as retryable */
		mutex_destroy(&nb_mutex);
		return (EAGAIN);
	}
	nb_int_init();
	nb_thr_init();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	/* the 5100 uses the MEM register set; all others the FB-DIMM set */
	if (nb_chipset == INTEL_NB_5100)
		nb_mem_init();
	else
		nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();
	return (0);
}
1519 
1520 int
1521 nb_init()
1522 {
1523 	/* return ENOTSUP if there is no PCI config space support. */
1524 	if (pci_getl_func == NULL)
1525 		return (ENOTSUP);
1526 
1527 	/* get vendor and device */
1528 	nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
1529 	switch (nb_chipset) {
1530 	default:
1531 		if (nb_5000_memory_controller == 0)
1532 			return (ENOTSUP);
1533 		break;
1534 	case INTEL_NB_7300:
1535 	case INTEL_NB_5000P:
1536 	case INTEL_NB_5000X:
1537 		break;
1538 	case INTEL_NB_5000V:
1539 	case INTEL_NB_5000Z:
1540 		nb_number_memory_controllers = 1;
1541 		break;
1542 	case INTEL_NB_5100:
1543 		nb_channels_per_branch = 1;
1544 		break;
1545 	case INTEL_NB_5400:
1546 	case INTEL_NB_5400A:
1547 	case INTEL_NB_5400B:
1548 		nb_chipset = INTEL_NB_5400;
1549 		break;
1550 	}
1551 	return (0);
1552 }
1553 
1554 void
1555 nb_dev_reinit()
1556 {
1557 	int i, j;
1558 	int nchannels = nb_number_memory_controllers * 2;
1559 	nb_dimm_t **dimmpp;
1560 	nb_dimm_t *dimmp;
1561 	nb_dimm_t **old_nb_dimms;
1562 	int old_nb_dimms_per_channel;
1563 	find_dimm_label_t *label_function_p;
1564 	int dimm_slot = nb_dimm_slots;
1565 
1566 	old_nb_dimms = nb_dimms;
1567 	old_nb_dimms_per_channel = nb_dimms_per_channel;
1568 
1569 	dimm_fini();
1570 	nb_dimms_per_channel = 0;
1571 	label_function_p = find_dimms_per_channel();
1572 	dimm_init();
1573 	nb_dimms_init(label_function_p);
1574 	nb_mc_init();
1575 	nb_pex_init();
1576 	nb_int_init();
1577 	nb_thr_init();
1578 	if (nb_chipset == INTEL_NB_5100)
1579 		nb_mem_init();
1580 	else
1581 		nb_fbd_init();
1582 	nb_fsb_init();
1583 	nb_scrubber_enable();
1584 
1585 	dimmpp = old_nb_dimms;
1586 	for (i = 0; i < nchannels; i++) {
1587 		for (j = 0; j < old_nb_dimms_per_channel; j++) {
1588 			dimmp = *dimmpp;
1589 			if (dimmp) {
1590 				kmem_free(dimmp, sizeof (nb_dimm_t));
1591 				*dimmpp = NULL;
1592 			}
1593 			dimmp++;
1594 		}
1595 	}
1596 	kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) * dimm_slot);
1597 }
1598 
/*
 * Tear down the driver state set up by nb_dev_init(): destroy the error
 * queue and mutex, then restore each subsystem's BIOS register state.
 */
void
nb_dev_unload()
{
	errorq_destroy(nb_queue);
	nb_queue = NULL;
	mutex_destroy(&nb_mutex);
	nb_int_fini();
	nb_thr_fini();
	/* the 5100 uses the MEM register set; all others the FB-DIMM set */
	if (nb_chipset == INTEL_NB_5100)
		nb_mem_fini();
	else
		nb_fbd_fini();
	nb_fsb_fini();
	nb_pex_fini();
	nb_fini();
}
1615 
/*
 * Module unload hook; intentionally empty — all cleanup is handled by
 * nb_dev_unload().
 */
void
nb_unload()
{
}
1620