1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include <sys/pci.h>
44 #include <sys/pcie.h>
45 #include "nb5000.h"
46 #include "nb_log.h"
47 #include "dimm_phys.h"
48 #include "rank.h"
49 
50 int nb_hw_memory_scrub_enable = 1;
51 static int nb_sw_scrub_disabled = 0;
52 
53 int nb_5000_memory_controller = 0;
54 int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
55 int nb_channels_per_branch = NB_MAX_CHANNELS_PER_BRANCH;
56 int nb_dimms_per_channel = 0;
57 
58 nb_dimm_t **nb_dimms;
59 int nb_ndimm;
60 uint32_t nb_chipset;
61 enum nb_memory_mode nb_mode;
62 bank_select_t nb_banks[NB_MAX_MEM_BRANCH_SELECT];
63 rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MAX_MEM_RANK_SELECT];
64 uint32_t top_of_low_memory;
65 uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];
66 
67 extern int nb_no_smbios;
68 
69 errorq_t *nb_queue;
70 kmutex_t nb_mutex;
71 
72 static int nb_dimm_slots;
73 
74 static uint32_t nb_err0_int;
75 static uint32_t nb_err1_int;
76 static uint32_t nb_err2_int;
77 static uint32_t nb_mcerr_int;
78 static uint32_t nb_emask_int;
79 
80 static uint32_t nb_err0_fbd;
81 static uint32_t nb_err1_fbd;
82 static uint32_t nb_err2_fbd;
83 static uint32_t nb_mcerr_fbd;
84 static uint32_t nb_emask_fbd;
85 
86 static uint32_t nb_err0_mem;
87 static uint32_t nb_err1_mem;
88 static uint32_t nb_err2_mem;
89 static uint32_t nb_mcerr_mem;
90 static uint32_t nb_emask_mem;
91 
92 static uint16_t nb_err0_fsb;
93 static uint16_t nb_err1_fsb;
94 static uint16_t nb_err2_fsb;
95 static uint16_t nb_mcerr_fsb;
96 static uint16_t nb_emask_fsb;
97 
98 static uint16_t nb_err0_thr;
99 static uint16_t nb_err1_thr;
100 static uint16_t nb_err2_thr;
101 static uint16_t nb_mcerr_thr;
102 static uint16_t nb_emask_thr;
103 
104 static uint32_t	emask_uncor_pex[NB_PCI_DEV];
105 static uint32_t emask_cor_pex[NB_PCI_DEV];
106 static uint32_t emask_rp_pex[NB_PCI_DEV];
107 static uint32_t docmd_pex[NB_PCI_DEV];
108 static uint32_t uncerrsev[NB_PCI_DEV];
109 
110 static uint32_t l_mcerr_int;
111 static uint32_t l_mcerr_fbd;
112 static uint32_t l_mcerr_mem;
113 static uint16_t l_mcerr_fsb;
114 static uint16_t l_mcerr_thr;
115 
116 uint_t nb5000_emask_fbd = EMASK_5000_FBD_RES;
117 uint_t nb5400_emask_fbd = 0;
118 int nb5000_reset_emask_fbd = 1;
119 uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
120 uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;
121 uint_t nb5400_mask_poll_fbd = EMASK_5400_FBD_NF;
122 uint_t nb5400_mask_bios_fbd = EMASK_5400_FBD_FATAL;
123 
124 int nb5100_reset_emask_mem = 1;
125 uint_t nb5100_mask_poll_mem = EMASK_MEM_NF;
126 
127 uint_t nb5000_emask_fsb = 0;
128 int nb5000_reset_emask_fsb = 1;
129 uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
130 uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;
131 
132 uint_t nb5400_emask_int = EMASK_INT_5400;
133 
134 uint_t nb7300_emask_int = EMASK_INT_7300;
135 uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
136 uint_t nb5000_emask_int = EMASK_INT_5000;
137 int nb5000_reset_emask_int = 1;
138 uint_t nb5000_mask_poll_int = EMASK_INT_NF;
139 uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;
140 
141 uint_t nb_mask_poll_thr = EMASK_THR_NF;
142 uint_t nb_mask_bios_thr = EMASK_THR_FATAL;
143 
144 int nb5000_reset_uncor_pex = 0;
145 uint_t nb5000_mask_uncor_pex = 0;
146 int nb5000_reset_cor_pex = 0;
147 uint_t nb5000_mask_cor_pex = 0xffffffff;
148 uint32_t nb5000_rp_pex = 0x1;
149 
150 int nb_mask_mc_set;
151 
/*
 * Platform-specific dimm labeling: each supported platform supplies a
 * routine that formats the silk-screen label for a given dimm index.
 */
typedef struct find_dimm_label {
	void (*label_function)(int, char *, int);
} find_dimm_label_t;

static void x8450_dimm_label(int, char *, int);
static void cp3250_dimm_label(int, char *, int);

/*
 * Table matching SMBIOS system vendor/product prefixes to the label
 * routine and dimms-per-channel count for that platform.  Terminated
 * by the NULL-vendor entry; 0 dimms_per_channel means "probe for it".
 */
static struct platform_label {
	const char *sys_vendor;		/* SMB_TYPE_SYSTEM vendor prefix */
	const char *sys_product;	/* SMB_TYPE_SYSTEM product prefix */
	find_dimm_label_t dimm_label;
	int dimms_per_channel;
} platform_label[] = {
	{ "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
	    x8450_dimm_label, 8 },
	{ "MiTAC,Shunde", "CP3250", cp3250_dimm_label, 0 },
	{ NULL, NULL, NULL, 0 }
};
170 
/*
 * Read the SPD status/data register for the given logical bus.
 * The low bit of bus selects the channel, the remaining bits the
 * branch.
 */
static unsigned short
read_spd(int bus)
{
	return (SPD_RD(bus >> 1, bus & 1));
}
182 
183 static void
184 write_spdcmd(int bus, uint32_t val)
185 {
186 	int branch = bus >> 1;
187 	int channel = bus & 1;
188 	SPDCMD_WR(branch, channel, val);
189 }
190 
191 static int
192 read_spd_eeprom(int bus, int slave, int addr)
193 {
194 	int retry = 4;
195 	int wait;
196 	int spd;
197 	uint32_t cmd;
198 
199 	for (;;) {
200 		wait = 1000;
201 		for (;;) {
202 			spd = read_spd(bus);
203 			if ((spd & SPD_BUSY) == 0)
204 				break;
205 			if (--wait == 0)
206 				return (-1);
207 			drv_usecwait(10);
208 		}
209 		cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
210 		write_spdcmd(bus, cmd);
211 		wait = 1000;
212 		for (;;) {
213 			spd = read_spd(bus);
214 			if ((spd & SPD_BUSY) == 0)
215 				break;
216 			if (--wait == 0) {
217 				spd = SPD_BUS_ERROR;
218 				break;
219 			}
220 			drv_usecwait(10);
221 		}
222 		while ((spd & SPD_BUS_ERROR) == 0 &&
223 		    (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
224 		    SPD_READ_DATA_VALID) {
225 			spd = read_spd(bus);
226 			if (--wait == 0)
227 				return (-1);
228 		}
229 		if ((spd & SPD_BUS_ERROR) == 0)
230 			break;
231 		if (--retry == 0)
232 			return (-1);
233 	}
234 	return (spd & 0xff);
235 }
236 
237 static void
238 nb_fini()
239 {
240 	int i, j;
241 	int nchannels = nb_number_memory_controllers * nb_channels_per_branch;
242 	nb_dimm_t **dimmpp;
243 	nb_dimm_t *dimmp;
244 
245 	dimmpp = nb_dimms;
246 	for (i = 0; i < nchannels; i++) {
247 		for (j = 0; j < nb_dimms_per_channel; j++) {
248 			dimmp = *dimmpp;
249 			if (dimmp) {
250 				kmem_free(dimmp, sizeof (nb_dimm_t));
251 				*dimmpp = NULL;
252 			}
253 			dimmp++;
254 		}
255 	}
256 	kmem_free(nb_dimms, sizeof (nb_dimm_t *) * nb_dimm_slots);
257 	nb_dimms = NULL;
258 	dimm_fini();
259 }
260 
261 void
262 nb_scrubber_enable()
263 {
264 	uint32_t mc;
265 
266 	if (!nb_hw_memory_scrub_enable)
267 		return;
268 
269 	mc = MC_RD();
270 	if ((mc & MC_MIRROR) != 0) /* mirror mode */
271 		mc |= MC_PATROL_SCRUB;
272 	else
273 		mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
274 	MC_WR(mc);
275 
276 	if (nb_sw_scrub_disabled++)
277 		cmi_mc_sw_memscrub_disable();
278 }
279 
280 static void
281 fbd_eeprom(int channel, int dimm, nb_dimm_t *dp)
282 {
283 	int i, t;
284 	int spd_sz;
285 
286 	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
287 	if (t == 1)
288 		spd_sz = 128;
289 	else if (t == 2)
290 		spd_sz = 176;
291 	else
292 		spd_sz = 256;
293 	dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
294 	    (read_spd_eeprom(channel, dimm, 118) << 8);
295 	dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
296 	dp->serial_number =
297 	    (read_spd_eeprom(channel, dimm, 122) << 24) |
298 	    (read_spd_eeprom(channel, dimm, 123) << 16) |
299 	    (read_spd_eeprom(channel, dimm, 124) << 8) |
300 	    read_spd_eeprom(channel, dimm, 125);
301 	t = read_spd_eeprom(channel, dimm, 121);
302 	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
303 	dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
304 	if (spd_sz > 128) {
305 		for (i = 0; i < sizeof (dp->part_number); i++) {
306 			dp->part_number[i] =
307 			    read_spd_eeprom(channel, dimm, 128 + i);
308 		}
309 		for (i = 0; i < sizeof (dp->revision); i++) {
310 			dp->revision[i] =
311 			    read_spd_eeprom(channel, dimm, 146 + i);
312 		}
313 	}
314 }
315 
316 /* read the manR of the DDR2 dimm */
317 static void
318 ddr2_eeprom(int channel, int dimm, nb_dimm_t *dp)
319 {
320 	int i, t;
321 	int slave;
322 
323 	slave = channel & 0x1 ? dimm + 4 : dimm;
324 
325 	/* byte[3]: number of row addresses */
326 	dp->nrow = read_spd_eeprom(channel, slave, 3) & 0x1f;
327 
328 	/* byte[4]: number of column addresses */
329 	dp->ncolumn = read_spd_eeprom(channel, slave, 4) & 0xf;
330 
331 	/* byte[5]: numranks; 0 means one rank */
332 	dp->nranks = (read_spd_eeprom(channel, slave, 5) & 0x3) + 1;
333 
334 	/* byte[6]: data width */
335 	dp->width = (read_spd_eeprom(channel, slave, 6) >> 5) << 2;
336 
337 	/* byte[17]: number of banks */
338 	dp->nbanks = read_spd_eeprom(channel, slave, 17);
339 
340 	dp->dimm_size = DIMMSIZE(dp->nrow, dp->ncolumn, dp->nranks, dp->nbanks,
341 	    dp->width);
342 
343 	/* manufacture-id - byte[64-65] */
344 	dp->manufacture_id = read_spd_eeprom(channel, slave, 64) |
345 	    (read_spd_eeprom(channel, dimm, 65) << 8);
346 
347 	/* location - byte[72] */
348 	dp->manufacture_location = read_spd_eeprom(channel, slave, 72);
349 
350 	/* serial number - byte[95-98] */
351 	dp->serial_number =
352 	    (read_spd_eeprom(channel, slave, 98) << 24) |
353 	    (read_spd_eeprom(channel, slave, 97) << 16) |
354 	    (read_spd_eeprom(channel, slave, 96) << 8) |
355 	    read_spd_eeprom(channel, slave, 95);
356 
357 	/* week - byte[94] */
358 	t = read_spd_eeprom(channel, slave, 94);
359 	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
360 	/* week - byte[93] */
361 	t = read_spd_eeprom(channel, slave, 93);
362 	dp->manufacture_year = (t >> 4) * 10 + (t & 0xf) + 2000;
363 
364 	/* part number - byte[73-81] */
365 	for (i = 0; i < 8; i++) {
366 		dp->part_number[i] = read_spd_eeprom(channel, slave, 73 + i);
367 	}
368 
369 	/* revision - byte[91-92] */
370 	for (i = 0; i < 2; i++) {
371 		dp->revision[i] = read_spd_eeprom(channel, slave, 91 + i);
372 	}
373 }
374 
375 static boolean_t
376 nb_dimm_present(int channel, int dimm)
377 {
378 	boolean_t rc = B_FALSE;
379 
380 	if (nb_chipset == INTEL_NB_5100) {
381 		int t, slave;
382 		slave = channel & 0x1 ? dimm + 4 : dimm;
383 		/* read the type field from the dimm and check for DDR2 type */
384 		if ((t = read_spd_eeprom(channel, slave, SPD_MEM_TYPE)) == -1)
385 			return (B_FALSE);
386 		rc = (t & 0xf) == SPD_DDR2;
387 	} else {
388 		rc = MTR_PRESENT(MTR_RD(channel, dimm)) != 0;
389 	}
390 
391 	return (rc);
392 }
393 
394 static nb_dimm_t *
395 nb_ddr2_dimm_init(int channel, int dimm, int start_rank)
396 {
397 	nb_dimm_t *dp;
398 
399 	if (nb_dimm_present(channel, dimm) == B_FALSE)
400 		return (NULL);
401 
402 	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
403 
404 	ddr2_eeprom(channel, dimm, dp);
405 
406 	/* The 1st rank of the dimm takes on this value */
407 	dp->start_rank = (uint8_t)start_rank;
408 
409 	dp->mtr_present = 1;
410 
411 	return (dp);
412 }
413 
414 static nb_dimm_t *
415 nb_fbd_dimm_init(int channel, int dimm, uint16_t mtr)
416 {
417 	nb_dimm_t *dp;
418 	int t;
419 
420 	if (MTR_PRESENT(mtr) == 0)
421 		return (NULL);
422 	t = read_spd_eeprom(channel, dimm, SPD_MEM_TYPE) & 0xf;
423 
424 	/* check for the dimm type */
425 	if (t != SPD_FBDIMM)
426 		return (NULL);
427 
428 	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
429 
430 	fbd_eeprom(channel, dimm, dp);
431 
432 	dp->mtr_present = MTR_PRESENT(mtr);
433 	dp->start_rank = dimm << 1;
434 	dp->nranks = MTR_NUMRANK(mtr);
435 	dp->nbanks = MTR_NUMBANK(mtr);
436 	dp->ncolumn = MTR_NUMCOL(mtr);
437 	dp->nrow = MTR_NUMROW(mtr);
438 	dp->width = MTR_WIDTH(mtr);
439 	dp->dimm_size = MTR_DIMMSIZE(mtr);
440 
441 	return (dp);
442 }
443 
/*
 * Compute the upper limit of the branch-select bank that contains
 * `base' for the given controller (branch), adjusting for the memory
 * hole between top_of_low_memory and TLOW_MAX and for dual-branch
 * population.  Returns 0 if no bank on this controller covers `base'.
 */
static uint64_t
mc_range(int controller, uint64_t base)
{
	int i;
	uint64_t limit = 0;

	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
		if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
		    base < nb_banks[i].limit) {
			limit = nb_banks[i].limit;
			/* back out the hole below TLOW_MAX if in range */
			if (base <= top_of_low_memory &&
			    limit > top_of_low_memory) {
				limit -= TLOW_MAX - top_of_low_memory;
			}
			/* both ways populated: each branch holds half */
			if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
			    nb_mode != NB_MEMORY_MIRROR) {
				limit = limit / 2;
			}
		}
	}
	return (limit);
}
466 
467 void
468 nb_mc_init()
469 {
470 	uint16_t tolm;
471 	uint16_t mir;
472 	uint32_t hole_base;
473 	uint32_t hole_size;
474 	uint32_t dmir;
475 	uint64_t base;
476 	uint64_t limit;
477 	uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
478 	int i, j, k;
479 	uint8_t interleave;
480 
481 	base = 0;
482 	tolm = TOLM_RD();
483 	top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
484 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
485 		mir = MIR_RD(i);
486 		limit = (uint64_t)(mir >> 4) << 28;
487 		way0 = mir & 1;
488 		way1 = (mir >> 1) & 1;
489 		if (way0 == 0 && way1 == 0) {
490 			way0 = 1;
491 			way1 = 1;
492 		}
493 		if (limit > top_of_low_memory)
494 			limit += TLOW_MAX - top_of_low_memory;
495 		nb_banks[i].base = base;
496 		nb_banks[i].limit = limit;
497 		nb_banks[i].way[0] = way0;
498 		nb_banks[i].way[1] = way1;
499 		base = limit;
500 	}
501 	for (i = 0; i < nb_number_memory_controllers; i++) {
502 		base = 0;
503 
504 		for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
505 			dmir = DMIR_RD(i, j);
506 			limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
507 			if (limit == 0) {
508 				limit = mc_range(i, base);
509 			}
510 			branch_interleave = 0;
511 			hole_base = 0;
512 			hole_size = 0;
513 			DMIR_RANKS(dmir, rank0, rank1, rank2, rank3);
514 			if (rank0 == rank1)
515 				interleave = 1;
516 			else if (rank0 == rank2)
517 				interleave = 2;
518 			else
519 				interleave = 4;
520 			if (nb_mode != NB_MEMORY_MIRROR &&
521 			    nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
522 				for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
523 					if (base >= nb_banks[k].base &&
524 					    base < nb_banks[k].limit) {
525 						if (nb_banks[i].way[0] &&
526 						    nb_banks[i].way[1]) {
527 							interleave *= 2;
528 							limit *= 2;
529 							branch_interleave = 1;
530 						}
531 						break;
532 					}
533 				}
534 			}
535 			if (base < top_of_low_memory &&
536 			    limit > top_of_low_memory) {
537 				hole_base = top_of_low_memory;
538 				hole_size = TLOW_MAX - top_of_low_memory;
539 				limit += hole_size;
540 			} else if (base > top_of_low_memory) {
541 				limit += TLOW_MAX - top_of_low_memory;
542 			}
543 			nb_ranks[i][j].base = base;
544 			nb_ranks[i][j].limit = limit;
545 			nb_ranks[i][j].rank[0] = rank0;
546 			nb_ranks[i][j].rank[1] = rank1;
547 			nb_ranks[i][j].rank[2] = rank2;
548 			nb_ranks[i][j].rank[3] = rank3;
549 			nb_ranks[i][j].interleave = interleave;
550 			nb_ranks[i][j].branch_interleave = branch_interleave;
551 			nb_ranks[i][j].hole_base = hole_base;
552 			nb_ranks[i][j].hole_size = hole_size;
553 			if (limit > base) {
554 				if (rank0 != rank1) {
555 					dimm_add_rank(i, rank1,
556 					    branch_interleave, 1, base,
557 					    hole_base, hole_size, interleave,
558 					    limit);
559 					if (rank0 != rank2) {
560 						dimm_add_rank(i, rank2,
561 						    branch_interleave, 2, base,
562 						    hole_base, hole_size,
563 						    interleave, limit);
564 						dimm_add_rank(i, rank3,
565 						    branch_interleave, 3, base,
566 						    hole_base, hole_size,
567 						    interleave, limit);
568 					}
569 				}
570 			}
571 			base = limit;
572 		}
573 	}
574 }
575 
576 void
577 nb_used_spare_rank(int branch, int bad_rank)
578 {
579 	int i;
580 	int j;
581 
582 	for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
583 		for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
584 			if (nb_ranks[branch][i].rank[j] == bad_rank) {
585 				nb_ranks[branch][i].rank[j] =
586 				    spare_rank[branch];
587 				i = NB_MEM_RANK_SELECT;
588 				break;
589 			}
590 		}
591 	}
592 }
593 
594 find_dimm_label_t *
595 find_dimms_per_channel()
596 {
597 	struct platform_label *pl;
598 	smbios_info_t si;
599 	smbios_system_t sy;
600 	id_t id;
601 	int i, j;
602 	find_dimm_label_t *rt = NULL;
603 
604 	if (ksmbios != NULL && nb_no_smbios == 0) {
605 		if ((id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
606 		    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {
607 			for (pl = platform_label; pl->sys_vendor; pl++) {
608 				if (strncmp(pl->sys_vendor,
609 				    si.smbi_manufacturer,
610 				    strlen(pl->sys_vendor)) == 0 &&
611 				    strncmp(pl->sys_product, si.smbi_product,
612 				    strlen(pl->sys_product)) == 0) {
613 					nb_dimms_per_channel =
614 					    pl->dimms_per_channel;
615 					rt = &pl->dimm_label;
616 					break;
617 				}
618 			}
619 		}
620 	}
621 	if (nb_dimms_per_channel == 0) {
622 		/*
623 		 * Scan all memory channels if we find a channel which has more
624 		 * dimms then we have seen before set nb_dimms_per_channel to
625 		 * the number of dimms on the channel
626 		 */
627 		for (i = 0; i < nb_number_memory_controllers; i++) {
628 			for (j = nb_dimms_per_channel;
629 			    j < NB_MAX_DIMMS_PER_CHANNEL; j++) {
630 				if (nb_dimm_present(i, j))
631 					nb_dimms_per_channel = j + 1;
632 			}
633 		}
634 	}
635 	return (rt);
636 }
637 
/*
 * Accumulator passed through smbios_iter() while matching SMBIOS
 * memory-device records against the physical dimm list; see
 * check_memdevice() and dimm_label().
 */
struct smb_dimm_rec {
	int dimms;		/* dimms the driver discovered */
	int slots;		/* SMBIOS memory-device records seen */
	int populated;		/* SMBIOS records with non-zero size */
	nb_dimm_t **dimmpp;	/* cursor into the nb_dimms array */
};
644 
/*
 * smbios_iter() callback: copy each SMBIOS memory-device locator
 * string into the label field of the corresponding nb_dimms[] entry.
 * rp->dimmpp is the cursor into nb_dimms and is advanced as records
 * are consumed; returns -1 to stop iterating once the cursor runs off
 * the end of the array, 0 otherwise.
 */
static int
dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
	nb_dimm_t ***dimmpp;
	nb_dimm_t *dimmp;
	smbios_memdevice_t md;

	dimmpp = &rp->dimmpp;
	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
		if (*dimmpp >= &nb_dimms[nb_dimm_slots])
			return (-1);
		dimmp = **dimmpp;
		if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0 &&
		    md.smbmd_dloc != NULL) {
			if (md.smbmd_size) {
				/*
				 * empty slot: just advance past it when
				 * the SMBIOS slot count matches the
				 * controller's, or when fewer dimms were
				 * found than SMBIOS reports populated
				 */
				if (dimmp == NULL &&
				    (rp->slots == nb_dimm_slots ||
				    rp->dimms < rp->populated)) {
					(*dimmpp)++;
					return (0);
				}
				/*
				 * if there is no physical dimm for this smbios
				 * record it is because this system has less
				 * physical slots than the controller supports
				 * so skip empty slots to find the slot this
				 * smbios record belongs too
				 */
				while (dimmp == NULL) {
					(*dimmpp)++;
					if (*dimmpp >= &nb_dimms[nb_dimm_slots])
						return (-1);
					dimmp = **dimmpp;
				}
				(void) snprintf(dimmp->label,
				    sizeof (dimmp->label), "%s", md.smbmd_dloc);
				(*dimmpp)++;
			}
		}
	}
	return (0);
}
688 
689 static int
690 check_memdevice(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
691 {
692 	struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
693 	smbios_memdevice_t md;
694 
695 	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
696 		if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0) {
697 			rp->slots++;
698 			if (md.smbmd_size) {
699 				rp->populated++;
700 			}
701 		}
702 	}
703 	return (0);
704 }
705 
706 void
707 nb_smbios()
708 {
709 	struct smb_dimm_rec r;
710 	int i;
711 
712 	if (ksmbios != NULL && nb_no_smbios == 0) {
713 		r.dimms = 0;
714 		r.slots = 0;
715 		r.populated = 0;
716 		r.dimmpp = nb_dimms;
717 		for (i = 0; i < nb_dimm_slots; i++) {
718 			if (nb_dimms[i] != NULL)
719 				r.dimms++;
720 		}
721 		(void) smbios_iter(ksmbios, check_memdevice, &r);
722 		(void) smbios_iter(ksmbios, dimm_label, &r);
723 	}
724 }
725 
/*
 * Sun Blade X8450: the logical dimm index packs the channel in the
 * high bits (index >> 3) and the slot within the channel in the low
 * three bits; the label is "D<n>" with n = slot * 4 + channel.
 */
static void
x8450_dimm_label(int dimm, char *label, int label_sz)
{
	int channel;
	int slot;

	channel = dimm >> 3;
	slot = dimm & 0x7;
	(void) snprintf(label, label_sz, "D%d", slot * 4 + channel);
}
734 
735 /*
736  * CP3250 DIMM labels
737  * Channel   Dimm   Label
738  *       0      0      A0
739  *       1      0      B0
740  *       0      1      A1
741  *       1      1      B1
742  *       0      2      A2
743  *       1      2      B2
744  */
745 static void
746 cp3250_dimm_label(int dimm, char *label, int label_sz)
747 {
748 	int channel = dimm / nb_dimms_per_channel;
749 
750 	dimm = dimm % nb_dimms_per_channel;
751 	(void) snprintf(label, label_sz, "%c%d", channel == 0 ? 'A' : 'B',
752 	    dimm);
753 }
754 
755 /*
756  * Map the rank id to dimm id of a channel
757  * For the 5100 chipset, walk through the dimm list of channel the check if
758  * the given rank id is within the rank range assigned to the dimm.
759  * For other chipsets, the dimm is rank/2.
760  */
761 int
762 nb_rank2dimm(int channel, int rank)
763 {
764 	int i;
765 	nb_dimm_t **dimmpp = nb_dimms;
766 
767 	if (nb_chipset != INTEL_NB_5100)
768 		return (rank >> 1);
769 
770 	dimmpp += channel * nb_dimms_per_channel;
771 	for (i = 0; i < nb_dimms_per_channel; i++) {
772 		if ((rank >= dimmpp[i]->start_rank) &&
773 		    (rank < dimmpp[i]->start_rank + dimmpp[i]->nranks)) {
774 			return (i);
775 		}
776 	}
777 	return (-1);
778 }
779 
/*
 * Discover the DDR2 dimms (5100 chipset) on every channel, populate
 * nb_dimms[], and set the memory mode: spare-rank if a spare has been
 * deployed on any controller, single-channel if all dimms fit on one
 * channel, otherwise normal.
 */
static void
nb_ddr2_dimms_init(find_dimm_label_t *label_function)
{
	int i, j;
	int start_rank;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	nb_dimm_slots = nb_number_memory_controllers * nb_channels_per_branch *
	    nb_dimms_per_channel;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_dimm_slots, KM_SLEEP);
	dimmpp = nb_dimms;
	nb_mode = NB_MEMORY_NORMAL;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		/* check sparing registers until a deployed spare is found */
		if (nb_mode == NB_MEMORY_NORMAL) {
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}

		/* The 1st dimm of a channel starts at rank 0 */
		start_rank = 0;

		for (j = 0; j < nb_dimms_per_channel; j++) {
			dimmpp[j] = nb_ddr2_dimm_init(i, j, start_rank);
			if (dimmpp[j]) {
				nb_ndimm ++;
				if (label_function) {
					label_function->label_function(
					    (i * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
				start_rank += dimmpp[j]->nranks;
				/*
				 * add an extra rank because
				 * single-ranked dimm still takes on two ranks.
				 */
				if (dimmpp[j]->nranks & 0x1)
					start_rank++;
				}
		}
		dimmpp += nb_dimms_per_channel;
	}

	/*
	 * single channel is supported.
	 */
	if (nb_ndimm > 0 && nb_ndimm <= nb_dimms_per_channel) {
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	}
}
837 
/*
 * Discover the FB-DIMMs on both channels of every branch, populate
 * nb_dimms[], and set the memory mode from the MCA/MC registers
 * (single-channel, mirror, or normal; normal may become spare-rank
 * when a deployed spare is found).
 */
static void
nb_fbd_dimms_init(find_dimm_label_t *label_function)
{
	int i, j, k, l;
	uint16_t mtr;
	uint32_t mc, mca;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	mca = MCA_RD();
	mc = MC_RD();
	if (mca & MCA_SCHDIMM)  /* single-channel mode */
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	else if ((mc & MC_MIRROR) != 0) /* mirror mode */
		nb_mode = NB_MEMORY_MIRROR;
	else
		nb_mode = NB_MEMORY_NORMAL;
	/* two channels per branch */
	nb_dimm_slots = nb_number_memory_controllers * 2 * nb_dimms_per_channel;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_dimm_slots, KM_SLEEP);
	dimmpp = nb_dimms;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		/* check sparing registers until a deployed spare is found */
		if (nb_mode == NB_MEMORY_NORMAL) {
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}
		for (j = 0; j < nb_dimms_per_channel; j++) {
			/* one MTR describes slot j on both channels */
			mtr = MTR_RD(i, j);
			k = i * 2;	/* first channel of this branch */
			dimmpp[j] = nb_fbd_dimm_init(k, j, mtr);
			if (dimmpp[j]) {
				nb_ndimm ++;
				if (label_function) {
					label_function->label_function(
					    (i * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
			}
			/* second channel of the branch */
			dimmpp[j + nb_dimms_per_channel] =
			    nb_fbd_dimm_init(k + 1, j, mtr);
			l = j + nb_dimms_per_channel;
			if (dimmpp[l]) {
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + l,
					    dimmpp[l]->label,
					    sizeof (dimmpp[l]->label));
				}
				nb_ndimm ++;
			}
		}
		dimmpp += nb_dimms_per_channel * 2;
	}
}
898 
/*
 * Discover and label all dimms.  The 5100 uses DDR2 discovery; the
 * other supported chipsets use FB-DIMM discovery.  When no platform-
 * specific label routine was matched, fall back to SMBIOS locator
 * strings for the labels.
 */
static void
nb_dimms_init(find_dimm_label_t *label_function)
{
	if (nb_chipset == INTEL_NB_5100)
		nb_ddr2_dimms_init(label_function);
	else
		nb_fbd_dimms_init(label_function);

	if (label_function == NULL)
		nb_smbios();
}
910 
/* Setup the ESI port registers to enable SERR for southbridge */
static void
nb_pex_init()
{
	int i = 0; /* ESI port */
	uint16_t regw;

	/* save the BIOS-programmed values for nb_pex_fini() */
	emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
	emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
	emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
	docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
	uncerrsev[i] = UNCERRSEV_RD(i);

	/* optionally override the correctable/uncorrectable masks */
	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
	if (nb_chipset == INTEL_NB_5400) {
		/* disable masking of ERR pins used by DOCMD */
		PEX_ERR_PIN_MASK_WR(i, 0x10);
	}

	/* RP error message (CE/NFE/FE) detect mask */
	EMASK_RP_PEX_WR(i, nb5000_rp_pex);

	/* Command Register - Enable SERR */
	regw = nb_pci_getw(0, i, 0, PCI_CONF_COMM, 0);
	nb_pci_putw(0, i, 0, PCI_CONF_COMM,
	    regw | PCI_COMM_SERR_ENABLE);

	/* Root Control Register - SERR on NFE/FE */
	PEXROOTCTL_WR(i, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN);

	/* AER UE Mask - Mask UR */
	UNCERRMSK_WR(i, PCIE_AER_UCE_UR);
}
948 
/*
 * Restore the ESI port error masks and DOCMD settings saved by
 * nb_pex_init().  When the init-time override tunables are set, the
 * override mask values are re-applied instead of the saved ones.
 */
static void
nb_pex_fini()
{
	int i = 0; /* ESI port */

	EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
	EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
	EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
	PEX_ERR_DOCMD_WR(i, docmd_pex[i]);

	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
}
964 
/*
 * Initialize the chipset "internal" (INT) error reporting registers:
 * save the BIOS-programmed values for restore at nb_int_fini() time,
 * mask everything while adjusting, then program which errors signal
 * MCERR versus being left for polling, and finally set the EMASK
 * (chipset- and stepping-specific on the 7300/5400).
 */
void
nb_int_init()
{
	uint32_t err0_int;
	uint32_t err1_int;
	uint32_t err2_int;
	uint32_t mcerr_int;
	uint32_t emask_int;
	uint16_t stepping;

	err0_int = ERR0_INT_RD();
	err1_int = ERR1_INT_RD();
	err2_int = ERR2_INT_RD();
	mcerr_int = MCERR_INT_RD();
	emask_int = EMASK_INT_RD();

	/* saved for nb_int_fini() */
	nb_err0_int = err0_int;
	nb_err1_int = err1_int;
	nb_err2_int = err2_int;
	nb_mcerr_int = mcerr_int;
	nb_emask_int = emask_int;

	/* mask all internal errors while the registers are adjusted */
	ERR0_INT_WR(ERR_INT_ALL);
	ERR1_INT_WR(ERR_INT_ALL);
	ERR2_INT_WR(ERR_INT_ALL);
	MCERR_INT_WR(ERR_INT_ALL);
	EMASK_INT_WR(ERR_INT_ALL);

	/*
	 * BIOS-handled fatal errors stay unmasked in MCERR unless BIOS
	 * itself left them masked in all of ERR0/1/2; errors we poll
	 * for are masked off from MCERR and from ERR0/1/2.
	 */
	mcerr_int &= ~nb5000_mask_bios_int;
	mcerr_int |= nb5000_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
	mcerr_int |= nb5000_mask_poll_int;
	err0_int |= nb5000_mask_poll_int;
	err1_int |= nb5000_mask_poll_int;
	err2_int |= nb5000_mask_poll_int;

	l_mcerr_int = mcerr_int;
	ERR0_INT_WR(err0_int);
	ERR1_INT_WR(err1_int);
	ERR2_INT_WR(err2_int);
	MCERR_INT_WR(mcerr_int);
	if (nb5000_reset_emask_int) {
		if (nb_chipset == INTEL_NB_7300) {
			stepping = NB5000_STEPPING();
			/* stepping 0 of the 7300 needs a different mask */
			if (stepping == 0)
				EMASK_5000_INT_WR(nb7300_emask_int_step0);
			else
				EMASK_5000_INT_WR(nb7300_emask_int);
		} else if (nb_chipset == INTEL_NB_5400) {
			/* preserve the 5400's reserved EMASK bits */
			EMASK_5400_INT_WR(nb5400_emask_int |
			    (emask_int & EMASK_INT_RES));
		} else {
			EMASK_5000_INT_WR(nb5000_emask_int);
		}
	} else {
		EMASK_INT_WR(nb_emask_int);
	}
}
1022 
/*
 * Restore the "internal" (INT) error registers: mask everything
 * first, then write back the BIOS values saved by nb_int_init().
 */
void
nb_int_fini()
{
	ERR0_INT_WR(ERR_INT_ALL);
	ERR1_INT_WR(ERR_INT_ALL);
	ERR2_INT_WR(ERR_INT_ALL);
	MCERR_INT_WR(ERR_INT_ALL);
	EMASK_INT_WR(ERR_INT_ALL);

	ERR0_INT_WR(nb_err0_int);
	ERR1_INT_WR(nb_err1_int);
	ERR2_INT_WR(nb_err2_int);
	MCERR_INT_WR(nb_mcerr_int);
	EMASK_INT_WR(nb_emask_int);
}
1038 
1039 void
1040 nb_int_mask_mc(uint32_t mc_mask_int)
1041 {
1042 	uint32_t emask_int;
1043 
1044 	emask_int = MCERR_INT_RD();
1045 	if ((emask_int & mc_mask_int) != mc_mask_int) {
1046 		MCERR_INT_WR(emask_int|mc_mask_int);
1047 		nb_mask_mc_set = 1;
1048 	}
1049 }
1050 
/*
 * Initialize the FB-DIMM (FBD) error reporting registers: save the
 * BIOS values for restore at nb_fbd_fini() time, mask everything
 * while adjusting, then program the poll/bios masks.  The 7300 in
 * mirror mode handles EMASK_FBD_M23 specially (MCH 7300 errata 34),
 * and the 5400 uses its own mask tunables.
 */
static void
nb_fbd_init()
{
	uint32_t err0_fbd;
	uint32_t err1_fbd;
	uint32_t err2_fbd;
	uint32_t mcerr_fbd;
	uint32_t emask_fbd;
	uint32_t emask_bios_fbd;
	uint32_t emask_poll_fbd;

	err0_fbd = ERR0_FBD_RD();
	err1_fbd = ERR1_FBD_RD();
	err2_fbd = ERR2_FBD_RD();
	mcerr_fbd = MCERR_FBD_RD();
	emask_fbd = EMASK_FBD_RD();

	/* saved for nb_fbd_fini() */
	nb_err0_fbd = err0_fbd;
	nb_err1_fbd = err1_fbd;
	nb_err2_fbd = err2_fbd;
	nb_mcerr_fbd = mcerr_fbd;
	nb_emask_fbd = emask_fbd;

	/* mask all FBD errors while the registers are adjusted */
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	if (nb_chipset == INTEL_NB_7300 && nb_mode == NB_MEMORY_MIRROR) {
		/* MCH 7300 errata 34 */
		emask_bios_fbd = nb5000_mask_bios_fbd & ~EMASK_FBD_M23;
		emask_poll_fbd = nb5000_mask_poll_fbd;
		mcerr_fbd |= EMASK_FBD_M23;
	} else if (nb_chipset == INTEL_NB_5400) {
		emask_bios_fbd = nb5400_mask_bios_fbd;
		emask_poll_fbd = nb5400_mask_poll_fbd;
	} else {
		emask_bios_fbd = nb5000_mask_bios_fbd;
		emask_poll_fbd = nb5000_mask_poll_fbd;
	}
	/*
	 * BIOS-handled errors stay unmasked in MCERR unless BIOS left
	 * them masked in all of ERR0/1/2; polled errors are masked off
	 * from MCERR and from ERR0/1/2.
	 */
	mcerr_fbd &= ~emask_bios_fbd;
	mcerr_fbd |= emask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
	mcerr_fbd |= emask_poll_fbd;
	err0_fbd |= emask_poll_fbd;
	err1_fbd |= emask_poll_fbd;
	err2_fbd |= emask_poll_fbd;

	l_mcerr_fbd = mcerr_fbd;
	ERR0_FBD_WR(err0_fbd);
	ERR1_FBD_WR(err1_fbd);
	ERR2_FBD_WR(err2_fbd);
	MCERR_FBD_WR(mcerr_fbd);
	if (nb5000_reset_emask_fbd) {
		if (nb_chipset == INTEL_NB_5400)
			EMASK_FBD_WR(nb5400_emask_fbd);
		else
			EMASK_FBD_WR(nb5000_emask_fbd);
	} else {
		EMASK_FBD_WR(nb_emask_fbd);
	}
}
1113 
1114 void
1115 nb_fbd_mask_mc(uint32_t mc_mask_fbd)
1116 {
1117 	uint32_t emask_fbd;
1118 
1119 	emask_fbd = MCERR_FBD_RD();
1120 	if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
1121 		MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
1122 		nb_mask_mc_set = 1;
1123 	}
1124 }
1125 
/*
 * Restore the FBD error registers: mask everything first, then write
 * back the BIOS values saved by nb_fbd_init().
 */
static void
nb_fbd_fini()
{
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	ERR0_FBD_WR(nb_err0_fbd);
	ERR1_FBD_WR(nb_err1_fbd);
	ERR2_FBD_WR(nb_err2_fbd);
	MCERR_FBD_WR(nb_mcerr_fbd);
	EMASK_FBD_WR(nb_emask_fbd);
}
1141 
/*
 * Program the 5100 memory controller (MEM) error-detection registers:
 * save the current contents in the nb_*_mem globals for restoration by
 * nb_mem_fini(), write all ones to each register, then enable the
 * events in nb5100_mask_poll_mem in ERR0/ERR1/ERR2/MCERR.  Only
 * reached when nb_chipset == INTEL_NB_5100 (see nb_dev_init()).
 */
static void
nb_mem_init()
{
	uint32_t err0_mem;
	uint32_t err1_mem;
	uint32_t err2_mem;
	uint32_t mcerr_mem;
	uint32_t emask_mem;
	uint32_t emask_poll_mem;

	err0_mem = ERR0_MEM_RD();
	err1_mem = ERR1_MEM_RD();
	err2_mem = ERR2_MEM_RD();
	mcerr_mem = MCERR_MEM_RD();
	emask_mem = EMASK_MEM_RD();

	/* save original values so nb_mem_fini() can restore them */
	nb_err0_mem = err0_mem;
	nb_err1_mem = err1_mem;
	nb_err2_mem = err2_mem;
	nb_mcerr_mem = mcerr_mem;
	nb_emask_mem = emask_mem;

	ERR0_MEM_WR(0xffffffff);
	ERR1_MEM_WR(0xffffffff);
	ERR2_MEM_WR(0xffffffff);
	MCERR_MEM_WR(0xffffffff);
	EMASK_MEM_WR(0xffffffff);

	/* enable the events the driver polls for */
	emask_poll_mem = nb5100_mask_poll_mem;
	mcerr_mem |= emask_poll_mem;
	err0_mem |= emask_poll_mem;
	err1_mem |= emask_poll_mem;
	err2_mem |= emask_poll_mem;

	/* l_mcerr_mem is the value nb_mask_mc_reset() rewrites */
	l_mcerr_mem = mcerr_mem;
	ERR0_MEM_WR(err0_mem);
	ERR1_MEM_WR(err1_mem);
	ERR2_MEM_WR(err2_mem);
	MCERR_MEM_WR(mcerr_mem);
	if (nb5100_reset_emask_mem) {
		EMASK_MEM_WR(~nb5100_mask_poll_mem);
	} else {
		EMASK_MEM_WR(nb_emask_mem);
	}
}
1187 
1188 void
1189 nb_mem_mask_mc(uint32_t mc_mask_mem)
1190 {
1191 	uint32_t emask_mem;
1192 
1193 	emask_mem = MCERR_MEM_RD();
1194 	if ((emask_mem & mc_mask_mem) != mc_mask_mem) {
1195 		MCERR_MEM_WR(emask_mem|mc_mask_mem);
1196 		nb_mask_mc_set = 1;
1197 	}
1198 }
1199 
/*
 * Undo nb_mem_init(): write all ones to the MEM error registers, then
 * restore the values saved in the nb_*_mem globals at init time.
 */
static void
nb_mem_fini()
{
	ERR0_MEM_WR(0xffffffff);
	ERR1_MEM_WR(0xffffffff);
	ERR2_MEM_WR(0xffffffff);
	MCERR_MEM_WR(0xffffffff);
	EMASK_MEM_WR(0xffffffff);

	/* restore the values captured at init time */
	ERR0_MEM_WR(nb_err0_mem);
	ERR1_MEM_WR(nb_err1_mem);
	ERR2_MEM_WR(nb_err2_mem);
	MCERR_MEM_WR(nb_mcerr_mem);
	EMASK_MEM_WR(nb_emask_mem);
}
1215 
/*
 * Program the front side bus (FSB) error registers for FSBs 0 and 1
 * (and additionally 2 and 3 on the 7300).  The FSB 0 values are saved
 * in the nb_*_fsb globals for restoration by nb_fsb_fini(); all
 * registers are then written with all ones before the computed masks
 * are applied.  The same err0/err1/err2/mcerr values are written to
 * every FSB.
 */
static void
nb_fsb_init()
{
	uint16_t err0_fsb;
	uint16_t err1_fsb;
	uint16_t err2_fsb;
	uint16_t mcerr_fsb;
	uint16_t emask_fsb;

	/* capture the FSB 0 values; these stand in for all FSBs */
	err0_fsb = ERR0_FSB_RD(0);
	err1_fsb = ERR1_FSB_RD(0);
	err2_fsb = ERR2_FSB_RD(0);
	mcerr_fsb = MCERR_FSB_RD(0);
	emask_fsb = EMASK_FSB_RD(0);

	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	/* save for restoration in nb_fsb_fini() */
	nb_err0_fsb = err0_fsb;
	nb_err1_fsb = err1_fsb;
	nb_err2_fsb = err2_fsb;
	nb_mcerr_fsb = mcerr_fsb;
	nb_emask_fsb = emask_fsb;

	/*
	 * Clear the BIOS-handled events from MCERR, then put back any of
	 * those bits that are clear in at least one of ERR0/ERR1/ERR2,
	 * and finally add the events this driver polls for.
	 */
	mcerr_fsb &= ~nb5000_mask_bios_fsb;
	mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
	mcerr_fsb |= nb5000_mask_poll_fsb;
	err0_fsb |= nb5000_mask_poll_fsb;
	err1_fsb |= nb5000_mask_poll_fsb;
	err2_fsb |= nb5000_mask_poll_fsb;

	/* l_mcerr_fsb is the value nb_mask_mc_reset() rewrites */
	l_mcerr_fsb = mcerr_fsb;
	ERR0_FSB_WR(0, err0_fsb);
	ERR1_FSB_WR(0, err1_fsb);
	ERR2_FSB_WR(0, err2_fsb);
	MCERR_FSB_WR(0, mcerr_fsb);
	if (nb5000_reset_emask_fsb) {
		EMASK_FSB_WR(0, nb5000_emask_fsb);
	} else {
		EMASK_FSB_WR(0, nb_emask_fsb);
	}

	ERR0_FSB_WR(1, err0_fsb);
	ERR1_FSB_WR(1, err1_fsb);
	ERR2_FSB_WR(1, err2_fsb);
	MCERR_FSB_WR(1, mcerr_fsb);
	if (nb5000_reset_emask_fsb) {
		EMASK_FSB_WR(1, nb5000_emask_fsb);
	} else {
		EMASK_FSB_WR(1, nb_emask_fsb);
	}

	/* the 7300 has four FSBs; program 2 and 3 the same way */
	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(2, err0_fsb);
		ERR1_FSB_WR(2, err1_fsb);
		ERR2_FSB_WR(2, err2_fsb);
		MCERR_FSB_WR(2, mcerr_fsb);
		if (nb5000_reset_emask_fsb) {
			EMASK_FSB_WR(2, nb5000_emask_fsb);
		} else {
			EMASK_FSB_WR(2, nb_emask_fsb);
		}

		ERR0_FSB_WR(3, err0_fsb);
		ERR1_FSB_WR(3, err1_fsb);
		ERR2_FSB_WR(3, err2_fsb);
		MCERR_FSB_WR(3, mcerr_fsb);
		if (nb5000_reset_emask_fsb) {
			EMASK_FSB_WR(3, nb5000_emask_fsb);
		} else {
			EMASK_FSB_WR(3, nb_emask_fsb);
		}
	}
}
1311 
/*
 * Undo nb_fsb_init(): for each FSB (0 and 1; plus 2 and 3 on the
 * 7300), write all ones to the error registers and then restore the
 * FSB 0 values that nb_fsb_init() saved in the nb_*_fsb globals.
 */
static void
nb_fsb_fini() {
	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(0, nb_err0_fsb);
	ERR1_FSB_WR(0, nb_err1_fsb);
	ERR2_FSB_WR(0, nb_err2_fsb);
	MCERR_FSB_WR(0, nb_mcerr_fsb);
	EMASK_FSB_WR(0, nb_emask_fsb);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	ERR0_FSB_WR(1, nb_err0_fsb);
	ERR1_FSB_WR(1, nb_err1_fsb);
	ERR2_FSB_WR(1, nb_err2_fsb);
	MCERR_FSB_WR(1, nb_mcerr_fsb);
	EMASK_FSB_WR(1, nb_emask_fsb);

	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(2, nb_err0_fsb);
		ERR1_FSB_WR(2, nb_err1_fsb);
		ERR2_FSB_WR(2, nb_err2_fsb);
		MCERR_FSB_WR(2, nb_mcerr_fsb);
		EMASK_FSB_WR(2, nb_emask_fsb);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(3, nb_err0_fsb);
		ERR1_FSB_WR(3, nb_err1_fsb);
		ERR2_FSB_WR(3, nb_err2_fsb);
		MCERR_FSB_WR(3, nb_mcerr_fsb);
		EMASK_FSB_WR(3, nb_emask_fsb);
	}
}
1364 
1365 void
1366 nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
1367 {
1368 	uint16_t emask_fsb;
1369 
1370 	emask_fsb = MCERR_FSB_RD(fsb);
1371 	if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
1372 		MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
1373 		nb_mask_mc_set = 1;
1374 	}
1375 }
1376 
/*
 * Program the THR error registers, which exist only on the 5400: save
 * the current contents in the nb_*_thr globals for restoration by
 * nb_thr_fini(), write all ones, then apply the bios/poll masks the
 * same way the FSB init does.  No-op on other chipsets.
 */
static void
nb_thr_init()
{
	uint16_t err0_thr;
	uint16_t err1_thr;
	uint16_t err2_thr;
	uint16_t mcerr_thr;
	uint16_t emask_thr;

	if (nb_chipset == INTEL_NB_5400) {
		err0_thr = ERR0_THR_RD(0);
		err1_thr = ERR1_THR_RD(0);
		err2_thr = ERR2_THR_RD(0);
		mcerr_thr = MCERR_THR_RD(0);
		emask_thr = EMASK_THR_RD(0);

		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		/* save for restoration in nb_thr_fini() */
		nb_err0_thr = err0_thr;
		nb_err1_thr = err1_thr;
		nb_err2_thr = err2_thr;
		nb_mcerr_thr = mcerr_thr;
		nb_emask_thr = emask_thr;

		/*
		 * Clear the BIOS-handled events from MCERR, put back any
		 * of those bits that are clear in at least one of
		 * ERR0/ERR1/ERR2, then add the polled events.
		 */
		mcerr_thr &= ~nb_mask_bios_thr;
		mcerr_thr |= nb_mask_bios_thr &
		    (~err2_thr | ~err1_thr | ~err0_thr);
		mcerr_thr |= nb_mask_poll_thr;
		err0_thr |= nb_mask_poll_thr;
		err1_thr |= nb_mask_poll_thr;
		err2_thr |= nb_mask_poll_thr;

		/* l_mcerr_thr is the value nb_mask_mc_reset() rewrites */
		l_mcerr_thr = mcerr_thr;
		ERR0_THR_WR(err0_thr);
		ERR1_THR_WR(err1_thr);
		ERR2_THR_WR(err2_thr);
		MCERR_THR_WR(mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1421 
/*
 * Undo nb_thr_init() on the 5400: write all ones to the THR error
 * registers, then restore the values saved in the nb_*_thr globals.
 * No-op on other chipsets.
 */
static void
nb_thr_fini()
{
	if (nb_chipset == INTEL_NB_5400) {
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		/* restore the values captured at init time */
		ERR0_THR_WR(nb_err0_thr);
		ERR1_THR_WR(nb_err1_thr);
		ERR2_THR_WR(nb_err2_thr);
		MCERR_THR_WR(nb_mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1439 
1440 void
1441 nb_thr_mask_mc(uint16_t mc_mask_thr)
1442 {
1443 	uint16_t emask_thr;
1444 
1445 	emask_thr = MCERR_THR_RD(0);
1446 	if ((emask_thr & mc_mask_thr) != mc_mask_thr) {
1447 		MCERR_THR_WR(emask_thr|mc_mask_thr);
1448 		nb_mask_mc_set = 1;
1449 	}
1450 }
1451 
/*
 * Rewrite every MCERR register with the value last computed by its
 * init routine (the l_mcerr_* globals), undoing any bits OR'd in by
 * the nb_*_mask_mc() routines.  The 5100 has MEM registers where the
 * other chipsets have FBD; the 7300 has four FSBs; THR exists only on
 * the 5400.
 */
void
nb_mask_mc_reset()
{
	if (nb_chipset == INTEL_NB_5100)
		MCERR_MEM_WR(l_mcerr_mem);
	else
		MCERR_FBD_WR(l_mcerr_fbd);
	MCERR_INT_WR(l_mcerr_int);
	MCERR_FSB_WR(0, l_mcerr_fsb);
	MCERR_FSB_WR(1, l_mcerr_fsb);
	if (nb_chipset == INTEL_NB_7300) {
		MCERR_FSB_WR(2, l_mcerr_fsb);
		MCERR_FSB_WR(3, l_mcerr_fsb);
	}
	if (nb_chipset == INTEL_NB_5400) {
		MCERR_THR_WR(l_mcerr_thr);
	}
}
1470 
/*
 * One-time device initialization: discover the per-channel DIMM
 * configuration, create the driver mutex and error queue, then program
 * every error-detection block (interrupt, THR, DIMM, MC, PCI Express,
 * memory/FBD, FSB) and enable the memory scrubber.
 *
 * Returns 0 on success or EAGAIN if the error queue cannot be created
 * (in which case the mutex is destroyed before returning).
 */
int
nb_dev_init()
{
	find_dimm_label_t *label_function_p;

	label_function_p = find_dimms_per_channel();
	mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
	nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
	    sizeof (nb_logout_t), 1, ERRORQ_VITAL);
	if (nb_queue == NULL) {
		mutex_destroy(&nb_mutex);
		return (EAGAIN);
	}
	nb_int_init();
	nb_thr_init();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	/* the 5100 has MEM error registers; all other chipsets use FBD */
	if (nb_chipset == INTEL_NB_5100)
		nb_mem_init();
	else
		nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();
	return (0);
}
1498 
1499 int
1500 nb_init()
1501 {
1502 	/* return ENOTSUP if there is no PCI config space support. */
1503 	if (pci_getl_func == NULL)
1504 		return (ENOTSUP);
1505 
1506 	/* get vendor and device */
1507 	nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
1508 	switch (nb_chipset) {
1509 	default:
1510 		if (nb_5000_memory_controller == 0)
1511 			return (ENOTSUP);
1512 		break;
1513 	case INTEL_NB_7300:
1514 	case INTEL_NB_5000P:
1515 	case INTEL_NB_5000X:
1516 		break;
1517 	case INTEL_NB_5000V:
1518 	case INTEL_NB_5000Z:
1519 		nb_number_memory_controllers = 1;
1520 		break;
1521 	case INTEL_NB_5100:
1522 		nb_channels_per_branch = 1;
1523 		break;
1524 	case INTEL_NB_5400:
1525 	case INTEL_NB_5400A:
1526 	case INTEL_NB_5400B:
1527 		nb_chipset = INTEL_NB_5400;
1528 		break;
1529 	}
1530 	return (0);
1531 }
1532 
1533 void
1534 nb_dev_reinit()
1535 {
1536 	int i, j;
1537 	int nchannels = nb_number_memory_controllers * 2;
1538 	nb_dimm_t **dimmpp;
1539 	nb_dimm_t *dimmp;
1540 	nb_dimm_t **old_nb_dimms;
1541 	int old_nb_dimms_per_channel;
1542 	find_dimm_label_t *label_function_p;
1543 	int dimm_slot = nb_dimm_slots;
1544 
1545 	old_nb_dimms = nb_dimms;
1546 	old_nb_dimms_per_channel = nb_dimms_per_channel;
1547 
1548 	dimm_fini();
1549 	nb_dimms_per_channel = 0;
1550 	label_function_p = find_dimms_per_channel();
1551 	dimm_init();
1552 	nb_dimms_init(label_function_p);
1553 	nb_mc_init();
1554 	nb_pex_init();
1555 	nb_int_init();
1556 	nb_thr_init();
1557 	if (nb_chipset == INTEL_NB_5100)
1558 		nb_mem_init();
1559 	else
1560 		nb_fbd_init();
1561 	nb_fsb_init();
1562 	nb_scrubber_enable();
1563 
1564 	dimmpp = old_nb_dimms;
1565 	for (i = 0; i < nchannels; i++) {
1566 		for (j = 0; j < old_nb_dimms_per_channel; j++) {
1567 			dimmp = *dimmpp;
1568 			if (dimmp) {
1569 				kmem_free(dimmp, sizeof (nb_dimm_t));
1570 				*dimmpp = NULL;
1571 			}
1572 			dimmp++;
1573 		}
1574 	}
1575 	kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) * dimm_slot);
1576 }
1577 
/*
 * Tear down everything nb_dev_init() set up: destroy the error queue
 * and mutex, restore all error registers to their saved values via the
 * *_fini() routines, and finish with nb_fini().
 */
void
nb_dev_unload()
{
	errorq_destroy(nb_queue);
	nb_queue = NULL;
	mutex_destroy(&nb_mutex);
	nb_int_fini();
	nb_thr_fini();
	/* the 5100 has MEM error registers; all other chipsets use FBD */
	if (nb_chipset == INTEL_NB_5100)
		nb_mem_fini();
	else
		nb_fbd_fini();
	nb_fsb_fini();
	nb_pex_fini();
	nb_fini();
}
1594 
/*
 * Unload hook; intentionally empty -- the actual teardown is done by
 * nb_dev_unload().
 */
void
nb_unload()
{
}
1599