1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include <sys/pci.h>
44 #include <sys/pcie.h>
45 #include "nb5000.h"
46 #include "nb_log.h"
47 #include "dimm_phys.h"
48 #include "rank.h"
49 
50 int nb_hw_memory_scrub_enable = 1;
51 static int nb_sw_scrub_disabled = 0;
52 
53 int nb_5000_memory_controller = 0;
54 int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
55 int nb_dimms_per_channel = 0;
56 static int ndimms = 0;
57 
58 nb_dimm_t **nb_dimms;
59 int nb_ndimm;
60 uint32_t nb_chipset;
61 enum nb_memory_mode nb_mode;
62 bank_select_t nb_banks[NB_MAX_MEM_BRANCH_SELECT];
63 rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MAX_MEM_RANK_SELECT];
64 uint32_t top_of_low_memory;
65 uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];
66 
67 errorq_t *nb_queue;
68 kmutex_t nb_mutex;
69 
70 static uint8_t nb_err0_int;
71 static uint8_t nb_err1_int;
72 static uint8_t nb_err2_int;
73 static uint8_t nb_mcerr_int;
74 static uint32_t nb_emask_int;
75 
76 static uint32_t nb_err0_fbd;
77 static uint32_t nb_err1_fbd;
78 static uint32_t nb_err2_fbd;
79 static uint32_t nb_mcerr_fbd;
80 static uint32_t nb_emask_fbd;
81 
82 static uint16_t nb_err0_fsb;
83 static uint16_t nb_err1_fsb;
84 static uint16_t nb_err2_fsb;
85 static uint16_t nb_mcerr_fsb;
86 static uint16_t nb_emask_fsb;
87 
88 static uint16_t nb_err0_thr;
89 static uint16_t nb_err1_thr;
90 static uint16_t nb_err2_thr;
91 static uint16_t nb_mcerr_thr;
92 static uint16_t nb_emask_thr;
93 
94 static uint32_t	emask_uncor_pex[NB_PCI_DEV];
95 static uint32_t emask_cor_pex[NB_PCI_DEV];
96 static uint32_t emask_rp_pex[NB_PCI_DEV];
97 static uint32_t docmd_pex[NB_PCI_DEV];
98 static uint32_t uncerrsev[NB_PCI_DEV];
99 
100 static uint8_t l_mcerr_int;
101 static uint32_t l_mcerr_fbd;
102 static uint16_t l_mcerr_fsb;
103 static uint16_t l_mcerr_thr;
104 
105 uint_t nb5000_emask_fbd = EMASK_5000_FBD_RES;
106 uint_t nb5400_emask_fbd = 0;
107 int nb5000_reset_emask_fbd = 1;
108 uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
109 uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;
110 uint_t nb5400_mask_poll_fbd = EMASK_5400_FBD_NF;
111 uint_t nb5400_mask_bios_fbd = EMASK_5400_FBD_FATAL;
112 
113 uint_t nb5000_emask_fsb = 0;
114 int nb5000_reset_emask_fsb = 1;
115 uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
116 uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;
117 
118 uint_t nb5400_emask_int = 0;
119 
120 uint_t nb7300_emask_int = EMASK_INT_7300;
121 uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
122 uint_t nb5000_emask_int = EMASK_INT_5000;
123 int nb5000_reset_emask_int = 1;
124 uint_t nb5000_mask_poll_int = EMASK_INT_NF;
125 uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;
126 
127 uint_t nb_mask_poll_thr = EMASK_THR_NF;
128 uint_t nb_mask_bios_thr = EMASK_THR_FATAL;
129 
130 int nb5000_reset_uncor_pex = 0;
131 uint_t nb5000_mask_uncor_pex = 0;
132 int nb5000_reset_cor_pex = 0;
133 uint_t nb5000_mask_cor_pex = 0xffffffff;
134 int nb_set_docmd = 1;
135 uint32_t nb5000_rp_pex = 0x1;
136 uint32_t nb5000_docmd_pex_mask = DOCMD_PEX_MASK;
137 uint32_t nb5400_docmd_pex_mask = DOCMD_5400_PEX_MASK;
138 uint32_t nb5000_docmd_pex = DOCMD_PEX;
139 uint32_t nb5400_docmd_pex = DOCMD_5400_PEX;
140 
141 int nb_mask_mc_set;
142 
/*
 * Optional per-platform DIMM labelling hook; when present it maps a
 * linear DIMM index to a silkscreen label string.
 */
typedef struct find_dimm_label {
	void (*label_function)(int, char *, int);
} find_dimm_label_t;

static void x8450_dimm_label(int, char *, int);

/*
 * Table of known platforms, matched by SMBIOS system vendor/product
 * prefix; a match supplies the label function and the fixed number of
 * DIMM slots per channel.  Terminated by an all-NULL entry.
 */
static struct platform_label {
	const char *sys_vendor;		/* SMB_TYPE_SYSTEM vendor prefix */
	const char *sys_product;	/* SMB_TYPE_SYSTEM product prefix */
	find_dimm_label_t dimm_label;
	int dimms_per_channel;
} platform_label[] = {
	{ "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
	    x8450_dimm_label, 8 },
	{ NULL, NULL, NULL, 0 }
};
159 
/*
 * Read the SPD status/data register for the given logical SMBus.
 * The bus number encodes the branch (bus >> 1) and channel (bus & 1).
 */
static unsigned short
read_spd(int bus)
{
	return (SPD_RD(bus >> 1, bus & 1));
}
171 
172 static void
173 write_spdcmd(int bus, uint32_t val)
174 {
175 	int branch = bus >> 1;
176 	int channel = bus & 1;
177 	SPDCMD_WR(branch, channel, val);
178 }
179 
180 static int
181 read_spd_eeprom(int bus, int slave, int addr)
182 {
183 	int retry = 4;
184 	int wait;
185 	int spd;
186 	uint32_t cmd;
187 
188 	for (;;) {
189 		wait = 1000;
190 		for (;;) {
191 			spd = read_spd(bus);
192 			if ((spd & SPD_BUSY) == 0)
193 				break;
194 			if (--wait == 0)
195 				return (-1);
196 			drv_usecwait(10);
197 		}
198 		cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
199 		write_spdcmd(bus, cmd);
200 		wait = 1000;
201 		for (;;) {
202 			spd = read_spd(bus);
203 			if ((spd & SPD_BUSY) == 0)
204 				break;
205 			if (--wait == 0) {
206 				spd = SPD_BUS_ERROR;
207 				break;
208 			}
209 			drv_usecwait(10);
210 		}
211 		while ((spd & SPD_BUS_ERROR) == 0 &&
212 		    (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
213 		    SPD_READ_DATA_VALID) {
214 			spd = read_spd(bus);
215 			if (--wait == 0)
216 				return (-1);
217 		}
218 		if ((spd & SPD_BUS_ERROR) == 0)
219 			break;
220 		if (--retry == 0)
221 			return (-1);
222 	}
223 	return (spd & 0xff);
224 }
225 
226 static void
227 nb_fini()
228 {
229 	int i, j;
230 	int nchannels = nb_number_memory_controllers * 2;
231 	nb_dimm_t **dimmpp;
232 	nb_dimm_t *dimmp;
233 
234 	dimmpp = nb_dimms;
235 	for (i = 0; i < nchannels; i++) {
236 		for (j = 0; j < nb_dimms_per_channel; j++) {
237 			dimmp = *dimmpp;
238 			if (dimmp) {
239 				kmem_free(dimmp, sizeof (nb_dimm_t));
240 				*dimmpp = NULL;
241 			}
242 			dimmp++;
243 		}
244 	}
245 	kmem_free(nb_dimms, sizeof (nb_dimm_t *) *
246 	    nb_number_memory_controllers * 2 * nb_dimms_per_channel);
247 	nb_dimms = NULL;
248 	dimm_fini();
249 }
250 
251 void
252 nb_scrubber_enable()
253 {
254 	uint32_t mc;
255 
256 	if (!nb_hw_memory_scrub_enable)
257 		return;
258 
259 	mc = MC_RD();
260 	if ((mc & MC_MIRROR) != 0) /* mirror mode */
261 		mc |= MC_PATROL_SCRUB;
262 	else
263 		mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
264 	MC_WR(mc);
265 
266 	if (nb_sw_scrub_disabled++)
267 		cmi_mc_sw_memscrub_disable();
268 }
269 
/*
 * Build an nb_dimm_t for one DIMM slot from its MTR register and SPD
 * EEPROM contents.  Returns NULL when the slot is empty or the SPD
 * memory-type byte is not the expected value; otherwise returns a
 * KM_SLEEP allocation the caller must eventually kmem_free().
 */
static nb_dimm_t *
nb_dimm_init(int channel, int dimm, uint16_t mtr)
{
	nb_dimm_t *dp;
	int i, t;
	int spd_sz;

	if (MTR_PRESENT(mtr) == 0)
		return (NULL);
	/* SPD byte 2 is the memory type; only type 9 is accepted here */
	t = read_spd_eeprom(channel, dimm, 2) & 0xf;

	if (t != 9)
		return (NULL);

	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);

	/*
	 * SPD byte 0 (low nibble) encodes the EEPROM size; part number
	 * and revision below are only present on EEPROMs > 128 bytes.
	 */
	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
	if (t == 1)
		spd_sz = 128;
	else if (t == 2)
		spd_sz = 176;
	else
		spd_sz = 256;
	/* manufacturer, location and serial number from fixed offsets */
	dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
	    (read_spd_eeprom(channel, dimm, 118) << 8);
	dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
	dp->serial_number =
	    (read_spd_eeprom(channel, dimm, 122) << 24) |
	    (read_spd_eeprom(channel, dimm, 123) << 16) |
	    (read_spd_eeprom(channel, dimm, 124) << 8) |
	    read_spd_eeprom(channel, dimm, 125);
	/* manufacture week is BCD-encoded in byte 121 */
	t = read_spd_eeprom(channel, dimm, 121);
	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
	dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
	if (spd_sz > 128) {
		for (i = 0; i < sizeof (dp->part_number); i++) {
			dp->part_number[i] =
			    read_spd_eeprom(channel, dimm, 128 + i);
		}
		for (i = 0; i < sizeof (dp->revision); i++) {
			dp->revision[i] =
			    read_spd_eeprom(channel, dimm, 146 + i);
		}
	}
	/* geometry comes from the MTR register rather than the SPD */
	dp->mtr_present = MTR_PRESENT(mtr);
	dp->nranks = MTR_NUMRANK(mtr);
	dp->nbanks = MTR_NUMBANK(mtr);
	dp->ncolumn = MTR_NUMCOL(mtr);
	dp->nrow = MTR_NUMROW(mtr);
	dp->width = MTR_WIDTH(mtr);
	dp->dimm_size = MTR_DIMMSIZE(mtr);

	return (dp);
}
324 
/*
 * Return the limit (exclusive end) of the branch-select range that
 * contains `base' and is mapped by the given controller, or 0 if no
 * range matches.  The raw limit is shrunk by the memory hole below
 * 4GB when the range straddles it, and halved when both branches
 * interleave the range in a non-mirrored configuration.
 */
static uint64_t
mc_range(int controller, uint64_t base)
{
	int i;
	uint64_t limit = 0;

	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
		if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
		    base < nb_banks[i].limit) {
			limit = nb_banks[i].limit;
			/* carve out the hole below 4GB (up to TLOW_MAX) */
			if (base <= top_of_low_memory &&
			    limit > top_of_low_memory) {
				limit -= TLOW_MAX - top_of_low_memory;
			}
			/* two-way interleave: each branch holds half */
			if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
			    nb_mode != NB_MEMORY_MIRROR) {
				limit = limit / 2;
			}
		}
	}
	return (limit);
}
347 
348 void
349 nb_mc_init()
350 {
351 	uint16_t tolm;
352 	uint16_t mir;
353 	uint32_t hole_base;
354 	uint32_t hole_size;
355 	uint32_t dmir;
356 	uint64_t base;
357 	uint64_t limit;
358 	uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
359 	int i, j, k;
360 	uint8_t interleave;
361 
362 	base = 0;
363 	tolm = TOLM_RD();
364 	top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
365 	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
366 		mir = MIR_RD(i);
367 		limit = (uint64_t)(mir >> 4) << 28;
368 		way0 = mir & 1;
369 		way1 = (mir >> 1) & 1;
370 		if (way0 == 0 && way1 == 0) {
371 			way0 = 1;
372 			way1 = 1;
373 		}
374 		if (limit > top_of_low_memory)
375 			limit += TLOW_MAX - top_of_low_memory;
376 		nb_banks[i].base = base;
377 		nb_banks[i].limit = limit;
378 		nb_banks[i].way[0] = way0;
379 		nb_banks[i].way[1] = way1;
380 		base = limit;
381 	}
382 	for (i = 0; i < nb_number_memory_controllers; i++) {
383 		base = 0;
384 
385 		for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
386 			dmir = DMIR_RD(i, j);
387 			limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
388 			if (limit == 0) {
389 				limit = mc_range(i, base);
390 			}
391 			branch_interleave = 0;
392 			hole_base = 0;
393 			hole_size = 0;
394 			DMIR_RANKS(dmir, rank0, rank1, rank2, rank3);
395 			if (rank0 == rank1)
396 				interleave = 1;
397 			else if (rank0 == rank2)
398 				interleave = 2;
399 			else
400 				interleave = 4;
401 			if (nb_mode != NB_MEMORY_MIRROR &&
402 			    nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
403 				for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
404 					if (base >= nb_banks[k].base &&
405 					    base < nb_banks[k].limit) {
406 						if (nb_banks[i].way[0] &&
407 						    nb_banks[i].way[1]) {
408 							interleave *= 2;
409 							limit *= 2;
410 							branch_interleave = 1;
411 						}
412 						break;
413 					}
414 				}
415 			}
416 			if (base < top_of_low_memory &&
417 			    limit > top_of_low_memory) {
418 				hole_base = top_of_low_memory;
419 				hole_size = TLOW_MAX - top_of_low_memory;
420 				limit += hole_size;
421 			} else if (base > top_of_low_memory) {
422 				limit += TLOW_MAX - top_of_low_memory;
423 			}
424 			nb_ranks[i][j].base = base;
425 			nb_ranks[i][j].limit = limit;
426 			nb_ranks[i][j].rank[0] = rank0;
427 			nb_ranks[i][j].rank[1] = rank1;
428 			nb_ranks[i][j].rank[2] = rank2;
429 			nb_ranks[i][j].rank[3] = rank3;
430 			nb_ranks[i][j].interleave = interleave;
431 			nb_ranks[i][j].branch_interleave = branch_interleave;
432 			nb_ranks[i][j].hole_base = hole_base;
433 			nb_ranks[i][j].hole_size = hole_size;
434 			if (limit > base) {
435 				dimm_add_rank(i, rank0, branch_interleave, 0,
436 				    base, hole_base, hole_size, interleave,
437 				    limit);
438 				if (rank0 != rank1) {
439 					dimm_add_rank(i, rank1,
440 					    branch_interleave, 1, base,
441 					    hole_base, hole_size, interleave,
442 					    limit);
443 					if (rank0 != rank2) {
444 						dimm_add_rank(i, rank2,
445 						    branch_interleave, 2, base,
446 						    hole_base, hole_size,
447 						    interleave, limit);
448 						dimm_add_rank(i, rank3,
449 						    branch_interleave, 3, base,
450 						    hole_base, hole_size,
451 						    interleave, limit);
452 					}
453 				}
454 			}
455 			base = limit;
456 		}
457 	}
458 }
459 
460 void
461 nb_used_spare_rank(int branch, int bad_rank)
462 {
463 	int i;
464 	int j;
465 
466 	for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
467 		for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
468 			if (nb_ranks[branch][i].rank[j] == bad_rank) {
469 				nb_ranks[branch][i].rank[j] =
470 				    spare_rank[branch];
471 				i = NB_MEM_RANK_SELECT;
472 				break;
473 			}
474 		}
475 	}
476 }
477 
478 /*ARGSUSED*/
479 static int
480 memoryarray(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
481 {
482 	smbios_memarray_t ma;
483 
484 	if (sp->smbstr_type == SMB_TYPE_MEMARRAY &&
485 	    smbios_info_memarray(shp, sp->smbstr_id, &ma) == 0) {
486 		ndimms += ma.smbma_ndevs;
487 	}
488 	return (0);
489 }
490 
/*
 * Determine nb_dimms_per_channel and an optional platform-specific
 * DIMM label function.  A platform_label[] match (by SMBIOS system
 * vendor/product prefix) supplies both; otherwise the SMBIOS memory
 * arrays are counted, and failing that a chipset maximum is assumed.
 * Returns the label function for a matched platform, or NULL.
 */
find_dimm_label_t *
find_dimms_per_channel()
{
	struct platform_label *pl;
	smbios_info_t si;
	smbios_system_t sy;
	id_t id;
	int read_memarray = 1;
	find_dimm_label_t *rt = NULL;

	if (ksmbios != NULL) {
		if ((id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
		    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {
			/* prefix-match against the known-platform table */
			for (pl = platform_label; pl->sys_vendor; pl++) {
				if (strncmp(pl->sys_vendor,
				    si.smbi_manufacturer,
				    strlen(pl->sys_vendor)) == 0 &&
				    strncmp(pl->sys_product, si.smbi_product,
				    strlen(pl->sys_product)) == 0) {
					nb_dimms_per_channel =
					    pl->dimms_per_channel;
					read_memarray = 0;
					rt = &pl->dimm_label;
					break;
				}
			}
		}
		/* unknown platform: count DIMMs via SMBIOS memory arrays */
		if (read_memarray)
			(void) smbios_iter(ksmbios, memoryarray, 0);
	}
	if (nb_dimms_per_channel == 0) {
		if (ndimms) {
			/* spread the SMBIOS total across all channels */
			nb_dimms_per_channel = ndimms /
			    (nb_number_memory_controllers * 2);
		} else {
			nb_dimms_per_channel = NB_MAX_DIMMS_PER_CHANNEL;
		}
	}
	return (rt);
}
531 
/*
 * smbios_iter() callback: copy each SMBIOS memory device's locator
 * string into the label of the next DIMM slot.  `arg' points to a
 * cursor into the nb_dimms array and is advanced for every
 * SMB_TYPE_MEMDEVICE record, so SMBIOS record order is assumed to
 * match nb_dimms slot order.
 */
static int
dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	nb_dimm_t ***dimmpp = arg;
	nb_dimm_t *dimmp;
	smbios_memdevice_t md;

	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
		dimmp = **dimmpp;
		/* empty slots (NULL) still consume one SMBIOS record */
		if (dimmp && smbios_info_memdevice(shp, sp->smbstr_id,
		    &md) == 0 && md.smbmd_dloc != NULL) {
			(void) snprintf(dimmp->label,
			    sizeof (dimmp->label), "%s", md.smbmd_dloc);
		}
		(*dimmpp)++;
	}
	return (0);
}
550 
551 void
552 nb_smbios()
553 {
554 	nb_dimm_t **dimmpp;
555 
556 	if (ksmbios != NULL) {
557 		dimmpp = nb_dimms;
558 		(void) smbios_iter(ksmbios, dimm_label, &dimmpp);
559 	}
560 }
561 
/*
 * Sun Blade X8450 DIMM labelling: the linear index encodes channel in
 * bits 3+ and slot-within-channel in bits 0-2; the silkscreen number
 * is slot * 4 + channel.
 */
static void
x8450_dimm_label(int dimm, char *label, int label_sz)
{
	int silk = ((dimm & 0x7) << 2) + (dimm >> 3);

	(void) snprintf(label, label_sz, "D%d", silk);
}
570 
/*
 * Discover all DIMMs: determine the memory mode (single-channel,
 * mirror, normal or spare-rank), allocate the nb_dimms slot array
 * (one slot per controller * 2 channels * dimms-per-channel), and
 * populate it via nb_dimm_init().  Labels come from the platform
 * label function when supplied, otherwise from SMBIOS.
 */
static void
nb_dimms_init(find_dimm_label_t *label_function)
{
	int i, j, k, l;
	uint16_t mtr;
	uint32_t mc, mca;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	mca = MCA_RD();
	mc = MC_RD();
	if (mca & MCA_SCHDIMM)  /* single-channel mode */
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	else if ((mc & MC_MIRROR) != 0) /* mirror mode */
		nb_mode = NB_MEMORY_MIRROR;
	else
		nb_mode = NB_MEMORY_NORMAL;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_number_memory_controllers * 2 * nb_dimms_per_channel, KM_SLEEP);
	dimmpp = nb_dimms;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		if (nb_mode == NB_MEMORY_NORMAL) {
			/* check whether a spare rank has been deployed */
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}
		for (j = 0; j < nb_dimms_per_channel; j++) {
			/* one MTR covers the same slot on both channels */
			mtr = MTR_RD(i, j);
			k = i * 2;
			dimmpp[j] = nb_dimm_init(k, j, mtr);
			if (dimmpp[j]) {
				nb_ndimm ++;
				dimm_add_geometry(i, j, dimmpp[j]->nbanks,
				    dimmpp[j]->width, dimmpp[j]->ncolumn,
				    dimmpp[j]->nrow);
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
			}
			/* second channel of this branch */
			dimmpp[j + nb_dimms_per_channel] =
			    nb_dimm_init(k + 1, j, mtr);
			l = j + nb_dimms_per_channel;
			if (dimmpp[l]) {
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + l,
					    dimmpp[l]->label,
					    sizeof (dimmpp[l]->label));
				}
				nb_ndimm ++;
			}
		}
		dimmpp += nb_dimms_per_channel * 2;
	}
	/* no platform hook: fall back to SMBIOS locator strings */
	if (label_function == NULL)
		nb_smbios();
}
635 
/*
 * Initialize PCI Express error reporting on every root-port device
 * present for the detected chipset: save the BIOS-programmed mask,
 * severity and DOCMD registers for restoration at fini time, then
 * apply our own masks and enable SERR forwarding on the ESI port
 * (device 0) so southbridge errors are reported.
 */
static void
nb_pex_init()
{
	int i;
	uint32_t mask;

	for (i = 0; i < NB_PCI_DEV; i++) {
		/* skip devices that do not exist on this chipset */
		switch (nb_chipset) {
		case INTEL_NB_5000P:
		case INTEL_NB_5000X:
			if (i == 1 || i > 8)
				continue;
			break;
		case INTEL_NB_5000V:
			if (i == 1 || i > 3)
				continue;
			break;
		case INTEL_NB_5000Z:
			if (i == 1 || i > 5)
				continue;
			break;
		case INTEL_NB_5400:
			break;
		case INTEL_NB_7300:
			if (i > 8)
				continue;
			break;
		}
		/* save BIOS settings for restoration in nb_pex_fini() */
		emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
		emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
		emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
		docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
		uncerrsev[i] = UNCERRSEV_RD(i);

		if (nb5000_reset_uncor_pex)
			EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
		if (nb5000_reset_cor_pex)
			EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
		if (nb_set_docmd) {
			if (nb_chipset == INTEL_NB_5400) {
				/* disable masking of ERR pins used by DOCMD */
				PEX_ERR_PIN_MASK_WR(i, 0x10);

				mask = (docmd_pex[i] & nb5400_docmd_pex_mask) |
				    (nb5400_docmd_pex & ~nb5400_docmd_pex_mask);
			} else {
				mask = (docmd_pex[i] & nb5000_docmd_pex_mask) |
				    (nb5000_docmd_pex & ~nb5000_docmd_pex_mask);
			}
			PEX_ERR_DOCMD_WR(i, mask);
		}

		/* RP error message (CE/NFE/FE) detect mask */
		EMASK_RP_PEX_WR(i, nb5000_rp_pex);

		/* Setup ESI port registers to enable SERR for southbridge */
		if (i == 0) {
			uint16_t regw;

			/* Command Register - Enable SERR */
			regw = nb_pci_getw(0, i, 0, PCI_CONF_COMM, 0);
			nb_pci_putw(0, i, 0, PCI_CONF_COMM,
			    regw | PCI_COMM_SERR_ENABLE);

			/* Root Control Register - SERR on NFE/FE */
			PEXROOTCTL_WR(i, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
			    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN);

			/* AER UE Mask - Mask UR */
			UNCERRMSK_WR(i, PCIE_AER_UCE_UR);
		}
	}
}
709 
710 static void
711 nb_pex_fini()
712 {
713 	int i;
714 
715 	for (i = 0; i < NB_PCI_DEV; i++) {
716 		switch (nb_chipset) {
717 		case INTEL_NB_5000P:
718 		case INTEL_NB_5000X:
719 			if (i == 1 && i > 8)
720 				continue;
721 			break;
722 		case INTEL_NB_5000V:
723 			if (i == 1 || i > 3)
724 				continue;
725 			break;
726 		case INTEL_NB_5000Z:
727 			if (i == 1 || i > 5)
728 				continue;
729 			break;
730 		case INTEL_NB_5400:
731 			break;
732 		case INTEL_NB_7300:
733 			if (i > 8)
734 				continue;
735 			break;
736 		}
737 		EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
738 		EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
739 		EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
740 		PEX_ERR_DOCMD_WR(i, docmd_pex[i]);
741 
742 		if (nb5000_reset_uncor_pex)
743 			EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
744 		if (nb5000_reset_cor_pex)
745 			EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
746 	}
747 }
748 
/*
 * Initialize the "internal" error reporting registers: save the BIOS
 * values for restoration at fini time, route errors we poll for to
 * the non-MCERR first/next error registers, and program the error
 * mask according to chipset type and stepping.
 */
void
nb_int_init()
{
	uint8_t err0_int;
	uint8_t err1_int;
	uint8_t err2_int;
	uint8_t mcerr_int;
	uint32_t emask_int;
	uint16_t stepping;

	err0_int = ERR0_INT_RD();
	err1_int = ERR1_INT_RD();
	err2_int = ERR2_INT_RD();
	mcerr_int = MCERR_INT_RD();
	emask_int = EMASK_INT_RD();

	/* remember BIOS settings for nb_int_fini() */
	nb_err0_int = err0_int;
	nb_err1_int = err1_int;
	nb_err2_int = err2_int;
	nb_mcerr_int = mcerr_int;
	nb_emask_int = emask_int;

	/* mask everything while reprogramming */
	ERR0_INT_WR(0xff);
	ERR1_INT_WR(0xff);
	ERR2_INT_WR(0xff);
	MCERR_INT_WR(0xff);
	EMASK_INT_WR(0xff);

	/*
	 * Let BIOS-owned errors signal MCERR unless the BIOS also left
	 * them disabled in all of err0/1/2; polled errors are masked
	 * off from MCERR and from the first/next error registers.
	 */
	mcerr_int &= ~nb5000_mask_bios_int;
	mcerr_int |= nb5000_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
	mcerr_int |= nb5000_mask_poll_int;
	err0_int |= nb5000_mask_poll_int;
	err1_int |= nb5000_mask_poll_int;
	err2_int |= nb5000_mask_poll_int;

	/* saved so nb_mask_mc_reset() can reinstate this value */
	l_mcerr_int = mcerr_int;
	ERR0_INT_WR(err0_int);
	ERR1_INT_WR(err1_int);
	ERR2_INT_WR(err2_int);
	MCERR_INT_WR(mcerr_int);
	if (nb5000_reset_emask_int) {
		if (nb_chipset == INTEL_NB_7300) {
			/* 7300 stepping 0 needs a different mask */
			stepping = NB5000_STEPPING();
			if (stepping == 0)
				EMASK_5000_INT_WR(nb7300_emask_int_step0);
			else
				EMASK_5000_INT_WR(nb7300_emask_int);
		} else if (nb_chipset == INTEL_NB_5400) {
			/* preserve the reserved bits as read */
			EMASK_5400_INT_WR(nb5400_emask_int |
			    (emask_int & EMASK_INT_RES));
		} else {
			EMASK_5000_INT_WR(nb5000_emask_int);
		}
	} else {
		EMASK_INT_WR(nb_emask_int);
	}
}
806 
/*
 * Restore the internal error registers to the BIOS values saved by
 * nb_int_init(), masking everything first so no error fires while
 * the registers are in a mixed state.
 */
void
nb_int_fini()
{
	ERR0_INT_WR(0xff);
	ERR1_INT_WR(0xff);
	ERR2_INT_WR(0xff);
	MCERR_INT_WR(0xff);
	EMASK_INT_WR(0xff);

	ERR0_INT_WR(nb_err0_int);
	ERR1_INT_WR(nb_err1_int);
	ERR2_INT_WR(nb_err2_int);
	MCERR_INT_WR(nb_mcerr_int);
	EMASK_INT_WR(nb_emask_int);
}
822 
823 void
824 nb_int_mask_mc(uint32_t mc_mask_int)
825 {
826 	uint32_t emask_int;
827 
828 	emask_int = MCERR_INT_RD();
829 	if ((emask_int & mc_mask_int) != mc_mask_int) {
830 		MCERR_INT_WR(emask_int|mc_mask_int);
831 		nb_mask_mc_set = 1;
832 	}
833 }
834 
/*
 * Initialize the FB-DIMM error reporting registers: save the BIOS
 * values, split errors into BIOS-signalled (MCERR) and polled sets
 * per chipset, and program the error mask.
 */
void
nb_fbd_init()
{
	uint32_t err0_fbd;
	uint32_t err1_fbd;
	uint32_t err2_fbd;
	uint32_t mcerr_fbd;
	uint32_t emask_fbd;
	uint32_t emask_bios_fbd;
	uint32_t emask_poll_fbd;

	err0_fbd = ERR0_FBD_RD();
	err1_fbd = ERR1_FBD_RD();
	err2_fbd = ERR2_FBD_RD();
	mcerr_fbd = MCERR_FBD_RD();
	emask_fbd = EMASK_FBD_RD();

	/* remember BIOS settings for nb_fbd_fini() */
	nb_err0_fbd = err0_fbd;
	nb_err1_fbd = err1_fbd;
	nb_err2_fbd = err2_fbd;
	nb_mcerr_fbd = mcerr_fbd;
	nb_emask_fbd = emask_fbd;

	/* mask everything while reprogramming */
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	if (nb_chipset == INTEL_NB_7300 && nb_mode == NB_MEMORY_MIRROR) {
		/* MCH 7300 errata 34 */
		emask_bios_fbd = nb5000_mask_bios_fbd & ~EMASK_FBD_M23;
		emask_poll_fbd = nb5000_mask_poll_fbd;
		mcerr_fbd |= EMASK_FBD_M23;
	} else if (nb_chipset == INTEL_NB_5400) {
		emask_bios_fbd = nb5400_mask_bios_fbd;
		emask_poll_fbd = nb5400_mask_poll_fbd;
	} else {
		emask_bios_fbd = nb5000_mask_bios_fbd;
		emask_poll_fbd = nb5000_mask_poll_fbd;
	}
	/*
	 * BIOS-owned errors keep signalling MCERR unless BIOS left
	 * them disabled everywhere; polled errors are masked off.
	 */
	mcerr_fbd &= ~emask_bios_fbd;
	mcerr_fbd |= emask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
	mcerr_fbd |= emask_poll_fbd;
	err0_fbd |= emask_poll_fbd;
	err1_fbd |= emask_poll_fbd;
	err2_fbd |= emask_poll_fbd;

	/* saved so nb_mask_mc_reset() can reinstate this value */
	l_mcerr_fbd = mcerr_fbd;
	ERR0_FBD_WR(err0_fbd);
	ERR1_FBD_WR(err1_fbd);
	ERR2_FBD_WR(err2_fbd);
	MCERR_FBD_WR(mcerr_fbd);
	if (nb5000_reset_emask_fbd) {
		if (nb_chipset == INTEL_NB_5400)
			EMASK_FBD_WR(nb5400_emask_fbd);
		else
			EMASK_FBD_WR(nb5000_emask_fbd);
	} else {
		EMASK_FBD_WR(nb_emask_fbd);
	}
}
897 
898 void
899 nb_fbd_mask_mc(uint32_t mc_mask_fbd)
900 {
901 	uint32_t emask_fbd;
902 
903 	emask_fbd = MCERR_FBD_RD();
904 	if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
905 		MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
906 		nb_mask_mc_set = 1;
907 	}
908 }
909 
/*
 * Restore the FB-DIMM error registers to the BIOS values saved by
 * nb_fbd_init(), masking everything first.
 */
void
nb_fbd_fini()
{
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	ERR0_FBD_WR(nb_err0_fbd);
	ERR1_FBD_WR(nb_err1_fbd);
	ERR2_FBD_WR(nb_err2_fbd);
	MCERR_FBD_WR(nb_mcerr_fbd);
	EMASK_FBD_WR(nb_emask_fbd);
}
925 
/*
 * Initialize front-side-bus error reporting.  FSB 0's BIOS values are
 * saved (all FSBs are assumed to be programmed identically) and the
 * same computed settings are written to FSBs 0 and 1, plus FSBs 2 and
 * 3 on the four-socket 7300.
 */
static void
nb_fsb_init()
{
	uint16_t err0_fsb;
	uint16_t err1_fsb;
	uint16_t err2_fsb;
	uint16_t mcerr_fsb;
	uint16_t emask_fsb;

	/* only FSB 0 is read; its values are applied to every FSB */
	err0_fsb = ERR0_FSB_RD(0);
	err1_fsb = ERR1_FSB_RD(0);
	err2_fsb = ERR2_FSB_RD(0);
	mcerr_fsb = MCERR_FSB_RD(0);
	emask_fsb = EMASK_FSB_RD(0);

	/* mask everything while reprogramming */
	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	/* remember BIOS settings for nb_fsb_fini() */
	nb_err0_fsb = err0_fsb;
	nb_err1_fsb = err1_fsb;
	nb_err2_fsb = err2_fsb;
	nb_mcerr_fsb = mcerr_fsb;
	nb_emask_fsb = emask_fsb;

	/*
	 * BIOS-owned errors keep signalling MCERR unless BIOS left
	 * them disabled everywhere; polled errors are masked off.
	 */
	mcerr_fsb &= ~nb5000_mask_bios_fsb;
	mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
	mcerr_fsb |= nb5000_mask_poll_fsb;
	err0_fsb |= nb5000_mask_poll_fsb;
	err1_fsb |= nb5000_mask_poll_fsb;
	err2_fsb |= nb5000_mask_poll_fsb;

	/* saved so nb_mask_mc_reset() can reinstate this value */
	l_mcerr_fsb = mcerr_fsb;
	ERR0_FSB_WR(0, err0_fsb);
	ERR1_FSB_WR(0, err1_fsb);
	ERR2_FSB_WR(0, err2_fsb);
	MCERR_FSB_WR(0, mcerr_fsb);
	if (nb5000_reset_emask_fsb) {
		EMASK_FSB_WR(0, nb5000_emask_fsb);
	} else {
		EMASK_FSB_WR(0, nb_emask_fsb);
	}

	ERR0_FSB_WR(1, err0_fsb);
	ERR1_FSB_WR(1, err1_fsb);
	ERR2_FSB_WR(1, err2_fsb);
	MCERR_FSB_WR(1, mcerr_fsb);
	if (nb5000_reset_emask_fsb) {
		EMASK_FSB_WR(1, nb5000_emask_fsb);
	} else {
		EMASK_FSB_WR(1, nb_emask_fsb);
	}

	/* the 7300 has four FSBs; program the extra pair the same way */
	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(2, err0_fsb);
		ERR1_FSB_WR(2, err1_fsb);
		ERR2_FSB_WR(2, err2_fsb);
		MCERR_FSB_WR(2, mcerr_fsb);
		if (nb5000_reset_emask_fsb) {
			EMASK_FSB_WR(2, nb5000_emask_fsb);
		} else {
			EMASK_FSB_WR(2, nb_emask_fsb);
		}

		ERR0_FSB_WR(3, err0_fsb);
		ERR1_FSB_WR(3, err1_fsb);
		ERR2_FSB_WR(3, err2_fsb);
		MCERR_FSB_WR(3, mcerr_fsb);
		if (nb5000_reset_emask_fsb) {
			EMASK_FSB_WR(3, nb5000_emask_fsb);
		} else {
			EMASK_FSB_WR(3, nb_emask_fsb);
		}
	}
}
1021 
/*
 * Restore the FSB error registers to the BIOS values saved by
 * nb_fsb_init() (FSBs 0 and 1, plus 2 and 3 on the 7300), masking
 * each FSB fully before restoring it.
 */
static void
nb_fsb_fini() {
	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(0, nb_err0_fsb);
	ERR1_FSB_WR(0, nb_err1_fsb);
	ERR2_FSB_WR(0, nb_err2_fsb);
	MCERR_FSB_WR(0, nb_mcerr_fsb);
	EMASK_FSB_WR(0, nb_emask_fsb);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	ERR0_FSB_WR(1, nb_err0_fsb);
	ERR1_FSB_WR(1, nb_err1_fsb);
	ERR2_FSB_WR(1, nb_err2_fsb);
	MCERR_FSB_WR(1, nb_mcerr_fsb);
	EMASK_FSB_WR(1, nb_emask_fsb);

	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(2, nb_err0_fsb);
		ERR1_FSB_WR(2, nb_err1_fsb);
		ERR2_FSB_WR(2, nb_err2_fsb);
		MCERR_FSB_WR(2, nb_mcerr_fsb);
		EMASK_FSB_WR(2, nb_emask_fsb);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(3, nb_err0_fsb);
		ERR1_FSB_WR(3, nb_err1_fsb);
		ERR2_FSB_WR(3, nb_err2_fsb);
		MCERR_FSB_WR(3, nb_mcerr_fsb);
		EMASK_FSB_WR(3, nb_emask_fsb);
	}
}
1074 
/*
 * Mask the given errors on one FSB from generating machine checks,
 * noting in nb_mask_mc_set that the MCERR routing was modified.
 *
 * NOTE(review): EMASK_FBD_RES is an FBD (not FSB) reserved-bit mask;
 * whether its use here is intentional should be confirmed against the
 * chipset documentation.
 */
void
nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
{
	uint16_t emask_fsb;

	emask_fsb = MCERR_FSB_RD(fsb);
	if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
		MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
		nb_mask_mc_set = 1;
	}
}
1086 
/*
 * Initialize the thermal error reporting registers.  These exist only
 * on the 5400; the function is a no-op on every other chipset.  BIOS
 * values are saved for restoration in nb_thr_fini().
 */
static void
nb_thr_init()
{
	uint16_t err0_thr;
	uint16_t err1_thr;
	uint16_t err2_thr;
	uint16_t mcerr_thr;
	uint16_t emask_thr;

	if (nb_chipset == INTEL_NB_5400) {
		err0_thr = ERR0_THR_RD(0);
		err1_thr = ERR1_THR_RD(0);
		err2_thr = ERR2_THR_RD(0);
		mcerr_thr = MCERR_THR_RD(0);
		emask_thr = EMASK_THR_RD(0);

		/* mask everything while reprogramming */
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		/* remember BIOS settings for nb_thr_fini() */
		nb_err0_thr = err0_thr;
		nb_err1_thr = err1_thr;
		nb_err2_thr = err2_thr;
		nb_mcerr_thr = mcerr_thr;
		nb_emask_thr = emask_thr;

		/*
		 * BIOS-owned errors keep signalling MCERR unless BIOS
		 * left them disabled everywhere; polled errors are
		 * masked off from MCERR.
		 */
		mcerr_thr &= ~nb_mask_bios_thr;
		mcerr_thr |= nb_mask_bios_thr &
		    (~err2_thr | ~err1_thr | ~err0_thr);
		mcerr_thr |= nb_mask_poll_thr;
		err0_thr |= nb_mask_poll_thr;
		err1_thr |= nb_mask_poll_thr;
		err2_thr |= nb_mask_poll_thr;

		/* saved so nb_mask_mc_reset() can reinstate this value */
		l_mcerr_thr = mcerr_thr;
		ERR0_THR_WR(err0_thr);
		ERR1_THR_WR(err1_thr);
		ERR2_THR_WR(err2_thr);
		MCERR_THR_WR(mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1131 
/*
 * Restore the 5400 thermal error registers to the BIOS values saved
 * by nb_thr_init(); a no-op on other chipsets.
 */
static void
nb_thr_fini()
{
	if (nb_chipset == INTEL_NB_5400) {
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		ERR0_THR_WR(nb_err0_thr);
		ERR1_THR_WR(nb_err1_thr);
		ERR2_THR_WR(nb_err2_thr);
		MCERR_THR_WR(nb_mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1149 
1150 void
1151 nb_thr_mask_mc(uint16_t mc_mask_thr)
1152 {
1153 	uint16_t emask_thr;
1154 
1155 	emask_thr = MCERR_THR_RD(0);
1156 	if ((emask_thr & mc_mask_thr) != mc_mask_thr) {
1157 		MCERR_THR_WR(emask_thr|mc_mask_thr);
1158 		nb_mask_mc_set = 1;
1159 	}
1160 }
1161 
/*
 * Reinstate the MCERR routing values computed at init time (l_mcerr_*),
 * undoing any masking applied by the nb_*_mask_mc() routines.
 */
void
nb_mask_mc_reset()
{
	MCERR_FBD_WR(l_mcerr_fbd);
	MCERR_INT_WR(l_mcerr_int);
	MCERR_FSB_WR(0, l_mcerr_fsb);
	MCERR_FSB_WR(1, l_mcerr_fsb);
	if (nb_chipset == INTEL_NB_7300) {
		MCERR_FSB_WR(2, l_mcerr_fsb);
		MCERR_FSB_WR(3, l_mcerr_fsb);
	}
	if (nb_chipset == INTEL_NB_5400) {
		MCERR_THR_WR(l_mcerr_thr);
	}
}
1177 
/*
 * Full driver initialization: create the error queue and mutex, set
 * up every register group (internal, thermal, FB-DIMM, FSB, PEX),
 * discover DIMMs, decode the address map and enable hardware memory
 * scrubbing.  Returns 0 on success or EAGAIN if the error queue
 * cannot be created.
 */
int
nb_dev_init()
{
	find_dimm_label_t *label_function_p;

	label_function_p = find_dimms_per_channel();
	mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
	nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
	    sizeof (nb_logout_t), 1, ERRORQ_VITAL);
	if (nb_queue == NULL) {
		mutex_destroy(&nb_mutex);
		return (EAGAIN);
	}
	nb_int_init();
	nb_thr_init();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();
	return (0);
}
1202 
1203 int
1204 nb_init()
1205 {
1206 	/* return ENOTSUP if there is no PCI config space support. */
1207 	if (pci_getl_func == NULL)
1208 		return (ENOTSUP);
1209 
1210 	/* get vendor and device */
1211 	nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
1212 	switch (nb_chipset) {
1213 	default:
1214 		if (nb_5000_memory_controller == 0)
1215 			return (ENOTSUP);
1216 		break;
1217 	case INTEL_NB_7300:
1218 	case INTEL_NB_5000P:
1219 	case INTEL_NB_5000X:
1220 		break;
1221 	case INTEL_NB_5000V:
1222 	case INTEL_NB_5000Z:
1223 		nb_number_memory_controllers = 1;
1224 		break;
1225 	case INTEL_NB_5400:
1226 	case INTEL_NB_5400A:
1227 	case INTEL_NB_5400B:
1228 		nb_chipset = INTEL_NB_5400;
1229 		break;
1230 	}
1231 	return (0);
1232 }
1233 
1234 void
1235 nb_dev_reinit()
1236 {
1237 	int i, j;
1238 	int nchannels = nb_number_memory_controllers * 2;
1239 	nb_dimm_t **dimmpp;
1240 	nb_dimm_t *dimmp;
1241 	nb_dimm_t **old_nb_dimms;
1242 	int old_nb_dimms_per_channel;
1243 	find_dimm_label_t *label_function_p;
1244 
1245 	old_nb_dimms = nb_dimms;
1246 	old_nb_dimms_per_channel = nb_dimms_per_channel;
1247 
1248 	dimm_fini();
1249 	label_function_p = find_dimms_per_channel();
1250 	dimm_init();
1251 	nb_dimms_init(label_function_p);
1252 	nb_mc_init();
1253 	nb_pex_init();
1254 	nb_int_init();
1255 	nb_thr_init();
1256 	nb_fbd_init();
1257 	nb_fsb_init();
1258 	nb_scrubber_enable();
1259 
1260 	dimmpp = old_nb_dimms;
1261 	for (i = 0; i < nchannels; i++) {
1262 		for (j = 0; j < old_nb_dimms_per_channel; j++) {
1263 			dimmp = *dimmpp;
1264 			if (dimmp) {
1265 				kmem_free(dimmp, sizeof (nb_dimm_t));
1266 				*dimmpp = NULL;
1267 			}
1268 			dimmp++;
1269 		}
1270 	}
1271 	kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) *
1272 	    nb_number_memory_controllers * 2 * old_nb_dimms_per_channel);
1273 }
1274 
/*
 * Full driver teardown: destroy the error queue and mutex, then
 * restore every register group to its BIOS state and free the DIMM
 * table.
 */
void
nb_dev_unload()
{
	errorq_destroy(nb_queue);
	nb_queue = NULL;
	mutex_destroy(&nb_mutex);
	nb_int_fini();
	nb_thr_fini();
	nb_fbd_fini();
	nb_fsb_fini();
	nb_pex_fini();
	nb_fini();
}
1288 
/*
 * Module unload hook; intentionally empty — all teardown is performed
 * by nb_dev_unload().
 */
void
nb_unload()
{
}
1293