/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/log.h>
#include <sys/systm.h>
#include <sys/modctl.h>
#include <sys/errorq.h>
#include <sys/controlregs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/sysevent.h>
#include <sys/pghw.h>
#include <sys/cyclic.h>
#include <sys/pci_cfgspace.h>
#include <sys/mc_intel.h>
#include <sys/cpu_module_impl.h>
#include <sys/smbios.h>
#include <sys/pci.h>
#include <sys/machsystm.h>
#include "nb5000.h"
#include "nb_log.h"
#include "dimm_phys.h"
#include "rank.h"

int nb_hw_memory_scrub_enable = 1;
static int nb_sw_scrub_disabled = 0;

int nb_5000_memory_controller = 0;
int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
int nb_dimms_per_channel = 0;
static int ndimms = 0;

nb_dimm_t **nb_dimms;
int nb_ndimm;
uint32_t nb_chipset;
enum nb_memory_mode nb_mode;
bank_select_t nb_banks[NB_MEM_BRANCH_SELECT];
rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MEM_RANK_SELECT];
uint32_t top_of_low_memory;
uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];

errorq_t *nb_queue;
kmutex_t nb_mutex;

static uint8_t nb_err0_int;
static uint8_t nb_err1_int;
static uint8_t nb_err2_int;
static uint8_t nb_mcerr_int;
static uint8_t nb_emask_int;

static uint32_t nb_err0_fbd;
static uint32_t nb_err1_fbd;
static uint32_t nb_err2_fbd;
static uint32_t nb_mcerr_fbd;
static uint32_t nb_emask_fbd;

static uint16_t nb_err0_fsb;
static uint16_t nb_err1_fsb;
static uint16_t nb_err2_fsb;
static uint16_t nb_mcerr_fsb;
static uint16_t nb_emask_fsb;

static uint32_t emask_uncor_pex[NB_PCI_DEV];
static uint32_t emask_cor_pex[NB_PCI_DEV];
static uint32_t emask_rp_pex[NB_PCI_DEV];
static uint32_t docmd_pex[NB_PCI_DEV];
static uint32_t uncerrsev[NB_PCI_DEV];

static uint8_t l_mcerr_int;
static uint32_t l_mcerr_fbd;
static uint16_t l_mcerr_fsb;

uint_t nb5000_emask_fbd = EMASK_FBD_RES;
int nb5000_reset_emask_fbd = 1;
uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;

uint_t nb5000_emask_fsb = 0;
int nb5000_reset_emask_fsb = 1;
uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;

uint_t nb7300_emask_int = EMASK_INT_7300;
uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
uint_t nb5000_emask_int = EMASK_INT_5000;
int nb5000_reset_emask_int = 1;
uint_t nb5000_mask_poll_int = EMASK_INT_NF;
uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;

int nb5000_reset_uncor_pex = 0;
uint_t nb5000_mask_uncor_pex = 0;
int nb5000_reset_cor_pex = 1;
uint_t nb5000_mask_cor_pex = 0xffffffff;
uint32_t nb5000_docmd_pex_mask = DOCMD_PEX_MASK;
uint32_t nb5000_docmd_pex = DOCMD_PEX;

int nb_mask_mc_set;

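/*
 * SPD (Serial Presence Detect) access helpers.  The bus argument encodes the
 * FB-DIMM channel: the low bit selects the channel within a branch and the
 * remaining bits select the branch.
 */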
static unsigned short
read_spd(int bus)
{
	unsigned short rt = 0;
	int branch = bus >> 1;
	int channel = bus & 1;

	rt = SPD_RD(branch, channel);

	return (rt);
}

static void
write_spdcmd(int bus, uint32_t val)
{
	int branch = bus >> 1;
	int channel = bus & 1;
	SPDCMD_WR(branch, channel, val);
}

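/*
 * Read one byte from a DIMM's SPD EEPROM.  Wait for the SPD interface to go
 * idle, issue the command for the given slave address and offset, then poll
 * until the read data is valid.  Bus errors are retried a few times; -1 is
 * returned on timeout or persistent bus error.
 */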
static int
read_spd_eeprom(int bus, int slave, int addr)
{
	int retry = 4;
	int wait;
	int spd;
	uint32_t cmd;

	for (;;) {
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0)
				return (-1);
			drv_usecwait(10);
		}
		cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
		write_spdcmd(bus, cmd);
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0) {
				spd = SPD_BUS_ERROR;
				break;
			}
			drv_usecwait(10);
		}
		while ((spd & SPD_BUS_ERROR) == 0 &&
		    (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
		    SPD_READ_DATA_VALID) {
			spd = read_spd(bus);
			if (--wait == 0)
				return (-1);
		}
		if ((spd & SPD_BUS_ERROR) == 0)
			break;
		if (--retry == 0)
			return (-1);
	}
	return (spd & 0xff);
}

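/*
 * Free the per-DIMM state and the nb_dimms pointer array built by
 * nb_dimms_init(), then tear down the dimm/rank translation state.
 */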
static void
nb_fini()
{
	int i, j;
	int nchannels = nb_number_memory_controllers * 2;
	nb_dimm_t **dimmpp;
	nb_dimm_t *dimmp;

	dimmpp = nb_dimms;
	for (i = 0; i < nchannels; i++) {
		for (j = 0; j < nb_dimms_per_channel; j++) {
			dimmp = *dimmpp;
			if (dimmp) {
				kmem_free(dimmp, sizeof (nb_dimm_t));
				*dimmpp = NULL;
			}
			dimmpp++;
		}
	}
	kmem_free(nb_dimms, sizeof (nb_dimm_t *) *
	    nb_number_memory_controllers * 2 * nb_dimms_per_channel);
	nb_dimms = NULL;
	dimm_fini();
}

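/*
 * Enable the chipset's hardware memory scrubbers: patrol scrub always, and
 * demand scrub as well unless the branches are mirrored.  The software
 * memory scrubber is backed off in favor of the hardware scrubber.
 */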
void
nb_scrubber_enable()
{
	uint32_t mc;

	if (!nb_hw_memory_scrub_enable)
		return;

	mc = MC_RD();
	if ((mc & MC_MIRROR) != 0) /* mirror mode */
		mc |= MC_PATROL_SCRUB;
	else
		mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
	MC_WR(mc);

	if (nb_sw_scrub_disabled++)
		memscrub_disable();
}

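/*
 * Build an nb_dimm_t for one DIMM slot from its SPD EEPROM and the MTR
 * (memory technology) register for that slot.  Only FB-DIMM modules (SPD
 * memory type 9) are accepted.  Manufacturer, serial number, date code and,
 * if the SPD is large enough, part number and revision come from the SPD;
 * rank/bank/row/column geometry and DIMM size come from the MTR.
 */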
static nb_dimm_t *
nb_dimm_init(int channel, int dimm, uint16_t mtr)
{
	nb_dimm_t *dp;
	int i, t;
	int spd_sz;

	t = read_spd_eeprom(channel, dimm, 2) & 0xf;

	if (t != 9)
		return (NULL);

	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
	if (t == 1)
		spd_sz = 128;
	else if (t == 2)
		spd_sz = 176;
	else
		spd_sz = 256;
	if (mtr & 0x10)
		dp->mtr_present = 1;
	dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
	    (read_spd_eeprom(channel, dimm, 118) << 8);
	dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
	dp->serial_number =
	    (read_spd_eeprom(channel, dimm, 122) << 24) |
	    (read_spd_eeprom(channel, dimm, 123) << 16) |
	    (read_spd_eeprom(channel, dimm, 124) << 8) |
	    read_spd_eeprom(channel, dimm, 125);
	t = read_spd_eeprom(channel, dimm, 121);
	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
	dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
	if (spd_sz > 128) {
		for (i = 0; i < sizeof (dp->part_number); i++) {
			dp->part_number[i] =
			    read_spd_eeprom(channel, dimm, 128 + i);
		}
		for (i = 0; i < sizeof (dp->revision); i++) {
			dp->revision[i] =
			    read_spd_eeprom(channel, dimm, 146 + i);
		}
	}
	dp->nranks = MTR_NUMRANK(mtr);
	dp->nbanks = MTR_NUMBANK(mtr);
	dp->ncolumn = MTR_NUMCOL(mtr);
	dp->nrow = MTR_NUMROW(mtr);
	dp->width = MTR_WIDTH(mtr);
	dp->dimm_size = MTR_DIMMSIZE(mtr);

	return (dp);
}

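/*
 * Return the limit of the branch-select entry that contains the given base
 * address for this memory controller, adjusted for the memory hole below
 * 4GB and halved when the range is interleaved across both branches and not
 * mirrored.
 */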
static uint64_t
mc_range(int controller, uint64_t base)
{
	int i;
	uint64_t limit = 0;

	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
		if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
		    base < nb_banks[i].limit) {
			limit = nb_banks[i].limit;
			if (base <= top_of_low_memory &&
			    limit > top_of_low_memory) {
				limit -= TLOW_MAX - top_of_low_memory;
			}
			if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
			    nb_mode != NB_MEMORY_MIRROR) {
				limit = limit / 2;
			}
		}
	}
	return (limit);
}

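/*
 * Decode the memory layout programmed by the BIOS.  TOLM gives the top of
 * low memory (the start of the hole below 4GB), the MIR registers give the
 * branch-select ranges, and the per-branch DMIR registers give the
 * rank-select ranges.  The decoded ranges are recorded in nb_banks[] and
 * nb_ranks[][] and handed to dimm_add_rank().
 */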
void
nb_mc_init()
{
	uint16_t tolm;
	uint16_t mir;
	uint32_t hole_base;
	uint32_t hole_size;
	uint32_t dmir;
	uint64_t base;
	uint64_t limit;
	uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
	int i, j, k;
	uint8_t interleave;

	base = 0;
	tolm = TOLM_RD();
	top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
		mir = MIR_RD(i);
		limit = (uint64_t)(mir >> 4) << 28;
		way0 = mir & 1;
		way1 = (mir >> 1) & 1;
		if (way0 == 0 && way1 == 0) {
			way0 = 1;
			way1 = 1;
		}
		if (limit > top_of_low_memory)
			limit += TLOW_MAX - top_of_low_memory;
		nb_banks[i].base = base;
		nb_banks[i].limit = limit;
		nb_banks[i].way[0] = way0;
		nb_banks[i].way[1] = way1;
		base = limit;
	}
	for (i = 0; i < nb_number_memory_controllers; i++) {
		base = 0;

		for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
			dmir = DMIR_RD(i, j);
			limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
			if (limit == 0) {
				limit = mc_range(i, base);
			}
			branch_interleave = 0;
			hole_base = 0;
			hole_size = 0;
			DMIR_RANKS(nb_dimms_per_channel, dmir, rank0, rank1,
			    rank2, rank3);
			if (rank0 == rank1)
				interleave = 1;
			else if (rank0 == rank2)
				interleave = 2;
			else
				interleave = 4;
			if (nb_mode != NB_MEMORY_MIRROR &&
			    nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
				for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
					if (base >= nb_banks[k].base &&
					    base < nb_banks[k].limit) {
						if (nb_banks[k].way[0] &&
						    nb_banks[k].way[1]) {
							interleave *= 2;
							limit *= 2;
							branch_interleave = 1;
						}
						break;
					}
				}
			}
			if (base < top_of_low_memory &&
			    limit > top_of_low_memory) {
				hole_base = top_of_low_memory;
				hole_size = TLOW_MAX - top_of_low_memory;
				limit += hole_size;
			} else if (base > top_of_low_memory) {
				limit += TLOW_MAX - top_of_low_memory;
			}
			nb_ranks[i][j].base = base;
			nb_ranks[i][j].limit = limit;
			nb_ranks[i][j].rank[0] = rank0;
			nb_ranks[i][j].rank[1] = rank1;
			nb_ranks[i][j].rank[2] = rank2;
			nb_ranks[i][j].rank[3] = rank3;
			nb_ranks[i][j].interleave = interleave;
			nb_ranks[i][j].branch_interleave = branch_interleave;
			nb_ranks[i][j].hole_base = hole_base;
			nb_ranks[i][j].hole_size = hole_size;
			if (limit > base) {
				dimm_add_rank(i, rank0, branch_interleave, 0,
				    base, hole_base, hole_size, interleave,
				    limit);
				if (rank0 != rank1) {
					dimm_add_rank(i, rank1,
					    branch_interleave, 1, base,
					    hole_base, hole_size, interleave,
					    limit);
					if (rank0 != rank2) {
						dimm_add_rank(i, rank2,
						    branch_interleave, 2, base,
						    hole_base, hole_size,
						    interleave, limit);
						dimm_add_rank(i, rank3,
						    branch_interleave, 3, base,
						    hole_base, hole_size,
						    interleave, limit);
					}
				}
			}
			base = limit;
		}
	}
}

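/*
 * A spare rank has been brought into service on the given branch: update the
 * rank-select table so the failed rank's entries point at the spare.
 */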
void
nb_used_spare_rank(int branch, int bad_rank)
{
	int i;
	int j;

	for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
		for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
			if (nb_ranks[branch][i].rank[j] == bad_rank) {
				nb_ranks[branch][i].rank[j] =
				    spare_rank[branch];
				i = NB_MEM_RANK_SELECT;
				break;
			}
		}
	}
}

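/*
 * smbios_iter() callback: count the memory devices reported by each SMBIOS
 * memory-array record.
 */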
/*ARGSUSED*/
static int
memoryarray(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	smbios_memarray_t ma;

	if (sp->smbstr_type == SMB_TYPE_MEMARRAY &&
	    smbios_info_memarray(shp, sp->smbstr_id, &ma) == 0) {
		ndimms += ma.smbma_ndevs;
	}
	return (0);
}

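/*
 * Work out how many DIMM slots each channel has.  Prefer the SMBIOS memory
 * array records; if SMBIOS is not available fall back to the chipset
 * maximum (8 per channel on the 7300, otherwise 4).
 */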
void
find_dimms_per_channel()
{
	if (nb_dimms_per_channel == 0) {
		if (ksmbios != NULL) {
			(void) smbios_iter(ksmbios, memoryarray, 0);
			nb_dimms_per_channel = ndimms /
			    (nb_number_memory_controllers * 2);
		}
		if (nb_dimms_per_channel == 0) {
			if (nb_chipset == INTEL_NB_7300)
				nb_dimms_per_channel = 8;
			else
				nb_dimms_per_channel = 4;
		}
	}
}

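/*
 * smbios_iter() callback: copy the SMBIOS device-locator string into the
 * label of the corresponding nb_dimm_t, walking the nb_dimms array in step
 * with the SMBIOS memory-device records.
 */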
static int
dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	nb_dimm_t ***dimmpp = arg;
	nb_dimm_t *dimmp;
	smbios_memdevice_t md;

	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
		dimmp = **dimmpp;
		if (dimmp && smbios_info_memdevice(shp, sp->smbstr_id,
		    &md) == 0 && md.smbmd_dloc != NULL) {
			(void) snprintf(dimmp->label,
			    sizeof (dimmp->label), "%s", md.smbmd_dloc);
		}
		(*dimmpp)++;
	}
	return (0);
}

void
nb_smbios()
{
	nb_dimm_t **dimmpp;

	if (ksmbios != NULL) {
		dimmpp = nb_dimms;
		(void) smbios_iter(ksmbios, dimm_label, &dimmpp);
	}
}

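/*
 * Discover the populated DIMMs.  Determine the memory mode (single-channel,
 * mirrored, or normal, switching to spare-rank mode if a spare has been
 * deployed), then read the MTR register for every slot on every channel,
 * build the nb_dimms array and record the geometry of each populated DIMM.
 * Finally pick up DIMM labels from SMBIOS.
 */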
static void
nb_dimms_init()
{
	int i, j, k;
	uint16_t mtr;
	uint32_t mc, mca;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	mca = MCA_RD();
	mc = MC_RD();
	if (mca & MCA_SCHDIMM)  /* single-channel mode */
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	else if ((mc & MC_MIRROR) != 0) /* mirror mode */
		nb_mode = NB_MEMORY_MIRROR;
	else
		nb_mode = NB_MEMORY_NORMAL;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_number_memory_controllers * 2 * nb_dimms_per_channel, KM_SLEEP);
	dimmpp = nb_dimms;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		if (nb_mode == NB_MEMORY_NORMAL) {
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}
		for (j = 0; j < nb_dimms_per_channel; j++) {
			mtr = MTR_RD(i, j);
			k = i * 2;
			dimmpp[j] = nb_dimm_init(k, j, mtr);
			if (dimmpp[j]) {
				nb_ndimm++;
				dimm_add_geometry(i, j, dimmpp[j]->nbanks,
				    dimmpp[j]->width, dimmpp[j]->ncolumn,
				    dimmpp[j]->nrow);
			}
			dimmpp[j + nb_dimms_per_channel] =
			    nb_dimm_init(k + 1, j, mtr);
			if (dimmpp[j + nb_dimms_per_channel])
				nb_ndimm++;
		}
		dimmpp += nb_dimms_per_channel * 2;
	}
	nb_smbios();
}

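/*
 * Save the BIOS settings of each root port's PCI Express error-mask, DOCMD
 * and uncorrectable-severity registers, then optionally replace the error
 * masks (controlled by nb5000_reset_uncor_pex/nb5000_reset_cor_pex) and
 * reprogram the DOCMD bits selected by nb5000_docmd_pex_mask.  Ports that do
 * not exist on the detected chipset are skipped; nb_pex_fini() restores the
 * saved values.
 */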
static void
nb_pex_init()
{
	int i;

	for (i = 0; i < NB_PCI_DEV; i++) {
		switch (nb_chipset) {
		case INTEL_NB_5000P:
		case INTEL_NB_5000X:
			if (i == 1)
				continue;
			break;
		case INTEL_NB_5000V:
			if (i == 1 || i > 3)
				continue;
			break;
		case INTEL_NB_5000Z:
			if (i == 1 || i > 5)
				continue;
			break;
		case INTEL_NB_7300:
			break;
		}
		emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
		emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
		emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
		docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
		uncerrsev[i] = UNCERRSEV_RD(i);

		if (nb5000_reset_uncor_pex)
			EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
		if (nb5000_reset_cor_pex)
			EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
		PEX_ERR_DOCMD_WR(i, (docmd_pex[i] & nb5000_docmd_pex_mask) |
		    (nb5000_docmd_pex & ~nb5000_docmd_pex_mask));
	}
}

static void
nb_pex_fini()
{
	int i;

	for (i = 0; i < NB_PCI_DEV; i++) {
		switch (nb_chipset) {
		case INTEL_NB_5000P:
		case INTEL_NB_5000X:
			if (i == 1)
				continue;
			break;
		case INTEL_NB_5000V:
			if (i == 1 || i > 3)
				continue;
			break;
		case INTEL_NB_5000Z:
			if (i == 1 || i > 5)
				continue;
			break;
		case INTEL_NB_7300:
			break;
		}
		EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
		EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
		EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
		PEX_ERR_DOCMD_WR(i, docmd_pex[i]);
		UNCERRSEV_WR(i, uncerrsev[i]);

		if (nb5000_reset_uncor_pex)
			EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
		if (nb5000_reset_cor_pex)
			EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
	}
}

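/*
 * Set up the chipset-internal error registers.  The BIOS settings of
 * ERR{0,1,2}_INT, MCERR_INT and EMASK_INT are saved so they can be restored
 * at unload, the errors this driver handles by polling are masked in the
 * signalling registers, and the error mask is reprogrammed (the 7300 uses a
 * stepping-specific value).
 */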
void
nb_int_init()
{
	uint8_t err0_int;
	uint8_t err1_int;
	uint8_t err2_int;
	uint8_t mcerr_int;
	uint8_t emask_int;
	uint16_t stepping;

	err0_int = ERR0_INT_RD();
	err1_int = ERR1_INT_RD();
	err2_int = ERR2_INT_RD();
	mcerr_int = MCERR_INT_RD();
	emask_int = EMASK_INT_RD();

	nb_err0_int = err0_int;
	nb_err1_int = err1_int;
	nb_err2_int = err2_int;
	nb_mcerr_int = mcerr_int;
	nb_emask_int = emask_int;

	ERR0_INT_WR(0xff);
	ERR1_INT_WR(0xff);
	ERR2_INT_WR(0xff);
	MCERR_INT_WR(0xff);
	EMASK_INT_WR(0xff);

	mcerr_int &= ~nb5000_mask_bios_int;
	mcerr_int |= nb5000_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
	mcerr_int |= nb5000_mask_poll_int;
	err0_int |= nb5000_mask_poll_int;
	err1_int |= nb5000_mask_poll_int;
	err2_int |= nb5000_mask_poll_int;

	l_mcerr_int = mcerr_int;
	ERR0_INT_WR(err0_int);
	ERR1_INT_WR(err1_int);
	ERR2_INT_WR(err2_int);
	MCERR_INT_WR(mcerr_int);
	if (nb5000_reset_emask_int) {
		if (nb_chipset == INTEL_NB_7300) {
			stepping = NB5000_STEPPING();
			if (stepping == 0)
				EMASK_INT_WR(nb7300_emask_int_step0);
			else
				EMASK_INT_WR(nb7300_emask_int);
		} else {
			EMASK_INT_WR(nb5000_emask_int);
		}
	} else {
		EMASK_INT_WR(nb_emask_int);
	}
}

void
nb_int_fini()
{
	ERR0_INT_WR(0xff);
	ERR1_INT_WR(0xff);
	ERR2_INT_WR(0xff);
	MCERR_INT_WR(0xff);
	EMASK_INT_WR(0xff);

	ERR0_INT_WR(nb_err0_int);
	ERR1_INT_WR(nb_err1_int);
	ERR2_INT_WR(nb_err2_int);
	MCERR_INT_WR(nb_mcerr_int);
	EMASK_INT_WR(nb_emask_int);
}

void
nb_int_mask_mc(uint8_t mc_mask_int)
{
	uint8_t emask_int;

	emask_int = MCERR_INT_RD();
	if ((emask_int & mc_mask_int) != mc_mask_int) {
		MCERR_INT_WR(emask_int|mc_mask_int);
		nb_mask_mc_set = 1;
	}
}

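/*
 * FB-DIMM error registers: same pattern as nb_int_init(), save the BIOS
 * settings, mask the polled errors and reprogram the error mask.  On a
 * mirrored 7300 the M23 error is kept masked from MCERR to work around MCH
 * erratum 34 (see the in-line comment below).
 */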
void
nb_fbd_init()
{
	uint32_t err0_fbd;
	uint32_t err1_fbd;
	uint32_t err2_fbd;
	uint32_t mcerr_fbd;
	uint32_t emask_fbd;

	err0_fbd = ERR0_FBD_RD();
	err1_fbd = ERR1_FBD_RD();
	err2_fbd = ERR2_FBD_RD();
	mcerr_fbd = MCERR_FBD_RD();
	emask_fbd = EMASK_FBD_RD();

	nb_err0_fbd = err0_fbd;
	nb_err1_fbd = err1_fbd;
	nb_err2_fbd = err2_fbd;
	nb_mcerr_fbd = mcerr_fbd;
	nb_emask_fbd = emask_fbd;

	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	if (nb_chipset == INTEL_NB_7300 && nb_mode == NB_MEMORY_MIRROR) {
		/* MCH 7300 errata 34 */
		nb5000_mask_bios_fbd &= ~EMASK_FBD_M23;
		mcerr_fbd |= EMASK_FBD_M23;
	}
	mcerr_fbd &= ~nb5000_mask_bios_fbd;
	mcerr_fbd |= nb5000_mask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
	mcerr_fbd |= nb5000_mask_poll_fbd;
	err0_fbd |= nb5000_mask_poll_fbd;
	err1_fbd |= nb5000_mask_poll_fbd;
	err2_fbd |= nb5000_mask_poll_fbd;

	l_mcerr_fbd = mcerr_fbd;
	ERR0_FBD_WR(err0_fbd);
	ERR1_FBD_WR(err1_fbd);
	ERR2_FBD_WR(err2_fbd);
	MCERR_FBD_WR(mcerr_fbd);
	if (nb5000_reset_emask_fbd)
		EMASK_FBD_WR(nb5000_emask_fbd);
	else
		EMASK_FBD_WR(nb_emask_fbd);
}

void
nb_fbd_mask_mc(uint32_t mc_mask_fbd)
{
	uint32_t emask_fbd;

	emask_fbd = MCERR_FBD_RD();
	if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
		MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
		nb_mask_mc_set = 1;
	}
}

void
nb_fbd_fini()
{
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	ERR0_FBD_WR(nb_err0_fbd);
	ERR1_FBD_WR(nb_err1_fbd);
	ERR2_FBD_WR(nb_err2_fbd);
	MCERR_FBD_WR(nb_mcerr_fbd);
	EMASK_FBD_WR(nb_emask_fbd);
}

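/*
 * Front-side bus error registers: save the BIOS settings of FSB 0 (all FSBs
 * are assumed to have been programmed alike by the BIOS), then apply the
 * poll/BIOS masks to FSBs 0 and 1, and to FSBs 2 and 3 as well on the 7300.
 */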
static void
nb_fsb_init()
{
	uint16_t err0_fsb;
	uint16_t err1_fsb;
	uint16_t err2_fsb;
	uint16_t mcerr_fsb;
	uint16_t emask_fsb;

	err0_fsb = ERR0_FSB_RD(0);
	err1_fsb = ERR1_FSB_RD(0);
	err2_fsb = ERR2_FSB_RD(0);
	mcerr_fsb = MCERR_FSB_RD(0);
	emask_fsb = EMASK_FSB_RD(0);

	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	nb_err0_fsb = err0_fsb;
	nb_err1_fsb = err1_fsb;
	nb_err2_fsb = err2_fsb;
	nb_mcerr_fsb = mcerr_fsb;
	nb_emask_fsb = emask_fsb;

	mcerr_fsb &= ~nb5000_mask_bios_fsb;
	mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
	mcerr_fsb |= nb5000_mask_poll_fsb;
	err0_fsb |= nb5000_mask_poll_fsb;
	err1_fsb |= nb5000_mask_poll_fsb;
	err2_fsb |= nb5000_mask_poll_fsb;

	l_mcerr_fsb = mcerr_fsb;
	ERR0_FSB_WR(0, err0_fsb);
	ERR1_FSB_WR(0, err1_fsb);
	ERR2_FSB_WR(0, err2_fsb);
	MCERR_FSB_WR(0, mcerr_fsb);
	if (nb5000_reset_emask_fsb)
		EMASK_FSB_WR(0, nb5000_emask_fsb);
	else
		EMASK_FSB_WR(0, nb_emask_fsb);

	ERR0_FSB_WR(1, err0_fsb);
	ERR1_FSB_WR(1, err1_fsb);
	ERR2_FSB_WR(1, err2_fsb);
	MCERR_FSB_WR(1, mcerr_fsb);
	if (nb5000_reset_emask_fsb)
		EMASK_FSB_WR(1, nb5000_emask_fsb);
	else
		EMASK_FSB_WR(1, nb_emask_fsb);

	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(2, err0_fsb);
		ERR1_FSB_WR(2, err1_fsb);
		ERR2_FSB_WR(2, err2_fsb);
		MCERR_FSB_WR(2, mcerr_fsb);
		if (nb5000_reset_emask_fsb)
			EMASK_FSB_WR(2, nb5000_emask_fsb);
		else
			EMASK_FSB_WR(2, nb_emask_fsb);

		ERR0_FSB_WR(3, err0_fsb);
		ERR1_FSB_WR(3, err1_fsb);
		ERR2_FSB_WR(3, err2_fsb);
		MCERR_FSB_WR(3, mcerr_fsb);
		if (nb5000_reset_emask_fsb)
			EMASK_FSB_WR(3, nb5000_emask_fsb);
		else
			EMASK_FSB_WR(3, nb_emask_fsb);
	}
}

static void
nb_fsb_fini()
{
	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(0, nb_err0_fsb);
	ERR1_FSB_WR(0, nb_err1_fsb);
	ERR2_FSB_WR(0, nb_err2_fsb);
	MCERR_FSB_WR(0, nb_mcerr_fsb);
	EMASK_FSB_WR(0, nb_emask_fsb);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	ERR0_FSB_WR(1, nb_err0_fsb);
	ERR1_FSB_WR(1, nb_err1_fsb);
	ERR2_FSB_WR(1, nb_err2_fsb);
	MCERR_FSB_WR(1, nb_mcerr_fsb);
	EMASK_FSB_WR(1, nb_emask_fsb);

	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(2, nb_err0_fsb);
		ERR1_FSB_WR(2, nb_err1_fsb);
		ERR2_FSB_WR(2, nb_err2_fsb);
		MCERR_FSB_WR(2, nb_mcerr_fsb);
		EMASK_FSB_WR(2, nb_emask_fsb);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(3, nb_err0_fsb);
		ERR1_FSB_WR(3, nb_err1_fsb);
		ERR2_FSB_WR(3, nb_err2_fsb);
		MCERR_FSB_WR(3, nb_mcerr_fsb);
		EMASK_FSB_WR(3, nb_emask_fsb);
	}
}

void
nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
{
	uint16_t emask_fsb;

	emask_fsb = MCERR_FSB_RD(fsb);
	if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
		MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
		nb_mask_mc_set = 1;
	}
}

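/*
 * Restore the MCERR masks computed at init time, undoing any additional
 * masking done by nb_int_mask_mc(), nb_fbd_mask_mc() and nb_fsb_mask_mc().
 */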
void
nb_mask_mc_reset()
{
	MCERR_FBD_WR(l_mcerr_fbd);
	MCERR_INT_WR(l_mcerr_int);
	MCERR_FSB_WR(0, l_mcerr_fsb);
	MCERR_FSB_WR(1, l_mcerr_fsb);
	if (nb_chipset == INTEL_NB_7300) {
		MCERR_FSB_WR(2, l_mcerr_fsb);
		MCERR_FSB_WR(3, l_mcerr_fsb);
	}
}

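/*
 * One-time device setup: create the error queue, then initialize DIMM
 * discovery, memory-controller decode, PCI Express, internal, FB-DIMM and
 * FSB error handling, and finally enable the hardware scrubber.
 */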
int
nb_dev_init()
{
	find_dimms_per_channel();
	mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
	nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
	    sizeof (nb_logout_t), 1, ERRORQ_VITAL);
	if (nb_queue == NULL) {
		mutex_destroy(&nb_mutex);
		return (EAGAIN);
	}
	dimm_init();
	nb_dimms_init();
	nb_mc_init();
	nb_pex_init();
	nb_int_init();
	nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();
	return (0);
}

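/*
 * Identify the chipset from the PCI vendor/device ID of bus 0, device 0,
 * function 0.  Unsupported chipsets are rejected unless
 * nb_5000_memory_controller is overridden; the 5000V and 5000Z have a
 * single memory branch.
 */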
int
nb_init()
{
	/* get vendor and device */
	nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
	switch (nb_chipset) {
	default:
		if (nb_5000_memory_controller == 0)
			return (ENOTSUP);
		break;
	case INTEL_NB_7300:
	case INTEL_NB_5000P:
	case INTEL_NB_5000X:
		break;
	case INTEL_NB_5000V:
	case INTEL_NB_5000Z:
		nb_number_memory_controllers = 1;
		break;
	}
	return (0);
}

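/*
 * Re-probe the memory configuration: rebuild the DIMM and address-decode
 * state from the current register contents, re-apply the error-register
 * setup, then free the previous nb_dimms array.
 */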
void
nb_dev_reinit()
{
	int i, j;
	int nchannels = nb_number_memory_controllers * 2;
	nb_dimm_t **dimmpp;
	nb_dimm_t *dimmp;
	nb_dimm_t **old_nb_dimms;
	int old_nb_dimms_per_channel;

	old_nb_dimms = nb_dimms;
	old_nb_dimms_per_channel = nb_dimms_per_channel;

	dimm_fini();
	find_dimms_per_channel();
	dimm_init();
	nb_dimms_init();
	nb_mc_init();
	nb_pex_init();
	nb_int_init();
	nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();

	dimmpp = old_nb_dimms;
	for (i = 0; i < nchannels; i++) {
		for (j = 0; j < old_nb_dimms_per_channel; j++) {
			dimmp = *dimmpp;
			if (dimmp) {
				kmem_free(dimmp, sizeof (nb_dimm_t));
				*dimmpp = NULL;
			}
			dimmpp++;
		}
	}
	kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) *
	    nb_number_memory_controllers * 2 * old_nb_dimms_per_channel);
}

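/*
 * Driver unload: destroy the error queue and restore the BIOS settings of
 * the error registers before releasing the DIMM state.
 */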
void
nb_dev_unload()
{
	errorq_destroy(nb_queue);
	nb_queue = NULL;
	mutex_destroy(&nb_mutex);
	nb_int_fini();
	nb_fbd_fini();
	nb_fsb_fini();
	nb_pex_fini();
	nb_fini();
}

void
nb_unload()
{
}