/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

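/*
 * Intel 5000/7300 series chipset (northbridge) memory controller support:
 * discovery of the DIMM population and memory address map from SPD, SMBIOS
 * and chipset registers, and programming of the internal, FB-DIMM,
 * front-side bus and PCI Express error reporting registers.
 */
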
#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/log.h>
#include <sys/systm.h>
#include <sys/modctl.h>
#include <sys/errorq.h>
#include <sys/controlregs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/sysevent.h>
#include <sys/pghw.h>
#include <sys/cyclic.h>
#include <sys/pci_cfgspace.h>
#include <sys/mc_intel.h>
#include <sys/cpu_module_impl.h>
#include <sys/smbios.h>
#include <sys/pci.h>
#include <sys/machsystm.h>
#include "nb5000.h"
#include "nb_log.h"
#include "dimm_phys.h"
#include "rank.h"

int nb_hw_memory_scrub_enable = 1;
static int nb_sw_scrub_disabled = 0;

int nb_5000_memory_controller = 0;
int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
int nb_dimms_per_channel = 0;
static int ndimms = 0;

nb_dimm_t **nb_dimms;
int nb_ndimm;
uint32_t nb_chipset;
enum nb_memory_mode nb_mode;
bank_select_t nb_banks[NB_MAX_MEM_BRANCH_SELECT];
rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MAX_MEM_RANK_SELECT];
uint32_t top_of_low_memory;
uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];

errorq_t *nb_queue;
kmutex_t nb_mutex;

static uint8_t nb_err0_int;
static uint8_t nb_err1_int;
static uint8_t nb_err2_int;
static uint8_t nb_mcerr_int;
static uint8_t nb_emask_int;

static uint32_t nb_err0_fbd;
static uint32_t nb_err1_fbd;
static uint32_t nb_err2_fbd;
static uint32_t nb_mcerr_fbd;
static uint32_t nb_emask_fbd;

static uint16_t nb_err0_fsb;
static uint16_t nb_err1_fsb;
static uint16_t nb_err2_fsb;
static uint16_t nb_mcerr_fsb;
static uint16_t nb_emask_fsb;

static uint32_t emask_uncor_pex[NB_PCI_DEV];
static uint32_t emask_cor_pex[NB_PCI_DEV];
static uint32_t emask_rp_pex[NB_PCI_DEV];
static uint32_t docmd_pex[NB_PCI_DEV];
static uint32_t uncerrsev[NB_PCI_DEV];

static uint8_t l_mcerr_int;
static uint32_t l_mcerr_fbd;
static uint16_t l_mcerr_fsb;

uint_t nb5000_emask_fbd = EMASK_FBD_RES;
int nb5000_reset_emask_fbd = 1;
uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;

uint_t nb5000_emask_fsb = 0;
int nb5000_reset_emask_fsb = 1;
uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;

uint_t nb7300_emask_int = EMASK_INT_7300;
uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
uint_t nb5000_emask_int = EMASK_INT_5000;
int nb5000_reset_emask_int = 1;
uint_t nb5000_mask_poll_int = EMASK_INT_NF;
uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;

int nb5000_reset_uncor_pex = 0;
uint_t nb5000_mask_uncor_pex = 0;
int nb5000_reset_cor_pex = 1;
uint_t nb5000_mask_cor_pex = 0xffffffff;
uint32_t nb5000_docmd_pex_mask = DOCMD_PEX_MASK;
uint32_t nb5000_docmd_pex = DOCMD_PEX;

int nb_mask_mc_set;

typedef struct find_dimm_label {
	void (*label_function)(int, char *, int);
} find_dimm_label_t;

static void x8450_dimm_label(int, char *, int);

static struct platform_label {
	const char *sys_vendor;		/* SMB_TYPE_SYSTEM vendor prefix */
	const char *sys_product;	/* SMB_TYPE_SYSTEM product prefix */
	find_dimm_label_t dimm_label;
	int dimms_per_channel;
} platform_label[] = {
	{ "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
	    x8450_dimm_label, 8 },
	{ NULL, NULL, NULL, 0 }
};

static unsigned short
read_spd(int bus)
{
	unsigned short rt = 0;
	int branch = bus >> 1;
	int channel = bus & 1;

	rt = SPD_RD(branch, channel);

	return (rt);
}

static void
write_spdcmd(int bus, uint32_t val)
{
	int branch = bus >> 1;
	int channel = bus & 1;
	SPDCMD_WR(branch, channel, val);
}

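/*
 * Read one byte of SPD EEPROM data from the DIMM at the given slave address
 * on the given channel: wait for the SPD interface to go idle, issue the
 * command, then poll until the data is valid or a bus error is reported.
 * Up to four attempts are made; -1 is returned on timeout or persistent
 * bus error.
 */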
static int
read_spd_eeprom(int bus, int slave, int addr)
{
	int retry = 4;
	int wait;
	int spd;
	uint32_t cmd;

	for (;;) {
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0)
				return (-1);
			drv_usecwait(10);
		}
		cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
		write_spdcmd(bus, cmd);
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0) {
				spd = SPD_BUS_ERROR;
				break;
			}
			drv_usecwait(10);
		}
		while ((spd & SPD_BUS_ERROR) == 0 &&
		    (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
		    SPD_READ_DATA_VALID) {
			spd = read_spd(bus);
			if (--wait == 0)
				return (-1);
		}
		if ((spd & SPD_BUS_ERROR) == 0)
			break;
		if (--retry == 0)
			return (-1);
	}
	return (spd & 0xff);
}

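/*
 * Release the per-DIMM structures and the nb_dimms pointer array allocated
 * by nb_dimms_init(), then tear down the DIMM topology state via dimm_fini().
 */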
static void
nb_fini()
{
	int i, j;
	int nchannels = nb_number_memory_controllers * 2;
	nb_dimm_t **dimmpp;
	nb_dimm_t *dimmp;

	dimmpp = nb_dimms;
	for (i = 0; i < nchannels; i++) {
		for (j = 0; j < nb_dimms_per_channel; j++) {
			dimmp = *dimmpp;
			if (dimmp) {
				kmem_free(dimmp, sizeof (nb_dimm_t));
				*dimmpp = NULL;
			}
			dimmpp++;
		}
	}
	kmem_free(nb_dimms, sizeof (nb_dimm_t *) *
	    nb_number_memory_controllers * 2 * nb_dimms_per_channel);
	nb_dimms = NULL;
	dimm_fini();
}

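/*
 * Turn on the hardware memory scrubber: patrol scrubbing always, and demand
 * scrubbing as well unless the branches are mirrored.  Controlled by the
 * nb_hw_memory_scrub_enable tunable.
 */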
void
nb_scrubber_enable()
{
	uint32_t mc;

	if (!nb_hw_memory_scrub_enable)
		return;

	mc = MC_RD();
	if ((mc & MC_MIRROR) != 0) /* mirror mode */
		mc |= MC_PATROL_SCRUB;
	else
		mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
	MC_WR(mc);

	if (nb_sw_scrub_disabled++)
		memscrub_disable();
}

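/*
 * Build an nb_dimm_t for the DIMM in the given channel/slot.  Returns NULL
 * if the MTR register shows no DIMM present or the SPD memory type is not
 * FB-DIMM (type 9).  Geometry comes from the MTR register; manufacturing
 * data (manufacturer, serial number, part number, revision) is read from
 * the SPD EEPROM.
 */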
static nb_dimm_t *
nb_dimm_init(int channel, int dimm, uint16_t mtr)
{
	nb_dimm_t *dp;
	int i, t;
	int spd_sz;

	if (MTR_PRESENT(mtr) == 0)
		return (NULL);
	t = read_spd_eeprom(channel, dimm, 2) & 0xf;

	if (t != 9)
		return (NULL);

	dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);

	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
	if (t == 1)
		spd_sz = 128;
	else if (t == 2)
		spd_sz = 176;
	else
		spd_sz = 256;
	dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
	    (read_spd_eeprom(channel, dimm, 118) << 8);
	dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
	dp->serial_number =
	    (read_spd_eeprom(channel, dimm, 122) << 24) |
	    (read_spd_eeprom(channel, dimm, 123) << 16) |
	    (read_spd_eeprom(channel, dimm, 124) << 8) |
	    read_spd_eeprom(channel, dimm, 125);
	t = read_spd_eeprom(channel, dimm, 121);
	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
	dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
	if (spd_sz > 128) {
		for (i = 0; i < sizeof (dp->part_number); i++) {
			dp->part_number[i] =
			    read_spd_eeprom(channel, dimm, 128 + i);
		}
		for (i = 0; i < sizeof (dp->revision); i++) {
			dp->revision[i] =
			    read_spd_eeprom(channel, dimm, 146 + i);
		}
	}
	dp->mtr_present = MTR_PRESENT(mtr);
	dp->nranks = MTR_NUMRANK(mtr);
	dp->nbanks = MTR_NUMBANK(mtr);
	dp->ncolumn = MTR_NUMCOL(mtr);
	dp->nrow = MTR_NUMROW(mtr);
	dp->width = MTR_WIDTH(mtr);
	dp->dimm_size = MTR_DIMMSIZE(mtr);

	return (dp);
}

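/*
 * Return the upper limit of the branch-select range that contains "base"
 * for the given memory controller, adjusted for the hole between
 * top_of_low_memory and TLOW_MAX and halved when both branches interleave
 * the range (and mirroring is off).
 */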
static uint64_t
mc_range(int controller, uint64_t base)
{
	int i;
	uint64_t limit = 0;

	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
		if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
		    base < nb_banks[i].limit) {
			limit = nb_banks[i].limit;
			if (base <= top_of_low_memory &&
			    limit > top_of_low_memory) {
				limit -= TLOW_MAX - top_of_low_memory;
			}
			if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
			    nb_mode != NB_MEMORY_MIRROR) {
				limit = limit / 2;
			}
		}
	}
	return (limit);
}

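/*
 * Build the memory address map: read the top-of-low-memory and branch
 * interleave (MIR) registers to fill in nb_banks[], then walk the DMIR
 * registers of each memory controller to fill in nb_ranks[], accounting
 * for rank and branch interleave and the memory hole, and register each
 * rank with the address-translation code via dimm_add_rank().
 */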
void
nb_mc_init()
{
	uint16_t tolm;
	uint16_t mir;
	uint32_t hole_base;
	uint32_t hole_size;
	uint32_t dmir;
	uint64_t base;
	uint64_t limit;
	uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
	int i, j, k;
	uint8_t interleave;

	base = 0;
	tolm = TOLM_RD();
	top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
		mir = MIR_RD(i);
		limit = (uint64_t)(mir >> 4) << 28;
		way0 = mir & 1;
		way1 = (mir >> 1) & 1;
		if (way0 == 0 && way1 == 0) {
			way0 = 1;
			way1 = 1;
		}
		if (limit > top_of_low_memory)
			limit += TLOW_MAX - top_of_low_memory;
		nb_banks[i].base = base;
		nb_banks[i].limit = limit;
		nb_banks[i].way[0] = way0;
		nb_banks[i].way[1] = way1;
		base = limit;
	}
	for (i = 0; i < nb_number_memory_controllers; i++) {
		base = 0;

		for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
			dmir = DMIR_RD(i, j);
			limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
			if (limit == 0) {
				limit = mc_range(i, base);
			}
			branch_interleave = 0;
			hole_base = 0;
			hole_size = 0;
			DMIR_RANKS(dmir, rank0, rank1, rank2, rank3);
			if (rank0 == rank1)
				interleave = 1;
			else if (rank0 == rank2)
				interleave = 2;
			else
				interleave = 4;
			if (nb_mode != NB_MEMORY_MIRROR &&
			    nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
				for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
					if (base >= nb_banks[k].base &&
					    base < nb_banks[k].limit) {
						if (nb_banks[i].way[0] &&
						    nb_banks[i].way[1]) {
							interleave *= 2;
							limit *= 2;
							branch_interleave = 1;
						}
						break;
					}
				}
			}
			if (base < top_of_low_memory &&
			    limit > top_of_low_memory) {
				hole_base = top_of_low_memory;
				hole_size = TLOW_MAX - top_of_low_memory;
				limit += hole_size;
			} else if (base > top_of_low_memory) {
				limit += TLOW_MAX - top_of_low_memory;
			}
			nb_ranks[i][j].base = base;
			nb_ranks[i][j].limit = limit;
			nb_ranks[i][j].rank[0] = rank0;
			nb_ranks[i][j].rank[1] = rank1;
			nb_ranks[i][j].rank[2] = rank2;
			nb_ranks[i][j].rank[3] = rank3;
			nb_ranks[i][j].interleave = interleave;
			nb_ranks[i][j].branch_interleave = branch_interleave;
			nb_ranks[i][j].hole_base = hole_base;
			nb_ranks[i][j].hole_size = hole_size;
			if (limit > base) {
				dimm_add_rank(i, rank0, branch_interleave, 0,
				    base, hole_base, hole_size, interleave,
				    limit);
				if (rank0 != rank1) {
					dimm_add_rank(i, rank1,
					    branch_interleave, 1, base,
					    hole_base, hole_size, interleave,
					    limit);
					if (rank0 != rank2) {
						dimm_add_rank(i, rank2,
						    branch_interleave, 2, base,
						    hole_base, hole_size,
						    interleave, limit);
						dimm_add_rank(i, rank3,
						    branch_interleave, 3, base,
						    hole_base, hole_size,
						    interleave, limit);
					}
				}
			}
			base = limit;
		}
	}
}

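/*
 * Substitute the branch's spare rank for bad_rank in the rank map, used once
 * the hardware has brought the spare rank into service.
 */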
void
nb_used_spare_rank(int branch, int bad_rank)
{
	int i;
	int j;

	for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
		for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
			if (nb_ranks[branch][i].rank[j] == bad_rank) {
				nb_ranks[branch][i].rank[j] =
				    spare_rank[branch];
				i = NB_MEM_RANK_SELECT;
				break;
			}
		}
	}
}

/*ARGSUSED*/
static int
memoryarray(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	smbios_memarray_t ma;

	if (sp->smbstr_type == SMB_TYPE_MEMARRAY &&
	    smbios_info_memarray(shp, sp->smbstr_id, &ma) == 0) {
		ndimms += ma.smbma_ndevs;
	}
	return (0);
}

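/*
 * Determine the number of DIMM slots per channel.  Known platforms are
 * matched against the SMBIOS system vendor/product strings and supply both
 * the slot count and a DIMM labelling routine (returned to the caller); for
 * other platforms the count is derived from the SMBIOS memory-array device
 * counts, falling back to NB_MAX_DIMMS_PER_CHANNEL.
 */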
find_dimm_label_t *
find_dimms_per_channel()
{
	struct platform_label *pl;
	smbios_info_t si;
	smbios_system_t sy;
	id_t id;
	int read_memarray = 1;
	find_dimm_label_t *rt = NULL;

	if (ksmbios != NULL) {
		if ((id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
		    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {
			for (pl = platform_label; pl->sys_vendor; pl++) {
				if (strncmp(pl->sys_vendor,
				    si.smbi_manufacturer,
				    strlen(pl->sys_vendor)) == 0 &&
				    strncmp(pl->sys_product, si.smbi_product,
				    strlen(pl->sys_product)) == 0) {
					nb_dimms_per_channel =
					    pl->dimms_per_channel;
					read_memarray = 0;
					rt = &pl->dimm_label;
					break;
				}
			}
		}
		if (read_memarray)
			(void) smbios_iter(ksmbios, memoryarray, 0);
	}
	if (nb_dimms_per_channel == 0) {
		if (ndimms) {
			nb_dimms_per_channel = ndimms /
			    (nb_number_memory_controllers * 2);
		} else {
			nb_dimms_per_channel = NB_MAX_DIMMS_PER_CHANNEL;
		}
	}
	return (rt);
}

static int
dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	nb_dimm_t ***dimmpp = arg;
	nb_dimm_t *dimmp;
	smbios_memdevice_t md;

	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
		dimmp = **dimmpp;
		if (dimmp && smbios_info_memdevice(shp, sp->smbstr_id,
		    &md) == 0 && md.smbmd_dloc != NULL) {
			(void) snprintf(dimmp->label,
			    sizeof (dimmp->label), "%s", md.smbmd_dloc);
		}
		(*dimmpp)++;
	}
	return (0);
}

void
nb_smbios()
{
	nb_dimm_t **dimmpp;

	if (ksmbios != NULL) {
		dimmpp = nb_dimms;
		(void) smbios_iter(ksmbios, dimm_label, &dimmpp);
	}
}

static void
x8450_dimm_label(int dimm, char *label, int label_sz)
{
	int channel = dimm >> 3;

	dimm = dimm & 0x7;
	(void) snprintf(label, label_sz, "D%d", (dimm * 4) + channel);
}

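/*
 * Discover the populated DIMMs.  The memory mode (single-channel, mirror,
 * normal or spare-rank) is derived from the MCA/MC and sparing registers,
 * the nb_dimms array is allocated to cover every slot on every channel, and
 * an nb_dimm_t is built for each populated slot on both channels of each
 * branch.  DIMM labels come from the platform label routine if one was
 * found, otherwise from SMBIOS.
 */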
static void
nb_dimms_init(find_dimm_label_t *label_function)
{
	int i, j, k, l;
	uint16_t mtr;
	uint32_t mc, mca;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	mca = MCA_RD();
	mc = MC_RD();
	if (mca & MCA_SCHDIMM)  /* single-channel mode */
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	else if ((mc & MC_MIRROR) != 0) /* mirror mode */
		nb_mode = NB_MEMORY_MIRROR;
	else
		nb_mode = NB_MEMORY_NORMAL;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_number_memory_controllers * 2 * nb_dimms_per_channel, KM_SLEEP);
	dimmpp = nb_dimms;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		if (nb_mode == NB_MEMORY_NORMAL) {
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}
		for (j = 0; j < nb_dimms_per_channel; j++) {
			mtr = MTR_RD(i, j);
			k = i * 2;
			dimmpp[j] = nb_dimm_init(k, j, mtr);
			if (dimmpp[j]) {
				nb_ndimm++;
				dimm_add_geometry(i, j, dimmpp[j]->nbanks,
				    dimmpp[j]->width, dimmpp[j]->ncolumn,
				    dimmpp[j]->nrow);
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
			}
			dimmpp[j + nb_dimms_per_channel] =
			    nb_dimm_init(k + 1, j, mtr);
			l = j + nb_dimms_per_channel;
			if (dimmpp[l]) {
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + l,
					    dimmpp[l]->label,
					    sizeof (dimmpp[l]->label));
				}
				nb_ndimm++;
			}
		}
		dimmpp += nb_dimms_per_channel * 2;
	}
	if (label_function == NULL)
		nb_smbios();
}

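/*
 * Save the BIOS values of the PCI Express error masks, DOCMD and
 * uncorrectable-error severity registers for each root port, then reprogram
 * the uncorrectable/correctable masks (when the nb5000_reset_* tunables are
 * set) and the DOCMD register.  Root ports that do not exist on the detected
 * chipset are skipped.
 */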
static void
nb_pex_init()
{
	int i;

	for (i = 0; i < NB_PCI_DEV; i++) {
		switch (nb_chipset) {
		case INTEL_NB_5000P:
		case INTEL_NB_5000X:
			if (i == 1)
				continue;
			break;
		case INTEL_NB_5000V:
			if (i == 1 || i > 3)
				continue;
			break;
		case INTEL_NB_5000Z:
			if (i == 1 || i > 5)
				continue;
			break;
		case INTEL_NB_7300:
			break;
		}
		emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
		emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
		emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
		docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
		uncerrsev[i] = UNCERRSEV_RD(i);

		if (nb5000_reset_uncor_pex)
			EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
		if (nb5000_reset_cor_pex)
			EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
		PEX_ERR_DOCMD_WR(i, (docmd_pex[i] & nb5000_docmd_pex_mask) |
		    (nb5000_docmd_pex & ~nb5000_docmd_pex_mask));
	}
}

static void
nb_pex_fini()
{
	int i;

	for (i = 0; i < NB_PCI_DEV; i++) {
		switch (nb_chipset) {
		case INTEL_NB_5000P:
		case INTEL_NB_5000X:
			if (i == 1)
				continue;
			break;
		case INTEL_NB_5000V:
			if (i == 1 || i > 3)
				continue;
			break;
		case INTEL_NB_5000Z:
			if (i == 1 || i > 5)
				continue;
			break;
		case INTEL_NB_7300:
			break;
		}
		EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
		EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
		EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
		PEX_ERR_DOCMD_WR(i, docmd_pex[i]);
		UNCERRSEV_WR(i, uncerrsev[i]);

		if (nb5000_reset_uncor_pex)
			EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
		if (nb5000_reset_cor_pex)
			EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
	}
}

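/*
 * Set up the "internal" error reporting registers.  The BIOS values of the
 * ERR0/ERR1/ERR2/MCERR routing registers and the error mask are saved for
 * restoration at unload time, then the masks are adjusted so that the
 * non-fatal errors in nb5000_mask_poll_int are masked everywhere (left to
 * the periodic poller) and the fatal errors in nb5000_mask_bios_int signal
 * MCERR unless BIOS left them enabled in one of the ERR registers.
 */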
void
nb_int_init()
{
	uint8_t err0_int;
	uint8_t err1_int;
	uint8_t err2_int;
	uint8_t mcerr_int;
	uint8_t emask_int;
	uint16_t stepping;

	err0_int = ERR0_INT_RD();
	err1_int = ERR1_INT_RD();
	err2_int = ERR2_INT_RD();
	mcerr_int = MCERR_INT_RD();
	emask_int = EMASK_INT_RD();

	nb_err0_int = err0_int;
	nb_err1_int = err1_int;
	nb_err2_int = err2_int;
	nb_mcerr_int = mcerr_int;
	nb_emask_int = emask_int;

	ERR0_INT_WR(0xff);
	ERR1_INT_WR(0xff);
	ERR2_INT_WR(0xff);
	MCERR_INT_WR(0xff);
	EMASK_INT_WR(0xff);

	mcerr_int &= ~nb5000_mask_bios_int;
	mcerr_int |= nb5000_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
	mcerr_int |= nb5000_mask_poll_int;
	err0_int |= nb5000_mask_poll_int;
	err1_int |= nb5000_mask_poll_int;
	err2_int |= nb5000_mask_poll_int;

	l_mcerr_int = mcerr_int;
	ERR0_INT_WR(err0_int);
	ERR1_INT_WR(err1_int);
	ERR2_INT_WR(err2_int);
	MCERR_INT_WR(mcerr_int);
	if (nb5000_reset_emask_int) {
		if (nb_chipset == INTEL_NB_7300) {
			stepping = NB5000_STEPPING();
			if (stepping == 0)
				EMASK_INT_WR(nb7300_emask_int_step0);
			else
				EMASK_INT_WR(nb7300_emask_int);
		} else {
			EMASK_INT_WR(nb5000_emask_int);
		}
	} else {
		EMASK_INT_WR(nb_emask_int);
	}
}

void
nb_int_fini()
{
	ERR0_INT_WR(0xff);
	ERR1_INT_WR(0xff);
	ERR2_INT_WR(0xff);
	MCERR_INT_WR(0xff);
	EMASK_INT_WR(0xff);

	ERR0_INT_WR(nb_err0_int);
	ERR1_INT_WR(nb_err1_int);
	ERR2_INT_WR(nb_err2_int);
	MCERR_INT_WR(nb_mcerr_int);
	EMASK_INT_WR(nb_emask_int);
}

void
nb_int_mask_mc(uint8_t mc_mask_int)
{
	uint8_t emask_int;

	emask_int = MCERR_INT_RD();
	if ((emask_int & mc_mask_int) != mc_mask_int) {
		MCERR_INT_WR(emask_int|mc_mask_int);
		nb_mask_mc_set = 1;
	}
}

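/*
 * Set up the FB-DIMM error reporting registers following the same pattern
 * as the internal errors above: save the BIOS values, mask the non-fatal
 * errors for polling, and steer the fatal errors to MCERR unless BIOS
 * routed them to an ERR pin.  On a 7300 in mirror mode, the M23 error is
 * dropped from the fatal mask and masked in MCERR (MCH 7300 erratum 34
 * workaround).
 */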
void
nb_fbd_init()
{
	uint32_t err0_fbd;
	uint32_t err1_fbd;
	uint32_t err2_fbd;
	uint32_t mcerr_fbd;
	uint32_t emask_fbd;

	err0_fbd = ERR0_FBD_RD();
	err1_fbd = ERR1_FBD_RD();
	err2_fbd = ERR2_FBD_RD();
	mcerr_fbd = MCERR_FBD_RD();
	emask_fbd = EMASK_FBD_RD();

	nb_err0_fbd = err0_fbd;
	nb_err1_fbd = err1_fbd;
	nb_err2_fbd = err2_fbd;
	nb_mcerr_fbd = mcerr_fbd;
	nb_emask_fbd = emask_fbd;

	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	if (nb_chipset == INTEL_NB_7300 && nb_mode == NB_MEMORY_MIRROR) {
		/* MCH 7300 errata 34 */
		nb5000_mask_bios_fbd &= ~EMASK_FBD_M23;
		mcerr_fbd |= EMASK_FBD_M23;
	}
	mcerr_fbd &= ~nb5000_mask_bios_fbd;
	mcerr_fbd |= nb5000_mask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
	mcerr_fbd |= nb5000_mask_poll_fbd;
	err0_fbd |= nb5000_mask_poll_fbd;
	err1_fbd |= nb5000_mask_poll_fbd;
	err2_fbd |= nb5000_mask_poll_fbd;

	l_mcerr_fbd = mcerr_fbd;
	ERR0_FBD_WR(err0_fbd);
	ERR1_FBD_WR(err1_fbd);
	ERR2_FBD_WR(err2_fbd);
	MCERR_FBD_WR(mcerr_fbd);
	if (nb5000_reset_emask_fbd)
		EMASK_FBD_WR(nb5000_emask_fbd);
	else
		EMASK_FBD_WR(nb_emask_fbd);
}

void
nb_fbd_mask_mc(uint32_t mc_mask_fbd)
{
	uint32_t emask_fbd;

	emask_fbd = MCERR_FBD_RD();
	if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
		MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
		nb_mask_mc_set = 1;
	}
}

void
nb_fbd_fini()
{
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	ERR0_FBD_WR(nb_err0_fbd);
	ERR1_FBD_WR(nb_err1_fbd);
	ERR2_FBD_WR(nb_err2_fbd);
	MCERR_FBD_WR(nb_mcerr_fbd);
	EMASK_FBD_WR(nb_emask_fbd);
}

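/*
 * Set up the front-side bus error reporting registers for both FSBs (all
 * four on the 7300), following the same save/mask/steer pattern as the
 * internal and FB-DIMM errors.  Note that the masks computed from FSB 0's
 * BIOS settings are applied to every FSB.
 */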
static void
nb_fsb_init()
{
	uint16_t err0_fsb;
	uint16_t err1_fsb;
	uint16_t err2_fsb;
	uint16_t mcerr_fsb;
	uint16_t emask_fsb;

	err0_fsb = ERR0_FSB_RD(0);
	err1_fsb = ERR1_FSB_RD(0);
	err2_fsb = ERR2_FSB_RD(0);
	mcerr_fsb = MCERR_FSB_RD(0);
	emask_fsb = EMASK_FSB_RD(0);

	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	nb_err0_fsb = err0_fsb;
	nb_err1_fsb = err1_fsb;
	nb_err2_fsb = err2_fsb;
	nb_mcerr_fsb = mcerr_fsb;
	nb_emask_fsb = emask_fsb;

	mcerr_fsb &= ~nb5000_mask_bios_fsb;
	mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
	mcerr_fsb |= nb5000_mask_poll_fsb;
	err0_fsb |= nb5000_mask_poll_fsb;
	err1_fsb |= nb5000_mask_poll_fsb;
	err2_fsb |= nb5000_mask_poll_fsb;

	l_mcerr_fsb = mcerr_fsb;
	ERR0_FSB_WR(0, err0_fsb);
	ERR1_FSB_WR(0, err1_fsb);
	ERR2_FSB_WR(0, err2_fsb);
	MCERR_FSB_WR(0, mcerr_fsb);
	if (nb5000_reset_emask_fsb)
		EMASK_FSB_WR(0, nb5000_emask_fsb);
	else
		EMASK_FSB_WR(0, nb_emask_fsb);

	ERR0_FSB_WR(1, err0_fsb);
	ERR1_FSB_WR(1, err1_fsb);
	ERR2_FSB_WR(1, err2_fsb);
	MCERR_FSB_WR(1, mcerr_fsb);
	if (nb5000_reset_emask_fsb)
		EMASK_FSB_WR(1, nb5000_emask_fsb);
	else
		EMASK_FSB_WR(1, nb_emask_fsb);

	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(2, err0_fsb);
		ERR1_FSB_WR(2, err1_fsb);
		ERR2_FSB_WR(2, err2_fsb);
		MCERR_FSB_WR(2, mcerr_fsb);
		if (nb5000_reset_emask_fsb)
			EMASK_FSB_WR(2, nb5000_emask_fsb);
		else
			EMASK_FSB_WR(2, nb_emask_fsb);

		ERR0_FSB_WR(3, err0_fsb);
		ERR1_FSB_WR(3, err1_fsb);
		ERR2_FSB_WR(3, err2_fsb);
		MCERR_FSB_WR(3, mcerr_fsb);
		if (nb5000_reset_emask_fsb)
			EMASK_FSB_WR(3, nb5000_emask_fsb);
		else
			EMASK_FSB_WR(3, nb_emask_fsb);
	}
}

static void
nb_fsb_fini()
{
	ERR0_FSB_WR(0, 0xffff);
	ERR1_FSB_WR(0, 0xffff);
	ERR2_FSB_WR(0, 0xffff);
	MCERR_FSB_WR(0, 0xffff);
	EMASK_FSB_WR(0, 0xffff);

	ERR0_FSB_WR(0, nb_err0_fsb);
	ERR1_FSB_WR(0, nb_err1_fsb);
	ERR2_FSB_WR(0, nb_err2_fsb);
	MCERR_FSB_WR(0, nb_mcerr_fsb);
	EMASK_FSB_WR(0, nb_emask_fsb);

	ERR0_FSB_WR(1, 0xffff);
	ERR1_FSB_WR(1, 0xffff);
	ERR2_FSB_WR(1, 0xffff);
	MCERR_FSB_WR(1, 0xffff);
	EMASK_FSB_WR(1, 0xffff);

	ERR0_FSB_WR(1, nb_err0_fsb);
	ERR1_FSB_WR(1, nb_err1_fsb);
	ERR2_FSB_WR(1, nb_err2_fsb);
	MCERR_FSB_WR(1, nb_mcerr_fsb);
	EMASK_FSB_WR(1, nb_emask_fsb);

	if (nb_chipset == INTEL_NB_7300) {
		ERR0_FSB_WR(2, 0xffff);
		ERR1_FSB_WR(2, 0xffff);
		ERR2_FSB_WR(2, 0xffff);
		MCERR_FSB_WR(2, 0xffff);
		EMASK_FSB_WR(2, 0xffff);

		ERR0_FSB_WR(2, nb_err0_fsb);
		ERR1_FSB_WR(2, nb_err1_fsb);
		ERR2_FSB_WR(2, nb_err2_fsb);
		MCERR_FSB_WR(2, nb_mcerr_fsb);
		EMASK_FSB_WR(2, nb_emask_fsb);

		ERR0_FSB_WR(3, 0xffff);
		ERR1_FSB_WR(3, 0xffff);
		ERR2_FSB_WR(3, 0xffff);
		MCERR_FSB_WR(3, 0xffff);
		EMASK_FSB_WR(3, 0xffff);

		ERR0_FSB_WR(3, nb_err0_fsb);
		ERR1_FSB_WR(3, nb_err1_fsb);
		ERR2_FSB_WR(3, nb_err2_fsb);
		MCERR_FSB_WR(3, nb_mcerr_fsb);
		EMASK_FSB_WR(3, nb_emask_fsb);
	}
}

void
nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
{
	uint16_t emask_fsb;

	emask_fsb = MCERR_FSB_RD(fsb);
	if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
		MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
		nb_mask_mc_set = 1;
	}
}

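/*
 * Undo any additional masking applied by the nb_*_mask_mc() routines by
 * restoring the MCERR routing registers to the values programmed at
 * initialization time.
 */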
void
nb_mask_mc_reset()
{
	MCERR_FBD_WR(l_mcerr_fbd);
	MCERR_INT_WR(l_mcerr_int);
	MCERR_FSB_WR(0, l_mcerr_fsb);
	MCERR_FSB_WR(1, l_mcerr_fsb);
	if (nb_chipset == INTEL_NB_7300) {
		MCERR_FSB_WR(2, l_mcerr_fsb);
		MCERR_FSB_WR(3, l_mcerr_fsb);
	}
}

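/*
 * Device initialization: create the error queue and mutex, program the
 * error reporting registers, build the DIMM and address maps, and enable
 * the hardware scrubber.  Returns EAGAIN if the error queue cannot be
 * created.
 */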
int
nb_dev_init()
{
	find_dimm_label_t *label_function_p;

	label_function_p = find_dimms_per_channel();
	mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
	nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
	    sizeof (nb_logout_t), 1, ERRORQ_VITAL);
	if (nb_queue == NULL) {
		mutex_destroy(&nb_mutex);
		return (EAGAIN);
	}
	nb_int_init();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();
	return (0);
}

int
nb_init()
{
	/* get vendor and device */
	nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
	switch (nb_chipset) {
	default:
		if (nb_5000_memory_controller == 0)
			return (ENOTSUP);
		break;
	case INTEL_NB_7300:
	case INTEL_NB_5000P:
	case INTEL_NB_5000X:
		break;
	case INTEL_NB_5000V:
	case INTEL_NB_5000Z:
		nb_number_memory_controllers = 1;
		break;
	}
	return (0);
}

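/*
 * Re-discover the memory configuration and reprogram the error reporting
 * registers, then free the DIMM table that was built by the previous pass.
 */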
void
nb_dev_reinit()
{
	int i, j;
	int nchannels = nb_number_memory_controllers * 2;
	nb_dimm_t **dimmpp;
	nb_dimm_t *dimmp;
	nb_dimm_t **old_nb_dimms;
	int old_nb_dimms_per_channel;
	find_dimm_label_t *label_function_p;

	old_nb_dimms = nb_dimms;
	old_nb_dimms_per_channel = nb_dimms_per_channel;

	dimm_fini();
	label_function_p = find_dimms_per_channel();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	nb_int_init();
	nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();

	dimmpp = old_nb_dimms;
	for (i = 0; i < nchannels; i++) {
		for (j = 0; j < old_nb_dimms_per_channel; j++) {
			dimmp = *dimmpp;
			if (dimmp) {
				kmem_free(dimmp, sizeof (nb_dimm_t));
				*dimmpp = NULL;
			}
			dimmpp++;
		}
	}
	kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) *
	    nb_number_memory_controllers * 2 * old_nb_dimms_per_channel);
}

void
nb_dev_unload()
{
	errorq_destroy(nb_queue);
	nb_queue = NULL;
	mutex_destroy(&nb_mutex);
	nb_int_fini();
	nb_fbd_fini();
	nb_fsb_fini();
	nb_pex_fini();
	nb_fini();
}

void
nb_unload()
{
}