1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
 * Memory management for Serengeti DR memory
31  */
32 
33 #include <sys/obpdefs.h>
34 #include <sys/types.h>
35 #include <sys/conf.h>
36 #include <sys/ddi.h>
37 #include <sys/cpuvar.h>
38 #include <sys/memlist_impl.h>
39 #include <sys/machsystm.h>
40 #include <sys/promif.h>
41 #include <sys/mem_cage.h>
42 #include <sys/kmem.h>
43 #include <sys/note.h>
44 #include <sys/lgrp.h>
45 
46 #include <sys/sbd_ioctl.h>
47 #include <sys/sbd.h>
48 #include <sys/sbdp_priv.h>
49 #include <sys/sbdp_mem.h>
50 #include <sys/sun4asi.h>
51 #include <sys/cheetahregs.h>
52 #include <sys/cpu_module.h>
53 #include <sys/esunddi.h>
54 
55 #include <vm/page.h>
56 
57 static int	sbdp_get_meminfo(pnode_t, int, uint64_t *, uint64_t *);
58 int		mc_read_regs(pnode_t, mc_regs_t *);
59 uint64_t	mc_get_addr(pnode_t, int, uint_t *);
60 static pnode_t	mc_get_sibling_cpu(pnode_t nodeid);
61 static int	mc_get_sibling_cpu_impl(pnode_t nodeid);
62 static sbd_cond_t mc_check_sibling_cpu(pnode_t nodeid);
63 static void	_sbdp_copy_rename_end(void);
64 static int	sbdp_copy_rename__relocatable(sbdp_cr_handle_t *,
65 			struct memlist *, sbdp_rename_script_t *);
66 static int	sbdp_prep_rename_script(sbdp_cr_handle_t *);
67 static int	sbdp_get_lowest_addr_in_node(pnode_t, uint64_t *);
68 
69 extern void bcopy32_il(uint64_t, uint64_t);
70 extern void flush_ecache_il(uint64_t physaddr, size_t size, size_t linesize);
71 extern uint64_t lddphys_il(uint64_t physaddr);
72 extern uint64_t ldxasi_il(uint64_t physaddr, uint_t asi);
73 extern void sbdp_exec_script_il(sbdp_rename_script_t *rsp);
74 void sbdp_fill_bank_info(uint64_t, sbdp_bank_t **);
75 int sbdp_add_nodes_banks(pnode_t node, sbdp_bank_t **banks);
76 void sbdp_add_bank_to_seg(sbdp_bank_t *);
77 void sbdp_remove_bank_from_seg(sbdp_bank_t *);
78 uint64_t sbdp_determine_slice(sbdp_handle_t *);
79 sbdp_seg_t *sbdp_get_seg(uint64_t);
80 #ifdef DEBUG
81 void sbdp_print_seg(sbdp_seg_t *);
82 #endif
83 
84 /*
 * Head of the system segments linked list
86  */
87 sbdp_seg_t *sys_seg = NULL;
88 
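/*
 * Pick the slice size that covers this board's memory, based on how much
 * memory the board currently has (16GB, 32GB or 64GB slices).
 */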
89 uint64_t
90 sbdp_determine_slice(sbdp_handle_t *hp)
91 {
92 	int size;
93 
94 	size = sbdp_get_mem_size(hp);
95 
96 	if (size <= SG_SLICE_16G_SIZE) {
97 		return (SG_SLICE_16G_SIZE);
98 	} else if (size <= SG_SLICE_32G_SIZE) {
99 		return (SG_SLICE_32G_SIZE);
100 	} else {
101 		return (SG_SLICE_64G_SIZE);
102 	}
103 }
104 
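/*
 * The alignment required for memory on a board is simply the slice size
 * for that board.
 */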
105 /* ARGSUSED */
106 int
107 sbdp_get_mem_alignment(sbdp_handle_t *hp, dev_info_t *dip, uint64_t *align)
108 {
109 	*align = sbdp_determine_slice(hp);
110 	return (0);
111 }
112 
113 
114 void
115 sbdp_memlist_dump(struct memlist *mlist)
116 {
117 	register struct memlist *ml;
118 
119 	if (mlist == NULL) {
120 		SBDP_DBG_MEM("memlist> EMPTY\n");
121 	} else {
122 		for (ml = mlist; ml; ml = ml->next)
123 			SBDP_DBG_MEM("memlist>  0x%" PRIx64", 0x%" PRIx64"\n",
124 			    ml->address, ml->size);
125 	}
126 }
127 
128 struct mem_arg {
129 	int	board;
130 	int	ndips;
131 	dev_info_t **list;
132 };
133 
134 /*
 * Returns the mem dip with a hold acquired on it
136  */
137 static int
138 sbdp_get_mem_dip(pnode_t node, void *arg, uint_t flags)
139 {
140 	_NOTE(ARGUNUSED(flags))
141 
142 	dev_info_t *dip;
143 	pnode_t nodeid;
144 	mem_op_t mem = {0};
145 	struct mem_arg *ap = arg;
146 
147 	if (node == OBP_BADNODE || node == OBP_NONODE)
148 		return (DDI_FAILURE);
149 
150 	mem.nodes = &nodeid;
151 	mem.board = ap->board;
152 	mem.nmem = 0;
153 
154 	(void) sbdp_is_mem(node, &mem);
155 
156 	ASSERT(mem.nmem == 0 || mem.nmem == 1);
157 
158 	if (mem.nmem == 0 || nodeid != node)
159 		return (DDI_FAILURE);
160 
161 	dip = e_ddi_nodeid_to_dip(nodeid);
162 	if (dip) {
163 		ASSERT(ap->ndips < SBDP_MAX_MEM_NODES_PER_BOARD);
164 		ap->list[ap->ndips++] = dip;
165 	}
166 	return (DDI_SUCCESS);
167 }
168 
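/*
 * Build a memlist describing the physical spans of memory on this board,
 * found by walking the PROM tree for the board's memory controllers.
 */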
169 struct memlist *
170 sbdp_get_memlist(sbdp_handle_t *hp, dev_info_t *dip)
171 {
172 	_NOTE(ARGUNUSED(dip))
173 
174 	int i, j, skip = 0;
175 	dev_info_t	*list[SBDP_MAX_MEM_NODES_PER_BOARD];
176 	struct mem_arg	arg = {0};
177 	uint64_t	base_pa, size;
178 	struct memlist	*mlist = NULL;
179 
180 	list[0] = NULL;
181 	arg.board = hp->h_board;
182 	arg.list = list;
183 
184 	sbdp_walk_prom_tree(prom_rootnode(), sbdp_get_mem_dip, &arg);
185 
186 	for (i = 0; i < arg.ndips; i++) {
187 		if (list[i] == NULL)
188 			continue;
189 
190 		size = 0;
191 		for (j = 0; j < SBDP_MAX_MCS_PER_NODE; j++) {
192 			if (sbdp_get_meminfo(ddi_get_nodeid(list[i]), j,
193 			    &size, &base_pa)) {
194 				skip++;
195 				continue;
196 			}
197 			if (size == -1 || size == 0)
198 				continue;
199 
200 			(void) memlist_add_span(base_pa, size, &mlist);
201 		}
202 
203 		/*
204 		 * Release hold acquired in sbdp_get_mem_dip()
205 		 */
206 		ddi_release_devi(list[i]);
207 	}
208 
209 	/*
210 	 * XXX - The following two lines are from existing code.
211 	 * However, this appears to be incorrect - this check should be
212 	 * made for each dip in list i.e within the for(i) loop.
213 	 */
214 	if (skip == SBDP_MAX_MCS_PER_NODE)
215 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
216 
217 	SBDP_DBG_MEM("memlist for board %d\n", hp->h_board);
218 	sbdp_memlist_dump(mlist);
219 	return (mlist);
220 }
221 
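/*
 * Duplicate a memlist.  Returns NULL if the original list is empty or if
 * an allocation fails part way through.
 */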
222 struct memlist *
223 sbdp_memlist_dup(struct memlist *mlist)
224 {
225 	struct memlist *hl, *prev;
226 
227 	if (mlist == NULL)
228 		return (NULL);
229 
230 	prev = NULL;
231 	hl = NULL;
232 	for (; mlist; mlist = mlist->next) {
233 		struct memlist *mp;
234 
235 		mp = memlist_get_one();
236 		if (mp == NULL) {
237 			if (hl != NULL)
238 				memlist_free_list(hl);
239 			hl = NULL;
240 			break;
241 		}
242 		mp->address = mlist->address;
243 		mp->size = mlist->size;
244 		mp->next = NULL;
245 		mp->prev = prev;
246 
247 		if (prev == NULL)
248 			hl = mp;
249 		else
250 			prev->next = mp;
251 		prev = mp;
252 	}
253 
254 	return (hl);
255 }
256 
257 int
258 sbdp_del_memlist(sbdp_handle_t *hp, struct memlist *mlist)
259 {
260 	_NOTE(ARGUNUSED(hp))
261 
262 	memlist_free_list(mlist);
263 
264 	return (0);
265 }
266 
267 /*ARGSUSED*/
268 static void
269 sbdp_flush_ecache(uint64_t a, uint64_t b)
270 {
271 	cpu_flush_ecache();
272 }
273 
274 typedef enum {
275 	SBDP_CR_OK,
276 	SBDP_CR_MC_IDLE_ERR
277 } sbdp_cr_err_t;
278 
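/*
 * Move the memory from the source board to the target board via
 * copy-rename.  In outline (see the code below for the details):
 *
 *	- copy the relocatable copy-rename routine into a single page
 *	- build the rename script next to it in the same page
 *	- bind to the current cpu and quiesce the OS
 *	- run the copy-rename with correctable-error reporting disabled
 *	- resume the OS, notify the lgroup framework of the rename, then
 *	  swap the cached bank lists, update the board info and tell the
 *	  SC that the slices have been swapped
 */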
279 int
280 sbdp_move_memory(sbdp_handle_t *hp, int t_bd)
281 {
282 	sbdp_bd_t	*s_bdp, *t_bdp;
283 	int		err = 0;
284 	caddr_t		mempage;
285 	ulong_t		data_area, index_area;
286 	ulong_t		e_area, e_page;
287 	int		availlen, indexlen, funclen, scriptlen;
288 	int		*indexp;
289 	time_t		copytime;
290 	int		(*funcp)();
291 	size_t		size;
292 	struct memlist	*mlist;
293 	sbdp_sr_handle_t	*srhp;
294 	sbdp_rename_script_t	*rsp;
295 	sbdp_rename_script_t	*rsbuffer;
296 	sbdp_cr_handle_t	*cph;
297 	int		linesize;
298 	uint64_t	neer;
299 	sbdp_cr_err_t	cr_err;
300 
301 	cph =  kmem_zalloc(sizeof (sbdp_cr_handle_t), KM_SLEEP);
302 
303 	SBDP_DBG_MEM("moving memory from memory board %d to board %d\n",
304 	    hp->h_board, t_bd);
305 
306 	s_bdp = sbdp_get_bd_info(hp->h_wnode, hp->h_board);
307 	t_bdp = sbdp_get_bd_info(hp->h_wnode, t_bd);
308 
309 	if ((s_bdp == NULL) || (t_bdp == NULL)) {
310 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
311 		return (-1);
312 	}
313 
314 	funclen = (int)((ulong_t)_sbdp_copy_rename_end -
315 			(ulong_t)sbdp_copy_rename__relocatable);
316 
317 	if (funclen > PAGESIZE) {
318 		cmn_err(CE_WARN,
319 		    "sbdp: copy-rename funclen (%d) > PAGESIZE (%d)",
320 		    funclen, PAGESIZE);
321 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
322 		return (-1);
323 	}
324 
325 	/*
326 	 * mempage will be page aligned, since we're calling
327 	 * kmem_alloc() with an exact multiple of PAGESIZE.
328 	 */
329 	mempage = kmem_alloc(PAGESIZE, KM_SLEEP);
330 
331 	SBDP_DBG_MEM("mempage = 0x%p\n", mempage);
332 
333 	/*
334 	 * Copy the code for the copy-rename routine into
335 	 * a page aligned piece of memory.  We do this to guarantee
336 	 * that we're executing within the same page and thus reduce
337 	 * the possibility of cache collisions between different
338 	 * pages.
339 	 */
340 	bcopy((caddr_t)sbdp_copy_rename__relocatable, mempage, funclen);
341 
342 	funcp = (int (*)())mempage;
343 
344 	SBDP_DBG_MEM("copy-rename funcp = 0x%p (len = 0x%x)\n", funcp, funclen);
345 
346 	/*
347 	 * Prepare data page that will contain script of
348 	 * operations to perform during copy-rename.
349 	 * Allocate temporary buffer to hold script.
350 	 */
351 
352 	size = sizeof (sbdp_rename_script_t) * SBDP_RENAME_MAXOP;
353 	rsbuffer = kmem_zalloc(size, KM_SLEEP);
354 
355 	cph->s_bdp = s_bdp;
356 	cph->t_bdp = t_bdp;
357 	cph->script = rsbuffer;
358 
359 	/*
	 * Make sure we don't migrate to another cpu, since the steps that
	 * follow depend on running on this same cpu.
362 	 */
363 	affinity_set(CPU_CURRENT);
364 	scriptlen = sbdp_prep_rename_script(cph);
365 	if (scriptlen <= 0) {
366 		cmn_err(CE_WARN,
367 			"sbdp failed to prep for copy-rename");
368 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
369 		err = 1;
370 		goto cleanup;
371 	}
372 	SBDP_DBG_MEM("copy-rename script length = 0x%x\n", scriptlen);
373 
374 	indexlen = sizeof (*indexp) << 1;
375 
376 	if ((funclen + scriptlen + indexlen) > PAGESIZE) {
377 		cmn_err(CE_WARN,
378 			"sbdp: func len (%d) + script len (%d) "
379 			"+ index len (%d) > PAGESIZE (%d)",
380 			funclen, scriptlen, indexlen, PAGESIZE);
381 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
382 		err = 1;
383 		goto cleanup;
384 	}
385 
386 	linesize = cpunodes[CPU->cpu_id].ecache_linesize;
387 
388 	/*
389 	 * Find aligned area within data page to maintain script.
390 	 */
391 	data_area = (ulong_t)mempage;
392 	data_area += (ulong_t)funclen + (ulong_t)(linesize - 1);
393 	data_area &= ~((ulong_t)(linesize - 1));
394 
395 	availlen = PAGESIZE - indexlen;
396 	availlen -= (int)(data_area - (ulong_t)mempage);
397 
398 	if (availlen < scriptlen) {
399 		cmn_err(CE_WARN,
400 			"sbdp: available len (%d) < script len (%d)",
401 			availlen, scriptlen);
402 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
403 		err = 1;
404 		goto cleanup;
405 	}
406 
407 	SBDP_DBG_MEM("copy-rename script data area = 0x%lx\n",
408 		data_area);
409 
410 	bcopy((caddr_t)rsbuffer, (caddr_t)data_area, scriptlen);
411 	rsp = (sbdp_rename_script_t *)data_area;
412 
413 	index_area = data_area + (ulong_t)scriptlen +
414 			(ulong_t)(linesize - 1);
415 	index_area &= ~((ulong_t)(linesize - 1));
416 	indexp = (int *)index_area;
417 	indexp[0] = 0;
418 	indexp[1] = 0;
419 
420 	e_area = index_area + (ulong_t)indexlen;
421 	e_page = (ulong_t)mempage + PAGESIZE;
422 	if (e_area > e_page) {
423 		cmn_err(CE_WARN,
424 			"sbdp: index area size (%d) > available (%d)\n",
425 			indexlen, (int)(e_page - index_area));
426 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
427 		err = 1;
428 		goto cleanup;
429 	}
430 
431 	SBDP_DBG_MEM("copy-rename index area = 0x%p\n", indexp);
432 
433 	SBDP_DBG_MEM("cpu %d\n", CPU->cpu_id);
434 
435 	srhp = sbdp_get_sr_handle();
436 	ASSERT(srhp);
437 
438 	srhp->sr_flags = hp->h_flags;
439 
440 	copytime = lbolt;
441 
442 	mutex_enter(&s_bdp->bd_mutex);
443 	mlist = sbdp_memlist_dup(s_bdp->ml);
444 	mutex_exit(&s_bdp->bd_mutex);
445 
446 	if (mlist == NULL) {
447 		SBDP_DBG_MEM("Didn't find memory list\n");
448 	}
449 	SBDP_DBG_MEM("src\n\tbd\t%d\n\tnode\t%d\n\tbpa 0x%lx\n\tnodes\t%p\n",
450 	    s_bdp->bd, s_bdp->wnode, s_bdp->bpa, s_bdp->nodes);
451 	sbdp_memlist_dump(s_bdp->ml);
452 	SBDP_DBG_MEM("tgt\n\tbd\t%d\n\tnode\t%d\n\tbpa 0x%lx\n\tnodes\t%p\n",
453 	    t_bdp->bd, t_bdp->wnode, t_bdp->bpa, t_bdp->nodes);
454 	sbdp_memlist_dump(t_bdp->ml);
455 
456 	/*
457 	 * Quiesce the OS.
458 	 */
459 	if (sbdp_suspend(srhp)) {
460 		sbd_error_t	*sep;
461 		cmn_err(CE_WARN,
462 			"sbdp: failed to quiesce OS for copy-rename");
463 		sep = &srhp->sep;
464 		sbdp_set_err(hp->h_err, sep->e_code, sep->e_rsc);
465 		sbdp_release_sr_handle(srhp);
466 		sbdp_del_memlist(hp, mlist);
467 		err = 1;
468 		goto cleanup;
469 	}
470 
471 	/*
472 	 * =================================
473 	 * COPY-RENAME BEGIN.
474 	 * =================================
475 	 */
476 	SBDP_DBG_MEM("s_base 0x%lx t_base 0x%lx\n", cph->s_bdp->bpa,
477 	    cph->t_bdp->bpa);
478 
479 	cph->ret = 0;
480 
481 	SBDP_DBG_MEM("cph return 0x%lx\n", cph->ret);
482 
483 	SBDP_DBG_MEM("Flushing all of the cpu caches\n");
484 	xc_all(sbdp_flush_ecache, 0, 0);
485 
486 	/* disable CE reporting */
487 	neer = get_error_enable();
488 	set_error_enable(neer & ~EN_REG_CEEN);
489 
490 	cr_err = (*funcp)(cph, mlist, rsp);
491 
492 	/* enable CE reporting */
493 	set_error_enable(neer);
494 
495 	SBDP_DBG_MEM("s_base 0x%lx t_base 0x%lx\n", cph->s_bdp->bpa,
496 	    cph->t_bdp->bpa);
497 	SBDP_DBG_MEM("cph return 0x%lx\n", cph->ret);
498 	SBDP_DBG_MEM("after execking the function\n");
499 
500 	/*
501 	 * =================================
502 	 * COPY-RENAME END.
503 	 * =================================
504 	 */
505 	SBDP_DBG_MEM("err is 0x%d\n", err);
506 
507 	/*
508 	 * Resume the OS.
509 	 */
510 	sbdp_resume(srhp);
511 	if (srhp->sep.e_code) {
512 		sbd_error_t	*sep;
513 		cmn_err(CE_WARN,
514 		    "sbdp: failed to resume OS for copy-rename");
515 		sep = &srhp->sep;
516 		sbdp_set_err(hp->h_err, sep->e_code, sep->e_rsc);
517 		err = 1;
518 	}
519 
520 	copytime = lbolt - copytime;
521 
522 	sbdp_release_sr_handle(srhp);
523 	sbdp_del_memlist(hp, mlist);
524 
525 	SBDP_DBG_MEM("copy-rename elapsed time = %ld ticks (%ld secs)\n",
526 		copytime, copytime / hz);
527 
528 	switch (cr_err) {
529 	case SBDP_CR_OK:
530 		break;
531 	case SBDP_CR_MC_IDLE_ERR: {
532 		dev_info_t *dip;
533 		pnode_t nodeid = cph->busy_mc->node;
534 		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
535 
536 		dip = e_ddi_nodeid_to_dip(nodeid);
537 
538 		ASSERT(dip != NULL);
539 
540 		(void) ddi_pathname(dip, path);
541 		ddi_release_devi(dip);
542 		cmn_err(CE_WARN, "failed to idle memory controller %s: "
543 		    "copy-rename aborted", path);
544 		kmem_free(path, MAXPATHLEN);
545 		sbdp_set_err(hp->h_err, ESBD_MEMFAIL, NULL);
546 		err = 1;
547 		break;
548 	}
549 	default:
550 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
551 		cmn_err(CE_WARN, "unknown copy-rename error code (%d)", cr_err);
552 		err = 1;
553 		break;
554 	}
555 
556 	if (err)
557 		goto cleanup;
558 
559 	/*
560 	 * Rename memory for lgroup.
561 	 * Source and target board numbers are packaged in arg.
562 	 */
563 	lgrp_plat_config(LGRP_CONFIG_MEM_RENAME,
564 		(uintptr_t)(s_bdp->bd | (t_bdp->bd << 16)));
565 
566 	/*
567 	 * swap list of banks
568 	 */
569 	sbdp_swap_list_of_banks(s_bdp, t_bdp);
570 
571 	/*
572 	 * Update the cached board info for both the source and the target
573 	 */
574 	sbdp_update_bd_info(s_bdp);
575 	sbdp_update_bd_info(t_bdp);
576 
577 	/*
578 	 * Tell the sc that we have swapped slices.
579 	 */
580 	if (sbdp_swap_slices(s_bdp->bd, t_bdp->bd) != 0) {
		/* This is dangerous. The in-use slice could be re-used! */
		SBDP_DBG_MEM("swapping slices failed\n");
583 	}
584 
585 cleanup:
586 	kmem_free(rsbuffer, size);
587 	kmem_free(mempage, PAGESIZE);
588 	kmem_free(cph, sizeof (sbdp_cr_handle_t));
589 	affinity_clear();
590 
591 	return (err ? -1 : 0);
592 }
593 
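/*
 * For each valid bank behind this memory controller, append two entries to
 * the rename script: one that writes the bank's decode register with the
 * valid bit cleared, and one that rewrites it with the valid bit set.
 * When inval is set, new_base is programmed as is (the caller passes an
 * invalid temporary base); otherwise new_base is adjusted by the bank's
 * offset from the board's base physical address bpa.
 */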
594 static int
595 sbdp_copy_regs(pnode_t node, uint64_t bpa, uint64_t new_base, int inval,
596 	sbdp_rename_script_t *rsp, int *index)
597 {
598 	int		i, m;
599 	mc_regs_t	regs;
600 	uint64_t	*mc_decode;
601 
602 	if (mc_read_regs(node, &regs)) {
603 		SBDP_DBG_MEM("sbdp_copy_regs: failed to read source Decode "
604 		    "Regs");
605 		return (-1);
606 	}
607 
608 	mc_decode = regs.mc_decode;
609 
610 	m = *index;
611 	for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
612 		uint64_t	offset, seg_pa, tmp_base;
613 
614 		/*
615 		 * Skip invalid banks
616 		 */
617 		if ((mc_decode[i] & SG_DECODE_VALID) != SG_DECODE_VALID) {
618 			continue;
619 		}
620 
621 		tmp_base = new_base;
622 		if (!inval) {
623 			/*
624 			 * We need to calculate the offset from the base pa
625 			 * to add it appropriately to the new_base.
626 			 * The offset needs to be in UM relative to the mc
627 			 * decode register.  Since we are going from physical
628 			 * address to UM, we need to shift it by PHYS2UM_SHIFT.
629 			 * To get it ready to OR it with the MC decode reg,
630 			 * we need to shift it left MC_UM_SHIFT
631 			 */
632 			seg_pa = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
633 			offset = (seg_pa - bpa);
634 			/* Convert tmp_base into a physical address */
635 			tmp_base = (tmp_base >> MC_UM_SHIFT) << PHYS2UM_SHIFT;
636 			tmp_base += offset;
637 			/* Convert tmp_base to be MC reg ready */
638 			tmp_base = (tmp_base >> PHYS2UM_SHIFT) << MC_UM_SHIFT;
639 		}
640 
641 		mc_decode[i] &= ~SG_DECODE_UM;
642 		mc_decode[i] |= tmp_base;
643 		mc_decode[i] |= SG_DECODE_VALID;
644 
645 		/*
646 		 * Step 1:	Write source base address to the MC
647 		 *		with present bit off.
648 		 */
649 		rsp[m].masr_addr = mc_get_addr(node, i, &rsp[m].asi);
650 		rsp[m].masr = mc_decode[i] & ~SG_DECODE_VALID;
651 		m++;
652 		/*
653 		 * Step 2:	Now rewrite the mc reg with present bit on.
654 		 */
655 		rsp[m].masr_addr = rsp[m-1].masr_addr;
656 		rsp[m].masr = mc_decode[i];
657 		rsp[m].asi = rsp[m-1].asi;
658 		m++;
659 	}
660 
661 	*index = m;
662 	return (0);
663 }
664 
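/*
 * Return the physical address of this memory controller's registers,
 * taken from the node's "reg" property.
 */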
665 static int
666 sbdp_get_reg_addr(pnode_t nodeid, uint64_t *pa)
667 {
668 	mc_regspace	reg;
669 	int		len;
670 
671 	len = prom_getproplen(nodeid, "reg");
672 	if (len != sizeof (mc_regspace))
673 		return (-1);
674 
675 	if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
676 		return (-1);
677 
678 	ASSERT(pa != NULL);
679 
680 	*pa = ((uint64_t)reg.regspec_addr_hi) << 32;
681 	*pa |= (uint64_t)reg.regspec_addr_lo;
682 
683 	return (0);
684 }
685 
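/*
 * Return the cpu implementation number of the cpu (or CMP core) that is
 * the sibling of this memory controller node, or -1 on failure.
 */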
686 static int
687 mc_get_sibling_cpu_impl(pnode_t mc_node)
688 {
689 	int	len, impl;
690 	pnode_t	cpu_node;
691 	char	namebuf[OBP_MAXPROPNAME];
692 
693 	cpu_node = mc_get_sibling_cpu(mc_node);
694 	if (cpu_node == OBP_NONODE) {
695 		SBDP_DBG_MEM("mc_get_sibling_cpu failed: dnode=0x%x\n",
696 		    mc_node);
697 		return (-1);
698 	}
699 
700 	len = prom_getproplen(cpu_node, "name");
701 	if (len < 0) {
702 		SBDP_DBG_MEM("invalid prom_getproplen for name prop: "
703 		    "len=%d, dnode=0x%x\n", len, cpu_node);
704 		return (-1);
705 	}
706 
707 	if (prom_getprop(cpu_node, "name", (caddr_t)namebuf) == -1) {
708 		SBDP_DBG_MEM("failed to read name property for dnode=0x%x\n",
709 		    cpu_node);
710 		return (-1);
711 	}
712 
713 	/*
714 	 * If this is a CMP node, the child has the implementation
715 	 * property.
716 	 */
717 	if (strcmp(namebuf, "cmp") == 0) {
718 		cpu_node = prom_childnode(cpu_node);
719 		ASSERT(cpu_node != OBP_NONODE);
720 	}
721 
722 	if (prom_getprop(cpu_node, "implementation#", (caddr_t)&impl) == -1) {
723 		SBDP_DBG_MEM("failed to read implementation# property for "
724 		    "dnode=0x%x\n", cpu_node);
725 		return (-1);
726 	}
727 
728 	SBDP_DBG_MEM("mc_get_sibling_cpu_impl: found impl=0x%x, dnode=0x%x\n",
729 	    impl, cpu_node);
730 
731 	return (impl);
732 }
733 
734 /*
735  * Provide EMU Activity Status register ASI and address.  Only valid for
736  * Panther processors.
737  */
738 static int
739 mc_get_idle_reg(pnode_t nodeid, uint64_t *addr, uint_t *asi)
740 {
741 	int	portid;
742 	uint64_t reg_pa;
743 
744 	ASSERT(nodeid != OBP_NONODE);
745 	ASSERT(mc_get_sibling_cpu_impl(nodeid) == PANTHER_IMPL);
746 
747 	if (prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0 ||
748 	    portid == -1) {
749 		SBDP_DBG_MEM("mc_get_idle_reg: failed to read portid prop "
750 		    "for dnode=0x%x\n", nodeid);
751 		return (-1);
752 	}
753 
754 	if (sbdp_get_reg_addr(nodeid, &reg_pa) != 0) {
755 		SBDP_DBG_MEM("mc_get_idle_reg: failed to read reg prop "
756 		    "for dnode=0x%x\n", nodeid);
757 		return (-1);
758 	}
759 
760 	/*
761 	 * Local access will be via ASI 0x4a, otherwise via Safari PIO.
	 * This assumes the copy-rename will later run on this same processor,
	 * hence we expect the thread to already be bound to a cpu.
764 	 */
765 	ASSERT(curthread->t_bound_cpu == CPU);
766 	if (SG_CPUID_TO_PORTID(CPU->cpu_id) == portid) {
767 		*addr = ASI_EMU_ACT_STATUS_VA;
768 		*asi = ASI_SAFARI_CONFIG;
769 	} else {
770 		*addr = MC_ACTIVITY_STATUS(reg_pa);
771 		*asi = ASI_IO;
772 	}
773 
774 	return (0);
775 }
776 
777 /*
778  * If non-Panther board, add phys_banks entry for each physical bank.
779  * If Panther board, add mc_idle_regs entry for each EMU Activity Status
780  * register.  Increment the array indices b_idx and r_idx for each entry
781  * populated by this routine.
782  *
783  * The caller is responsible for allocating sufficient array entries.
784  */
785 static int
786 sbdp_prep_mc_idle_one(sbdp_bd_t *bp, sbdp_rename_script_t phys_banks[],
787     int *b_idx, sbdp_mc_idle_script_t mc_idle_regs[], int *r_idx)
788 {
789 	int		i, j;
790 	pnode_t		*memnodes;
791 	mc_regs_t	regs;
792 	uint64_t	addr;
793 	uint_t		asi;
794 	sbd_cond_t	sibling_cpu_cond;
795 	int		impl = -1;
796 
797 	memnodes = bp->nodes;
798 
799 	for (i = 0; i < SBDP_MAX_MEM_NODES_PER_BOARD; i++) {
800 		if (memnodes[i] == OBP_NONODE) {
801 			continue;
802 		}
803 
804 		/* MC should not be accessed if cpu has failed  */
805 		sibling_cpu_cond = mc_check_sibling_cpu(memnodes[i]);
806 		if (sibling_cpu_cond == SBD_COND_FAILED ||
807 		    sibling_cpu_cond == SBD_COND_UNUSABLE) {
808 			SBDP_DBG_MEM("sbdp: skipping MC with failed cpu: "
809 			    "board=%d, mem node=%d, condition=%d",
810 			    bp->bd, i, sibling_cpu_cond);
811 			continue;
812 		}
813 
814 		/*
815 		 * Initialize the board cpu type, assuming all board cpus are
816 		 * the same type.  This is true of all Cheetah-based processors.
817 		 * Failure to read the cpu type is considered a fatal error.
818 		 */
819 		if (impl == -1) {
820 			impl = mc_get_sibling_cpu_impl(memnodes[i]);
821 			if (impl == -1) {
822 				SBDP_DBG_MEM("sbdp: failed to get cpu impl "
823 				    "for MC dnode=0x%x\n", memnodes[i]);
824 				return (-1);
825 			}
826 		}
827 
828 		switch (impl) {
829 		case CHEETAH_IMPL:
830 		case CHEETAH_PLUS_IMPL:
831 		case JAGUAR_IMPL:
832 			if (mc_read_regs(memnodes[i], &regs)) {
833 				SBDP_DBG_MEM("sbdp: failed to read source "
834 				    "Decode Regs of board %d", bp->bd);
835 				return (-1);
836 			}
837 
838 			for (j = 0; j < SBDP_MAX_MCS_PER_NODE; j++) {
839 				uint64_t mc_decode = regs.mc_decode[j];
840 
841 				if ((mc_decode & SG_DECODE_VALID) !=
842 				    SG_DECODE_VALID) {
843 					continue;
844 				}
845 
846 				addr = (MC_BASE(mc_decode) << PHYS2UM_SHIFT) |
847 				    (MC_LM(mc_decode) << MC_LM_SHIFT);
848 
849 				phys_banks[*b_idx].masr_addr = addr;
850 				phys_banks[*b_idx].masr = 0;	/* unused */
851 				phys_banks[*b_idx].asi = ASI_MEM;
852 				(*b_idx)++;
853 			}
854 			break;
855 		case PANTHER_IMPL:
856 			if (mc_get_idle_reg(memnodes[i], &addr, &asi)) {
857 				return (-1);
858 			}
859 
860 			mc_idle_regs[*r_idx].addr = addr;
861 			mc_idle_regs[*r_idx].asi = asi;
862 			mc_idle_regs[*r_idx].node = memnodes[i];
863 			mc_idle_regs[*r_idx].bd_id = bp->bd;
864 			(*r_idx)++;
865 			break;
866 		default:
867 			cmn_err(CE_WARN, "Unknown cpu implementation=0x%x",
868 			    impl);
869 			ASSERT(0);
870 			return (-1);
871 		}
872 	}
873 
874 	return (0);
875 }
876 
877 /*
878  * For non-Panther MCs that do not support read-bypass-write, we do a read
879  * to each physical bank, relying on the reads to block until all outstanding
880  * write requests have completed.  This mechanism is referred to as the bus
881  * sync list and is used for Cheetah, Cheetah+, and Jaguar processors.  The
882  * bus sync list PAs for the source and target are kept together and comprise
883  * Section 1 of the rename script.
884  *
885  * For Panther processors that support the EMU Activity Status register,
886  * we ensure the writes have completed by polling the MCU_ACT_STATUS
887  * field several times to make sure the MC queues are empty.  The
888  * EMU Activity Status register PAs for the source and target are
889  * kept together and comprise Section 2 of the rename script.
890  */
891 static int
892 sbdp_prep_mc_idle_script(sbdp_bd_t *s_bp, sbdp_bd_t *t_bp,
893     sbdp_rename_script_t *rsp, int *rsp_idx)
894 {
895 	sbdp_rename_script_t *phys_banks;
896 	sbdp_mc_idle_script_t *mc_idle_regs;
897 	int	max_banks, max_regs;
898 	size_t	bsize, msize;
899 	int	nbanks = 0, nregs = 0;
900 	int	i;
901 
902 	/* CONSTCOND */
903 	ASSERT(sizeof (sbdp_rename_script_t) ==
904 	    sizeof (sbdp_mc_idle_script_t));
905 
906 	/* allocate space for both source and target */
907 	max_banks = SBDP_MAX_MEM_NODES_PER_BOARD *
908 	    SG_MAX_BANKS_PER_MC * 2;
909 	max_regs = SBDP_MAX_MEM_NODES_PER_BOARD * 2;
910 
911 	bsize = sizeof (sbdp_rename_script_t) * max_banks;
912 	msize = sizeof (sbdp_mc_idle_script_t) * max_regs;
913 
914 	phys_banks = kmem_zalloc(bsize, KM_SLEEP);
915 	mc_idle_regs = kmem_zalloc(msize, KM_SLEEP);
916 
917 	if (sbdp_prep_mc_idle_one(t_bp, phys_banks, &nbanks,
918 	    mc_idle_regs, &nregs) != 0 ||
919 	    sbdp_prep_mc_idle_one(s_bp, phys_banks, &nbanks,
920 	    mc_idle_regs, &nregs) != 0) {
921 		kmem_free(phys_banks, bsize);
922 		kmem_free(mc_idle_regs, msize);
923 		return (-1);
924 	}
925 
926 	/* section 1 */
927 	for (i = 0; i < nbanks; i++)
928 		rsp[(*rsp_idx)++] = phys_banks[i];
929 
930 	/* section 2 */
931 	for (i = 0; i < nregs; i++)
932 		rsp[(*rsp_idx)++] = *(sbdp_rename_script_t *)&mc_idle_regs[i];
933 
934 	kmem_free(phys_banks, bsize);
935 	kmem_free(mc_idle_regs, msize);
936 
937 	return (0);
938 }
939 
940 /*
 * This code assumes a single mem-unit.
942  */
943 static int
944 sbdp_prep_rename_script(sbdp_cr_handle_t *cph)
945 {
946 	pnode_t			*s_nodes, *t_nodes;
947 	int			m = 0, i;
948 	sbdp_bd_t		s_bd, t_bd, *s_bdp, *t_bdp;
949 	sbdp_rename_script_t	*rsp;
950 	uint64_t		new_base, old_base, temp_base;
951 	int			s_num, t_num;
952 
953 	mutex_enter(&cph->s_bdp->bd_mutex);
954 	s_bd = *cph->s_bdp;
955 	mutex_exit(&cph->s_bdp->bd_mutex);
956 	mutex_enter(&cph->t_bdp->bd_mutex);
957 	t_bd = *cph->t_bdp;
958 	mutex_exit(&cph->t_bdp->bd_mutex);
959 
960 	s_bdp = &s_bd;
961 	t_bdp = &t_bd;
962 	s_nodes = s_bdp->nodes;
963 	t_nodes = t_bdp->nodes;
964 	s_num = s_bdp->nnum;
965 	t_num = t_bdp->nnum;
966 	rsp = cph->script;
967 
968 	/*
969 	 * Calculate the new base address for the target bd
970 	 */
971 
972 	new_base = (s_bdp->bpa >> PHYS2UM_SHIFT) << MC_UM_SHIFT;
973 
974 	/*
975 	 * Calculate the old base address for the source bd
976 	 */
977 
978 	old_base = (t_bdp->bpa >> PHYS2UM_SHIFT) << MC_UM_SHIFT;
979 
980 	temp_base = SG_INVAL_UM;
981 
982 	SBDP_DBG_MEM("new 0x%lx old_base ox%lx temp_base 0x%lx\n", new_base,
983 	    old_base, temp_base);
984 
985 	m = 0;
986 
987 	/*
988 	 * Ensure the MC queues have been idled on the source and target
989 	 * following the copy.
990 	 */
991 	if (sbdp_prep_mc_idle_script(s_bdp, t_bdp, rsp, &m) < 0)
992 		return (-1);
993 
994 	/*
995 	 * Script section terminator
996 	 */
997 	rsp[m].masr_addr = 0ull;
998 	rsp[m].masr = 0;
999 	rsp[m].asi = 0;
1000 	m++;
1001 
1002 	/*
1003 	 * Invalidate the base in the target mc registers
1004 	 */
1005 	for (i = 0; i < t_num; i++) {
1006 		if (sbdp_copy_regs(t_nodes[i], t_bdp->bpa, temp_base, 1, rsp,
1007 		    &m) < 0)
1008 			return (-1);
1009 	}
1010 	/*
1011 	 * Invalidate the base in the source mc registers
1012 	 */
1013 	for (i = 0; i < s_num; i++) {
1014 		if (sbdp_copy_regs(s_nodes[i], s_bdp->bpa, temp_base, 1, rsp,
1015 		    &m) < 0)
1016 			return (-1);
1017 	}
1018 	/*
	 * Copy the new base into the target's mc registers
1020 	 */
1021 	for (i = 0; i < t_num; i++) {
1022 		if (sbdp_copy_regs(t_nodes[i], t_bdp->bpa, new_base, 0, rsp,
1023 		    &m) < 0)
1024 			return (-1);
1025 	}
1026 	/*
1027 	 * Copy the old base into the source mc registers
1028 	 */
1029 	for (i = 0; i < s_num; i++) {
1030 		if (sbdp_copy_regs(s_nodes[i], s_bdp->bpa, old_base, 0, rsp,
1031 		    &m) < 0)
1032 			return (-1);
1033 	}
1034 	/*
1035 	 * Zero masr_addr value indicates the END.
1036 	 */
1037 	rsp[m].masr_addr = 0ull;
1038 	rsp[m].masr = 0;
1039 	rsp[m].asi = 0;
1040 	m++;
1041 
1042 #ifdef DEBUG
1043 	{
1044 		int	i;
1045 
1046 		SBDP_DBG_MEM("dumping copy-rename script:\n");
1047 		for (i = 0; i < m; i++) {
1048 			SBDP_DBG_MEM("0x%lx = 0x%lx, asi 0x%x\n",
1049 				rsp[i].masr_addr, rsp[i].masr, rsp[i].asi);
1050 		}
1051 		DELAY(1000000);
1052 	}
1053 #endif /* DEBUG */
1054 
1055 	return (m * sizeof (sbdp_rename_script_t));
1056 }
1057 
1058 /*
 * The EMU Activity Status Register needs to read back as idle several
 * times in succession.  See Panther PRM 12.5.
1061  */
1062 #define	SBDP_MCU_IDLE_RETRIES	10
1063 #define	SBDP_MCU_IDLE_READS	3
1064 
1065 /*
1066  * Using the "__relocatable" suffix informs DTrace providers (and anything
1067  * else, for that matter) that this function's text may be manually relocated
1068  * elsewhere before it is executed.  That is, it cannot be safely instrumented
1069  * with any methodology that is PC-relative.
1070  */
1071 static int
1072 sbdp_copy_rename__relocatable(sbdp_cr_handle_t *hp, struct memlist *mlist,
1073 		register sbdp_rename_script_t *rsp)
1074 {
1075 	sbdp_cr_err_t	err = SBDP_CR_OK;
1076 	size_t		csize;
1077 	size_t		linesize;
1078 	uint_t		size;
1079 	uint64_t	caddr;
1080 	uint64_t	s_base, t_base;
1081 	sbdp_bd_t	*s_sbp, *t_sbp;
1082 	struct memlist	*ml;
1083 	sbdp_mc_idle_script_t *isp;
1084 	int		i;
1085 
1086 	caddr = ecache_flushaddr;
1087 	csize = (size_t)(cpunodes[CPU->cpu_id].ecache_size * 2);
1088 	linesize = (size_t)(cpunodes[CPU->cpu_id].ecache_linesize);
1089 
1090 	size = 0;
1091 	s_sbp = hp->s_bdp;
1092 	t_sbp = hp->t_bdp;
1093 
1094 	s_base = (uint64_t)s_sbp->bpa;
1095 	t_base = (uint64_t)t_sbp->bpa;
1096 
1097 	hp->ret = s_base;
1098 	/*
1099 	 * DO COPY.
1100 	 */
1101 	for (ml = mlist; ml; ml = ml->next) {
1102 		uint64_t	s_pa, t_pa;
1103 		uint64_t	nbytes;
1104 
1105 		s_pa = ml->address;
1106 		t_pa = t_base + (ml->address - s_base);
1107 		nbytes = ml->size;
1108 
1109 		size += nbytes;
1110 		while (nbytes != 0ull) {
1111 			/*
1112 			 * This copy does NOT use an ASI
1113 			 * that avoids the Ecache, therefore
1114 			 * the dst_pa addresses may remain
1115 			 * in our Ecache after the dst_pa
1116 			 * has been removed from the system.
1117 			 * A subsequent write-back to memory
1118 			 * will cause an ARB-stop because the
1119 			 * physical address no longer exists
1120 			 * in the system. Therefore we must
1121 			 * flush out local Ecache after we
1122 			 * finish the copy.
1123 			 */
1124 
1125 			/* copy 32 bytes at src_pa to dst_pa */
1126 			bcopy32_il(s_pa, t_pa);
1127 
1128 			/* increment by 32 bytes */
1129 			s_pa += (4 * sizeof (uint64_t));
1130 			t_pa += (4 * sizeof (uint64_t));
1131 
1132 			/* decrement by 32 bytes */
1133 			nbytes -= (4 * sizeof (uint64_t));
1134 		}
1135 	}
1136 
1137 	/*
1138 	 * Since bcopy32_il() does NOT use an ASI to bypass
1139 	 * the Ecache, we need to flush our Ecache after
1140 	 * the copy is complete.
1141 	 */
1142 	flush_ecache_il(caddr, csize, linesize);	/* inline version */
1143 
1144 	/*
1145 	 * Non-Panther MCs are idled by reading each physical bank.
1146 	 */
1147 	for (i = 0; rsp[i].asi == ASI_MEM; i++) {
1148 		(void) lddphys_il(rsp[i].masr_addr);
1149 	}
1150 
1151 	isp = (sbdp_mc_idle_script_t *)&rsp[i];
1152 
1153 	/*
1154 	 * Panther MCs are idled by polling until the MCU idle state
1155 	 * is read SBDP_MCU_IDLE_READS times in succession.
1156 	 */
1157 	while (isp->addr != 0ull) {
1158 		for (i = 0; i < SBDP_MCU_IDLE_RETRIES; i++) {
1159 			register uint64_t v;
1160 			register int n_idle = 0;
1161 
1162 
1163 			do {
1164 				v = ldxasi_il(isp->addr, isp->asi) &
1165 				    MCU_ACT_STATUS;
1166 			} while (v != MCU_ACT_STATUS &&
1167 			    ++n_idle < SBDP_MCU_IDLE_READS);
1168 
1169 			if (n_idle == SBDP_MCU_IDLE_READS)
1170 				break;
1171 		}
1172 
1173 		if (i == SBDP_MCU_IDLE_RETRIES) {
1174 			/* bailout */
1175 			hp->busy_mc = isp;
1176 			return (SBDP_CR_MC_IDLE_ERR);
1177 		}
1178 
1179 		isp++;
1180 	}
1181 
1182 	/* skip terminator */
1183 	isp++;
1184 
1185 	/*
1186 	 * The following inline assembly routine caches
1187 	 * the rename script and then caches the code that
1188 	 * will do the rename.  This is necessary
1189 	 * so that we don't have any memory references during
1190 	 * the reprogramming.  We accomplish this by first
1191 	 * jumping through the code to guarantee it's cached
1192 	 * before we actually execute it.
1193 	 */
1194 	sbdp_exec_script_il((sbdp_rename_script_t *)isp);
1195 
1196 	return (err);
1197 }
1198 static void
1199 _sbdp_copy_rename_end(void)
1200 {
1201 	/*
	 * IMPORTANT:   This function MUST be located immediately
1203 	 *		following sbdp_copy_rename__relocatable to accurately
1204 	 *		estimate its size.  Note that this assumes (!)the
1205 	 *		compiler keeps these functions in the order in which
1206 	 *		they appear :-o
1207 	 */
1208 }
1209 int
1210 sbdp_memory_rename(sbdp_handle_t *hp)
1211 {
1212 #ifdef lint
1213 	/*
1214 	 * Delete when implemented
1215 	 */
1216 	hp = hp;
1217 #endif
1218 	return (0);
1219 }
1220 
1221 
1222 /*
1223  * In Serengeti this is a nop
1224  */
1225 int
1226 sbdp_post_configure_mem(sbdp_handle_t *hp)
1227 {
1228 #ifdef lint
1229 	hp = hp;
1230 #endif
1231 	return (0);
1232 }
1233 
1234 /*
1235  * In Serengeti this is a nop
1236  */
1237 int
1238 sbdp_post_unconfigure_mem(sbdp_handle_t *hp)
1239 {
1240 #ifdef lint
1241 	hp = hp;
1242 #endif
1243 	return (0);
1244 }
1245 
1246 /* ARGSUSED */
1247 int
1248 sbdphw_disable_memctrl(sbdp_handle_t *hp, dev_info_t *dip)
1249 {
1250 	return (0);
1251 }
1252 
1253 /* ARGSUSED */
1254 int
1255 sbdphw_enable_memctrl(sbdp_handle_t *hp, dev_info_t *dip)
1256 {
1257 	return (0);
1258 }
1259 
1260 /*
 * We assume one memory node, therefore the base address is the lowest
 * possible segment address
1263  */
1264 #define	PA_ABOVE_MAX	(0x8000000000000000ull)
1265 int
1266 sbdphw_get_base_physaddr(sbdp_handle_t *hp, dev_info_t *dip, uint64_t *pa)
1267 {
1268 	_NOTE(ARGUNUSED(hp))
1269 
1270 	int i, board = -1, wnode;
1271 	pnode_t	nodeid;
1272 	struct mem_arg arg = {0};
1273 	uint64_t seg_pa, tmp_pa;
1274 	dev_info_t *list[SBDP_MAX_MEM_NODES_PER_BOARD];
1275 	int rc;
1276 
1277 	if (dip == NULL)
1278 		return (-1);
1279 
1280 	nodeid = ddi_get_nodeid(dip);
1281 
1282 	if (sbdp_get_bd_and_wnode_num(nodeid, &board, &wnode) < 0)
1283 		return (-1);
1284 
1285 	list[0] = NULL;
1286 	arg.board = board;
1287 	arg.list = list;
1288 
1289 	(void) sbdp_walk_prom_tree(prom_rootnode(), sbdp_get_mem_dip, &arg);
1290 
1291 	if (arg.ndips <= 0)
1292 		return (-1);
1293 
1294 	seg_pa = PA_ABOVE_MAX;
1295 
1296 	rc = -1;
1297 	for (i = 0; i < arg.ndips; i++) {
1298 		if (list[i] == NULL)
1299 			continue;
1300 		if (sbdp_get_lowest_addr_in_node(ddi_get_nodeid(list[i]),
1301 		    &tmp_pa) == 0) {
1302 			rc = 0;
1303 			if (tmp_pa < seg_pa)
1304 				seg_pa = tmp_pa;
1305 		}
1306 
1307 		/*
1308 		 * Release hold acquired in sbdp_get_mem_dip()
1309 		 */
1310 		ddi_release_devi(list[i]);
1311 	}
1312 
1313 	if (rc == 0)
1314 		*pa = seg_pa;
1315 	else {
1316 		/*
1317 		 * Record the fact that an error has occurred
1318 		 */
1319 		sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
1320 	}
1321 
1322 	return (rc);
1323 }
1324 
1325 static int
1326 sbdp_get_lowest_addr_in_node(pnode_t node, uint64_t *pa)
1327 {
1328 	uint64_t	mc_decode, seg_pa, tmp_pa;
1329 	mc_regs_t	mc_regs, *mc_regsp = &mc_regs;
1330 	int		i, valid;
1331 	int		rc;
1332 
1333 
1334 	seg_pa = PA_ABOVE_MAX;
1335 
1336 	if (mc_read_regs(node, mc_regsp)) {
1337 		SBDP_DBG_MEM("sbdp_get_lowest_addr_in_node: failed to "
1338 		    "read source Decode Regs\n");
1339 		return (-1);
1340 	}
1341 
1342 	rc = -1;
1343 	for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
1344 		mc_decode = mc_regsp->mc_decode[i];
1345 		valid = mc_decode >> MC_VALID_SHIFT;
1346 		tmp_pa = MC_BASE(mc_decode) << PHYS2UM_SHIFT;
1347 		if (valid)
1348 			rc = 0;
1349 		if (valid && (tmp_pa < seg_pa))
1350 			seg_pa = tmp_pa;
1351 	}
1352 
1353 	if (rc == 0)
1354 		*pa = seg_pa;
1355 
1356 	return (rc);
1357 }
1358 
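/*
 * PROM tree walker callback: if the given node is a healthy
 * memory-controller on the requested board, record it in the caller's
 * mem_op_t node list.
 */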
1359 int
1360 sbdp_is_mem(pnode_t node, void *arg)
1361 {
1362 	mem_op_t	*memp = (mem_op_t *)arg;
1363 	char		type[OBP_MAXPROPNAME];
1364 	int		bd;
1365 	pnode_t		*list;
1366 	int		board;
1367 	char		name[OBP_MAXDRVNAME];
1368 	int		len;
1369 
1370 	ASSERT(memp);
1371 
1372 	list = memp->nodes;
1373 	board = memp->board;
1374 
1375 	/*
1376 	 * Make sure that this node doesn't have its status
1377 	 * as failed
1378 	 */
1379 	if (sbdp_get_comp_status(node) != SBD_COND_OK) {
1380 		return (DDI_FAILURE);
1381 	}
1382 
1383 	len = prom_getproplen(node, "device_type");
1384 	if ((len > 0) && (len < OBP_MAXPROPNAME))
1385 		(void) prom_getprop(node, "device_type", (caddr_t)type);
1386 	else
1387 		type[0] = '\0';
1388 
1389 	if (strcmp(type, "memory-controller") == 0) {
1390 		int	wnode;
1391 
1392 		if (sbdp_get_bd_and_wnode_num(node, &bd, &wnode) < 0)
1393 			return (DDI_FAILURE);
1394 
1395 		if (bd == board) {
1396 			/*
1397 			 * Make sure we don't overwrite the array
1398 			 */
1399 			if (memp->nmem >= SBDP_MAX_MEM_NODES_PER_BOARD)
1400 				return (DDI_FAILURE);
1401 			(void) prom_getprop(node, OBP_NAME, (caddr_t)name);
1402 			SBDP_DBG_MEM("name %s  boot bd %d board %d\n", name,
1403 			    board, bd);
1404 			list[memp->nmem++] = node;
1405 			return (DDI_SUCCESS);
1406 		}
1407 	}
1408 
1409 	return (DDI_FAILURE);
1410 }
1411 
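/*
 * Return the size and base physical address of logical bank "mc" behind
 * this memory controller.  Both are left untouched if the bank is not
 * valid.
 */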
1412 static int
1413 sbdp_get_meminfo(pnode_t nodeid, int mc, uint64_t *size, uint64_t *base_pa)
1414 {
1415 	int		board, wnode;
1416 	int		valid;
1417 	mc_regs_t	mc_regs, *mc_regsp = &mc_regs;
1418 	uint64_t	mc_decode = 0;
1419 
1420 	if (sbdp_get_bd_and_wnode_num(nodeid, &board, &wnode) < 0)
1421 		return (-1);
1422 
1423 	if (mc_read_regs(nodeid, mc_regsp)) {
1424 		SBDP_DBG_MEM("sbdp_get_meminfo: failed to read source "
1425 		    "Decode Regs");
1426 		return (-1);
1427 	}
1428 	/*
1429 	 * Calculate memory size
1430 	 */
1431 	mc_decode = mc_regsp->mc_decode[mc];
1432 
1433 	/*
1434 	 * Check the valid bit to see if bank is there
1435 	 */
1436 	valid = mc_decode >> MC_VALID_SHIFT;
1437 	if (valid) {
1438 		*size = MC_UK2SPAN(mc_decode);
1439 		*base_pa = MC_BASE(mc_decode) << PHYS2UM_SHIFT;
1440 	}
1441 
1442 	return (0);
1443 }
1444 
1445 
1446 /*
1447  * Luckily for us mem nodes and cpu/CMP nodes are siblings.  All we need to
1448  * do is search in the same branch as the mem node for its sibling cpu or
1449  * CMP node.
1450  */
1451 pnode_t
1452 mc_get_sibling_cpu(pnode_t nodeid)
1453 {
1454 	int	portid;
1455 
1456 	if (prom_getprop(nodeid, OBP_PORTID, (caddr_t)&portid) < 0)
1457 		return (OBP_NONODE);
1458 
1459 	/*
1460 	 * cpus and memory are siblings so we don't need to traverse
1461 	 * the whole tree, just a branch
1462 	 */
1463 	return (sbdp_find_nearby_cpu_by_portid(nodeid, portid));
1464 }
1465 
1466 /*
 * Given a memory node, check its sibling cpu or CMP to see if
1468  * access to mem will be ok. We need to search for the node and
1469  * if found get its condition.
1470  */
1471 sbd_cond_t
1472 mc_check_sibling_cpu(pnode_t nodeid)
1473 {
1474 	pnode_t	cpu_node;
1475 	sbd_cond_t	cond;
1476 	int		i;
1477 
1478 	cpu_node = mc_get_sibling_cpu(nodeid);
1479 
1480 	cond = sbdp_get_comp_status(cpu_node);
1481 
1482 	if (cond == SBD_COND_OK) {
1483 		int 		wnode;
1484 		int		bd;
1485 		int		unit;
1486 		int		portid;
1487 
1488 		if (sbdp_get_bd_and_wnode_num(nodeid, &bd, &wnode) < 0)
1489 			return (SBD_COND_UNKNOWN);
1490 
1491 		(void) prom_getprop(nodeid, OBP_PORTID, (caddr_t)&portid);
1492 
1493 		/*
1494 		 * Access to the memory controller should not
1495 		 * be attempted if any of the cores are marked
1496 		 * as being in reset.
1497 		 */
1498 		for (i = 0; i < SBDP_MAX_CORES_PER_CMP; i++) {
1499 			unit = SG_PORTID_TO_CPU_UNIT(portid, i);
1500 			if (sbdp_is_cpu_present(wnode, bd, unit) &&
1501 			    sbdp_is_cpu_in_reset(wnode, bd, unit)) {
1502 				cond = SBD_COND_UNUSABLE;
1503 				break;
1504 			}
1505 		}
1506 	}
1507 
1508 	return (cond);
1509 }
1510 
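/*
 * Read all of the memory decode registers for this memory controller.
 * A local MC is read with the decode ASI; a remote MC is read through a
 * physical address in I/O space.  Fails if the MC's sibling cpu is
 * failed or unusable.
 */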
1511 int
1512 mc_read_regs(pnode_t nodeid, mc_regs_t *mc_regsp)
1513 {
1514 	int			len;
1515 	uint64_t		mc_addr, mask;
1516 	mc_regspace		reg;
1517 	sbd_cond_t		sibling_cpu_cond;
1518 	int			local_mc;
1519 	int			portid;
1520 	int			i;
1521 
1522 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
1523 	    (portid == -1))
1524 		return (-1);
1525 
1526 	/*
	 * The mc should not be accessed if its corresponding cpu
	 * has failed.
1529 	 */
1530 	sibling_cpu_cond = mc_check_sibling_cpu(nodeid);
1531 
1532 	if ((sibling_cpu_cond == SBD_COND_FAILED) ||
1533 	    (sibling_cpu_cond == SBD_COND_UNUSABLE)) {
1534 		return (-1);
1535 	}
1536 
1537 	len = prom_getproplen(nodeid, "reg");
1538 	if (len != sizeof (mc_regspace))
1539 		return (-1);
1540 
1541 	if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
1542 		return (-1);
1543 
1544 	mc_addr = ((uint64_t)reg.regspec_addr_hi) << 32;
1545 	mc_addr |= (uint64_t)reg.regspec_addr_lo;
1546 
1547 	/*
1548 	 * Make sure we don't switch cpus
1549 	 */
1550 	affinity_set(CPU_CURRENT);
1551 	if (portid == cpunodes[CPU->cpu_id].portid)
1552 		local_mc = 1;
1553 	else
1554 		local_mc = 0;
1555 
1556 	for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
1557 		mask = SG_REG_2_OFFSET(i);
1558 
1559 		/*
1560 		 * If the memory controller is local to this CPU, we use
1561 		 * the special ASI to read the decode registers.
1562 		 * Otherwise, we load the values from a magic address in
1563 		 * I/O space.
1564 		 */
1565 		if (local_mc) {
1566 			mc_regsp->mc_decode[i] = lddmcdecode(
1567 			    mask & MC_OFFSET_MASK);
1568 		} else {
1569 			mc_regsp->mc_decode[i] = lddphysio(
1570 			    (mc_addr | mask));
1571 		}
1572 	}
1573 	affinity_clear();
1574 
1575 	return (0);
1576 }
1577 
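/*
 * Return the address used to program decode register "mc" of this memory
 * controller, and the ASI needed to access it: a decode-ASI offset when
 * the MC is local to the current cpu, an I/O space address otherwise.
 */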
1578 uint64_t
1579 mc_get_addr(pnode_t nodeid, int mc, uint_t *asi)
1580 {
1581 	int			len;
1582 	uint64_t		mc_addr, addr;
1583 	mc_regspace		reg;
1584 	int			portid;
1585 	int			local_mc;
1586 
1587 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
1588 	    (portid == -1))
1589 		return (-1);
1590 
1591 	len = prom_getproplen(nodeid, "reg");
1592 	if (len != sizeof (mc_regspace))
1593 		return (-1);
1594 
1595 	if (prom_getprop(nodeid, "reg", (caddr_t)&reg) < 0)
1596 		return (-1);
1597 
1598 	mc_addr = ((uint64_t)reg.regspec_addr_hi) << 32;
1599 	mc_addr |= (uint64_t)reg.regspec_addr_lo;
1600 
1601 	/*
1602 	 * Make sure we don't switch cpus
1603 	 */
1604 	affinity_set(CPU_CURRENT);
1605 	if (portid == cpunodes[CPU->cpu_id].portid)
1606 		local_mc = 1;
1607 	else
1608 		local_mc = 0;
1609 
1610 	if (local_mc) {
1611 		*asi = ASI_MC_DECODE;
1612 		addr = SG_REG_2_OFFSET(mc) & MC_OFFSET_MASK;
1613 	} else {
1614 		*asi = ASI_IO;
1615 		addr = SG_REG_2_OFFSET(mc) | mc_addr;
1616 	}
1617 	affinity_clear();
1618 
1619 	return (addr);
1620 }
1621 
1622 /* ARGSUSED */
1623 int
1624 sbdp_mem_add_span(sbdp_handle_t *hp, uint64_t address, uint64_t size)
1625 {
1626 	return (0);
1627 }
1628 
1629 int
1630 sbdp_mem_del_span(sbdp_handle_t *hp, uint64_t address, uint64_t size)
1631 {
1632 	pfn_t		 basepfn = (pfn_t)(address >> PAGESHIFT);
1633 	pgcnt_t		 npages = (pgcnt_t)(size >> PAGESHIFT);
1634 
1635 	if (size > 0) {
1636 		int rv;
1637 		rv = kcage_range_delete_post_mem_del(basepfn, npages);
1638 		if (rv != 0) {
1639 			cmn_err(CE_WARN,
1640 			    "unexpected kcage_range_delete_post_mem_del"
1641 			    " return value %d", rv);
1642 			sbdp_set_err(hp->h_err, ESGT_INTERNAL, NULL);
1643 			return (-1);
1644 		}
1645 	}
1646 	return (0);
1647 }
1648 
1649 /*
 * This routine returns the board's memory size in pages, including
 * the bad banks
1652  */
1653 int
1654 sbdp_get_mem_size(sbdp_handle_t *hp)
1655 {
1656 	uint64_t	size = 0;
1657 	struct memlist	*mlist, *ml;
1658 
1659 	mlist = sbdp_get_memlist(hp, (dev_info_t *)NULL);
1660 
1661 	for (ml = mlist; ml; ml = ml->next)
1662 		size += ml->size;
1663 
1664 	(void) sbdp_del_memlist(hp, mlist);
1665 
1666 	SBDP_DBG_MEM("sbdp_get_mem_size: size 0x%" PRIx64 "\n", size);
1667 
1668 	return (btop(size));
1669 }
1670 
1671 /*
1672  * This function compares the list of banks passed with the banks
1673  * in the segment
1674  */
1675 int
1676 sbdp_check_seg_with_banks(sbdp_seg_t *seg, sbdp_bank_t *banks)
1677 {
1678 	sbdp_bank_t	*cur_bank, *bank;
1679 	int		i = 0;
1680 
1681 	for (cur_bank = seg->banks; cur_bank; cur_bank = cur_bank->seg_next) {
1682 		for (bank = banks; bank; bank = bank->bd_next) {
1683 			if (!bank->valid)
1684 				continue;
1685 
1686 			if (cur_bank == bank) {
1687 				i++;
1688 			}
1689 		}
1690 	}
1691 
1692 	SBDP_DBG_MEM("banks found = %d total banks = %d\n", i, seg->nbanks);
1693 	/*
	 * If every bank in the segment was matched by a bank on this board,
	 * then the segment is not interleaved across boards
1696 	 */
1697 	if (i == seg->nbanks)
1698 		return (0);
1699 
1700 	return (1);
1701 }
1702 
1703 
1704 /*
1705  * This routine determines if any of the memory banks on the board
 * participate in across-board memory interleaving
1707  */
1708 int
1709 sbdp_isinterleaved(sbdp_handle_t *hp, dev_info_t *dip)
1710 {
1711 	_NOTE(ARGUNUSED(dip))
1712 
1713 	sbdp_bank_t	*bankp;
1714 	int		wnode, board;
1715 	int		is_interleave = 0;
1716 	sbdp_bd_t	*bdp;
1717 	uint64_t	base;
1718 	sbdp_seg_t	*seg;
1719 
1720 	board = hp->h_board;
1721 	wnode = hp->h_wnode;
1722 
1723 #ifdef DEBUG
1724 	sbdp_print_all_segs();
1725 #endif
1726 	/*
1727 	 * Get the banks for this board
1728 	 */
1729 	bdp = sbdp_get_bd_info(wnode, board);
1730 
1731 	if (bdp == NULL)
1732 		return (-1);
1733 
1734 	/*
1735 	 * Search for the first bank with valid memory
1736 	 */
1737 	for (bankp = bdp->banks; bankp; bankp = bankp->bd_next)
1738 		if (bankp->valid)
1739 			break;
1740 
1741 	/*
1742 	 * If there are no banks in the board, then the board is
1743 	 * not interleaved across boards
1744 	 */
1745 	if (bankp == NULL) {
1746 		return (0);
1747 	}
1748 
1749 	base = bankp->um & ~(bankp->uk);
1750 
1751 	/*
1752 	 * Find the segment for the first bank
1753 	 */
1754 	if ((seg = sbdp_get_seg(base)) == NULL) {
1755 		/*
1756 		 * Something bad has happened.
1757 		 */
1758 		return (-1);
1759 	}
1760 	/*
1761 	 * Make sure that this segment is only composed of the banks
	 * on this board.  If one is missing or we have an extra one,
1763 	 * the board is interleaved across boards
1764 	 */
1765 	is_interleave = sbdp_check_seg_with_banks(seg, bdp->banks);
1766 
1767 	SBDP_DBG_MEM("interleave is %d\n", is_interleave);
1768 
1769 	return (is_interleave);
1770 }
1771 
1772 
1773 /*
1774  * Each node has 4 logical banks.  This routine adds all the banks (including
 * the invalid ones) to the passed list.  Note that we use the bd list and not
1776  * the seg list
1777  */
1778 int
1779 sbdp_add_nodes_banks(pnode_t node, sbdp_bank_t **banks)
1780 {
1781 	int		i;
1782 	mc_regs_t	regs;
1783 	uint64_t	*mc_decode;
1784 	sbdp_bank_t 	*bank;
1785 
1786 	if (mc_read_regs(node, &regs) == -1)
1787 		return (-1);
1788 
1789 	mc_decode = regs.mc_decode;
1790 
1791 	for (i = 0; i < SBDP_MAX_MCS_PER_NODE; i++) {
1792 		/*
1793 		 * This creates the mem for the new member of the list
1794 		 */
1795 		sbdp_fill_bank_info(mc_decode[i], &bank);
1796 
1797 		SBDP_DBG_MEM("adding bank %d\n", bank->id);
1798 
1799 		/*
1800 		 * Insert bank into the beginning of the list
1801 		 */
1802 		bank->bd_next = *banks;
1803 		*banks = bank;
1804 
1805 		/*
1806 		 * Add this bank into its corresponding
1807 		 * segment
1808 		 */
1809 		sbdp_add_bank_to_seg(bank);
1810 	}
1811 	return (0);
1812 }
1813 
1814 /*
 * Given the mc_decode value, create a new bank node and fill in its fields
 * as appropriate.  We allocate the memory for the bank; it is
 * up to the caller to ensure the memory is freed
1818  */
1819 void
1820 sbdp_fill_bank_info(uint64_t mc_decode, sbdp_bank_t **bank)
1821 {
1822 	static int	id = 0;
1823 	sbdp_bank_t	*new;
1824 
1825 	new = kmem_zalloc(sizeof (sbdp_bank_t), KM_SLEEP);
1826 
1827 	new->id = id++;
1828 	new->valid = (mc_decode >> MC_VALID_SHIFT);
1829 	new->uk = MC_UK(mc_decode);
1830 	new->um = MC_UM(mc_decode);
1831 	new->lk = MC_LK(mc_decode);
1832 	new->lm = MC_LM(mc_decode);
1833 	new->bd_next = NULL;
1834 	new->seg_next = NULL;
1835 
1836 	*bank = new;
1837 }
1838 
1839 /*
1840  * Each bd has the potential of having mem banks on it.  The banks
1841  * may be empty or not.  This routine gets all the mem banks
1842  * for this bd
1843  */
1844 void
1845 sbdp_init_bd_banks(sbdp_bd_t *bdp)
1846 {
1847 	int		i, nmem;
1848 	pnode_t		*lists;
1849 
1850 	lists = bdp->nodes;
1851 	nmem = bdp->nnum;
1852 
1853 	if (bdp->banks != NULL) {
1854 		return;
1855 	}
1856 
1857 	bdp->banks = NULL;
1858 
1859 	for (i = 0; i < nmem; i++) {
1860 		(void) sbdp_add_nodes_banks(lists[i], &bdp->banks);
1861 	}
1862 }
1863 
1864 /*
1865  * swap the list of banks for the 2 boards
1866  */
1867 void
1868 sbdp_swap_list_of_banks(sbdp_bd_t *bdp1, sbdp_bd_t *bdp2)
1869 {
1870 	sbdp_bank_t	*tmp_ptr;
1871 
1872 	if ((bdp1 == NULL) || (bdp2 == NULL))
1873 		return;
1874 
1875 	tmp_ptr = bdp1->banks;
1876 	bdp1->banks = bdp2->banks;
1877 	bdp2->banks = tmp_ptr;
1878 }
1879 
1880 /*
1881  * free all the banks on the board.  Note that a bank node belongs
1882  * to 2 lists. The first list is the board list. The second one is
1883  * the seg list. We only need to remove the bank from both lists but only
1884  * free the node once.
1885  */
1886 void
1887 sbdp_fini_bd_banks(sbdp_bd_t *bdp)
1888 {
1889 	sbdp_bank_t	*bkp, *nbkp;
1890 
1891 	for (bkp = bdp->banks; bkp; ) {
1892 		/*
1893 		 * Remove the bank from the seg list first
1894 		 */
1895 		SBDP_DBG_MEM("Removing bank %d\n", bkp->id);
1896 		sbdp_remove_bank_from_seg(bkp);
1897 		nbkp = bkp->bd_next;
1898 		bkp->bd_next = NULL;
1899 		kmem_free(bkp, sizeof (sbdp_bank_t));
1900 
1901 		bkp = nbkp;
1902 	}
1903 	bdp->banks = NULL;
1904 }
1905 
1906 #ifdef DEBUG
1907 void
1908 sbdp_print_bd_banks(sbdp_bd_t *bdp)
1909 {
1910 	sbdp_bank_t	*bp;
1911 	int		i;
1912 
1913 	SBDP_DBG_MEM("BOARD %d\n", bdp->bd);
1914 
1915 	for (bp = bdp->banks, i = 0; bp; bp = bp->bd_next, i++) {
1916 		SBDP_DBG_MEM("BANK [%d]:\n", bp->id);
1917 		SBDP_DBG_MEM("\tvalid %d\tuk 0x%x\tum 0x%x\tlk 0x%x"
1918 		    "\tlm 0x%x\n", bp->valid, bp->uk, bp->um,
1919 		    bp->lk, bp->lm);
1920 	}
1921 }
1922 
1923 void
1924 sbdp_print_all_segs(void)
1925 {
1926 	sbdp_seg_t	*cur_seg;
1927 
1928 	for (cur_seg = sys_seg; cur_seg; cur_seg = cur_seg->next)
1929 		sbdp_print_seg(cur_seg);
1930 }
1931 
1932 void
1933 sbdp_print_seg(sbdp_seg_t *seg)
1934 {
1935 	sbdp_bank_t	*bp;
1936 	int		i;
1937 
1938 	SBDP_DBG_MEM("SEG %d\n", seg->id);
1939 
1940 	for (bp = seg->banks, i = 0; bp; bp = bp->seg_next, i++) {
1941 		SBDP_DBG_MEM("BANK [%d]:\n", bp->id);
1942 		SBDP_DBG_MEM("\tvalid %d\tuk 0x%x\tum 0x%x\tlk 0x%x"
1943 		    "\tlm 0x%x\n", bp->valid, bp->uk, bp->um,
1944 		    bp->lk, bp->lm);
1945 	}
1946 }
1947 #endif
1948 
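/*
 * Add a valid bank to the segment that contains its base address,
 * creating the segment and linking it onto sys_seg if it does not
 * exist yet.
 */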
1949 void
1950 sbdp_add_bank_to_seg(sbdp_bank_t *bank)
1951 {
1952 	uint64_t	base;
1953 	sbdp_seg_t	*cur_seg;
1954 	static int	id = 0;
1955 
1956 	/*
1957 	 * if we got an invalid bank just skip it
1958 	 */
1959 	if (bank == NULL || !bank->valid)
1960 		return;
1961 	base = bank->um & ~(bank->uk);
1962 
1963 	if ((cur_seg = sbdp_get_seg(base)) == NULL) {
1964 		/*
1965 		 * This bank is part of a new segment, so create
		 * a struct for it and add it to the list of segments
1967 		 */
1968 		cur_seg = kmem_zalloc(sizeof (sbdp_seg_t), KM_SLEEP);
1969 		cur_seg->id = id++;
1970 		cur_seg->base = base;
		cur_seg->size = ((bank->uk + 1) << PHYS2UM_SHIFT);
1972 		cur_seg->intlv = ((bank->lk ^ 0xF) + 1);
1973 		/*
1974 		 * add to the seg list
1975 		 */
1976 		cur_seg->next = sys_seg;
1977 		sys_seg = cur_seg;
1978 	}
1979 
1980 	cur_seg->nbanks++;
1981 	/*
	 * add the bank into the seg's bank list.  Note we add at the head
1983 	 */
1984 	bank->seg_next = cur_seg->banks;
1985 	cur_seg->banks = bank;
1986 }
1987 
1988 /*
1989  * Remove this segment from the seg list
1990  */
1991 void
1992 sbdp_rm_seg(sbdp_seg_t *seg)
1993 {
1994 	sbdp_seg_t	**curpp, *curp;
1995 
1996 	curpp = &sys_seg;
1997 
1998 	while ((curp = *curpp) != NULL) {
1999 		if (curp == seg) {
2000 			*curpp = curp->next;
2001 			break;
2002 		}
2003 		curpp = &curp->next;
2004 	}
2005 
2006 	if (curp != NULL) {
2007 		kmem_free(curp, sizeof (sbdp_seg_t));
2008 		curp = NULL;
2009 	}
2010 }
2011 
2012 /*
2013  * remove this bank from its seg list
2014  */
2015 void
2016 sbdp_remove_bank_from_seg(sbdp_bank_t *bank)
2017 {
2018 	uint64_t	base;
2019 	sbdp_seg_t	*cur_seg;
2020 	sbdp_bank_t	**curpp, *curp;
2021 
2022 	/*
2023 	 * if we got an invalid bank just skip it
2024 	 */
2025 	if (bank == NULL || !bank->valid)
2026 		return;
2027 	base = bank->um & ~(bank->uk);
2028 
2029 	/*
2030 	 * If the bank doesn't belong to any seg just return
2031 	 */
2032 	if ((cur_seg = sbdp_get_seg(base)) == NULL) {
2033 		SBDP_DBG_MEM("bank %d with no segment\n", bank->id);
2034 		return;
2035 	}
2036 
2037 	/*
2038 	 * Find bank in the seg
2039 	 */
2040 	curpp = &cur_seg->banks;
2041 
2042 	while ((curp = *curpp) != NULL) {
2043 		if (curp->id == bank->id) {
2044 			/*
2045 			 * found node, remove it
2046 			 */
2047 			*curpp = curp->seg_next;
2048 			break;
2049 		}
2050 		curpp = &curp->seg_next;
2051 	}
2052 
2053 	if (curp != NULL) {
2054 		cur_seg->nbanks--;
2055 	}
2056 
2057 	if (cur_seg->nbanks == 0) {
2058 		/*
2059 		 * No banks left on this segment, remove the segment
2060 		 */
2061 		SBDP_DBG_MEM("No banks left in this segment, removing it\n");
2062 		sbdp_rm_seg(cur_seg);
2063 	}
2064 }
2065 
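/*
 * Find the segment with the given base address, or return NULL if there
 * is none.
 */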
2066 sbdp_seg_t *
2067 sbdp_get_seg(uint64_t base)
2068 {
2069 	sbdp_seg_t	*cur_seg;
2070 
2071 	for (cur_seg = sys_seg; cur_seg; cur_seg = cur_seg->next) {
		if (cur_seg->base == base)
2073 			break;
2074 	}
2075 
2076 	return (cur_seg);
2077 }
2078 
2079 #ifdef DEBUG
2080 int
2081 sbdp_passthru_readmem(sbdp_handle_t *hp, void *arg)
2082 {
2083 	_NOTE(ARGUNUSED(hp))
2084 	_NOTE(ARGUNUSED(arg))
2085 
2086 	struct memlist	*ml;
2087 	uint64_t	src_pa;
2088 	uint64_t	dst_pa;
2089 	uint64_t	dst;
2090 
2091 
2092 	dst_pa = va_to_pa(&dst);
2093 
2094 	memlist_read_lock();
2095 	for (ml = phys_install; ml; ml = ml->next) {
2096 		uint64_t	nbytes;
2097 
2098 		src_pa = ml->address;
2099 		nbytes = ml->size;
2100 
2101 		while (nbytes != 0ull) {
2102 
2103 			/* copy 32 bytes at src_pa to dst_pa */
2104 			bcopy32_il(src_pa, dst_pa);
2105 
2106 			/* increment by 32 bytes */
2107 			src_pa += (4 * sizeof (uint64_t));
2108 
2109 			/* decrement by 32 bytes */
2110 			nbytes -= (4 * sizeof (uint64_t));
2111 		}
2112 	}
2113 	memlist_read_unlock();
2114 
2115 	return (0);
2116 }
2117 
2118 static int
2119 isdigit(int ch)
2120 {
2121 	return (ch >= '0' && ch <= '9');
2122 }
2123 
2124 #define	isspace(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
2125 
2126 int
2127 sbdp_strtoi(char *p, char **pos)
2128 {
2129 	int n;
2130 	int c, neg = 0;
2131 
2132 	if (!isdigit(c = *p)) {
2133 		while (isspace(c))
2134 			c = *++p;
2135 		switch (c) {
2136 			case '-':
2137 				neg++;
2138 				/* FALLTHROUGH */
2139 			case '+':
2140 				c = *++p;
2141 		}
2142 		if (!isdigit(c)) {
2143 			if (pos != NULL)
2144 				*pos = p;
2145 			return (0);
2146 		}
2147 	}
2148 	for (n = '0' - c; isdigit(c = *++p); ) {
2149 		n *= 10; /* two steps to avoid unnecessary overflow */
2150 		n += '0' - c; /* accum neg to avoid surprises at MAX */
2151 	}
2152 	if (pos != NULL)
2153 		*pos = p;
2154 	return (neg ? n : -n);
2155 }
2156 
2157 int
2158 sbdp_passthru_prep_script(sbdp_handle_t *hp, void *arg)
2159 {
2160 	int			board, i;
2161 	sbdp_bd_t		*t_bdp, *s_bdp;
2162 	char			*opts;
2163 	int			t_board;
2164 	sbdp_rename_script_t	*rsbuffer;
2165 	sbdp_cr_handle_t	*cph;
2166 	int			scriptlen, size;
2167 
2168 	opts = (char *)arg;
2169 	board = hp->h_board;
2170 
2171 	opts += strlen("prep-script=");
2172 	t_board = sbdp_strtoi(opts, NULL);
2173 
2174 	cph =  kmem_zalloc(sizeof (sbdp_cr_handle_t), KM_SLEEP);
2175 
2176 	size = sizeof (sbdp_rename_script_t) * SBDP_RENAME_MAXOP;
2177 	rsbuffer = kmem_zalloc(size, KM_SLEEP);
2178 
2179 	s_bdp = sbdp_get_bd_info(hp->h_wnode, board);
2180 	t_bdp = sbdp_get_bd_info(hp->h_wnode, t_board);
2181 
2182 	cph->s_bdp = s_bdp;
2183 	cph->t_bdp = t_bdp;
2184 	cph->script = rsbuffer;
2185 
2186 	affinity_set(CPU_CURRENT);
2187 	scriptlen = sbdp_prep_rename_script(cph);
2188 
2189 	if (scriptlen <= 0) {
2190 		cmn_err(CE_WARN,
2191 		"sbdp failed to prep for copy-rename");
2192 	}
2193 	prom_printf("SCRIPT from board %d to board %d ->\n", board, t_board);
2194 	for (i = 0;  i < (scriptlen / (sizeof (sbdp_rename_script_t))); i++) {
2195 		prom_printf("0x%lx = 0x%lx, asi 0x%x\n",
2196 		    rsbuffer[i].masr_addr, rsbuffer[i].masr, rsbuffer[i].asi);
2197 	}
2198 	prom_printf("\n");
2199 
2200 	affinity_clear();
2201 	kmem_free(rsbuffer, size);
2202 	kmem_free(cph, sizeof (sbdp_cr_handle_t));
2203 
2204 	return (0);
2205 }
2206 #endif
2207