1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#include <sys/time.h>
28#include <sys/cpuvar.h>
29#include <sys/dditypes.h>
30#include <sys/ddipropdefs.h>
31#include <sys/ddi_impldefs.h>
32#include <sys/sunddi.h>
33#include <sys/esunddi.h>
34#include <sys/sunndi.h>
35#include <sys/platform_module.h>
36#include <sys/errno.h>
37#include <sys/conf.h>
38#include <sys/modctl.h>
39#include <sys/promif.h>
40#include <sys/promimpl.h>
41#include <sys/prom_plat.h>
42#include <sys/cmn_err.h>
43#include <sys/sysmacros.h>
44#include <sys/mem_cage.h>
45#include <sys/kobj.h>
46#include <sys/utsname.h>
47#include <sys/cpu_sgnblk_defs.h>
48#include <sys/atomic.h>
49#include <sys/kdi_impl.h>
50
51#include <sys/sgsbbc.h>
52#include <sys/sgsbbc_iosram.h>
53#include <sys/sgsbbc_iosram_priv.h>
54#include <sys/sgsbbc_mailbox.h>
55#include <sys/sgsgn.h>
56#include <sys/serengeti.h>
57#include <sys/sgfrutypes.h>
58#include <sys/machsystm.h>
59#include <sys/sbd_ioctl.h>
60#include <sys/sbd.h>
61#include <sys/sbdp_mem.h>
62#include <sys/sgcn.h>
63
64#include <sys/memnode.h>
65#include <vm/vm_dep.h>
66#include <vm/page.h>
67
68#include <sys/cheetahregs.h>
69#include <sys/plat_ecc_unum.h>
70#include <sys/plat_ecc_dimm.h>
71
72#include <sys/lgrp.h>
73#include <sys/clock_impl.h>
74
75static int sg_debug = 0;
76
77#ifdef DEBUG
78#define	DCMNERR if (sg_debug) cmn_err
79#else
80#define	DCMNERR
81#endif
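
/*
 * With DEBUG undefined, DCMNERR expands to nothing, so a call such as
 *
 *	DCMNERR(CE_NOTE, "message %d\n", arg);
 *
 * is left behind as the parenthesized comma expression
 * (CE_NOTE, "message %d\n", arg); which compiles to a statement with no
 * useful effect.  This is what lets DCMNERR be called exactly like
 * cmn_err() without wrapping every call site in #ifdef DEBUG.
 */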
82
83int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
84
85/* local functions */
86static void cpu_sgn_update(ushort_t sgn, uchar_t state,
87    uchar_t sub_state, int cpuid);
88
89
90/*
91 * Local data.
92 *
93 * iosram_write_ptr is a pointer to iosram_write().  Because of
94 * kernel dynamic linking, we can't get to the function by name,
95 * but we can look up its address, and store it in this variable
96 * instead.
97 *
98 * We include the extern for iosram_write() here not because we call
99 * it, but to force compilation errors if its prototype doesn't
100 * match the prototype of iosram_write_ptr.
101 *
102 * The same issues apply to iosram_read() and iosram_read_ptr.
103 */
104/*CSTYLED*/
105extern int   iosram_write     (int, uint32_t, caddr_t, uint32_t);
106static int (*iosram_write_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
107/*CSTYLED*/
108extern int   iosram_read     (int, uint32_t, caddr_t, uint32_t);
109static int (*iosram_read_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
110
111
112/*
113 * Variable to indicate if the date should be obtained from the SC or not.
114 */
int todsg_use_sc = FALSE;	/* set to TRUE in load_platform_drivers() */
116
117/*
118 * Preallocation of spare tsb's for DR
119 *
120 * We don't allocate spares for Wildcat since TSBs should come
121 * out of memory local to the node.
122 */
123#define	IOMMU_PER_SCHIZO	2
124int serengeti_tsb_spares = (SG_MAX_IO_BDS * SG_SCHIZO_PER_IO_BD *
125	IOMMU_PER_SCHIZO);
126
127/*
128 * sg_max_ncpus is the maximum number of CPUs supported on lw8.
129 * sg_max_ncpus is set to be smaller than NCPU to reduce the amount of
130 * memory the logs take up until we have a dynamic log memory allocation
131 * solution.
132 */
133int sg_max_ncpus = (12 * 2);    /* (max # of processors * # of cores/proc) */
134
135/*
136 * variables to control mailbox message timeouts.
137 * These can be patched via /etc/system or mdb.
138 */
139int	sbbc_mbox_default_timeout = MBOX_DEFAULT_TIMEOUT;
140int	sbbc_mbox_min_timeout = MBOX_MIN_TIMEOUT;
141
142/* cached 'chosen' node_id */
143pnode_t chosen_nodeid = (pnode_t)0;
144
145/*
146 * Table that maps memory slices to a specific memnode.
147 */
148int slice_to_memnode[SG_MAX_SLICE];
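
/*
 * The table is indexed by slice number (PA_2_SLICE of a physical address).
 * Entries are set to -1 in plat_lgrp_init() and filled in by
 * update_mem_bounds() as the memory controllers are decoded, which is what
 * later allows plat_pfn_to_mem_node() to map any pfn back to its memnode.
 */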
149
150/*
151 * We define and use LW8_MAX_CPU_BDS here instead of SG_MAX_CPU_BDS
152 * since a LW8 machine will never have a CPU/Mem board #5 (SB5).
153 * A LW8 machine can only have a maximum of three CPU/Mem boards, but
154 * the board numbers assigned are 0, 2, and 4.  LW8_MAX_CPU_BDS is
155 * defined to be 5 since the entries in the domain_dimm_sids array
156 * are keyed by board number.  Not perfect but some wasted space
157 * is avoided.
158 */
159#define	LW8_MAX_CPU_BDS		5
160
161plat_dimm_sid_board_t	domain_dimm_sids[LW8_MAX_CPU_BDS];
162
163int
164set_platform_tsb_spares()
165{
166	return (MIN(serengeti_tsb_spares, MAX_UPA));
167}
168
169#pragma weak mmu_init_large_pages
170
171void
172set_platform_defaults(void)
173{
174	extern int watchdog_enable;
175	extern uint64_t xc_tick_limit_scale;
176	extern void mmu_init_large_pages(size_t);
177
178#ifdef DEBUG
179	char *todsg_name = "todsg";
180	ce_verbose_memory = 2;
181	ce_verbose_other = 2;
182#endif /* DEBUG */
183
184	watchdog_enable = TRUE;
185	watchdog_available = TRUE;
186
187	cpu_sgn_func = cpu_sgn_update;
188
189#ifdef DEBUG
190	/* tod_module_name should be set to "todsg" from OBP property */
191	if (tod_module_name && (strcmp(tod_module_name, todsg_name) == 0))
192		prom_printf("Using todsg driver\n");
193	else {
194		prom_printf("Force using todsg driver\n");
195		tod_module_name = todsg_name;
196	}
197#endif /* DEBUG */
198
199	/* lw8 does not support forthdebug */
200	forthdebug_supported = 0;
201
202
203	/*
204	 * Some DR operations require the system to be sync paused.
205	 * Sync pause on Serengeti could potentially take up to 4
206	 * seconds to complete depending on the load on the SC.  To
	 * avoid send_mondo panics during such operations, we need to
208	 * increase xc_tick_limit to a larger value on Serengeti by
209	 * setting xc_tick_limit_scale to 5.
210	 */
211	xc_tick_limit_scale = 5;
212
213	if ((mmu_page_sizes == max_mmu_page_sizes) &&
214	    (mmu_ism_pagesize != DEFAULT_ISM_PAGESIZE)) {
215		if (&mmu_init_large_pages)
216			mmu_init_large_pages(mmu_ism_pagesize);
217	}
218}
219
220void
221load_platform_modules(void)
222{
223	if (modload("misc", "pcihp") < 0) {
224		cmn_err(CE_NOTE, "pcihp driver failed to load");
225	}
226}
227
228/*ARGSUSED*/
229int
230plat_cpu_poweron(struct cpu *cp)
231{
232	int (*serengeti_cpu_poweron)(struct cpu *) = NULL;
233
234	serengeti_cpu_poweron =
235	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweron", 0);
236
237	if (serengeti_cpu_poweron == NULL)
238		return (ENOTSUP);
239	else
240		return ((serengeti_cpu_poweron)(cp));
241}
242
243/*ARGSUSED*/
244int
245plat_cpu_poweroff(struct cpu *cp)
246{
247	int (*serengeti_cpu_poweroff)(struct cpu *) = NULL;
248
249	serengeti_cpu_poweroff =
250	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweroff", 0);
251
252	if (serengeti_cpu_poweroff == NULL)
253		return (ENOTSUP);
254	else
255		return ((serengeti_cpu_poweroff)(cp));
256}
257
258#ifdef DEBUG
259pgcnt_t serengeti_cage_size_limit;
260#endif
261
262/* Preferred minimum cage size (expressed in pages)... for DR */
263pgcnt_t serengeti_minimum_cage_size = 0;
264
265void
266set_platform_cage_params(void)
267{
268	extern pgcnt_t total_pages;
269	extern struct memlist *phys_avail;
270
271	if (kernel_cage_enable) {
272		pgcnt_t preferred_cage_size;
273
274		preferred_cage_size =
275		    MAX(serengeti_minimum_cage_size, total_pages / 256);
276#ifdef DEBUG
277		if (serengeti_cage_size_limit)
278			preferred_cage_size = serengeti_cage_size_limit;
279#endif
280		/*
		 * POST copies OBP into the lowest slice.  This requires the
		 * cage to grow upwards.
283		 */
284		kcage_range_init(phys_avail, KCAGE_UP, preferred_cage_size);
285	}
286
287	kcage_startup_dir = KCAGE_UP;
288
289	/* Only note when the cage is off since it should always be on. */
290	if (!kcage_on)
291		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
292}
293
294#define	ALIGN(x, a)	((a) == 0 ? (uint64_t)(x) : \
295	(((uint64_t)(x) + (uint64_t)(a) - 1l) & ~((uint64_t)(a) - 1l)))
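
/*
 * For reference, ALIGN() rounds its first argument up to the next multiple
 * of its second (a power of two), or returns it unchanged when the
 * alignment is 0; for example:
 *
 *	ALIGN(0x5000, 0x4000) == 0x8000
 *	ALIGN(0x4000, 0x4000) == 0x4000
 *	ALIGN(0x5000, 0)      == 0x5000
 */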
296
297void
298update_mem_bounds(int brd, uint64_t base, uint64_t sz)
299{
300	uint64_t	end;
301	int		mnode;
302
303	end = base + sz - 1;
304
305	/*
306	 * First see if this board already has a memnode associated
307	 * with it.  If not, see if this slice has a memnode.  This
308	 * covers the cases where a single slice covers multiple
309	 * boards (cross-board interleaving) and where a single
310	 * board has multiple slices (1+GB DIMMs).
311	 */
312	if ((mnode = plat_lgrphand_to_mem_node(brd)) == -1) {
313		if ((mnode = slice_to_memnode[PA_2_SLICE(base)]) == -1)
314			mnode = mem_node_alloc();
315		plat_assign_lgrphand_to_mem_node(brd, mnode);
316	}
317
318	/*
319	 * Align base at 16GB boundary
320	 */
321	base = ALIGN(base, (1ul << PA_SLICE_SHIFT));
322
323	while (base < end) {
324		slice_to_memnode[PA_2_SLICE(base)] = mnode;
325		base += (1ul << PA_SLICE_SHIFT);
326	}
327}
328
329/*
330 * Dynamically detect memory slices in the system by decoding
331 * the cpu memory decoder registers at boot time.
332 */
333void
334plat_fill_mc(pnode_t nodeid)
335{
336	uint64_t	mc_addr, mask;
337	uint64_t	mc_decode[SG_MAX_BANKS_PER_MC];
338	uint64_t	base, size;
339	uint32_t	regs[4];
340	int		len;
341	int		local_mc;
342	int		portid;
343	int		boardid;
344	int		i;
345
346	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
347	    (portid == -1))
348		return;
349
350	/*
351	 * Decode the board number from the MC portid
352	 */
353	boardid = SG_PORTID_TO_BOARD_NUM(portid);
354
355	/*
356	 * The "reg" property returns 4 32-bit values. The first two are
357	 * combined to form a 64-bit address.  The second two are for a
358	 * 64-bit size, but we don't actually need to look at that value.
359	 */
360	len = prom_getproplen(nodeid, "reg");
361	if (len != (sizeof (uint32_t) * 4)) {
362		prom_printf("Warning: malformed 'reg' property\n");
363		return;
364	}
365	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
366		return;
367	mc_addr = ((uint64_t)regs[0]) << 32;
368	mc_addr |= (uint64_t)regs[1];
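
	/*
	 * For example, a "reg" whose first two cells are 0x400 and 0x0
	 * would place the controller's registers at physical address
	 * 0x40000000000.
	 */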
369
370	/*
371	 * Figure out whether the memory controller we are examining
372	 * belongs to this CPU or a different one.
373	 */
374	if (portid == cpunodes[CPU->cpu_id].portid)
375		local_mc = 1;
376	else
377		local_mc = 0;
378
379	for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
380		mask = SG_REG_2_OFFSET(i);
381
382		/*
383		 * If the memory controller is local to this CPU, we use
384		 * the special ASI to read the decode registers.
385		 * Otherwise, we load the values from a magic address in
386		 * I/O space.
387		 */
388		if (local_mc)
389			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
390		else
391			mc_decode[i] = lddphysio((mc_addr | mask));
392
393		if (mc_decode[i] >> MC_VALID_SHIFT) {
394			/*
395			 * The memory decode register is a bitmask field,
396			 * so we can decode that into both a base and
397			 * a span.
398			 */
399			base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
400			size = MC_UK2SPAN(mc_decode[i]);
401			update_mem_bounds(boardid, base, size);
402		}
403	}
404}
405
406/*
407 * This routine is run midway through the boot process.  By the time we get
408 * here, we know about all the active CPU boards in the system, and we have
409 * extracted information about each board's memory from the memory
410 * controllers.  We have also figured out which ranges of memory will be
411 * assigned to which memnodes, so we walk the slice table to build the table
412 * of memnodes.
413 */
414/* ARGSUSED */
415void
416plat_build_mem_nodes(prom_memlist_t *list, size_t  nelems)
417{
418	int	slice;
419	pfn_t	basepfn;
420	pgcnt_t	npgs;
421
422	mem_node_pfn_shift = PFN_SLICE_SHIFT;
423	mem_node_physalign = (1ull << PA_SLICE_SHIFT);
424
425	for (slice = 0; slice < SG_MAX_SLICE; slice++) {
426		if (slice_to_memnode[slice] == -1)
427			continue;
428		basepfn = (uint64_t)slice << PFN_SLICE_SHIFT;
429		npgs = 1ull << PFN_SLICE_SHIFT;
430		mem_node_add_slice(basepfn, basepfn + npgs - 1);
431	}
432}
433
434int
435plat_pfn_to_mem_node(pfn_t pfn)
436{
437	int node;
438
439	node = slice_to_memnode[PFN_2_SLICE(pfn)];
440
441	return (node);
442}
443
444/*
445 * Serengeti support for lgroups.
446 *
447 * On Serengeti, an lgroup platform handle == board number.
448 *
449 * Mappings between lgroup handles and memnodes are managed
450 * in addition to mappings between memory slices and memnodes
451 * to support cross-board interleaving as well as multiple
452 * slices per board (e.g. >1GB DIMMs). The initial mapping
453 * of memnodes to lgroup handles is determined at boot time.
454 * A DR addition of memory adds a new mapping. A DR copy-rename
455 * swaps mappings.
456 */
457
458/*
459 * Macro for extracting the board number from the CPU id
460 */
461#define	CPUID_TO_BOARD(id)	(((id) >> 2) & 0x7)
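
/*
 * CPUID_TO_BOARD() simply extracts bits <4:2> of the CPU id; for example,
 * CPU ids 0-3 map to board 0, ids 8-11 to board 2, and ids 16-19 to
 * board 4, matching the board numbering noted above for LW8_MAX_CPU_BDS.
 */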
462
463/*
464 * Return the platform handle for the lgroup containing the given CPU
465 *
466 * For Serengeti, lgroup platform handle == board number
467 */
468lgrp_handle_t
469plat_lgrp_cpu_to_hand(processorid_t id)
470{
471	return (CPUID_TO_BOARD(id));
472}
473
474/*
475 * Platform specific lgroup initialization
476 */
477void
478plat_lgrp_init(void)
479{
480	int i;
481	extern uint32_t lgrp_expand_proc_thresh;
482	extern uint32_t lgrp_expand_proc_diff;
483
484	/*
485	 * Initialize lookup tables to invalid values so we catch
486	 * any illegal use of them.
487	 */
488	for (i = 0; i < SG_MAX_SLICE; i++) {
489		slice_to_memnode[i] = -1;
490	}
491
492	/*
493	 * Set tuneables for Serengeti architecture
494	 *
495	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
496	 * this process is currently running on before considering
497	 * expanding threads to another lgroup.
498	 *
499	 * lgrp_expand_proc_diff determines how much less the remote lgroup
500	 * must be loaded before expanding to it.
501	 *
502	 * Bandwidth is maximized on Serengeti by spreading load across
503	 * the machine. The impact to inter-thread communication isn't
504	 * too costly since remote latencies are relatively low.  These
505	 * values equate to one CPU's load and so attempt to spread the
506	 * load out across as many lgroups as possible one CPU at a time.
507	 */
508	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX;
509	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
510}
511
512/*
513 * Platform notification of lgroup (re)configuration changes
514 */
515/*ARGSUSED*/
516void
517plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
518{
519	update_membounds_t	*umb;
520	lgrp_config_mem_rename_t lmr;
521	lgrp_handle_t		shand, thand;
522	int			snode, tnode;
523
524	switch (evt) {
525
526	case LGRP_CONFIG_MEM_ADD:
527		umb = (update_membounds_t *)arg;
528		update_mem_bounds(umb->u_board, umb->u_base, umb->u_len);
529
530		break;
531
532	case LGRP_CONFIG_MEM_DEL:
533		/* We don't have to do anything */
534
535		break;
536
537	case LGRP_CONFIG_MEM_RENAME:
538		/*
539		 * During a DR copy-rename operation, all of the memory
540		 * on one board is moved to another board -- but the
541		 * addresses/pfns and memnodes don't change. This means
542		 * the memory has changed locations without changing identity.
543		 *
544		 * Source is where we are copying from and target is where we
545		 * are copying to.  After source memnode is copied to target
546		 * memnode, the physical addresses of the target memnode are
547		 * renamed to match what the source memnode had.  Then target
548		 * memnode can be removed and source memnode can take its
549		 * place.
550		 *
551		 * To do this, swap the lgroup handle to memnode mappings for
552		 * the boards, so target lgroup will have source memnode and
553		 * source lgroup will have empty target memnode which is where
554		 * its memory will go (if any is added to it later).
555		 *
556		 * Then source memnode needs to be removed from its lgroup
557		 * and added to the target lgroup where the memory was living
558		 * but under a different name/memnode.  The memory was in the
559		 * target memnode and now lives in the source memnode with
560		 * different physical addresses even though it is the same
561		 * memory.
562		 */
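
		/*
		 * arg packs both lgroup handles: the source handle in its
		 * low 16 bits and the target handle in the next 16 bits
		 * (decoded just below).  An arg of 0x00040002, for example,
		 * names source board 2 and target board 4.
		 */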
563		shand = arg & 0xffff;
564		thand = (arg & 0xffff0000) >> 16;
565		snode = plat_lgrphand_to_mem_node(shand);
566		tnode = plat_lgrphand_to_mem_node(thand);
567
568		plat_assign_lgrphand_to_mem_node(thand, snode);
569		plat_assign_lgrphand_to_mem_node(shand, tnode);
570
571		/*
572		 * Remove source memnode of copy rename from its lgroup
573		 * and add it to its new target lgroup
574		 */
575		lmr.lmem_rename_from = shand;
576		lmr.lmem_rename_to = thand;
577
578		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
579		    (uintptr_t)&lmr);
580
581		break;
582
583	default:
584		break;
585	}
586}
587
588/*
589 * Return latency between "from" and "to" lgroups
590 *
591 * This latency number can only be used for relative comparison
592 * between lgroups on the running system, cannot be used across platforms,
593 * and may not reflect the actual latency.  It is platform and implementation
594 * specific, so platform gets to decide its value.  It would be nice if the
595 * number was at least proportional to make comparisons more meaningful though.
596 * NOTE: The numbers below are supposed to be load latencies for uncached
597 * memory divided by 10.
598 */
599int
600plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
601{
602	/*
603	 * Return min remote latency when there are more than two lgroups
604	 * (root and child) and getting latency between two different lgroups
605	 * or root is involved
606	 */
607	if (lgrp_optimizations() && (from != to ||
608	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
609		return (28);
610	else
611		return (23);
612}
613
614/* ARGSUSED */
615void
616plat_freelist_process(int mnode)
617{
618}
619
620/*
621 * Find dip for chosen IOSRAM
622 */
623dev_info_t *
624find_chosen_dip(void)
625{
626	dev_info_t	*dip;
627	char		master_sbbc[MAXNAMELEN];
628	int		nodeid;
629	uint_t		tunnel;
630
631	/*
632	 * find the /chosen SBBC node, prom interface will handle errors
633	 */
634	nodeid = prom_chosennode();
635	/*
636	 * get the 'iosram' property from the /chosen node
637	 */
638	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
639		SBBC_ERR(CE_PANIC, "No iosram property found! \n");
640	}
641
642	if (prom_phandle_to_path((phandle_t)tunnel, master_sbbc,
643	    sizeof (master_sbbc)) < 0) {
644		SBBC_ERR1(CE_PANIC, "prom_phandle_to_path(%d) failed\n",
645		    tunnel);
646	}
647
648	chosen_nodeid = nodeid;
649
650	/*
651	 * load and attach the sgsbbc driver.
652	 * This will also attach all the sgsbbc driver instances
653	 */
654	if (i_ddi_attach_hw_nodes("sgsbbc") != DDI_SUCCESS) {
655		cmn_err(CE_WARN, "sgsbbc failed to load\n");
656	}
657	/* translate a path name to a dev_info_t */
658	dip = e_ddi_hold_devi_by_path(master_sbbc, 0);
659	if ((dip == NULL) || (ddi_get_nodeid(dip) != tunnel)) {
660		cmn_err(CE_PANIC,
661		    "e_ddi_hold_devi_by_path(%x) failed for SBBC\n", tunnel);
662	}
663
664	/* make sure devi_ref is ZERO */
665	ndi_rele_devi(dip);
666	DCMNERR(CE_CONT, "Chosen IOSRAM is at %s \n", master_sbbc);
667
668	return (dip);
669}
670
671void
672load_platform_drivers(void)
673{
	int ret = 0;
675
676	/*
677	 * Load the mc-us3 memory driver.
678	 */
679	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
680		cmn_err(CE_WARN, "mc-us3 failed to load");
681	else
682		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));
683
684	/*
685	 * Initialize the chosen IOSRAM before its clients
686	 * are loaded.
687	 */
688	(void) find_chosen_dip();
689
690	/*
691	 * Load the environmentals driver (sgenv)
692	 *
693	 * We need this driver to handle events from the SC when state
694	 * changes occur in the environmental data.
695	 */
696	if (i_ddi_attach_hw_nodes("sgenv") != DDI_SUCCESS)
697		cmn_err(CE_WARN, "sgenv failed to load");
698
699	/*
700	 * Ideally, we'd do this in set_platform_defaults(), but
701	 * at that point it's too early to look up symbols.
702	 */
703	iosram_write_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
704	    modgetsymvalue("iosram_write", 0);
705
706	if (iosram_write_ptr == NULL) {
		DCMNERR(CE_WARN, "load_platform_drivers: iosram_write()"
708		    " not found; signatures will not be updated\n");
709	} else {
710		/*
711		 * The iosram read ptr is only needed if we can actually
712		 * write CPU signatures, so only bother setting it if we
713		 * set a valid write pointer, above.
714		 */
715		iosram_read_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
716		    modgetsymvalue("iosram_read", 0);
717
718		if (iosram_read_ptr == NULL)
			DCMNERR(CE_WARN, "load_platform_drivers: iosram_read()"
720			    " not found\n");
721	}
722
723	/*
724	 * Set todsg_use_sc to TRUE so that we will be getting date
725	 * from the SC.
726	 */
727	todsg_use_sc = TRUE;
728
729	/*
730	 * Now is a good time to activate hardware watchdog (if one exists).
731	 */
732	mutex_enter(&tod_lock);
733	if (watchdog_enable)
734		ret = tod_ops.tod_set_watchdog_timer(watchdog_timeout_seconds);
735	mutex_exit(&tod_lock);
736	if (ret != 0)
737		printf("Hardware watchdog enabled\n");
738
739	plat_ecc_init();
740}
741
742/*
743 * No platform drivers on this platform
744 */
745char *platform_module_list[] = {
746	(char *)0
747};
748
749/*ARGSUSED*/
750void
751plat_tod_fault(enum tod_fault_type tod_bad)
752{
753}
754int
755plat_max_boards()
756{
757	return (SG_MAX_BDS);
758}
759int
760plat_max_io_units_per_board()
761{
762	return (SG_MAX_IO_PER_BD);
763}
764int
765plat_max_cmp_units_per_board()
766{
767	return (SG_MAX_CMPS_PER_BD);
768}
769int
770plat_max_cpu_units_per_board()
771{
772	return (SG_MAX_CPUS_PER_BD);
773}
774
775int
776plat_max_mc_units_per_board()
777{
778	return (SG_MAX_CMPS_PER_BD); /* each CPU die has a memory controller */
779}
780
781int
782plat_max_mem_units_per_board()
783{
784	return (SG_MAX_MEM_PER_BD);
785}
786
787int
788plat_max_cpumem_boards(void)
789{
790	return (LW8_MAX_CPU_BDS);
791}
792
793int
794set_platform_max_ncpus(void)
795{
796	return (sg_max_ncpus);
797}
798
799void
800plat_dmv_params(uint_t *hwint, uint_t *swint)
801{
802	*hwint = MAX_UPA;
803	*swint = 0;
804}
805
806static int (*sg_mbox)(sbbc_msg_t *, sbbc_msg_t *, time_t) = NULL;
807
808/*
809 * Our nodename has been set, pass it along to the SC.
810 */
811void
812plat_nodename_set(void)
813{
814	sbbc_msg_t	req;	/* request */
815	sbbc_msg_t	resp;	/* response */
816	int		rv;	/* return value from call to mbox */
817	struct nodename_info {
818		int32_t	namelen;
819		char	nodename[_SYS_NMLN];
820	} nni;
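
	/*
	 * As constructed below, the payload handed to the SC is simply this
	 * struct: a 32-bit name length followed by the nodename characters,
	 * with msg_len trimmed to namelen + sizeof (namelen) so that the
	 * unused tail of the fixed-size array is not sent.
	 */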
821
822	/*
823	 * find the symbol for the mailbox routine
824	 */
825	if (sg_mbox == NULL)
826		sg_mbox = (int (*)(sbbc_msg_t *, sbbc_msg_t *, time_t))
827		    modgetsymvalue("sbbc_mbox_request_response", 0);
828
829	if (sg_mbox == NULL) {
830		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox not found\n");
831		return;
832	}
833
834	/*
835	 * construct the message telling the SC our nodename
836	 */
837	(void) strcpy(nni.nodename, utsname.nodename);
838	nni.namelen = (int32_t)strlen(nni.nodename);
839
840	req.msg_type.type = INFO_MBOX;
841	req.msg_type.sub_type = INFO_MBOX_NODENAME;
842	req.msg_status = 0;
843	req.msg_len = (int)(nni.namelen + sizeof (nni.namelen));
844	req.msg_bytes = 0;
845	req.msg_buf = (caddr_t)&nni;
846	req.msg_data[0] = 0;
847	req.msg_data[1] = 0;
848
849	/*
850	 * initialize the response back from the SC
851	 */
852	resp.msg_type.type = INFO_MBOX;
853	resp.msg_type.sub_type = INFO_MBOX_NODENAME;
854	resp.msg_status = 0;
855	resp.msg_len = 0;
856	resp.msg_bytes = 0;
857	resp.msg_buf = (caddr_t)0;
858	resp.msg_data[0] = 0;
859	resp.msg_data[1] = 0;
860
861	/*
862	 * ship it and check for success
863	 */
864	rv = (sg_mbox)(&req, &resp, sbbc_mbox_default_timeout);
865
866	if (rv != 0) {
867		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox retval %d\n", rv);
868	} else if (resp.msg_status != 0) {
869		cmn_err(CE_NOTE, "!plat_nodename_set: msg_status %d\n",
870		    resp.msg_status);
871	} else {
872		DCMNERR(CE_NOTE, "!plat_nodename_set was successful\n");
873
874		/*
		 * It is necessary to exchange the capability bitmap with
		 * the SC before sending any ECC error or indictment
		 * information, so plat_ecc_capability_send() is called
		 * here, just after the nodename has been sent successfully.
879		 */
880		rv = plat_ecc_capability_send();
881		if (rv == 0) {
			DCMNERR(CE_NOTE, "!plat_ecc_capability_send was"
			    " successful\n");
884		}
885	}
886}
887
888/*
 * flag to allow users to switch between using OBP's
890 * prom_get_unum() and mc-us3 driver's p2get_mem_unum()
891 * (for main memory errors only).
892 */
893int sg_use_prom_get_unum = 0;
894
895/*
896 * Debugging flag: set to 1 to call into obp for get_unum, or set it to 0
897 * to call into the unum cache system.  This is the E$ equivalent of
898 * sg_use_prom_get_unum.
899 */
900int sg_use_prom_ecache_unum = 0;
901
902/* used for logging ECC errors to the SC */
903#define	SG_MEMORY_ECC	1
904#define	SG_ECACHE_ECC	2
905#define	SG_UNKNOWN_ECC	(-1)
906
907/*
908 * plat_get_mem_unum() generates a string identifying either the
909 * memory or E$ DIMM(s) during error logging. Depending on whether
910 * the error is E$ or memory related, the appropriate support
911 * routine is called to assist in the string generation.
912 *
 * - For main memory errors we can use the mc-us3 driver's p2get_mem_unum()
914 *   (or prom_get_unum() for debugging purposes).
915 *
916 * - For E$ errors we call sg_get_ecacheunum() to generate the unum (or
917 *   prom_serengeti_get_ecacheunum() for debugging purposes).
918 */
919
920static int
921sg_prom_get_unum(int synd_code, uint64_t paddr, char *buf, int buflen,
922    int *lenp)
923{
924	if ((prom_get_unum(synd_code, (unsigned long long)paddr,
925	    buf, buflen, lenp)) != 0)
926		return (EIO);
927	else if (*lenp <= 1)
928		return (EINVAL);
929	else
930		return (0);
931}
932
933/*ARGSUSED*/
934int
935plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
936    int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
937{
938	/*
939	 * unum_func will either point to the memory drivers p2get_mem_unum()
940	 * or to prom_get_unum() for memory errors.
941	 */
942	int (*unum_func)(int synd_code, uint64_t paddr, char *buf,
943	    int buflen, int *lenp) = p2get_mem_unum;
944
945	/*
946	 * check if it's a Memory or an Ecache error.
947	 */
948	if (flt_in_memory) {
949		/*
950		 * It's a main memory error.
951		 *
952		 * For debugging we allow the user to switch between
953		 * using OBP's get_unum and the memory driver's get_unum
954		 * so we create a pointer to the functions and switch
955		 * depending on the sg_use_prom_get_unum flag.
956		 */
957		if (sg_use_prom_get_unum) {
958			DCMNERR(CE_NOTE, "Using prom_get_unum from OBP");
959			return (sg_prom_get_unum(synd_code,
960			    P2ALIGN(flt_addr, 8), buf, buflen, lenp));
961		} else if (unum_func != NULL) {
962			return (unum_func(synd_code, P2ALIGN(flt_addr, 8),
963			    buf, buflen, lenp));
964		} else {
965			return (ENOTSUP);
966		}
967	} else if (flt_status & ECC_ECACHE) {
968		/*
969		 * It's an E$ error.
970		 */
971		if (sg_use_prom_ecache_unum) {
972			/*
973			 * We call to OBP to handle this.
974			 */
975			DCMNERR(CE_NOTE,
976			    "Using prom_serengeti_get_ecacheunum from OBP");
977			if (prom_serengeti_get_ecacheunum(flt_bus_id,
978			    P2ALIGN(flt_addr, 8), buf, buflen, lenp) != 0) {
979				return (EIO);
980			}
981		} else {
982			return (sg_get_ecacheunum(flt_bus_id, flt_addr,
983			    buf, buflen, lenp));
984		}
985	} else {
986		return (ENOTSUP);
987	}
988
989	return (0);
990}
991
992/*
993 * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
994 * driver giving each platform the opportunity to add platform
995 * specific label information to the unum for ECC error logging purposes.
996 */
997void
998plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
999{
1000	char	new_unum[UNUM_NAMLEN] = "";
1001	int	node = SG_PORTID_TO_NODEID(mcid);
1002	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(mcid);
1003	int	position = SG_PORTID_TO_CPU_POSN(mcid);
1004
1005	/*
1006	 * The mc-us3 driver deals with logical banks but for unum
1007	 * purposes we need to use physical banks so that the correct
1008	 * dimm can be physically located. Logical banks 0 and 2
1009	 * make up physical bank 0. Logical banks 1 and 3 make up
1010	 * physical bank 1. Here we do the necessary conversion.
1011	 */
1012	bank = (bank % 2);
1013
1014	if (dimm == -1) {
1015		SG_SET_FRU_NAME_NODE(new_unum, node);
1016		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1017		SG_SET_FRU_NAME_MODULE(new_unum, position);
1018		SG_SET_FRU_NAME_BANK(new_unum, bank);
1019
1020	} else {
1021		SG_SET_FRU_NAME_NODE(new_unum, node);
1022		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1023		SG_SET_FRU_NAME_MODULE(new_unum, position);
1024		SG_SET_FRU_NAME_BANK(new_unum, bank);
1025		SG_SET_FRU_NAME_DIMM(new_unum, dimm);
1026
1027		(void) strcat(new_unum, " ");
1028		(void) strcat(new_unum, unum);
1029	}
1030
1031	(void) strcpy(unum, new_unum);
1032}
1033
1034int
1035plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
1036{
1037	int	node = SG_PORTID_TO_NODEID(cpuid);
1038	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(cpuid);
1039
1040	if (snprintf(buf, buflen, "/N%d/%s%d", node,
1041	    SG_HPU_TYPE_CPU_BOARD_ID, board) >= buflen) {
1042		return (ENOSPC);
1043	} else {
1044		*lenp = strlen(buf);
1045		return (0);
1046	}
1047}
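
/*
 * A worked example, assuming SG_HPU_TYPE_CPU_BOARD_ID expands to the
 * string "SB": a CPU on node 0, board 4 would yield the unum "/N0/SB4"
 * and a *lenp of 7.
 */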
1048
1049static void (*sg_ecc_taskq_func)(sbbc_ecc_mbox_t *) = NULL;
1050static int (*sg_ecc_mbox_func)(sbbc_ecc_mbox_t *) = NULL;
1051
1052/*
1053 * We log all ECC errors to the SC so we send a mailbox
1054 * message to the SC passing it the relevant data.
1055 * ECC mailbox messages are sent via a taskq mechanism to
1056 * prevent impaired system performance during ECC floods.
1057 * Indictments have already passed through a taskq, so they
1058 * are not queued here.
1059 */
1060int
1061plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1062{
1063	sbbc_ecc_mbox_t	*msgp;
1064	uint16_t	msg_subtype;
1065	int		sleep_flag, log_error;
1066	size_t		msg_size;
1067
1068	if (sg_ecc_taskq_func == NULL) {
1069		sg_ecc_taskq_func = (void (*)(sbbc_ecc_mbox_t *))
1070		    modgetsymvalue("sbbc_mbox_queue_ecc_event", 0);
1071		if (sg_ecc_taskq_func == NULL) {
1072			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1073			    "sbbc_mbox_queue_ecc_event not found");
1074			return (ENODEV);
1075		}
1076	}
1077	if (sg_ecc_mbox_func == NULL) {
1078		sg_ecc_mbox_func = (int (*)(sbbc_ecc_mbox_t *))
1079		    modgetsymvalue("sbbc_mbox_ecc_output", 0);
1080		if (sg_ecc_mbox_func == NULL) {
1081			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1082			    "sbbc_mbox_ecc_output not found");
1083			return (ENODEV);
1084		}
1085	}
1086
1087	/*
1088	 * Initialize the request and response structures
1089	 */
1090	switch (msg_type) {
1091	case PLAT_ECC_ERROR_MESSAGE:
1092		msg_subtype = INFO_MBOX_ERROR_ECC;
1093		msg_size = sizeof (plat_ecc_error_data_t);
1094		sleep_flag = KM_NOSLEEP;
1095		log_error = 1;
1096		break;
1097	case PLAT_ECC_ERROR2_MESSAGE:
1098		msg_subtype = INFO_MBOX_ECC;
1099		msg_size = sizeof (plat_ecc_error2_data_t);
1100		sleep_flag = KM_NOSLEEP;
1101		log_error = 1;
1102		break;
1103	case PLAT_ECC_INDICTMENT_MESSAGE:
1104		msg_subtype = INFO_MBOX_ERROR_INDICT;
1105		msg_size = sizeof (plat_ecc_indictment_data_t);
1106		sleep_flag = KM_SLEEP;
1107		log_error = 0;
1108		break;
1109	case PLAT_ECC_INDICTMENT2_MESSAGE:
1110		msg_subtype = INFO_MBOX_ECC;
1111		msg_size = sizeof (plat_ecc_indictment2_data_t);
1112		sleep_flag = KM_SLEEP;
1113		log_error = 0;
1114		break;
1115	case PLAT_ECC_CAPABILITY_MESSAGE:
1116		msg_subtype = INFO_MBOX_ECC_CAP;
1117		msg_size = sizeof (plat_capability_data_t) +
1118		    strlen(utsname.release) + strlen(utsname.version) + 2;
1119		sleep_flag = KM_SLEEP;
1120		log_error = 0;
1121		break;
1122	case PLAT_ECC_DIMM_SID_MESSAGE:
1123		msg_subtype = INFO_MBOX_ECC;
1124		msg_size = sizeof (plat_dimm_sid_request_data_t);
1125		sleep_flag = KM_SLEEP;
1126		log_error = 0;
1127		break;
1128	default:
1129		return (EINVAL);
1130	}
1131
1132	msgp = (sbbc_ecc_mbox_t *)kmem_zalloc(sizeof (sbbc_ecc_mbox_t),
1133	    sleep_flag);
1134	if (msgp == NULL) {
1135		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1136		    "unable to allocate sbbc_ecc_mbox");
1137		return (ENOMEM);
1138	}
1139
1140	msgp->ecc_log_error = log_error;
1141
1142	msgp->ecc_req.msg_type.type = INFO_MBOX;
1143	msgp->ecc_req.msg_type.sub_type = msg_subtype;
1144	msgp->ecc_req.msg_status = 0;
1145	msgp->ecc_req.msg_len = (int)msg_size;
1146	msgp->ecc_req.msg_bytes = 0;
1147	msgp->ecc_req.msg_buf = (caddr_t)kmem_zalloc(msg_size, sleep_flag);
1148	msgp->ecc_req.msg_data[0] = 0;
1149	msgp->ecc_req.msg_data[1] = 0;
1150
1151	if (msgp->ecc_req.msg_buf == NULL) {
1152		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1153		    "unable to allocate request msg_buf");
1154		kmem_free((void *)msgp, sizeof (sbbc_ecc_mbox_t));
1155		return (ENOMEM);
1156	}
1157
1158	bcopy(datap, (void *)msgp->ecc_req.msg_buf, msg_size);
1159
1160	/*
1161	 * initialize the response back from the SC
1162	 */
1163	msgp->ecc_resp.msg_type.type = INFO_MBOX;
1164	msgp->ecc_resp.msg_type.sub_type = msg_subtype;
1165	msgp->ecc_resp.msg_status = 0;
1166	msgp->ecc_resp.msg_len = 0;
1167	msgp->ecc_resp.msg_bytes = 0;
1168	msgp->ecc_resp.msg_buf = NULL;
1169	msgp->ecc_resp.msg_data[0] = 0;
1170	msgp->ecc_resp.msg_data[1] = 0;
1171
1172	switch (msg_type) {
1173	case PLAT_ECC_ERROR_MESSAGE:
1174	case PLAT_ECC_ERROR2_MESSAGE:
1175		/*
1176		 * For Error Messages, we go through a taskq.
1177		 * Queue up message for processing
1178		 */
1179		(*sg_ecc_taskq_func)(msgp);
1180		return (0);
1181
1182	case PLAT_ECC_CAPABILITY_MESSAGE:
1183		/*
1184		 * For indictment and capability messages, we've already gone
1185		 * through the taskq, so we can call the mailbox routine
1186		 * directly.  Find the symbol for the routine that sends
1187		 * the mailbox msg
1188		 */
1189		msgp->ecc_resp.msg_len = (int)msg_size;
1190		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(msg_size,
1191		    sleep_flag);
1192		/* FALLTHRU */
1193
1194	case PLAT_ECC_INDICTMENT_MESSAGE:
1195	case PLAT_ECC_INDICTMENT2_MESSAGE:
1196		return ((*sg_ecc_mbox_func)(msgp));
1197
1198	case PLAT_ECC_DIMM_SID_MESSAGE:
1199		msgp->ecc_resp.msg_len = sizeof (plat_dimm_sid_board_data_t);
1200		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(
1201		    sizeof (plat_dimm_sid_board_data_t), sleep_flag);
1202
1203		return ((*sg_ecc_mbox_func)(msgp));
1204
1205	default:
1206		ASSERT(0);
1207		return (EINVAL);
1208	}
1209}
1210
1211/*
 * m is redundant on Serengeti as the multiplier is always 4
1213 */
1214/*ARGSUSED*/
1215int
1216plat_make_fru_cpuid(int sb, int m, int proc)
1217{
1218	return (MAKE_CPUID(sb, proc));
1219}
1220
1221/*
1222 * board number for a given proc
1223 */
1224int
1225plat_make_fru_boardnum(int proc)
1226{
1227	return (SG_PORTID_TO_BOARD_NUM(proc));
1228}
1229
1230static
1231void
1232cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
1233{
1234	uint32_t signature = CPU_SIG_BLD(sig, state, sub_state);
1235	sig_state_t current_sgn;
1236	int i;
1237
1238	if (iosram_write_ptr == NULL) {
1239		/*
1240		 * If the IOSRAM write pointer isn't set, we won't be able
1241		 * to write signatures to ANYTHING, so we may as well just
1242		 * write out an error message (if desired) and exit this
1243		 * routine now...
1244		 */
1245		DCMNERR(CE_WARN,
1246		    "cpu_sgn_update: iosram_write() not found;"
1247		    " cannot write signature 0x%x for CPU(s) or domain\n",
1248		    signature);
1249		return;
1250	}
1251
1252
1253	/*
1254	 * Differentiate a panic reboot from a non-panic reboot in the
1255	 * setting of the substate of the signature.
1256	 *
1257	 * If the new substate is REBOOT and we're rebooting due to a panic,
1258	 * then set the new substate to a special value indicating a panic
1259	 * reboot, SIGSUBST_PANIC_REBOOT.
1260	 *
1261	 * A panic reboot is detected by a current (previous) domain signature
1262	 * state of SIGST_EXIT, and a new signature substate of SIGSUBST_REBOOT.
1263	 * The domain signature state SIGST_EXIT is used as the panic flow
1264	 * progresses.
1265	 *
	 * At the end of the panic flow, the reboot occurs but we should note
	 * that it was involuntary, something that may be quite useful to know
1268	 * at OBP level.
1269	 */
1270	if (sub_state == SIGSUBST_REBOOT) {
1271		if (iosram_read_ptr == NULL) {
1272			DCMNERR(CE_WARN,
1273			    "cpu_sgn_update: iosram_read() not found;"
1274			    " could not check current domain signature\n");
1275		} else {
1276			(void) (*iosram_read_ptr)(SBBC_SIGBLCK_KEY,
1277			    SG_SGNBLK_DOMAINSIG_OFFSET,
1278			    (char *)&current_sgn, sizeof (current_sgn));
1279			if (current_sgn.state_t.state == SIGST_EXIT)
1280				signature = CPU_SIG_BLD(sig, state,
1281				    SIGSUBST_PANIC_REBOOT);
1282		}
1283	}
1284
1285	/*
1286	 * cpuid == -1 indicates that the operation applies to all cpus.
1287	 */
1288	if (cpuid >= 0) {
1289		(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1290		    SG_SGNBLK_CPUSIG_OFFSET(cpuid), (char *)&signature,
1291		    sizeof (signature));
1292	} else {
1293		for (i = 0; i < NCPU; i++) {
1294			if (cpu[i] == NULL || !(cpu[i]->cpu_flags &
1295			    (CPU_EXISTS|CPU_QUIESCED))) {
1296				continue;
1297			}
1298			(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1299			    SG_SGNBLK_CPUSIG_OFFSET(i), (char *)&signature,
1300			    sizeof (signature));
1301		}
1302	}
1303
1304	if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
1305		return;
1306	}
1307
1308	(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1309	    SG_SGNBLK_DOMAINSIG_OFFSET, (char *)&signature,
1310	    sizeof (signature));
1311}
1312
1313void
1314startup_platform(void)
1315{
1316}
1317
1318/*
1319 * A routine to convert a number (represented as a string) to
1320 * the integer value it represents.
1321 */
1322
1323static int
1324isdigit(int ch)
1325{
1326	return (ch >= '0' && ch <= '9');
1327}
1328
1329#define	isspace(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
1330
1331static int
1332strtoi(char *p, char **pos)
1333{
1334	int n;
1335	int c, neg = 0;
1336
1337	if (!isdigit(c = *p)) {
1338		while (isspace(c))
1339			c = *++p;
1340		switch (c) {
1341			case '-':
1342				neg++;
1343				/* FALLTHROUGH */
1344			case '+':
1345			c = *++p;
1346		}
1347		if (!isdigit(c)) {
1348			if (pos != NULL)
1349				*pos = p;
1350			return (0);
1351		}
1352	}
1353	for (n = '0' - c; isdigit(c = *++p); ) {
1354		n *= 10; /* two steps to avoid unnecessary overflow */
1355		n += '0' - c; /* accum neg to avoid surprises at MAX */
1356	}
1357	if (pos != NULL)
1358		*pos = p;
1359	return (neg ? n : -n);
1360}
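
/*
 * strtoi() is a deliberately small strtol() work-alike used only by the
 * version parsing below: it skips leading whitespace, honors an optional
 * sign, and leaves *pos pointing at the first non-digit.  For example,
 * strtoi("11.200", &ep) returns 11 and leaves ep pointing at ".200".
 */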
1361
1362/*
1363 * Get the three parts of the Serengeti PROM version.
1364 * Used for feature readiness tests.
1365 *
1366 * Return 0 if version extracted successfully, -1 otherwise.
1367 */
1368
1369int
1370sg_get_prom_version(int *sysp, int *intfp, int *bldp)
1371{
1372	int plen;
1373	char vers[512];
1374	static pnode_t node;
1375	static char version[] = "version";
1376	char *verp, *ep;
1377
1378	node = prom_finddevice("/openprom");
1379	if (node == OBP_BADNODE)
1380		return (-1);
1381
1382	plen = prom_getproplen(node, version);
1383	if (plen <= 0 || plen >= sizeof (vers))
1384		return (-1);
1385	(void) prom_getprop(node, version, vers);
1386	vers[plen] = '\0';
1387
1388	/* Make sure it's an OBP flashprom */
	if (vers[0] != 'O' || vers[1] != 'B' || vers[2] != 'P') {
1390		cmn_err(CE_WARN, "sg_get_prom_version: "
1391		    "unknown <version> string in </openprom>\n");
1392		return (-1);
1393	}
1394	verp = &vers[4];
1395
1396	*sysp = strtoi(verp, &ep);
1397	if (ep == verp || *ep != '.')
1398		return (-1);
1399	verp = ep + 1;
1400
1401	*intfp = strtoi(verp, &ep);
1402	if (ep == verp || *ep != '.')
1403		return (-1);
1404	verp = ep + 1;
1405
1406	*bldp = strtoi(verp, &ep);
1407	if (ep == verp || (*ep != '\0' && !isspace(*ep)))
1408		return (-1);
1409	return (0);
1410}
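
/*
 * For illustration only: given a hypothetical /openprom "version" property
 * of "OBP 5.11.200 2002/08/01 12:00", sg_get_prom_version() skips the
 * leading "OBP " prefix and returns *sysp = 5, *intfp = 11 and *bldp = 200.
 */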
1411
1412/*
1413 * Return 0 if system board Dynamic Reconfiguration
1414 * is supported by the firmware, -1 otherwise.
1415 */
1416int
1417sg_prom_sb_dr_check(void)
1418{
1419	static int prom_res = 1;
1420
1421	if (prom_res == 1) {
1422		int sys, intf, bld;
1423		int rv;
1424
1425		rv = sg_get_prom_version(&sys, &intf, &bld);
1426		if (rv == 0 && sys == 5 &&
1427		    (intf >= 12 || (intf == 11 && bld >= 200))) {
1428			prom_res = 0;
1429		} else {
1430			prom_res = -1;
1431		}
1432	}
1433	return (prom_res);
1434}
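
/*
 * To make the version gate above concrete: firmware 5.11.200, 5.12.x and
 * later 5.x releases satisfy the check (sg_prom_sb_dr_check() returns 0),
 * while 5.11.199 or older returns -1; the result is computed once and
 * cached in prom_res.
 */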
1435
1436/*
1437 * Return 0 if cPCI Dynamic Reconfiguration
1438 * is supported by the firmware, -1 otherwise.
1439 */
1440int
1441sg_prom_cpci_dr_check(void)
1442{
1443	/*
1444	 * The version check is currently the same as for
1445	 * system boards. Since the two DR sub-systems are
1446	 * independent, this could change.
1447	 */
1448	return (sg_prom_sb_dr_check());
1449}
1450
1451/*
1452 * Our implementation of this KDI op updates the CPU signature in the system
1453 * controller.  Note that we set the signature to OBP_SIG, rather than DBG_SIG.
1454 * The Forth words we execute will, among other things, transform our OBP_SIG
1455 * into DBG_SIG.  They won't function properly if we try to use DBG_SIG.
1456 */
1457static void
1458sg_system_claim(void)
1459{
1460	lbolt_debug_entry();
1461
1462	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
1463}
1464
1465static void
1466sg_system_release(void)
1467{
1468	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
1469
1470	lbolt_debug_return();
1471}
1472
1473static void
1474sg_console_claim(void)
1475{
1476	(void) prom_serengeti_set_console_input(SGCN_OBP_STR);
1477}
1478
1479static void
1480sg_console_release(void)
1481{
1482	(void) prom_serengeti_set_console_input(SGCN_CLNT_STR);
1483}
1484
1485void
1486plat_kdi_init(kdi_t *kdi)
1487{
1488	kdi->pkdi_system_claim = sg_system_claim;
1489	kdi->pkdi_system_release = sg_system_release;
1490	kdi->pkdi_console_claim = sg_console_claim;
1491	kdi->pkdi_console_release = sg_console_release;
1492}
1493