1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 * Copyright (c) 2016 by Delphix. All rights reserved.
25 */
26
27#include <sys/conf.h>
28#include <sys/kmem.h>
29#include <sys/debug.h>
30#include <sys/modctl.h>
31#include <sys/autoconf.h>
32#include <sys/hwconf.h>
33#include <sys/ddi_impldefs.h>
34#include <sys/ddi.h>
35#include <sys/sunddi.h>
36#include <sys/sunndi.h>
37#include <sys/ndi_impldefs.h>
38#include <sys/machsystm.h>
39#include <sys/fcode.h>
40#include <sys/promif.h>
41#include <sys/promimpl.h>
42#include <sys/opl_cfg.h>
43#include <sys/scfd/scfostoescf.h>
44
45static unsigned int		opl_cfg_inited;
46static opl_board_cfg_t		opl_boards[HWD_SBS_PER_DOMAIN];
47
48/*
49 * Module control operations
50 */
51
52extern struct mod_ops mod_miscops;
53
54static struct modlmisc modlmisc = {
55	&mod_miscops,				/* Type of module */
56	"OPL opl_cfg"
57};
58
59static struct modlinkage modlinkage = {
60	MODREV_1, (void *)&modlmisc, NULL
61};
62
63static int	opl_map_in(dev_info_t *, fco_handle_t, fc_ci_t *);
64static int	opl_map_out(dev_info_t *, fco_handle_t, fc_ci_t *);
65static int	opl_register_fetch(dev_info_t *, fco_handle_t, fc_ci_t *);
66static int	opl_register_store(dev_info_t *, fco_handle_t, fc_ci_t *);
67
68static int	opl_claim_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
69static int	opl_release_memory(dev_info_t *, fco_handle_t, fc_ci_t *);
70static int	opl_vtop(dev_info_t *, fco_handle_t, fc_ci_t *);
71
72static int	opl_config_child(dev_info_t *, fco_handle_t, fc_ci_t *);
73
74static int	opl_get_fcode_size(dev_info_t *, fco_handle_t, fc_ci_t *);
75static int	opl_get_fcode(dev_info_t *, fco_handle_t, fc_ci_t *);
76
77static int	opl_map_phys(dev_info_t *, struct regspec *,  caddr_t *,
78				ddi_device_acc_attr_t *, ddi_acc_handle_t *);
79static void	opl_unmap_phys(ddi_acc_handle_t *);
80static int	opl_get_hwd_va(dev_info_t *, fco_handle_t, fc_ci_t *);
81static int	opl_master_interrupt(dev_info_t *, fco_handle_t, fc_ci_t *);
82
83extern int	prom_get_fcode_size(char *);
84extern int	prom_get_fcode(char *, char *);
85
86static int	master_interrupt_init(uint32_t, uint32_t);
87
88#define	PROBE_STR_SIZE	64
89#define	UNIT_ADDR_SIZE	64
90
91opl_fc_ops_t	opl_fc_ops[] = {
92
93	{	FC_MAP_IN,		opl_map_in},
94	{	FC_MAP_OUT,		opl_map_out},
95	{	"rx@",			opl_register_fetch},
96	{	FC_RL_FETCH,		opl_register_fetch},
97	{	FC_RW_FETCH,		opl_register_fetch},
98	{	FC_RB_FETCH,		opl_register_fetch},
99	{	"rx!",			opl_register_store},
100	{	FC_RL_STORE,		opl_register_store},
101	{	FC_RW_STORE,		opl_register_store},
102	{	FC_RB_STORE,		opl_register_store},
103	{	"claim-memory",		opl_claim_memory},
104	{	"release-memory",	opl_release_memory},
105	{	"vtop",			opl_vtop},
106	{	FC_CONFIG_CHILD,	opl_config_child},
107	{	FC_GET_FCODE_SIZE,	opl_get_fcode_size},
108	{	FC_GET_FCODE,		opl_get_fcode},
109	{	"get-hwd-va",		opl_get_hwd_va},
110	{	"master-interrupt",	opl_master_interrupt},
111	{	NULL,			NULL}
112
113};
114
115extern caddr_t	efcode_vaddr;
116extern int	efcode_size;
117
118#ifdef DEBUG
119#define	HWDDUMP_OFFSETS		1
120#define	HWDDUMP_ALL_STATUS	2
121#define	HWDDUMP_CHUNKS		3
122#define	HWDDUMP_SBP		4
123
124int		hwddump_flags = HWDDUMP_SBP | HWDDUMP_CHUNKS;
125#endif
126
127static int	master_interrupt_inited = 0;
128
129int
130_init()
131{
132	int	err = 0;
133
134	/*
135	 * Create a resource map for the contiguous memory allocated
136	 * at start-of-day in startup.c
137	 */
138	err = ndi_ra_map_setup(ddi_root_node(), "opl-fcodemem");
139	if (err == NDI_FAILURE) {
140		cmn_err(CE_WARN, "Cannot setup resource map opl-fcodemem\n");
141		return (1);
142	}
143
144	/*
145	 * Put the allocated memory into the pool.
146	 */
147	(void) ndi_ra_free(ddi_root_node(), (uint64_t)efcode_vaddr,
148	    (uint64_t)efcode_size, "opl-fcodemem", 0);
149
150	if ((err = mod_install(&modlinkage)) != 0) {
151		cmn_err(CE_WARN, "opl_cfg failed to load, error=%d", err);
152		(void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
153	}
154
155	return (err);
156}
157
158int
159_fini(void)
160{
161	int ret;
162
163	ret = (mod_remove(&modlinkage));
164	if (ret != 0)
165		return (ret);
166
167	(void) ndi_ra_map_destroy(ddi_root_node(), "opl-fcodemem");
168
169	return (ret);
170}
171
172int
173_info(modinfop)
174struct modinfo *modinfop;
175{
176	return (mod_info(&modlinkage, modinfop));
177}
178
179#ifdef DEBUG
180static void
181opl_dump_hwd(opl_probe_t *probe)
182{
183	hwd_header_t		*hdrp;
184	hwd_sb_status_t		*statp;
185	hwd_domain_info_t	*dinfop;
186	hwd_sb_t		*sbp;
187	hwd_cpu_chip_t		*chips;
188	hwd_pci_ch_t		*channels;
189	int			board, i, status;
190
191	board = probe->pr_board;
192
193	hdrp = probe->pr_hdr;
194	statp = probe->pr_sb_status;
195	dinfop = probe->pr_dinfo;
196	sbp = probe->pr_sb;
197
198	printf("HWD: board %d\n", board);
199	printf("HWD:magic = 0x%x\n", hdrp->hdr_magic);
200	printf("HWD:version = 0x%x.%x\n", hdrp->hdr_version.major,
201	    hdrp->hdr_version.minor);
202
203	if (hwddump_flags & HWDDUMP_OFFSETS) {
204		printf("HWD:status offset = 0x%x\n",
205		    hdrp->hdr_sb_status_offset);
206		printf("HWD:domain offset = 0x%x\n",
207		    hdrp->hdr_domain_info_offset);
208		printf("HWD:board offset = 0x%x\n", hdrp->hdr_sb_info_offset);
209	}
210
211	if (hwddump_flags & HWDDUMP_SBP)
212		printf("HWD:sb_t ptr = 0x%p\n", (void *)probe->pr_sb);
213
214	if (hwddump_flags & HWDDUMP_ALL_STATUS) {
215		int bd;
216		printf("HWD:board status =");
217		for (bd = 0; bd < HWD_SBS_PER_DOMAIN; bd++)
218			printf("%x ", statp->sb_status[bd]);
219		printf("\n");
220	} else {
221		printf("HWD:board status = %d\n", statp->sb_status[board]);
222	}
223
224	printf("HWD:banner name = %s\n", dinfop->dinf_banner_name);
225	printf("HWD:platform = %s\n", dinfop->dinf_platform_token);
226
227	printf("HWD:chip status:\n");
228	chips = &sbp->sb_cmu.cmu_cpu_chips[0];
229	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
230
231		status = chips[i].chip_status;
232		printf("chip[%d] = ", i);
233		if (HWD_STATUS_NONE(status))
234			printf("none");
235		else if (HWD_STATUS_FAILED(status))
236			printf("fail");
237		else if (HWD_STATUS_OK(status))
238			printf("ok");
239		printf("\n");
240	}
241
242	if (hwddump_flags & HWDDUMP_CHUNKS) {
243		int chunk;
244		hwd_memory_t *mem = &sbp->sb_cmu.cmu_memory;
245		printf("HWD:chunks:\n");
246		for (chunk = 0; chunk < HWD_MAX_MEM_CHUNKS; chunk++)
247			printf("\t%d 0x%lx 0x%lx\n", chunk,
248			    mem->mem_chunks[chunk].chnk_start_address,
249			    mem->mem_chunks[chunk].chnk_size);
250	}
251
252	printf("HWD:channel status:\n");
253	channels = &sbp->sb_pci_ch[0];
254	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
255
256		status = channels[i].pci_status;
257		printf("channels[%d] = ", i);
258		if (HWD_STATUS_NONE(status))
259			printf("none");
260		else if (HWD_STATUS_FAILED(status))
261			printf("fail");
262		else if (HWD_STATUS_OK(status))
263			printf("ok");
264		printf("\n");
265	}
266	printf("channels[%d] = ", i);
267	status = sbp->sb_cmu.cmu_ch.chan_status;
268	if (HWD_STATUS_NONE(status))
269		printf("none");
270	else if (HWD_STATUS_FAILED(status))
271		printf("fail");
272	else if (HWD_STATUS_OK(status))
273		printf("ok");
274	printf("\n");
275}
276#endif /* DEBUG */
277
278#ifdef UCTEST
279	/*
280	 * For SesamI debugging, just map the SRAM directly to a kernel
281	 * VA and read it out from there
282	 */
283
284#include <sys/vmem.h>
285#include <vm/seg_kmem.h>
286
287/*
288 * 0x4081F1323000LL is the HWD base address for LSB 0. But we need to map
289 * at page boundaries. So, we use a base address of 0x4081F1322000LL.
290 * Note that this has to match the HWD base pa set in .sesami-common-defs.
291 *
292 * The size specified for the HWD in the SCF spec is 36K. But since
293 * we adjusted the base address by 4K, we need to use 40K for the
294 * mapping size to cover the HWD. And 40K is also a multiple of the
295 * base page size.
296 */
297#define	OPL_HWD_BASE(lsb)       \
298(0x4081F1322000LL | (((uint64_t)(lsb)) << 40))
299
300	void    *opl_hwd_vaddr;
301#endif /* UCTEST */
302
303/*
304 * Get the hardware descriptor from SCF.
305 */
306
307/*ARGSUSED*/
308int
309opl_read_hwd(int board, hwd_header_t **hdrp, hwd_sb_status_t **statp,
310	hwd_domain_info_t **dinfop, hwd_sb_t **sbp)
311{
312	static int (*getinfop)(uint32_t, uint8_t, uint32_t, uint32_t *,
313	    void *) = NULL;
314	void *hwdp;
315
316	uint32_t key = KEY_ESCF;	/* required value */
317	uint8_t  type = 0x40;		/* SUB_OS_RECEIVE_HWD */
318	uint32_t transid = board;
319	uint32_t datasize = HWD_DATA_SIZE;
320
321	hwd_header_t		*hd;
322	hwd_sb_status_t		*st;
323	hwd_domain_info_t	*di;
324	hwd_sb_t		*sb;
325
326	int	ret;
327
328	if (opl_boards[board].cfg_hwd == NULL) {
329#ifdef UCTEST
330		/*
331		 * Just map the HWD in SRAM to a kernel VA
332		 */
333
334		size_t			size;
335		pfn_t			pfn;
336
337		size = 0xA000;
338
339		opl_hwd_vaddr = vmem_alloc(heap_arena, size, VM_SLEEP);
340		if (opl_hwd_vaddr == NULL) {
341			cmn_err(CE_NOTE, "No space for HWD");
342			return (-1);
343		}
344
345		pfn = btop(OPL_HWD_BASE(board));
346		hat_devload(kas.a_hat, opl_hwd_vaddr, size, pfn, PROT_READ,
347		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);
348
349		hwdp = (void *)((char *)opl_hwd_vaddr + 0x1000);
350		opl_boards[board].cfg_hwd = hwdp;
351		ret = 0;
352#else
353
354		/* find the scf_service_getinfo() function */
355		if (getinfop == NULL)
356			getinfop = (int (*)(uint32_t, uint8_t, uint32_t,
357			    uint32_t *,
358			    void *))modgetsymvalue("scf_service_getinfo", 0);
359
360		if (getinfop == NULL)
361			return (-1);
362
363		/* allocate memory to receive the data */
364		hwdp = kmem_alloc(HWD_DATA_SIZE, KM_SLEEP);
365
366		/* get the HWD */
367		ret = (*getinfop)(key, type, transid, &datasize, hwdp);
368		if (ret == 0)
369			opl_boards[board].cfg_hwd = hwdp;
370		else
371			kmem_free(hwdp, HWD_DATA_SIZE);
372#endif
373	} else {
374		hwdp = opl_boards[board].cfg_hwd;
375		ret = 0;
376	}
377
378	/* copy the data to the destination */
379	if (ret == 0) {
380		hd = (hwd_header_t *)hwdp;
381		st = (hwd_sb_status_t *)
382		    ((char *)hwdp + hd->hdr_sb_status_offset);
383		di = (hwd_domain_info_t *)
384		    ((char *)hwdp + hd->hdr_domain_info_offset);
385		sb = (hwd_sb_t *)
386		    ((char *)hwdp + hd->hdr_sb_info_offset);
387		if (hdrp != NULL)
388			*hdrp = hd;
389		if (statp != NULL)
390			*statp = st;
391		if (dinfop != NULL)
392			*dinfop = di;
393		if (sbp != NULL)
394			*sbp = sb;
395	}
396
397	return (ret);
398}
399
400/*
401 * The opl_probe_t probe structure is used to pass all sorts of parameters
402 * to callback functions during probing. It also contains a snapshot of
403 * the hardware descriptor that is taken at the beginning of a probe.
404 */
405static int
406opl_probe_init(opl_probe_t *probe)
407{
408	hwd_header_t		**hdrp;
409	hwd_sb_status_t		**statp;
410	hwd_domain_info_t	**dinfop;
411	hwd_sb_t		**sbp;
412	int			board, ret;
413
414	board = probe->pr_board;
415
416	hdrp = &probe->pr_hdr;
417	statp = &probe->pr_sb_status;
418	dinfop = &probe->pr_dinfo;
419	sbp = &probe->pr_sb;
420
421	/*
422	 * Read the hardware descriptor.
423	 */
424	ret = opl_read_hwd(board, hdrp, statp, dinfop, sbp);
425	if (ret != 0) {
426
427		cmn_err(CE_WARN, "IKP: failed to read HWD header");
428		return (-1);
429	}
430
431#ifdef DEBUG
432	opl_dump_hwd(probe);
433#endif
434	return (0);
435}
436
437/*
438 * This function is used to obtain pointers to relevant device nodes
439 * which are created by Solaris at boot time.
440 *
441 * This function walks the child nodes of a given node, extracts
442 * the "name" property, if it exists, and passes the node to a
443 * callback init function. The callback determines if this node is
444 * interesting or not. If it is, then a pointer to the node is
445 * stored away by the callback for use during unprobe.
446 *
447 * The DDI get property function allocates storage for the name
448 * property. That needs to be freed within this function.
449 */
450static int
451opl_init_nodes(dev_info_t *parent, opl_init_func_t init)
452{
453	dev_info_t	*node;
454	char		*name;
455	int 		circ, ret;
456	int		len;
457
458	ASSERT(parent != NULL);
459
460	/*
461	 * Hold parent node busy to walk its child list
462	 */
463	ndi_devi_enter(parent, &circ);
464	node = ddi_get_child(parent);
465
466	while (node != NULL) {
467
468		ret = OPL_GET_PROP(string, node, "name", &name, &len);
469		if (ret != DDI_PROP_SUCCESS) {
470			/*
471			 * The property does not exist for this node.
472			 */
473			node = ddi_get_next_sibling(node);
474			continue;
475		}
476
477		ret = init(node, name, len);
478		kmem_free(name, len);
479		if (ret != 0) {
480
481			ndi_devi_exit(parent, circ);
482			return (-1);
483		}
484
485		node = ddi_get_next_sibling(node);
486	}
487
488	ndi_devi_exit(parent, circ);
489
490	return (0);
491}
492
493/*
494 * This init function finds all the interesting nodes under the
495 * root node and stores pointers to them. The following nodes
496 * are considered interesting by this implementation:
497 *
498 *	"cmp"
499 *		These are nodes that represent processor chips.
500 *
501 *	"pci"
502 *		These are nodes that represent PCI leaves.
503 *
504 *	"pseudo-mc"
505 *		These are nodes that contain memory information.
506 */
507static int
508opl_init_root_nodes(dev_info_t *node, char *name, int len)
509{
510	int		portid, board, chip, channel, leaf;
511	int		ret;
512
513	if (strncmp(name, OPL_CPU_CHIP_NODE, len) == 0) {
514
515		ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
516		if (ret != DDI_PROP_SUCCESS)
517			return (-1);
518
519		ret = OPL_GET_PROP(int, node, "board#", &board, -1);
520		if (ret != DDI_PROP_SUCCESS)
521			return (-1);
522
523		chip = OPL_CPU_CHIP(portid);
524		opl_boards[board].cfg_cpu_chips[chip] = node;
525
526	} else if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {
527
528		ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
529		if (ret != DDI_PROP_SUCCESS)
530			return (-1);
531
532		board = OPL_IO_PORTID_TO_LSB(portid);
533		channel = OPL_PORTID_TO_CHANNEL(portid);
534
535		if (channel == OPL_CMU_CHANNEL) {
536
537			opl_boards[board].cfg_cmuch_leaf = node;
538
539		} else {
540
541			leaf = OPL_PORTID_TO_LEAF(portid);
542			opl_boards[board].cfg_pcich_leaf[channel][leaf] = node;
543		}
544	} else if (strncmp(name, OPL_PSEUDO_MC_NODE, len) == 0) {
545
546		ret = OPL_GET_PROP(int, node, "board#", &board, -1);
547		if (ret != DDI_PROP_SUCCESS)
548			return (-1);
549
550		ASSERT((board >= 0) && (board < HWD_SBS_PER_DOMAIN));
551
552		opl_boards[board].cfg_pseudo_mc = node;
553	}
554
555	return (0);
556}
557
558/*
559 * This function initializes the OPL IKP feature. Currently, all it does
560 * is find the interesting nodes that Solaris has created at boot time
561 * for boards present at boot time and store pointers to them. This
562 * is useful if those boards are unprobed by DR.
563 */
564int
565opl_init_cfg()
566{
567	dev_info_t	*root;
568
569	if (opl_cfg_inited == 0) {
570
571		root = ddi_root_node();
572		if ((opl_init_nodes(root, opl_init_root_nodes) != 0)) {
573			cmn_err(CE_WARN, "IKP: init failed");
574			return (1);
575		}
576
577		opl_cfg_inited = 1;
578	}
579
580	return (0);
581}
582
583/*
584 * When DR is initialized, we walk the device tree and acquire a hold on
585 * all the nodes that are interesting to IKP. This is so that the corresponding
586 * branches cannot be deleted.
587 *
588 * The following function informs the walk about which nodes are interesting
589 * so that it can hold the corresponding branches.
590 */
591static int
592opl_hold_node(char *name)
593{
594	/*
595	 * We only need to hold/release the following nodes which
596	 * represent separate branches that must be managed.
597	 */
598	return ((strcmp(name, OPL_CPU_CHIP_NODE) == 0) ||
599	    (strcmp(name, OPL_PSEUDO_MC_NODE) == 0) ||
600	    (strcmp(name, OPL_PCI_LEAF_NODE) == 0));
601}
602
603static int
604opl_hold_rele_devtree(dev_info_t *rdip, void *arg)
605{
606
607	int	*holdp = (int *)arg;
608	char	*name = ddi_node_name(rdip);
609
610	/*
611	 * We only need to hold/release the following nodes which
612	 * represent separate branches that must be managed.
613	 */
614	if (opl_hold_node(name) == 0) {
615		/* Not of interest to us */
616		return (DDI_WALK_PRUNECHILD);
617	}
618	if (*holdp) {
619		ASSERT(!e_ddi_branch_held(rdip));
620		e_ddi_branch_hold(rdip);
621	} else {
622		ASSERT(e_ddi_branch_held(rdip));
623		e_ddi_branch_rele(rdip);
624	}
625
626	return (DDI_WALK_PRUNECHILD);
627}
628
629void
630opl_hold_devtree()
631{
632	dev_info_t *dip;
633	int circ;
634	int hold = 1;
635
636	dip = ddi_root_node();
637	ndi_devi_enter(dip, &circ);
638	ddi_walk_devs(ddi_get_child(dip), opl_hold_rele_devtree, &hold);
639	ndi_devi_exit(dip, circ);
640}
641
642void
643opl_release_devtree()
644{
645	dev_info_t *dip;
646	int circ;
647	int hold = 0;
648
649	dip = ddi_root_node();
650	ndi_devi_enter(dip, &circ);
651	ddi_walk_devs(ddi_get_child(dip), opl_hold_rele_devtree, &hold);
652	ndi_devi_exit(dip, circ);
653}
654
655/*
656 * This is a helper function that allows opl_create_node() to return a
657 * pointer to a newly created node to its caller.
658 */
659/*ARGSUSED*/
660static void
661opl_set_node(dev_info_t *node, void *arg, uint_t flags)
662{
663	opl_probe_t	*probe;
664
665	probe = arg;
666	probe->pr_node = node;
667}
668
669/*
670 * Function to create a node in the device tree under a specified parent.
671 *
672 * e_ddi_branch_create() allows the creation of a whole branch with a
673 * single call of the function. However, we only use it to create one node
674 * at a time in the case of non-I/O device nodes. In other words, we
675 * create branches by repeatedly using this function. This makes the
676 * code more readable.
677 *
678 * The branch descriptor passed to e_ddi_branch_create() takes two
679 * callbacks. The create() callback is used to set the properties of a
680 * newly created node. The other callback is used to return a pointer
681 * to the newly created node. The create() callback is passed by the
682 * caller of this function based on the kind of node it wishes to
683 * create.
684 *
685 * e_ddi_branch_create() returns with the newly created node held. We
686 * only need to hold the top nodes of the branches we create. We release
687 * the hold for the others. E.g., the "cmp" node needs to be held. Since
688 * we hold the "cmp" node, there is no need to hold the "core" and "cpu"
689 * nodes below it.
690 */
691static dev_info_t *
692opl_create_node(opl_probe_t *probe)
693{
694	devi_branch_t	branch;
695
696	probe->pr_node = NULL;
697
698	branch.arg = probe;
699	branch.type = DEVI_BRANCH_SID;
700	branch.create.sid_branch_create = probe->pr_create;
701	branch.devi_branch_callback = opl_set_node;
702
703	if (e_ddi_branch_create(probe->pr_parent, &branch, NULL, 0) != 0)
704		return (NULL);
705
706	ASSERT(probe->pr_node != NULL);
707
708	if (probe->pr_hold == 0)
709		e_ddi_branch_rele(probe->pr_node);
710
711	return (probe->pr_node);
712}
713
714/*
715 * Function to tear down a whole branch rooted at the specified node.
716 *
717 * Although we create each node of a branch individually, we destroy
718 * a whole branch in one call. This is more efficient.
719 */
720static int
721opl_destroy_node(dev_info_t *node)
722{
723	if (e_ddi_branch_destroy(node, NULL, 0) != 0) {
724		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
725		(void) ddi_pathname(node, path);
726		cmn_err(CE_WARN, "OPL node removal failed: %s (%p)", path,
727		    (void *)node);
728		kmem_free(path, MAXPATHLEN);
729		return (-1);
730	}
731
732	return (0);
733}
734
735/*
736 * Set the properties for a "cpu" node.
737 */
738/*ARGSUSED*/
739static int
740opl_create_cpu(dev_info_t *node, void *arg, uint_t flags)
741{
742	opl_probe_t	*probe;
743	hwd_cpu_chip_t	*chip;
744	hwd_core_t	*core;
745	hwd_cpu_t	*cpu;
746	int		ret;
747
748	probe = arg;
749	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
750	core = &chip->chip_cores[probe->pr_core];
751	cpu = &core->core_cpus[probe->pr_cpu];
752	OPL_UPDATE_PROP(string, node, "name", OPL_CPU_NODE);
753	OPL_UPDATE_PROP(string, node, "device_type", OPL_CPU_NODE);
754
755	OPL_UPDATE_PROP(int, node, "cpuid", cpu->cpu_cpuid);
756	OPL_UPDATE_PROP(int, node, "reg", probe->pr_cpu);
757
758	OPL_UPDATE_PROP(string, node, "status", "okay");
759
760	return (DDI_WALK_TERMINATE);
761}
762
763/*
764 * Create "cpu" nodes as child nodes of a given "core" node.
765 */
766static int
767opl_probe_cpus(opl_probe_t *probe)
768{
769	int		i;
770	hwd_cpu_chip_t	*chip;
771	hwd_core_t	*core;
772	hwd_cpu_t	*cpus;
773
774	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
775	core = &chip->chip_cores[probe->pr_core];
776	cpus = &core->core_cpus[0];
777
778	for (i = 0; i < HWD_CPUS_PER_CORE; i++) {
779
780		/*
781		 * Olympus-C has 2 cpus per core.
782		 * Jupiter has 4 cpus per core.
783		 * For the Olympus-C based platform, we expect the cpu_status
784		 * of the non-existent cpus to be set to missing.
785		 */
786		if (!HWD_STATUS_OK(cpus[i].cpu_status))
787			continue;
788
789		probe->pr_create = opl_create_cpu;
790		probe->pr_cpu = i;
791		if (opl_create_node(probe) == NULL) {
792
793			cmn_err(CE_WARN, "IKP: create cpu (%d-%d-%d-%d) failed",
794			    probe->pr_board, probe->pr_cpu_chip, probe->pr_core,
795			    probe->pr_cpu);
796			return (-1);
797		}
798	}
799
800	return (0);
801}
802
803/*
804 * Set the properties for a "core" node.
805 */
806/*ARGSUSED*/
807static int
808opl_create_core(dev_info_t *node, void *arg, uint_t flags)
809{
810	opl_probe_t	*probe;
811	hwd_cpu_chip_t	*chip;
812	hwd_core_t	*core;
813	int		sharing[2];
814	int		ret;
815
816	probe = arg;
817	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
818	core = &chip->chip_cores[probe->pr_core];
819
820	OPL_UPDATE_PROP(string, node, "name", OPL_CORE_NODE);
821	OPL_UPDATE_PROP(string, node, "device_type", OPL_CORE_NODE);
822	OPL_UPDATE_PROP(string, node, "compatible", chip->chip_compatible);
823
824	OPL_UPDATE_PROP(int, node, "reg", probe->pr_core);
825	OPL_UPDATE_PROP(int, node, "manufacturer#", core->core_manufacturer);
826	OPL_UPDATE_PROP(int, node, "implementation#",
827	    core->core_implementation);
828	OPL_UPDATE_PROP(int, node, "mask#", core->core_mask);
829
830	OPL_UPDATE_PROP(int, node, "sparc-version", 9);
831	OPL_UPDATE_PROP(int, node, "clock-frequency", core->core_frequency);
832
833	OPL_UPDATE_PROP(int, node, "l1-icache-size", core->core_l1_icache_size);
834	OPL_UPDATE_PROP(int, node, "l1-icache-line-size",
835	    core->core_l1_icache_line_size);
836	OPL_UPDATE_PROP(int, node, "l1-icache-associativity",
837	    core->core_l1_icache_associativity);
838	OPL_UPDATE_PROP(int, node, "#itlb-entries",
839	    core->core_num_itlb_entries);
840
841	OPL_UPDATE_PROP(int, node, "l1-dcache-size", core->core_l1_dcache_size);
842	OPL_UPDATE_PROP(int, node, "l1-dcache-line-size",
843	    core->core_l1_dcache_line_size);
844	OPL_UPDATE_PROP(int, node, "l1-dcache-associativity",
845	    core->core_l1_dcache_associativity);
846	OPL_UPDATE_PROP(int, node, "#dtlb-entries",
847	    core->core_num_dtlb_entries);
848
849	OPL_UPDATE_PROP(int, node, "l2-cache-size", core->core_l2_cache_size);
850	OPL_UPDATE_PROP(int, node, "l2-cache-line-size",
851	    core->core_l2_cache_line_size);
852	OPL_UPDATE_PROP(int, node, "l2-cache-associativity",
853	    core->core_l2_cache_associativity);
854	sharing[0] = 0;
855	sharing[1] = core->core_l2_cache_sharing;
856	OPL_UPDATE_PROP_ARRAY(int, node, "l2-cache-sharing", sharing, 2);
857
858	OPL_UPDATE_PROP(string, node, "status", "okay");
859
860	return (DDI_WALK_TERMINATE);
861}
862
863/*
864 * Create "core" nodes as child nodes of a given "cmp" node.
865 *
866 * Create the branch below each "core" node".
867 */
868static int
869opl_probe_cores(opl_probe_t *probe)
870{
871	int		i;
872	hwd_cpu_chip_t	*chip;
873	hwd_core_t	*cores;
874	dev_info_t	*parent, *node;
875
876	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
877	cores = &chip->chip_cores[0];
878	parent = probe->pr_parent;
879
880	for (i = 0; i < HWD_CORES_PER_CPU_CHIP; i++) {
881
882		if (!HWD_STATUS_OK(cores[i].core_status))
883			continue;
884
885		probe->pr_parent = parent;
886		probe->pr_create = opl_create_core;
887		probe->pr_core = i;
888		node = opl_create_node(probe);
889		if (node == NULL) {
890
891			cmn_err(CE_WARN, "IKP: create core (%d-%d-%d) failed",
892			    probe->pr_board, probe->pr_cpu_chip,
893			    probe->pr_core);
894			return (-1);
895		}
896
897		/*
898		 * Create "cpu" nodes below "core".
899		 */
900		probe->pr_parent = node;
901		if (opl_probe_cpus(probe) != 0)
902			return (-1);
903		probe->pr_cpu_impl |= (1 << cores[i].core_implementation);
904	}
905
906	return (0);
907}
908
909/*
910 * Set the properties for a "cmp" node.
911 */
912/*ARGSUSED*/
913static int
914opl_create_cpu_chip(dev_info_t *node, void *arg, uint_t flags)
915{
916	opl_probe_t	*probe;
917	hwd_cpu_chip_t	*chip;
918	opl_range_t	range;
919	uint64_t	dummy_addr;
920	int		ret;
921
922	probe = arg;
923	chip = &probe->pr_sb->sb_cmu.cmu_cpu_chips[probe->pr_cpu_chip];
924
925	OPL_UPDATE_PROP(string, node, "name", OPL_CPU_CHIP_NODE);
926
927	OPL_UPDATE_PROP(int, node, "portid", chip->chip_portid);
928	OPL_UPDATE_PROP(int, node, "board#", probe->pr_board);
929
930	dummy_addr = OPL_PROC_AS(probe->pr_board, probe->pr_cpu_chip);
931	range.rg_addr_hi = OPL_HI(dummy_addr);
932	range.rg_addr_lo = OPL_LO(dummy_addr);
933	range.rg_size_hi = 0;
934	range.rg_size_lo = 0;
935	OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);
936
937	OPL_UPDATE_PROP(int, node, "#address-cells", 1);
938	OPL_UPDATE_PROP(int, node, "#size-cells", 0);
939
940	OPL_UPDATE_PROP(string, node, "status", "okay");
941
942	return (DDI_WALK_TERMINATE);
943}
944
945/*
946 * Create "cmp" nodes as child nodes of the root node.
947 *
948 * Create the branch below each "cmp" node.
949 */
950static int
951opl_probe_cpu_chips(opl_probe_t *probe)
952{
953	int		i;
954	dev_info_t	**cfg_cpu_chips;
955	hwd_cpu_chip_t	*chips;
956	dev_info_t	*node;
957
958	cfg_cpu_chips = opl_boards[probe->pr_board].cfg_cpu_chips;
959	chips = &probe->pr_sb->sb_cmu.cmu_cpu_chips[0];
960
961	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
962
963		ASSERT(cfg_cpu_chips[i] == NULL);
964
965		if (!HWD_STATUS_OK(chips[i].chip_status))
966			continue;
967
968		probe->pr_parent = ddi_root_node();
969		probe->pr_create = opl_create_cpu_chip;
970		probe->pr_cpu_chip = i;
971		probe->pr_hold = 1;
972		node = opl_create_node(probe);
973		if (node == NULL) {
974
975			cmn_err(CE_WARN, "IKP: create chip (%d-%d) failed",
976			    probe->pr_board, probe->pr_cpu_chip);
977			return (-1);
978		}
979
980		cfg_cpu_chips[i] = node;
981
982		/*
983		 * Create "core" nodes below "cmp".
984		 * We hold the "cmp" node. So, there is no need to hold
985		 * the "core" and "cpu" nodes below it.
986		 */
987		probe->pr_parent = node;
988		probe->pr_hold = 0;
989		if (opl_probe_cores(probe) != 0)
990			return (-1);
991	}
992
993	return (0);
994}
995
996/*
997 * Set the properties for a "pseudo-mc" node.
998 */
999/*ARGSUSED*/
1000static int
1001opl_create_pseudo_mc(dev_info_t *node, void *arg, uint_t flags)
1002{
1003	opl_probe_t	*probe;
1004	int		board, portid;
1005	hwd_bank_t	*bank;
1006	hwd_memory_t	*mem;
1007	opl_range_t	range;
1008	opl_mc_addr_t	mc[HWD_BANKS_PER_CMU];
1009	int		status[2][7];
1010	int		i, j;
1011	int		ret;
1012
1013	probe = arg;
1014	board = probe->pr_board;
1015
1016	OPL_UPDATE_PROP(string, node, "name", OPL_PSEUDO_MC_NODE);
1017	OPL_UPDATE_PROP(string, node, "device_type", "memory-controller");
1018	OPL_UPDATE_PROP(string, node, "compatible", "FJSV,oplmc");
1019
1020	portid = OPL_LSB_TO_PSEUDOMC_PORTID(board);
1021	OPL_UPDATE_PROP(int, node, "portid", portid);
1022
1023	range.rg_addr_hi = OPL_HI(OPL_MC_AS(board));
1024	range.rg_addr_lo = 0x200;
1025	range.rg_size_hi = 0;
1026	range.rg_size_lo = 0;
1027	OPL_UPDATE_PROP_ARRAY(int, node, "reg", (int *)&range, 4);
1028
1029	OPL_UPDATE_PROP(int, node, "board#", board);
1030	OPL_UPDATE_PROP(int, node, "physical-board#",
1031	    probe->pr_sb->sb_psb_number);
1032
1033	OPL_UPDATE_PROP(int, node, "#address-cells", 1);
1034	OPL_UPDATE_PROP(int, node, "#size-cells", 2);
1035
1036	mem = &probe->pr_sb->sb_cmu.cmu_memory;
1037
1038	range.rg_addr_hi = OPL_HI(mem->mem_start_address);
1039	range.rg_addr_lo = OPL_LO(mem->mem_start_address);
1040	range.rg_size_hi = OPL_HI(mem->mem_size);
1041	range.rg_size_lo = OPL_LO(mem->mem_size);
1042	OPL_UPDATE_PROP_ARRAY(int, node, "sb-mem-ranges", (int *)&range, 4);
1043
1044	bank = probe->pr_sb->sb_cmu.cmu_memory.mem_banks;
1045	for (i = 0, j = 0; i < HWD_BANKS_PER_CMU; i++) {
1046
1047		if (!HWD_STATUS_OK(bank[i].bank_status))
1048			continue;
1049
1050		mc[j].mc_bank = i;
1051		mc[j].mc_hi = OPL_HI(bank[i].bank_register_address);
1052		mc[j].mc_lo = OPL_LO(bank[i].bank_register_address);
1053		j++;
1054	}
1055
1056	if (j > 0) {
1057		OPL_UPDATE_PROP_ARRAY(int, node, "mc-addr", (int *)mc, j*3);
1058	} else {
1059		/*
1060		 * If there is no memory, we need the mc-addr property, but
1061		 * it is length 0.  The only way to do this using ndi seems
1062		 * to be by creating a boolean property.
1063		 */
1064		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, node, "mc-addr");
1065		OPL_UPDATE_PROP_ERR(ret, "mc-addr");
1066	}
1067
1068	OPL_UPDATE_PROP_ARRAY(byte, node, "cs0-mc-pa-trans-table",
1069	    mem->mem_cs[0].cs_pa_mac_table, 64);
1070	OPL_UPDATE_PROP_ARRAY(byte, node, "cs1-mc-pa-trans-table",
1071	    mem->mem_cs[1].cs_pa_mac_table, 64);
1072
1073#define	CS_PER_MEM 2
1074
1075	for (i = 0, j = 0; i < CS_PER_MEM; i++) {
1076		if (HWD_STATUS_OK(mem->mem_cs[i].cs_status) ||
1077		    HWD_STATUS_FAILED(mem->mem_cs[i].cs_status)) {
1078			status[j][0] = i;
1079			if (HWD_STATUS_OK(mem->mem_cs[i].cs_status))
1080				status[j][1] = 0;
1081			else
1082				status[j][1] = 1;
1083			status[j][2] =
1084			    OPL_HI(mem->mem_cs[i].cs_available_capacity);
1085			status[j][3] =
1086			    OPL_LO(mem->mem_cs[i].cs_available_capacity);
1087			status[j][4] = OPL_HI(mem->mem_cs[i].cs_dimm_capacity);
1088			status[j][5] = OPL_LO(mem->mem_cs[i].cs_dimm_capacity);
1089			status[j][6] = mem->mem_cs[i].cs_number_of_dimms;
1090			j++;
1091		}
1092	}
1093
1094	if (j > 0) {
1095		OPL_UPDATE_PROP_ARRAY(int, node, "cs-status", (int *)status,
1096		    j*7);
1097	} else {
1098		/*
1099		 * If there is no memory, we need the cs-status property, but
1100		 * it is length 0.  The only way to do this using ndi seems
1101		 * to be by creating a boolean property.
1102		 */
1103		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, node,
1104		    "cs-status");
1105		OPL_UPDATE_PROP_ERR(ret, "cs-status");
1106	}
1107
1108	return (DDI_WALK_TERMINATE);
1109}
1110
1111/*
1112 * Create "pseudo-mc" nodes
1113 */
1114static int
1115opl_probe_memory(opl_probe_t *probe)
1116{
1117	int		board;
1118	opl_board_cfg_t	*board_cfg;
1119	dev_info_t	*node;
1120
1121	board = probe->pr_board;
1122	board_cfg = &opl_boards[board];
1123
1124	ASSERT(board_cfg->cfg_pseudo_mc == NULL);
1125
1126	probe->pr_parent = ddi_root_node();
1127	probe->pr_create = opl_create_pseudo_mc;
1128	probe->pr_hold = 1;
1129	node = opl_create_node(probe);
1130	if (node == NULL) {
1131
1132		cmn_err(CE_WARN, "IKP: create pseudo-mc (%d) failed", board);
1133		return (-1);
1134	}
1135
1136	board_cfg->cfg_pseudo_mc = node;
1137
1138	return (0);
1139}
1140
1141/*
1142 * Allocate the fcode ops handle.
1143 */
1144/*ARGSUSED*/
1145static
1146fco_handle_t
1147opl_fc_ops_alloc_handle(dev_info_t *parent, dev_info_t *child,
1148			void *fcode, size_t fcode_size, char *unit_address,
1149			char *my_args)
1150{
1151	fco_handle_t	rp;
1152	phandle_t	h;
1153	char		*buf;
1154
1155	rp = kmem_zalloc(sizeof (struct fc_resource_list), KM_SLEEP);
1156	rp->next_handle = fc_ops_alloc_handle(parent, child, fcode, fcode_size,
1157	    unit_address, NULL);
1158	rp->ap = parent;
1159	rp->child = child;
1160	rp->fcode = fcode;
1161	rp->fcode_size = fcode_size;
1162	rp->my_args = my_args;
1163
1164	if (unit_address) {
1165		buf = kmem_zalloc(UNIT_ADDR_SIZE, KM_SLEEP);
1166		(void) strcpy(buf, unit_address);
1167		rp->unit_address = buf;
1168	}
1169
1170	/*
1171	 * Add the child's nodeid to our table...
1172	 */
1173	h = ddi_get_nodeid(rp->child);
1174	fc_add_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child, h);
1175
1176	return (rp);
1177}
1178
1179
1180static void
1181opl_fc_ops_free_handle(fco_handle_t rp)
1182{
1183	struct fc_resource	*resp, *nresp;
1184
1185	ASSERT(rp);
1186
1187	if (rp->next_handle)
1188		fc_ops_free_handle(rp->next_handle);
1189	if (rp->unit_address)
1190		kmem_free(rp->unit_address, UNIT_ADDR_SIZE);
1191
1192	/*
1193	 * Release all the resources from the resource list
1194	 */
1195	for (resp = rp->head; resp != NULL; resp = nresp) {
1196		nresp = resp->next;
1197		switch (resp->type) {
1198
1199		case RT_MAP:
1200			/*
1201			 * If this is still mapped, we'd better unmap it now,
1202			 * or all our structures that are tracking it will
1203			 * be leaked.
1204			 */
1205			if (resp->fc_map_handle != NULL)
1206				opl_unmap_phys(&resp->fc_map_handle);
1207			break;
1208
1209		case RT_DMA:
1210			/*
1211			 * DMA has to be freed up at exit time.
1212			 */
1213			cmn_err(CE_CONT,
1214			    "opl_fc_ops_free_handle: Unexpected DMA seen!");
1215			break;
1216
1217		case RT_CONTIGIOUS:
1218			FC_DEBUG2(1, CE_CONT, "opl_fc_ops_free: "
1219			    "Free claim-memory resource 0x%lx size 0x%x\n",
1220			    resp->fc_contig_virt, resp->fc_contig_len);
1221
1222			(void) ndi_ra_free(ddi_root_node(),
1223			    (uint64_t)resp->fc_contig_virt,
1224			    resp->fc_contig_len, "opl-fcodemem",
1225			    NDI_RA_PASS);
1226
1227			break;
1228
1229		default:
1230			cmn_err(CE_CONT, "opl_fc_ops_free: "
1231			    "unknown resource type %d", resp->type);
1232			break;
1233		}
1234		fc_rem_resource(rp, resp);
1235		kmem_free(resp, sizeof (struct fc_resource));
1236	}
1237
1238	kmem_free(rp, sizeof (struct fc_resource_list));
1239}
1240
1241int
1242opl_fc_do_op(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1243{
1244	opl_fc_ops_t	*op;
1245	char		*service = fc_cell2ptr(cp->svc_name);
1246
1247	ASSERT(rp);
1248
1249	FC_DEBUG1(1, CE_CONT, "opl_fc_do_op: <%s>\n", service);
1250
1251	/*
1252	 * First try the generic fc_ops.
1253	 */
1254	if (fc_ops(ap, rp->next_handle, cp) == 0)
1255		return (0);
1256
1257	/*
1258	 * Now try the Jupiter-specific ops.
1259	 */
1260	for (op = opl_fc_ops; op->fc_service != NULL; ++op)
1261		if (strcmp(op->fc_service, service) == 0)
1262			return (op->fc_op(ap, rp, cp));
1263
1264	FC_DEBUG1(9, CE_CONT, "opl_fc_do_op: <%s> not serviced\n", service);
1265
1266	return (-1);
1267}
1268
1269/*
1270 * map-in  (phys.lo phys.hi size -- virt)
1271 */
1272static int
1273opl_map_in(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1274{
1275	size_t			len;
1276	int			error;
1277	caddr_t			virt;
1278	struct fc_resource	*resp;
1279	struct regspec		rspec;
1280	ddi_device_acc_attr_t	acc;
1281	ddi_acc_handle_t	h;
1282
1283	if (fc_cell2int(cp->nargs) != 3)
1284		return (fc_syntax_error(cp, "nargs must be 3"));
1285
1286	if (fc_cell2int(cp->nresults) < 1)
1287		return (fc_syntax_error(cp, "nresults must be >= 1"));
1288
1289	rspec.regspec_size = len = fc_cell2size(fc_arg(cp, 0));
1290	rspec.regspec_bustype = fc_cell2uint(fc_arg(cp, 1));
1291	rspec.regspec_addr = fc_cell2uint(fc_arg(cp, 2));
1292
1293	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1294	acc.devacc_attr_endian_flags = DDI_STRUCTURE_BE_ACC;
1295	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1296
1297	FC_DEBUG3(1, CE_CONT, "opl_map_in: attempting map in "
1298	    "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
1299	    rspec.regspec_addr, rspec.regspec_size);
1300
1301	error = opl_map_phys(rp->child, &rspec, &virt, &acc, &h);
1302
1303	if (error)  {
1304		FC_DEBUG3(1, CE_CONT, "opl_map_in: map in failed - "
1305		    "address 0x%08x.%08x length %x\n", rspec.regspec_bustype,
1306		    rspec.regspec_addr, rspec.regspec_size);
1307
1308		return (fc_priv_error(cp, "opl map-in failed"));
1309	}
1310
1311	FC_DEBUG1(3, CE_CONT, "opl_map_in: returning virt %p\n", virt);
1312
1313	cp->nresults = fc_int2cell(1);
1314	fc_result(cp, 0) = fc_ptr2cell(virt);
1315
1316	/*
1317	 * Log this resource ...
1318	 */
1319	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
1320	resp->type = RT_MAP;
1321	resp->fc_map_virt = virt;
1322	resp->fc_map_len = len;
1323	resp->fc_map_handle = h;
1324	fc_add_resource(rp, resp);
1325
1326	return (fc_success_op(ap, rp, cp));
1327}
1328
1329/*
1330 * map-out (virt size -- )
1331 */
1332static int
1333opl_map_out(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1334{
1335	caddr_t			virt;
1336	size_t			len;
1337	struct fc_resource	*resp;
1338
1339	if (fc_cell2int(cp->nargs) != 2)
1340		return (fc_syntax_error(cp, "nargs must be 2"));
1341
1342	virt = fc_cell2ptr(fc_arg(cp, 1));
1343
1344	len = fc_cell2size(fc_arg(cp, 0));
1345
1346	FC_DEBUG2(1, CE_CONT, "opl_map_out: attempting map out %p %x\n",
1347	    virt, len);
1348
1349	/*
1350	 * Find if this request matches a mapping resource we set up.
1351	 */
1352	fc_lock_resource_list(rp);
1353	for (resp = rp->head; resp != NULL; resp = resp->next) {
1354		if (resp->type != RT_MAP)
1355			continue;
1356		if (resp->fc_map_virt != virt)
1357			continue;
1358		if (resp->fc_map_len == len)
1359			break;
1360	}
1361	fc_unlock_resource_list(rp);
1362
1363	if (resp == NULL)
1364		return (fc_priv_error(cp, "request doesn't match a "
1365		    "known mapping"));
1366
1367	opl_unmap_phys(&resp->fc_map_handle);
1368
1369	/*
1370	 * remove the resource from the list and release it.
1371	 */
1372	fc_rem_resource(rp, resp);
1373	kmem_free(resp, sizeof (struct fc_resource));
1374
1375	cp->nresults = fc_int2cell(0);
1376	return (fc_success_op(ap, rp, cp));
1377}
1378
1379static int
1380opl_register_fetch(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1381{
1382	size_t			len;
1383	caddr_t			virt;
1384	int			error = 0;
1385	uint64_t		v;
1386	uint64_t		x;
1387	uint32_t		l;
1388	uint16_t		w;
1389	uint8_t			b;
1390	char			*service = fc_cell2ptr(cp->svc_name);
1391	struct fc_resource	*resp;
1392
1393	if (fc_cell2int(cp->nargs) != 1)
1394		return (fc_syntax_error(cp, "nargs must be 1"));
1395
1396	if (fc_cell2int(cp->nresults) < 1)
1397		return (fc_syntax_error(cp, "nresults must be >= 1"));
1398
1399	virt = fc_cell2ptr(fc_arg(cp, 0));
1400
1401	/*
1402	 * Determine the access width .. we can switch on the 2nd
1403	 * character of the name which is "rx@", "rl@", "rb@" or "rw@"
1404	 */
1405	switch (*(service + 1)) {
1406	case 'x':	len = sizeof (x); break;
1407	case 'l':	len = sizeof (l); break;
1408	case 'w':	len = sizeof (w); break;
1409	case 'b':	len = sizeof (b); break;
1410	}
1411
1412	/*
1413	 * Check the alignment ...
1414	 */
1415	if (((intptr_t)virt & (len - 1)) != 0)
1416		return (fc_priv_error(cp, "unaligned access"));
1417
1418	/*
1419	 * Find if this virt is 'within' a request we know about
1420	 */
1421	fc_lock_resource_list(rp);
1422	for (resp = rp->head; resp != NULL; resp = resp->next) {
1423		if (resp->type == RT_MAP) {
1424			if ((virt >= (caddr_t)resp->fc_map_virt) &&
1425			    ((virt + len) <=
1426			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
1427				break;
1428		} else if (resp->type == RT_CONTIGIOUS) {
1429			if ((virt >= (caddr_t)resp->fc_contig_virt) &&
1430			    ((virt + len) <= ((caddr_t)resp->fc_contig_virt +
1431			    resp->fc_contig_len)))
1432				break;
1433		}
1434	}
1435	fc_unlock_resource_list(rp);
1436
1437	if (resp == NULL) {
1438		return (fc_priv_error(cp, "request not within "
1439		    "known mappings"));
1440	}
1441
1442	switch (len) {
1443	case sizeof (x):
1444		if (resp->type == RT_MAP)
1445			error = ddi_peek64(rp->child, (int64_t *)virt,
1446			    (int64_t *)&x);
1447		else /* RT_CONTIGIOUS */
1448			x = *(int64_t *)virt;
1449		v = x;
1450		break;
1451	case sizeof (l):
1452		if (resp->type == RT_MAP)
1453			error = ddi_peek32(rp->child, (int32_t *)virt,
1454			    (int32_t *)&l);
1455		else /* RT_CONTIGIOUS */
1456			l = *(int32_t *)virt;
1457		v = l;
1458		break;
1459	case sizeof (w):
1460		if (resp->type == RT_MAP)
1461			error = ddi_peek16(rp->child, (int16_t *)virt,
1462			    (int16_t *)&w);
1463		else /* RT_CONTIGIOUS */
1464			w = *(int16_t *)virt;
1465		v = w;
1466		break;
1467	case sizeof (b):
1468		if (resp->type == RT_MAP)
1469			error = ddi_peek8(rp->child, (int8_t *)virt,
1470			    (int8_t *)&b);
1471		else /* RT_CONTIGIOUS */
1472			b = *(int8_t *)virt;
1473		v = b;
1474		break;
1475	}
1476
1477	if (error == DDI_FAILURE) {
1478		FC_DEBUG2(1, CE_CONT, "opl_register_fetch: access error "
1479		    "accessing virt %p len %d\n", virt, len);
1480		return (fc_priv_error(cp, "access error"));
1481	}
1482
1483	FC_DEBUG3(1, CE_CONT, "register_fetch (%s) %llx %llx\n",
1484	    service, virt, v);
1485
1486	cp->nresults = fc_int2cell(1);
1487	switch (len) {
1488	case sizeof (x): fc_result(cp, 0) = x; break;
1489	case sizeof (l): fc_result(cp, 0) = fc_uint32_t2cell(l); break;
1490	case sizeof (w): fc_result(cp, 0) = fc_uint16_t2cell(w); break;
1491	case sizeof (b): fc_result(cp, 0) = fc_uint8_t2cell(b); break;
1492	}
1493	return (fc_success_op(ap, rp, cp));
1494}
1495
1496static int
1497opl_register_store(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1498{
1499	size_t			len;
1500	caddr_t			virt;
1501	uint64_t		v;
1502	uint64_t		x;
1503	uint32_t		l;
1504	uint16_t		w;
1505	uint8_t			b;
1506	char			*service = fc_cell2ptr(cp->svc_name);
1507	struct fc_resource	*resp;
1508	int			error = 0;
1509
1510	if (fc_cell2int(cp->nargs) != 2)
1511		return (fc_syntax_error(cp, "nargs must be 2"));
1512
1513	virt = fc_cell2ptr(fc_arg(cp, 0));
1514
1515	/*
1516	 * Determine the access width .. we can switch on the 2nd
1517	 * character of the name which is "rx!", "rl!", "rb!" or "rw!"
1518	 */
1519	switch (*(service + 1)) {
1520	case 'x':
1521		len = sizeof (x);
1522		x = fc_arg(cp, 1);
1523		v = x;
1524		break;
1525	case 'l':
1526		len = sizeof (l);
1527		l = fc_cell2uint32_t(fc_arg(cp, 1));
1528		v = l;
1529		break;
1530	case 'w':
1531		len = sizeof (w);
1532		w = fc_cell2uint16_t(fc_arg(cp, 1));
1533		v = w;
1534		break;
1535	case 'b':
1536		len = sizeof (b);
1537		b = fc_cell2uint8_t(fc_arg(cp, 1));
1538		v = b;
1539		break;
1540	}
1541
1542	FC_DEBUG3(1, CE_CONT, "register_store (%s) %llx %llx\n",
1543	    service, virt, v);
1544
1545	/*
1546	 * Check the alignment ...
1547	 */
1548	if (((intptr_t)virt & (len - 1)) != 0)
1549		return (fc_priv_error(cp, "unaligned access"));
1550
1551	/*
1552	 * Find if this virt is 'within' a request we know about
1553	 */
1554	fc_lock_resource_list(rp);
1555	for (resp = rp->head; resp != NULL; resp = resp->next) {
1556		if (resp->type == RT_MAP) {
1557			if ((virt >= (caddr_t)resp->fc_map_virt) &&
1558			    ((virt + len) <=
1559			    ((caddr_t)resp->fc_map_virt + resp->fc_map_len)))
1560				break;
1561		} else if (resp->type == RT_CONTIGIOUS) {
1562			if ((virt >= (caddr_t)resp->fc_contig_virt) &&
1563			    ((virt + len) <= ((caddr_t)resp->fc_contig_virt +
1564			    resp->fc_contig_len)))
1565				break;
1566		}
1567	}
1568	fc_unlock_resource_list(rp);
1569
1570	if (resp == NULL)
1571		return (fc_priv_error(cp, "request not within"
1572		    "known mappings"));
1573
1574	switch (len) {
1575	case sizeof (x):
1576		if (resp->type == RT_MAP)
1577			error = ddi_poke64(rp->child, (int64_t *)virt, x);
1578		else if (resp->type == RT_CONTIGIOUS)
1579			*(uint64_t *)virt = x;
1580		break;
1581	case sizeof (l):
1582		if (resp->type == RT_MAP)
1583			error = ddi_poke32(rp->child, (int32_t *)virt, l);
1584		else if (resp->type == RT_CONTIGIOUS)
1585			*(uint32_t *)virt = l;
1586		break;
1587	case sizeof (w):
1588		if (resp->type == RT_MAP)
1589			error = ddi_poke16(rp->child, (int16_t *)virt, w);
1590		else if (resp->type == RT_CONTIGIOUS)
1591			*(uint16_t *)virt = w;
1592		break;
1593	case sizeof (b):
1594		if (resp->type == RT_MAP)
1595			error = ddi_poke8(rp->child, (int8_t *)virt, b);
1596		else if (resp->type == RT_CONTIGIOUS)
1597			*(uint8_t *)virt = b;
1598		break;
1599	}
1600
1601	if (error == DDI_FAILURE) {
1602		FC_DEBUG2(1, CE_CONT, "opl_register_store: access error "
1603		    "accessing virt %p len %d\n", virt, len);
1604		return (fc_priv_error(cp, "access error"));
1605	}
1606
1607	cp->nresults = fc_int2cell(0);
1608	return (fc_success_op(ap, rp, cp));
1609}
1610
1611/*
1612 * opl_claim_memory
1613 *
1614 * claim-memory (align size vhint -- vaddr)
1615 */
1616static int
1617opl_claim_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1618{
1619	int			align, size, vhint;
1620	uint64_t		answer, alen;
1621	ndi_ra_request_t	request;
1622	struct fc_resource	*resp;
1623
1624	if (fc_cell2int(cp->nargs) != 3)
1625		return (fc_syntax_error(cp, "nargs must be 3"));
1626
1627	if (fc_cell2int(cp->nresults) < 1)
1628		return (fc_syntax_error(cp, "nresults must be >= 1"));
1629
1630	vhint = fc_cell2int(fc_arg(cp, 2));
1631	size  = fc_cell2int(fc_arg(cp, 1));
1632	align = fc_cell2int(fc_arg(cp, 0));
1633
1634	FC_DEBUG3(1, CE_CONT, "opl_claim_memory: align=0x%x size=0x%x "
1635	    "vhint=0x%x\n", align, size, vhint);
1636
1637	if (size == 0) {
1638		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
1639		    "contiguous memory of size zero\n");
1640		return (fc_priv_error(cp, "allocation error"));
1641	}
1642
1643	if (vhint) {
1644		cmn_err(CE_WARN, "opl_claim_memory - vhint is not zero "
1645		    "vhint=0x%x - Ignoring Argument\n", vhint);
1646	}
1647
1648	bzero((caddr_t)&request, sizeof (ndi_ra_request_t));
1649	request.ra_flags	= NDI_RA_ALLOC_BOUNDED;
1650	request.ra_boundbase	= 0;
1651	request.ra_boundlen	= 0xffffffff;
1652	request.ra_len		= size;
1653	request.ra_align_mask	= align - 1;
1654
1655	if (ndi_ra_alloc(ddi_root_node(), &request, &answer, &alen,
1656	    "opl-fcodemem", NDI_RA_PASS) != NDI_SUCCESS) {
1657		cmn_err(CE_WARN, "opl_claim_memory - unable to allocate "
1658		    "contiguous memory\n");
1659		return (fc_priv_error(cp, "allocation error"));
1660	}
1661
1662	FC_DEBUG2(1, CE_CONT, "opl_claim_memory: address allocated=0x%lx "
1663	    "size=0x%x\n", answer, alen);
1664
1665	cp->nresults = fc_int2cell(1);
1666	fc_result(cp, 0) = answer;
1667
1668	/*
1669	 * Log this resource ...
1670	 */
1671	resp = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
1672	resp->type = RT_CONTIGIOUS;
1673	resp->fc_contig_virt = (void *)answer;
1674	resp->fc_contig_len = size;
1675	fc_add_resource(rp, resp);
1676
1677	return (fc_success_op(ap, rp, cp));
1678}
1679
1680/*
1681 * opl_release_memory
1682 *
1683 * release-memory (size vaddr -- )
1684 */
1685static int
1686opl_release_memory(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1687{
1688	int32_t			vaddr, size;
1689	struct fc_resource	*resp;
1690
1691	if (fc_cell2int(cp->nargs) != 2)
1692		return (fc_syntax_error(cp, "nargs must be 2"));
1693
1694	if (fc_cell2int(cp->nresults) != 0)
1695		return (fc_syntax_error(cp, "nresults must be 0"));
1696
1697	vaddr = fc_cell2int(fc_arg(cp, 1));
1698	size  = fc_cell2int(fc_arg(cp, 0));
1699
1700	FC_DEBUG2(1, CE_CONT, "opl_release_memory: vaddr=0x%x size=0x%x\n",
1701	    vaddr, size);
1702
1703	/*
1704	 * Find if this request matches a mapping resource we set up.
1705	 */
1706	fc_lock_resource_list(rp);
1707	for (resp = rp->head; resp != NULL; resp = resp->next) {
1708		if (resp->type != RT_CONTIGIOUS)
1709			continue;
1710		if (resp->fc_contig_virt != (void *)(uintptr_t)vaddr)
1711			continue;
1712		if (resp->fc_contig_len == size)
1713			break;
1714	}
1715	fc_unlock_resource_list(rp);
1716
1717	if (resp == NULL)
1718		return (fc_priv_error(cp, "request doesn't match a "
1719		    "known mapping"));
1720
1721	(void) ndi_ra_free(ddi_root_node(), vaddr, size,
1722	    "opl-fcodemem", NDI_RA_PASS);
1723
1724	/*
1725	 * remove the resource from the list and release it.
1726	 */
1727	fc_rem_resource(rp, resp);
1728	kmem_free(resp, sizeof (struct fc_resource));
1729
1730	cp->nresults = fc_int2cell(0);
1731
1732	return (fc_success_op(ap, rp, cp));
1733}
1734
1735/*
1736 * opl_vtop
1737 *
1738 * vtop (vaddr -- paddr.lo paddr.hi)
1739 */
1740static int
1741opl_vtop(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1742{
1743	int			vaddr;
1744	uint64_t		paddr;
1745	struct fc_resource	*resp;
1746
1747	if (fc_cell2int(cp->nargs) != 1)
1748		return (fc_syntax_error(cp, "nargs must be 1"));
1749
1750	if (fc_cell2int(cp->nresults) >= 3)
1751		return (fc_syntax_error(cp, "nresults must be less than 2"));
1752
1753	vaddr = fc_cell2int(fc_arg(cp, 0));
1754
1755	/*
1756	 * Find if this request matches a mapping resource we set up.
1757	 */
1758	fc_lock_resource_list(rp);
1759	for (resp = rp->head; resp != NULL; resp = resp->next) {
1760		if (resp->type != RT_CONTIGIOUS)
1761			continue;
1762		if (((uint64_t)resp->fc_contig_virt <= vaddr) &&
1763		    (vaddr < (uint64_t)resp->fc_contig_virt +
1764		    resp->fc_contig_len))
1765			break;
1766	}
1767	fc_unlock_resource_list(rp);
1768
1769	if (resp == NULL)
1770		return (fc_priv_error(cp, "request doesn't match a "
1771		    "known mapping"));
1772
1773	paddr = va_to_pa((void *)(uintptr_t)vaddr);
1774
1775	FC_DEBUG2(1, CE_CONT, "opl_vtop: vaddr=0x%x paddr=0x%x\n",
1776	    vaddr, paddr);
1777
1778	cp->nresults = fc_int2cell(2);
1779
1780	fc_result(cp, 0) = paddr;
1781	fc_result(cp, 1) = 0;
1782
1783	return (fc_success_op(ap, rp, cp));
1784}
1785
1786static int
1787opl_config_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1788{
1789	fc_phandle_t h;
1790
1791	if (fc_cell2int(cp->nargs) != 0)
1792		return (fc_syntax_error(cp, "nargs must be 0"));
1793
1794	if (fc_cell2int(cp->nresults) < 1)
1795		return (fc_syntax_error(cp, "nresults must be >= 1"));
1796
1797	h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), rp->child);
1798
1799	cp->nresults = fc_int2cell(1);
1800	fc_result(cp, 0) = fc_phandle2cell(h);
1801
1802	return (fc_success_op(ap, rp, cp));
1803}
1804
1805static int
1806opl_get_fcode(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1807{
1808	caddr_t		dropin_name_virt, fcode_virt;
1809	char		*dropin_name, *fcode;
1810	int		fcode_len, status;
1811
1812	if (fc_cell2int(cp->nargs) != 3)
1813		return (fc_syntax_error(cp, "nargs must be 3"));
1814
1815	if (fc_cell2int(cp->nresults) < 1)
1816		return (fc_syntax_error(cp, "nresults must be >= 1"));
1817
1818	dropin_name_virt = fc_cell2ptr(fc_arg(cp, 0));
1819
1820	fcode_virt = fc_cell2ptr(fc_arg(cp, 1));
1821
1822	fcode_len = fc_cell2int(fc_arg(cp, 2));
1823
1824	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1825
1826	FC_DEBUG2(1, CE_CONT, "get_fcode: %x %d\n", fcode_virt, fcode_len);
1827
1828	if (copyinstr(fc_cell2ptr(dropin_name_virt), dropin_name,
1829	    FC_SVC_NAME_LEN - 1, NULL))  {
1830		FC_DEBUG1(1, CE_CONT, "opl_get_fcode: "
1831		    "fault copying in drop in name %p\n", dropin_name_virt);
1832		status = 0;
1833	} else {
1834		FC_DEBUG1(1, CE_CONT, "get_fcode: %s\n", dropin_name);
1835
1836		fcode = kmem_zalloc(fcode_len, KM_SLEEP);
1837
1838		if ((status = prom_get_fcode(dropin_name, fcode)) != 0) {
1839
1840			if (copyout((void *)fcode, (void *)fcode_virt,
1841			    fcode_len)) {
1842				cmn_err(CE_WARN, " opl_get_fcode: Unable "
1843				    "to copy out fcode image");
1844				status = 0;
1845			}
1846		}
1847
1848		kmem_free(fcode, fcode_len);
1849	}
1850
1851	kmem_free(dropin_name, FC_SVC_NAME_LEN);
1852
1853	cp->nresults = fc_int2cell(1);
1854	fc_result(cp, 0) = status;
1855
1856	return (fc_success_op(ap, rp, cp));
1857}
1858
1859static int
1860opl_get_fcode_size(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1861{
1862	caddr_t		virt;
1863	char		*dropin_name;
1864	int		len;
1865
1866	if (fc_cell2int(cp->nargs) != 1)
1867		return (fc_syntax_error(cp, "nargs must be 1"));
1868
1869	if (fc_cell2int(cp->nresults) < 1)
1870		return (fc_syntax_error(cp, "nresults must be >= 1"));
1871
1872	virt = fc_cell2ptr(fc_arg(cp, 0));
1873
1874	dropin_name = kmem_zalloc(FC_SVC_NAME_LEN, KM_SLEEP);
1875
1876	FC_DEBUG0(1, CE_CONT, "opl_get_fcode_size:\n");
1877
1878	if (copyinstr(fc_cell2ptr(virt), dropin_name,
1879	    FC_SVC_NAME_LEN - 1, NULL))  {
1880		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: "
1881		    "fault copying in drop in name %p\n", virt);
1882		len = 0;
1883	} else {
1884		FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: %s\n", dropin_name);
1885
1886		len = prom_get_fcode_size(dropin_name);
1887	}
1888
1889	kmem_free(dropin_name, FC_SVC_NAME_LEN);
1890
1891	FC_DEBUG1(1, CE_CONT, "opl_get_fcode_size: fcode_len = %d\n", len);
1892
1893	cp->nresults = fc_int2cell(1);
1894	fc_result(cp, 0) = len;
1895
1896	return (fc_success_op(ap, rp, cp));
1897}
1898
1899static int
1900opl_map_phys(dev_info_t *dip, struct regspec *phys_spec,
1901    caddr_t *addrp, ddi_device_acc_attr_t *accattrp,
1902    ddi_acc_handle_t *handlep)
1903{
1904	ddi_map_req_t 	mapreq;
1905	ddi_acc_hdl_t	*acc_handlep;
1906	int		result;
1907	struct regspec	*rspecp;
1908
1909	*handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
1910	acc_handlep = impl_acc_hdl_get(*handlep);
1911	acc_handlep->ah_vers = VERS_ACCHDL;
1912	acc_handlep->ah_dip = dip;
1913	acc_handlep->ah_rnumber = 0;
1914	acc_handlep->ah_offset = 0;
1915	acc_handlep->ah_len = 0;
1916	acc_handlep->ah_acc = *accattrp;
1917	rspecp = kmem_zalloc(sizeof (struct regspec), KM_SLEEP);
1918	*rspecp = *phys_spec;
1919	/*
1920	 * cache a copy of the reg spec
1921	 */
1922	acc_handlep->ah_bus_private = rspecp;
1923
1924	mapreq.map_op = DDI_MO_MAP_LOCKED;
1925	mapreq.map_type = DDI_MT_REGSPEC;
1926	mapreq.map_obj.rp = (struct regspec *)phys_spec;
1927	mapreq.map_prot = PROT_READ | PROT_WRITE;
1928	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
1929	mapreq.map_handlep = acc_handlep;
1930	mapreq.map_vers = DDI_MAP_VERSION;
1931
1932	result = ddi_map(dip, &mapreq, 0, 0, addrp);
1933
1934	if (result != DDI_SUCCESS) {
1935		impl_acc_hdl_free(*handlep);
1936		kmem_free(rspecp, sizeof (struct regspec));
1937		*handlep = (ddi_acc_handle_t)NULL;
1938	} else {
1939		acc_handlep->ah_addr = *addrp;
1940	}
1941
1942	return (result);
1943}
1944
1945static void
1946opl_unmap_phys(ddi_acc_handle_t *handlep)
1947{
1948	ddi_map_req_t	mapreq;
1949	ddi_acc_hdl_t	*acc_handlep;
1950	struct regspec	*rspecp;
1951
1952	acc_handlep = impl_acc_hdl_get(*handlep);
1953	ASSERT(acc_handlep);
1954	rspecp = acc_handlep->ah_bus_private;
1955
1956	mapreq.map_op = DDI_MO_UNMAP;
1957	mapreq.map_type = DDI_MT_REGSPEC;
1958	mapreq.map_obj.rp = (struct regspec *)rspecp;
1959	mapreq.map_prot = PROT_READ | PROT_WRITE;
1960	mapreq.map_flags = DDI_MF_KERNEL_MAPPING;
1961	mapreq.map_handlep = acc_handlep;
1962	mapreq.map_vers = DDI_MAP_VERSION;
1963
1964	(void) ddi_map(acc_handlep->ah_dip, &mapreq, acc_handlep->ah_offset,
1965	    acc_handlep->ah_len, &acc_handlep->ah_addr);
1966
1967	impl_acc_hdl_free(*handlep);
1968	/*
1969	 * Free the cached copy
1970	 */
1971	kmem_free(rspecp, sizeof (struct regspec));
1972	*handlep = (ddi_acc_handle_t)NULL;
1973}
1974
1975static int
1976opl_get_hwd_va(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
1977{
1978	uint32_t	portid;
1979	void		*hwd_virt;
1980	hwd_header_t	*hwd_h = NULL;
1981	hwd_sb_t	*hwd_sb = NULL;
1982	int		lsb, ch, leaf;
1983	int		status = 1;
1984
1985	/* Check the argument */
1986	if (fc_cell2int(cp->nargs) != 2)
1987		return (fc_syntax_error(cp, "nargs must be 2"));
1988
1989	if (fc_cell2int(cp->nresults) < 1)
1990		return (fc_syntax_error(cp, "nresults must be >= 1"));
1991
1992	/* Get the parameters */
1993	portid = fc_cell2uint32_t(fc_arg(cp, 0));
1994	hwd_virt = (void *)fc_cell2ptr(fc_arg(cp, 1));
1995
1996	/* Get the ID numbers */
1997	lsb  = OPL_IO_PORTID_TO_LSB(portid);
1998	ch   = OPL_PORTID_TO_CHANNEL(portid);
1999	leaf = OPL_PORTID_TO_LEAF(portid);
2000	ASSERT(OPL_IO_PORTID(lsb, ch, leaf) == portid);
2001
2002	/* Set the pointer of hwd. */
2003	if ((hwd_h = (hwd_header_t *)opl_boards[lsb].cfg_hwd) == NULL) {
2004		return (fc_priv_error(cp, "null hwd header"));
2005	}
2006	/* Set the pointer of hwd sb. */
2007	if ((hwd_sb = (hwd_sb_t *)((char *)hwd_h + hwd_h->hdr_sb_info_offset))
2008	    == NULL) {
2009		return (fc_priv_error(cp, "null hwd sb"));
2010	}
2011
2012	if (ch == OPL_CMU_CHANNEL) {
2013		/* Copyout CMU-CH HW Descriptor */
2014		if (copyout((void *)&hwd_sb->sb_cmu.cmu_ch,
2015		    (void *)hwd_virt, sizeof (hwd_cmu_chan_t))) {
2016			cmn_err(CE_WARN, "opl_get_hwd_va: "
2017			"Unable to copy out cmuch descriptor for %x",
2018			    portid);
2019			status = 0;
2020		}
2021	} else {
2022		/* Copyout PCI-CH HW Descriptor */
2023		if (copyout((void *)&hwd_sb->sb_pci_ch[ch].pci_leaf[leaf],
2024		    (void *)hwd_virt, sizeof (hwd_leaf_t))) {
2025			cmn_err(CE_WARN, "opl_get_hwd_va: "
2026			"Unable to copy out pcich descriptor for %x",
2027			    portid);
2028			status = 0;
2029		}
2030	}
2031
2032	cp->nresults = fc_int2cell(1);
2033	fc_result(cp, 0) = status;
2034
2035	return (fc_success_op(ap, rp, cp));
2036}
2037
2038/*
2039 * After Solaris boots, a user can enter OBP using L1A, etc. While in OBP,
2040 * interrupts may be received from PCI devices. These interrupts
2041 * cannot be handled meaningfully since the system is in OBP. These
2042 * interrupts need to be cleared on the CPU side so that the CPU may
2043 * continue with whatever it is doing. Devices that have raised the
2044 * interrupts are expected to reraise the interrupts after sometime
2045 * as they have not been handled. At that time, Solaris will have a
2046 * chance to properly service the interrupts.
2047 *
2048 * The location of the interrupt registers depends on what is present
2049 * at a port. OPL currently supports the Oberon and the CMU channel.
2050 * The following handler handles both kinds of ports and computes
2051 * interrupt register addresses from the specifications and Jupiter Bus
2052 * device bindings.
2053 *
2054 * Fcode drivers install their interrupt handler via a "master-interrupt"
2055 * service. For boot time devices, this takes place within OBP. In the case
2056 * of DR, OPL uses IKP. The Fcode drivers that run within the efcode framework
2057 * attempt to install their handler via the "master-interrupt" service.
2058 * However, we cannot meaningfully install the Fcode driver's handler.
2059 * Instead, we install our own handler in OBP which does the same thing.
2060 *
2061 * Note that the only handling done for interrupts here is to clear it
2062 * on the CPU side. If any device in the future requires more special
2063 * handling, we would have to put in some kind of framework for adding
2064 * device-specific handlers. This is *highly* unlikely, but possible.
2065 *
2066 * Finally, OBP provides a hook called "unix-interrupt-handler" to install
2067 * a Solaris-defined master-interrupt handler for a port. The default
2068 * definition for this method does nothing. Solaris may override this
2069 * with its own definition. This is the way the following handler gets
2070 * control from OBP when interrupts happen at a port after L1A, etc.
2071 */
2072
2073static char define_master_interrupt_handler[] =
2074
2075/*
2076 * This method translates an Oberon port id to the base (physical) address
2077 * of the interrupt clear registers for that port id.
2078 */
2079
2080": pcich-mid>clear-int-pa   ( mid -- pa ) "
2081"   dup 1 >> 7 and          ( mid ch# ) "
2082"   over 4 >> h# 1f and     ( mid ch# lsb# ) "
2083"   1 d# 46 <<              ( mid ch# lsb# pa ) "
2084"   swap d# 40 << or        ( mid ch# pa ) "
2085"   swap d# 37 << or        ( mid pa ) "
2086"   swap 1 and if h# 70.0000 else h# 60.0000 then "
2087"   or h# 1400 or           ( pa ) "
2088"; "
2089
2090/*
2091 * This method translates a CMU channel port id to the base (physical) address
2092 * of the interrupt clear registers for that port id. There are two classes of
2093 * interrupts that need to be handled for a CMU channel:
2094 *	- obio interrupts
2095 *	- pci interrupts
2096 * So, there are two addresses that need to be computed.
2097 */
2098
2099": cmuch-mid>clear-int-pa   ( mid -- obio-pa pci-pa ) "
2100"   dup 1 >> 7 and          ( mid ch# ) "
2101"   over 4 >> h# 1f and     ( mid ch# lsb# ) "
2102"   1 d# 46 <<              ( mid ch# lsb# pa ) "
2103"   swap d# 40 << or        ( mid ch# pa ) "
2104"   swap d# 37 << or        ( mid pa ) "
2105"   nip dup h# 1800 +       ( pa obio-pa ) "
2106"   swap h# 1400 +          ( obio-pa pci-pa ) "
2107"; "
2108
2109/*
2110 * This method checks if a given I/O port ID is valid or not.
2111 * For a given LSB,
2112 *	Oberon ports range from 0 - 3
2113 *	CMU ch ports range from 4 - 4
2114 *
2115 * Also, the Oberon supports leaves 0 and 1.
2116 * The CMU ch supports only one leaf, leaf 0.
2117 */
2118
2119": valid-io-mid? ( mid -- flag ) "
2120"   dup 1 >> 7 and                     ( mid ch# ) "
2121"   dup 4 > if 2drop false exit then   ( mid ch# ) "
2122"   4 = swap 1 and 1 = and not "
2123"; "
2124
2125/*
2126 * This method checks if a given port id is a CMU ch.
2127 */
2128
2129": cmuch? ( mid -- flag ) 1 >> 7 and 4 = ; "
2130
2131/*
2132 * Given the base address of the array of interrupt clear registers for
2133 * a port id, this method iterates over the given interrupt number bitmap
2134 * and resets the interrupt on the CPU side for every interrupt number
2135 * in the bitmap. Note that physical addresses are used to perform the
2136 * writes, not virtual addresses. This allows the handler to work without
2137 * any involvement from Solaris.
2138 */
2139
2140": clear-ints ( pa bitmap count -- ) "
2141"   0 do                            ( pa bitmap ) "
2142"      dup 0= if 2drop unloop exit then "
2143"      tuck                         ( bitmap pa bitmap ) "
2144"      1 and if                     ( bitmap pa ) "
2145"	 dup i 8 * + 0 swap         ( bitmap pa 0 pa' ) "
2146"	 h# 15 spacex!              ( bitmap pa ) "
2147"      then                         ( bitmap pa ) "
2148"      swap 1 >>                    ( pa bitmap ) "
2149"   loop "
2150"; "
2151
2152/*
2153 * This method replaces the master-interrupt handler in OBP. Once
2154 * this method is plumbed into OBP, OBP transfers control to this
2155 * handler while returning to Solaris from OBP after L1A. This method's
2156 * task is to simply reset received interrupts on the CPU side.
2157 * When the devices reassert the interrupts later, Solaris will
2158 * be able to see them and handle them.
2159 *
2160 * For each port ID that has interrupts, this method is called
2161 * once by OBP. The input arguments are:
2162 *	mid	portid
2163 *	bitmap	bitmap of interrupts that have happened
2164 *
2165 * This method returns true, if it is able to handle the interrupts.
2166 * OBP does nothing further.
2167 *
2168 * This method returns false, if it encountered a problem. Currently,
2169 * the only problem could be an invalid port id. OBP needs to do
2170 * its own processing in that case. If this method returns false,
2171 * it preserves the mid and bitmap arguments for OBP.
2172 */
2173
2174": unix-resend-mondos ( mid bitmap -- [ mid bitmap false ] | true ) "
2175
2176/*
2177 * Uncomment the following line if you want to display the input arguments.
2178 * This is meant for debugging.
2179 * "   .\" Bitmap=\" dup u. .\" MID=\" over u. cr "
2180 */
2181
2182/*
2183 * If the port id is not valid (according to the Oberon and CMU ch
2184 * specifications, then return false to OBP to continue further
2185 * processing.
2186 */
2187
2188"   over valid-io-mid? not if       ( mid bitmap ) "
2189"      false exit "
2190"   then "
2191
2192/*
2193 * If the port is a CMU ch, then the 64-bit bitmap represents
2194 * 2 32-bit bitmaps:
2195 *	- obio interrupt bitmap (20 bits)
2196 *	- pci interrupt bitmap (32 bits)
2197 *
2198 * - Split the bitmap into two
2199 * - Compute the base addresses of the interrupt clear registers
2200 *   for both pci interrupts and obio interrupts
2201 * - Clear obio interrupts
2202 * - Clear pci interrupts
2203 */
2204
2205"   over cmuch? if                  ( mid bitmap ) "
2206"      xlsplit                      ( mid pci-bit obio-bit ) "
2207"      rot cmuch-mid>clear-int-pa   ( pci-bit obio-bit obio-pa pci-pa ) "
2208"      >r                           ( pci-bit obio-bit obio-pa ) ( r: pci-pa ) "
2209"      swap d# 20 clear-ints        ( pci-bit ) ( r: pci-pa ) "
2210"      r> swap d# 32 clear-ints     (  ) ( r: ) "
2211
2212/*
2213 * If the port is an Oberon, then the 64-bit bitmap is used fully.
2214 *
2215 * - Compute the base address of the interrupt clear registers
2216 * - Clear interrupts
2217 */
2218
2219"   else                            ( mid bitmap ) "
2220"      swap pcich-mid>clear-int-pa  ( bitmap pa ) "
2221"      swap d# 64 clear-ints        (  ) "
2222"   then "
2223
2224/*
2225 * Always return true from here.
2226 */
2227
2228"   true                            ( true ) "
2229"; "
2230;
2231
2232static char	install_master_interrupt_handler[] =
2233	"' unix-resend-mondos to unix-interrupt-handler";
2234static char	handler[] = "unix-interrupt-handler";
2235static char	handler_defined[] = "p\" %s\" find nip swap l! ";
2236
2237/*ARGSUSED*/
2238static int
2239master_interrupt_init(uint32_t portid, uint32_t xt)
2240{
2241	uint_t	defined;
2242	char	buf[sizeof (handler) + sizeof (handler_defined)];
2243
2244	if (master_interrupt_inited)
2245		return (1);
2246
2247	/*
2248	 * Check if the defer word "unix-interrupt-handler" is defined.
2249	 * This must be defined for OPL systems. So, this is only a
2250	 * sanity check.
2251	 */
2252	(void) sprintf(buf, handler_defined, handler);
2253	prom_interpret(buf, (uintptr_t)&defined, 0, 0, 0, 0);
2254	if (!defined) {
2255		cmn_err(CE_WARN, "master_interrupt_init: "
2256		    "%s is not defined\n", handler);
2257		return (0);
2258	}
2259
2260	/*
2261	 * Install the generic master-interrupt handler. Note that
2262	 * this is only done one time on the first DR operation.
2263	 * This is because, for OPL, one, single generic handler
2264	 * handles all ports (Oberon and CMU channel) and all
2265	 * interrupt sources within each port.
2266	 *
2267	 * The current support is only for the Oberon and CMU-channel.
2268	 * If any others need to be supported, the handler has to be
2269	 * modified accordingly.
2270	 */
2271
2272	/*
2273	 * Define the OPL master interrupt handler
2274	 */
2275	prom_interpret(define_master_interrupt_handler, 0, 0, 0, 0, 0);
2276
2277	/*
2278	 * Take over the master interrupt handler from OBP.
2279	 */
2280	prom_interpret(install_master_interrupt_handler, 0, 0, 0, 0, 0);
2281
2282	master_interrupt_inited = 1;
2283
2284	/*
2285	 * prom_interpret() does not return a status. So, we assume
2286	 * that the calls succeeded. In reality, the calls may fail
2287	 * if there is a syntax error, etc in the strings.
2288	 */
2289
2290	return (1);
2291}
2292
2293/*
2294 * Install the master-interrupt handler for a device.
2295 */
2296static int
2297opl_master_interrupt(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
2298{
2299	uint32_t	portid, xt;
2300	int		board, channel, leaf;
2301	int		status;
2302
2303	/* Check the argument */
2304	if (fc_cell2int(cp->nargs) != 2)
2305		return (fc_syntax_error(cp, "nargs must be 2"));
2306
2307	if (fc_cell2int(cp->nresults) < 1)
2308		return (fc_syntax_error(cp, "nresults must be >= 1"));
2309
2310	/* Get the parameters */
2311	portid = fc_cell2uint32_t(fc_arg(cp, 0));
2312	xt = fc_cell2uint32_t(fc_arg(cp, 1));
2313
2314	board = OPL_IO_PORTID_TO_LSB(portid);
2315	channel = OPL_PORTID_TO_CHANNEL(portid);
2316	leaf = OPL_PORTID_TO_LEAF(portid);
2317
2318	if ((board >= HWD_SBS_PER_DOMAIN) || !OPL_VALID_CHANNEL(channel) ||
2319	    (OPL_OBERON_CHANNEL(channel) && !OPL_VALID_LEAF(leaf)) ||
2320	    ((channel == OPL_CMU_CHANNEL) && (leaf != 0))) {
2321		FC_DEBUG1(1, CE_CONT, "opl_master_interrupt: invalid port %x\n",
2322		    portid);
2323		status = 0;
2324	} else {
2325		status = master_interrupt_init(portid, xt);
2326	}
2327
2328	cp->nresults = fc_int2cell(1);
2329	fc_result(cp, 0) = status;
2330
2331	return (fc_success_op(ap, rp, cp));
2332}
2333
2334/*
2335 * Set the properties for a leaf node (Oberon leaf or CMU channel leaf).
2336 */
2337/*ARGSUSED*/
2338static int
2339opl_create_leaf(dev_info_t *node, void *arg, uint_t flags)
2340{
2341	int ret;
2342
2343	OPL_UPDATE_PROP(string, node, "name", OPL_PCI_LEAF_NODE);
2344
2345	OPL_UPDATE_PROP(string, node, "status", "okay");
2346
2347	return (DDI_WALK_TERMINATE);
2348}
2349
2350static char *
2351opl_get_probe_string(opl_probe_t *probe, int channel, int leaf)
2352{
2353	char 		*probe_string;
2354	int		portid;
2355
2356	probe_string = kmem_zalloc(PROBE_STR_SIZE, KM_SLEEP);
2357
2358	if (channel == OPL_CMU_CHANNEL)
2359		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
2360	else
2361		portid = probe->
2362		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
2363
2364	(void) sprintf(probe_string, "%x", portid);
2365
2366	return (probe_string);
2367}
2368
2369static int
2370opl_probe_leaf(opl_probe_t *probe)
2371{
2372	int		channel, leaf, portid, error, circ;
2373	int		board;
2374	fco_handle_t	fco_handle, *cfg_handle;
2375	dev_info_t	*parent, *leaf_node;
2376	char		unit_address[UNIT_ADDR_SIZE];
2377	char		*probe_string;
2378	opl_board_cfg_t	*board_cfg;
2379
2380	board = probe->pr_board;
2381	channel = probe->pr_channel;
2382	leaf = probe->pr_leaf;
2383	parent = ddi_root_node();
2384	board_cfg = &opl_boards[board];
2385
2386	ASSERT(OPL_VALID_CHANNEL(channel));
2387	ASSERT(OPL_VALID_LEAF(leaf));
2388
2389	if (channel == OPL_CMU_CHANNEL) {
2390		portid = probe->pr_sb->sb_cmu.cmu_ch.chan_portid;
2391		cfg_handle = &board_cfg->cfg_cmuch_handle;
2392	} else {
2393		portid = probe->
2394		    pr_sb->sb_pci_ch[channel].pci_leaf[leaf].leaf_port_id;
2395		cfg_handle = &board_cfg->cfg_pcich_handle[channel][leaf];
2396	}
2397
2398	/*
2399	 * Prevent any changes to leaf_node until we have bound
2400	 * it to the correct driver.
2401	 */
2402	ndi_devi_enter(parent, &circ);
2403
2404	/*
2405	 * Ideally, fcode would be run from the "sid_branch_create"
2406	 * callback (that is the primary purpose of that callback).
2407	 * However, the fcode interpreter was written with the
2408	 * assumption that the "new_child" was linked into the
2409	 * device tree. The callback is invoked with the devinfo node
2410	 * in the DS_PROTO state. More investigation is needed before
2411	 * we can invoke the interpreter from the callback. For now,
2412	 * we create the "new_child" in the BOUND state, invoke the
2413	 * fcode interpreter and then rebind the dip to use any
2414	 * compatible properties created by fcode.
2415	 */
2416
2417	probe->pr_parent = parent;
2418	probe->pr_create = opl_create_leaf;
2419	probe->pr_hold = 1;
2420
2421	leaf_node = opl_create_node(probe);
2422	if (leaf_node == NULL) {
2423
2424		cmn_err(CE_WARN, "IKP: create leaf (%d-%d-%d) failed",
2425		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
2426		ndi_devi_exit(parent, circ);
2427		return (-1);
2428	}
2429
2430	/*
2431	 * The platform DR interfaces created the dip in
2432	 * bound state. Bring devinfo node down to linked
2433	 * state and hold it there until compatible
2434	 * properties are created.
2435	 */
2436	e_ddi_branch_rele(leaf_node);
2437	(void) i_ndi_unconfig_node(leaf_node, DS_LINKED, 0);
2438	ASSERT(i_ddi_node_state(leaf_node) == DS_LINKED);
2439	e_ddi_branch_hold(leaf_node);
2440
2441	mutex_enter(&DEVI(leaf_node)->devi_lock);
2442	DEVI(leaf_node)->devi_flags |= DEVI_NO_BIND;
2443	mutex_exit(&DEVI(leaf_node)->devi_lock);
2444
2445	/*
2446	 * Drop the busy-hold on parent before calling
2447	 * fcode_interpreter to prevent potential deadlocks
2448	 */
2449	ndi_devi_exit(parent, circ);
2450
2451	(void) sprintf(unit_address, "%x", portid);
2452
2453	/*
2454	 * Get the probe string
2455	 */
2456	probe_string = opl_get_probe_string(probe, channel, leaf);
2457
2458	/*
2459	 * The fcode pointer specified here is NULL and the fcode
2460	 * size specified here is 0. This causes the user-level
2461	 * fcode interpreter to issue a request to the fcode
2462	 * driver to get the Oberon/cmu-ch fcode.
2463	 */
2464	fco_handle = opl_fc_ops_alloc_handle(parent, leaf_node,
2465	    NULL, 0, unit_address, probe_string);
2466
2467	error = fcode_interpreter(parent, &opl_fc_do_op, fco_handle);
2468
2469	if (error != 0) {
2470		cmn_err(CE_WARN, "IKP: Unable to probe PCI leaf (%d-%d-%d)",
2471		    probe->pr_board, probe->pr_channel, probe->pr_leaf);
2472
2473		opl_fc_ops_free_handle(fco_handle);
2474
2475		if (probe_string != NULL)
2476			kmem_free(probe_string, PROBE_STR_SIZE);
2477
2478		(void) opl_destroy_node(leaf_node);
2479	} else {
2480		*cfg_handle = fco_handle;
2481
2482		if (channel == OPL_CMU_CHANNEL)
2483			board_cfg->cfg_cmuch_probe_str = probe_string;
2484		else
2485			board_cfg->cfg_pcich_probe_str[channel][leaf]
2486			    = probe_string;
2487
2488		/*
2489		 * Compatible properties (if any) have been created,
2490		 * so bind driver.
2491		 */
2492		ndi_devi_enter(parent, &circ);
2493		ASSERT(i_ddi_node_state(leaf_node) <= DS_LINKED);
2494
2495		mutex_enter(&DEVI(leaf_node)->devi_lock);
2496		DEVI(leaf_node)->devi_flags &= ~DEVI_NO_BIND;
2497		mutex_exit(&DEVI(leaf_node)->devi_lock);
2498
2499		ndi_devi_exit(parent, circ);
2500
2501		if (ndi_devi_bind_driver(leaf_node, 0) != DDI_SUCCESS) {
2502			cmn_err(CE_WARN, "IKP: Unable to bind PCI leaf "
2503			    "(%d-%d-%d)", probe->pr_board, probe->pr_channel,
2504			    probe->pr_leaf);
2505		}
2506	}
2507
2508	if ((error != 0) && (channel == OPL_CMU_CHANNEL))
2509		return (-1);
2510
2511	return (0);
2512}
2513
2514static void
2515opl_init_leaves(int myboard)
2516{
2517	dev_info_t	*parent, *node;
2518	char		*name;
2519	int 		circ, ret;
2520	int		len, portid, board, channel, leaf;
2521	opl_board_cfg_t	*cfg;
2522
2523	parent = ddi_root_node();
2524
2525	/*
2526	 * Hold parent node busy to walk its child list
2527	 */
2528	ndi_devi_enter(parent, &circ);
2529
2530	for (node = ddi_get_child(parent); (node != NULL); node =
2531	    ddi_get_next_sibling(node)) {
2532
2533		ret = OPL_GET_PROP(string, node, "name", &name, &len);
2534		if (ret != DDI_PROP_SUCCESS) {
2535			/*
2536			 * The property does not exist for this node.
2537			 */
2538			continue;
2539		}
2540
2541		if (strncmp(name, OPL_PCI_LEAF_NODE, len) == 0) {
2542
2543			ret = OPL_GET_PROP(int, node, "portid", &portid, -1);
2544			if (ret == DDI_PROP_SUCCESS) {
2545
2546				ret = OPL_GET_PROP(int, node, "board#",
2547				    &board, -1);
2548				if ((ret != DDI_PROP_SUCCESS) ||
2549				    (board != myboard)) {
2550					kmem_free(name, len);
2551					continue;
2552				}
2553
2554				cfg = &opl_boards[board];
2555				channel = OPL_PORTID_TO_CHANNEL(portid);
2556				if (channel == OPL_CMU_CHANNEL) {
2557
2558					if (cfg->cfg_cmuch_handle != NULL)
2559						cfg->cfg_cmuch_leaf = node;
2560
2561				} else {
2562
2563					leaf = OPL_PORTID_TO_LEAF(portid);
2564					if (cfg->cfg_pcich_handle[
2565					    channel][leaf] != NULL)
2566						cfg->cfg_pcich_leaf[
2567						    channel][leaf] = node;
2568				}
2569			}
2570		}
2571
2572		kmem_free(name, len);
2573		if (ret != DDI_PROP_SUCCESS)
2574			break;
2575	}
2576
2577	ndi_devi_exit(parent, circ);
2578}
2579
2580/*
2581 * Create "pci" node and hierarchy for the Oberon channels and the
2582 * CMU channel.
2583 */
2584/*ARGSUSED*/
2585static int
2586opl_probe_io(opl_probe_t *probe)
2587{
2588
2589	int		i, j;
2590	hwd_pci_ch_t	*channels;
2591
2592	if (HWD_STATUS_OK(probe->pr_sb->sb_cmu.cmu_ch.chan_status)) {
2593
2594		probe->pr_channel = HWD_CMU_CHANNEL;
2595		probe->pr_channel_status =
2596		    probe->pr_sb->sb_cmu.cmu_ch.chan_status;
2597		probe->pr_leaf = 0;
2598		probe->pr_leaf_status = probe->pr_channel_status;
2599
2600		if (opl_probe_leaf(probe) != 0)
2601			return (-1);
2602	}
2603
2604	channels = &probe->pr_sb->sb_pci_ch[0];
2605
2606	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
2607
2608		if (!HWD_STATUS_OK(channels[i].pci_status))
2609			continue;
2610
2611		probe->pr_channel = i;
2612		probe->pr_channel_status = channels[i].pci_status;
2613
2614		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {
2615
2616			probe->pr_leaf = j;
2617			probe->pr_leaf_status =
2618			    channels[i].pci_leaf[j].leaf_status;
2619
2620			if (!HWD_STATUS_OK(probe->pr_leaf_status))
2621				continue;
2622
2623			(void) opl_probe_leaf(probe);
2624		}
2625	}
2626	opl_init_leaves(probe->pr_board);
2627	return (0);
2628}
2629
2630/*
2631 * Perform the probe in the following order:
2632 *
2633 *	processors
2634 *	memory
2635 *	IO
2636 *
2637 * Each probe function returns 0 on sucess and a non-zero value on failure.
2638 * What is a failure is determined by the implementor of the probe function.
2639 * For example, while probing CPUs, any error encountered during probe
2640 * is considered a failure and causes the whole probe operation to fail.
2641 * However, for I/O, an error encountered while probing one device
2642 * should not prevent other devices from being probed. It should not cause
2643 * the whole probe operation to fail.
2644 */
2645int
2646opl_probe_sb(int board, unsigned *cpu_impl)
2647{
2648	opl_probe_t	*probe;
2649	int		ret;
2650
2651	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
2652		return (-1);
2653
2654	ASSERT(opl_cfg_inited != 0);
2655
2656	/*
2657	 * If the previous probe failed and left a partially configured
2658	 * board, we need to unprobe the board and start with a clean slate.
2659	 */
2660	if ((opl_boards[board].cfg_hwd != NULL) &&
2661	    (opl_unprobe_sb(board) != 0))
2662		return (-1);
2663
2664	ret = 0;
2665
2666	probe = kmem_zalloc(sizeof (opl_probe_t), KM_SLEEP);
2667	probe->pr_board = board;
2668
2669	if ((opl_probe_init(probe) != 0) ||
2670
2671	    (opl_probe_cpu_chips(probe) != 0) ||
2672
2673	    (opl_probe_memory(probe) != 0) ||
2674
2675	    (opl_probe_io(probe) != 0)) {
2676
2677		/*
2678		 * Probe failed. Perform cleanup.
2679		 */
2680		(void) opl_unprobe_sb(board);
2681		ret = -1;
2682	}
2683
2684	*cpu_impl = probe->pr_cpu_impl;
2685
2686	kmem_free(probe, sizeof (opl_probe_t));
2687
2688	return (ret);
2689}
2690
2691/*
2692 * This unprobing also includes CMU-CH.
2693 */
2694/*ARGSUSED*/
2695static int
2696opl_unprobe_io(int board)
2697{
2698	int		i, j, ret;
2699	opl_board_cfg_t	*board_cfg;
2700	dev_info_t	**node;
2701	fco_handle_t	*hand;
2702	char		**probe_str;
2703
2704	board_cfg = &opl_boards[board];
2705
2706	for (i = 0; i < HWD_PCI_CHANNELS_PER_SB; i++) {
2707
2708		for (j = 0; j < HWD_LEAVES_PER_PCI_CHANNEL; j++) {
2709
2710			node = &board_cfg->cfg_pcich_leaf[i][j];
2711			hand = &board_cfg->cfg_pcich_handle[i][j];
2712			probe_str = &board_cfg->cfg_pcich_probe_str[i][j];
2713
2714			if (*node == NULL)
2715				continue;
2716
2717			if (*hand != NULL) {
2718				opl_fc_ops_free_handle(*hand);
2719				*hand = NULL;
2720			}
2721
2722			if (*probe_str != NULL) {
2723				kmem_free(*probe_str, PROBE_STR_SIZE);
2724				*probe_str = NULL;
2725			}
2726
2727			ret = opl_destroy_node(*node);
2728			if (ret != 0) {
2729
2730				cmn_err(CE_WARN, "IKP: destroy pci (%d-%d-%d) "
2731				    "failed", board, i, j);
2732				return (-1);
2733			}
2734
2735			*node = NULL;
2736
2737		}
2738	}
2739
2740	node = &board_cfg->cfg_cmuch_leaf;
2741	hand = &board_cfg->cfg_cmuch_handle;
2742	probe_str = &board_cfg->cfg_cmuch_probe_str;
2743
2744	if (*node == NULL)
2745		return (0);
2746
2747	if (*hand != NULL) {
2748		opl_fc_ops_free_handle(*hand);
2749		*hand = NULL;
2750	}
2751
2752	if (*probe_str != NULL) {
2753		kmem_free(*probe_str, PROBE_STR_SIZE);
2754		*probe_str = NULL;
2755	}
2756
2757	if (opl_destroy_node(*node) != 0) {
2758
2759		cmn_err(CE_WARN, "IKP: destroy pci (%d-%d-%d) failed", board,
2760		    OPL_CMU_CHANNEL, 0);
2761		return (-1);
2762	}
2763
2764	*node = NULL;
2765
2766	return (0);
2767}
2768
2769/*
2770 * Destroy the "pseudo-mc" node for a board.
2771 */
2772static int
2773opl_unprobe_memory(int board)
2774{
2775	opl_board_cfg_t	*board_cfg;
2776
2777	board_cfg = &opl_boards[board];
2778
2779	if (board_cfg->cfg_pseudo_mc == NULL)
2780		return (0);
2781
2782	if (opl_destroy_node(board_cfg->cfg_pseudo_mc) != 0) {
2783
2784		cmn_err(CE_WARN, "IKP: destroy pseudo-mc (%d) failed", board);
2785		return (-1);
2786	}
2787
2788	board_cfg->cfg_pseudo_mc = NULL;
2789
2790	return (0);
2791}
2792
2793/*
2794 * Destroy the "cmp" nodes for a board. This also destroys the "core"
2795 * and "cpu" nodes below the "cmp" nodes.
2796 */
2797static int
2798opl_unprobe_processors(int board)
2799{
2800	int		i;
2801	dev_info_t	**cfg_cpu_chips;
2802
2803	cfg_cpu_chips = opl_boards[board].cfg_cpu_chips;
2804
2805	for (i = 0; i < HWD_CPU_CHIPS_PER_CMU; i++) {
2806
2807		if (cfg_cpu_chips[i] == NULL)
2808			continue;
2809
2810		if (opl_destroy_node(cfg_cpu_chips[i]) != 0) {
2811
2812			cmn_err(CE_WARN, "IKP: destroy chip (%d-%d) failed",
2813			    board, i);
2814			return (-1);
2815		}
2816
2817		cfg_cpu_chips[i] = NULL;
2818	}
2819
2820	return (0);
2821}
2822
2823/*
2824 * Perform the unprobe in the following order:
2825 *
2826 *	IO
2827 *	memory
2828 *	processors
2829 */
2830int
2831opl_unprobe_sb(int board)
2832{
2833	if ((board < 0) || (board >= HWD_SBS_PER_DOMAIN))
2834		return (-1);
2835
2836	ASSERT(opl_cfg_inited != 0);
2837
2838	if ((opl_unprobe_io(board) != 0) ||
2839
2840	    (opl_unprobe_memory(board) != 0) ||
2841
2842	    (opl_unprobe_processors(board) != 0))
2843
2844		return (-1);
2845
2846	if (opl_boards[board].cfg_hwd != NULL) {
2847#ifdef UCTEST
2848		size_t			size = 0xA000;
2849#endif
2850		/* Release the memory for the HWD */
2851		void *hwdp = opl_boards[board].cfg_hwd;
2852		opl_boards[board].cfg_hwd = NULL;
2853#ifdef UCTEST
2854		hwdp = (void *)((char *)hwdp - 0x1000);
2855		hat_unload(kas.a_hat, hwdp, size, HAT_UNLOAD_UNLOCK);
2856		vmem_free(heap_arena, hwdp, size);
2857#else
2858		kmem_free(hwdp, HWD_DATA_SIZE);
2859#endif
2860	}
2861	return (0);
2862}
2863
2864/*
2865 * For MAC patrol support, we need to update the PA-related properties
2866 * when there is a copy-rename event.  This should be called after the
2867 * physical copy and rename has been done by DR, and before the MAC
2868 * patrol is restarted.
2869 */
2870int
2871oplcfg_pa_swap(int from, int to)
2872{
2873	dev_info_t *from_node = opl_boards[from].cfg_pseudo_mc;
2874	dev_info_t *to_node = opl_boards[to].cfg_pseudo_mc;
2875	opl_range_t *rangef, *ranget;
2876	int elems;
2877	int ret;
2878
2879	if ((OPL_GET_PROP_ARRAY(int, from_node, "sb-mem-ranges", rangef,
2880	    elems) != DDI_SUCCESS) || (elems != 4)) {
2881		/* XXX -- bad news */
2882		return (-1);
2883	}
2884	if ((OPL_GET_PROP_ARRAY(int, to_node, "sb-mem-ranges", ranget,
2885	    elems) != DDI_SUCCESS) || (elems != 4)) {
2886		/* XXX -- bad news */
2887		return (-1);
2888	}
2889	OPL_UPDATE_PROP_ARRAY(int, from_node, "sb-mem-ranges", (int *)ranget,
2890	    4);
2891	OPL_UPDATE_PROP_ARRAY(int, to_node, "sb-mem-ranges", (int *)rangef,
2892	    4);
2893
2894	OPL_FREE_PROP(ranget);
2895	OPL_FREE_PROP(rangef);
2896
2897	return (0);
2898}
2899