1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*
23*7c478bd9Sstevel@tonic-gate  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate  */
26*7c478bd9Sstevel@tonic-gate 
27*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*7c478bd9Sstevel@tonic-gate 
29*7c478bd9Sstevel@tonic-gate /*
30*7c478bd9Sstevel@tonic-gate  * This module provides for the management of interconnect adapters
31*7c478bd9Sstevel@tonic-gate  * inter-node connections (aka paths), and IPC.  Adapter descriptors are
32*7c478bd9Sstevel@tonic-gate  * maintained on a linked list; one list per adapter devname.  Each
33*7c478bd9Sstevel@tonic-gate  * adapter descriptor heads a linked list of path descriptors.  There is
34*7c478bd9Sstevel@tonic-gate  * also a linked list of ipc_info descriptors; one for each node.  Each
35*7c478bd9Sstevel@tonic-gate  * ipc_info descriptor heads a circular list of ipc tokens (the tokens are
36*7c478bd9Sstevel@tonic-gate  * embedded within a path descriptor). The tokens are used in round robin
37*7c478bd9Sstevel@tonic-gate  * fashion.
38*7c478bd9Sstevel@tonic-gate  *
39*7c478bd9Sstevel@tonic-gate  *
40*7c478bd9Sstevel@tonic-gate  * The exported interface consists of the following functions:
41*7c478bd9Sstevel@tonic-gate  *	- rsmka_add_adapter
42*7c478bd9Sstevel@tonic-gate  *	- rsmka_remove_adapter
43*7c478bd9Sstevel@tonic-gate  *
44*7c478bd9Sstevel@tonic-gate  *      [add_path and remove_path only called for current adapters]
45*7c478bd9Sstevel@tonic-gate  *	- rsmka_add_path
46*7c478bd9Sstevel@tonic-gate  *	- rsmka_remove_path	[a path down request is implicit]
47*7c478bd9Sstevel@tonic-gate  *
48*7c478bd9Sstevel@tonic-gate  *	- rsmka_path_up           [called at clock ipl for Sun Cluster]
49*7c478bd9Sstevel@tonic-gate  *	- rsmka_path_down         [called at clock ipl for Sun Cluster]
50*7c478bd9Sstevel@tonic-gate  *	- rsmka_disconnect_node   [called at clock ipl for Sun Cluster;
51*7c478bd9Sstevel@tonic-gate  *				treat like path-down for all node paths;
52*7c478bd9Sstevel@tonic-gate  *				can be before node_alive; always before
53*7c478bd9Sstevel@tonic-gate  *				node_died.]
54*7c478bd9Sstevel@tonic-gate  *
55*7c478bd9Sstevel@tonic-gate  *	[node_alive and node_died are always paired]
56*7c478bd9Sstevel@tonic-gate  *	- rsmka_node_alive   called after the first cluster path is up
57*7c478bd9Sstevel@tonic-gate  *                           for this node
58*7c478bd9Sstevel@tonic-gate  *	- rsmka_node_died
59*7c478bd9Sstevel@tonic-gate  *
60*7c478bd9Sstevel@tonic-gate  *      [set the local node id]
61*7c478bd9Sstevel@tonic-gate  *      - rsmka_set_my_nodeid    called to set the variable my_nodeid to the
62*7c478bd9Sstevel@tonic-gate  *                           local node id
63*7c478bd9Sstevel@tonic-gate  *
64*7c478bd9Sstevel@tonic-gate  * Processing for these functions is setup as a state machine supported
65*7c478bd9Sstevel@tonic-gate  * by the data structures described above.
66*7c478bd9Sstevel@tonic-gate  *
67*7c478bd9Sstevel@tonic-gate  * For Sun Cluster these are called from the Path-Manager/Kernel-Agent
68*7c478bd9Sstevel@tonic-gate  * Interface (rsmka_pm_interface.cc).
69*7c478bd9Sstevel@tonic-gate  *
70*7c478bd9Sstevel@tonic-gate  * The functions rsm_path_up, rsm_path_down, and rsm_disconnect_node are
71*7c478bd9Sstevel@tonic-gate  * called at clock interrupt level from the Path-Manager/Kernel-Agent
72*7c478bd9Sstevel@tonic-gate  * Interface which precludes sleeping; so these functions may (optionally)
73*7c478bd9Sstevel@tonic-gate  * defer processing to an independent thread running at normal ipl.
74*7c478bd9Sstevel@tonic-gate  *
75*7c478bd9Sstevel@tonic-gate  *
76*7c478bd9Sstevel@tonic-gate  * lock definitions:
77*7c478bd9Sstevel@tonic-gate  *
78*7c478bd9Sstevel@tonic-gate  *	(mutex) work_queue.work_mutex
79*7c478bd9Sstevel@tonic-gate  *			protects linked list of work tokens and used
80*7c478bd9Sstevel@tonic-gate  *			with cv_wait/cv_signal thread synchronization.
81*7c478bd9Sstevel@tonic-gate  *			No other locks acquired when held.
82*7c478bd9Sstevel@tonic-gate  *
83*7c478bd9Sstevel@tonic-gate  *	(mutex) adapter_listhead_base.listlock
84*7c478bd9Sstevel@tonic-gate  *			protects linked list of adapter listheads
85*7c478bd9Sstevel@tonic-gate  *			Always acquired before listhead->mutex
86*7c478bd9Sstevel@tonic-gate  *
87*7c478bd9Sstevel@tonic-gate  *
88*7c478bd9Sstevel@tonic-gate  *	(mutex) ipc_info_lock
89*7c478bd9Sstevel@tonic-gate  *			protects ipc_info list and sendq token lists
90*7c478bd9Sstevel@tonic-gate  *			Always acquired before listhead->mutex
91*7c478bd9Sstevel@tonic-gate  *
92*7c478bd9Sstevel@tonic-gate  *      (mutex) listhead->mutex
93*7c478bd9Sstevel@tonic-gate  *			protects adapter listhead, linked list of
94*7c478bd9Sstevel@tonic-gate  *			adapters, and linked list of paths.
95*7c478bd9Sstevel@tonic-gate  *
96*7c478bd9Sstevel@tonic-gate  *      (mutex) path->mutex
97*7c478bd9Sstevel@tonic-gate  *			protects the path descriptor.
98*7c478bd9Sstevel@tonic-gate  *			work_queue.work_mutex may be acquired when holding
99*7c478bd9Sstevel@tonic-gate  *			this lock.
100*7c478bd9Sstevel@tonic-gate  *
101*7c478bd9Sstevel@tonic-gate  *	(mutex) adapter->mutex
102*7c478bd9Sstevel@tonic-gate  *			protects adapter descriptor contents.  used
103*7c478bd9Sstevel@tonic-gate  *			mainly for ref_cnt update.
104*7c478bd9Sstevel@tonic-gate  */
105*7c478bd9Sstevel@tonic-gate 
106*7c478bd9Sstevel@tonic-gate #include <sys/param.h>
107*7c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
108*7c478bd9Sstevel@tonic-gate #include <sys/errno.h>
109*7c478bd9Sstevel@tonic-gate #include <sys/time.h>
110*7c478bd9Sstevel@tonic-gate #include <sys/devops.h>
111*7c478bd9Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
112*7c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
113*7c478bd9Sstevel@tonic-gate #include <sys/ddi.h>
114*7c478bd9Sstevel@tonic-gate #include <sys/sunddi.h>
115*7c478bd9Sstevel@tonic-gate #include <sys/proc.h>
116*7c478bd9Sstevel@tonic-gate #include <sys/thread.h>
117*7c478bd9Sstevel@tonic-gate #include <sys/taskq.h>
118*7c478bd9Sstevel@tonic-gate #include <sys/callb.h>
119*7c478bd9Sstevel@tonic-gate 
120*7c478bd9Sstevel@tonic-gate #include <sys/rsm/rsm.h>
121*7c478bd9Sstevel@tonic-gate #include <rsm_in.h>
122*7c478bd9Sstevel@tonic-gate #include <sys/rsm/rsmka_path_int.h>
123*7c478bd9Sstevel@tonic-gate 
124*7c478bd9Sstevel@tonic-gate extern void _cplpl_init();
125*7c478bd9Sstevel@tonic-gate extern void _cplpl_fini();
126*7c478bd9Sstevel@tonic-gate extern pri_t maxclsyspri;
127*7c478bd9Sstevel@tonic-gate extern int   rsm_hash_size;
128*7c478bd9Sstevel@tonic-gate 
129*7c478bd9Sstevel@tonic-gate extern rsm_node_id_t my_nodeid;
130*7c478bd9Sstevel@tonic-gate extern rsmhash_table_t rsm_import_segs;
131*7c478bd9Sstevel@tonic-gate extern rsm_intr_hand_ret_t rsm_srv_func();
132*7c478bd9Sstevel@tonic-gate extern void rsmseg_unload(rsmseg_t *);
133*7c478bd9Sstevel@tonic-gate extern void rsm_suspend_complete(rsm_node_id_t src_node, int flag);
134*7c478bd9Sstevel@tonic-gate extern int rsmipc_send_controlmsg(path_t *path, int msgtype);
135*7c478bd9Sstevel@tonic-gate extern void rsmka_path_monitor_initialize();
136*7c478bd9Sstevel@tonic-gate extern void rsmka_path_monitor_terminate();
137*7c478bd9Sstevel@tonic-gate 
138*7c478bd9Sstevel@tonic-gate extern adapter_t loopback_adapter;
139*7c478bd9Sstevel@tonic-gate /*
140*7c478bd9Sstevel@tonic-gate  * Lint errors and warnings are displayed; informational messages
141*7c478bd9Sstevel@tonic-gate  * are suppressed.
142*7c478bd9Sstevel@tonic-gate  */
143*7c478bd9Sstevel@tonic-gate /* lint -w2 */
144*7c478bd9Sstevel@tonic-gate 
145*7c478bd9Sstevel@tonic-gate 
146*7c478bd9Sstevel@tonic-gate /*
147*7c478bd9Sstevel@tonic-gate  * macros SQ_TOKEN_TO_PATH and WORK_TOKEN_TO_PATH use a null pointer
148*7c478bd9Sstevel@tonic-gate  * for computational purposes.  Ignore the lint warning.
149*7c478bd9Sstevel@tonic-gate  */
150*7c478bd9Sstevel@tonic-gate /* lint -save -e413 */
151*7c478bd9Sstevel@tonic-gate /* FUNCTION PROTOTYPES */
152*7c478bd9Sstevel@tonic-gate static adapter_t *init_adapter(char *, int, rsm_addr_t,
153*7c478bd9Sstevel@tonic-gate     rsm_controller_handle_t, rsm_ops_t *, srv_handler_arg_t *);
154*7c478bd9Sstevel@tonic-gate adapter_t *rsmka_lookup_adapter(char *, int);
155*7c478bd9Sstevel@tonic-gate static ipc_info_t *lookup_ipc_info(rsm_node_id_t);
156*7c478bd9Sstevel@tonic-gate static ipc_info_t *init_ipc_info(rsm_node_id_t, boolean_t);
157*7c478bd9Sstevel@tonic-gate static path_t *lookup_path(char *, int, rsm_node_id_t, rsm_addr_t);
158*7c478bd9Sstevel@tonic-gate static void pathup_to_pathactive(ipc_info_t *, rsm_node_id_t);
159*7c478bd9Sstevel@tonic-gate static void path_importer_disconnect(path_t *);
160*7c478bd9Sstevel@tonic-gate boolean_t rsmka_do_path_active(path_t *, int);
161*7c478bd9Sstevel@tonic-gate static boolean_t do_path_up(path_t *, int);
162*7c478bd9Sstevel@tonic-gate static void do_path_down(path_t *, int);
163*7c478bd9Sstevel@tonic-gate static void enqueue_work(work_token_t *);
164*7c478bd9Sstevel@tonic-gate static boolean_t cancel_work(work_token_t *);
165*7c478bd9Sstevel@tonic-gate static void link_path(path_t *);
166*7c478bd9Sstevel@tonic-gate static void destroy_path(path_t *);
167*7c478bd9Sstevel@tonic-gate static void link_sendq_token(sendq_token_t *, rsm_node_id_t);
168*7c478bd9Sstevel@tonic-gate static void unlink_sendq_token(sendq_token_t *, rsm_node_id_t);
169*7c478bd9Sstevel@tonic-gate boolean_t rsmka_check_node_alive(rsm_node_id_t);
170*7c478bd9Sstevel@tonic-gate static void do_deferred_work(caddr_t);
171*7c478bd9Sstevel@tonic-gate static int create_ipc_sendq(path_t *);
172*7c478bd9Sstevel@tonic-gate static void destroy_ipc_info(ipc_info_t *);
173*7c478bd9Sstevel@tonic-gate void rsmka_pathmanager_cleanup();
174*7c478bd9Sstevel@tonic-gate void rsmka_release_adapter(adapter_t *);
175*7c478bd9Sstevel@tonic-gate 
176*7c478bd9Sstevel@tonic-gate kt_did_t rsm_thread_id;
177*7c478bd9Sstevel@tonic-gate int rsmka_terminate_workthread_loop = 0;
178*7c478bd9Sstevel@tonic-gate 
179*7c478bd9Sstevel@tonic-gate static struct adapter_listhead_list adapter_listhead_base;
180*7c478bd9Sstevel@tonic-gate static work_queue_t work_queue;
181*7c478bd9Sstevel@tonic-gate 
182*7c478bd9Sstevel@tonic-gate /* protect ipc_info descriptor manipulation */
183*7c478bd9Sstevel@tonic-gate static kmutex_t ipc_info_lock;
184*7c478bd9Sstevel@tonic-gate 
185*7c478bd9Sstevel@tonic-gate static ipc_info_t *ipc_info_head = NULL;
186*7c478bd9Sstevel@tonic-gate 
187*7c478bd9Sstevel@tonic-gate static int category = RSM_PATH_MANAGER | RSM_KERNEL_AGENT;
188*7c478bd9Sstevel@tonic-gate 
189*7c478bd9Sstevel@tonic-gate /* for synchronization with rsmipc_send() in rsm.c */
190*7c478bd9Sstevel@tonic-gate kmutex_t ipc_info_cvlock;
191*7c478bd9Sstevel@tonic-gate kcondvar_t ipc_info_cv;
192*7c478bd9Sstevel@tonic-gate 
193*7c478bd9Sstevel@tonic-gate 
194*7c478bd9Sstevel@tonic-gate 
195*7c478bd9Sstevel@tonic-gate /*
196*7c478bd9Sstevel@tonic-gate  * RSMKA PATHMANAGER INITIALIZATION AND CLEANUP ROUTINES
197*7c478bd9Sstevel@tonic-gate  *
198*7c478bd9Sstevel@tonic-gate  */
199*7c478bd9Sstevel@tonic-gate 
200*7c478bd9Sstevel@tonic-gate 
201*7c478bd9Sstevel@tonic-gate /*
202*7c478bd9Sstevel@tonic-gate  * Called from the rsm module (rsm.c)  _init() routine
203*7c478bd9Sstevel@tonic-gate  */
204*7c478bd9Sstevel@tonic-gate void
205*7c478bd9Sstevel@tonic-gate rsmka_pathmanager_init()
206*7c478bd9Sstevel@tonic-gate {
207*7c478bd9Sstevel@tonic-gate 	kthread_t *tp;
208*7c478bd9Sstevel@tonic-gate 
209*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
210*7c478bd9Sstevel@tonic-gate 	    "rsmka_pathmanager_init enter\n"));
211*7c478bd9Sstevel@tonic-gate 
212*7c478bd9Sstevel@tonic-gate 	/* initialization for locks and condition variables  */
213*7c478bd9Sstevel@tonic-gate 	mutex_init(&work_queue.work_mutex, NULL, MUTEX_DEFAULT, NULL);
214*7c478bd9Sstevel@tonic-gate 	mutex_init(&ipc_info_lock, NULL, MUTEX_DEFAULT, NULL);
215*7c478bd9Sstevel@tonic-gate 	mutex_init(&ipc_info_cvlock, NULL, MUTEX_DEFAULT, NULL);
216*7c478bd9Sstevel@tonic-gate 	mutex_init(&adapter_listhead_base.listlock, NULL,
217*7c478bd9Sstevel@tonic-gate 	    MUTEX_DEFAULT, NULL);
218*7c478bd9Sstevel@tonic-gate 
219*7c478bd9Sstevel@tonic-gate 	cv_init(&work_queue.work_cv, NULL, CV_DEFAULT, NULL);
220*7c478bd9Sstevel@tonic-gate 	cv_init(&ipc_info_cv, NULL, CV_DEFAULT, NULL);
221*7c478bd9Sstevel@tonic-gate 
222*7c478bd9Sstevel@tonic-gate 	tp = thread_create(NULL, 0, do_deferred_work, NULL, 0, &p0,
223*7c478bd9Sstevel@tonic-gate 	    TS_RUN, maxclsyspri);
224*7c478bd9Sstevel@tonic-gate 	rsm_thread_id = tp->t_did;
225*7c478bd9Sstevel@tonic-gate 
226*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
227*7c478bd9Sstevel@tonic-gate 	    "rsmka_pathmanager_init done\n"));
228*7c478bd9Sstevel@tonic-gate }
229*7c478bd9Sstevel@tonic-gate 
230*7c478bd9Sstevel@tonic-gate void
231*7c478bd9Sstevel@tonic-gate rsmka_pathmanager_cleanup()
232*7c478bd9Sstevel@tonic-gate {
233*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
234*7c478bd9Sstevel@tonic-gate 	    "rsmka_pathmanager_cleanup enter\n"));
235*7c478bd9Sstevel@tonic-gate 
236*7c478bd9Sstevel@tonic-gate 	ASSERT(work_queue.head == NULL);
237*7c478bd9Sstevel@tonic-gate 
238*7c478bd9Sstevel@tonic-gate 	/*
239*7c478bd9Sstevel@tonic-gate 	 * In processing the remove path callbacks from the path monitor
240*7c478bd9Sstevel@tonic-gate 	 * object, all deferred work will have been completed. So
241*7c478bd9Sstevel@tonic-gate 	 * awaken the deferred work thread to give it a chance to exit
242*7c478bd9Sstevel@tonic-gate 	 * the loop.
243*7c478bd9Sstevel@tonic-gate 	 */
244*7c478bd9Sstevel@tonic-gate 	mutex_enter(&work_queue.work_mutex);
245*7c478bd9Sstevel@tonic-gate 	rsmka_terminate_workthread_loop++;
246*7c478bd9Sstevel@tonic-gate 	cv_signal(&work_queue.work_cv);
247*7c478bd9Sstevel@tonic-gate 	mutex_exit(&work_queue.work_mutex);
248*7c478bd9Sstevel@tonic-gate 
249*7c478bd9Sstevel@tonic-gate 	/*
250*7c478bd9Sstevel@tonic-gate 	 * Wait for the deferred work thread to exit before
251*7c478bd9Sstevel@tonic-gate 	 * destroying the locks and cleaning up other data
252*7c478bd9Sstevel@tonic-gate 	 * structures.
253*7c478bd9Sstevel@tonic-gate 	 */
254*7c478bd9Sstevel@tonic-gate 	if (rsm_thread_id)
255*7c478bd9Sstevel@tonic-gate 		thread_join(rsm_thread_id);
256*7c478bd9Sstevel@tonic-gate 
257*7c478bd9Sstevel@tonic-gate 	/*
258*7c478bd9Sstevel@tonic-gate 	 * Destroy locks & condition variables
259*7c478bd9Sstevel@tonic-gate 	 */
260*7c478bd9Sstevel@tonic-gate 	mutex_destroy(&work_queue.work_mutex);
261*7c478bd9Sstevel@tonic-gate 	cv_destroy(&work_queue.work_cv);
262*7c478bd9Sstevel@tonic-gate 
263*7c478bd9Sstevel@tonic-gate 	mutex_enter(&ipc_info_lock);
264*7c478bd9Sstevel@tonic-gate 	while (ipc_info_head)
265*7c478bd9Sstevel@tonic-gate 		destroy_ipc_info(ipc_info_head);
266*7c478bd9Sstevel@tonic-gate 	mutex_exit(&ipc_info_lock);
267*7c478bd9Sstevel@tonic-gate 
268*7c478bd9Sstevel@tonic-gate 	mutex_destroy(&ipc_info_lock);
269*7c478bd9Sstevel@tonic-gate 
270*7c478bd9Sstevel@tonic-gate 	mutex_destroy(&ipc_info_cvlock);
271*7c478bd9Sstevel@tonic-gate 	cv_destroy(&ipc_info_cv);
272*7c478bd9Sstevel@tonic-gate 
273*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
274*7c478bd9Sstevel@tonic-gate 	    "rsmka_pathmanager_cleanup done\n"));
275*7c478bd9Sstevel@tonic-gate 
276*7c478bd9Sstevel@tonic-gate }
277*7c478bd9Sstevel@tonic-gate 
278*7c478bd9Sstevel@tonic-gate void
279*7c478bd9Sstevel@tonic-gate rsmka_set_my_nodeid(rsm_node_id_t local_nodeid)
280*7c478bd9Sstevel@tonic-gate {
281*7c478bd9Sstevel@tonic-gate 	my_nodeid = local_nodeid;
282*7c478bd9Sstevel@tonic-gate 
283*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
284*7c478bd9Sstevel@tonic-gate 	    "rsm: node %d \n", my_nodeid));
285*7c478bd9Sstevel@tonic-gate 
286*7c478bd9Sstevel@tonic-gate }
287*7c478bd9Sstevel@tonic-gate 
288*7c478bd9Sstevel@tonic-gate /*
289*7c478bd9Sstevel@tonic-gate  * DEFERRED WORK THREAD AND WORK QUEUE SUPPORT ROUTINES
290*7c478bd9Sstevel@tonic-gate  *
291*7c478bd9Sstevel@tonic-gate  */
292*7c478bd9Sstevel@tonic-gate 
293*7c478bd9Sstevel@tonic-gate /*
294*7c478bd9Sstevel@tonic-gate  * This function is the executable code of the thread which handles
295*7c478bd9Sstevel@tonic-gate  * deferred work.  Work is deferred when a function is called at
296*7c478bd9Sstevel@tonic-gate  * clock ipl and processing may require blocking.
297*7c478bd9Sstevel@tonic-gate  *
298*7c478bd9Sstevel@tonic-gate  *
 * The thread is created by a call to thread_create in
 * rsmka_pathmanager_init and begins executing this function
 * immediately.  It loops forever - blocked until work is enqueued from
 * rsmka_do_path_active, do_path_down, or rsmka_disconnect_node.
303*7c478bd9Sstevel@tonic-gate  * rsmka_pathmanager_cleanup (called from _fini) will
304*7c478bd9Sstevel@tonic-gate  * set rsmka_terminate_workthread_loop and the task processing will
305*7c478bd9Sstevel@tonic-gate  * terminate.
306*7c478bd9Sstevel@tonic-gate  */
/*
 * Body of the deferred-work thread.  Dequeues work tokens from
 * work_queue and performs the blocking portion of path-up/path-down
 * processing that could not run at clock interrupt level.  Loops until
 * rsmka_pathmanager_cleanup sets rsmka_terminate_workthread_loop.
 * Participates in the CPR (suspend/resume) protocol via callb_cpr.
 */
static void
do_deferred_work(caddr_t arg /*ARGSUSED*/)
{

	adapter_t 			*adapter;
	path_t				*path;
	work_token_t			*work_token;
	int				work_opcode;
	rsm_send_q_handle_t		sendq_handle;
	int				error;
	timespec_t			tv;
	callb_cpr_t			cprinfo;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_deferred_work enter\n"));

	/* register with CPR; work_mutex is the lock CPR coordinates on */
	CALLB_CPR_INIT(&cprinfo, &work_queue.work_mutex, callb_generic_cpr,
	    "rsm_deferred_work");

	for (;;) {
		mutex_enter(&work_queue.work_mutex);

		/* termination check must be made while holding work_mutex */
		if (rsmka_terminate_workthread_loop) {
			goto exit;
		}

		/* When there is no work to do, block here */
		while (work_queue.head == NULL) {
			/* Since no work to do, Safe to CPR */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&work_queue.work_cv, &work_queue.work_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &work_queue.work_mutex);

			/* may have been woken by cleanup rather than work */
			if (rsmka_terminate_workthread_loop) {
				goto exit;
			}
		}

		/*
		 * Remove a work token from the head of the queue and begin
		 * work (tokens are enqueued at the tail by enqueue_work).
		 */
		work_token = work_queue.head;
		work_queue.head = work_token->next;
		if (work_queue.tail == work_token)
			work_queue.tail = NULL;

		/*
		 * Recover the path embedding this token; opcode - 1 is
		 * presumably the token's index within the path's token
		 * array (opcodes appear to start at 1) -- see
		 * WORK_TOKEN_TO_PATH in rsmka_path_int.h to confirm.
		 */
		work_opcode = work_token->opcode;
		path = WORK_TOKEN_TO_PATH(work_token, work_opcode -1);
		work_token->next = NULL;
		mutex_exit(&work_queue.work_mutex);


		switch (work_opcode) {
		case RSMKA_IPC_UP:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work:up,  path = %lx\n", path));
			/* may block; this is why the work was deferred */
			error = create_ipc_sendq(path);
			mutex_enter(&path->mutex);
			if (path->state != RSMKA_PATH_UP) {
				/*
				 * path state has changed, if sendq was created,
				 * destroy it and return. Don't need to worry
				 * about sendq ref_cnt since no one starts
				 * using the sendq till path state becomes
				 * active
				 */
				if (error == RSM_SUCCESS) {
					sendq_handle = path->sendq_token.
					    rsmpi_sendq_handle;
					path->sendq_token.rsmpi_sendq_handle =
					    NULL;
					adapter = path->local_adapter;
					/*
					 * drop path->mutex across the RSMPI
					 * sendq destroy call; the handle has
					 * already been detached from the path
					 * above so no one else can see it.
					 */
					mutex_exit(&path->mutex);

					if (sendq_handle != NULL) {
						adapter->rsmpi_ops->
						    rsm_sendq_destroy(
							    sendq_handle);
					}
					mutex_enter(&path->mutex);
				}
				/* free up work token */
				work_token->opcode = 0;

				/*
				 * decrement reference count for the path
				 * descriptor and signal for synchronization
				 * with rsmka_remove_path. PATH_HOLD_NOLOCK was
				 * done by rsmka_path_up.
				 */
				PATH_RELE_NOLOCK(path);
				mutex_exit(&path->mutex);
				break;
			}

			if (error == RSM_SUCCESS) {
				DBG_PRINTF((category, RSM_DEBUG,
				    "do_deferred_work:success on up\n"));
				/* clear flag since sendq_create succeeded */
				path->flags &= ~RSMKA_SQCREATE_PENDING;
				path->state = RSMKA_PATH_ACTIVE;

				/*
				 * now that path is active we send the
				 * RSMIPC_MSG_SQREADY to the remote endpoint
				 */
				path->procmsg_cnt = 0;
				path->sendq_token.msgbuf_avail = 0;

				/*
				 * Calculate local incarnation number from the
				 * current time; avoid the reserved
				 * RSM_UNKNOWN_INCN value.
				 */
				gethrestime(&tv);
				if (tv.tv_sec == RSM_UNKNOWN_INCN)
					tv.tv_sec = 1;
				path->local_incn = (int64_t)tv.tv_sec;

				/*
				 * if send fails here its due to some
				 * non-transient error because QUEUE_FULL is
				 * not possible here since we are the first
				 * message on this sendq. The error will cause
				 * the path to go down anyways, so ignore
				 * the return value.
				 */
				(void) rsmipc_send_controlmsg(path,
				    RSMIPC_MSG_SQREADY);
				/* wait for SQREADY_ACK message */
				path->flags |= RSMKA_WAIT_FOR_SQACK;
			} else {
				/*
				 * sendq create failed possibly because
				 * the remote end is not yet ready eg.
				 * handler not registered, set a flag
				 * so that when there is an indication
				 * that the remote end is ready
				 * rsmka_do_path_active will be retried.
				 */
				path->flags |= RSMKA_SQCREATE_PENDING;
			}

			/* free up work token */
			work_token->opcode = 0;

			/*
			 * decrement reference count for the path
			 * descriptor and signal for synchronization with
			 * rsmka_remove_path. PATH_HOLD_NOLOCK was done
			 * by rsmka_path_up.
			 */
			PATH_RELE_NOLOCK(path);
			mutex_exit(&path->mutex);

			break;
		case RSMKA_IPC_DOWN:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work:down, path = %lx\n", path));

			/*
			 * Unlike the processing of path_down in the case
			 * where the RSMKA_NO_SLEEP flag is not set, here,
			 * the state of the path is changed directly to
			 * RSMKA_PATH_DOWN. This is because in this case
			 * where the RSMKA_NO_SLEEP flag is set, any other
			 * calls referring this path will just queue up
			 * and will be processed only after the path
			 * down processing has completed.
			 */
			mutex_enter(&path->mutex);
			path->state = RSMKA_PATH_DOWN;
			/*
			 * clear the WAIT_FOR_SQACK flag since path is down.
			 */
			path->flags &= ~RSMKA_WAIT_FOR_SQACK;

			/*
			 * this wakes up any thread waiting to receive credits
			 * in rsmipc_send to tell it that the path is down
			 * thus releasing the sendq.
			 */
			cv_broadcast(&path->sendq_token.sendq_cv);

			mutex_exit(&path->mutex);

			/*
			 * drain the messages from the receive msgbuf; must
			 * be done without holding path->mutex since the
			 * taskq callbacks may need it.
			 */
			taskq_wait(path->recv_taskq);

			/*
			 * The path_importer_disconnect function has to
			 * be called after releasing the mutex on the path
			 * in order to avoid any recursive mutex enter panics
			 */
			path_importer_disconnect(path);
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work: success on down\n"));
			/*
			 * decrement reference count for the path
			 * descriptor and signal for synchronization with
			 * rsmka_remove_path. PATH_HOLD_NOLOCK was done
			 * by rsmka_path_down.
			 */
			mutex_enter(&path->mutex);

#ifdef DEBUG
			/*
			 * Some IPC messages left in the recv_buf,
			 * they'll be dropped
			 */
			if (path->msgbuf_cnt != 0)
				cmn_err(CE_NOTE,
				    "path=%lx msgbuf_cnt != 0\n",
				    (uintptr_t)path);
#endif

			/*
			 * Don't want to destroy a send queue when a token
			 * has been acquired; so wait 'til the token is
			 * no longer referenced (with a cv_wait).
			 */
			while (path->sendq_token.ref_cnt != 0)
				cv_wait(&path->sendq_token.sendq_cv,
				    &path->mutex);

			/* detach the handle under the lock before destroy */
			sendq_handle = path->sendq_token.rsmpi_sendq_handle;
			path->sendq_token.rsmpi_sendq_handle = NULL;

			/* destroy the send queue and release the handle */
			if (sendq_handle != NULL) {
				adapter = path->local_adapter;
				adapter->rsmpi_ops->rsm_sendq_destroy(
				    sendq_handle);
			}

			/* free the token, drop the hold from rsmka_path_down */
			work_token->opcode = 0;
			PATH_RELE_NOLOCK(path);
			mutex_exit(&path->mutex);
			break;
		default:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work: bad work token opcode\n"));
			break;
		}
	}

exit:
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_deferred_work done\n"));
	/*
	 * CALLB_CPR_EXIT does a mutex_exit for
	 * the work_queue.work_mutex
	 */
	CALLB_CPR_EXIT(&cprinfo);
}
556*7c478bd9Sstevel@tonic-gate 
557*7c478bd9Sstevel@tonic-gate /*
558*7c478bd9Sstevel@tonic-gate  * Work is inserted at the tail of the list and processed from the
559*7c478bd9Sstevel@tonic-gate  * head of the list.
560*7c478bd9Sstevel@tonic-gate  */
561*7c478bd9Sstevel@tonic-gate static void
562*7c478bd9Sstevel@tonic-gate enqueue_work(work_token_t *token)
563*7c478bd9Sstevel@tonic-gate {
564*7c478bd9Sstevel@tonic-gate 	work_token_t	*tail_token;
565*7c478bd9Sstevel@tonic-gate 
566*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "enqueue_work enter\n"));
567*7c478bd9Sstevel@tonic-gate 
568*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&work_queue.work_mutex));
569*7c478bd9Sstevel@tonic-gate 
570*7c478bd9Sstevel@tonic-gate 	token->next = NULL;
571*7c478bd9Sstevel@tonic-gate 	if (work_queue.head == NULL) {
572*7c478bd9Sstevel@tonic-gate 		work_queue.head = work_queue.tail = token;
573*7c478bd9Sstevel@tonic-gate 	} else {
574*7c478bd9Sstevel@tonic-gate 		tail_token = work_queue.tail;
575*7c478bd9Sstevel@tonic-gate 		work_queue.tail = tail_token->next = token;
576*7c478bd9Sstevel@tonic-gate 	}
577*7c478bd9Sstevel@tonic-gate 
578*7c478bd9Sstevel@tonic-gate 	/* wake up deferred work thread */
579*7c478bd9Sstevel@tonic-gate 	cv_signal(&work_queue.work_cv);
580*7c478bd9Sstevel@tonic-gate 
581*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "enqueue_work done\n"));
582*7c478bd9Sstevel@tonic-gate }
583*7c478bd9Sstevel@tonic-gate 
584*7c478bd9Sstevel@tonic-gate 
585*7c478bd9Sstevel@tonic-gate /*
586*7c478bd9Sstevel@tonic-gate  * If the work_token is found on the work queue, the work is cancelled
587*7c478bd9Sstevel@tonic-gate  * by removing the token from the work queue.
588*7c478bd9Sstevel@tonic-gate  *
589*7c478bd9Sstevel@tonic-gate  * Return true if a work_token was found and cancelled, otherwise return false
590*7c478bd9Sstevel@tonic-gate  *
591*7c478bd9Sstevel@tonic-gate  * enqueue_work increments the path refcnt to make sure that the path doesn't
592*7c478bd9Sstevel@tonic-gate  * go away, callers of cancel_work need to decrement the refcnt of the path to
593*7c478bd9Sstevel@tonic-gate  * which this work_token belongs if a work_token is found in the work_queue
594*7c478bd9Sstevel@tonic-gate  * and cancelled ie. when the return value is B_TRUE.
595*7c478bd9Sstevel@tonic-gate  */
596*7c478bd9Sstevel@tonic-gate static boolean_t
597*7c478bd9Sstevel@tonic-gate cancel_work(work_token_t *work_token)
598*7c478bd9Sstevel@tonic-gate {
599*7c478bd9Sstevel@tonic-gate 	work_token_t	*current_token;
600*7c478bd9Sstevel@tonic-gate 	work_token_t	*prev_token = NULL;
601*7c478bd9Sstevel@tonic-gate 	boolean_t	cancelled = B_FALSE;
602*7c478bd9Sstevel@tonic-gate 
603*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "cancel_work enter\n"));
604*7c478bd9Sstevel@tonic-gate 
605*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&work_queue.work_mutex));
606*7c478bd9Sstevel@tonic-gate 
607*7c478bd9Sstevel@tonic-gate 
608*7c478bd9Sstevel@tonic-gate 	current_token = work_queue.head;
609*7c478bd9Sstevel@tonic-gate 	while (current_token != NULL) {
610*7c478bd9Sstevel@tonic-gate 		if (current_token == work_token) {
611*7c478bd9Sstevel@tonic-gate 			if (work_token == work_queue.head)
612*7c478bd9Sstevel@tonic-gate 				work_queue.head = work_token->next;
613*7c478bd9Sstevel@tonic-gate 			else
614*7c478bd9Sstevel@tonic-gate 				prev_token->next = work_token->next;
615*7c478bd9Sstevel@tonic-gate 			if (work_token == work_queue.tail)
616*7c478bd9Sstevel@tonic-gate 				work_queue.tail = prev_token;
617*7c478bd9Sstevel@tonic-gate 
618*7c478bd9Sstevel@tonic-gate 			current_token->opcode = 0;
619*7c478bd9Sstevel@tonic-gate 			current_token->next = NULL;
620*7c478bd9Sstevel@tonic-gate 			/* found and cancelled work */
621*7c478bd9Sstevel@tonic-gate 			cancelled = B_TRUE;
622*7c478bd9Sstevel@tonic-gate 			DBG_PRINTF((category, RSM_DEBUG,
623*7c478bd9Sstevel@tonic-gate 			    "cancelled_work = 0x%p\n", work_token));
624*7c478bd9Sstevel@tonic-gate 			break;
625*7c478bd9Sstevel@tonic-gate 		}
626*7c478bd9Sstevel@tonic-gate 		prev_token = current_token;
627*7c478bd9Sstevel@tonic-gate 		current_token = current_token->next;
628*7c478bd9Sstevel@tonic-gate 	}
629*7c478bd9Sstevel@tonic-gate 
630*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "cancel_work done\n"));
631*7c478bd9Sstevel@tonic-gate 	return (cancelled);
632*7c478bd9Sstevel@tonic-gate }
633*7c478bd9Sstevel@tonic-gate 
634*7c478bd9Sstevel@tonic-gate /*
635*7c478bd9Sstevel@tonic-gate  * EXTERNAL INTERFACES
636*7c478bd9Sstevel@tonic-gate  *
637*7c478bd9Sstevel@tonic-gate  * For Galileo Clustering, these routine are called from
638*7c478bd9Sstevel@tonic-gate  * rsmka_pm_interface.cc
639*7c478bd9Sstevel@tonic-gate  *
640*7c478bd9Sstevel@tonic-gate  */
641*7c478bd9Sstevel@tonic-gate 
642*7c478bd9Sstevel@tonic-gate /*
643*7c478bd9Sstevel@tonic-gate  *
644*7c478bd9Sstevel@tonic-gate  * If the adapter is supported by rsmpi then initialize an adapter descriptor
645*7c478bd9Sstevel@tonic-gate  * and link it to the list of adapters.  The adapter attributes are obtained
646*7c478bd9Sstevel@tonic-gate  * from rsmpi and stored in the descriptor.  Finally, a service handler
647*7c478bd9Sstevel@tonic-gate  * for incoming ipc on this adapter is registered with rsmpi.
648*7c478bd9Sstevel@tonic-gate  * A pointer for the adapter descriptor is returned as a cookie to the
649*7c478bd9Sstevel@tonic-gate  * caller.  The cookie may be use with subsequent calls to save the time of
650*7c478bd9Sstevel@tonic-gate  * adapter descriptor lookup.
651*7c478bd9Sstevel@tonic-gate  *
652*7c478bd9Sstevel@tonic-gate  * The adapter descriptor maintains a reference count which is intialized
653*7c478bd9Sstevel@tonic-gate  * to 1 and incremented on lookups; when a cookie is used in place of
654*7c478bd9Sstevel@tonic-gate  * a lookup, an explicit ADAPTER_HOLD is required.
655*7c478bd9Sstevel@tonic-gate  */
656*7c478bd9Sstevel@tonic-gate 
657*7c478bd9Sstevel@tonic-gate void *
658*7c478bd9Sstevel@tonic-gate rsmka_add_adapter(char *name, int instance, rsm_addr_t hwaddr)
659*7c478bd9Sstevel@tonic-gate {
660*7c478bd9Sstevel@tonic-gate 	adapter_t		*adapter;
661*7c478bd9Sstevel@tonic-gate 	rsm_controller_object_t	rsmpi_adapter_object;
662*7c478bd9Sstevel@tonic-gate 	rsm_controller_handle_t	rsmpi_adapter_handle;
663*7c478bd9Sstevel@tonic-gate 	rsm_ops_t		*rsmpi_ops_vector;
664*7c478bd9Sstevel@tonic-gate 	int			adapter_is_supported;
665*7c478bd9Sstevel@tonic-gate 	rsm_controller_attr_t	*attr;
666*7c478bd9Sstevel@tonic-gate 	srv_handler_arg_t	*srv_hdlr_argp;
667*7c478bd9Sstevel@tonic-gate 	int result;
668*7c478bd9Sstevel@tonic-gate 
669*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_adapter enter\n"));
670*7c478bd9Sstevel@tonic-gate 
671*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
672*7c478bd9Sstevel@tonic-gate 	    "rsmka_add_adapter: name = %s instance = %d hwaddr = %llx \n",
673*7c478bd9Sstevel@tonic-gate 	    name, instance, hwaddr));
674*7c478bd9Sstevel@tonic-gate 
675*7c478bd9Sstevel@tonic-gate 	/* verify name length */
676*7c478bd9Sstevel@tonic-gate 	if (strlen(name) >= MAXNAMELEN) {
677*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
678*7c478bd9Sstevel@tonic-gate 		    "rsmka_add_adapter done: name too long\n"));
679*7c478bd9Sstevel@tonic-gate 		return (NULL);
680*7c478bd9Sstevel@tonic-gate 	}
681*7c478bd9Sstevel@tonic-gate 
682*7c478bd9Sstevel@tonic-gate 
683*7c478bd9Sstevel@tonic-gate 	/* Check if rsmpi supports this adapter type */
684*7c478bd9Sstevel@tonic-gate 	adapter_is_supported = rsm_get_controller(name, instance,
685*7c478bd9Sstevel@tonic-gate 	    &rsmpi_adapter_object, RSM_VERSION);
686*7c478bd9Sstevel@tonic-gate 
687*7c478bd9Sstevel@tonic-gate 	if (adapter_is_supported != RSM_SUCCESS) {
688*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_ERR,
689*7c478bd9Sstevel@tonic-gate 		    "rsmka_add_adapter done: adapter not supported\n"));
690*7c478bd9Sstevel@tonic-gate 		return (NULL);
691*7c478bd9Sstevel@tonic-gate 	}
692*7c478bd9Sstevel@tonic-gate 
693*7c478bd9Sstevel@tonic-gate 	rsmpi_adapter_handle = rsmpi_adapter_object.handle;
694*7c478bd9Sstevel@tonic-gate 	rsmpi_ops_vector = rsmpi_adapter_object.ops;
695*7c478bd9Sstevel@tonic-gate 
696*7c478bd9Sstevel@tonic-gate 	/* Get adapter attributes */
697*7c478bd9Sstevel@tonic-gate 	result = rsm_get_controller_attr(rsmpi_adapter_handle, &attr);
698*7c478bd9Sstevel@tonic-gate 	if (result != RSM_SUCCESS) {
699*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_ERR,
700*7c478bd9Sstevel@tonic-gate 		    "rsm: get_controller_attr(%d) Failed %x\n",
701*7c478bd9Sstevel@tonic-gate 		    instance, result));
702*7c478bd9Sstevel@tonic-gate 		(void) rsm_release_controller(name, instance,
703*7c478bd9Sstevel@tonic-gate 		    &rsmpi_adapter_object);
704*7c478bd9Sstevel@tonic-gate 		return (NULL);
705*7c478bd9Sstevel@tonic-gate 	}
706*7c478bd9Sstevel@tonic-gate 
707*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
708*7c478bd9Sstevel@tonic-gate 	    "rsmka_add_adapter: register service offset = %d\n", hwaddr));
709*7c478bd9Sstevel@tonic-gate 
710*7c478bd9Sstevel@tonic-gate 	/*
711*7c478bd9Sstevel@tonic-gate 	 * create a srv_handler_arg_t object, initialize it and register
712*7c478bd9Sstevel@tonic-gate 	 * it along with rsm_srv_func. This get passed as the
713*7c478bd9Sstevel@tonic-gate 	 * rsm_intr_hand_arg_t when the handler gets invoked.
714*7c478bd9Sstevel@tonic-gate 	 */
715*7c478bd9Sstevel@tonic-gate 	srv_hdlr_argp = kmem_zalloc(sizeof (srv_handler_arg_t), KM_SLEEP);
716*7c478bd9Sstevel@tonic-gate 
717*7c478bd9Sstevel@tonic-gate 	(void) strcpy(srv_hdlr_argp->adapter_name, name);
718*7c478bd9Sstevel@tonic-gate 	srv_hdlr_argp->adapter_instance = instance;
719*7c478bd9Sstevel@tonic-gate 	srv_hdlr_argp->adapter_hwaddr = hwaddr;
720*7c478bd9Sstevel@tonic-gate 
721*7c478bd9Sstevel@tonic-gate 	/* Have rsmpi register the ipc receive handler for this adapter */
722*7c478bd9Sstevel@tonic-gate 	/*
723*7c478bd9Sstevel@tonic-gate 	 * Currently, we need to pass in a separate service identifier for
724*7c478bd9Sstevel@tonic-gate 	 * each adapter. In order to obtain a unique service identifier
725*7c478bd9Sstevel@tonic-gate 	 * value for an adapter, we add the hardware address of the
726*7c478bd9Sstevel@tonic-gate 	 * adapter to the base service identifier(RSM_SERVICE which is
727*7c478bd9Sstevel@tonic-gate 	 * defined as RSM_INTR_T_KA as per the RSMPI specification).
728*7c478bd9Sstevel@tonic-gate 	 * NOTE: This may result in using some of the service identifier
729*7c478bd9Sstevel@tonic-gate 	 * values defined for RSM_INTR_T_XPORT(the Sun Cluster Transport).
730*7c478bd9Sstevel@tonic-gate 	 */
731*7c478bd9Sstevel@tonic-gate 	result = rsmpi_ops_vector->rsm_register_handler(
732*7c478bd9Sstevel@tonic-gate 	    rsmpi_adapter_handle, &rsmpi_adapter_object,
733*7c478bd9Sstevel@tonic-gate 	    RSM_SERVICE+(uint_t)hwaddr, rsm_srv_func,
734*7c478bd9Sstevel@tonic-gate 	    (rsm_intr_hand_arg_t)srv_hdlr_argp, NULL, 0);
735*7c478bd9Sstevel@tonic-gate 
736*7c478bd9Sstevel@tonic-gate 	if (result != RSM_SUCCESS) {
737*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_ERR,
738*7c478bd9Sstevel@tonic-gate 		    "rsmka_add_adapter done: rsm_register_handler"
739*7c478bd9Sstevel@tonic-gate 		    "failed %d\n",
740*7c478bd9Sstevel@tonic-gate 		    instance));
741*7c478bd9Sstevel@tonic-gate 		return (NULL);
742*7c478bd9Sstevel@tonic-gate 	}
743*7c478bd9Sstevel@tonic-gate 
744*7c478bd9Sstevel@tonic-gate 	/* Initialize an adapter descriptor and add it to the adapter list */
745*7c478bd9Sstevel@tonic-gate 	adapter = init_adapter(name, instance, hwaddr,
746*7c478bd9Sstevel@tonic-gate 	    rsmpi_adapter_handle, rsmpi_ops_vector, srv_hdlr_argp);
747*7c478bd9Sstevel@tonic-gate 
748*7c478bd9Sstevel@tonic-gate 	/* Copy over the attributes from the pointer returned to us */
749*7c478bd9Sstevel@tonic-gate 	adapter->rsm_attr = *attr;
750*7c478bd9Sstevel@tonic-gate 
751*7c478bd9Sstevel@tonic-gate 	/*
752*7c478bd9Sstevel@tonic-gate 	 * With the addition of the topology obtainment interface, applications
753*7c478bd9Sstevel@tonic-gate 	 * now get the local nodeid from the topology data structure.
754*7c478bd9Sstevel@tonic-gate 	 *
755*7c478bd9Sstevel@tonic-gate 	 * adapter->rsm_attr.attr_node_id = my_nodeid;
756*7c478bd9Sstevel@tonic-gate 	 */
757*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_ERR,
758*7c478bd9Sstevel@tonic-gate 	    "rsmka_add_adapter: adapter = %lx\n", adapter));
759*7c478bd9Sstevel@tonic-gate 
760*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_adapter done\n"));
761*7c478bd9Sstevel@tonic-gate 
762*7c478bd9Sstevel@tonic-gate 	/* return adapter pointer as a cookie for later fast access */
763*7c478bd9Sstevel@tonic-gate 	return ((void *)adapter);
764*7c478bd9Sstevel@tonic-gate }
765*7c478bd9Sstevel@tonic-gate 
766*7c478bd9Sstevel@tonic-gate 
767*7c478bd9Sstevel@tonic-gate /*
768*7c478bd9Sstevel@tonic-gate  * Unlink the adapter descriptor and call rsmka_release_adapter which
769*7c478bd9Sstevel@tonic-gate  * will decrement the reference count and possibly free the desriptor.
770*7c478bd9Sstevel@tonic-gate  */
771*7c478bd9Sstevel@tonic-gate boolean_t
772*7c478bd9Sstevel@tonic-gate rsmka_remove_adapter(char *name, uint_t instance, void *cookie, int flags)
773*7c478bd9Sstevel@tonic-gate {
774*7c478bd9Sstevel@tonic-gate 	adapter_t		*adapter;
775*7c478bd9Sstevel@tonic-gate 	adapter_listhead_t	*listhead;
776*7c478bd9Sstevel@tonic-gate 	adapter_t		*prev, *current;
777*7c478bd9Sstevel@tonic-gate 	rsm_controller_object_t	rsm_cntl_obj;
778*7c478bd9Sstevel@tonic-gate 
779*7c478bd9Sstevel@tonic-gate 
780*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
781*7c478bd9Sstevel@tonic-gate 	    "rsmka_remove_adapter enter\n"));
782*7c478bd9Sstevel@tonic-gate 
783*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
784*7c478bd9Sstevel@tonic-gate 	    "rsmka_remove_adapter: cookie = %lx\n", cookie));
785*7c478bd9Sstevel@tonic-gate 
786*7c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_USE_COOKIE) {
787*7c478bd9Sstevel@tonic-gate 		adapter = (adapter_t *)cookie;
788*7c478bd9Sstevel@tonic-gate 	} else {
789*7c478bd9Sstevel@tonic-gate 		adapter = rsmka_lookup_adapter(name, instance);
790*7c478bd9Sstevel@tonic-gate 		/*
791*7c478bd9Sstevel@tonic-gate 		 * rsmka_lookup_adapter increments the ref_cnt; need
792*7c478bd9Sstevel@tonic-gate 		 * to decrement here to get true count
793*7c478bd9Sstevel@tonic-gate 		 */
794*7c478bd9Sstevel@tonic-gate 		ADAPTER_RELE(adapter);
795*7c478bd9Sstevel@tonic-gate 	}
796*7c478bd9Sstevel@tonic-gate 	ASSERT(adapter->next_path == NULL);
797*7c478bd9Sstevel@tonic-gate 
798*7c478bd9Sstevel@tonic-gate 	listhead = adapter->listhead;
799*7c478bd9Sstevel@tonic-gate 
800*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter_listhead_base.listlock);
801*7c478bd9Sstevel@tonic-gate 
802*7c478bd9Sstevel@tonic-gate 	mutex_enter(&listhead->mutex);
803*7c478bd9Sstevel@tonic-gate 
804*7c478bd9Sstevel@tonic-gate 	/* find the adapter in the list and remove it */
805*7c478bd9Sstevel@tonic-gate 	prev = NULL;
806*7c478bd9Sstevel@tonic-gate 	current = listhead->next_adapter;
807*7c478bd9Sstevel@tonic-gate 	while (current != NULL) {
808*7c478bd9Sstevel@tonic-gate 		if (adapter->instance == current->instance) {
809*7c478bd9Sstevel@tonic-gate 			break;
810*7c478bd9Sstevel@tonic-gate 		} else {
811*7c478bd9Sstevel@tonic-gate 			prev = current;
812*7c478bd9Sstevel@tonic-gate 			current = current->next;
813*7c478bd9Sstevel@tonic-gate 		}
814*7c478bd9Sstevel@tonic-gate 	}
815*7c478bd9Sstevel@tonic-gate 	ASSERT(current != NULL);
816*7c478bd9Sstevel@tonic-gate 
817*7c478bd9Sstevel@tonic-gate 	if (prev == NULL)
818*7c478bd9Sstevel@tonic-gate 		listhead->next_adapter = current->next;
819*7c478bd9Sstevel@tonic-gate 	else
820*7c478bd9Sstevel@tonic-gate 		prev->next = current->next;
821*7c478bd9Sstevel@tonic-gate 
822*7c478bd9Sstevel@tonic-gate 	listhead->adapter_count--;
823*7c478bd9Sstevel@tonic-gate 
824*7c478bd9Sstevel@tonic-gate 	mutex_exit(&listhead->mutex);
825*7c478bd9Sstevel@tonic-gate 
826*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter_listhead_base.listlock);
827*7c478bd9Sstevel@tonic-gate 
828*7c478bd9Sstevel@tonic-gate 	mutex_enter(&current->mutex);
829*7c478bd9Sstevel@tonic-gate 
830*7c478bd9Sstevel@tonic-gate 	/*
831*7c478bd9Sstevel@tonic-gate 	 * unregister the handler
832*7c478bd9Sstevel@tonic-gate 	 */
833*7c478bd9Sstevel@tonic-gate 	current->rsmpi_ops->rsm_unregister_handler(current->rsmpi_handle,
834*7c478bd9Sstevel@tonic-gate 	    RSM_SERVICE+current->hwaddr, rsm_srv_func,
835*7c478bd9Sstevel@tonic-gate 	    (rsm_intr_hand_arg_t)current->hdlr_argp);
836*7c478bd9Sstevel@tonic-gate 
837*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG, "rsmka_remove_adapter: unreg hdlr "
838*7c478bd9Sstevel@tonic-gate 	    ":adapter=%lx, hwaddr=%lx\n", current, current->hwaddr));
839*7c478bd9Sstevel@tonic-gate 
840*7c478bd9Sstevel@tonic-gate 	rsm_cntl_obj.handle = current->rsmpi_handle;
841*7c478bd9Sstevel@tonic-gate 	rsm_cntl_obj.ops = current->rsmpi_ops;
842*7c478bd9Sstevel@tonic-gate 
843*7c478bd9Sstevel@tonic-gate 	(void) rsm_release_controller(current->listhead->adapter_devname,
844*7c478bd9Sstevel@tonic-gate 	    current->instance, &rsm_cntl_obj);
845*7c478bd9Sstevel@tonic-gate 
846*7c478bd9Sstevel@tonic-gate 	mutex_exit(&current->mutex);
847*7c478bd9Sstevel@tonic-gate 
848*7c478bd9Sstevel@tonic-gate 	rsmka_release_adapter(current);
849*7c478bd9Sstevel@tonic-gate 
850*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
851*7c478bd9Sstevel@tonic-gate 	    "rsmka_remove_adapter done\n"));
852*7c478bd9Sstevel@tonic-gate 
853*7c478bd9Sstevel@tonic-gate 	return (B_TRUE);
854*7c478bd9Sstevel@tonic-gate }
855*7c478bd9Sstevel@tonic-gate 
856*7c478bd9Sstevel@tonic-gate /*
857*7c478bd9Sstevel@tonic-gate  * An adapter descriptor will exist from an earlier add_adapter. This
858*7c478bd9Sstevel@tonic-gate  * function does:
859*7c478bd9Sstevel@tonic-gate  *		initialize the path descriptor
860*7c478bd9Sstevel@tonic-gate  *		initialize the ipc descriptor (it may already exist)
861*7c478bd9Sstevel@tonic-gate  *		initialize and link a sendq token for this path
862*7c478bd9Sstevel@tonic-gate  */
863*7c478bd9Sstevel@tonic-gate void *
864*7c478bd9Sstevel@tonic-gate rsmka_add_path(char *adapter_name, int adapter_instance,
865*7c478bd9Sstevel@tonic-gate     rsm_node_id_t remote_node,
866*7c478bd9Sstevel@tonic-gate     rsm_addr_t remote_hwaddr, int rem_adapt_instance,
867*7c478bd9Sstevel@tonic-gate     void *cookie, int flags)
868*7c478bd9Sstevel@tonic-gate {
869*7c478bd9Sstevel@tonic-gate 
870*7c478bd9Sstevel@tonic-gate 	path_t			*path;
871*7c478bd9Sstevel@tonic-gate 	adapter_t		*adapter;
872*7c478bd9Sstevel@tonic-gate 	char			tq_name[TASKQ_NAMELEN];
873*7c478bd9Sstevel@tonic-gate 
874*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_path enter\n"));
875*7c478bd9Sstevel@tonic-gate 
876*7c478bd9Sstevel@tonic-gate 	/* allocate new path descriptor */
877*7c478bd9Sstevel@tonic-gate 	path = kmem_zalloc(sizeof (path_t), KM_SLEEP);
878*7c478bd9Sstevel@tonic-gate 
879*7c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_USE_COOKIE) {
880*7c478bd9Sstevel@tonic-gate 		adapter = (adapter_t *)cookie;
881*7c478bd9Sstevel@tonic-gate 		ADAPTER_HOLD(adapter);
882*7c478bd9Sstevel@tonic-gate 	} else {
883*7c478bd9Sstevel@tonic-gate 		adapter = rsmka_lookup_adapter(adapter_name, adapter_instance);
884*7c478bd9Sstevel@tonic-gate 	}
885*7c478bd9Sstevel@tonic-gate 
886*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
887*7c478bd9Sstevel@tonic-gate 	    "rsmka_add_path: adapter = %lx\n", adapter));
888*7c478bd9Sstevel@tonic-gate 
889*7c478bd9Sstevel@tonic-gate 	/*
890*7c478bd9Sstevel@tonic-gate 	 * initialize path descriptor
891*7c478bd9Sstevel@tonic-gate 	 * don't need to increment adapter reference count because
892*7c478bd9Sstevel@tonic-gate 	 * it can't be removed if paths exist for it.
893*7c478bd9Sstevel@tonic-gate 	 */
894*7c478bd9Sstevel@tonic-gate 	mutex_init(&path->mutex, NULL, MUTEX_DEFAULT, NULL);
895*7c478bd9Sstevel@tonic-gate 
896*7c478bd9Sstevel@tonic-gate 	PATH_HOLD(path);
897*7c478bd9Sstevel@tonic-gate 	path->state = RSMKA_PATH_DOWN;
898*7c478bd9Sstevel@tonic-gate 	path->remote_node = remote_node;
899*7c478bd9Sstevel@tonic-gate 	path->remote_hwaddr = remote_hwaddr;
900*7c478bd9Sstevel@tonic-gate 	path->remote_devinst = rem_adapt_instance;
901*7c478bd9Sstevel@tonic-gate 	path->local_adapter = adapter;
902*7c478bd9Sstevel@tonic-gate 
903*7c478bd9Sstevel@tonic-gate 	/* taskq is for sendq on adapter with remote_hwaddr on remote_node */
904*7c478bd9Sstevel@tonic-gate 	(void) snprintf(tq_name, sizeof (tq_name), "%x_%llx",
905*7c478bd9Sstevel@tonic-gate 	    remote_node, (unsigned long long) remote_hwaddr);
906*7c478bd9Sstevel@tonic-gate 
907*7c478bd9Sstevel@tonic-gate 	path->recv_taskq = taskq_create_instance(tq_name, adapter_instance,
908*7c478bd9Sstevel@tonic-gate 	    RSMKA_ONE_THREAD, maxclsyspri, RSMIPC_MAX_MESSAGES,
909*7c478bd9Sstevel@tonic-gate 	    RSMIPC_MAX_MESSAGES, TASKQ_PREPOPULATE);
910*7c478bd9Sstevel@tonic-gate 
911*7c478bd9Sstevel@tonic-gate 	/* allocate the message buffer array */
912*7c478bd9Sstevel@tonic-gate 	path->msgbuf_queue = (msgbuf_elem_t *)kmem_zalloc(
913*7c478bd9Sstevel@tonic-gate 	    RSMIPC_MAX_MESSAGES * sizeof (msgbuf_elem_t), KM_SLEEP);
914*7c478bd9Sstevel@tonic-gate 
915*7c478bd9Sstevel@tonic-gate 	/*
916*7c478bd9Sstevel@tonic-gate 	 * init cond variables for synch with rsmipc_send()
917*7c478bd9Sstevel@tonic-gate 	 * and rsmka_remove_path
918*7c478bd9Sstevel@tonic-gate 	 */
919*7c478bd9Sstevel@tonic-gate 	cv_init(&path->sendq_token.sendq_cv, NULL, CV_DEFAULT, NULL);
920*7c478bd9Sstevel@tonic-gate 	cv_init(&path->hold_cv, NULL, CV_DEFAULT, NULL);
921*7c478bd9Sstevel@tonic-gate 
922*7c478bd9Sstevel@tonic-gate 	/* link path descriptor on adapter path list */
923*7c478bd9Sstevel@tonic-gate 	link_path(path);
924*7c478bd9Sstevel@tonic-gate 
925*7c478bd9Sstevel@tonic-gate 	/* link the path sendq token on the ipc_info token list */
926*7c478bd9Sstevel@tonic-gate 	link_sendq_token(&path->sendq_token, remote_node);
927*7c478bd9Sstevel@tonic-gate 
928*7c478bd9Sstevel@tonic-gate 	/* ADAPTER_HOLD done above by rsmka_lookup_adapter */
929*7c478bd9Sstevel@tonic-gate 	ADAPTER_RELE(adapter);
930*7c478bd9Sstevel@tonic-gate 
931*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG, "rsmka_add_path: path = %lx\n", path));
932*7c478bd9Sstevel@tonic-gate 
933*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_path done\n"));
934*7c478bd9Sstevel@tonic-gate 	return ((void *)path);
935*7c478bd9Sstevel@tonic-gate }
936*7c478bd9Sstevel@tonic-gate 
937*7c478bd9Sstevel@tonic-gate /*
938*7c478bd9Sstevel@tonic-gate  * Wait for the path descriptor reference count to become zero then
939*7c478bd9Sstevel@tonic-gate  * directly call path down processing.  Finally, unlink the sendq token and
940*7c478bd9Sstevel@tonic-gate  * free the path descriptor memory.
941*7c478bd9Sstevel@tonic-gate  *
942*7c478bd9Sstevel@tonic-gate  * Note: lookup_path locks the path and increments the path hold count
943*7c478bd9Sstevel@tonic-gate  */
944*7c478bd9Sstevel@tonic-gate void
945*7c478bd9Sstevel@tonic-gate rsmka_remove_path(char *adapter_name, int instance, rsm_node_id_t remote_node,
946*7c478bd9Sstevel@tonic-gate     rsm_addr_t remote_hwaddr, void *path_cookie, int flags)
947*7c478bd9Sstevel@tonic-gate {
948*7c478bd9Sstevel@tonic-gate 	path_t		*path;
949*7c478bd9Sstevel@tonic-gate 
950*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_remove_path enter\n"));
951*7c478bd9Sstevel@tonic-gate 
952*7c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_USE_COOKIE) {
953*7c478bd9Sstevel@tonic-gate 		path = (path_t *)path_cookie;
954*7c478bd9Sstevel@tonic-gate 		mutex_enter(&path->mutex);
955*7c478bd9Sstevel@tonic-gate 	} else {
956*7c478bd9Sstevel@tonic-gate 		path = lookup_path(adapter_name, instance,  remote_node,
957*7c478bd9Sstevel@tonic-gate 		    remote_hwaddr);
958*7c478bd9Sstevel@tonic-gate 
959*7c478bd9Sstevel@tonic-gate 		/*
960*7c478bd9Sstevel@tonic-gate 		 * remember, lookup_path increments the reference
961*7c478bd9Sstevel@tonic-gate 		 * count - so decrement now so we can get to zero
962*7c478bd9Sstevel@tonic-gate 		 */
963*7c478bd9Sstevel@tonic-gate 		PATH_RELE_NOLOCK(path);
964*7c478bd9Sstevel@tonic-gate 	}
965*7c478bd9Sstevel@tonic-gate 
966*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
967*7c478bd9Sstevel@tonic-gate 	    "rsmka_remove_path: path = %lx\n", path));
968*7c478bd9Sstevel@tonic-gate 
969*7c478bd9Sstevel@tonic-gate 	while (path->state == RSMKA_PATH_GOING_DOWN)
970*7c478bd9Sstevel@tonic-gate 		cv_wait(&path->hold_cv, &path->mutex);
971*7c478bd9Sstevel@tonic-gate 
972*7c478bd9Sstevel@tonic-gate 	/* attempt to cancel any possibly pending work */
973*7c478bd9Sstevel@tonic-gate 	mutex_enter(&work_queue.work_mutex);
974*7c478bd9Sstevel@tonic-gate 	if (cancel_work(&path->work_token[RSMKA_IPC_UP_INDEX])) {
975*7c478bd9Sstevel@tonic-gate 		PATH_RELE_NOLOCK(path);
976*7c478bd9Sstevel@tonic-gate 	}
977*7c478bd9Sstevel@tonic-gate 	if (cancel_work(&path->work_token[RSMKA_IPC_DOWN_INDEX])) {
978*7c478bd9Sstevel@tonic-gate 		PATH_RELE_NOLOCK(path);
979*7c478bd9Sstevel@tonic-gate 	}
980*7c478bd9Sstevel@tonic-gate 	mutex_exit(&work_queue.work_mutex);
981*7c478bd9Sstevel@tonic-gate 
982*7c478bd9Sstevel@tonic-gate 	/*
983*7c478bd9Sstevel@tonic-gate 	 * The path descriptor ref cnt was set to 1 initially when
984*7c478bd9Sstevel@tonic-gate 	 * the path was added.  So we need to do a decrement here to
985*7c478bd9Sstevel@tonic-gate 	 * balance that.
986*7c478bd9Sstevel@tonic-gate 	 */
987*7c478bd9Sstevel@tonic-gate 	PATH_RELE_NOLOCK(path);
988*7c478bd9Sstevel@tonic-gate 
989*7c478bd9Sstevel@tonic-gate 	switch (path->state) {
990*7c478bd9Sstevel@tonic-gate 	case RSMKA_PATH_UP:
991*7c478bd9Sstevel@tonic-gate 		/* clear the flag */
992*7c478bd9Sstevel@tonic-gate 		path->flags &= ~RSMKA_SQCREATE_PENDING;
993*7c478bd9Sstevel@tonic-gate 		path->state = RSMKA_PATH_DOWN;
994*7c478bd9Sstevel@tonic-gate 		break;
995*7c478bd9Sstevel@tonic-gate 	case RSMKA_PATH_DOWN:
996*7c478bd9Sstevel@tonic-gate 		break;
997*7c478bd9Sstevel@tonic-gate 
998*7c478bd9Sstevel@tonic-gate 	case RSMKA_PATH_ACTIVE:
999*7c478bd9Sstevel@tonic-gate 		/*
1000*7c478bd9Sstevel@tonic-gate 		 * rsmka_remove_path should not call do_path_down
1001*7c478bd9Sstevel@tonic-gate 		 * with the RSMKA_NO_SLEEP flag set since for
1002*7c478bd9Sstevel@tonic-gate 		 * this code path, the deferred work would
1003*7c478bd9Sstevel@tonic-gate 		 * incorrectly do a PATH_RELE_NOLOCK.
1004*7c478bd9Sstevel@tonic-gate 		 */
1005*7c478bd9Sstevel@tonic-gate 		do_path_down(path, 0);
1006*7c478bd9Sstevel@tonic-gate 		break;
1007*7c478bd9Sstevel@tonic-gate 	default:
1008*7c478bd9Sstevel@tonic-gate 		mutex_exit(&path->mutex);
1009*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_ERR,
1010*7c478bd9Sstevel@tonic-gate 		    "rsm_remove_path: invalid path state %d\n",
1011*7c478bd9Sstevel@tonic-gate 		    path->state));
1012*7c478bd9Sstevel@tonic-gate 		return;
1013*7c478bd9Sstevel@tonic-gate 
1014*7c478bd9Sstevel@tonic-gate 	}
1015*7c478bd9Sstevel@tonic-gate 
1016*7c478bd9Sstevel@tonic-gate 	/*
1017*7c478bd9Sstevel@tonic-gate 	 * wait for all references to the path to be released. If a thread
1018*7c478bd9Sstevel@tonic-gate 	 * was waiting to receive credits do_path_down should wake it up
1019*7c478bd9Sstevel@tonic-gate 	 * since the path is going down and that will cause the sleeping
1020*7c478bd9Sstevel@tonic-gate 	 * thread to release its hold on the path.
1021*7c478bd9Sstevel@tonic-gate 	 */
1022*7c478bd9Sstevel@tonic-gate 	while (path->ref_cnt != 0) {
1023*7c478bd9Sstevel@tonic-gate 		cv_wait(&path->hold_cv, &path->mutex);
1024*7c478bd9Sstevel@tonic-gate 	}
1025*7c478bd9Sstevel@tonic-gate 
1026*7c478bd9Sstevel@tonic-gate 	mutex_exit(&path->mutex);
1027*7c478bd9Sstevel@tonic-gate 
1028*7c478bd9Sstevel@tonic-gate 	/*
1029*7c478bd9Sstevel@tonic-gate 	 * remove from ipc token list
1030*7c478bd9Sstevel@tonic-gate 	 * NOTE: use the remote_node value from the path structure
1031*7c478bd9Sstevel@tonic-gate 	 * since for RSMKA_USE_COOKIE being set, the remote_node
1032*7c478bd9Sstevel@tonic-gate 	 * value passed into rsmka_remove_path is 0.
1033*7c478bd9Sstevel@tonic-gate 	 */
1034*7c478bd9Sstevel@tonic-gate 	unlink_sendq_token(&path->sendq_token, path->remote_node);
1035*7c478bd9Sstevel@tonic-gate 
1036*7c478bd9Sstevel@tonic-gate 	/* unlink from adapter path list and free path descriptor */
1037*7c478bd9Sstevel@tonic-gate 	destroy_path(path);
1038*7c478bd9Sstevel@tonic-gate 
1039*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_remove_path done\n"));
1040*7c478bd9Sstevel@tonic-gate }
1041*7c478bd9Sstevel@tonic-gate 
1042*7c478bd9Sstevel@tonic-gate /*
1043*7c478bd9Sstevel@tonic-gate  *
1044*7c478bd9Sstevel@tonic-gate  * LOCKING:
1045*7c478bd9Sstevel@tonic-gate  * lookup_path locks the path and increments the path hold count. If the remote
1046*7c478bd9Sstevel@tonic-gate  * node is not in the alive state, do_path_up will release the lock and
1047*7c478bd9Sstevel@tonic-gate  * decrement the hold count.  Otherwise rsmka_do_path_active will release the
1048*7c478bd9Sstevel@tonic-gate  * lock prior to waking up the work thread.
1049*7c478bd9Sstevel@tonic-gate  *
1050*7c478bd9Sstevel@tonic-gate  * REF_CNT:
1051*7c478bd9Sstevel@tonic-gate  * The path descriptor ref_cnt is incremented here; it will be decremented
1052*7c478bd9Sstevel@tonic-gate  * when path up processing is completed in do_path_up or by the work thread
1053*7c478bd9Sstevel@tonic-gate  * if the path up is deferred.
1054*7c478bd9Sstevel@tonic-gate  *
1055*7c478bd9Sstevel@tonic-gate  */
1056*7c478bd9Sstevel@tonic-gate boolean_t
1057*7c478bd9Sstevel@tonic-gate rsmka_path_up(char *adapter_name, uint_t adapter_instance,
1058*7c478bd9Sstevel@tonic-gate     rsm_node_id_t remote_node, rsm_addr_t remote_hwaddr,
1059*7c478bd9Sstevel@tonic-gate     void *path_cookie, int flags)
1060*7c478bd9Sstevel@tonic-gate {
1061*7c478bd9Sstevel@tonic-gate 
1062*7c478bd9Sstevel@tonic-gate 	path_t			*path;
1063*7c478bd9Sstevel@tonic-gate 	boolean_t		rval = B_TRUE;
1064*7c478bd9Sstevel@tonic-gate 
1065*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_up enter\n"));
1066*7c478bd9Sstevel@tonic-gate 
1067*7c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_USE_COOKIE) {
1068*7c478bd9Sstevel@tonic-gate 		path = (path_t *)path_cookie;
1069*7c478bd9Sstevel@tonic-gate 		mutex_enter(&path->mutex);
1070*7c478bd9Sstevel@tonic-gate 		PATH_HOLD_NOLOCK(path);
1071*7c478bd9Sstevel@tonic-gate 	} else {
1072*7c478bd9Sstevel@tonic-gate 		path = lookup_path(adapter_name, adapter_instance,
1073*7c478bd9Sstevel@tonic-gate 		    remote_node, remote_hwaddr);
1074*7c478bd9Sstevel@tonic-gate 	}
1075*7c478bd9Sstevel@tonic-gate 
1076*7c478bd9Sstevel@tonic-gate 	while (path->state == RSMKA_PATH_GOING_DOWN)
1077*7c478bd9Sstevel@tonic-gate 		cv_wait(&path->hold_cv, &path->mutex);
1078*7c478bd9Sstevel@tonic-gate 
1079*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG, "rsmka_path_up: path = %lx\n", path));
1080*7c478bd9Sstevel@tonic-gate 	rval = do_path_up(path, flags);
1081*7c478bd9Sstevel@tonic-gate 	mutex_exit(&path->mutex);
1082*7c478bd9Sstevel@tonic-gate 
1083*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_up done\n"));
1084*7c478bd9Sstevel@tonic-gate 	return (rval);
1085*7c478bd9Sstevel@tonic-gate }
1086*7c478bd9Sstevel@tonic-gate 
1087*7c478bd9Sstevel@tonic-gate /*
1088*7c478bd9Sstevel@tonic-gate  *
1089*7c478bd9Sstevel@tonic-gate  * LOCKING:
1090*7c478bd9Sstevel@tonic-gate  * lookup_path locks the path and increments the path hold count. If the
1091*7c478bd9Sstevel@tonic-gate  * current state is ACTIVE the path lock is release prior to waking up
1092*7c478bd9Sstevel@tonic-gate  * the work thread in do_path_down .  The work thread will decrement the hold
1093*7c478bd9Sstevel@tonic-gate  * count when the work for this is finished.
1094*7c478bd9Sstevel@tonic-gate  *
1095*7c478bd9Sstevel@tonic-gate  *
1096*7c478bd9Sstevel@tonic-gate  * REF_CNT:
1097*7c478bd9Sstevel@tonic-gate  * The path descriptor ref_cnt is incremented here; it will be decremented
1098*7c478bd9Sstevel@tonic-gate  * when path down processing is completed in do_path_down or by the work thread
1099*7c478bd9Sstevel@tonic-gate  * if the path down is deferred.
1100*7c478bd9Sstevel@tonic-gate  *
1101*7c478bd9Sstevel@tonic-gate  */
1102*7c478bd9Sstevel@tonic-gate boolean_t
1103*7c478bd9Sstevel@tonic-gate rsmka_path_down(char *adapter_devname, int instance, rsm_node_id_t remote_node,
1104*7c478bd9Sstevel@tonic-gate     rsm_addr_t remote_hwaddr,  void *path_cookie, int flags)
1105*7c478bd9Sstevel@tonic-gate {
1106*7c478bd9Sstevel@tonic-gate 	path_t			*path;
1107*7c478bd9Sstevel@tonic-gate 	boolean_t		rval = B_TRUE;
1108*7c478bd9Sstevel@tonic-gate 
1109*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_down enter\n"));
1110*7c478bd9Sstevel@tonic-gate 
1111*7c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_USE_COOKIE) {
1112*7c478bd9Sstevel@tonic-gate 		path = (path_t *)path_cookie;
1113*7c478bd9Sstevel@tonic-gate 		mutex_enter(&path->mutex);
1114*7c478bd9Sstevel@tonic-gate 		PATH_HOLD_NOLOCK(path);
1115*7c478bd9Sstevel@tonic-gate 	} else {
1116*7c478bd9Sstevel@tonic-gate 		path = lookup_path(adapter_devname, instance, remote_node,
1117*7c478bd9Sstevel@tonic-gate 		    remote_hwaddr);
1118*7c478bd9Sstevel@tonic-gate 	}
1119*7c478bd9Sstevel@tonic-gate 
1120*7c478bd9Sstevel@tonic-gate 	while (path->state == RSMKA_PATH_GOING_DOWN)
1121*7c478bd9Sstevel@tonic-gate 		cv_wait(&path->hold_cv, &path->mutex);
1122*7c478bd9Sstevel@tonic-gate 
1123*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
1124*7c478bd9Sstevel@tonic-gate 	    "rsmka_path_down: path = %lx\n", path));
1125*7c478bd9Sstevel@tonic-gate 
1126*7c478bd9Sstevel@tonic-gate 	switch (path->state) {
1127*7c478bd9Sstevel@tonic-gate 	case RSMKA_PATH_UP:
1128*7c478bd9Sstevel@tonic-gate 		/* clear the flag */
1129*7c478bd9Sstevel@tonic-gate 		path->flags &= ~RSMKA_SQCREATE_PENDING;
1130*7c478bd9Sstevel@tonic-gate 		path->state = RSMKA_PATH_GOING_DOWN;
1131*7c478bd9Sstevel@tonic-gate 		mutex_exit(&path->mutex);
1132*7c478bd9Sstevel@tonic-gate 
1133*7c478bd9Sstevel@tonic-gate 		/*
1134*7c478bd9Sstevel@tonic-gate 		 * release path->mutex since enqueued tasks acquire it.
1135*7c478bd9Sstevel@tonic-gate 		 * Drain all the enqueued tasks.
1136*7c478bd9Sstevel@tonic-gate 		 */
1137*7c478bd9Sstevel@tonic-gate 		taskq_wait(path->recv_taskq);
1138*7c478bd9Sstevel@tonic-gate 
1139*7c478bd9Sstevel@tonic-gate 		mutex_enter(&path->mutex);
1140*7c478bd9Sstevel@tonic-gate 		path->state = RSMKA_PATH_DOWN;
1141*7c478bd9Sstevel@tonic-gate 		PATH_RELE_NOLOCK(path);
1142*7c478bd9Sstevel@tonic-gate 		break;
1143*7c478bd9Sstevel@tonic-gate 	case RSMKA_PATH_DOWN:
1144*7c478bd9Sstevel@tonic-gate 		PATH_RELE_NOLOCK(path);
1145*7c478bd9Sstevel@tonic-gate 		break;
1146*7c478bd9Sstevel@tonic-gate 	case RSMKA_PATH_ACTIVE:
1147*7c478bd9Sstevel@tonic-gate 		do_path_down(path, flags);
1148*7c478bd9Sstevel@tonic-gate 		/*
1149*7c478bd9Sstevel@tonic-gate 		 * Need to release the path refcnt. Either done in do_path_down
1150*7c478bd9Sstevel@tonic-gate 		 * or do_deferred_work for RSMKA_NO_SLEEP being set. Has to be
1151*7c478bd9Sstevel@tonic-gate 		 * done here for RSMKA_NO_SLEEP not set.
1152*7c478bd9Sstevel@tonic-gate 		 */
1153*7c478bd9Sstevel@tonic-gate 		if (!(flags & RSMKA_NO_SLEEP))
1154*7c478bd9Sstevel@tonic-gate 			PATH_RELE_NOLOCK(path);
1155*7c478bd9Sstevel@tonic-gate 		break;
1156*7c478bd9Sstevel@tonic-gate 	default:
1157*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_ERR,
1158*7c478bd9Sstevel@tonic-gate 		    "rsm_path_down: invalid path state %d\n", path->state));
1159*7c478bd9Sstevel@tonic-gate 		rval = B_FALSE;
1160*7c478bd9Sstevel@tonic-gate 	}
1161*7c478bd9Sstevel@tonic-gate 
1162*7c478bd9Sstevel@tonic-gate 	mutex_exit(&path->mutex);
1163*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_down done\n"));
1164*7c478bd9Sstevel@tonic-gate 	return (rval);
1165*7c478bd9Sstevel@tonic-gate }
1166*7c478bd9Sstevel@tonic-gate 
1167*7c478bd9Sstevel@tonic-gate 
1168*7c478bd9Sstevel@tonic-gate /*
1169*7c478bd9Sstevel@tonic-gate  * Paths cannot become active until node_is_alive is marked true
1170*7c478bd9Sstevel@tonic-gate  * in the ipc_info descriptor for the node
1171*7c478bd9Sstevel@tonic-gate  *
1172*7c478bd9Sstevel@tonic-gate  * In the event this is called before any paths have been added,
1173*7c478bd9Sstevel@tonic-gate  * init_ipc_info if called here.
1174*7c478bd9Sstevel@tonic-gate  *
1175*7c478bd9Sstevel@tonic-gate  */
1176*7c478bd9Sstevel@tonic-gate boolean_t
1177*7c478bd9Sstevel@tonic-gate rsmka_node_alive(rsm_node_id_t remote_node)
1178*7c478bd9Sstevel@tonic-gate {
1179*7c478bd9Sstevel@tonic-gate 	ipc_info_t *ipc_info;
1180*7c478bd9Sstevel@tonic-gate 
1181*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_alive enter\n"));
1182*7c478bd9Sstevel@tonic-gate 
1183*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
1184*7c478bd9Sstevel@tonic-gate 	    "rsmka_node_alive: remote_node = %x\n", remote_node));
1185*7c478bd9Sstevel@tonic-gate 
1186*7c478bd9Sstevel@tonic-gate 	ipc_info = lookup_ipc_info(remote_node);
1187*7c478bd9Sstevel@tonic-gate 
1188*7c478bd9Sstevel@tonic-gate 	if (ipc_info == NULL) {
1189*7c478bd9Sstevel@tonic-gate 		ipc_info = init_ipc_info(remote_node, B_TRUE);
1190*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG,
1191*7c478bd9Sstevel@tonic-gate 		    "rsmka_node_alive: new ipc_info = %lx\n", ipc_info));
1192*7c478bd9Sstevel@tonic-gate 	} else {
1193*7c478bd9Sstevel@tonic-gate 		ASSERT(ipc_info->node_is_alive == B_FALSE);
1194*7c478bd9Sstevel@tonic-gate 		ipc_info->node_is_alive = B_TRUE;
1195*7c478bd9Sstevel@tonic-gate 	}
1196*7c478bd9Sstevel@tonic-gate 
1197*7c478bd9Sstevel@tonic-gate 	pathup_to_pathactive(ipc_info, remote_node);
1198*7c478bd9Sstevel@tonic-gate 
1199*7c478bd9Sstevel@tonic-gate 	mutex_exit(&ipc_info_lock);
1200*7c478bd9Sstevel@tonic-gate 
1201*7c478bd9Sstevel@tonic-gate 	/* rsmipc_send() may be waiting for a sendq_token */
1202*7c478bd9Sstevel@tonic-gate 	mutex_enter(&ipc_info_cvlock);
1203*7c478bd9Sstevel@tonic-gate 	cv_broadcast(&ipc_info_cv);
1204*7c478bd9Sstevel@tonic-gate 	mutex_exit(&ipc_info_cvlock);
1205*7c478bd9Sstevel@tonic-gate 
1206*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_alive done\n"));
1207*7c478bd9Sstevel@tonic-gate 
1208*7c478bd9Sstevel@tonic-gate 	return (B_TRUE);
1209*7c478bd9Sstevel@tonic-gate }
1210*7c478bd9Sstevel@tonic-gate 
1211*7c478bd9Sstevel@tonic-gate 
1212*7c478bd9Sstevel@tonic-gate 
1213*7c478bd9Sstevel@tonic-gate /*
1214*7c478bd9Sstevel@tonic-gate  * Paths cannot become active when node_is_alive is marked false
1215*7c478bd9Sstevel@tonic-gate  * in the ipc_info descriptor for the node
1216*7c478bd9Sstevel@tonic-gate  */
1217*7c478bd9Sstevel@tonic-gate boolean_t
1218*7c478bd9Sstevel@tonic-gate rsmka_node_died(rsm_node_id_t remote_node)
1219*7c478bd9Sstevel@tonic-gate {
1220*7c478bd9Sstevel@tonic-gate 	ipc_info_t *ipc_info;
1221*7c478bd9Sstevel@tonic-gate 
1222*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_died enter\n"));
1223*7c478bd9Sstevel@tonic-gate 
1224*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
1225*7c478bd9Sstevel@tonic-gate 	    "rsmka_node_died: remote_node = %x\n", remote_node));
1226*7c478bd9Sstevel@tonic-gate 
1227*7c478bd9Sstevel@tonic-gate 	ipc_info = lookup_ipc_info(remote_node);
1228*7c478bd9Sstevel@tonic-gate 	if (ipc_info == NULL)
1229*7c478bd9Sstevel@tonic-gate 		return (B_FALSE);
1230*7c478bd9Sstevel@tonic-gate 
1231*7c478bd9Sstevel@tonic-gate 	ASSERT(ipc_info->node_is_alive == B_TRUE);
1232*7c478bd9Sstevel@tonic-gate 	ipc_info->node_is_alive = B_FALSE;
1233*7c478bd9Sstevel@tonic-gate 
1234*7c478bd9Sstevel@tonic-gate 	rsm_suspend_complete(remote_node, RSM_SUSPEND_NODEDEAD);
1235*7c478bd9Sstevel@tonic-gate 
1236*7c478bd9Sstevel@tonic-gate 	mutex_exit(&ipc_info_lock);
1237*7c478bd9Sstevel@tonic-gate 
1238*7c478bd9Sstevel@tonic-gate 	/* rsmipc_send() may be waiting for a sendq_token */
1239*7c478bd9Sstevel@tonic-gate 	mutex_enter(&ipc_info_cvlock);
1240*7c478bd9Sstevel@tonic-gate 	cv_broadcast(&ipc_info_cv);
1241*7c478bd9Sstevel@tonic-gate 	mutex_exit(&ipc_info_cvlock);
1242*7c478bd9Sstevel@tonic-gate 
1243*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_died done\n"));
1244*7c478bd9Sstevel@tonic-gate 
1245*7c478bd9Sstevel@tonic-gate 	return (B_TRUE);
1246*7c478bd9Sstevel@tonic-gate }
1247*7c478bd9Sstevel@tonic-gate 
1248*7c478bd9Sstevel@tonic-gate /*
1249*7c478bd9Sstevel@tonic-gate  * Treat like path_down for all paths for the specified remote node.
1250*7c478bd9Sstevel@tonic-gate  * Always invoked before node died.
1251*7c478bd9Sstevel@tonic-gate  *
1252*7c478bd9Sstevel@tonic-gate  * NOTE: This routine is not called from the cluster path interface; the
1253*7c478bd9Sstevel@tonic-gate  * rsmka_path_down is called directly for each path.
1254*7c478bd9Sstevel@tonic-gate  */
1255*7c478bd9Sstevel@tonic-gate void
1256*7c478bd9Sstevel@tonic-gate rsmka_disconnect_node(rsm_node_id_t remote_node, int flags)
1257*7c478bd9Sstevel@tonic-gate {
1258*7c478bd9Sstevel@tonic-gate 	ipc_info_t	*ipc_info;
1259*7c478bd9Sstevel@tonic-gate 	path_t		*path;
1260*7c478bd9Sstevel@tonic-gate 	sendq_token_t	*sendq_token;
1261*7c478bd9Sstevel@tonic-gate 	work_token_t 	*up_token;
1262*7c478bd9Sstevel@tonic-gate 	work_token_t 	*down_token;
1263*7c478bd9Sstevel@tonic-gate 	boolean_t	do_work = B_FALSE;
1264*7c478bd9Sstevel@tonic-gate 	boolean_t	cancelled = B_FALSE;
1265*7c478bd9Sstevel@tonic-gate 
1266*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1267*7c478bd9Sstevel@tonic-gate 	    "rsmka_disconnect_node enter\n"));
1268*7c478bd9Sstevel@tonic-gate 
1269*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
1270*7c478bd9Sstevel@tonic-gate 	    "rsmka_disconnect_node: node = %d\n", remote_node));
1271*7c478bd9Sstevel@tonic-gate 
1272*7c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_NO_SLEEP) {
1273*7c478bd9Sstevel@tonic-gate 		ipc_info = lookup_ipc_info(remote_node);
1274*7c478bd9Sstevel@tonic-gate 
1275*7c478bd9Sstevel@tonic-gate 		sendq_token = ipc_info->token_list;
1276*7c478bd9Sstevel@tonic-gate 
1277*7c478bd9Sstevel@tonic-gate 		while (sendq_token != NULL) {
1278*7c478bd9Sstevel@tonic-gate 			path = SQ_TOKEN_TO_PATH(sendq_token);
1279*7c478bd9Sstevel@tonic-gate 			PATH_HOLD(path);
1280*7c478bd9Sstevel@tonic-gate 			up_token = &path->work_token[RSMKA_IPC_UP_INDEX];
1281*7c478bd9Sstevel@tonic-gate 			down_token = &path->work_token[RSMKA_IPC_DOWN_INDEX];
1282*7c478bd9Sstevel@tonic-gate 
1283*7c478bd9Sstevel@tonic-gate 			mutex_enter(&work_queue.work_mutex);
1284*7c478bd9Sstevel@tonic-gate 
1285*7c478bd9Sstevel@tonic-gate 			/* if an up token is enqueued, remove it */
1286*7c478bd9Sstevel@tonic-gate 			cancelled = cancel_work(up_token);
1287*7c478bd9Sstevel@tonic-gate 
1288*7c478bd9Sstevel@tonic-gate 			/*
1289*7c478bd9Sstevel@tonic-gate 			 * If the path is active and down work hasn't
1290*7c478bd9Sstevel@tonic-gate 			 * already been setup then down work is needed.
1291*7c478bd9Sstevel@tonic-gate 			 * else
1292*7c478bd9Sstevel@tonic-gate 			 * if up work wasn't canceled because it was
1293*7c478bd9Sstevel@tonic-gate 			 * already being processed then down work is needed
1294*7c478bd9Sstevel@tonic-gate 			 */
1295*7c478bd9Sstevel@tonic-gate 			if (path->state == RSMKA_PATH_ACTIVE) {
1296*7c478bd9Sstevel@tonic-gate 				if (down_token->opcode == 0)
1297*7c478bd9Sstevel@tonic-gate 					do_work = B_TRUE;
1298*7c478bd9Sstevel@tonic-gate 			} else
1299*7c478bd9Sstevel@tonic-gate 				if (up_token->opcode == RSMKA_IPC_UP)
1300*7c478bd9Sstevel@tonic-gate 					do_work = B_TRUE;
1301*7c478bd9Sstevel@tonic-gate 
1302*7c478bd9Sstevel@tonic-gate 			if (do_work == B_TRUE) {
1303*7c478bd9Sstevel@tonic-gate 				down_token->opcode = RSMKA_IPC_DOWN;
1304*7c478bd9Sstevel@tonic-gate 				enqueue_work(down_token);
1305*7c478bd9Sstevel@tonic-gate 			}
1306*7c478bd9Sstevel@tonic-gate 			mutex_exit(&work_queue.work_mutex);
1307*7c478bd9Sstevel@tonic-gate 
1308*7c478bd9Sstevel@tonic-gate 			if (do_work == B_FALSE)
1309*7c478bd9Sstevel@tonic-gate 				PATH_RELE(path);
1310*7c478bd9Sstevel@tonic-gate 
1311*7c478bd9Sstevel@tonic-gate 			if (cancelled) {
1312*7c478bd9Sstevel@tonic-gate 				PATH_RELE(path);
1313*7c478bd9Sstevel@tonic-gate 			}
1314*7c478bd9Sstevel@tonic-gate 			sendq_token = sendq_token->next;
1315*7c478bd9Sstevel@tonic-gate 		}
1316*7c478bd9Sstevel@tonic-gate 
1317*7c478bd9Sstevel@tonic-gate 		/*
1318*7c478bd9Sstevel@tonic-gate 		 * Now that all the work is enqueued, wakeup the work
1319*7c478bd9Sstevel@tonic-gate 		 * thread.
1320*7c478bd9Sstevel@tonic-gate 		 */
1321*7c478bd9Sstevel@tonic-gate 		mutex_enter(&work_queue.work_mutex);
1322*7c478bd9Sstevel@tonic-gate 		cv_signal(&work_queue.work_cv);
1323*7c478bd9Sstevel@tonic-gate 		mutex_exit(&work_queue.work_mutex);
1324*7c478bd9Sstevel@tonic-gate 
1325*7c478bd9Sstevel@tonic-gate 		IPCINFO_RELE_NOLOCK(ipc_info);
1326*7c478bd9Sstevel@tonic-gate 		mutex_exit(&ipc_info_lock);
1327*7c478bd9Sstevel@tonic-gate 
1328*7c478bd9Sstevel@tonic-gate 	} else {
1329*7c478bd9Sstevel@tonic-gate 		/* get locked ipc_info descriptor */
1330*7c478bd9Sstevel@tonic-gate 		ipc_info = lookup_ipc_info(remote_node);
1331*7c478bd9Sstevel@tonic-gate 
1332*7c478bd9Sstevel@tonic-gate 		sendq_token = ipc_info->token_list;
1333*7c478bd9Sstevel@tonic-gate 		while (sendq_token != NULL) {
1334*7c478bd9Sstevel@tonic-gate 			path = SQ_TOKEN_TO_PATH(sendq_token);
1335*7c478bd9Sstevel@tonic-gate 			DBG_PRINTF((category, RSM_DEBUG,
1336*7c478bd9Sstevel@tonic-gate 			    "rsmka_disconnect_node: path_down"
1337*7c478bd9Sstevel@tonic-gate 			    "for path = %x\n",
1338*7c478bd9Sstevel@tonic-gate 			    path));
1339*7c478bd9Sstevel@tonic-gate 			(void) rsmka_path_down(0, 0, 0, 0,
1340*7c478bd9Sstevel@tonic-gate 			    path, RSMKA_USE_COOKIE);
1341*7c478bd9Sstevel@tonic-gate 			sendq_token = sendq_token->next;
1342*7c478bd9Sstevel@tonic-gate 			if (sendq_token == ipc_info->token_list)
1343*7c478bd9Sstevel@tonic-gate 				break;
1344*7c478bd9Sstevel@tonic-gate 		}
1345*7c478bd9Sstevel@tonic-gate 		mutex_exit(&ipc_info_lock);
1346*7c478bd9Sstevel@tonic-gate 	}
1347*7c478bd9Sstevel@tonic-gate 
1348*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1349*7c478bd9Sstevel@tonic-gate 	    "rsmka_disconnect_node done\n"));
1350*7c478bd9Sstevel@tonic-gate }
1351*7c478bd9Sstevel@tonic-gate 
1352*7c478bd9Sstevel@tonic-gate 
1353*7c478bd9Sstevel@tonic-gate /*
1354*7c478bd9Sstevel@tonic-gate  * Called from rsm_node_alive - if a path to a remote node is in
1355*7c478bd9Sstevel@tonic-gate  * state RSMKA_PATH_UP, transition the state to RSMKA_PATH_ACTIVE with a
1356*7c478bd9Sstevel@tonic-gate  * call to rsmka_do_path_active.
1357*7c478bd9Sstevel@tonic-gate  *
1358*7c478bd9Sstevel@tonic-gate  * REF_CNT:
1359*7c478bd9Sstevel@tonic-gate  * The path descriptor ref_cnt is incremented here; it will be decremented
1360*7c478bd9Sstevel@tonic-gate  * when path up processing is completed in rsmka_do_path_active or by the work
1361*7c478bd9Sstevel@tonic-gate  * thread if the path up is deferred.
1362*7c478bd9Sstevel@tonic-gate  */
1363*7c478bd9Sstevel@tonic-gate static void
1364*7c478bd9Sstevel@tonic-gate pathup_to_pathactive(ipc_info_t *ipc_info, rsm_node_id_t remote_node)
1365*7c478bd9Sstevel@tonic-gate {
1366*7c478bd9Sstevel@tonic-gate 	path_t		*path;
1367*7c478bd9Sstevel@tonic-gate 	sendq_token_t	*token;
1368*7c478bd9Sstevel@tonic-gate 
1369*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1370*7c478bd9Sstevel@tonic-gate 	    "pathup_to_pathactive enter\n"));
1371*7c478bd9Sstevel@tonic-gate 
1372*7c478bd9Sstevel@tonic-gate 	remote_node = remote_node;
1373*7c478bd9Sstevel@tonic-gate 
1374*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&ipc_info_lock));
1375*7c478bd9Sstevel@tonic-gate 
1376*7c478bd9Sstevel@tonic-gate 	token = ipc_info->token_list;
1377*7c478bd9Sstevel@tonic-gate 	while (token != NULL) {
1378*7c478bd9Sstevel@tonic-gate 		path = SQ_TOKEN_TO_PATH(token);
1379*7c478bd9Sstevel@tonic-gate 		mutex_enter(&path->mutex);
1380*7c478bd9Sstevel@tonic-gate 		if (path->state == RSMKA_PATH_UP)  {
1381*7c478bd9Sstevel@tonic-gate 			PATH_HOLD_NOLOCK(path);
1382*7c478bd9Sstevel@tonic-gate 			(void) rsmka_do_path_active(path, 0);
1383*7c478bd9Sstevel@tonic-gate 		}
1384*7c478bd9Sstevel@tonic-gate 		mutex_exit(&path->mutex);
1385*7c478bd9Sstevel@tonic-gate 		token = token->next;
1386*7c478bd9Sstevel@tonic-gate 		if (token == ipc_info->token_list)
1387*7c478bd9Sstevel@tonic-gate 			break;
1388*7c478bd9Sstevel@tonic-gate 	}
1389*7c478bd9Sstevel@tonic-gate 
1390*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1391*7c478bd9Sstevel@tonic-gate 	    "pathup_to_pathactive done\n"));
1392*7c478bd9Sstevel@tonic-gate }
1393*7c478bd9Sstevel@tonic-gate 
1394*7c478bd9Sstevel@tonic-gate /*
1395*7c478bd9Sstevel@tonic-gate  * Called from pathup_to_pathactive and do_path_up. The objective is to
1396*7c478bd9Sstevel@tonic-gate  * create an ipc send queue and transition to state RSMKA_PATH_ACTIVE.
1397*7c478bd9Sstevel@tonic-gate  * For the no sleep case we may need to defer the work using a token.
1398*7c478bd9Sstevel@tonic-gate  *
1399*7c478bd9Sstevel@tonic-gate  */
1400*7c478bd9Sstevel@tonic-gate boolean_t
1401*7c478bd9Sstevel@tonic-gate rsmka_do_path_active(path_t *path, int flags)
1402*7c478bd9Sstevel@tonic-gate {
1403*7c478bd9Sstevel@tonic-gate 	work_token_t	*up_token = &path->work_token[RSMKA_IPC_UP_INDEX];
1404*7c478bd9Sstevel@tonic-gate 	work_token_t	*down_token = &path->work_token[RSMKA_IPC_DOWN_INDEX];
1405*7c478bd9Sstevel@tonic-gate 	boolean_t	do_work = B_FALSE;
1406*7c478bd9Sstevel@tonic-gate 	int		error;
1407*7c478bd9Sstevel@tonic-gate 	timespec_t	tv;
1408*7c478bd9Sstevel@tonic-gate 	adapter_t	*adapter;
1409*7c478bd9Sstevel@tonic-gate 	rsm_send_q_handle_t	sqhdl;
1410*7c478bd9Sstevel@tonic-gate 
1411*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1412*7c478bd9Sstevel@tonic-gate 	    "rsmka_do_path_active enter\n"));
1413*7c478bd9Sstevel@tonic-gate 
1414*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&path->mutex));
1415*7c478bd9Sstevel@tonic-gate 
1416*7c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_NO_SLEEP) {
1417*7c478bd9Sstevel@tonic-gate 		mutex_enter(&work_queue.work_mutex);
1418*7c478bd9Sstevel@tonic-gate 
1419*7c478bd9Sstevel@tonic-gate 		/* if a down token is enqueued, remove it */
1420*7c478bd9Sstevel@tonic-gate 		if (cancel_work(down_token)) {
1421*7c478bd9Sstevel@tonic-gate 			PATH_RELE_NOLOCK(path);
1422*7c478bd9Sstevel@tonic-gate 		}
1423*7c478bd9Sstevel@tonic-gate 
1424*7c478bd9Sstevel@tonic-gate 		/*
1425*7c478bd9Sstevel@tonic-gate 		 * If the path is not active and up work hasn't
1426*7c478bd9Sstevel@tonic-gate 		 * already been setup then up work is needed.
1427*7c478bd9Sstevel@tonic-gate 		 * else
1428*7c478bd9Sstevel@tonic-gate 		 * if down work wasn't canceled because it was
1429*7c478bd9Sstevel@tonic-gate 		 * already being processed then up work is needed
1430*7c478bd9Sstevel@tonic-gate 		 */
1431*7c478bd9Sstevel@tonic-gate 		if (path->state != RSMKA_PATH_ACTIVE) {
1432*7c478bd9Sstevel@tonic-gate 			if (up_token->opcode == 0)
1433*7c478bd9Sstevel@tonic-gate 				do_work = B_TRUE;
1434*7c478bd9Sstevel@tonic-gate 		} else
1435*7c478bd9Sstevel@tonic-gate 			if (down_token->opcode == RSMKA_IPC_DOWN)
1436*7c478bd9Sstevel@tonic-gate 				do_work = B_TRUE;
1437*7c478bd9Sstevel@tonic-gate 
1438*7c478bd9Sstevel@tonic-gate 		if (do_work == B_TRUE) {
1439*7c478bd9Sstevel@tonic-gate 			up_token->opcode = RSMKA_IPC_UP;
1440*7c478bd9Sstevel@tonic-gate 			enqueue_work(up_token);
1441*7c478bd9Sstevel@tonic-gate 		}
1442*7c478bd9Sstevel@tonic-gate 		else
1443*7c478bd9Sstevel@tonic-gate 			PATH_RELE_NOLOCK(path);
1444*7c478bd9Sstevel@tonic-gate 
1445*7c478bd9Sstevel@tonic-gate 		mutex_exit(&work_queue.work_mutex);
1446*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1447*7c478bd9Sstevel@tonic-gate 		    "rsmka_do_path_active done\n"));
1448*7c478bd9Sstevel@tonic-gate 		return (B_TRUE);
1449*7c478bd9Sstevel@tonic-gate 	} else {
1450*7c478bd9Sstevel@tonic-gate 		/*
1451*7c478bd9Sstevel@tonic-gate 		 * Drop the path lock before calling create_ipc_sendq, shouldn't
1452*7c478bd9Sstevel@tonic-gate 		 * hold locks across calls to RSMPI routines.
1453*7c478bd9Sstevel@tonic-gate 		 */
1454*7c478bd9Sstevel@tonic-gate 		mutex_exit(&path->mutex);
1455*7c478bd9Sstevel@tonic-gate 
1456*7c478bd9Sstevel@tonic-gate 		error = create_ipc_sendq(path);
1457*7c478bd9Sstevel@tonic-gate 
1458*7c478bd9Sstevel@tonic-gate 		mutex_enter(&path->mutex);
1459*7c478bd9Sstevel@tonic-gate 		if (path->state != RSMKA_PATH_UP) {
1460*7c478bd9Sstevel@tonic-gate 			/*
1461*7c478bd9Sstevel@tonic-gate 			 * path state has changed, if sendq was created,
1462*7c478bd9Sstevel@tonic-gate 			 * destroy it and return
1463*7c478bd9Sstevel@tonic-gate 			 */
1464*7c478bd9Sstevel@tonic-gate 			if (error == RSM_SUCCESS) {
1465*7c478bd9Sstevel@tonic-gate 				sqhdl = path->sendq_token.rsmpi_sendq_handle;
1466*7c478bd9Sstevel@tonic-gate 				path->sendq_token.rsmpi_sendq_handle = NULL;
1467*7c478bd9Sstevel@tonic-gate 				adapter = path->local_adapter;
1468*7c478bd9Sstevel@tonic-gate 				mutex_exit(&path->mutex);
1469*7c478bd9Sstevel@tonic-gate 
1470*7c478bd9Sstevel@tonic-gate 				if (sqhdl != NULL) {
1471*7c478bd9Sstevel@tonic-gate 					adapter->rsmpi_ops->rsm_sendq_destroy(
1472*7c478bd9Sstevel@tonic-gate 						sqhdl);
1473*7c478bd9Sstevel@tonic-gate 				}
1474*7c478bd9Sstevel@tonic-gate 				mutex_enter(&path->mutex);
1475*7c478bd9Sstevel@tonic-gate 			}
1476*7c478bd9Sstevel@tonic-gate 			PATH_RELE_NOLOCK(path);
1477*7c478bd9Sstevel@tonic-gate 
1478*7c478bd9Sstevel@tonic-gate 			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1479*7c478bd9Sstevel@tonic-gate 			    "rsmka_do_path_active done: path=%lx not UP\n",
1480*7c478bd9Sstevel@tonic-gate 			    (uintptr_t)path));
1481*7c478bd9Sstevel@tonic-gate 			return (error ? B_FALSE : B_TRUE);
1482*7c478bd9Sstevel@tonic-gate 		}
1483*7c478bd9Sstevel@tonic-gate 
1484*7c478bd9Sstevel@tonic-gate 		if (error == RSM_SUCCESS) {
1485*7c478bd9Sstevel@tonic-gate 			/* clear flag since sendq_create succeeded */
1486*7c478bd9Sstevel@tonic-gate 			path->flags &= ~RSMKA_SQCREATE_PENDING;
1487*7c478bd9Sstevel@tonic-gate 			path->state = RSMKA_PATH_ACTIVE;
1488*7c478bd9Sstevel@tonic-gate 			/*
1489*7c478bd9Sstevel@tonic-gate 			 * now that path is active we send the
1490*7c478bd9Sstevel@tonic-gate 			 * RSMIPC_MSG_SQREADY to the remote endpoint
1491*7c478bd9Sstevel@tonic-gate 			 */
1492*7c478bd9Sstevel@tonic-gate 			path->procmsg_cnt = 0;
1493*7c478bd9Sstevel@tonic-gate 			path->sendq_token.msgbuf_avail = 0;
1494*7c478bd9Sstevel@tonic-gate 
1495*7c478bd9Sstevel@tonic-gate 			/* Calculate local incarnation number */
1496*7c478bd9Sstevel@tonic-gate 			gethrestime(&tv);
1497*7c478bd9Sstevel@tonic-gate 			if (tv.tv_sec == RSM_UNKNOWN_INCN)
1498*7c478bd9Sstevel@tonic-gate 				tv.tv_sec = 1;
1499*7c478bd9Sstevel@tonic-gate 			path->local_incn = (int64_t)tv.tv_sec;
1500*7c478bd9Sstevel@tonic-gate 
1501*7c478bd9Sstevel@tonic-gate 			/*
1502*7c478bd9Sstevel@tonic-gate 			 * if send fails here its due to some non-transient
1503*7c478bd9Sstevel@tonic-gate 			 * error because QUEUE_FULL is not possible here since
1504*7c478bd9Sstevel@tonic-gate 			 * we are the first message on this sendq. The error
1505*7c478bd9Sstevel@tonic-gate 			 * will cause the path to go down anyways so ignore
1506*7c478bd9Sstevel@tonic-gate 			 * the return value
1507*7c478bd9Sstevel@tonic-gate 			 */
1508*7c478bd9Sstevel@tonic-gate 			(void) rsmipc_send_controlmsg(path, RSMIPC_MSG_SQREADY);
1509*7c478bd9Sstevel@tonic-gate 			/* wait for SQREADY_ACK message */
1510*7c478bd9Sstevel@tonic-gate 			path->flags |= RSMKA_WAIT_FOR_SQACK;
1511*7c478bd9Sstevel@tonic-gate 
1512*7c478bd9Sstevel@tonic-gate 			DBG_PRINTF((category, RSM_DEBUG,
1513*7c478bd9Sstevel@tonic-gate 			    "rsmka_do_path_active success\n"));
1514*7c478bd9Sstevel@tonic-gate 		} else {
1515*7c478bd9Sstevel@tonic-gate 			/*
1516*7c478bd9Sstevel@tonic-gate 			 * sendq create failed possibly because
1517*7c478bd9Sstevel@tonic-gate 			 * the remote end is not yet ready eg.
1518*7c478bd9Sstevel@tonic-gate 			 * handler not registered, set a flag
1519*7c478bd9Sstevel@tonic-gate 			 * so that when there is an indication
1520*7c478bd9Sstevel@tonic-gate 			 * that the remote end is ready rsmka_do_path_active
1521*7c478bd9Sstevel@tonic-gate 			 * will be retried.
1522*7c478bd9Sstevel@tonic-gate 			 */
1523*7c478bd9Sstevel@tonic-gate 			path->flags |= RSMKA_SQCREATE_PENDING;
1524*7c478bd9Sstevel@tonic-gate 		}
1525*7c478bd9Sstevel@tonic-gate 
1526*7c478bd9Sstevel@tonic-gate 		PATH_RELE_NOLOCK(path);
1527*7c478bd9Sstevel@tonic-gate 
1528*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1529*7c478bd9Sstevel@tonic-gate 		    "rsmka_do_path_active done\n"));
1530*7c478bd9Sstevel@tonic-gate 		return (error ? B_FALSE : B_TRUE);
1531*7c478bd9Sstevel@tonic-gate 	}
1532*7c478bd9Sstevel@tonic-gate 
1533*7c478bd9Sstevel@tonic-gate }
1534*7c478bd9Sstevel@tonic-gate 
1535*7c478bd9Sstevel@tonic-gate /*
1536*7c478bd9Sstevel@tonic-gate  * Called from rsm_path_up.
1537*7c478bd9Sstevel@tonic-gate  * If the remote node state is "alive" then call rsmka_do_path_active
1538*7c478bd9Sstevel@tonic-gate  * otherwise just transition path state to RSMKA_PATH_UP.
1539*7c478bd9Sstevel@tonic-gate  */
1540*7c478bd9Sstevel@tonic-gate static boolean_t
1541*7c478bd9Sstevel@tonic-gate do_path_up(path_t *path, int flags)
1542*7c478bd9Sstevel@tonic-gate {
1543*7c478bd9Sstevel@tonic-gate 	boolean_t	rval;
1544*7c478bd9Sstevel@tonic-gate 	boolean_t	node_alive;
1545*7c478bd9Sstevel@tonic-gate 
1546*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_up enter\n"));
1547*7c478bd9Sstevel@tonic-gate 
1548*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&path->mutex));
1549*7c478bd9Sstevel@tonic-gate 
1550*7c478bd9Sstevel@tonic-gate 	/* path moved to ACTIVE by rsm_sqcreateop_callback - just return */
1551*7c478bd9Sstevel@tonic-gate 	if (path->state == RSMKA_PATH_ACTIVE) {
1552*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1553*7c478bd9Sstevel@tonic-gate 			"do_path_up done: already ACTIVE\n"));
1554*7c478bd9Sstevel@tonic-gate 		PATH_RELE_NOLOCK(path);
1555*7c478bd9Sstevel@tonic-gate 		return (B_TRUE);
1556*7c478bd9Sstevel@tonic-gate 	}
1557*7c478bd9Sstevel@tonic-gate 
1558*7c478bd9Sstevel@tonic-gate 	path->state = RSMKA_PATH_UP;
1559*7c478bd9Sstevel@tonic-gate 
1560*7c478bd9Sstevel@tonic-gate 	/* initialize the receive msgbuf counters */
1561*7c478bd9Sstevel@tonic-gate 	path->msgbuf_head = 0;
1562*7c478bd9Sstevel@tonic-gate 	path->msgbuf_tail = RSMIPC_MAX_MESSAGES - 1;
1563*7c478bd9Sstevel@tonic-gate 	path->msgbuf_cnt = 0;
1564*7c478bd9Sstevel@tonic-gate 	path->procmsg_cnt = 0;
1565*7c478bd9Sstevel@tonic-gate 	/*
1566*7c478bd9Sstevel@tonic-gate 	 * rsmka_check_node_alive acquires ipc_info_lock, in order to maintain
1567*7c478bd9Sstevel@tonic-gate 	 * correct lock ordering drop the path lock before calling it.
1568*7c478bd9Sstevel@tonic-gate 	 */
1569*7c478bd9Sstevel@tonic-gate 	mutex_exit(&path->mutex);
1570*7c478bd9Sstevel@tonic-gate 
1571*7c478bd9Sstevel@tonic-gate 	node_alive = rsmka_check_node_alive(path->remote_node);
1572*7c478bd9Sstevel@tonic-gate 
1573*7c478bd9Sstevel@tonic-gate 	mutex_enter(&path->mutex);
1574*7c478bd9Sstevel@tonic-gate 	if (node_alive == B_TRUE)
1575*7c478bd9Sstevel@tonic-gate 		rval = rsmka_do_path_active(path, flags);
1576*7c478bd9Sstevel@tonic-gate 	else {
1577*7c478bd9Sstevel@tonic-gate 		PATH_RELE_NOLOCK(path);
1578*7c478bd9Sstevel@tonic-gate 		rval = B_TRUE;
1579*7c478bd9Sstevel@tonic-gate 	}
1580*7c478bd9Sstevel@tonic-gate 
1581*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_up done\n"));
1582*7c478bd9Sstevel@tonic-gate 	return (rval);
1583*7c478bd9Sstevel@tonic-gate }
1584*7c478bd9Sstevel@tonic-gate 
1585*7c478bd9Sstevel@tonic-gate 
1586*7c478bd9Sstevel@tonic-gate 
1587*7c478bd9Sstevel@tonic-gate /*
1588*7c478bd9Sstevel@tonic-gate  * Called from rsm_remove_path, rsm_path_down, deferred_work.
1589*7c478bd9Sstevel@tonic-gate  * Destroy the send queue on this path.
1590*7c478bd9Sstevel@tonic-gate  * Disconnect segments being imported from the remote node
1591*7c478bd9Sstevel@tonic-gate  * Disconnect segments being imported by the remote node
1592*7c478bd9Sstevel@tonic-gate  *
1593*7c478bd9Sstevel@tonic-gate  */
1594*7c478bd9Sstevel@tonic-gate static void
1595*7c478bd9Sstevel@tonic-gate do_path_down(path_t *path, int flags)
1596*7c478bd9Sstevel@tonic-gate {
1597*7c478bd9Sstevel@tonic-gate 	work_token_t *up_token = &path->work_token[RSMKA_IPC_UP_INDEX];
1598*7c478bd9Sstevel@tonic-gate 	work_token_t *down_token = &path->work_token[RSMKA_IPC_DOWN_INDEX];
1599*7c478bd9Sstevel@tonic-gate 	boolean_t do_work = B_FALSE;
1600*7c478bd9Sstevel@tonic-gate 
1601*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_down enter\n"));
1602*7c478bd9Sstevel@tonic-gate 
1603*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&path->mutex));
1604*7c478bd9Sstevel@tonic-gate 
1605*7c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_NO_SLEEP) {
1606*7c478bd9Sstevel@tonic-gate 		mutex_enter(&work_queue.work_mutex);
1607*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG,
1608*7c478bd9Sstevel@tonic-gate 		    "do_path_down: after work_mutex\n"));
1609*7c478bd9Sstevel@tonic-gate 
1610*7c478bd9Sstevel@tonic-gate 		/* if an up token is enqueued, remove it */
1611*7c478bd9Sstevel@tonic-gate 		if (cancel_work(up_token)) {
1612*7c478bd9Sstevel@tonic-gate 			PATH_RELE_NOLOCK(path);
1613*7c478bd9Sstevel@tonic-gate 		}
1614*7c478bd9Sstevel@tonic-gate 
1615*7c478bd9Sstevel@tonic-gate 		/*
1616*7c478bd9Sstevel@tonic-gate 		 * If the path is active and down work hasn't
1617*7c478bd9Sstevel@tonic-gate 		 * already been setup then down work is needed.
1618*7c478bd9Sstevel@tonic-gate 		 * else
1619*7c478bd9Sstevel@tonic-gate 		 * if up work wasn't canceled because it was
1620*7c478bd9Sstevel@tonic-gate 		 * already being processed then down work is needed
1621*7c478bd9Sstevel@tonic-gate 		 */
1622*7c478bd9Sstevel@tonic-gate 		if (path->state == RSMKA_PATH_ACTIVE) {
1623*7c478bd9Sstevel@tonic-gate 			if (down_token->opcode == 0)
1624*7c478bd9Sstevel@tonic-gate 				do_work = B_TRUE;
1625*7c478bd9Sstevel@tonic-gate 		} else
1626*7c478bd9Sstevel@tonic-gate 			if (up_token->opcode == RSMKA_IPC_UP)
1627*7c478bd9Sstevel@tonic-gate 				do_work = B_TRUE;
1628*7c478bd9Sstevel@tonic-gate 
1629*7c478bd9Sstevel@tonic-gate 		if (do_work == B_TRUE) {
1630*7c478bd9Sstevel@tonic-gate 			down_token->opcode = RSMKA_IPC_DOWN;
1631*7c478bd9Sstevel@tonic-gate 			enqueue_work(down_token);
1632*7c478bd9Sstevel@tonic-gate 		} else
1633*7c478bd9Sstevel@tonic-gate 			PATH_RELE_NOLOCK(path);
1634*7c478bd9Sstevel@tonic-gate 
1635*7c478bd9Sstevel@tonic-gate 
1636*7c478bd9Sstevel@tonic-gate 		mutex_exit(&work_queue.work_mutex);
1637*7c478bd9Sstevel@tonic-gate 
1638*7c478bd9Sstevel@tonic-gate 	} else {
1639*7c478bd9Sstevel@tonic-gate 
1640*7c478bd9Sstevel@tonic-gate 		/*
1641*7c478bd9Sstevel@tonic-gate 		 * Change state of the path to RSMKA_PATH_GOING_DOWN and
1642*7c478bd9Sstevel@tonic-gate 		 * release the path mutex. Any other thread referring
1643*7c478bd9Sstevel@tonic-gate 		 * this path would cv_wait till the state of the path
1644*7c478bd9Sstevel@tonic-gate 		 * remains RSMKA_PATH_GOING_DOWN.
1645*7c478bd9Sstevel@tonic-gate 		 * On completing the path down processing, change the
1646*7c478bd9Sstevel@tonic-gate 		 * state of RSMKA_PATH_DOWN indicating that the path
1647*7c478bd9Sstevel@tonic-gate 		 * is indeed down.
1648*7c478bd9Sstevel@tonic-gate 		 */
1649*7c478bd9Sstevel@tonic-gate 		path->state = RSMKA_PATH_GOING_DOWN;
1650*7c478bd9Sstevel@tonic-gate 
1651*7c478bd9Sstevel@tonic-gate 		/*
1652*7c478bd9Sstevel@tonic-gate 		 * clear the WAIT_FOR_SQACK flag since path is going down.
1653*7c478bd9Sstevel@tonic-gate 		 */
1654*7c478bd9Sstevel@tonic-gate 		path->flags &= ~RSMKA_WAIT_FOR_SQACK;
1655*7c478bd9Sstevel@tonic-gate 
1656*7c478bd9Sstevel@tonic-gate 		/*
1657*7c478bd9Sstevel@tonic-gate 		 * this wakes up any thread waiting to receive credits
1658*7c478bd9Sstevel@tonic-gate 		 * in rsmipc_send to tell it that the path is going down
1659*7c478bd9Sstevel@tonic-gate 		 */
1660*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&path->sendq_token.sendq_cv);
1661*7c478bd9Sstevel@tonic-gate 
1662*7c478bd9Sstevel@tonic-gate 		mutex_exit(&path->mutex);
1663*7c478bd9Sstevel@tonic-gate 
1664*7c478bd9Sstevel@tonic-gate 		/*
1665*7c478bd9Sstevel@tonic-gate 		 * drain the messages from the receive msgbuf, the
1666*7c478bd9Sstevel@tonic-gate 		 * tasks in the taskq_thread acquire the path->mutex
1667*7c478bd9Sstevel@tonic-gate 		 * so we drop the path mutex before taskq_wait.
1668*7c478bd9Sstevel@tonic-gate 		 */
1669*7c478bd9Sstevel@tonic-gate 		taskq_wait(path->recv_taskq);
1670*7c478bd9Sstevel@tonic-gate 
1671*7c478bd9Sstevel@tonic-gate 		/*
1672*7c478bd9Sstevel@tonic-gate 		 * Disconnect segments being imported from the remote node
1673*7c478bd9Sstevel@tonic-gate 		 * The path_importer_disconnect function needs to be called
1674*7c478bd9Sstevel@tonic-gate 		 * only after releasing the mutex on the path. This is to
1675*7c478bd9Sstevel@tonic-gate 		 * avoid a recursive mutex enter when doing the
1676*7c478bd9Sstevel@tonic-gate 		 * rsmka_get_sendq_token.
1677*7c478bd9Sstevel@tonic-gate 		 */
1678*7c478bd9Sstevel@tonic-gate 		path_importer_disconnect(path);
1679*7c478bd9Sstevel@tonic-gate 
1680*7c478bd9Sstevel@tonic-gate 		/*
1681*7c478bd9Sstevel@tonic-gate 		 * Get the path mutex, change the state of the path to
1682*7c478bd9Sstevel@tonic-gate 		 * RSMKA_PATH_DOWN since the path down processing has
1683*7c478bd9Sstevel@tonic-gate 		 * completed and cv_signal anyone who was waiting since
1684*7c478bd9Sstevel@tonic-gate 		 * the state was RSMKA_PATH_GOING_DOWN.
1685*7c478bd9Sstevel@tonic-gate 		 * NOTE: Do not do a mutex_exit here. We entered this
1686*7c478bd9Sstevel@tonic-gate 		 * routine with the path lock held by the caller. The
1687*7c478bd9Sstevel@tonic-gate 		 * caller eventually releases the path lock by doing a
1688*7c478bd9Sstevel@tonic-gate 		 * mutex_exit.
1689*7c478bd9Sstevel@tonic-gate 		 */
1690*7c478bd9Sstevel@tonic-gate 		mutex_enter(&path->mutex);
1691*7c478bd9Sstevel@tonic-gate 
1692*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
1693*7c478bd9Sstevel@tonic-gate 		/*
1694*7c478bd9Sstevel@tonic-gate 		 * Some IPC messages left in the recv_buf,
1695*7c478bd9Sstevel@tonic-gate 		 * they'll be dropped
1696*7c478bd9Sstevel@tonic-gate 		 */
1697*7c478bd9Sstevel@tonic-gate 		if (path->msgbuf_cnt != 0)
1698*7c478bd9Sstevel@tonic-gate 			cmn_err(CE_NOTE, "path=%lx msgbuf_cnt != 0\n",
1699*7c478bd9Sstevel@tonic-gate 			    (uintptr_t)path);
1700*7c478bd9Sstevel@tonic-gate #endif
1701*7c478bd9Sstevel@tonic-gate 		while (path->sendq_token.ref_cnt != 0)
1702*7c478bd9Sstevel@tonic-gate 			cv_wait(&path->sendq_token.sendq_cv,
1703*7c478bd9Sstevel@tonic-gate 			    &path->mutex);
1704*7c478bd9Sstevel@tonic-gate 
1705*7c478bd9Sstevel@tonic-gate 		/* release the rsmpi handle */
1706*7c478bd9Sstevel@tonic-gate 		if (path->sendq_token.rsmpi_sendq_handle != NULL)
1707*7c478bd9Sstevel@tonic-gate 			path->local_adapter->rsmpi_ops->rsm_sendq_destroy(
1708*7c478bd9Sstevel@tonic-gate 			    path->sendq_token.rsmpi_sendq_handle);
1709*7c478bd9Sstevel@tonic-gate 
1710*7c478bd9Sstevel@tonic-gate 		path->sendq_token.rsmpi_sendq_handle = NULL;
1711*7c478bd9Sstevel@tonic-gate 
1712*7c478bd9Sstevel@tonic-gate 		path->state = RSMKA_PATH_DOWN;
1713*7c478bd9Sstevel@tonic-gate 
1714*7c478bd9Sstevel@tonic-gate 		cv_signal(&path->hold_cv);
1715*7c478bd9Sstevel@tonic-gate 
1716*7c478bd9Sstevel@tonic-gate 	}
1717*7c478bd9Sstevel@tonic-gate 
1718*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_path_down done\n"));
1719*7c478bd9Sstevel@tonic-gate 
1720*7c478bd9Sstevel@tonic-gate }
1721*7c478bd9Sstevel@tonic-gate 
1722*7c478bd9Sstevel@tonic-gate /*
1723*7c478bd9Sstevel@tonic-gate  * Search through the list of imported segments for segments using this path
1724*7c478bd9Sstevel@tonic-gate  * and unload the memory mappings for each one.  The application will
1725*7c478bd9Sstevel@tonic-gate  * get an error return when a barrier close is invoked.
1726*7c478bd9Sstevel@tonic-gate  * NOTE: This function has to be called only after releasing the mutex on
1727*7c478bd9Sstevel@tonic-gate  * the path. This is to avoid any recursive mutex panics on the path mutex
1728*7c478bd9Sstevel@tonic-gate  * since the path_importer_disconnect function would end up calling
1729*7c478bd9Sstevel@tonic-gate  * rsmka_get_sendq_token which requires the path mutex.
1730*7c478bd9Sstevel@tonic-gate  */
1731*7c478bd9Sstevel@tonic-gate 
1732*7c478bd9Sstevel@tonic-gate static void
1733*7c478bd9Sstevel@tonic-gate path_importer_disconnect(path_t *path)
1734*7c478bd9Sstevel@tonic-gate {
1735*7c478bd9Sstevel@tonic-gate 	int i;
1736*7c478bd9Sstevel@tonic-gate 	adapter_t *adapter = path->local_adapter;
1737*7c478bd9Sstevel@tonic-gate 	rsm_node_id_t remote_node = path->remote_node;
1738*7c478bd9Sstevel@tonic-gate 	rsmresource_t		*p = NULL;
1739*7c478bd9Sstevel@tonic-gate 	rsmseg_t *seg;
1740*7c478bd9Sstevel@tonic-gate 
1741*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1742*7c478bd9Sstevel@tonic-gate 	    "path_importer_disconnect enter\n"));
1743*7c478bd9Sstevel@tonic-gate 
1744*7c478bd9Sstevel@tonic-gate 	rw_enter(&rsm_import_segs.rsmhash_rw, RW_READER);
1745*7c478bd9Sstevel@tonic-gate 
1746*7c478bd9Sstevel@tonic-gate 	if (rsm_import_segs.bucket != NULL) {
1747*7c478bd9Sstevel@tonic-gate 		for (i = 0; i < rsm_hash_size; i++) {
1748*7c478bd9Sstevel@tonic-gate 			p = rsm_import_segs.bucket[i];
1749*7c478bd9Sstevel@tonic-gate 			for (; p; p = p->rsmrc_next) {
1750*7c478bd9Sstevel@tonic-gate 				if ((p->rsmrc_node == remote_node) &&
1751*7c478bd9Sstevel@tonic-gate 				    (p->rsmrc_adapter == adapter)) {
1752*7c478bd9Sstevel@tonic-gate 					seg = (rsmseg_t *)p;
1753*7c478bd9Sstevel@tonic-gate 			/*
1754*7c478bd9Sstevel@tonic-gate 			 * In order to make rsmseg_unload and
1755*7c478bd9Sstevel@tonic-gate 			 * path_importer_disconnect thread safe, acquire the
1756*7c478bd9Sstevel@tonic-gate 			 * segment lock here. rsmseg_unload is responsible for
1757*7c478bd9Sstevel@tonic-gate 			 * releasing the lock. rsmseg_unload releases the lock
1758*7c478bd9Sstevel@tonic-gate 			 * just before a call to rsmipc_send or in case of an
1759*7c478bd9Sstevel@tonic-gate 			 * early exit which occurs if the segment was in the
1760*7c478bd9Sstevel@tonic-gate 			 * state RSM_STATE_CONNECTING or RSM_STATE_NEW.
1761*7c478bd9Sstevel@tonic-gate 			 */
1762*7c478bd9Sstevel@tonic-gate 					rsmseglock_acquire(seg);
1763*7c478bd9Sstevel@tonic-gate 					seg->s_flags |= RSM_FORCE_DISCONNECT;
1764*7c478bd9Sstevel@tonic-gate 					rsmseg_unload(seg);
1765*7c478bd9Sstevel@tonic-gate 				}
1766*7c478bd9Sstevel@tonic-gate 			}
1767*7c478bd9Sstevel@tonic-gate 		}
1768*7c478bd9Sstevel@tonic-gate 	}
1769*7c478bd9Sstevel@tonic-gate 	rw_exit(&rsm_import_segs.rsmhash_rw);
1770*7c478bd9Sstevel@tonic-gate 
1771*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1772*7c478bd9Sstevel@tonic-gate 	    "path_importer_disconnect done\n"));
1773*7c478bd9Sstevel@tonic-gate }
1774*7c478bd9Sstevel@tonic-gate 
1775*7c478bd9Sstevel@tonic-gate 
1776*7c478bd9Sstevel@tonic-gate 
1777*7c478bd9Sstevel@tonic-gate 
1778*7c478bd9Sstevel@tonic-gate /*
1779*7c478bd9Sstevel@tonic-gate  *
1780*7c478bd9Sstevel@tonic-gate  * ADAPTER UTILITY FUNCTIONS
1781*7c478bd9Sstevel@tonic-gate  *
1782*7c478bd9Sstevel@tonic-gate  */
1783*7c478bd9Sstevel@tonic-gate 
1784*7c478bd9Sstevel@tonic-gate 
1785*7c478bd9Sstevel@tonic-gate 
1786*7c478bd9Sstevel@tonic-gate /*
1787*7c478bd9Sstevel@tonic-gate  * Allocate new adapter list head structure and add it to the beginning of
1788*7c478bd9Sstevel@tonic-gate  * the list of adapter list heads.  There is one list for each adapter
1789*7c478bd9Sstevel@tonic-gate  * device name (or type).
1790*7c478bd9Sstevel@tonic-gate  */
1791*7c478bd9Sstevel@tonic-gate static adapter_listhead_t *
1792*7c478bd9Sstevel@tonic-gate init_listhead(char *name)
1793*7c478bd9Sstevel@tonic-gate {
1794*7c478bd9Sstevel@tonic-gate 	adapter_listhead_t *listhead;
1795*7c478bd9Sstevel@tonic-gate 
1796*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_listhead enter\n"));
1797*7c478bd9Sstevel@tonic-gate 
1798*7c478bd9Sstevel@tonic-gate 	/* allocation and initialization */
1799*7c478bd9Sstevel@tonic-gate 	listhead = kmem_zalloc(sizeof (adapter_listhead_t), KM_SLEEP);
1800*7c478bd9Sstevel@tonic-gate 	mutex_init(&listhead->mutex, NULL, MUTEX_DEFAULT, NULL);
1801*7c478bd9Sstevel@tonic-gate 	(void) strcpy(listhead->adapter_devname, name);
1802*7c478bd9Sstevel@tonic-gate 
1803*7c478bd9Sstevel@tonic-gate 	/* link into list of listheads */
1804*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter_listhead_base.listlock);
1805*7c478bd9Sstevel@tonic-gate 	if (adapter_listhead_base.next == NULL) {
1806*7c478bd9Sstevel@tonic-gate 		adapter_listhead_base.next = listhead;
1807*7c478bd9Sstevel@tonic-gate 		listhead->next_listhead = NULL;
1808*7c478bd9Sstevel@tonic-gate 	} else {
1809*7c478bd9Sstevel@tonic-gate 		listhead->next_listhead = adapter_listhead_base.next;
1810*7c478bd9Sstevel@tonic-gate 		adapter_listhead_base.next = listhead;
1811*7c478bd9Sstevel@tonic-gate 	}
1812*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter_listhead_base.listlock);
1813*7c478bd9Sstevel@tonic-gate 
1814*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_listhead done\n"));
1815*7c478bd9Sstevel@tonic-gate 
1816*7c478bd9Sstevel@tonic-gate 	return (listhead);
1817*7c478bd9Sstevel@tonic-gate }
1818*7c478bd9Sstevel@tonic-gate 
1819*7c478bd9Sstevel@tonic-gate 
1820*7c478bd9Sstevel@tonic-gate /*
1821*7c478bd9Sstevel@tonic-gate  * Search the list of adapter list heads for a match on name.
1822*7c478bd9Sstevel@tonic-gate  *
1823*7c478bd9Sstevel@tonic-gate  */
1824*7c478bd9Sstevel@tonic-gate static adapter_listhead_t *
1825*7c478bd9Sstevel@tonic-gate lookup_adapter_listhead(char *name)
1826*7c478bd9Sstevel@tonic-gate {
1827*7c478bd9Sstevel@tonic-gate 	adapter_listhead_t *listhead;
1828*7c478bd9Sstevel@tonic-gate 
1829*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1830*7c478bd9Sstevel@tonic-gate 	    "lookup_adapter_listhead enter\n"));
1831*7c478bd9Sstevel@tonic-gate 
1832*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter_listhead_base.listlock);
1833*7c478bd9Sstevel@tonic-gate 	listhead = adapter_listhead_base.next;
1834*7c478bd9Sstevel@tonic-gate 	while (listhead != NULL) {
1835*7c478bd9Sstevel@tonic-gate 		if (strcmp(name, listhead->adapter_devname) == 0)
1836*7c478bd9Sstevel@tonic-gate 			break;
1837*7c478bd9Sstevel@tonic-gate 		listhead = listhead->next_listhead;
1838*7c478bd9Sstevel@tonic-gate 	}
1839*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter_listhead_base.listlock);
1840*7c478bd9Sstevel@tonic-gate 
1841*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1842*7c478bd9Sstevel@tonic-gate 	    "lookup_adapter_listhead done\n"));
1843*7c478bd9Sstevel@tonic-gate 
1844*7c478bd9Sstevel@tonic-gate 	return (listhead);
1845*7c478bd9Sstevel@tonic-gate }
1846*7c478bd9Sstevel@tonic-gate 
1847*7c478bd9Sstevel@tonic-gate 
1848*7c478bd9Sstevel@tonic-gate /*
1849*7c478bd9Sstevel@tonic-gate  * Get the adapter list head corresponding to devname and search for
1850*7c478bd9Sstevel@tonic-gate  * an adapter descriptor with a match on the instance number. If
1851*7c478bd9Sstevel@tonic-gate  * successful, increment the descriptor reference count and return
1852*7c478bd9Sstevel@tonic-gate  * the descriptor pointer to the caller.
1853*7c478bd9Sstevel@tonic-gate  *
1854*7c478bd9Sstevel@tonic-gate  */
1855*7c478bd9Sstevel@tonic-gate adapter_t *
1856*7c478bd9Sstevel@tonic-gate rsmka_lookup_adapter(char *devname, int instance)
1857*7c478bd9Sstevel@tonic-gate {
1858*7c478bd9Sstevel@tonic-gate 	adapter_listhead_t *listhead;
1859*7c478bd9Sstevel@tonic-gate 	adapter_t *current = NULL;
1860*7c478bd9Sstevel@tonic-gate 
1861*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1862*7c478bd9Sstevel@tonic-gate 	    "rsmka_lookup_adapter enter\n"));
1863*7c478bd9Sstevel@tonic-gate 
1864*7c478bd9Sstevel@tonic-gate 	listhead = lookup_adapter_listhead(devname);
1865*7c478bd9Sstevel@tonic-gate 	if (listhead != NULL) {
1866*7c478bd9Sstevel@tonic-gate 		mutex_enter(&listhead->mutex);
1867*7c478bd9Sstevel@tonic-gate 
1868*7c478bd9Sstevel@tonic-gate 		current = listhead->next_adapter;
1869*7c478bd9Sstevel@tonic-gate 		while (current != NULL) {
1870*7c478bd9Sstevel@tonic-gate 			if (current->instance == instance) {
1871*7c478bd9Sstevel@tonic-gate 				ADAPTER_HOLD(current);
1872*7c478bd9Sstevel@tonic-gate 				break;
1873*7c478bd9Sstevel@tonic-gate 			} else
1874*7c478bd9Sstevel@tonic-gate 				current = current->next;
1875*7c478bd9Sstevel@tonic-gate 		}
1876*7c478bd9Sstevel@tonic-gate 
1877*7c478bd9Sstevel@tonic-gate 		mutex_exit(&listhead->mutex);
1878*7c478bd9Sstevel@tonic-gate 	}
1879*7c478bd9Sstevel@tonic-gate 
1880*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1881*7c478bd9Sstevel@tonic-gate 	    "rsmka_lookup_adapter done\n"));
1882*7c478bd9Sstevel@tonic-gate 
1883*7c478bd9Sstevel@tonic-gate 	return (current);
1884*7c478bd9Sstevel@tonic-gate }
1885*7c478bd9Sstevel@tonic-gate 
1886*7c478bd9Sstevel@tonic-gate /*
1887*7c478bd9Sstevel@tonic-gate  * Called from rsmka_remove_adapter or rsmseg_free.
1888*7c478bd9Sstevel@tonic-gate  * rsm_bind() and rsm_connect() store the adapter pointer returned
1889*7c478bd9Sstevel@tonic-gate  * from rsmka_getadapter.  The pointer is kept in the segment descriptor.
1890*7c478bd9Sstevel@tonic-gate  * When the segment is freed, this routine is called by rsmseg_free to decrement
1891*7c478bd9Sstevel@tonic-gate  * the adapter descriptor reference count and possibly free the
1892*7c478bd9Sstevel@tonic-gate  * descriptor.
1893*7c478bd9Sstevel@tonic-gate  */
1894*7c478bd9Sstevel@tonic-gate void
1895*7c478bd9Sstevel@tonic-gate rsmka_release_adapter(adapter_t *adapter)
1896*7c478bd9Sstevel@tonic-gate {
1897*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1898*7c478bd9Sstevel@tonic-gate 	    "rsmka_release_adapter enter\n"));
1899*7c478bd9Sstevel@tonic-gate 
1900*7c478bd9Sstevel@tonic-gate 	if (adapter == &loopback_adapter) {
1901*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1902*7c478bd9Sstevel@tonic-gate 		    "rsmka_release_adapter done\n"));
1903*7c478bd9Sstevel@tonic-gate 		return;
1904*7c478bd9Sstevel@tonic-gate 	}
1905*7c478bd9Sstevel@tonic-gate 
1906*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter->mutex);
1907*7c478bd9Sstevel@tonic-gate 
1908*7c478bd9Sstevel@tonic-gate 	/* decrement reference count */
1909*7c478bd9Sstevel@tonic-gate 	ADAPTER_RELE_NOLOCK(adapter);
1910*7c478bd9Sstevel@tonic-gate 
1911*7c478bd9Sstevel@tonic-gate 	/*
1912*7c478bd9Sstevel@tonic-gate 	 * if the adapter descriptor reference count is equal to the
1913*7c478bd9Sstevel@tonic-gate 	 * initialization value of one, then the descriptor has been
1914*7c478bd9Sstevel@tonic-gate 	 * unlinked and can now be freed.
1915*7c478bd9Sstevel@tonic-gate 	 */
1916*7c478bd9Sstevel@tonic-gate 	if (adapter->ref_cnt == 1) {
1917*7c478bd9Sstevel@tonic-gate 		mutex_exit(&adapter->mutex);
1918*7c478bd9Sstevel@tonic-gate 
1919*7c478bd9Sstevel@tonic-gate 		mutex_destroy(&adapter->mutex);
1920*7c478bd9Sstevel@tonic-gate 		kmem_free(adapter->hdlr_argp, sizeof (srv_handler_arg_t));
1921*7c478bd9Sstevel@tonic-gate 		kmem_free(adapter, sizeof (adapter_t));
1922*7c478bd9Sstevel@tonic-gate 	}
1923*7c478bd9Sstevel@tonic-gate 	else
1924*7c478bd9Sstevel@tonic-gate 		mutex_exit(&adapter->mutex);
1925*7c478bd9Sstevel@tonic-gate 
1926*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
1927*7c478bd9Sstevel@tonic-gate 	    "rsmka_release_adapter done\n"));
1928*7c478bd9Sstevel@tonic-gate 
1929*7c478bd9Sstevel@tonic-gate }
1930*7c478bd9Sstevel@tonic-gate 
1931*7c478bd9Sstevel@tonic-gate 
1932*7c478bd9Sstevel@tonic-gate 
1933*7c478bd9Sstevel@tonic-gate /*
1934*7c478bd9Sstevel@tonic-gate  * Singly linked list. Add to the front.
1935*7c478bd9Sstevel@tonic-gate  */
1936*7c478bd9Sstevel@tonic-gate static void
1937*7c478bd9Sstevel@tonic-gate link_adapter(adapter_t *adapter)
1938*7c478bd9Sstevel@tonic-gate {
1939*7c478bd9Sstevel@tonic-gate 
1940*7c478bd9Sstevel@tonic-gate 	adapter_listhead_t *listhead;
1941*7c478bd9Sstevel@tonic-gate 	adapter_t *current;
1942*7c478bd9Sstevel@tonic-gate 
1943*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_adapter enter\n"));
1944*7c478bd9Sstevel@tonic-gate 
1945*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter_listhead_base.listlock);
1946*7c478bd9Sstevel@tonic-gate 
1947*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter->listhead->mutex);
1948*7c478bd9Sstevel@tonic-gate 
1949*7c478bd9Sstevel@tonic-gate 	listhead = adapter->listhead;
1950*7c478bd9Sstevel@tonic-gate 	current = listhead->next_adapter;
1951*7c478bd9Sstevel@tonic-gate 	listhead->next_adapter = adapter;
1952*7c478bd9Sstevel@tonic-gate 	adapter->next = current;
1953*7c478bd9Sstevel@tonic-gate 	ADAPTER_HOLD(adapter);
1954*7c478bd9Sstevel@tonic-gate 
1955*7c478bd9Sstevel@tonic-gate 	adapter->listhead->adapter_count++;
1956*7c478bd9Sstevel@tonic-gate 
1957*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter->listhead->mutex);
1958*7c478bd9Sstevel@tonic-gate 
1959*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter_listhead_base.listlock);
1960*7c478bd9Sstevel@tonic-gate 
1961*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_adapter done\n"));
1962*7c478bd9Sstevel@tonic-gate }
1963*7c478bd9Sstevel@tonic-gate 
1964*7c478bd9Sstevel@tonic-gate 
1965*7c478bd9Sstevel@tonic-gate /*
1966*7c478bd9Sstevel@tonic-gate  * Return adapter descriptor
1967*7c478bd9Sstevel@tonic-gate  *
1968*7c478bd9Sstevel@tonic-gate  * lookup_adapter_listhead returns with the the list of adapter listheads
1969*7c478bd9Sstevel@tonic-gate  * locked.  After adding the adapter descriptor, the adapter listhead list
1970*7c478bd9Sstevel@tonic-gate  * lock is dropped.
1971*7c478bd9Sstevel@tonic-gate  */
1972*7c478bd9Sstevel@tonic-gate static adapter_t *
1973*7c478bd9Sstevel@tonic-gate init_adapter(char *name, int instance, rsm_addr_t hwaddr,
1974*7c478bd9Sstevel@tonic-gate     rsm_controller_handle_t handle, rsm_ops_t *ops,
1975*7c478bd9Sstevel@tonic-gate     srv_handler_arg_t *hdlr_argp)
1976*7c478bd9Sstevel@tonic-gate {
1977*7c478bd9Sstevel@tonic-gate 	adapter_t *adapter;
1978*7c478bd9Sstevel@tonic-gate 	adapter_listhead_t *listhead;
1979*7c478bd9Sstevel@tonic-gate 
1980*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_adapter enter\n"));
1981*7c478bd9Sstevel@tonic-gate 
1982*7c478bd9Sstevel@tonic-gate 	adapter = kmem_zalloc(sizeof (adapter_t), KM_SLEEP);
1983*7c478bd9Sstevel@tonic-gate 	adapter->instance = instance;
1984*7c478bd9Sstevel@tonic-gate 	adapter->hwaddr = hwaddr;
1985*7c478bd9Sstevel@tonic-gate 	adapter->rsmpi_handle = handle;
1986*7c478bd9Sstevel@tonic-gate 	adapter->rsmpi_ops = ops;
1987*7c478bd9Sstevel@tonic-gate 	adapter->hdlr_argp = hdlr_argp;
1988*7c478bd9Sstevel@tonic-gate 	mutex_init(&adapter->mutex, NULL, MUTEX_DEFAULT, NULL);
1989*7c478bd9Sstevel@tonic-gate 	ADAPTER_HOLD(adapter);
1990*7c478bd9Sstevel@tonic-gate 
1991*7c478bd9Sstevel@tonic-gate 
1992*7c478bd9Sstevel@tonic-gate 	listhead = lookup_adapter_listhead(name);
1993*7c478bd9Sstevel@tonic-gate 	if (listhead == NULL)  {
1994*7c478bd9Sstevel@tonic-gate 		listhead = init_listhead(name);
1995*7c478bd9Sstevel@tonic-gate 	}
1996*7c478bd9Sstevel@tonic-gate 
1997*7c478bd9Sstevel@tonic-gate 	adapter->listhead = listhead;
1998*7c478bd9Sstevel@tonic-gate 
1999*7c478bd9Sstevel@tonic-gate 	link_adapter(adapter);
2000*7c478bd9Sstevel@tonic-gate 
2001*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_adapter done\n"));
2002*7c478bd9Sstevel@tonic-gate 
2003*7c478bd9Sstevel@tonic-gate 	return (adapter);
2004*7c478bd9Sstevel@tonic-gate }
2005*7c478bd9Sstevel@tonic-gate 
2006*7c478bd9Sstevel@tonic-gate /*
2007*7c478bd9Sstevel@tonic-gate  *
2008*7c478bd9Sstevel@tonic-gate  * PATH UTILITY FUNCTIONS
2009*7c478bd9Sstevel@tonic-gate  *
2010*7c478bd9Sstevel@tonic-gate  */
2011*7c478bd9Sstevel@tonic-gate 
2012*7c478bd9Sstevel@tonic-gate 
2013*7c478bd9Sstevel@tonic-gate /*
2014*7c478bd9Sstevel@tonic-gate  * Search the per adapter path list for a match on remote node and
2015*7c478bd9Sstevel@tonic-gate  * hwaddr.  The path ref_cnt must be greater than zero or the path
2016*7c478bd9Sstevel@tonic-gate  * is in the process of being removed.
2017*7c478bd9Sstevel@tonic-gate  *
2018*7c478bd9Sstevel@tonic-gate  * Acquire the path lock and increment the path hold count.
2019*7c478bd9Sstevel@tonic-gate  */
2020*7c478bd9Sstevel@tonic-gate static path_t *
2021*7c478bd9Sstevel@tonic-gate lookup_path(char *adapter_devname, int adapter_instance,
2022*7c478bd9Sstevel@tonic-gate     rsm_node_id_t remote_node, rsm_addr_t hwaddr)
2023*7c478bd9Sstevel@tonic-gate {
2024*7c478bd9Sstevel@tonic-gate 	path_t		*current;
2025*7c478bd9Sstevel@tonic-gate 	adapter_t	*adapter;
2026*7c478bd9Sstevel@tonic-gate 
2027*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_path enter\n"));
2028*7c478bd9Sstevel@tonic-gate 
2029*7c478bd9Sstevel@tonic-gate 	adapter = rsmka_lookup_adapter(adapter_devname, adapter_instance);
2030*7c478bd9Sstevel@tonic-gate 	ASSERT(adapter != NULL);
2031*7c478bd9Sstevel@tonic-gate 
2032*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter->listhead->mutex);
2033*7c478bd9Sstevel@tonic-gate 
2034*7c478bd9Sstevel@tonic-gate 	/* start at the list head */
2035*7c478bd9Sstevel@tonic-gate 	current = adapter->next_path;
2036*7c478bd9Sstevel@tonic-gate 
2037*7c478bd9Sstevel@tonic-gate 	while (current != NULL) {
2038*7c478bd9Sstevel@tonic-gate 		if ((current->remote_node == remote_node) &&
2039*7c478bd9Sstevel@tonic-gate 		    (current->remote_hwaddr == hwaddr) &&
2040*7c478bd9Sstevel@tonic-gate 		    (current->ref_cnt > 0))
2041*7c478bd9Sstevel@tonic-gate 			break;
2042*7c478bd9Sstevel@tonic-gate 		else
2043*7c478bd9Sstevel@tonic-gate 			current = current->next_path;
2044*7c478bd9Sstevel@tonic-gate 	}
2045*7c478bd9Sstevel@tonic-gate 	if (current != NULL) {
2046*7c478bd9Sstevel@tonic-gate 		mutex_enter(&current->mutex);
2047*7c478bd9Sstevel@tonic-gate 		PATH_HOLD_NOLOCK(current);
2048*7c478bd9Sstevel@tonic-gate 	}
2049*7c478bd9Sstevel@tonic-gate 
2050*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter->listhead->mutex);
2051*7c478bd9Sstevel@tonic-gate 	ADAPTER_RELE(adapter);
2052*7c478bd9Sstevel@tonic-gate 
2053*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_path done\n"));
2054*7c478bd9Sstevel@tonic-gate 
2055*7c478bd9Sstevel@tonic-gate 	return (current);
2056*7c478bd9Sstevel@tonic-gate }
2057*7c478bd9Sstevel@tonic-gate 
2058*7c478bd9Sstevel@tonic-gate /*
2059*7c478bd9Sstevel@tonic-gate  * This interface is similar to lookup_path but takes only the local
2060*7c478bd9Sstevel@tonic-gate  * adapter name, instance and remote adapters hwaddr to identify the
2061*7c478bd9Sstevel@tonic-gate  * path. This is used in the interrupt handler routines where nodeid
2062*7c478bd9Sstevel@tonic-gate  * is not always available.
2063*7c478bd9Sstevel@tonic-gate  */
2064*7c478bd9Sstevel@tonic-gate path_t *
2065*7c478bd9Sstevel@tonic-gate rsm_find_path(char *adapter_devname, int adapter_instance, rsm_addr_t hwaddr)
2066*7c478bd9Sstevel@tonic-gate {
2067*7c478bd9Sstevel@tonic-gate 	path_t		*current;
2068*7c478bd9Sstevel@tonic-gate 	adapter_t	*adapter;
2069*7c478bd9Sstevel@tonic-gate 
2070*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsm_find_path enter\n"));
2071*7c478bd9Sstevel@tonic-gate 
2072*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2073*7c478bd9Sstevel@tonic-gate 	    "rsm_find_path:adapter=%s:%d,rem=%llx\n",
2074*7c478bd9Sstevel@tonic-gate 	    adapter_devname, adapter_instance, hwaddr));
2075*7c478bd9Sstevel@tonic-gate 
2076*7c478bd9Sstevel@tonic-gate 	adapter = rsmka_lookup_adapter(adapter_devname, adapter_instance);
2077*7c478bd9Sstevel@tonic-gate 
2078*7c478bd9Sstevel@tonic-gate 	/*
2079*7c478bd9Sstevel@tonic-gate 	 * its possible that we are here due to an interrupt but the adapter
2080*7c478bd9Sstevel@tonic-gate 	 * has been removed after we received the callback.
2081*7c478bd9Sstevel@tonic-gate 	 */
2082*7c478bd9Sstevel@tonic-gate 	if (adapter == NULL)
2083*7c478bd9Sstevel@tonic-gate 		return (NULL);
2084*7c478bd9Sstevel@tonic-gate 
2085*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter->listhead->mutex);
2086*7c478bd9Sstevel@tonic-gate 
2087*7c478bd9Sstevel@tonic-gate 	/* start at the list head */
2088*7c478bd9Sstevel@tonic-gate 	current = adapter->next_path;
2089*7c478bd9Sstevel@tonic-gate 
2090*7c478bd9Sstevel@tonic-gate 	while (current != NULL) {
2091*7c478bd9Sstevel@tonic-gate 		if ((current->remote_hwaddr == hwaddr) &&
2092*7c478bd9Sstevel@tonic-gate 		    (current->ref_cnt > 0))
2093*7c478bd9Sstevel@tonic-gate 			break;
2094*7c478bd9Sstevel@tonic-gate 		else
2095*7c478bd9Sstevel@tonic-gate 			current = current->next_path;
2096*7c478bd9Sstevel@tonic-gate 	}
2097*7c478bd9Sstevel@tonic-gate 	if (current != NULL) {
2098*7c478bd9Sstevel@tonic-gate 		mutex_enter(&current->mutex);
2099*7c478bd9Sstevel@tonic-gate 		PATH_HOLD_NOLOCK(current);
2100*7c478bd9Sstevel@tonic-gate 	}
2101*7c478bd9Sstevel@tonic-gate 
2102*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter->listhead->mutex);
2103*7c478bd9Sstevel@tonic-gate 
2104*7c478bd9Sstevel@tonic-gate 	rsmka_release_adapter(adapter);
2105*7c478bd9Sstevel@tonic-gate 
2106*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsm_find_path done\n"));
2107*7c478bd9Sstevel@tonic-gate 
2108*7c478bd9Sstevel@tonic-gate 	return (current);
2109*7c478bd9Sstevel@tonic-gate }
2110*7c478bd9Sstevel@tonic-gate 
2111*7c478bd9Sstevel@tonic-gate 
2112*7c478bd9Sstevel@tonic-gate /*
2113*7c478bd9Sstevel@tonic-gate  * Add the path to the head of the (per adapter) list of paths
2114*7c478bd9Sstevel@tonic-gate  */
2115*7c478bd9Sstevel@tonic-gate static void
2116*7c478bd9Sstevel@tonic-gate link_path(path_t *path)
2117*7c478bd9Sstevel@tonic-gate {
2118*7c478bd9Sstevel@tonic-gate 
2119*7c478bd9Sstevel@tonic-gate 	adapter_t *adapter = path->local_adapter;
2120*7c478bd9Sstevel@tonic-gate 	path_t *first_path;
2121*7c478bd9Sstevel@tonic-gate 
2122*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_path enter\n"));
2123*7c478bd9Sstevel@tonic-gate 
2124*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter_listhead_base.listlock);
2125*7c478bd9Sstevel@tonic-gate 
2126*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter->listhead->mutex);
2127*7c478bd9Sstevel@tonic-gate 
2128*7c478bd9Sstevel@tonic-gate 	first_path = adapter->next_path;
2129*7c478bd9Sstevel@tonic-gate 	adapter->next_path = path;
2130*7c478bd9Sstevel@tonic-gate 	path->next_path = first_path;
2131*7c478bd9Sstevel@tonic-gate 
2132*7c478bd9Sstevel@tonic-gate 	adapter->listhead->path_count++;
2133*7c478bd9Sstevel@tonic-gate 
2134*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter->listhead->mutex);
2135*7c478bd9Sstevel@tonic-gate 
2136*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter_listhead_base.listlock);
2137*7c478bd9Sstevel@tonic-gate 
2138*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_path done\n"));
2139*7c478bd9Sstevel@tonic-gate }
2140*7c478bd9Sstevel@tonic-gate 
2141*7c478bd9Sstevel@tonic-gate /*
2142*7c478bd9Sstevel@tonic-gate  * Search the per-adapter list of paths for the specified path, beginning
2143*7c478bd9Sstevel@tonic-gate  * at the head of the list.  Unlink the path and free the descriptor
2144*7c478bd9Sstevel@tonic-gate  * memory.
2145*7c478bd9Sstevel@tonic-gate  */
2146*7c478bd9Sstevel@tonic-gate static void
/*
 * Unlink a path from its local adapter's path list and free all of its
 * resources (receive taskq, message-buffer queue, mutex and cvs).
 *
 * NOTE(review): the list walk below assumes the path is present on the
 * adapter's list; if no entry matched, `current' would be NULL and the
 * unlink would dereference it -- confirm all callers guarantee
 * membership before calling destroy_path().
 */
destroy_path(path_t *path)
{

	adapter_t *adapter = path->local_adapter;
	path_t *prev, *current;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_path enter\n"));

	/* serialize against listhead creation/teardown */
	mutex_enter(&adapter_listhead_base.listlock);

	/* protects the adapter's path list and path_count */
	mutex_enter(&path->local_adapter->listhead->mutex);
	/* no one may still hold a reference to the path being destroyed */
	ASSERT(path->ref_cnt == 0);

	/* start at the list head */
	prev = NULL;
	current =  adapter->next_path;

	/* locate the list entry matching this path's remote node/hwaddr */
	while (current != NULL) {
		if (path->remote_node == current->remote_node &&
		    path->remote_hwaddr == current->remote_hwaddr)
			break;
		else {
			prev = current;
			current = current->next_path;
		}
	}

	/* unlink `current' from the singly linked path list */
	if (prev == NULL)
		adapter->next_path = current->next_path;
	else
		prev->next_path = current->next_path;

	path->local_adapter->listhead->path_count--;

	mutex_exit(&path->local_adapter->listhead->mutex);

	mutex_exit(&adapter_listhead_base.listlock);

	/* no further receive processing: tear down the receive taskq */
	taskq_destroy(path->recv_taskq);

	kmem_free(path->msgbuf_queue,
	    RSMIPC_MAX_MESSAGES * sizeof (msgbuf_elem_t));

	/* `current' is the matched list entry (presumably `path' itself) */
	mutex_destroy(&current->mutex);
	cv_destroy(&current->sendq_token.sendq_cv);
	cv_destroy(&path->hold_cv);
	kmem_free(current, sizeof (path_t));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_path done\n"));
}
2197*7c478bd9Sstevel@tonic-gate 
2198*7c478bd9Sstevel@tonic-gate void
2199*7c478bd9Sstevel@tonic-gate rsmka_enqueue_msgbuf(path_t *path, void *data)
2200*7c478bd9Sstevel@tonic-gate {
2201*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2202*7c478bd9Sstevel@tonic-gate 	    "rsmka_enqueue_msgbuf enter\n"));
2203*7c478bd9Sstevel@tonic-gate 
2204*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&path->mutex));
2205*7c478bd9Sstevel@tonic-gate 
2206*7c478bd9Sstevel@tonic-gate 	ASSERT(path->msgbuf_cnt < RSMIPC_MAX_MESSAGES);
2207*7c478bd9Sstevel@tonic-gate 
2208*7c478bd9Sstevel@tonic-gate 	/* increment the count and advance the tail */
2209*7c478bd9Sstevel@tonic-gate 
2210*7c478bd9Sstevel@tonic-gate 	path->msgbuf_cnt++;
2211*7c478bd9Sstevel@tonic-gate 
2212*7c478bd9Sstevel@tonic-gate 	if (path->msgbuf_tail == RSMIPC_MAX_MESSAGES - 1) {
2213*7c478bd9Sstevel@tonic-gate 		path->msgbuf_tail = 0;
2214*7c478bd9Sstevel@tonic-gate 	} else {
2215*7c478bd9Sstevel@tonic-gate 		path->msgbuf_tail++;
2216*7c478bd9Sstevel@tonic-gate 	}
2217*7c478bd9Sstevel@tonic-gate 
2218*7c478bd9Sstevel@tonic-gate 	path->msgbuf_queue[path->msgbuf_tail].active = B_TRUE;
2219*7c478bd9Sstevel@tonic-gate 
2220*7c478bd9Sstevel@tonic-gate 	bcopy(data, &(path->msgbuf_queue[path->msgbuf_tail].msg),
2221*7c478bd9Sstevel@tonic-gate 	    sizeof (rsmipc_request_t));
2222*7c478bd9Sstevel@tonic-gate 
2223*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2224*7c478bd9Sstevel@tonic-gate 	    "rsmka_enqueue_msgbuf done\n"));
2225*7c478bd9Sstevel@tonic-gate 
2226*7c478bd9Sstevel@tonic-gate }
2227*7c478bd9Sstevel@tonic-gate 
2228*7c478bd9Sstevel@tonic-gate /*
2229*7c478bd9Sstevel@tonic-gate  * get the head of the queue using rsmka_gethead_msgbuf and then call
2230*7c478bd9Sstevel@tonic-gate  * rsmka_dequeue_msgbuf to remove it.
2231*7c478bd9Sstevel@tonic-gate  */
2232*7c478bd9Sstevel@tonic-gate void
2233*7c478bd9Sstevel@tonic-gate rsmka_dequeue_msgbuf(path_t *path)
2234*7c478bd9Sstevel@tonic-gate {
2235*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2236*7c478bd9Sstevel@tonic-gate 	    "rsmka_dequeue_msgbuf enter\n"));
2237*7c478bd9Sstevel@tonic-gate 
2238*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&path->mutex));
2239*7c478bd9Sstevel@tonic-gate 
2240*7c478bd9Sstevel@tonic-gate 	if (path->msgbuf_cnt == 0)
2241*7c478bd9Sstevel@tonic-gate 		return;
2242*7c478bd9Sstevel@tonic-gate 
2243*7c478bd9Sstevel@tonic-gate 	path->msgbuf_cnt--;
2244*7c478bd9Sstevel@tonic-gate 
2245*7c478bd9Sstevel@tonic-gate 	path->msgbuf_queue[path->msgbuf_head].active = B_FALSE;
2246*7c478bd9Sstevel@tonic-gate 
2247*7c478bd9Sstevel@tonic-gate 	if (path->msgbuf_head == RSMIPC_MAX_MESSAGES - 1) {
2248*7c478bd9Sstevel@tonic-gate 		path->msgbuf_head = 0;
2249*7c478bd9Sstevel@tonic-gate 	} else {
2250*7c478bd9Sstevel@tonic-gate 		path->msgbuf_head++;
2251*7c478bd9Sstevel@tonic-gate 	}
2252*7c478bd9Sstevel@tonic-gate 
2253*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2254*7c478bd9Sstevel@tonic-gate 	    "rsmka_dequeue_msgbuf done\n"));
2255*7c478bd9Sstevel@tonic-gate 
2256*7c478bd9Sstevel@tonic-gate }
2257*7c478bd9Sstevel@tonic-gate 
2258*7c478bd9Sstevel@tonic-gate msgbuf_elem_t *
2259*7c478bd9Sstevel@tonic-gate rsmka_gethead_msgbuf(path_t *path)
2260*7c478bd9Sstevel@tonic-gate {
2261*7c478bd9Sstevel@tonic-gate 	msgbuf_elem_t	*head;
2262*7c478bd9Sstevel@tonic-gate 
2263*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&path->mutex));
2264*7c478bd9Sstevel@tonic-gate 
2265*7c478bd9Sstevel@tonic-gate 	if (path->msgbuf_cnt == 0)
2266*7c478bd9Sstevel@tonic-gate 		return (NULL);
2267*7c478bd9Sstevel@tonic-gate 
2268*7c478bd9Sstevel@tonic-gate 	head = &path->msgbuf_queue[path->msgbuf_head];
2269*7c478bd9Sstevel@tonic-gate 
2270*7c478bd9Sstevel@tonic-gate 	return (head);
2271*7c478bd9Sstevel@tonic-gate 
2272*7c478bd9Sstevel@tonic-gate }
/*
 * Called by rsm_connect which needs the node id corresponding to a
 * remote adapter.  A search is done through the paths for the local
 * adapter for a match on the specified remote hardware address.
 */
2278*7c478bd9Sstevel@tonic-gate rsm_node_id_t
2279*7c478bd9Sstevel@tonic-gate get_remote_nodeid(adapter_t *adapter, rsm_addr_t remote_hwaddr)
2280*7c478bd9Sstevel@tonic-gate {
2281*7c478bd9Sstevel@tonic-gate 
2282*7c478bd9Sstevel@tonic-gate 	rsm_node_id_t remote_node;
2283*7c478bd9Sstevel@tonic-gate 	path_t	   *current = adapter->next_path;
2284*7c478bd9Sstevel@tonic-gate 
2285*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_nodeid enter\n"));
2286*7c478bd9Sstevel@tonic-gate 
2287*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter->listhead->mutex);
2288*7c478bd9Sstevel@tonic-gate 	while (current != NULL) {
2289*7c478bd9Sstevel@tonic-gate 		if (current->remote_hwaddr == remote_hwaddr) {
2290*7c478bd9Sstevel@tonic-gate 			remote_node = current->remote_node;
2291*7c478bd9Sstevel@tonic-gate 			break;
2292*7c478bd9Sstevel@tonic-gate 		}
2293*7c478bd9Sstevel@tonic-gate 		current = current->next_path;
2294*7c478bd9Sstevel@tonic-gate 	}
2295*7c478bd9Sstevel@tonic-gate 
2296*7c478bd9Sstevel@tonic-gate 	if (current == NULL)
2297*7c478bd9Sstevel@tonic-gate 		remote_node = (rsm_node_id_t)-1;
2298*7c478bd9Sstevel@tonic-gate 
2299*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter->listhead->mutex);
2300*7c478bd9Sstevel@tonic-gate 
2301*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_nodeid done\n"));
2302*7c478bd9Sstevel@tonic-gate 
2303*7c478bd9Sstevel@tonic-gate 	return (remote_node);
2304*7c478bd9Sstevel@tonic-gate }
2305*7c478bd9Sstevel@tonic-gate 
2306*7c478bd9Sstevel@tonic-gate /*
2307*7c478bd9Sstevel@tonic-gate  * Called by rsm_connect which needs the hardware address of the
2308*7c478bd9Sstevel@tonic-gate  * remote adapter.  A search is done through the paths for the local
2309*7c478bd9Sstevel@tonic-gate  * adapter for a match on the specified remote node.
2310*7c478bd9Sstevel@tonic-gate  */
2311*7c478bd9Sstevel@tonic-gate rsm_addr_t
2312*7c478bd9Sstevel@tonic-gate get_remote_hwaddr(adapter_t *adapter, rsm_node_id_t remote_node)
2313*7c478bd9Sstevel@tonic-gate {
2314*7c478bd9Sstevel@tonic-gate 
2315*7c478bd9Sstevel@tonic-gate 	rsm_addr_t remote_hwaddr;
2316*7c478bd9Sstevel@tonic-gate 	path_t	   *current = adapter->next_path;
2317*7c478bd9Sstevel@tonic-gate 
2318*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_hwaddr enter\n"));
2319*7c478bd9Sstevel@tonic-gate 
2320*7c478bd9Sstevel@tonic-gate 	mutex_enter(&adapter->listhead->mutex);
2321*7c478bd9Sstevel@tonic-gate 	while (current != NULL) {
2322*7c478bd9Sstevel@tonic-gate 		if (current->remote_node == remote_node) {
2323*7c478bd9Sstevel@tonic-gate 			remote_hwaddr = current->remote_hwaddr;
2324*7c478bd9Sstevel@tonic-gate 			break;
2325*7c478bd9Sstevel@tonic-gate 		}
2326*7c478bd9Sstevel@tonic-gate 		current = current->next_path;
2327*7c478bd9Sstevel@tonic-gate 	}
2328*7c478bd9Sstevel@tonic-gate 	if (current == NULL)
2329*7c478bd9Sstevel@tonic-gate 		remote_hwaddr = -1;
2330*7c478bd9Sstevel@tonic-gate 	mutex_exit(&adapter->listhead->mutex);
2331*7c478bd9Sstevel@tonic-gate 
2332*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_remote_hwaddr done\n"));
2333*7c478bd9Sstevel@tonic-gate 
2334*7c478bd9Sstevel@tonic-gate 	return (remote_hwaddr);
2335*7c478bd9Sstevel@tonic-gate }
2336*7c478bd9Sstevel@tonic-gate /*
2337*7c478bd9Sstevel@tonic-gate  * IPC UTILITY FUNCTIONS
2338*7c478bd9Sstevel@tonic-gate  */
2339*7c478bd9Sstevel@tonic-gate 
2340*7c478bd9Sstevel@tonic-gate 
2341*7c478bd9Sstevel@tonic-gate /*
2342*7c478bd9Sstevel@tonic-gate  * If an entry exists, return with the ipc_info_lock held
2343*7c478bd9Sstevel@tonic-gate  */
static ipc_info_t *
lookup_ipc_info(rsm_node_id_t remote_node)
{
	ipc_info_t  *ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_ipc_info enter\n"));

	mutex_enter(&ipc_info_lock);

	ipc_info = ipc_info_head;
	if (ipc_info == NULL) {
		/* empty list: release the lock and report no entry */
		mutex_exit(&ipc_info_lock);
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "lookup_ipc_info done: ipc_info is NULL\n"));
		return (NULL);
	}

	/*
	 * Walk the singly linked list for the requested node.  If the
	 * walk falls off the end, ipc_info is NULL and the lock has
	 * already been dropped; otherwise the match is returned with
	 * ipc_info_lock still held -- the caller must release it.
	 */
	while (ipc_info->remote_node != remote_node) {
		ipc_info = ipc_info->next;
		if (ipc_info == NULL) {
			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
			    "lookup_ipc_info: ipc_info not found\n"));
			mutex_exit(&ipc_info_lock);
			break;
		}
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "lookup_ipc_info done\n"));

	return (ipc_info);
}
2375*7c478bd9Sstevel@tonic-gate 
2376*7c478bd9Sstevel@tonic-gate /*
2377*7c478bd9Sstevel@tonic-gate  * Create an ipc_info descriptor and return with ipc_info_lock held
2378*7c478bd9Sstevel@tonic-gate  */
2379*7c478bd9Sstevel@tonic-gate static ipc_info_t *
2380*7c478bd9Sstevel@tonic-gate init_ipc_info(rsm_node_id_t remote_node, boolean_t state)
2381*7c478bd9Sstevel@tonic-gate {
2382*7c478bd9Sstevel@tonic-gate 	ipc_info_t *ipc_info;
2383*7c478bd9Sstevel@tonic-gate 
2384*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_ipc_info enter\n"));
2385*7c478bd9Sstevel@tonic-gate 
2386*7c478bd9Sstevel@tonic-gate 	/*
2387*7c478bd9Sstevel@tonic-gate 	 * allocate an ipc_info descriptor and add it to a
2388*7c478bd9Sstevel@tonic-gate 	 * singly linked list
2389*7c478bd9Sstevel@tonic-gate 	 */
2390*7c478bd9Sstevel@tonic-gate 
2391*7c478bd9Sstevel@tonic-gate 	ipc_info = kmem_zalloc(sizeof (ipc_info_t), KM_SLEEP);
2392*7c478bd9Sstevel@tonic-gate 	ipc_info->remote_node = remote_node;
2393*7c478bd9Sstevel@tonic-gate 	ipc_info->node_is_alive = state;
2394*7c478bd9Sstevel@tonic-gate 
2395*7c478bd9Sstevel@tonic-gate 	mutex_enter(&ipc_info_lock);
2396*7c478bd9Sstevel@tonic-gate 	if (ipc_info_head == NULL) {
2397*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG,
2398*7c478bd9Sstevel@tonic-gate 		    "init_ipc_info:ipc_info_head = %lx\n", ipc_info));
2399*7c478bd9Sstevel@tonic-gate 		ipc_info_head = ipc_info;
2400*7c478bd9Sstevel@tonic-gate 		ipc_info->next = NULL;
2401*7c478bd9Sstevel@tonic-gate 	} else {
2402*7c478bd9Sstevel@tonic-gate 		ipc_info->next = ipc_info_head;
2403*7c478bd9Sstevel@tonic-gate 		ipc_info_head = ipc_info;
2404*7c478bd9Sstevel@tonic-gate 	}
2405*7c478bd9Sstevel@tonic-gate 
2406*7c478bd9Sstevel@tonic-gate 	ipc_info->remote_node = remote_node;
2407*7c478bd9Sstevel@tonic-gate 
2408*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "init_ipc_info done\n"));
2409*7c478bd9Sstevel@tonic-gate 
2410*7c478bd9Sstevel@tonic-gate 	return (ipc_info);
2411*7c478bd9Sstevel@tonic-gate }
2412*7c478bd9Sstevel@tonic-gate 
static void
destroy_ipc_info(ipc_info_t *ipc_info)
{
	ipc_info_t *current = ipc_info_head;
	ipc_info_t *prev;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_ipc_info enter\n"));

	ASSERT(MUTEX_HELD(&ipc_info_lock));

	/*
	 * Locate ipc_info on the list, tracking its predecessor.
	 * NOTE(review): if ipc_info were not on the list this walk would
	 * dereference NULL before the ASSERT below could fire -- callers
	 * must pass an entry obtained from this list.
	 */
	while (current != ipc_info) {
		prev = current;
		current = current->next;
	}
	ASSERT(current != NULL);

	/* unlink; `prev' is only read (and only set) when not at the head */
	if (current != ipc_info_head)
		prev->next = current->next;
	else
		ipc_info_head = current->next;

	kmem_free(current, sizeof (ipc_info_t));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "destroy_ipc_info done\n"));

}
2439*7c478bd9Sstevel@tonic-gate 
2440*7c478bd9Sstevel@tonic-gate /*
2441*7c478bd9Sstevel@tonic-gate  * Sendq tokens are kept on a circular list.  If tokens A, B, C, & D are
2442*7c478bd9Sstevel@tonic-gate  * on the list headed by ipc_info, then ipc_info points to A, A points to
2443*7c478bd9Sstevel@tonic-gate  * D, D to C, C to B, and B to A.
2444*7c478bd9Sstevel@tonic-gate  */
static void
link_sendq_token(sendq_token_t *token, rsm_node_id_t remote_node)
{
	ipc_info_t *ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_sendq_token enter\n"));

	/*
	 * Both lookup_ipc_info() (on success) and init_ipc_info() return
	 * with ipc_info_lock held; it is released at the bottom.
	 */
	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		ipc_info = init_ipc_info(remote_node, B_FALSE);
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: new ipc_info = %lx\n", ipc_info));
	}
	else
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: ipc_info = %lx\n", ipc_info));

	if (ipc_info->token_list == NULL) {
		/* first token: a circular list of one, also current_token */
		ipc_info->token_list = token;
		ipc_info->current_token = token;
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: current = %lx\n", token));
		token->next = token;
	} else {
		/*
		 * Splice the new token in just after the one token_list
		 * points at, then make the new token the list anchor
		 * (see the circular-list layout described above).
		 */
		DBG_PRINTF((category, RSM_DEBUG,
		    "link_sendq_token: token = %lx\n", token));
		token->next = ipc_info->token_list->next;
		ipc_info->token_list->next = token;
		ipc_info->token_list = token;
	}


	mutex_exit(&ipc_info_lock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "link_sendq_token done\n"));

}
2482*7c478bd9Sstevel@tonic-gate 
2483*7c478bd9Sstevel@tonic-gate static void
static void
unlink_sendq_token(sendq_token_t *token, rsm_node_id_t remote_node)
{
	sendq_token_t *prev, *start,  *current;
	ipc_info_t *ipc_info;
	path_t *path = SQ_TOKEN_TO_PATH(token);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "unlink_sendq_token enter\n"));

	/* the owning path must be fully released before unlinking */
	ASSERT(path->ref_cnt == 0);

	/* on success lookup_ipc_info returns with ipc_info_lock held */
	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		DBG_PRINTF((category, RSM_DEBUG,
		    "ipc_info for %d not found\n", remote_node));
		return;
	}

	/*
	 * Walk the circular token list starting just past token_list.
	 * NOTE(review): assumes token_list is non-NULL here (at least one
	 * token linked); verify callers never unlink from an empty list.
	 */
	prev = ipc_info->token_list;
	start = current = ipc_info->token_list->next;

	for (;;) {
		if (current == token) {
			if (current->next != current) {
				/* more than one token: splice this one out */
				DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
				    "found token, removed it\n"));
				prev->next = token->next;
				if (ipc_info->token_list == token)
					ipc_info->token_list = prev;
				ipc_info->current_token = token->next;
			} else {
				/* list will be empty  */
				DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
				    "removed token, list empty\n"));
				ipc_info->token_list = NULL;
				ipc_info->current_token = NULL;
			}
			break;
		}
		prev = current;
		current = current->next;
		if (current == start) {
			/* wrapped all the way around without a match */
			DBG_PRINTF((category, RSM_DEBUG,
			    "unlink_sendq_token: token not found\n"));
			break;
		}
	}
	mutex_exit(&ipc_info_lock);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "unlink_sendq_token done\n"));
}
2535*7c478bd9Sstevel@tonic-gate 
2536*7c478bd9Sstevel@tonic-gate 
2537*7c478bd9Sstevel@tonic-gate void
2538*7c478bd9Sstevel@tonic-gate rele_sendq_token(sendq_token_t *token)
2539*7c478bd9Sstevel@tonic-gate {
2540*7c478bd9Sstevel@tonic-gate 	path_t *path;
2541*7c478bd9Sstevel@tonic-gate 
2542*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rele_sendq_token enter\n"));
2543*7c478bd9Sstevel@tonic-gate 
2544*7c478bd9Sstevel@tonic-gate 	path = SQ_TOKEN_TO_PATH(token);
2545*7c478bd9Sstevel@tonic-gate 	mutex_enter(&path->mutex);
2546*7c478bd9Sstevel@tonic-gate 	PATH_RELE_NOLOCK(path);
2547*7c478bd9Sstevel@tonic-gate 	SENDQ_TOKEN_RELE(path);
2548*7c478bd9Sstevel@tonic-gate 	mutex_exit(&path->mutex);
2549*7c478bd9Sstevel@tonic-gate 
2550*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rele_sendq_token done\n"));
2551*7c478bd9Sstevel@tonic-gate 
2552*7c478bd9Sstevel@tonic-gate }
2553*7c478bd9Sstevel@tonic-gate 
/*
 * A valid ipc token can only be returned if the remote node is alive.
 * Tokens are on a circular list.  Starting with the current token
 * search for a token with an endpoint in state RSM_PATH_ACTIVE.
 * rsmipc_send which calls rsmka_get_sendq_token expects that if there are
 * multiple paths available between a node-pair then consecutive calls from
 * a particular invocation of rsmipc_send will return a sendq that is
 * different from the one that was used in the previous iteration.  When
 * prev_used is NULL it indicates that this is the first iteration in a
 * specific rsmipc_send invocation.
 *
 * Updating the current token provides round robin selection and this
 * is done only in the first iteration i.e. when prev_used is NULL.
 */
sendq_token_t *
rsmka_get_sendq_token(rsm_node_id_t remote_node, sendq_token_t *prev_used)
{
	sendq_token_t *token, *first_token;
	path_t *path;
	ipc_info_t *ipc_info;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_get_sendq_token enter\n"));

	/* on success lookup_ipc_info returns with ipc_info_lock held */
	ipc_info = lookup_ipc_info(remote_node);
	if (ipc_info == NULL) {
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_get_sendq_token done: ipc_info is NULL\n"));
		return (NULL);
	}

	if (ipc_info->node_is_alive == B_TRUE) {
		token = first_token = ipc_info->current_token;
		if (token == NULL) {
			/* no tokens linked for this node */
			mutex_exit(&ipc_info_lock);
			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
			    "rsmka_get_sendq_token done: token=NULL\n"));
			return (NULL);
		}

		/*
		 * Scan the circular token list, starting at current_token,
		 * for a token whose path is ACTIVE and referenced and that
		 * differs from the token used in the caller's previous
		 * iteration (prev_used).
		 */
		for (;;) {
			path = SQ_TOKEN_TO_PATH(token);
			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
			    "path %lx\n", path));
			mutex_enter(&path->mutex);
			if (path->state != RSMKA_PATH_ACTIVE ||
			    path->ref_cnt == 0) {
				/* path unusable: keep scanning */
				mutex_exit(&path->mutex);
			} else {
				if (token != prev_used) {
					/* found a new token */
					break;
				}
				mutex_exit(&path->mutex);
			}

			token = token->next;
			if (token == first_token) {
				/*
				 * we didn't find a new token reuse prev_used
				 * if the corresponding path is still up
				 */
				if (prev_used) {
					path = SQ_TOKEN_TO_PATH(prev_used);
					DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
					    "path %lx\n", path));
					mutex_enter(&path->mutex);
					if (path->state != RSMKA_PATH_ACTIVE ||
					    path->ref_cnt == 0) {
						mutex_exit(&path->mutex);
					} else {
						token = prev_used;
						break;
					}
				}
				mutex_exit(&ipc_info_lock);
				DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
				    "rsmka_get_sendq_token: token=NULL\n"));
				return (NULL);
			}
		}

		/*
		 * The loop exits via break with path->mutex held for the
		 * selected token's path; take the holds before dropping it.
		 */
		PATH_HOLD_NOLOCK(path);
		SENDQ_TOKEN_HOLD(path);
		if (prev_used == NULL) {
			/* change current_token only the first time */
			ipc_info->current_token = token->next;
		}

		mutex_exit(&path->mutex);
		mutex_exit(&ipc_info_lock);

		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_get_sendq_token done\n"));
		return (token);
	} else {
		/* remote node is down: no token can be valid */
		mutex_exit(&ipc_info_lock);
		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
		    "rsmka_get_sendq_token done\n"));
		return (NULL);
	}
}
2656*7c478bd9Sstevel@tonic-gate 
2657*7c478bd9Sstevel@tonic-gate 
2658*7c478bd9Sstevel@tonic-gate 
/*
 * Create the rsmpi send queue for the given path and, on success, wake
 * any rsmipc_send() threads waiting for a sendq token.
 */
2661*7c478bd9Sstevel@tonic-gate static int
2662*7c478bd9Sstevel@tonic-gate create_ipc_sendq(path_t *path)
2663*7c478bd9Sstevel@tonic-gate {
2664*7c478bd9Sstevel@tonic-gate 	int		rval;
2665*7c478bd9Sstevel@tonic-gate 	sendq_token_t	*token;
2666*7c478bd9Sstevel@tonic-gate 	adapter_t 	*adapter;
2667*7c478bd9Sstevel@tonic-gate 	int64_t		srvc_offset;
2668*7c478bd9Sstevel@tonic-gate 
2669*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "create_ipc_sendq enter\n"));
2670*7c478bd9Sstevel@tonic-gate 
2671*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG, "create_ipc_sendq: path = %lx\n",
2672*7c478bd9Sstevel@tonic-gate 	    path));
2673*7c478bd9Sstevel@tonic-gate 
2674*7c478bd9Sstevel@tonic-gate 	adapter = path->local_adapter;
2675*7c478bd9Sstevel@tonic-gate 	token = &path->sendq_token;
2676*7c478bd9Sstevel@tonic-gate 
2677*7c478bd9Sstevel@tonic-gate 	srvc_offset = path->remote_hwaddr;
2678*7c478bd9Sstevel@tonic-gate 
2679*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
2680*7c478bd9Sstevel@tonic-gate 	    "create_ipc_sendq: srvc_offset = %lld\n",
2681*7c478bd9Sstevel@tonic-gate 	    srvc_offset));
2682*7c478bd9Sstevel@tonic-gate 
2683*7c478bd9Sstevel@tonic-gate 	rval = adapter->rsmpi_ops->rsm_sendq_create(adapter->rsmpi_handle,
2684*7c478bd9Sstevel@tonic-gate 	    path->remote_hwaddr,
2685*7c478bd9Sstevel@tonic-gate 	    (rsm_intr_service_t)(RSM_SERVICE+srvc_offset),
2686*7c478bd9Sstevel@tonic-gate 	    (rsm_intr_pri_t)RSM_PRI, (size_t)RSM_QUEUE_SZ,
2687*7c478bd9Sstevel@tonic-gate 	    RSM_INTR_SEND_Q_NO_FENCE,
2688*7c478bd9Sstevel@tonic-gate 	    RSM_RESOURCE_SLEEP, NULL, &token->rsmpi_sendq_handle);
2689*7c478bd9Sstevel@tonic-gate 	if (rval == RSM_SUCCESS) {
2690*7c478bd9Sstevel@tonic-gate 		/* rsmipc_send() may be waiting for a sendq_token */
2691*7c478bd9Sstevel@tonic-gate 		mutex_enter(&ipc_info_cvlock);
2692*7c478bd9Sstevel@tonic-gate 		cv_broadcast(&ipc_info_cv);
2693*7c478bd9Sstevel@tonic-gate 		mutex_exit(&ipc_info_cvlock);
2694*7c478bd9Sstevel@tonic-gate 	}
2695*7c478bd9Sstevel@tonic-gate 
2696*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG, "create_ipc_sendq: handle = %lx\n",
2697*7c478bd9Sstevel@tonic-gate 	    token->rsmpi_sendq_handle));
2698*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG, "create_ipc_sendq: rval = %d\n",
2699*7c478bd9Sstevel@tonic-gate 	    rval));
2700*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "create_ipc_sendq done\n"));
2701*7c478bd9Sstevel@tonic-gate 
2702*7c478bd9Sstevel@tonic-gate 	return (rval);
2703*7c478bd9Sstevel@tonic-gate }
2704*7c478bd9Sstevel@tonic-gate 
2705*7c478bd9Sstevel@tonic-gate 
2706*7c478bd9Sstevel@tonic-gate boolean_t
2707*7c478bd9Sstevel@tonic-gate rsmka_check_node_alive(rsm_node_id_t remote_node)
2708*7c478bd9Sstevel@tonic-gate {
2709*7c478bd9Sstevel@tonic-gate 	ipc_info_t *ipc_info;
2710*7c478bd9Sstevel@tonic-gate 
2711*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG, "rsmka_check_node_alive enter\n"));
2712*7c478bd9Sstevel@tonic-gate 
2713*7c478bd9Sstevel@tonic-gate 	ipc_info = lookup_ipc_info(remote_node);
2714*7c478bd9Sstevel@tonic-gate 	if (ipc_info == NULL) {
2715*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG,
2716*7c478bd9Sstevel@tonic-gate 		    "rsmka_check_node_alive done: ipc_info NULL\n"));
2717*7c478bd9Sstevel@tonic-gate 		return (B_FALSE);
2718*7c478bd9Sstevel@tonic-gate 	}
2719*7c478bd9Sstevel@tonic-gate 
2720*7c478bd9Sstevel@tonic-gate 	if (ipc_info->node_is_alive == B_TRUE) {
2721*7c478bd9Sstevel@tonic-gate 		mutex_exit(&ipc_info_lock);
2722*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG,
2723*7c478bd9Sstevel@tonic-gate 		    "rsmka_check_node_alive done: node is alive\n"));
2724*7c478bd9Sstevel@tonic-gate 		return (B_TRUE);
2725*7c478bd9Sstevel@tonic-gate 	} else {
2726*7c478bd9Sstevel@tonic-gate 		mutex_exit(&ipc_info_lock);
2727*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG,
2728*7c478bd9Sstevel@tonic-gate 		    "rsmka_check_node_alive done: node is not alive\n"));
2729*7c478bd9Sstevel@tonic-gate 		return (B_FALSE);
2730*7c478bd9Sstevel@tonic-gate 	}
2731*7c478bd9Sstevel@tonic-gate }
2732*7c478bd9Sstevel@tonic-gate 
2733*7c478bd9Sstevel@tonic-gate 
2734*7c478bd9Sstevel@tonic-gate 
2735*7c478bd9Sstevel@tonic-gate 
2736*7c478bd9Sstevel@tonic-gate /*
2737*7c478bd9Sstevel@tonic-gate  *  TOPOLOGY IOCTL SUPPORT
2738*7c478bd9Sstevel@tonic-gate  */
2739*7c478bd9Sstevel@tonic-gate 
2740*7c478bd9Sstevel@tonic-gate static uint32_t
2741*7c478bd9Sstevel@tonic-gate get_topology_size(int mode)
2742*7c478bd9Sstevel@tonic-gate {
2743*7c478bd9Sstevel@tonic-gate 	uint32_t	topology_size;
2744*7c478bd9Sstevel@tonic-gate 	int		pointer_area_size;
2745*7c478bd9Sstevel@tonic-gate 	adapter_listhead_t	*listhead;
2746*7c478bd9Sstevel@tonic-gate 	int		total_num_of_adapters;
2747*7c478bd9Sstevel@tonic-gate 	int		total_num_of_paths;
2748*7c478bd9Sstevel@tonic-gate 
2749*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology_size enter\n"));
2750*7c478bd9Sstevel@tonic-gate 
2751*7c478bd9Sstevel@tonic-gate 	/*
2752*7c478bd9Sstevel@tonic-gate 	 * Find the total number of adapters and paths by adding up the
2753*7c478bd9Sstevel@tonic-gate 	 * individual adapter and path counts from all the listheads
2754*7c478bd9Sstevel@tonic-gate 	 */
2755*7c478bd9Sstevel@tonic-gate 	total_num_of_adapters = 0;
2756*7c478bd9Sstevel@tonic-gate 	total_num_of_paths = 0;
2757*7c478bd9Sstevel@tonic-gate 	listhead = adapter_listhead_base.next;
2758*7c478bd9Sstevel@tonic-gate 	while (listhead != NULL) {
2759*7c478bd9Sstevel@tonic-gate 		total_num_of_adapters += listhead->adapter_count;
2760*7c478bd9Sstevel@tonic-gate 		total_num_of_paths += listhead->path_count;
2761*7c478bd9Sstevel@tonic-gate 		listhead = listhead->next_listhead;
2762*7c478bd9Sstevel@tonic-gate 	}
2763*7c478bd9Sstevel@tonic-gate 
2764*7c478bd9Sstevel@tonic-gate #ifdef	_MULTI_DATAMODEL
2765*7c478bd9Sstevel@tonic-gate 	if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32)
2766*7c478bd9Sstevel@tonic-gate 		/*
2767*7c478bd9Sstevel@tonic-gate 		 * Add extra 4-bytes to make sure connections header
2768*7c478bd9Sstevel@tonic-gate 		 * is double-word aligned
2769*7c478bd9Sstevel@tonic-gate 		 */
2770*7c478bd9Sstevel@tonic-gate 		pointer_area_size =
2771*7c478bd9Sstevel@tonic-gate 		    (total_num_of_adapters + total_num_of_adapters%2) *
2772*7c478bd9Sstevel@tonic-gate 		    sizeof (caddr32_t);
2773*7c478bd9Sstevel@tonic-gate 	else
2774*7c478bd9Sstevel@tonic-gate 		pointer_area_size = total_num_of_adapters * sizeof (caddr_t);
2775*7c478bd9Sstevel@tonic-gate #else	/* _MULTI_DATAMODEL */
2776*7c478bd9Sstevel@tonic-gate 	mode = mode;
2777*7c478bd9Sstevel@tonic-gate 	pointer_area_size = total_num_of_adapters * sizeof (caddr_t);
2778*7c478bd9Sstevel@tonic-gate #endif	/* _MULTI_DATAMODEL */
2779*7c478bd9Sstevel@tonic-gate 
2780*7c478bd9Sstevel@tonic-gate 
2781*7c478bd9Sstevel@tonic-gate 	topology_size = sizeof (rsmka_topology_hdr_t) +
2782*7c478bd9Sstevel@tonic-gate 	    pointer_area_size +
2783*7c478bd9Sstevel@tonic-gate 	    (total_num_of_adapters * sizeof (rsmka_connections_hdr_t)) +
2784*7c478bd9Sstevel@tonic-gate 	    (total_num_of_paths * sizeof (rsmka_remote_cntlr_t));
2785*7c478bd9Sstevel@tonic-gate 
2786*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology_size done\n"));
2787*7c478bd9Sstevel@tonic-gate 
2788*7c478bd9Sstevel@tonic-gate 	return (topology_size);
2789*7c478bd9Sstevel@tonic-gate }
2790*7c478bd9Sstevel@tonic-gate 
2791*7c478bd9Sstevel@tonic-gate 
2792*7c478bd9Sstevel@tonic-gate 
/*
 * Fill bufp (a kernel buffer of at least get_topology_size(mode) bytes)
 * with the topology snapshot that will later be copied out to the user
 * buffer at address arg.  Layout written here:
 *   rsmka_topology_hdr_t,
 *   a pointer area holding the USER-SPACE address of each adapter's
 *   connection record (caddr32_t slots for ILP32 callers),
 *   then per adapter: rsmka_connections_hdr_t followed by one
 *   rsmka_remote_cntlr_t per path on that adapter.
 * The user addresses are computed as arg + (offset of the record within
 * this kernel buffer), so the snapshot is self-describing once copied out.
 * Caller (rsmka_topology_ioctl) holds adapter_listhead_base.listlock.
 */
static void
get_topology(caddr_t arg, char *bufp, int mode)
{

	rsmka_topology_t	*tp = (rsmka_topology_t *)bufp;
	adapter_listhead_t	*listhead;
	adapter_t		*adapter;
	path_t			*path;
	int			cntlr = 0;
	rsmka_connections_t	*connection;
	rsmka_remote_cntlr_t	*rem_cntlr;
	int			total_num_of_adapters;

#ifdef	_MULTI_DATAMODEL
	/* 32-bit view of the same buffer, for ILP32 callers */
	rsmka_topology32_t	*tp32 = (rsmka_topology32_t *)bufp;
#else
	mode = mode;	/* reference mode to silence unused-argument lint */
#endif	/* _MULTI_DATAMODEL */

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology enter\n"));

	/*
	 * Find the total number of adapters by adding up the
	 * individual adapter counts from all the listheads
	 */
	total_num_of_adapters = 0;
	listhead = adapter_listhead_base.next;
	while (listhead != NULL) {
		total_num_of_adapters += listhead->adapter_count;
		listhead = listhead->next_listhead;
	}

	/* fill topology header and adjust bufp */
	tp->topology_hdr.local_nodeid = my_nodeid;
	tp->topology_hdr.local_cntlr_count = total_num_of_adapters;
	bufp = (char *)&tp->connections[0];

	/* leave room for connection pointer area */
#ifdef	_MULTI_DATAMODEL
	if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32)
		/* make sure bufp is double-word aligned */
		bufp += (total_num_of_adapters + total_num_of_adapters%2) *
		    sizeof (caddr32_t);
	else
		bufp += total_num_of_adapters * sizeof (caddr_t);
#else	/* _MULTI_DATAMODEL */
	bufp += total_num_of_adapters * sizeof (caddr_t);
#endif	/* _MULTI_DATAMODEL */

	/* fill topology from the adapter and path data */
	listhead = adapter_listhead_base.next;
	while (listhead != NULL) {
		adapter = listhead->next_adapter;
		while (adapter != NULL) {
			/* fill in user based connection pointer */
#ifdef	_MULTI_DATAMODEL
			if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
				/*
				 * user address of this record = user base
				 * (low 32 bits of arg) + offset of bufp
				 * within the kernel buffer
				 */
				ulong_t delta = (ulong_t)bufp - (ulong_t)tp32;
				caddr32_t userbase = (caddr32_t)((ulong_t)arg &
				    0xffffffff);
				tp32->connections[cntlr++] = userbase + delta;
			} else {
				tp->connections[cntlr++] = arg +
				    (ulong_t)bufp -
				    (ulong_t)tp;
			}
#else	/* _MULTI_DATAMODEL */
				tp->connections[cntlr++] = arg +
				    (ulong_t)bufp -
				    (ulong_t)tp;
#endif	/* _MULTI_DATAMODEL */
			/* per-adapter connection header, e.g. "<dev><inst>" */
			connection = (rsmka_connections_t *)bufp;
			(void) snprintf(connection->hdr.cntlr_name,
			    MAXNAMELEN, "%s%d",
			    listhead->adapter_devname,
			    adapter->instance);
			connection->hdr.local_hwaddr = adapter->hwaddr;
			connection->hdr.remote_cntlr_count = 0;
			bufp += sizeof (rsmka_connections_hdr_t);
			/* one remote-controller record per path */
			rem_cntlr = (rsmka_remote_cntlr_t *)bufp;
			path = adapter->next_path;
			while (path != NULL) {
				connection->hdr.remote_cntlr_count++;
				rem_cntlr->remote_nodeid = path->remote_node;
				(void) snprintf(rem_cntlr->remote_cntlrname,
				    MAXNAMELEN, "%s%d",
				    listhead->adapter_devname,
				    path->remote_devinst);
				rem_cntlr->remote_hwaddr = path->remote_hwaddr;
				rem_cntlr->connection_state = path->state;
				++rem_cntlr;
				path = path->next_path;
			}
			adapter = adapter->next;
			/* advance bufp past the records just written */
			bufp = (char *)rem_cntlr;
		}
		listhead = listhead->next_listhead;
	}

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "get_topology done\n"));

}
2895*7c478bd9Sstevel@tonic-gate 
2896*7c478bd9Sstevel@tonic-gate 
/*
 * Called from rsm_ioctl() in rsm.c
 * Make sure there is no possibility of blocking while holding
 * adapter_listhead_base.listlock
 */
2902*7c478bd9Sstevel@tonic-gate int
2903*7c478bd9Sstevel@tonic-gate rsmka_topology_ioctl(caddr_t arg, int cmd, int mode)
2904*7c478bd9Sstevel@tonic-gate {
2905*7c478bd9Sstevel@tonic-gate 	uint32_t	topology_size;
2906*7c478bd9Sstevel@tonic-gate 	uint32_t 	request_size;
2907*7c478bd9Sstevel@tonic-gate 	char		*bufp;
2908*7c478bd9Sstevel@tonic-gate 	int		error = RSM_SUCCESS;
2909*7c478bd9Sstevel@tonic-gate 	size_t		max_toposize;
2910*7c478bd9Sstevel@tonic-gate 
2911*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
2912*7c478bd9Sstevel@tonic-gate 	    "rsmka_topology_ioctl enter\n"));
2913*7c478bd9Sstevel@tonic-gate 
2914*7c478bd9Sstevel@tonic-gate 	switch (cmd) {
2915*7c478bd9Sstevel@tonic-gate 	case RSM_IOCTL_TOPOLOGY_SIZE:
2916*7c478bd9Sstevel@tonic-gate 		mutex_enter(&adapter_listhead_base.listlock);
2917*7c478bd9Sstevel@tonic-gate 		topology_size = get_topology_size(mode);
2918*7c478bd9Sstevel@tonic-gate 		mutex_exit(&adapter_listhead_base.listlock);
2919*7c478bd9Sstevel@tonic-gate 		if (ddi_copyout((caddr_t)&topology_size,
2920*7c478bd9Sstevel@tonic-gate 		    (caddr_t)arg, sizeof (uint32_t), mode))
2921*7c478bd9Sstevel@tonic-gate 			error = RSMERR_BAD_ADDR;
2922*7c478bd9Sstevel@tonic-gate 		break;
2923*7c478bd9Sstevel@tonic-gate 	case RSM_IOCTL_TOPOLOGY_DATA:
2924*7c478bd9Sstevel@tonic-gate 		/*
2925*7c478bd9Sstevel@tonic-gate 		 * The size of the buffer which the caller has allocated
2926*7c478bd9Sstevel@tonic-gate 		 * is passed in.  If the size needed for the topology data
2927*7c478bd9Sstevel@tonic-gate 		 * is not sufficient, E2BIG is returned
2928*7c478bd9Sstevel@tonic-gate 		 */
2929*7c478bd9Sstevel@tonic-gate 		if (ddi_copyin(arg, &request_size, sizeof (uint32_t), mode)) {
2930*7c478bd9Sstevel@tonic-gate 			DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
2931*7c478bd9Sstevel@tonic-gate 			    "rsmka_topology_ioctl done: BAD_ADDR\n"));
2932*7c478bd9Sstevel@tonic-gate 			return (RSMERR_BAD_ADDR);
2933*7c478bd9Sstevel@tonic-gate 		}
2934*7c478bd9Sstevel@tonic-gate 		/* calculate the max size of the topology structure */
2935*7c478bd9Sstevel@tonic-gate 		max_toposize = sizeof (rsmka_topology_hdr_t) +
2936*7c478bd9Sstevel@tonic-gate 		    RSM_MAX_CTRL * (sizeof (caddr_t) +
2937*7c478bd9Sstevel@tonic-gate 			sizeof (rsmka_connections_hdr_t)) +
2938*7c478bd9Sstevel@tonic-gate 		    RSM_MAX_NODE * sizeof (rsmka_remote_cntlr_t);
2939*7c478bd9Sstevel@tonic-gate 
2940*7c478bd9Sstevel@tonic-gate 		if (request_size > max_toposize) { /* validate request_size */
2941*7c478bd9Sstevel@tonic-gate 			DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
2942*7c478bd9Sstevel@tonic-gate 			    "rsmka_topology_ioctl done: size too large\n"));
2943*7c478bd9Sstevel@tonic-gate 			return (EINVAL);
2944*7c478bd9Sstevel@tonic-gate 		}
2945*7c478bd9Sstevel@tonic-gate 		bufp = kmem_zalloc(request_size, KM_SLEEP);
2946*7c478bd9Sstevel@tonic-gate 		mutex_enter(&adapter_listhead_base.listlock);
2947*7c478bd9Sstevel@tonic-gate 		topology_size = get_topology_size(mode);
2948*7c478bd9Sstevel@tonic-gate 		if (request_size < topology_size) {
2949*7c478bd9Sstevel@tonic-gate 			kmem_free(bufp, request_size);
2950*7c478bd9Sstevel@tonic-gate 			mutex_exit(&adapter_listhead_base.listlock);
2951*7c478bd9Sstevel@tonic-gate 			DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
2952*7c478bd9Sstevel@tonic-gate 			    "rsmka_topology_ioctl done: E2BIG\n"));
2953*7c478bd9Sstevel@tonic-gate 			return (E2BIG);
2954*7c478bd9Sstevel@tonic-gate 		}
2955*7c478bd9Sstevel@tonic-gate 
2956*7c478bd9Sstevel@tonic-gate 		/* get the topology data and copyout to the caller */
2957*7c478bd9Sstevel@tonic-gate 		get_topology(arg, bufp, mode);
2958*7c478bd9Sstevel@tonic-gate 		mutex_exit(&adapter_listhead_base.listlock);
2959*7c478bd9Sstevel@tonic-gate 		if (ddi_copyout((caddr_t)bufp, (caddr_t)arg,
2960*7c478bd9Sstevel@tonic-gate 		    topology_size, mode))
2961*7c478bd9Sstevel@tonic-gate 			error = RSMERR_BAD_ADDR;
2962*7c478bd9Sstevel@tonic-gate 
2963*7c478bd9Sstevel@tonic-gate 		kmem_free(bufp, request_size);
2964*7c478bd9Sstevel@tonic-gate 		break;
2965*7c478bd9Sstevel@tonic-gate 	default:
2966*7c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG,
2967*7c478bd9Sstevel@tonic-gate 		    "rsmka_topology_ioctl: cmd not supported\n"));
2968*7c478bd9Sstevel@tonic-gate 		error = DDI_FAILURE;
2969*7c478bd9Sstevel@tonic-gate 	}
2970*7c478bd9Sstevel@tonic-gate 
2971*7c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category | RSM_IOCTL, RSM_DEBUG_VERBOSE,
2972*7c478bd9Sstevel@tonic-gate 	    "rsmka_topology_ioctl done: %d\n", error));
2973*7c478bd9Sstevel@tonic-gate 	return (error);
2974*7c478bd9Sstevel@tonic-gate }
2975