17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
57c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
67c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
77c478bd9Sstevel@tonic-gate  * with the License.
87c478bd9Sstevel@tonic-gate  *
97c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
107c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
117c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
127c478bd9Sstevel@tonic-gate  * and limitations under the License.
137c478bd9Sstevel@tonic-gate  *
147c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
157c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
167c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
177c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
187c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
197c478bd9Sstevel@tonic-gate  *
207c478bd9Sstevel@tonic-gate  * CDDL HEADER END
217c478bd9Sstevel@tonic-gate  */
227c478bd9Sstevel@tonic-gate /*
237c478bd9Sstevel@tonic-gate  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
247c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
25*b97d6ca7SMilan Jurik  * Copyright 2012 Milan Jurik. All rights reserved.
267c478bd9Sstevel@tonic-gate  */
277c478bd9Sstevel@tonic-gate 
287c478bd9Sstevel@tonic-gate /*
297c478bd9Sstevel@tonic-gate  * This module provides for the management of interconnect adapters
307c478bd9Sstevel@tonic-gate  * inter-node connections (aka paths), and IPC.  Adapter descriptors are
317c478bd9Sstevel@tonic-gate  * maintained on a linked list; one list per adapter devname.  Each
327c478bd9Sstevel@tonic-gate  * adapter descriptor heads a linked list of path descriptors.  There is
337c478bd9Sstevel@tonic-gate  * also a linked list of ipc_info descriptors; one for each node.  Each
347c478bd9Sstevel@tonic-gate  * ipc_info descriptor heads a circular list of ipc tokens (the tokens are
357c478bd9Sstevel@tonic-gate  * embedded within a path descriptor). The tokens are used in round robin
367c478bd9Sstevel@tonic-gate  * fashion.
377c478bd9Sstevel@tonic-gate  *
387c478bd9Sstevel@tonic-gate  *
397c478bd9Sstevel@tonic-gate  * The exported interface consists of the following functions:
407c478bd9Sstevel@tonic-gate  *	- rsmka_add_adapter
417c478bd9Sstevel@tonic-gate  *	- rsmka_remove_adapter
427c478bd9Sstevel@tonic-gate  *
437c478bd9Sstevel@tonic-gate  *      [add_path and remove_path only called for current adapters]
447c478bd9Sstevel@tonic-gate  *	- rsmka_add_path
457c478bd9Sstevel@tonic-gate  *	- rsmka_remove_path	[a path down request is implicit]
467c478bd9Sstevel@tonic-gate  *
477c478bd9Sstevel@tonic-gate  *	- rsmka_path_up           [called at clock ipl for Sun Cluster]
487c478bd9Sstevel@tonic-gate  *	- rsmka_path_down         [called at clock ipl for Sun Cluster]
497c478bd9Sstevel@tonic-gate  *	- rsmka_disconnect_node   [called at clock ipl for Sun Cluster;
507c478bd9Sstevel@tonic-gate  *				treat like path-down for all node paths;
517c478bd9Sstevel@tonic-gate  *				can be before node_alive; always before
527c478bd9Sstevel@tonic-gate  *				node_died.]
537c478bd9Sstevel@tonic-gate  *
547c478bd9Sstevel@tonic-gate  *	[node_alive and node_died are always paired]
557c478bd9Sstevel@tonic-gate  *	- rsmka_node_alive   called after the first cluster path is up
567c478bd9Sstevel@tonic-gate  *                           for this node
577c478bd9Sstevel@tonic-gate  *	- rsmka_node_died
587c478bd9Sstevel@tonic-gate  *
597c478bd9Sstevel@tonic-gate  *      [set the local node id]
607c478bd9Sstevel@tonic-gate  *      - rsmka_set_my_nodeid    called to set the variable my_nodeid to the
617c478bd9Sstevel@tonic-gate  *                           local node id
627c478bd9Sstevel@tonic-gate  *
637c478bd9Sstevel@tonic-gate  * Processing for these functions is setup as a state machine supported
647c478bd9Sstevel@tonic-gate  * by the data structures described above.
657c478bd9Sstevel@tonic-gate  *
667c478bd9Sstevel@tonic-gate  * For Sun Cluster these are called from the Path-Manager/Kernel-Agent
677c478bd9Sstevel@tonic-gate  * Interface (rsmka_pm_interface.cc).
687c478bd9Sstevel@tonic-gate  *
697c478bd9Sstevel@tonic-gate  * The functions rsm_path_up, rsm_path_down, and rsm_disconnect_node are
707c478bd9Sstevel@tonic-gate  * called at clock interrupt level from the Path-Manager/Kernel-Agent
717c478bd9Sstevel@tonic-gate  * Interface which precludes sleeping; so these functions may (optionally)
727c478bd9Sstevel@tonic-gate  * defer processing to an independent thread running at normal ipl.
737c478bd9Sstevel@tonic-gate  *
747c478bd9Sstevel@tonic-gate  *
757c478bd9Sstevel@tonic-gate  * lock definitions:
767c478bd9Sstevel@tonic-gate  *
777c478bd9Sstevel@tonic-gate  *	(mutex) work_queue.work_mutex
787c478bd9Sstevel@tonic-gate  *			protects linked list of work tokens and used
797c478bd9Sstevel@tonic-gate  *			with cv_wait/cv_signal thread synchronization.
807c478bd9Sstevel@tonic-gate  *			No other locks acquired when held.
817c478bd9Sstevel@tonic-gate  *
827c478bd9Sstevel@tonic-gate  *	(mutex) adapter_listhead_base.listlock
837c478bd9Sstevel@tonic-gate  *			protects linked list of adapter listheads
847c478bd9Sstevel@tonic-gate  *			Always acquired before listhead->mutex
857c478bd9Sstevel@tonic-gate  *
867c478bd9Sstevel@tonic-gate  *
877c478bd9Sstevel@tonic-gate  *	(mutex) ipc_info_lock
887c478bd9Sstevel@tonic-gate  *			protects ipc_info list and sendq token lists
897c478bd9Sstevel@tonic-gate  *			Always acquired before listhead->mutex
907c478bd9Sstevel@tonic-gate  *
917c478bd9Sstevel@tonic-gate  *      (mutex) listhead->mutex
927c478bd9Sstevel@tonic-gate  *			protects adapter listhead, linked list of
937c478bd9Sstevel@tonic-gate  *			adapters, and linked list of paths.
947c478bd9Sstevel@tonic-gate  *
957c478bd9Sstevel@tonic-gate  *      (mutex) path->mutex
967c478bd9Sstevel@tonic-gate  *			protects the path descriptor.
977c478bd9Sstevel@tonic-gate  *			work_queue.work_mutex may be acquired when holding
987c478bd9Sstevel@tonic-gate  *			this lock.
997c478bd9Sstevel@tonic-gate  *
1007c478bd9Sstevel@tonic-gate  *	(mutex) adapter->mutex
1017c478bd9Sstevel@tonic-gate  *			protects adapter descriptor contents.  used
1027c478bd9Sstevel@tonic-gate  *			mainly for ref_cnt update.
1037c478bd9Sstevel@tonic-gate  */
1047c478bd9Sstevel@tonic-gate 
1057c478bd9Sstevel@tonic-gate #include <sys/param.h>
1067c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
1077c478bd9Sstevel@tonic-gate #include <sys/errno.h>
1087c478bd9Sstevel@tonic-gate #include <sys/time.h>
1097c478bd9Sstevel@tonic-gate #include <sys/devops.h>
1107c478bd9Sstevel@tonic-gate #include <sys/ddi_impldefs.h>
1117c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
1127c478bd9Sstevel@tonic-gate #include <sys/ddi.h>
1137c478bd9Sstevel@tonic-gate #include <sys/sunddi.h>
1147c478bd9Sstevel@tonic-gate #include <sys/proc.h>
1157c478bd9Sstevel@tonic-gate #include <sys/thread.h>
1167c478bd9Sstevel@tonic-gate #include <sys/taskq.h>
1177c478bd9Sstevel@tonic-gate #include <sys/callb.h>
1187c478bd9Sstevel@tonic-gate 
1197c478bd9Sstevel@tonic-gate #include <sys/rsm/rsm.h>
1207c478bd9Sstevel@tonic-gate #include <rsm_in.h>
1217c478bd9Sstevel@tonic-gate #include <sys/rsm/rsmka_path_int.h>
1227c478bd9Sstevel@tonic-gate 
1237c478bd9Sstevel@tonic-gate extern void _cplpl_init();
1247c478bd9Sstevel@tonic-gate extern void _cplpl_fini();
1257c478bd9Sstevel@tonic-gate extern pri_t maxclsyspri;
1267c478bd9Sstevel@tonic-gate extern int   rsm_hash_size;
1277c478bd9Sstevel@tonic-gate 
1287c478bd9Sstevel@tonic-gate extern rsm_node_id_t my_nodeid;
1297c478bd9Sstevel@tonic-gate extern rsmhash_table_t rsm_import_segs;
130*b97d6ca7SMilan Jurik extern rsm_intr_hand_ret_t rsm_srv_func(rsm_controller_object_t *,
131*b97d6ca7SMilan Jurik     rsm_intr_q_op_t, rsm_addr_t, void *, size_t, rsm_intr_hand_arg_t);
1327c478bd9Sstevel@tonic-gate extern void rsmseg_unload(rsmseg_t *);
1337c478bd9Sstevel@tonic-gate extern void rsm_suspend_complete(rsm_node_id_t src_node, int flag);
1347c478bd9Sstevel@tonic-gate extern int rsmipc_send_controlmsg(path_t *path, int msgtype);
1357c478bd9Sstevel@tonic-gate extern void rsmka_path_monitor_initialize();
1367c478bd9Sstevel@tonic-gate extern void rsmka_path_monitor_terminate();
1377c478bd9Sstevel@tonic-gate 
1387c478bd9Sstevel@tonic-gate extern adapter_t loopback_adapter;
1397c478bd9Sstevel@tonic-gate /*
1407c478bd9Sstevel@tonic-gate  * Lint errors and warnings are displayed; informational messages
1417c478bd9Sstevel@tonic-gate  * are suppressed.
1427c478bd9Sstevel@tonic-gate  */
1437c478bd9Sstevel@tonic-gate /* lint -w2 */
1447c478bd9Sstevel@tonic-gate 
1457c478bd9Sstevel@tonic-gate 
1467c478bd9Sstevel@tonic-gate /*
1477c478bd9Sstevel@tonic-gate  * macros SQ_TOKEN_TO_PATH and WORK_TOKEN_TO_PATH use a null pointer
1487c478bd9Sstevel@tonic-gate  * for computational purposes.  Ignore the lint warning.
1497c478bd9Sstevel@tonic-gate  */
1507c478bd9Sstevel@tonic-gate /* lint -save -e413 */
1517c478bd9Sstevel@tonic-gate /* FUNCTION PROTOTYPES */
1527c478bd9Sstevel@tonic-gate static adapter_t *init_adapter(char *, int, rsm_addr_t,
1537c478bd9Sstevel@tonic-gate     rsm_controller_handle_t, rsm_ops_t *, srv_handler_arg_t *);
1547c478bd9Sstevel@tonic-gate adapter_t *rsmka_lookup_adapter(char *, int);
1557c478bd9Sstevel@tonic-gate static ipc_info_t *lookup_ipc_info(rsm_node_id_t);
1567c478bd9Sstevel@tonic-gate static ipc_info_t *init_ipc_info(rsm_node_id_t, boolean_t);
1577c478bd9Sstevel@tonic-gate static path_t *lookup_path(char *, int, rsm_node_id_t, rsm_addr_t);
1587c478bd9Sstevel@tonic-gate static void pathup_to_pathactive(ipc_info_t *, rsm_node_id_t);
1597c478bd9Sstevel@tonic-gate static void path_importer_disconnect(path_t *);
1607c478bd9Sstevel@tonic-gate boolean_t rsmka_do_path_active(path_t *, int);
1617c478bd9Sstevel@tonic-gate static boolean_t do_path_up(path_t *, int);
1627c478bd9Sstevel@tonic-gate static void do_path_down(path_t *, int);
1637c478bd9Sstevel@tonic-gate static void enqueue_work(work_token_t *);
1647c478bd9Sstevel@tonic-gate static boolean_t cancel_work(work_token_t *);
1657c478bd9Sstevel@tonic-gate static void link_path(path_t *);
1667c478bd9Sstevel@tonic-gate static void destroy_path(path_t *);
1677c478bd9Sstevel@tonic-gate static void link_sendq_token(sendq_token_t *, rsm_node_id_t);
1687c478bd9Sstevel@tonic-gate static void unlink_sendq_token(sendq_token_t *, rsm_node_id_t);
1697c478bd9Sstevel@tonic-gate boolean_t rsmka_check_node_alive(rsm_node_id_t);
1707c478bd9Sstevel@tonic-gate static void do_deferred_work(caddr_t);
1717c478bd9Sstevel@tonic-gate static int create_ipc_sendq(path_t *);
1727c478bd9Sstevel@tonic-gate static void destroy_ipc_info(ipc_info_t *);
1737c478bd9Sstevel@tonic-gate void rsmka_pathmanager_cleanup();
1747c478bd9Sstevel@tonic-gate void rsmka_release_adapter(adapter_t *);
1757c478bd9Sstevel@tonic-gate 
1767c478bd9Sstevel@tonic-gate kt_did_t rsm_thread_id;
1777c478bd9Sstevel@tonic-gate int rsmka_terminate_workthread_loop = 0;
1787c478bd9Sstevel@tonic-gate 
1797c478bd9Sstevel@tonic-gate static struct adapter_listhead_list adapter_listhead_base;
1807c478bd9Sstevel@tonic-gate static work_queue_t work_queue;
1817c478bd9Sstevel@tonic-gate 
1827c478bd9Sstevel@tonic-gate /* protect ipc_info descriptor manipulation */
1837c478bd9Sstevel@tonic-gate static kmutex_t ipc_info_lock;
1847c478bd9Sstevel@tonic-gate 
1857c478bd9Sstevel@tonic-gate static ipc_info_t *ipc_info_head = NULL;
1867c478bd9Sstevel@tonic-gate 
1877c478bd9Sstevel@tonic-gate static int category = RSM_PATH_MANAGER | RSM_KERNEL_AGENT;
1887c478bd9Sstevel@tonic-gate 
1897c478bd9Sstevel@tonic-gate /* for synchronization with rsmipc_send() in rsm.c */
1907c478bd9Sstevel@tonic-gate kmutex_t ipc_info_cvlock;
1917c478bd9Sstevel@tonic-gate kcondvar_t ipc_info_cv;
1927c478bd9Sstevel@tonic-gate 
1937c478bd9Sstevel@tonic-gate 
1947c478bd9Sstevel@tonic-gate 
1957c478bd9Sstevel@tonic-gate /*
1967c478bd9Sstevel@tonic-gate  * RSMKA PATHMANAGER INITIALIZATION AND CLEANUP ROUTINES
1977c478bd9Sstevel@tonic-gate  *
1987c478bd9Sstevel@tonic-gate  */
1997c478bd9Sstevel@tonic-gate 
2007c478bd9Sstevel@tonic-gate 
2017c478bd9Sstevel@tonic-gate /*
2027c478bd9Sstevel@tonic-gate  * Called from the rsm module (rsm.c)  _init() routine
2037c478bd9Sstevel@tonic-gate  */
2047c478bd9Sstevel@tonic-gate void
rsmka_pathmanager_init()2057c478bd9Sstevel@tonic-gate rsmka_pathmanager_init()
2067c478bd9Sstevel@tonic-gate {
2077c478bd9Sstevel@tonic-gate 	kthread_t *tp;
2087c478bd9Sstevel@tonic-gate 
2097c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2107c478bd9Sstevel@tonic-gate 	    "rsmka_pathmanager_init enter\n"));
2117c478bd9Sstevel@tonic-gate 
2127c478bd9Sstevel@tonic-gate 	/* initialization for locks and condition variables  */
2137c478bd9Sstevel@tonic-gate 	mutex_init(&work_queue.work_mutex, NULL, MUTEX_DEFAULT, NULL);
2147c478bd9Sstevel@tonic-gate 	mutex_init(&ipc_info_lock, NULL, MUTEX_DEFAULT, NULL);
2157c478bd9Sstevel@tonic-gate 	mutex_init(&ipc_info_cvlock, NULL, MUTEX_DEFAULT, NULL);
2167c478bd9Sstevel@tonic-gate 	mutex_init(&adapter_listhead_base.listlock, NULL,
2177c478bd9Sstevel@tonic-gate 	    MUTEX_DEFAULT, NULL);
2187c478bd9Sstevel@tonic-gate 
2197c478bd9Sstevel@tonic-gate 	cv_init(&work_queue.work_cv, NULL, CV_DEFAULT, NULL);
2207c478bd9Sstevel@tonic-gate 	cv_init(&ipc_info_cv, NULL, CV_DEFAULT, NULL);
2217c478bd9Sstevel@tonic-gate 
2227c478bd9Sstevel@tonic-gate 	tp = thread_create(NULL, 0, do_deferred_work, NULL, 0, &p0,
2237c478bd9Sstevel@tonic-gate 	    TS_RUN, maxclsyspri);
2247c478bd9Sstevel@tonic-gate 	rsm_thread_id = tp->t_did;
2257c478bd9Sstevel@tonic-gate 
2267c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2277c478bd9Sstevel@tonic-gate 	    "rsmka_pathmanager_init done\n"));
2287c478bd9Sstevel@tonic-gate }
2297c478bd9Sstevel@tonic-gate 
2307c478bd9Sstevel@tonic-gate void
rsmka_pathmanager_cleanup()2317c478bd9Sstevel@tonic-gate rsmka_pathmanager_cleanup()
2327c478bd9Sstevel@tonic-gate {
2337c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2347c478bd9Sstevel@tonic-gate 	    "rsmka_pathmanager_cleanup enter\n"));
2357c478bd9Sstevel@tonic-gate 
2367c478bd9Sstevel@tonic-gate 	ASSERT(work_queue.head == NULL);
2377c478bd9Sstevel@tonic-gate 
2387c478bd9Sstevel@tonic-gate 	/*
2397c478bd9Sstevel@tonic-gate 	 * In processing the remove path callbacks from the path monitor
2407c478bd9Sstevel@tonic-gate 	 * object, all deferred work will have been completed. So
2417c478bd9Sstevel@tonic-gate 	 * awaken the deferred work thread to give it a chance to exit
2427c478bd9Sstevel@tonic-gate 	 * the loop.
2437c478bd9Sstevel@tonic-gate 	 */
2447c478bd9Sstevel@tonic-gate 	mutex_enter(&work_queue.work_mutex);
2457c478bd9Sstevel@tonic-gate 	rsmka_terminate_workthread_loop++;
2467c478bd9Sstevel@tonic-gate 	cv_signal(&work_queue.work_cv);
2477c478bd9Sstevel@tonic-gate 	mutex_exit(&work_queue.work_mutex);
2487c478bd9Sstevel@tonic-gate 
2497c478bd9Sstevel@tonic-gate 	/*
2507c478bd9Sstevel@tonic-gate 	 * Wait for the deferred work thread to exit before
2517c478bd9Sstevel@tonic-gate 	 * destroying the locks and cleaning up other data
2527c478bd9Sstevel@tonic-gate 	 * structures.
2537c478bd9Sstevel@tonic-gate 	 */
2547c478bd9Sstevel@tonic-gate 	if (rsm_thread_id)
2557c478bd9Sstevel@tonic-gate 		thread_join(rsm_thread_id);
2567c478bd9Sstevel@tonic-gate 
2577c478bd9Sstevel@tonic-gate 	/*
2587c478bd9Sstevel@tonic-gate 	 * Destroy locks & condition variables
2597c478bd9Sstevel@tonic-gate 	 */
2607c478bd9Sstevel@tonic-gate 	mutex_destroy(&work_queue.work_mutex);
2617c478bd9Sstevel@tonic-gate 	cv_destroy(&work_queue.work_cv);
2627c478bd9Sstevel@tonic-gate 
2637c478bd9Sstevel@tonic-gate 	mutex_enter(&ipc_info_lock);
2647c478bd9Sstevel@tonic-gate 	while (ipc_info_head)
2657c478bd9Sstevel@tonic-gate 		destroy_ipc_info(ipc_info_head);
2667c478bd9Sstevel@tonic-gate 	mutex_exit(&ipc_info_lock);
2677c478bd9Sstevel@tonic-gate 
2687c478bd9Sstevel@tonic-gate 	mutex_destroy(&ipc_info_lock);
2697c478bd9Sstevel@tonic-gate 
2707c478bd9Sstevel@tonic-gate 	mutex_destroy(&ipc_info_cvlock);
2717c478bd9Sstevel@tonic-gate 	cv_destroy(&ipc_info_cv);
2727c478bd9Sstevel@tonic-gate 
2737c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2747c478bd9Sstevel@tonic-gate 	    "rsmka_pathmanager_cleanup done\n"));
2757c478bd9Sstevel@tonic-gate 
2767c478bd9Sstevel@tonic-gate }
2777c478bd9Sstevel@tonic-gate 
2787c478bd9Sstevel@tonic-gate void
rsmka_set_my_nodeid(rsm_node_id_t local_nodeid)2797c478bd9Sstevel@tonic-gate rsmka_set_my_nodeid(rsm_node_id_t local_nodeid)
2807c478bd9Sstevel@tonic-gate {
2817c478bd9Sstevel@tonic-gate 	my_nodeid = local_nodeid;
2827c478bd9Sstevel@tonic-gate 
2837c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
2847c478bd9Sstevel@tonic-gate 	    "rsm: node %d \n", my_nodeid));
2857c478bd9Sstevel@tonic-gate 
2867c478bd9Sstevel@tonic-gate }
2877c478bd9Sstevel@tonic-gate 
2887c478bd9Sstevel@tonic-gate /*
2897c478bd9Sstevel@tonic-gate  * DEFERRED WORK THREAD AND WORK QUEUE SUPPORT ROUTINES
2907c478bd9Sstevel@tonic-gate  *
2917c478bd9Sstevel@tonic-gate  */
2927c478bd9Sstevel@tonic-gate 
2937c478bd9Sstevel@tonic-gate /*
2947c478bd9Sstevel@tonic-gate  * This function is the executable code of the thread which handles
2957c478bd9Sstevel@tonic-gate  * deferred work.  Work is deferred when a function is called at
2967c478bd9Sstevel@tonic-gate  * clock ipl and processing may require blocking.
2977c478bd9Sstevel@tonic-gate  *
2987c478bd9Sstevel@tonic-gate  *
 * The thread is created by a call to thread_create in
 * rsmka_pathmanager_init.  It loops forever - blocked until work is
 * enqueued from
3027c478bd9Sstevel@tonic-gate  * rsmka_do_path_active, do_path_down, or rsmka_disconnect_node.
3037c478bd9Sstevel@tonic-gate  * rsmka_pathmanager_cleanup (called from _fini) will
3047c478bd9Sstevel@tonic-gate  * set rsmka_terminate_workthread_loop and the task processing will
3057c478bd9Sstevel@tonic-gate  * terminate.
3067c478bd9Sstevel@tonic-gate  */
static void
do_deferred_work(caddr_t arg /*ARGSUSED*/)
{

	adapter_t 			*adapter;
	path_t				*path;
	work_token_t			*work_token;
	int				work_opcode;
	rsm_send_q_handle_t		sendq_handle;
	int				error;
	timespec_t			tv;
	callb_cpr_t			cprinfo;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_deferred_work enter\n"));

	/*
	 * Register with the CPR (checkpoint/resume) framework; the thread
	 * declares itself CPR-safe only while blocked in cv_wait below.
	 */
	CALLB_CPR_INIT(&cprinfo, &work_queue.work_mutex, callb_generic_cpr,
	    "rsm_deferred_work");

	for (;;) {
		mutex_enter(&work_queue.work_mutex);

		/* termination requested by rsmka_pathmanager_cleanup */
		if (rsmka_terminate_workthread_loop) {
			goto exit;
		}

		/* When there is no work to do, block here */
		while (work_queue.head == NULL) {
			/* Since no work to do, Safe to CPR */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			cv_wait(&work_queue.work_cv, &work_queue.work_mutex);
			CALLB_CPR_SAFE_END(&cprinfo, &work_queue.work_mutex);

			/* re-check: cleanup signals this cv to wake us */
			if (rsmka_terminate_workthread_loop) {
				goto exit;
			}
		}

		/*
		 * Remove a work token and begin work
		 */
		work_token = work_queue.head;
		work_queue.head = work_token->next;
		if (work_queue.tail == work_token)
			work_queue.tail = NULL;

		/*
		 * The token is embedded in its path descriptor;
		 * WORK_TOKEN_TO_PATH recovers the path from the token
		 * address and the (opcode - 1) token index.
		 */
		work_opcode = work_token->opcode;
		path = WORK_TOKEN_TO_PATH(work_token, work_opcode -1);
		work_token->next = NULL;
		mutex_exit(&work_queue.work_mutex);


		switch (work_opcode) {
		case RSMKA_IPC_UP:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work:up,  path = %lx\n", path));
			/* sendq creation may block; that is why it is here */
			error = create_ipc_sendq(path);
			mutex_enter(&path->mutex);
			if (path->state != RSMKA_PATH_UP) {
				/*
				 * path state has changed, if sendq was created,
				 * destroy it and return. Don't need to worry
				 * about sendq ref_cnt since no one starts
				 * using the sendq till path state becomes
				 * active
				 */
				if (error == RSM_SUCCESS) {
					sendq_handle = path->sendq_token.
					    rsmpi_sendq_handle;
					path->sendq_token.rsmpi_sendq_handle =
					    NULL;
					adapter = path->local_adapter;
					/*
					 * drop path->mutex across the RSMPI
					 * destroy call (it may block)
					 */
					mutex_exit(&path->mutex);

					if (sendq_handle != NULL) {
						adapter->rsmpi_ops->
						    rsm_sendq_destroy(
						    sendq_handle);
					}
					mutex_enter(&path->mutex);
				}
				/* free up work token */
				work_token->opcode = 0;

				/*
				 * decrement reference count for the path
				 * descriptor and signal for synchronization
				 * with rsmka_remove_path. PATH_HOLD_NOLOCK was
				 * done by rsmka_path_up.
				 */
				PATH_RELE_NOLOCK(path);
				mutex_exit(&path->mutex);
				break;
			}

			if (error == RSM_SUCCESS) {
				DBG_PRINTF((category, RSM_DEBUG,
				    "do_deferred_work:success on up\n"));
				/* clear flag since sendq_create succeeded */
				path->flags &= ~RSMKA_SQCREATE_PENDING;
				path->state = RSMKA_PATH_ACTIVE;

				/*
				 * now that path is active we send the
				 * RSMIPC_MSG_SQREADY to the remote endpoint
				 */
				path->procmsg_cnt = 0;
				path->sendq_token.msgbuf_avail = 0;

				/* Calculate local incarnation number */
				gethrestime(&tv);
				/*
				 * avoid the reserved "unknown" value; seconds
				 * since epoch otherwise serves as incarnation
				 */
				if (tv.tv_sec == RSM_UNKNOWN_INCN)
					tv.tv_sec = 1;
				path->local_incn = (int64_t)tv.tv_sec;

				/*
				 * if send fails here its due to some
				 * non-transient error because QUEUE_FULL is
				 * not possible here since we are the first
				 * message on this sendq. The error will cause
				 * the path to go down anyways, so ignore
				 * the return value.
				 */
				(void) rsmipc_send_controlmsg(path,
				    RSMIPC_MSG_SQREADY);
				/* wait for SQREADY_ACK message */
				path->flags |= RSMKA_WAIT_FOR_SQACK;
			} else {
				/*
				 * sendq create failed possibly because
				 * the remote end is not yet ready eg.
				 * handler not registered, set a flag
				 * so that when there is an indication
				 * that the remote end is ready
				 * rsmka_do_path_active will be retried.
				 */
				path->flags |= RSMKA_SQCREATE_PENDING;
			}

			/* free up work token */
			work_token->opcode = 0;

			/*
			 * decrement reference count for the path
			 * descriptor and signal for synchronization with
			 * rsmka_remove_path. PATH_HOLD_NOLOCK was done
			 * by rsmka_path_up.
			 */
			PATH_RELE_NOLOCK(path);
			mutex_exit(&path->mutex);

			break;
		case RSMKA_IPC_DOWN:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work:down, path = %lx\n", path));

			/*
			 * Unlike the processing of path_down in the case
			 * where the RSMKA_NO_SLEEP flag is not set, here,
			 * the state of the path is changed directly to
			 * RSMKA_PATH_DOWN. This is because in this case
			 * where the RSMKA_NO_SLEEP flag is set, any other
			 * calls referring this path will just queue up
			 * and will be processed only after the path
			 * down processing has completed.
			 */
			mutex_enter(&path->mutex);
			path->state = RSMKA_PATH_DOWN;
			/*
			 * clear the WAIT_FOR_SQACK flag since path is down.
			 */
			path->flags &= ~RSMKA_WAIT_FOR_SQACK;

			/*
			 * this wakes up any thread waiting to receive credits
			 * in rsmipc_send to tell it that the path is down
			 * thus releasing the sendq.
			 */
			cv_broadcast(&path->sendq_token.sendq_cv);

			mutex_exit(&path->mutex);

			/* drain the messages from the receive msgbuf */
			taskq_wait(path->recv_taskq);

			/*
			 * The path_importer_disconnect function has to
			 * be called after releasing the mutex on the path
			 * in order to avoid any recursive mutex enter panics
			 */
			path_importer_disconnect(path);
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work: success on down\n"));
			/*
			 * decrement reference count for the path
			 * descriptor and signal for synchronization with
			 * rsmka_remove_path. PATH_HOLD_NOLOCK was done
			 * by rsmka_path_down.
			 */
			mutex_enter(&path->mutex);

#ifdef DEBUG
			/*
			 * Some IPC messages left in the recv_buf,
			 * they'll be dropped
			 */
			if (path->msgbuf_cnt != 0)
				cmn_err(CE_NOTE,
				    "path=%lx msgbuf_cnt != 0\n",
				    (uintptr_t)path);
#endif

			/*
			 * Don't want to destroy a send queue when a token
			 * has been acquired; so wait 'til the token is
			 * no longer referenced (with a cv_wait).
			 */
			while (path->sendq_token.ref_cnt != 0)
				cv_wait(&path->sendq_token.sendq_cv,
				    &path->mutex);

			sendq_handle = path->sendq_token.rsmpi_sendq_handle;
			path->sendq_token.rsmpi_sendq_handle = NULL;

			/* destroy the send queue and release the handle */
			if (sendq_handle != NULL) {
				adapter = path->local_adapter;
				adapter->rsmpi_ops->rsm_sendq_destroy(
				    sendq_handle);
			}

			/* free up work token and drop the path hold */
			work_token->opcode = 0;
			PATH_RELE_NOLOCK(path);
			mutex_exit(&path->mutex);
			break;
		default:
			DBG_PRINTF((category, RSM_DEBUG,
			    "do_deferred_work: bad work token opcode\n"));
			break;
		}
	}

exit:
	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "do_deferred_work done\n"));
	/*
	 * CALLB_CPR_EXIT does a mutex_exit for
	 * the work_queue.work_mutex
	 */
	CALLB_CPR_EXIT(&cprinfo);
}
5567c478bd9Sstevel@tonic-gate 
5577c478bd9Sstevel@tonic-gate /*
5587c478bd9Sstevel@tonic-gate  * Work is inserted at the tail of the list and processed from the
5597c478bd9Sstevel@tonic-gate  * head of the list.
5607c478bd9Sstevel@tonic-gate  */
5617c478bd9Sstevel@tonic-gate static void
enqueue_work(work_token_t * token)5627c478bd9Sstevel@tonic-gate enqueue_work(work_token_t *token)
5637c478bd9Sstevel@tonic-gate {
5647c478bd9Sstevel@tonic-gate 	work_token_t	*tail_token;
5657c478bd9Sstevel@tonic-gate 
5667c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "enqueue_work enter\n"));
5677c478bd9Sstevel@tonic-gate 
5687c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&work_queue.work_mutex));
5697c478bd9Sstevel@tonic-gate 
5707c478bd9Sstevel@tonic-gate 	token->next = NULL;
5717c478bd9Sstevel@tonic-gate 	if (work_queue.head == NULL) {
5727c478bd9Sstevel@tonic-gate 		work_queue.head = work_queue.tail = token;
5737c478bd9Sstevel@tonic-gate 	} else {
5747c478bd9Sstevel@tonic-gate 		tail_token = work_queue.tail;
5757c478bd9Sstevel@tonic-gate 		work_queue.tail = tail_token->next = token;
5767c478bd9Sstevel@tonic-gate 	}
5777c478bd9Sstevel@tonic-gate 
5787c478bd9Sstevel@tonic-gate 	/* wake up deferred work thread */
5797c478bd9Sstevel@tonic-gate 	cv_signal(&work_queue.work_cv);
5807c478bd9Sstevel@tonic-gate 
5817c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "enqueue_work done\n"));
5827c478bd9Sstevel@tonic-gate }
5837c478bd9Sstevel@tonic-gate 
5847c478bd9Sstevel@tonic-gate 
5857c478bd9Sstevel@tonic-gate /*
5867c478bd9Sstevel@tonic-gate  * If the work_token is found on the work queue, the work is cancelled
5877c478bd9Sstevel@tonic-gate  * by removing the token from the work queue.
5887c478bd9Sstevel@tonic-gate  *
5897c478bd9Sstevel@tonic-gate  * Return true if a work_token was found and cancelled, otherwise return false
5907c478bd9Sstevel@tonic-gate  *
5917c478bd9Sstevel@tonic-gate  * enqueue_work increments the path refcnt to make sure that the path doesn't
5927c478bd9Sstevel@tonic-gate  * go away, callers of cancel_work need to decrement the refcnt of the path to
5937c478bd9Sstevel@tonic-gate  * which this work_token belongs if a work_token is found in the work_queue
5947c478bd9Sstevel@tonic-gate  * and cancelled ie. when the return value is B_TRUE.
5957c478bd9Sstevel@tonic-gate  */
5967c478bd9Sstevel@tonic-gate static boolean_t
cancel_work(work_token_t * work_token)5977c478bd9Sstevel@tonic-gate cancel_work(work_token_t *work_token)
5987c478bd9Sstevel@tonic-gate {
5997c478bd9Sstevel@tonic-gate 	work_token_t	*current_token;
6007c478bd9Sstevel@tonic-gate 	work_token_t	*prev_token = NULL;
6017c478bd9Sstevel@tonic-gate 	boolean_t	cancelled = B_FALSE;
6027c478bd9Sstevel@tonic-gate 
6037c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "cancel_work enter\n"));
6047c478bd9Sstevel@tonic-gate 
6057c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&work_queue.work_mutex));
6067c478bd9Sstevel@tonic-gate 
6077c478bd9Sstevel@tonic-gate 
6087c478bd9Sstevel@tonic-gate 	current_token = work_queue.head;
6097c478bd9Sstevel@tonic-gate 	while (current_token != NULL) {
6107c478bd9Sstevel@tonic-gate 		if (current_token == work_token) {
6117c478bd9Sstevel@tonic-gate 			if (work_token == work_queue.head)
6127c478bd9Sstevel@tonic-gate 				work_queue.head = work_token->next;
6137c478bd9Sstevel@tonic-gate 			else
6147c478bd9Sstevel@tonic-gate 				prev_token->next = work_token->next;
6157c478bd9Sstevel@tonic-gate 			if (work_token == work_queue.tail)
6167c478bd9Sstevel@tonic-gate 				work_queue.tail = prev_token;
6177c478bd9Sstevel@tonic-gate 
6187c478bd9Sstevel@tonic-gate 			current_token->opcode = 0;
6197c478bd9Sstevel@tonic-gate 			current_token->next = NULL;
6207c478bd9Sstevel@tonic-gate 			/* found and cancelled work */
6217c478bd9Sstevel@tonic-gate 			cancelled = B_TRUE;
6227c478bd9Sstevel@tonic-gate 			DBG_PRINTF((category, RSM_DEBUG,
6237c478bd9Sstevel@tonic-gate 			    "cancelled_work = 0x%p\n", work_token));
6247c478bd9Sstevel@tonic-gate 			break;
6257c478bd9Sstevel@tonic-gate 		}
6267c478bd9Sstevel@tonic-gate 		prev_token = current_token;
6277c478bd9Sstevel@tonic-gate 		current_token = current_token->next;
6287c478bd9Sstevel@tonic-gate 	}
6297c478bd9Sstevel@tonic-gate 
6307c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "cancel_work done\n"));
6317c478bd9Sstevel@tonic-gate 	return (cancelled);
6327c478bd9Sstevel@tonic-gate }
6337c478bd9Sstevel@tonic-gate 
6347c478bd9Sstevel@tonic-gate /*
6357c478bd9Sstevel@tonic-gate  * EXTERNAL INTERFACES
6367c478bd9Sstevel@tonic-gate  *
 * For Galileo Clustering, these routines are called from
 * rsmka_pm_interface.cc
6397c478bd9Sstevel@tonic-gate  *
6407c478bd9Sstevel@tonic-gate  */
6417c478bd9Sstevel@tonic-gate 
6427c478bd9Sstevel@tonic-gate /*
6437c478bd9Sstevel@tonic-gate  *
6447c478bd9Sstevel@tonic-gate  * If the adapter is supported by rsmpi then initialize an adapter descriptor
6457c478bd9Sstevel@tonic-gate  * and link it to the list of adapters.  The adapter attributes are obtained
6467c478bd9Sstevel@tonic-gate  * from rsmpi and stored in the descriptor.  Finally, a service handler
6477c478bd9Sstevel@tonic-gate  * for incoming ipc on this adapter is registered with rsmpi.
 * A pointer for the adapter descriptor is returned as a cookie to the
 * caller.  The cookie may be used with subsequent calls to save the time of
 * adapter descriptor lookup.
6517c478bd9Sstevel@tonic-gate  *
 * The adapter descriptor maintains a reference count which is initialized
 * to 1 and incremented on lookups; when a cookie is used in place of
 * a lookup, an explicit ADAPTER_HOLD is required.
6557c478bd9Sstevel@tonic-gate  */
6567c478bd9Sstevel@tonic-gate 
6577c478bd9Sstevel@tonic-gate void *
rsmka_add_adapter(char * name,int instance,rsm_addr_t hwaddr)6587c478bd9Sstevel@tonic-gate rsmka_add_adapter(char *name, int instance, rsm_addr_t hwaddr)
6597c478bd9Sstevel@tonic-gate {
6607c478bd9Sstevel@tonic-gate 	adapter_t		*adapter;
6617c478bd9Sstevel@tonic-gate 	rsm_controller_object_t	rsmpi_adapter_object;
6627c478bd9Sstevel@tonic-gate 	rsm_controller_handle_t	rsmpi_adapter_handle;
6637c478bd9Sstevel@tonic-gate 	rsm_ops_t		*rsmpi_ops_vector;
6647c478bd9Sstevel@tonic-gate 	int			adapter_is_supported;
6657c478bd9Sstevel@tonic-gate 	rsm_controller_attr_t	*attr;
6667c478bd9Sstevel@tonic-gate 	srv_handler_arg_t	*srv_hdlr_argp;
6677c478bd9Sstevel@tonic-gate 	int result;
6687c478bd9Sstevel@tonic-gate 
6697c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_adapter enter\n"));
6707c478bd9Sstevel@tonic-gate 
6717c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
6727c478bd9Sstevel@tonic-gate 	    "rsmka_add_adapter: name = %s instance = %d hwaddr = %llx \n",
6737c478bd9Sstevel@tonic-gate 	    name, instance, hwaddr));
6747c478bd9Sstevel@tonic-gate 
6757c478bd9Sstevel@tonic-gate 	/* verify name length */
6767c478bd9Sstevel@tonic-gate 	if (strlen(name) >= MAXNAMELEN) {
6777c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
6787c478bd9Sstevel@tonic-gate 		    "rsmka_add_adapter done: name too long\n"));
6797c478bd9Sstevel@tonic-gate 		return (NULL);
6807c478bd9Sstevel@tonic-gate 	}
6817c478bd9Sstevel@tonic-gate 
6827c478bd9Sstevel@tonic-gate 
6837c478bd9Sstevel@tonic-gate 	/* Check if rsmpi supports this adapter type */
6847c478bd9Sstevel@tonic-gate 	adapter_is_supported = rsm_get_controller(name, instance,
6857c478bd9Sstevel@tonic-gate 	    &rsmpi_adapter_object, RSM_VERSION);
6867c478bd9Sstevel@tonic-gate 
6877c478bd9Sstevel@tonic-gate 	if (adapter_is_supported != RSM_SUCCESS) {
6887c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_ERR,
6897c478bd9Sstevel@tonic-gate 		    "rsmka_add_adapter done: adapter not supported\n"));
6907c478bd9Sstevel@tonic-gate 		return (NULL);
6917c478bd9Sstevel@tonic-gate 	}
6927c478bd9Sstevel@tonic-gate 
6937c478bd9Sstevel@tonic-gate 	rsmpi_adapter_handle = rsmpi_adapter_object.handle;
6947c478bd9Sstevel@tonic-gate 	rsmpi_ops_vector = rsmpi_adapter_object.ops;
6957c478bd9Sstevel@tonic-gate 
6967c478bd9Sstevel@tonic-gate 	/* Get adapter attributes */
6977c478bd9Sstevel@tonic-gate 	result = rsm_get_controller_attr(rsmpi_adapter_handle, &attr);
6987c478bd9Sstevel@tonic-gate 	if (result != RSM_SUCCESS) {
6997c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_ERR,
7007c478bd9Sstevel@tonic-gate 		    "rsm: get_controller_attr(%d) Failed %x\n",
7017c478bd9Sstevel@tonic-gate 		    instance, result));
7027c478bd9Sstevel@tonic-gate 		(void) rsm_release_controller(name, instance,
7037c478bd9Sstevel@tonic-gate 		    &rsmpi_adapter_object);
7047c478bd9Sstevel@tonic-gate 		return (NULL);
7057c478bd9Sstevel@tonic-gate 	}
7067c478bd9Sstevel@tonic-gate 
7077c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
7087c478bd9Sstevel@tonic-gate 	    "rsmka_add_adapter: register service offset = %d\n", hwaddr));
7097c478bd9Sstevel@tonic-gate 
7107c478bd9Sstevel@tonic-gate 	/*
7117c478bd9Sstevel@tonic-gate 	 * create a srv_handler_arg_t object, initialize it and register
7127c478bd9Sstevel@tonic-gate 	 * it along with rsm_srv_func. This get passed as the
7137c478bd9Sstevel@tonic-gate 	 * rsm_intr_hand_arg_t when the handler gets invoked.
7147c478bd9Sstevel@tonic-gate 	 */
7157c478bd9Sstevel@tonic-gate 	srv_hdlr_argp = kmem_zalloc(sizeof (srv_handler_arg_t), KM_SLEEP);
7167c478bd9Sstevel@tonic-gate 
7177c478bd9Sstevel@tonic-gate 	(void) strcpy(srv_hdlr_argp->adapter_name, name);
7187c478bd9Sstevel@tonic-gate 	srv_hdlr_argp->adapter_instance = instance;
7197c478bd9Sstevel@tonic-gate 	srv_hdlr_argp->adapter_hwaddr = hwaddr;
7207c478bd9Sstevel@tonic-gate 
7217c478bd9Sstevel@tonic-gate 	/* Have rsmpi register the ipc receive handler for this adapter */
7227c478bd9Sstevel@tonic-gate 	/*
7237c478bd9Sstevel@tonic-gate 	 * Currently, we need to pass in a separate service identifier for
7247c478bd9Sstevel@tonic-gate 	 * each adapter. In order to obtain a unique service identifier
7257c478bd9Sstevel@tonic-gate 	 * value for an adapter, we add the hardware address of the
7267c478bd9Sstevel@tonic-gate 	 * adapter to the base service identifier(RSM_SERVICE which is
7277c478bd9Sstevel@tonic-gate 	 * defined as RSM_INTR_T_KA as per the RSMPI specification).
7287c478bd9Sstevel@tonic-gate 	 * NOTE: This may result in using some of the service identifier
7297c478bd9Sstevel@tonic-gate 	 * values defined for RSM_INTR_T_XPORT(the Sun Cluster Transport).
7307c478bd9Sstevel@tonic-gate 	 */
7317c478bd9Sstevel@tonic-gate 	result = rsmpi_ops_vector->rsm_register_handler(
7327c478bd9Sstevel@tonic-gate 	    rsmpi_adapter_handle, &rsmpi_adapter_object,
7337c478bd9Sstevel@tonic-gate 	    RSM_SERVICE+(uint_t)hwaddr, rsm_srv_func,
7347c478bd9Sstevel@tonic-gate 	    (rsm_intr_hand_arg_t)srv_hdlr_argp, NULL, 0);
7357c478bd9Sstevel@tonic-gate 
7367c478bd9Sstevel@tonic-gate 	if (result != RSM_SUCCESS) {
7377c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_ERR,
7387c478bd9Sstevel@tonic-gate 		    "rsmka_add_adapter done: rsm_register_handler"
7397c478bd9Sstevel@tonic-gate 		    "failed %d\n",
7407c478bd9Sstevel@tonic-gate 		    instance));
7417c478bd9Sstevel@tonic-gate 		return (NULL);
7427c478bd9Sstevel@tonic-gate 	}
7437c478bd9Sstevel@tonic-gate 
7447c478bd9Sstevel@tonic-gate 	/* Initialize an adapter descriptor and add it to the adapter list */
7457c478bd9Sstevel@tonic-gate 	adapter = init_adapter(name, instance, hwaddr,
7467c478bd9Sstevel@tonic-gate 	    rsmpi_adapter_handle, rsmpi_ops_vector, srv_hdlr_argp);
7477c478bd9Sstevel@tonic-gate 
7487c478bd9Sstevel@tonic-gate 	/* Copy over the attributes from the pointer returned to us */
7497c478bd9Sstevel@tonic-gate 	adapter->rsm_attr = *attr;
7507c478bd9Sstevel@tonic-gate 
7517c478bd9Sstevel@tonic-gate 	/*
7527c478bd9Sstevel@tonic-gate 	 * With the addition of the topology obtainment interface, applications
7537c478bd9Sstevel@tonic-gate 	 * now get the local nodeid from the topology data structure.
7547c478bd9Sstevel@tonic-gate 	 *
7557c478bd9Sstevel@tonic-gate 	 * adapter->rsm_attr.attr_node_id = my_nodeid;
7567c478bd9Sstevel@tonic-gate 	 */
7577c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_ERR,
7587c478bd9Sstevel@tonic-gate 	    "rsmka_add_adapter: adapter = %lx\n", adapter));
7597c478bd9Sstevel@tonic-gate 
7607c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_adapter done\n"));
7617c478bd9Sstevel@tonic-gate 
7627c478bd9Sstevel@tonic-gate 	/* return adapter pointer as a cookie for later fast access */
7637c478bd9Sstevel@tonic-gate 	return ((void *)adapter);
7647c478bd9Sstevel@tonic-gate }
7657c478bd9Sstevel@tonic-gate 
7667c478bd9Sstevel@tonic-gate 
7677c478bd9Sstevel@tonic-gate /*
 * Unlink the adapter descriptor and call rsmka_release_adapter which
 * will decrement the reference count and possibly free the descriptor.
7707c478bd9Sstevel@tonic-gate  */
/*
 * Remove the adapter identified either by the cookie (RSMKA_USE_COOKIE)
 * or by name/instance lookup: unlink it from its listhead, unregister
 * the IPC receive handler, release the rsmpi controller hold, and drop
 * the descriptor reference via rsmka_release_adapter.  Always returns
 * B_TRUE.  Caller must have removed all paths first (see ASSERT below).
 */
boolean_t
rsmka_remove_adapter(char *name, uint_t instance, void *cookie, int flags)
{
	adapter_t		*adapter;
	adapter_listhead_t	*listhead;
	adapter_t		*prev, *current;
	rsm_controller_object_t	rsm_cntl_obj;


	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_remove_adapter enter\n"));

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_remove_adapter: cookie = %lx\n", cookie));

	if (flags & RSMKA_USE_COOKIE) {
		adapter = (adapter_t *)cookie;
	} else {
		adapter = rsmka_lookup_adapter(name, instance);
		/*
		 * rsmka_lookup_adapter increments the ref_cnt; need
		 * to decrement here to get true count
		 */
		ADAPTER_RELE(adapter);
	}
	/* all paths on this adapter must already be gone */
	ASSERT(adapter->next_path == NULL);

	listhead = adapter->listhead;

	/* lock order: listhead_base list lock, then the listhead itself */
	mutex_enter(&adapter_listhead_base.listlock);

	mutex_enter(&listhead->mutex);

	/* find the adapter in the list and remove it */
	prev = NULL;
	current = listhead->next_adapter;
	while (current != NULL) {
		if (adapter->instance == current->instance) {
			break;
		} else {
			prev = current;
			current = current->next;
		}
	}
	/* the adapter must be on its own listhead's list */
	ASSERT(current != NULL);

	if (prev == NULL)
		listhead->next_adapter = current->next;
	else
		prev->next = current->next;

	listhead->adapter_count--;

	mutex_exit(&listhead->mutex);

	mutex_exit(&adapter_listhead_base.listlock);

	mutex_enter(&current->mutex);

	/*
	 * unregister the handler
	 * (mirror of the rsm_register_handler call in rsmka_add_adapter:
	 * same RSM_SERVICE+hwaddr service id, same rsm_srv_func/arg)
	 */
	current->rsmpi_ops->rsm_unregister_handler(current->rsmpi_handle,
	    RSM_SERVICE+current->hwaddr, rsm_srv_func,
	    (rsm_intr_hand_arg_t)current->hdlr_argp);

	DBG_PRINTF((category, RSM_DEBUG, "rsmka_remove_adapter: unreg hdlr "
	    ":adapter=%lx, hwaddr=%lx\n", current, current->hwaddr));

	/* give back the controller hold taken by rsm_get_controller */
	rsm_cntl_obj.handle = current->rsmpi_handle;
	rsm_cntl_obj.ops = current->rsmpi_ops;

	(void) rsm_release_controller(current->listhead->adapter_devname,
	    current->instance, &rsm_cntl_obj);

	mutex_exit(&current->mutex);

	/* drops the ref_cnt and may free the descriptor */
	rsmka_release_adapter(current);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
	    "rsmka_remove_adapter done\n"));

	return (B_TRUE);
}
8557c478bd9Sstevel@tonic-gate 
8567c478bd9Sstevel@tonic-gate /*
8577c478bd9Sstevel@tonic-gate  * An adapter descriptor will exist from an earlier add_adapter. This
8587c478bd9Sstevel@tonic-gate  * function does:
8597c478bd9Sstevel@tonic-gate  *		initialize the path descriptor
8607c478bd9Sstevel@tonic-gate  *		initialize the ipc descriptor (it may already exist)
8617c478bd9Sstevel@tonic-gate  *		initialize and link a sendq token for this path
8627c478bd9Sstevel@tonic-gate  */
8637c478bd9Sstevel@tonic-gate void *
rsmka_add_path(char * adapter_name,int adapter_instance,rsm_node_id_t remote_node,rsm_addr_t remote_hwaddr,int rem_adapt_instance,void * cookie,int flags)8647c478bd9Sstevel@tonic-gate rsmka_add_path(char *adapter_name, int adapter_instance,
8657c478bd9Sstevel@tonic-gate     rsm_node_id_t remote_node,
8667c478bd9Sstevel@tonic-gate     rsm_addr_t remote_hwaddr, int rem_adapt_instance,
8677c478bd9Sstevel@tonic-gate     void *cookie, int flags)
8687c478bd9Sstevel@tonic-gate {
8697c478bd9Sstevel@tonic-gate 
8707c478bd9Sstevel@tonic-gate 	path_t			*path;
8717c478bd9Sstevel@tonic-gate 	adapter_t		*adapter;
8727c478bd9Sstevel@tonic-gate 	char			tq_name[TASKQ_NAMELEN];
8737c478bd9Sstevel@tonic-gate 
8747c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_path enter\n"));
8757c478bd9Sstevel@tonic-gate 
8767c478bd9Sstevel@tonic-gate 	/* allocate new path descriptor */
8777c478bd9Sstevel@tonic-gate 	path = kmem_zalloc(sizeof (path_t), KM_SLEEP);
8787c478bd9Sstevel@tonic-gate 
8797c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_USE_COOKIE) {
8807c478bd9Sstevel@tonic-gate 		adapter = (adapter_t *)cookie;
8817c478bd9Sstevel@tonic-gate 		ADAPTER_HOLD(adapter);
8827c478bd9Sstevel@tonic-gate 	} else {
8837c478bd9Sstevel@tonic-gate 		adapter = rsmka_lookup_adapter(adapter_name, adapter_instance);
8847c478bd9Sstevel@tonic-gate 	}
8857c478bd9Sstevel@tonic-gate 
8867c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
8877c478bd9Sstevel@tonic-gate 	    "rsmka_add_path: adapter = %lx\n", adapter));
8887c478bd9Sstevel@tonic-gate 
8897c478bd9Sstevel@tonic-gate 	/*
8907c478bd9Sstevel@tonic-gate 	 * initialize path descriptor
8917c478bd9Sstevel@tonic-gate 	 * don't need to increment adapter reference count because
8927c478bd9Sstevel@tonic-gate 	 * it can't be removed if paths exist for it.
8937c478bd9Sstevel@tonic-gate 	 */
8947c478bd9Sstevel@tonic-gate 	mutex_init(&path->mutex, NULL, MUTEX_DEFAULT, NULL);
8957c478bd9Sstevel@tonic-gate 
8967c478bd9Sstevel@tonic-gate 	PATH_HOLD(path);
8977c478bd9Sstevel@tonic-gate 	path->state = RSMKA_PATH_DOWN;
8987c478bd9Sstevel@tonic-gate 	path->remote_node = remote_node;
8997c478bd9Sstevel@tonic-gate 	path->remote_hwaddr = remote_hwaddr;
9007c478bd9Sstevel@tonic-gate 	path->remote_devinst = rem_adapt_instance;
9017c478bd9Sstevel@tonic-gate 	path->local_adapter = adapter;
9027c478bd9Sstevel@tonic-gate 
9037c478bd9Sstevel@tonic-gate 	/* taskq is for sendq on adapter with remote_hwaddr on remote_node */
9047c478bd9Sstevel@tonic-gate 	(void) snprintf(tq_name, sizeof (tq_name), "%x_%llx",
9057c478bd9Sstevel@tonic-gate 	    remote_node, (unsigned long long) remote_hwaddr);
9067c478bd9Sstevel@tonic-gate 
9077c478bd9Sstevel@tonic-gate 	path->recv_taskq = taskq_create_instance(tq_name, adapter_instance,
9087c478bd9Sstevel@tonic-gate 	    RSMKA_ONE_THREAD, maxclsyspri, RSMIPC_MAX_MESSAGES,
9097c478bd9Sstevel@tonic-gate 	    RSMIPC_MAX_MESSAGES, TASKQ_PREPOPULATE);
9107c478bd9Sstevel@tonic-gate 
9117c478bd9Sstevel@tonic-gate 	/* allocate the message buffer array */
9127c478bd9Sstevel@tonic-gate 	path->msgbuf_queue = (msgbuf_elem_t *)kmem_zalloc(
9137c478bd9Sstevel@tonic-gate 	    RSMIPC_MAX_MESSAGES * sizeof (msgbuf_elem_t), KM_SLEEP);
9147c478bd9Sstevel@tonic-gate 
9157c478bd9Sstevel@tonic-gate 	/*
9167c478bd9Sstevel@tonic-gate 	 * init cond variables for synch with rsmipc_send()
9177c478bd9Sstevel@tonic-gate 	 * and rsmka_remove_path
9187c478bd9Sstevel@tonic-gate 	 */
9197c478bd9Sstevel@tonic-gate 	cv_init(&path->sendq_token.sendq_cv, NULL, CV_DEFAULT, NULL);
9207c478bd9Sstevel@tonic-gate 	cv_init(&path->hold_cv, NULL, CV_DEFAULT, NULL);
9217c478bd9Sstevel@tonic-gate 
9227c478bd9Sstevel@tonic-gate 	/* link path descriptor on adapter path list */
9237c478bd9Sstevel@tonic-gate 	link_path(path);
9247c478bd9Sstevel@tonic-gate 
9257c478bd9Sstevel@tonic-gate 	/* link the path sendq token on the ipc_info token list */
9267c478bd9Sstevel@tonic-gate 	link_sendq_token(&path->sendq_token, remote_node);
9277c478bd9Sstevel@tonic-gate 
9287c478bd9Sstevel@tonic-gate 	/* ADAPTER_HOLD done above by rsmka_lookup_adapter */
9297c478bd9Sstevel@tonic-gate 	ADAPTER_RELE(adapter);
9307c478bd9Sstevel@tonic-gate 
9317c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG, "rsmka_add_path: path = %lx\n", path));
9327c478bd9Sstevel@tonic-gate 
9337c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_add_path done\n"));
9347c478bd9Sstevel@tonic-gate 	return ((void *)path);
9357c478bd9Sstevel@tonic-gate }
9367c478bd9Sstevel@tonic-gate 
9377c478bd9Sstevel@tonic-gate /*
9387c478bd9Sstevel@tonic-gate  * Wait for the path descriptor reference count to become zero then
9397c478bd9Sstevel@tonic-gate  * directly call path down processing.  Finally, unlink the sendq token and
9407c478bd9Sstevel@tonic-gate  * free the path descriptor memory.
9417c478bd9Sstevel@tonic-gate  *
9427c478bd9Sstevel@tonic-gate  * Note: lookup_path locks the path and increments the path hold count
9437c478bd9Sstevel@tonic-gate  */
/*
 * Tear down a path: cancel any queued deferred work, force the path
 * down if needed, wait for every reference to drain, then unlink the
 * sendq token and free the descriptor.  The path is identified either
 * by the cookie (RSMKA_USE_COOKIE) or by lookup; in the cookie case the
 * remote_node/remote_hwaddr arguments are ignored (see NOTE below).
 */
void
rsmka_remove_path(char *adapter_name, int instance, rsm_node_id_t remote_node,
    rsm_addr_t remote_hwaddr, void *path_cookie, int flags)
{
	path_t		*path;

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_remove_path enter\n"));

	if (flags & RSMKA_USE_COOKIE) {
		path = (path_t *)path_cookie;
		mutex_enter(&path->mutex);
	} else {
		path = lookup_path(adapter_name, instance,  remote_node,
		    remote_hwaddr);

		/*
		 * remember, lookup_path increments the reference
		 * count - so decrement now so we can get to zero
		 */
		PATH_RELE_NOLOCK(path);
	}

	/* in both branches path->mutex is now held, balancing the refcnt */

	DBG_PRINTF((category, RSM_DEBUG,
	    "rsmka_remove_path: path = %lx\n", path));

	/* let any in-flight path-down processing finish first */
	while (path->state == RSMKA_PATH_GOING_DOWN)
		cv_wait(&path->hold_cv, &path->mutex);

	/* attempt to cancel any possibly pending work */
	mutex_enter(&work_queue.work_mutex);
	/* each cancelled token owned a path hold; release it (see cancel_work) */
	if (cancel_work(&path->work_token[RSMKA_IPC_UP_INDEX])) {
		PATH_RELE_NOLOCK(path);
	}
	if (cancel_work(&path->work_token[RSMKA_IPC_DOWN_INDEX])) {
		PATH_RELE_NOLOCK(path);
	}
	mutex_exit(&work_queue.work_mutex);

	/*
	 * The path descriptor ref cnt was set to 1 initially when
	 * the path was added.  So we need to do a decrement here to
	 * balance that.
	 */
	PATH_RELE_NOLOCK(path);

	switch (path->state) {
	case RSMKA_PATH_UP:
		/* clear the flag */
		path->flags &= ~RSMKA_SQCREATE_PENDING;
		path->state = RSMKA_PATH_DOWN;
		break;
	case RSMKA_PATH_DOWN:
		break;

	case RSMKA_PATH_ACTIVE:
		/*
		 * rsmka_remove_path should not call do_path_down
		 * with the RSMKA_NO_SLEEP flag set since for
		 * this code path, the deferred work would
		 * incorrectly do a PATH_RELE_NOLOCK.
		 */
		do_path_down(path, 0);
		break;
	default:
		/* unknown state: leave the path alone and bail out */
		mutex_exit(&path->mutex);
		DBG_PRINTF((category, RSM_ERR,
		    "rsm_remove_path: invalid path state %d\n",
		    path->state));
		return;

	}

	/*
	 * wait for all references to the path to be released. If a thread
	 * was waiting to receive credits do_path_down should wake it up
	 * since the path is going down and that will cause the sleeping
	 * thread to release its hold on the path.
	 */
	while (path->ref_cnt != 0) {
		cv_wait(&path->hold_cv, &path->mutex);
	}

	mutex_exit(&path->mutex);

	/*
	 * remove from ipc token list
	 * NOTE: use the remote_node value from the path structure
	 * since for RSMKA_USE_COOKIE being set, the remote_node
	 * value passed into rsmka_remove_path is 0.
	 */
	unlink_sendq_token(&path->sendq_token, path->remote_node);

	/* unlink from adapter path list and free path descriptor */
	destroy_path(path);

	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_remove_path done\n"));
}
10417c478bd9Sstevel@tonic-gate 
10427c478bd9Sstevel@tonic-gate /*
10437c478bd9Sstevel@tonic-gate  *
10447c478bd9Sstevel@tonic-gate  * LOCKING:
10457c478bd9Sstevel@tonic-gate  * lookup_path locks the path and increments the path hold count. If the remote
10467c478bd9Sstevel@tonic-gate  * node is not in the alive state, do_path_up will release the lock and
10477c478bd9Sstevel@tonic-gate  * decrement the hold count.  Otherwise rsmka_do_path_active will release the
10487c478bd9Sstevel@tonic-gate  * lock prior to waking up the work thread.
10497c478bd9Sstevel@tonic-gate  *
10507c478bd9Sstevel@tonic-gate  * REF_CNT:
10517c478bd9Sstevel@tonic-gate  * The path descriptor ref_cnt is incremented here; it will be decremented
10527c478bd9Sstevel@tonic-gate  * when path up processing is completed in do_path_up or by the work thread
10537c478bd9Sstevel@tonic-gate  * if the path up is deferred.
10547c478bd9Sstevel@tonic-gate  *
10557c478bd9Sstevel@tonic-gate  */
10567c478bd9Sstevel@tonic-gate boolean_t
rsmka_path_up(char * adapter_name,uint_t adapter_instance,rsm_node_id_t remote_node,rsm_addr_t remote_hwaddr,void * path_cookie,int flags)10577c478bd9Sstevel@tonic-gate rsmka_path_up(char *adapter_name, uint_t adapter_instance,
10587c478bd9Sstevel@tonic-gate     rsm_node_id_t remote_node, rsm_addr_t remote_hwaddr,
10597c478bd9Sstevel@tonic-gate     void *path_cookie, int flags)
10607c478bd9Sstevel@tonic-gate {
10617c478bd9Sstevel@tonic-gate 
10627c478bd9Sstevel@tonic-gate 	path_t			*path;
10637c478bd9Sstevel@tonic-gate 	boolean_t		rval = B_TRUE;
10647c478bd9Sstevel@tonic-gate 
10657c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_up enter\n"));
10667c478bd9Sstevel@tonic-gate 
	/*
	 * Obtain the path descriptor either directly from the caller-supplied
	 * cookie or by lookup.  In the cookie case we take the mutex and a
	 * hold explicitly here.
	 * NOTE(review): lookup_path is presumed to return with path->mutex
	 * held and the hold count already incremented (per the block comment
	 * above) -- confirm against lookup_path's definition.
	 */
10677c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_USE_COOKIE) {
10687c478bd9Sstevel@tonic-gate 		path = (path_t *)path_cookie;
10697c478bd9Sstevel@tonic-gate 		mutex_enter(&path->mutex);
10707c478bd9Sstevel@tonic-gate 		PATH_HOLD_NOLOCK(path);
10717c478bd9Sstevel@tonic-gate 	} else {
10727c478bd9Sstevel@tonic-gate 		path = lookup_path(adapter_name, adapter_instance,
10737c478bd9Sstevel@tonic-gate 		    remote_node, remote_hwaddr);
10747c478bd9Sstevel@tonic-gate 	}
10757c478bd9Sstevel@tonic-gate 
	/*
	 * If a path-down is still in progress, wait for it to complete
	 * before attempting to bring the path up.  cv_wait drops and
	 * reacquires path->mutex, so the state must be rechecked each
	 * iteration.
	 */
10767c478bd9Sstevel@tonic-gate 	while (path->state == RSMKA_PATH_GOING_DOWN)
10777c478bd9Sstevel@tonic-gate 		cv_wait(&path->hold_cv, &path->mutex);
10787c478bd9Sstevel@tonic-gate 
10797c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG, "rsmka_path_up: path = %lx\n", path));
	/* do_path_up releases the hold (or defers that to the work thread) */
10807c478bd9Sstevel@tonic-gate 	rval = do_path_up(path, flags);
10817c478bd9Sstevel@tonic-gate 	mutex_exit(&path->mutex);
10827c478bd9Sstevel@tonic-gate 
10837c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_up done\n"));
10847c478bd9Sstevel@tonic-gate 	return (rval);
10857c478bd9Sstevel@tonic-gate }
10867c478bd9Sstevel@tonic-gate 
10877c478bd9Sstevel@tonic-gate /*
10887c478bd9Sstevel@tonic-gate  *
10897c478bd9Sstevel@tonic-gate  * LOCKING:
10907c478bd9Sstevel@tonic-gate  * lookup_path locks the path and increments the path hold count. If the
10917c478bd9Sstevel@tonic-gate  * current state is ACTIVE, the path lock is released prior to waking up
10927c478bd9Sstevel@tonic-gate  * the work thread in do_path_down.  The work thread will decrement the hold
10937c478bd9Sstevel@tonic-gate  * count when the work for this is finished.
10947c478bd9Sstevel@tonic-gate  *
10957c478bd9Sstevel@tonic-gate  *
10967c478bd9Sstevel@tonic-gate  * REF_CNT:
10977c478bd9Sstevel@tonic-gate  * The path descriptor ref_cnt is incremented here; it will be decremented
10987c478bd9Sstevel@tonic-gate  * when path down processing is completed in do_path_down or by the work thread
10997c478bd9Sstevel@tonic-gate  * if the path down is deferred.
11007c478bd9Sstevel@tonic-gate  *
11017c478bd9Sstevel@tonic-gate  */
11027c478bd9Sstevel@tonic-gate boolean_t
rsmka_path_down(char * adapter_devname,int instance,rsm_node_id_t remote_node,rsm_addr_t remote_hwaddr,void * path_cookie,int flags)11037c478bd9Sstevel@tonic-gate rsmka_path_down(char *adapter_devname, int instance, rsm_node_id_t remote_node,
11047c478bd9Sstevel@tonic-gate     rsm_addr_t remote_hwaddr,  void *path_cookie, int flags)
11057c478bd9Sstevel@tonic-gate {
11067c478bd9Sstevel@tonic-gate 	path_t			*path;
11077c478bd9Sstevel@tonic-gate 	boolean_t		rval = B_TRUE;
11087c478bd9Sstevel@tonic-gate 
11097c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_down enter\n"));
11107c478bd9Sstevel@tonic-gate 
	/*
	 * Obtain the path descriptor either directly from the caller-supplied
	 * cookie or by lookup; either way we proceed with path->mutex held
	 * and a hold (ref_cnt) taken on the path.
	 */
11117c478bd9Sstevel@tonic-gate 	if (flags & RSMKA_USE_COOKIE) {
11127c478bd9Sstevel@tonic-gate 		path = (path_t *)path_cookie;
11137c478bd9Sstevel@tonic-gate 		mutex_enter(&path->mutex);
11147c478bd9Sstevel@tonic-gate 		PATH_HOLD_NOLOCK(path);
11157c478bd9Sstevel@tonic-gate 	} else {
11167c478bd9Sstevel@tonic-gate 		path = lookup_path(adapter_devname, instance, remote_node,
11177c478bd9Sstevel@tonic-gate 		    remote_hwaddr);
11187c478bd9Sstevel@tonic-gate 	}
11197c478bd9Sstevel@tonic-gate 
	/* Wait out any path-down already in progress before proceeding */
11207c478bd9Sstevel@tonic-gate 	while (path->state == RSMKA_PATH_GOING_DOWN)
11217c478bd9Sstevel@tonic-gate 		cv_wait(&path->hold_cv, &path->mutex);
11227c478bd9Sstevel@tonic-gate 
11237c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
11247c478bd9Sstevel@tonic-gate 	    "rsmka_path_down: path = %lx\n", path));
11257c478bd9Sstevel@tonic-gate 
11267c478bd9Sstevel@tonic-gate 	switch (path->state) {
11277c478bd9Sstevel@tonic-gate 	case RSMKA_PATH_UP:
11287c478bd9Sstevel@tonic-gate 		/* clear the flag */
11297c478bd9Sstevel@tonic-gate 		path->flags &= ~RSMKA_SQCREATE_PENDING;
		/*
		 * Transition through GOING_DOWN so that concurrent
		 * rsmka_path_up/rsmka_path_down callers block in the
		 * cv_wait loop above until this teardown completes.
		 */
11307c478bd9Sstevel@tonic-gate 		path->state = RSMKA_PATH_GOING_DOWN;
11317c478bd9Sstevel@tonic-gate 		mutex_exit(&path->mutex);
11327c478bd9Sstevel@tonic-gate 
11337c478bd9Sstevel@tonic-gate 		/*
11347c478bd9Sstevel@tonic-gate 		 * release path->mutex since enqueued tasks acquire it.
11357c478bd9Sstevel@tonic-gate 		 * Drain all the enqueued tasks.
11367c478bd9Sstevel@tonic-gate 		 */
11377c478bd9Sstevel@tonic-gate 		taskq_wait(path->recv_taskq);
11387c478bd9Sstevel@tonic-gate 
11397c478bd9Sstevel@tonic-gate 		mutex_enter(&path->mutex);
11407c478bd9Sstevel@tonic-gate 		path->state = RSMKA_PATH_DOWN;
		/* drop the hold taken on entry; teardown is complete */
11417c478bd9Sstevel@tonic-gate 		PATH_RELE_NOLOCK(path);
11427c478bd9Sstevel@tonic-gate 		break;
11437c478bd9Sstevel@tonic-gate 	case RSMKA_PATH_DOWN:
		/* already down -- nothing to do but drop our hold */
11447c478bd9Sstevel@tonic-gate 		PATH_RELE_NOLOCK(path);
11457c478bd9Sstevel@tonic-gate 		break;
11467c478bd9Sstevel@tonic-gate 	case RSMKA_PATH_ACTIVE:
11477c478bd9Sstevel@tonic-gate 		do_path_down(path, flags);
11487c478bd9Sstevel@tonic-gate 		/*
11497c478bd9Sstevel@tonic-gate 		 * Need to release the path refcnt. Either done in do_path_down
11507c478bd9Sstevel@tonic-gate 		 * or do_deferred_work for RSMKA_NO_SLEEP being set. Has to be
11517c478bd9Sstevel@tonic-gate 		 * done here for RSMKA_NO_SLEEP not set.
11527c478bd9Sstevel@tonic-gate 		 */
11537c478bd9Sstevel@tonic-gate 		if (!(flags & RSMKA_NO_SLEEP))
11547c478bd9Sstevel@tonic-gate 			PATH_RELE_NOLOCK(path);
11557c478bd9Sstevel@tonic-gate 		break;
11567c478bd9Sstevel@tonic-gate 	default:
		/*
		 * NOTE(review): the hold taken on entry is NOT released on
		 * this error path, which would leak a reference if an
		 * invalid state were ever observed -- confirm whether this
		 * is intentional (state may be provably limited to the
		 * cases above).
		 */
11577c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_ERR,
11587c478bd9Sstevel@tonic-gate 		    "rsm_path_down: invalid path state %d\n", path->state));
11597c478bd9Sstevel@tonic-gate 		rval = B_FALSE;
11607c478bd9Sstevel@tonic-gate 	}
11617c478bd9Sstevel@tonic-gate 
11627c478bd9Sstevel@tonic-gate 	mutex_exit(&path->mutex);
11637c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_path_down done\n"));
11647c478bd9Sstevel@tonic-gate 	return (rval);
11657c478bd9Sstevel@tonic-gate }
11667c478bd9Sstevel@tonic-gate 
11677c478bd9Sstevel@tonic-gate 
11687c478bd9Sstevel@tonic-gate /*
11697c478bd9Sstevel@tonic-gate  * Paths cannot become active until node_is_alive is marked true
11707c478bd9Sstevel@tonic-gate  * in the ipc_info descriptor for the node
11717c478bd9Sstevel@tonic-gate  *
11727c478bd9Sstevel@tonic-gate  * In the event this is called before any paths have been added,
11737c478bd9Sstevel@tonic-gate  * init_ipc_info is called here.
11747c478bd9Sstevel@tonic-gate  *
11757c478bd9Sstevel@tonic-gate  */
11767c478bd9Sstevel@tonic-gate boolean_t
rsmka_node_alive(rsm_node_id_t remote_node)11777c478bd9Sstevel@tonic-gate rsmka_node_alive(rsm_node_id_t remote_node)
11787c478bd9Sstevel@tonic-gate {
11797c478bd9Sstevel@tonic-gate 	ipc_info_t *ipc_info;
11807c478bd9Sstevel@tonic-gate 
11817c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_alive enter\n"));
11827c478bd9Sstevel@tonic-gate 
11837c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG,
11847c478bd9Sstevel@tonic-gate 	    "rsmka_node_alive: remote_node = %x\n", remote_node));
11857c478bd9Sstevel@tonic-gate 
	/*
	 * NOTE(review): lookup_ipc_info (and init_ipc_info) apparently
	 * return with ipc_info_lock held -- the lock is released below
	 * without a visible mutex_enter.  Confirm against their
	 * definitions.
	 */
11867c478bd9Sstevel@tonic-gate 	ipc_info = lookup_ipc_info(remote_node);
11877c478bd9Sstevel@tonic-gate 
11887c478bd9Sstevel@tonic-gate 	if (ipc_info == NULL) {
		/* first contact with this node: create its descriptor alive */
11897c478bd9Sstevel@tonic-gate 		ipc_info = init_ipc_info(remote_node, B_TRUE);
11907c478bd9Sstevel@tonic-gate 		DBG_PRINTF((category, RSM_DEBUG,
11917c478bd9Sstevel@tonic-gate 		    "rsmka_node_alive: new ipc_info = %lx\n", ipc_info));
11927c478bd9Sstevel@tonic-gate 	} else {
11937c478bd9Sstevel@tonic-gate 		ASSERT(ipc_info->node_is_alive == B_FALSE);
11947c478bd9Sstevel@tonic-gate 		ipc_info->node_is_alive = B_TRUE;
11957c478bd9Sstevel@tonic-gate 	}
11967c478bd9Sstevel@tonic-gate 
	/* promote any paths in the UP state to ACTIVE now that node is alive */
11977c478bd9Sstevel@tonic-gate 	pathup_to_pathactive(ipc_info, remote_node);
11987c478bd9Sstevel@tonic-gate 
11997c478bd9Sstevel@tonic-gate 	mutex_exit(&ipc_info_lock);
12007c478bd9Sstevel@tonic-gate 
12017c478bd9Sstevel@tonic-gate 	/* rsmipc_send() may be waiting for a sendq_token */
12027c478bd9Sstevel@tonic-gate 	mutex_enter(&ipc_info_cvlock);
12037c478bd9Sstevel@tonic-gate 	cv_broadcast(&ipc_info_cv);
12047c478bd9Sstevel@tonic-gate 	mutex_exit(&ipc_info_cvlock);
12057c478bd9Sstevel@tonic-gate 
12067c478bd9Sstevel@tonic-gate 	DBG_PRINTF((category, RSM_DEBUG_VERBOSE, "rsmka_node_alive done\n"));
12077c478bd9Sstevel@tonic-gate 
12087c478bd9Sstevel@tonic-gate 	return (B_TRUE);
12097c478bd9Sstevel@tonic-gate }
12107c478bd9Sstevel@tonic-gate 
12117c478bd9Sstevel@tonic-gate 
12127c478bd9Sstevel@tonic-gate 
12137c478bd9Sstevel@tonic-gate /*
12147c478bd9Sstevel@tonic-gate  * Paths cannot become active when node_is_alive is marked false
12157c478bd9Sstevel@tonic-gate  * in the ipc_info descriptor for the node
12167c478bd9Sstevel@tonic-gate  */
12177c478bd9Sstevel@tonic-gate boolean_t
rsmka_node_died(rsm_node_