xref: /illumos-gate/usr/src/uts/common/io/idm/idm_impl.c (revision d4b0f847)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/conf.h>
28 #include <sys/file.h>
29 #include <sys/ddi.h>
30 #include <sys/sunddi.h>
31 #include <sys/cpuvar.h>
32 #include <sys/sdt.h>
33 
34 #include <sys/socket.h>
35 #include <sys/strsubr.h>
36 #include <sys/socketvar.h>
37 #include <sys/sysmacros.h>
38 
39 #include <sys/idm/idm.h>
40 #include <sys/idm/idm_so.h>
41 #include <hd_crc.h>
42 
43 extern idm_transport_t  idm_transport_list[];
44 /*
45  * -1 - uninitialized (probed on first use)
46  * 0  - hardware CRC32c is available and will be used
47  * others - hardware CRC32c is not available; use the software table
48  */
49 static int iscsi_crc32_hd = -1;
50 
51 void
52 idm_pdu_rx(idm_conn_t *ic, idm_pdu_t *pdu)
53 {
54 	iscsi_async_evt_hdr_t *async_evt;
55 
56 	/*
57 	 * If we are in full-featured mode then route SCSI-related
58 	 * commands to the appropriate function vector
59 	 */
60 	ic->ic_timestamp = ddi_get_lbolt();
61 	mutex_enter(&ic->ic_state_mutex);
62 	if (ic->ic_ffp && ic->ic_pdu_events == 0) {
63 		mutex_exit(&ic->ic_state_mutex);
64 
65 		if (idm_pdu_rx_forward_ffp(ic, pdu) == B_TRUE) {
66 			/* Forwarded SCSI-related commands */
67 			return;
68 		}
69 		mutex_enter(&ic->ic_state_mutex);
70 	}
71 
72 	/*
73 	 * If we get here with a SCSI-related PDU then we are not in
74 	 * full-feature mode and the PDU is a protocol error (SCSI command
75 	 * PDU's may sometimes be an exception, see below).  All
76  * non-SCSI PDU's are treated the same regardless of whether
77 	 * we are in full-feature mode.
78 	 *
79 	 * Look at the opcode and in some cases the PDU status and
80 	 * determine the appropriate event to send to the connection
81 	 * state machine.  Generate the event, passing the PDU as data.
82 	 * If the current connection state allows reception of the event
83 	 * the PDU will be submitted to the IDM client for processing,
84 	 * otherwise the PDU will be dropped.
85 	 */
86 	switch (IDM_PDU_OPCODE(pdu)) {
87 	case ISCSI_OP_LOGIN_CMD:
88 		DTRACE_ISCSI_2(login__command, idm_conn_t *, ic,
89 		    iscsi_login_hdr_t *, (iscsi_login_hdr_t *)pdu->isp_hdr);
90 		idm_conn_rx_pdu_event(ic, CE_LOGIN_RCV, (uintptr_t)pdu);
91 		break;
92 	case ISCSI_OP_LOGIN_RSP:
93 		idm_parse_login_rsp(ic, pdu, /* RX */ B_TRUE);
94 		break;
95 	case ISCSI_OP_LOGOUT_CMD:
96 		DTRACE_ISCSI_2(logout__command, idm_conn_t *, ic,
97 		    iscsi_logout_hdr_t *,
98 		    (iscsi_logout_hdr_t *)pdu->isp_hdr);
99 		idm_parse_logout_req(ic, pdu, /* RX */ B_TRUE);
100 		break;
101 	case ISCSI_OP_LOGOUT_RSP:
102 		idm_parse_logout_rsp(ic, pdu, /* RX */ B_TRUE);
103 		break;
104 	case ISCSI_OP_ASYNC_EVENT:
105 		async_evt = (iscsi_async_evt_hdr_t *)pdu->isp_hdr;
106 		switch (async_evt->async_event) {
107 		case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT:
108 			idm_conn_rx_pdu_event(ic, CE_ASYNC_LOGOUT_RCV,
109 			    (uintptr_t)pdu);
110 			break;
111 		case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION:
112 			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_CONN_RCV,
113 			    (uintptr_t)pdu);
114 			break;
115 		case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS:
116 			idm_conn_rx_pdu_event(ic, CE_ASYNC_DROP_ALL_CONN_RCV,
117 			    (uintptr_t)pdu);
118 			break;
119 		case ISCSI_ASYNC_EVENT_SCSI_EVENT:
120 		case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION:
121 		default:
122 			idm_conn_rx_pdu_event(ic, CE_MISC_RX,
123 			    (uintptr_t)pdu);
124 			break;
125 		}
126 		break;
127 	case ISCSI_OP_SCSI_CMD:
128 		/*
129 		 * Consider this scenario:  We are a target connection
130 		 * in "in login" state and a "login success sent" event has
131 		 * been generated but not yet handled.  Since we've sent
132 		 * the login response but we haven't actually transitioned
133 		 * to FFP mode we might conceivably receive a SCSI command
134 		 * from the initiator before we are ready.  We are actually
135 		 * in FFP; we just don't know it yet -- to address this we
136 		 * can generate an event corresponding to the SCSI command.
137 		 * At the point when the event is handled by the state
138 		 * machine the login request will have been handled and we
139 		 * should be in FFP.  If we are not in FFP by that time
140 		 * we can reject the SCSI command with a protocol error.
141 		 *
142 		 * This scenario only applies to the target.
143 		 *
144 		 * Handle dtrace probe in iscsit so we can find all the
145 		 * pieces of the CDB
146 		 */
147 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
148 		break;
149 	case ISCSI_OP_SCSI_DATA:
150 		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
151 		    iscsi_data_hdr_t *,
152 		    (iscsi_data_hdr_t *)pdu->isp_hdr);
153 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
154 		break;
155 	case ISCSI_OP_SCSI_TASK_MGT_MSG:
156 		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
157 		    iscsi_scsi_task_mgt_hdr_t *,
158 		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
159 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
160 		break;
161 	case ISCSI_OP_NOOP_OUT:
162 		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
163 		    iscsi_nop_out_hdr_t *,
164 		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
165 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
166 		break;
167 	case ISCSI_OP_TEXT_CMD:
168 		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
169 		    iscsi_text_hdr_t *,
170 		    (iscsi_text_hdr_t *)pdu->isp_hdr);
171 		idm_conn_rx_pdu_event(ic, CE_MISC_RX, (uintptr_t)pdu);
172 		break;
173 	/* Initiator PDU's */
174 	case ISCSI_OP_SCSI_DATA_RSP:
175 	case ISCSI_OP_RTT_RSP:
176 	case ISCSI_OP_SNACK_CMD:
177 	case ISCSI_OP_NOOP_IN:
178 	case ISCSI_OP_TEXT_RSP:
179 	case ISCSI_OP_REJECT_MSG:
180 	case ISCSI_OP_SCSI_TASK_MGT_RSP:
181 		/* Validate received PDU against current state */
182 		idm_conn_rx_pdu_event(ic, CE_MISC_RX,
183 		    (uintptr_t)pdu);
184 		break;
185 	}
186 	mutex_exit(&ic->ic_state_mutex);
187 }
188 
189 void
190 idm_pdu_tx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
191 {
192 	(*ic->ic_transport_ops->it_tx_pdu)(ic, pdu);
193 }
194 
195 boolean_t
196 idm_pdu_rx_forward_ffp(idm_conn_t *ic, idm_pdu_t *pdu)
197 {
198 	/*
199 	 * If this is an FFP request, call the appropriate handler
200 	 * and return B_TRUE, otherwise return B_FALSE.
201 	 */
202 	switch (IDM_PDU_OPCODE(pdu)) {
203 	case ISCSI_OP_SCSI_CMD:
204 		(*ic->ic_conn_ops.icb_rx_scsi_cmd)(ic, pdu);
205 		return (B_TRUE);
206 	case ISCSI_OP_SCSI_DATA:
207 		DTRACE_ISCSI_2(data__receive, idm_conn_t *, ic,
208 		    iscsi_data_hdr_t *,
209 		    (iscsi_data_hdr_t *)pdu->isp_hdr);
210 		(*ic->ic_transport_ops->it_rx_dataout)(ic, pdu);
211 		return (B_TRUE);
212 	case ISCSI_OP_SCSI_TASK_MGT_MSG:
213 		DTRACE_ISCSI_2(task__command, idm_conn_t *, ic,
214 		    iscsi_scsi_task_mgt_hdr_t *,
215 		    (iscsi_scsi_task_mgt_hdr_t *)pdu->isp_hdr);
216 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
217 		return (B_TRUE);
218 	case ISCSI_OP_NOOP_OUT:
219 		DTRACE_ISCSI_2(nop__receive, idm_conn_t *, ic,
220 		    iscsi_nop_out_hdr_t *,
221 		    (iscsi_nop_out_hdr_t *)pdu->isp_hdr);
222 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
223 		return (B_TRUE);
224 	case ISCSI_OP_TEXT_CMD:
225 		DTRACE_ISCSI_2(text__command, idm_conn_t *, ic,
226 		    iscsi_text_hdr_t *,
227 		    (iscsi_text_hdr_t *)pdu->isp_hdr);
228 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
229 		return (B_TRUE);
230 		/* Initiator only */
231 	case ISCSI_OP_SCSI_RSP:
232 		(*ic->ic_conn_ops.icb_rx_scsi_rsp)(ic, pdu);
233 		return (B_TRUE);
234 	case ISCSI_OP_SCSI_DATA_RSP:
235 		(*ic->ic_transport_ops->it_rx_datain)(ic, pdu);
236 		return (B_TRUE);
237 	case ISCSI_OP_RTT_RSP:
238 		(*ic->ic_transport_ops->it_rx_rtt)(ic, pdu);
239 		return (B_TRUE);
240 	case ISCSI_OP_SCSI_TASK_MGT_RSP:
241 	case ISCSI_OP_TEXT_RSP:
242 	case ISCSI_OP_NOOP_IN:
243 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
244 		return (B_TRUE);
245 	default:
246 		return (B_FALSE);
247 	}
248 	/*NOTREACHED*/
249 }
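/*
 * Editorial sketch (not part of the driver): the dispatch above assumes the
 * client filled in its callback vector when the connection was created.  A
 * hedged example of what that wiring might look like follows; the my_*
 * functions are hypothetical client callbacks and only the fields referenced
 * in this file are shown.
 */
#if 0
	idm_conn_ops_t	example_ops;

	bzero(&example_ops, sizeof (example_ops));
	example_ops.icb_rx_scsi_cmd = my_rx_scsi_cmd;	/* hypothetical */
	example_ops.icb_rx_scsi_rsp = my_rx_scsi_rsp;	/* hypothetical */
	example_ops.icb_rx_misc = my_rx_misc;		/* hypothetical */
	example_ops.icb_keepalive = my_keepalive;	/* hypothetical */
#endif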
250 
251 void
252 idm_pdu_rx_forward(idm_conn_t *ic, idm_pdu_t *pdu)
253 {
254 	/*
255 	 * Some PDU's specific to FFP get special handling.  This function
256 	 * will normally never be called in FFP with an FFP PDU since this
257 	 * is a slow path but it can happen on the target side during
258 	 * the transition to FFP.  We primarily call
259 	 * idm_pdu_rx_forward_ffp here to avoid code duplication.
260 	 */
261 	if (idm_pdu_rx_forward_ffp(ic, pdu) == B_FALSE) {
262 		/*
263 		 * Non-FFP PDU, use generic RX handler
264 		 */
265 		(*ic->ic_conn_ops.icb_rx_misc)(ic, pdu);
266 	}
267 }
268 
269 void
270 idm_parse_login_rsp(idm_conn_t *ic, idm_pdu_t *login_rsp_pdu, boolean_t rx)
271 {
272 	iscsi_login_rsp_hdr_t	*login_rsp =
273 	    (iscsi_login_rsp_hdr_t *)login_rsp_pdu->isp_hdr;
274 	idm_conn_event_t	new_event;
275 
276 	if (login_rsp->status_class == ISCSI_STATUS_CLASS_SUCCESS) {
277 		if (!(login_rsp->flags & ISCSI_FLAG_LOGIN_CONTINUE) &&
278 		    (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) &&
279 		    (ISCSI_LOGIN_NEXT_STAGE(login_rsp->flags) ==
280 		    ISCSI_FULL_FEATURE_PHASE)) {
281 			new_event = (rx ? CE_LOGIN_SUCCESS_RCV :
282 			    CE_LOGIN_SUCCESS_SND);
283 		} else {
284 			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
285 		}
286 	} else {
287 		new_event = (rx ? CE_LOGIN_FAIL_RCV : CE_LOGIN_FAIL_SND);
288 	}
289 
290 	if (rx) {
291 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
292 	} else {
293 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)login_rsp_pdu);
294 	}
295 }
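/*
 * Editorial sketch (not referenced by the driver): the flag test above only
 * treats a login response as a login success when it is the final PDU of the
 * login phase, i.e. the Continue bit is clear, the Transit bit is set and the
 * next stage is Full Feature Phase.  Expressed as a standalone predicate
 * using the same protocol constants:
 */
static boolean_t
idm_login_rsp_is_final_success(iscsi_login_rsp_hdr_t *lh)
{
	if (lh->status_class != ISCSI_STATUS_CLASS_SUCCESS)
		return (B_FALSE);
	if (lh->flags & ISCSI_FLAG_LOGIN_CONTINUE)
		return (B_FALSE);
	if (!(lh->flags & ISCSI_FLAG_LOGIN_TRANSIT))
		return (B_FALSE);
	return (ISCSI_LOGIN_NEXT_STAGE(lh->flags) == ISCSI_FULL_FEATURE_PHASE ?
	    B_TRUE : B_FALSE);
}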
296 
297 
298 void
299 idm_parse_logout_req(idm_conn_t *ic, idm_pdu_t *logout_req_pdu, boolean_t rx)
300 {
301 	iscsi_logout_hdr_t 	*logout_req =
302 	    (iscsi_logout_hdr_t *)logout_req_pdu->isp_hdr;
303 	idm_conn_event_t	new_event;
304 	uint8_t			reason =
305 	    (logout_req->flags & ISCSI_FLAG_LOGOUT_REASON_MASK);
306 
307 	/*
308 	 *	For a normal logout (close connection or close session) IDM
309 	 *	will terminate processing of all tasks, completing the tasks
310 	 *	back to the client with a status indicating the connection
311 	 *	was logged out.  These tasks do not complete normally.
312 	 *
313 	 *	For a "close connection for recovery" logout, IDM suspends
314 	 *	processing of all tasks and completes them back to the client
315 	 *	with a status indicating connection was logged out for
316 	 *	recovery.  Both initiator and target hang onto these tasks.
317 	 *	When we add ERL2 support IDM will need to provide mechanisms
318 	 *	to change the task and buffer associations to a new connection.
319 	 *
320 	 *	This code doesn't address the possibility of MC/S.  We'll
321 	 *	need to decide how the separate connections get handled
322 	 *	in that case.  One simple option is to make the client
323 	 *	generate the events for the other connections.
324 	 */
325 	if (reason == ISCSI_LOGOUT_REASON_CLOSE_SESSION) {
326 		new_event =
327 		    (rx ? CE_LOGOUT_SESSION_RCV : CE_LOGOUT_SESSION_SND);
328 	} else if ((reason == ISCSI_LOGOUT_REASON_CLOSE_CONNECTION) ||
329 	    (reason == ISCSI_LOGOUT_REASON_RECOVERY)) {
330 		/* Check logout CID against this connection's CID */
331 		if (ntohs(logout_req->cid) == ic->ic_login_cid) {
332 			/* Logout is for this connection */
333 			new_event = (rx ? CE_LOGOUT_THIS_CONN_RCV :
334 			    CE_LOGOUT_THIS_CONN_SND);
335 		} else {
336 			/*
337 			 * Logout affects another connection.  This is not
338 			 * a relevant event for this connection so we'll
339 			 * just treat it as a normal PDU event.  Client
340 			 * will need to lookup the other connection and
341 			 * generate the event.
342 			 */
343 			new_event = (rx ? CE_MISC_RX : CE_MISC_TX);
344 		}
345 	} else {
346 		/* Invalid reason code */
347 		new_event = (rx ? CE_RX_PROTOCOL_ERROR : CE_TX_PROTOCOL_ERROR);
348 	}
349 
350 	if (rx) {
351 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
352 	} else {
353 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_req_pdu);
354 	}
355 }
356 
357 
358 
359 void
360 idm_parse_logout_rsp(idm_conn_t *ic, idm_pdu_t *logout_rsp_pdu, boolean_t rx)
361 {
362 	idm_conn_event_t	new_event;
363 	iscsi_logout_rsp_hdr_t *logout_rsp =
364 	    (iscsi_logout_rsp_hdr_t *)logout_rsp_pdu->isp_hdr;
365 
366 	if (logout_rsp->response == ISCSI_STATUS_CLASS_SUCCESS) {
367 		new_event = rx ? CE_LOGOUT_SUCCESS_RCV : CE_LOGOUT_SUCCESS_SND;
368 	} else {
369 		new_event = rx ? CE_LOGOUT_FAIL_RCV : CE_LOGOUT_FAIL_SND;
370 	}
371 
372 	if (rx) {
373 		idm_conn_rx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
374 	} else {
375 		idm_conn_tx_pdu_event(ic, new_event, (uintptr_t)logout_rsp_pdu);
376 	}
377 }
378 
379 /*
380  * idm_svc_conn_create()
381  * Transport-agnostic service connection creation, invoked from the transport
382  * layer.
383  */
384 idm_status_t
385 idm_svc_conn_create(idm_svc_t *is, idm_transport_type_t tt,
386     idm_conn_t **ic_result)
387 {
388 	idm_conn_t	*ic;
389 	idm_status_t	rc;
390 
391 	mutex_enter(&is->is_mutex);
392 	if (!is->is_online) {
393 		mutex_exit(&is->is_mutex);
394 		return (IDM_STATUS_FAIL);
395 	}
396 	mutex_exit(&is->is_mutex);
397 
398 	ic = idm_conn_create_common(CONN_TYPE_TGT, tt,
399 	    &is->is_svc_req.sr_conn_ops);
400 	ic->ic_svc_binding = is;
401 
402 	/*
403 	 * Prepare connection state machine
404 	 */
405 	if ((rc = idm_conn_sm_init(ic)) != 0) {
406 		idm_conn_destroy_common(ic);
407 		return (rc);
408 	}
409 
410 
411 	*ic_result = ic;
412 
413 	mutex_enter(&idm.idm_global_mutex);
414 	list_insert_tail(&idm.idm_tgt_conn_list, ic);
415 	idm.idm_tgt_conn_count++;
416 	mutex_exit(&idm.idm_global_mutex);
417 
418 	return (IDM_STATUS_SUCCESS);
419 }
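/*
 * Editorial sketch (hypothetical transport code, not part of IDM): a
 * transport that has just accepted an inbound connection asks IDM for a
 * target-side connection context and backs out if the service is offline or
 * state machine setup fails.
 */
static idm_status_t
example_tgt_accept(idm_svc_t *is)
{
	idm_conn_t	*ic;
	idm_status_t	rc;

	rc = idm_svc_conn_create(is, IDM_TRANSPORT_TYPE_SOCKETS, &ic);
	if (rc != IDM_STATUS_SUCCESS)
		return (rc);
	/* ... the transport would attach its private state to ic here ... */
	return (IDM_STATUS_SUCCESS);
}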
420 
421 void
422 idm_svc_conn_destroy(idm_conn_t *ic)
423 {
424 	mutex_enter(&idm.idm_global_mutex);
425 	list_remove(&idm.idm_tgt_conn_list, ic);
426 	idm.idm_tgt_conn_count--;
427 	mutex_exit(&idm.idm_global_mutex);
428 
429 	if (ic->ic_transport_private != NULL) {
430 		ic->ic_transport_ops->it_tgt_conn_destroy(ic);
431 	}
432 	idm_conn_destroy_common(ic);
433 }
434 
435 /*
436  * idm_conn_create_common()
437  *
438  * Allocate and initialize IDM connection context
439  */
440 idm_conn_t *
441 idm_conn_create_common(idm_conn_type_t conn_type, idm_transport_type_t tt,
442     idm_conn_ops_t *conn_ops)
443 {
444 	idm_conn_t		*ic;
445 	idm_transport_t		*it;
446 	idm_transport_type_t	type;
447 
448 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
449 		it = &idm_transport_list[type];
450 
451 		if ((it->it_ops != NULL) && (it->it_type == tt))
452 			break;
453 	}
454 	ASSERT(it->it_type == tt);
455 	if (it->it_type != tt)
456 		return (NULL);
457 
458 	ic = kmem_zalloc(sizeof (idm_conn_t), KM_SLEEP);
459 
460 	/* Initialize data */
461 	ic->ic_target_name[0] = '\0';
462 	ic->ic_initiator_name[0] = '\0';
463 	ic->ic_isid[0] = '\0';
464 	ic->ic_tsih[0] = '\0';
465 	ic->ic_conn_type = conn_type;
466 	ic->ic_conn_ops = *conn_ops;
467 	ic->ic_transport_ops = it->it_ops;
468 	ic->ic_transport_type = tt;
469 	ic->ic_transport_private = NULL; /* Set by transport service */
470 	ic->ic_internal_cid = idm_cid_alloc();
471 	if (ic->ic_internal_cid == 0) {
472 		kmem_free(ic, sizeof (idm_conn_t));
473 		return (NULL);
474 	}
475 	mutex_init(&ic->ic_mutex, NULL, MUTEX_DEFAULT, NULL);
476 	cv_init(&ic->ic_cv, NULL, CV_DEFAULT, NULL);
477 	idm_refcnt_init(&ic->ic_refcnt, ic);
478 
479 	return (ic);
480 }
481 
482 void
483 idm_conn_destroy_common(idm_conn_t *ic)
484 {
485 	idm_conn_sm_fini(ic);
486 	idm_refcnt_destroy(&ic->ic_refcnt);
487 	cv_destroy(&ic->ic_cv);
488 	mutex_destroy(&ic->ic_mutex);
489 	idm_cid_free(ic->ic_internal_cid);
490 
491 	kmem_free(ic, sizeof (idm_conn_t));
492 }
493 
494 /*
495  * Invoked from the SM as a result of the client's invocation of
496  * idm_ini_conn_connect()
497  */
498 idm_status_t
499 idm_ini_conn_finish(idm_conn_t *ic)
500 {
501 	/* invoke transport-specific connection */
502 	/* invoke the transport-specific connect */
503 }
504 
505 idm_status_t
506 idm_tgt_conn_finish(idm_conn_t *ic)
507 {
508 	idm_status_t rc;
509 
510 	rc = idm_notify_client(ic, CN_CONNECT_ACCEPT, NULL);
511 	if (rc != IDM_STATUS_SUCCESS) {
512 		return (IDM_STATUS_REJECT);
513 	}
514 
515 	/* Target client is ready to receive a login, start connection */
516 	return (ic->ic_transport_ops->it_tgt_conn_connect(ic));
517 }
518 
519 idm_transport_t *
520 idm_transport_lookup(idm_conn_req_t *cr)
521 {
522 	idm_transport_type_t	type;
523 	idm_transport_t		*it;
524 	idm_transport_caps_t	caps;
525 
526 	/*
527 	 * Make sure all available transports are set up.  We call this now
528 	 * instead of at initialization time in case IB has become available
529 	 * since we started (hotplug, etc).
530 	 */
531 	idm_transport_setup(cr->cr_li);
532 
533 	/* Determine the transport for this connection */
534 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
535 		it = &idm_transport_list[type];
536 
537 		if (it->it_ops == NULL) {
538 			/* transport is not registered */
539 			continue;
540 		}
541 
542 		if (it->it_ops->it_conn_is_capable(cr, &caps)) {
543 			return (it);
544 		}
545 	}
546 
547 	ASSERT(0);
548 	return (NULL); /* Make gcc happy */
549 }
550 
551 void
552 idm_transport_setup(ldi_ident_t li)
553 {
554 	idm_transport_type_t	type;
555 	idm_transport_t		*it;
556 	int			rc;
557 
558 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
559 		it = &idm_transport_list[type];
560 		/*
561 		 * We may want to store the LDI handle in the idm_svc_t
562 		 * and then allow multiple calls to ldi_open_by_name.  This
563 		 * would enable the LDI code to track who has the device open
564 		 * which could be useful in the case where we have multiple
565 		 * services and perhaps also have initiator and target opening
566 		 * the transport simultaneously.  For now we stick with the
567 		 * plan.
568 		 */
569 		if (it->it_ops == NULL) {
570 			/* transport is not ready, try to initialize it */
571 			if (it->it_type == IDM_TRANSPORT_TYPE_SOCKETS) {
572 				idm_so_init(it);
573 			} else {
574 				rc = ldi_open_by_name(it->it_device_path,
575 				    FREAD | FWRITE, kcred, &it->it_ldi_hdl, li);
576 				/*
577 				 * If the open is successful we will have
578 				 * filled in the LDI handle in the transport
579 				 * table and we expect that the transport
580 				 * registered itself.
581 				 */
582 				if (rc != 0) {
583 					it->it_ldi_hdl = NULL;
584 				}
585 			}
586 		}
587 	}
588 }
589 
590 void
591 idm_transport_teardown()
592 {
593 	idm_transport_type_t	type;
594 	idm_transport_t		*it;
595 
596 	ASSERT(mutex_owned(&idm.idm_global_mutex));
597 
598 	/* Caller holds the IDM global mutex */
599 	for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
600 		it = &idm_transport_list[type];
601 		/* If we have an open LDI handle on this driver, close it */
602 		if (it->it_ldi_hdl != NULL) {
603 			(void) ldi_close(it->it_ldi_hdl, FNDELAY, kcred);
604 			it->it_ldi_hdl = NULL;
605 		}
606 	}
607 }
608 
609 /*
610  * ID pool code.  We use this to generate unique structure identifiers without
611  * searching the existing structures.  This avoids the need to lock entire
612  * sets of structures at inopportune times.  Adapted from the CIFS server code.
613  *
614  *    A pool of IDs is a pool of 16 bit numbers. It is implemented as a bitmap.
615  *    A bit set to '1' indicates that the corresponding value has been allocated.
616  *    The allocation process is done by shifting a bit through the whole bitmap.
617  *    The current position of that index bit is kept in the idm_idpool_t
618  *    structure and represented by a byte index (0 to buffer size minus 1) and
619  *    a bit index (0 to 7).
620  *
621  *    The pools start with a size of 8 bytes or 64 IDs. Each time the pool runs
622  *    out of IDs its current size is doubled until it reaches its maximum size
623  *    (8192 bytes or 65536 IDs). The IDs 0 and 65535 are never given out which
624  *    means that a pool can have a maximum number of 65534 IDs available.
625  */
626 
627 static int
628 idm_idpool_increment(
629     idm_idpool_t	*pool)
630 {
631 	uint8_t		*new_pool;
632 	uint32_t	new_size;
633 
634 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
635 
636 	new_size = pool->id_size * 2;
637 	if (new_size <= IDM_IDPOOL_MAX_SIZE) {
638 		new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
639 		if (new_pool) {
640 			bzero(new_pool, new_size / 8);
641 			bcopy(pool->id_pool, new_pool, pool->id_size / 8);
642 			kmem_free(pool->id_pool, pool->id_size / 8);
643 			pool->id_pool = new_pool;
644 			pool->id_free_counter += new_size - pool->id_size;
645 			pool->id_max_free_counter += new_size - pool->id_size;
646 			pool->id_size = new_size;
647 			pool->id_idx_msk = (new_size / 8) - 1;
648 			if (new_size >= IDM_IDPOOL_MAX_SIZE) {
649 				/* id 65535 (-1) made unavailable */
650 				pool->id_pool[pool->id_idx_msk] = 0x80;
651 				pool->id_free_counter--;
652 				pool->id_max_free_counter--;
653 			}
654 			return (0);
655 		}
656 	}
657 	return (-1);
658 }
659 
660 /*
661  * idm_idpool_constructor
662  *
663  * This function initializes the pool structure provided.
664  */
665 
666 int
667 idm_idpool_create(idm_idpool_t *pool)
668 {
669 
670 	ASSERT(pool->id_magic != IDM_IDPOOL_MAGIC);
671 
672 	pool->id_size = IDM_IDPOOL_MIN_SIZE;
673 	pool->id_idx_msk = (IDM_IDPOOL_MIN_SIZE / 8) - 1;
674 	pool->id_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
675 	pool->id_max_free_counter = IDM_IDPOOL_MIN_SIZE - 1;
676 	pool->id_bit = 0x02;
677 	pool->id_bit_idx = 1;
678 	pool->id_idx = 0;
679 	pool->id_pool = (uint8_t *)kmem_alloc((IDM_IDPOOL_MIN_SIZE / 8),
680 	    KM_SLEEP);
681 	bzero(pool->id_pool, (IDM_IDPOOL_MIN_SIZE / 8));
682 	/* id 65535 (-1) is reserved in idm_idpool_increment() at max size */
683 	pool->id_pool[0] = 0x01;		/* id 0 made unavailable */
684 	mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
685 	pool->id_magic = IDM_IDPOOL_MAGIC;
686 	return (0);
687 }
688 
689 /*
690  * idm_idpool_destructor
691  *
692  * This function tears down and frees the resources associated with the
693  * pool provided.
694  */
695 
696 void
697 idm_idpool_destroy(idm_idpool_t *pool)
698 {
699 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
700 	ASSERT(pool->id_free_counter == pool->id_max_free_counter);
701 	pool->id_magic = (uint32_t)~IDM_IDPOOL_MAGIC;
702 	mutex_destroy(&pool->id_mutex);
703 	kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
704 }
705 
706 /*
707  * idm_idpool_alloc
708  *
709  * This function allocates an ID from the pool provided.
710  */
711 int
712 idm_idpool_alloc(idm_idpool_t *pool, uint16_t *id)
713 {
714 	uint32_t	i;
715 	uint8_t		bit;
716 	uint8_t		bit_idx;
717 	uint8_t		byte;
718 
719 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
720 
721 	mutex_enter(&pool->id_mutex);
722 	if ((pool->id_free_counter == 0) && idm_idpool_increment(pool)) {
723 		mutex_exit(&pool->id_mutex);
724 		return (-1);
725 	}
726 
727 	i = pool->id_size;
728 	while (i) {
729 		bit = pool->id_bit;
730 		bit_idx = pool->id_bit_idx;
731 		byte = pool->id_pool[pool->id_idx];
732 		while (bit) {
733 			if (byte & bit) {
734 				bit = bit << 1;
735 				bit_idx++;
736 				continue;
737 			}
738 			pool->id_pool[pool->id_idx] |= bit;
739 			*id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
740 			pool->id_free_counter--;
741 			pool->id_bit = bit;
742 			pool->id_bit_idx = bit_idx;
743 			mutex_exit(&pool->id_mutex);
744 			return (0);
745 		}
746 		pool->id_bit = 1;
747 		pool->id_bit_idx = 0;
748 		pool->id_idx++;
749 		pool->id_idx &= pool->id_idx_msk;
750 		--i;
751 	}
752 	/*
753 	 * This section of code shouldn't be reached. If there are IDs
754 	 * available and none could be found there's a problem.
755 	 */
756 	ASSERT(0);
757 	mutex_exit(&pool->id_mutex);
758 	return (-1);
759 }
760 
761 /*
762  * idm_idpool_free
763  *
764  * This function frees the ID provided.
765  */
766 void
767 idm_idpool_free(idm_idpool_t *pool, uint16_t id)
768 {
769 	ASSERT(pool->id_magic == IDM_IDPOOL_MAGIC);
770 	ASSERT(id != 0);
771 	ASSERT(id != 0xFFFF);
772 
773 	mutex_enter(&pool->id_mutex);
774 	if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
775 		pool->id_pool[id >> 3] &= ~(1 << (id & 7));
776 		pool->id_free_counter++;
777 		ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
778 		mutex_exit(&pool->id_mutex);
779 		return;
780 	}
781 	/* Freeing a free ID. */
782 	ASSERT(0);
783 	mutex_exit(&pool->id_mutex);
784 }
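/*
 * Editorial usage sketch (not referenced by the driver): the life cycle of a
 * pool is create, alloc/free pairs, then destroy once every allocated ID has
 * been returned.  The bzero() simply guarantees the magic-number ASSERT in
 * idm_idpool_create() sees an uninitialized pool.
 */
static int
example_idpool_usage(void)
{
	idm_idpool_t	pool;
	uint16_t	id;
	int		rc = -1;

	bzero(&pool, sizeof (pool));
	(void) idm_idpool_create(&pool);
	if (idm_idpool_alloc(&pool, &id) == 0) {
		/* "id" is unique within this pool until freed */
		idm_idpool_free(&pool, id);
		rc = 0;
	}
	idm_idpool_destroy(&pool);
	return (rc);
}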
785 
786 uint32_t
787 idm_cid_alloc(void)
788 {
789 	/*
790 	 * ID pool works with 16-bit identifiers right now.  That should
791 	 * be plenty since we will probably never have more than 2^16
792 	 * connections simultaneously.
793 	 */
794 	uint16_t cid16;
795 
796 	if (idm_idpool_alloc(&idm.idm_conn_id_pool, &cid16) == -1) {
797 		return (0); /* Fail */
798 	}
799 
800 	return ((uint32_t)cid16);
801 }
802 
803 void
804 idm_cid_free(uint32_t cid)
805 {
806 	idm_idpool_free(&idm.idm_conn_id_pool, (uint16_t)cid);
807 }
808 
809 
810 /*
811  * Code for generating the header and data digests
812  *
813  * This is the CRC-32C table
814  * Generated with:
815  * width = 32 bits
816  * poly = 0x1EDC6F41
817  * reflect input bytes = true
818  * reflect output bytes = true
819  */
820 
821 uint32_t idm_crc32c_table[256] =
822 {
823 	0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
824 	0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
825 	0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
826 	0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
827 	0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
828 	0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
829 	0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
830 	0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
831 	0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
832 	0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
833 	0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
834 	0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
835 	0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
836 	0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
837 	0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
838 	0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
839 	0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
840 	0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
841 	0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
842 	0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
843 	0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
844 	0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
845 	0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
846 	0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
847 	0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
848 	0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
849 	0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
850 	0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
851 	0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
852 	0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
853 	0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
854 	0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
855 	0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
856 	0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
857 	0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
858 	0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
859 	0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
860 	0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
861 	0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
862 	0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
863 	0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
864 	0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
865 	0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
866 	0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
867 	0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
868 	0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
869 	0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
870 	0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
871 	0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
872 	0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
873 	0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
874 	0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
875 	0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
876 	0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
877 	0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
878 	0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
879 	0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
880 	0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
881 	0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
882 	0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
883 	0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
884 	0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
885 	0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
886 	0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
887 };
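/*
 * Editorial sketch (not part of the driver): a table like the one above can
 * be generated with the standard reflected CRC algorithm.  The reflected form
 * of the CRC-32C polynomial 0x1EDC6F41 is 0x82F63B78; idm_crc32c_table[1]
 * generated this way is 0xF26B8303, matching the static table.
 */
static void
example_crc32c_table_gen(uint32_t table[256])
{
	uint32_t	crc;
	int		i, j;

	for (i = 0; i < 256; i++) {
		crc = (uint32_t)i;
		for (j = 0; j < 8; j++) {
			/* reflected: shift right, XOR the reflected poly */
			crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78 : (crc >> 1);
		}
		table[i] = crc;
	}
}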
888 
889 /*
890  * idm_crc32c - Steps through buffer one byte at a time, calculates
891  * reflected crc using table.
892  */
893 uint32_t
894 idm_crc32c(void *address, unsigned long length)
895 {
896 	uint8_t *buffer = address;
897 	uint32_t crc = 0xffffffff, result;
898 #ifdef _BIG_ENDIAN
899 	uint8_t byte0, byte1, byte2, byte3;
900 #endif
901 
902 	ASSERT(address != NULL);
903 
904 	if (iscsi_crc32_hd == -1) {
905 		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
906 			iscsi_crc32_hd = 0;
907 		} else {
908 			iscsi_crc32_hd = 1;
909 		}
910 	}
911 	if (iscsi_crc32_hd == 0)
912 		return (HW_CRC32(buffer, length, crc));
913 
914 	while (length--) {
915 		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
916 		    (crc >> 8);
917 	}
918 	result = crc ^ 0xffffffff;
919 
920 #ifdef	_BIG_ENDIAN
921 	byte0 = (uint8_t)(result & 0xFF);
922 	byte1 = (uint8_t)((result >> 8) & 0xFF);
923 	byte2 = (uint8_t)((result >> 16) & 0xFF);
924 	byte3 = (uint8_t)((result >> 24) & 0xFF);
925 	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
926 #endif	/* _BIG_ENDIAN */
927 
928 	return (result);
929 }
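/*
 * Editorial sketch (not part of the driver): an iSCSI HeaderDigest is a
 * CRC32C over the 48-byte basic header segment, so a received header's
 * expected digest can be derived like this and compared against the digest
 * that followed the header on the wire.
 */
static uint32_t
example_header_digest(iscsi_hdr_t *bhs)
{
	return (idm_crc32c(bhs, sizeof (iscsi_hdr_t)));
}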
930 
931 
932 /*
933  * idm_crc32c_continued - Continues stepping through buffer one
934  * byte at a time, calculates reflected crc using table.
935  */
936 uint32_t
937 idm_crc32c_continued(void *address, unsigned long length, uint32_t crc)
938 {
939 	uint8_t *buffer = address;
940 	uint32_t result;
941 #ifdef	_BIG_ENDIAN
942 	uint8_t byte0, byte1, byte2, byte3;
943 #endif
944 
945 	ASSERT(address != NULL);
946 
947 	if (iscsi_crc32_hd == -1) {
948 		if (hd_crc32_avail((uint32_t *)idm_crc32c_table) == B_TRUE) {
949 			iscsi_crc32_hd = 0;
950 		} else {
951 			iscsi_crc32_hd = 1;
952 		}
953 	}
954 	if (iscsi_crc32_hd == 0)
955 		return (HW_CRC32_CONT(buffer, length, crc));
956 
957 
958 #ifdef	_BIG_ENDIAN
959 	byte0 = (uint8_t)((crc >> 24) & 0xFF);
960 	byte1 = (uint8_t)((crc >> 16) & 0xFF);
961 	byte2 = (uint8_t)((crc >> 8) & 0xFF);
962 	byte3 = (uint8_t)(crc & 0xFF);
963 	crc = ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0);
964 #endif
965 
966 	crc = crc ^ 0xffffffff;
967 	while (length--) {
968 		crc = idm_crc32c_table[(crc ^ *buffer++) & 0xFFL] ^
969 		    (crc >> 8);
970 	}
971 	result = crc ^ 0xffffffff;
972 
973 #ifdef	_BIG_ENDIAN
974 	byte0 = (uint8_t)(result & 0xFF);
975 	byte1 = (uint8_t)((result >> 8) & 0xFF);
976 	byte2 = (uint8_t)((result >> 16) & 0xFF);
977 	byte3 = (uint8_t)((result >> 24) & 0xFF);
978 	result = ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3);
979 #endif
980 	return (result);
981 }
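/*
 * Editorial sketch (not referenced by the driver): computing a digest over a
 * buffer in two pieces with idm_crc32c()/idm_crc32c_continued() yields the
 * same value as a single idm_crc32c() call over the whole buffer, which is
 * how a digest can be accumulated across scattered segments.
 */
static uint32_t
example_crc32c_two_part(uint8_t *buf, unsigned long len, unsigned long split)
{
	uint32_t	crc;

	ASSERT(split <= len);
	crc = idm_crc32c(buf, split);
	return (idm_crc32c_continued(buf + split, len - split, crc));
}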
982 
983 /* ARGSUSED */
984 int
985 idm_task_constructor(void *hdl, void *arg, int flags)
986 {
987 	idm_task_t *idt = (idm_task_t *)hdl;
988 	uint32_t next_task;
989 
990 	mutex_init(&idt->idt_mutex, NULL, MUTEX_DEFAULT, NULL);
991 
992 	/* Find the next free task ID */
993 	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
994 	next_task = idm.idm_taskid_next;
995 	while (idm.idm_taskid_table[next_task]) {
996 		next_task++;
997 		if (next_task == idm.idm_taskid_max)
998 			next_task = 0;
999 		if (next_task == idm.idm_taskid_next) {
1000 			rw_exit(&idm.idm_taskid_table_lock);
1001 			return (-1);
1002 		}
1003 	}
1004 
1005 	idm.idm_taskid_table[next_task] = idt;
1006 	idm.idm_taskid_next = (next_task + 1) % idm.idm_taskid_max;
1007 	rw_exit(&idm.idm_taskid_table_lock);
1008 
1009 	idt->idt_tt = next_task;
1010 
1011 	list_create(&idt->idt_inbufv, sizeof (idm_buf_t),
1012 	    offsetof(idm_buf_t, idb_buflink));
1013 	list_create(&idt->idt_outbufv, sizeof (idm_buf_t),
1014 	    offsetof(idm_buf_t, idb_buflink));
1015 	idm_refcnt_init(&idt->idt_refcnt, idt);
1016 
1017 	/*
1018 	 * Set the transport header pointer explicitly.  This removes the
1019 	 * need for per-transport header allocation, which simplifies cache
1020 	 * init considerably.  If at a later date we have an additional IDM
1021 	 * transport that requires a different size, we'll revisit this.
1022 	 */
1023 	idt->idt_transport_hdr = (void *)(idt + 1); /* pointer arithmetic */
1024 
1025 	return (0);
1026 }
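/*
 * Editorial sketch (hypothetical names and sizes): because the constructor
 * above points idt_transport_hdr just past the task structure, the task kmem
 * cache only needs a single combined object size -- sizeof (idm_task_t) plus
 * the largest transport header any registered transport requires.  Roughly:
 */
#if 0
	task_cache = kmem_cache_create("example_task_cache",
	    sizeof (idm_task_t) + max_transport_hdr_len, 8,
	    idm_task_constructor, idm_task_destructor,
	    NULL, NULL, NULL, 0);
#endif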
1027 
1028 /* ARGSUSED */
1029 void
1030 idm_task_destructor(void *hdl, void *arg)
1031 {
1032 	idm_task_t *idt = (idm_task_t *)hdl;
1033 
1034 	/* Remove the task from the ID table */
1035 	rw_enter(&idm.idm_taskid_table_lock, RW_WRITER);
1036 	idm.idm_taskid_table[idt->idt_tt] = NULL;
1037 	rw_exit(&idm.idm_taskid_table_lock);
1038 
1039 	/* tear down the refcnt and the inbuf/outbuf lists */
1040 	idm_refcnt_destroy(&idt->idt_refcnt);
1041 	list_destroy(&idt->idt_inbufv);
1042 	list_destroy(&idt->idt_outbufv);
1043 
1044 	/*
1045 	 * The final call to idm_task_rele may happen with the task
1046 	 * mutex held which may invoke this destructor immediately.
1047 	 * Stall here until the task mutex owner lets go.
1048 	 */
1049 	mutex_enter(&idt->idt_mutex);
1050 	mutex_destroy(&idt->idt_mutex);
1051 }
1052 
1053 /*
1054  * idm_listbuf_insert searches from the back of the list looking for the
1055  * insertion point.
1056  */
1057 void
1058 idm_listbuf_insert(list_t *lst, idm_buf_t *buf)
1059 {
1060 	idm_buf_t	*idb;
1061 
1062 	/* iterate through the list to find the insertion point */
1063 	for (idb = list_tail(lst); idb != NULL; idb = list_prev(lst, idb)) {
1064 
1065 		if (idb->idb_bufoffset < buf->idb_bufoffset) {
1066 
1067 			list_insert_after(lst, idb, buf);
1068 			return;
1069 		}
1070 	}
1071 
1072 	/* add the buf to the head of the list */
1073 	list_insert_head(lst, buf);
1074 
1075 }
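/*
 * Editorial sketch (not referenced by the driver): the insert routine above
 * keeps the list ordered by ascending idb_bufoffset, so a walk from the head
 * visits buffers in offset order.  A DEBUG-style check of that invariant:
 */
static void
example_listbuf_check_sorted(list_t *lst)
{
	idm_buf_t	*idb, *next;

	for (idb = list_head(lst); idb != NULL; idb = next) {
		next = list_next(lst, idb);
		if (next != NULL)
			ASSERT(idb->idb_bufoffset <= next->idb_bufoffset);
	}
}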
1076 
1077 /*ARGSUSED*/
1078 void
1079 idm_wd_thread(void *arg)
1080 {
1081 	idm_conn_t	*ic;
1082 	clock_t		wake_time;
1083 	clock_t		idle_time;
1084 
1085 	/* Record the thread id for thread_join() */
1086 	idm.idm_wd_thread_did = curthread->t_did;
1087 	mutex_enter(&idm.idm_global_mutex);
1088 	idm.idm_wd_thread_running = B_TRUE;
1089 	cv_signal(&idm.idm_wd_cv);
1090 
1091 	while (idm.idm_wd_thread_running) {
1092 		for (ic = list_head(&idm.idm_tgt_conn_list);
1093 		    ic != NULL;
1094 		    ic = list_next(&idm.idm_tgt_conn_list, ic)) {
1095 			idle_time = ddi_get_lbolt() - ic->ic_timestamp;
1096 
1097 			/*
1098 			 * If this connection is in FFP then grab a hold
1099 			 * and check the various timeout thresholds.  Otherwise
1100 			 * the connection is closing and we should just
1101 			 * move on to the next one.
1102 			 */
1103 			mutex_enter(&ic->ic_state_mutex);
1104 			if (ic->ic_ffp) {
1105 				idm_conn_hold(ic);
1106 			} else {
1107 				mutex_exit(&ic->ic_state_mutex);
1108 				continue;
1109 			}
1110 
1111 			/*
1112 			 * If there hasn't been any activity on this
1113 			 * connection for the keepalive timeout period
1114 			 * and if the client has provided a keepalive
1115 			 * callback then call the keepalive callback.
1116 			 * This allows the client to take action to keep
1117 			 * the link alive (like send a nop PDU).
1118 			 */
1119 			if ((TICK_TO_SEC(idle_time) >=
1120 			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT) &&
1121 			    !ic->ic_keepalive) {
1122 				ic->ic_keepalive = B_TRUE;
1123 				if (ic->ic_conn_ops.icb_keepalive) {
1124 					mutex_exit(&ic->ic_state_mutex);
1125 					mutex_exit(&idm.idm_global_mutex);
1126 					(*ic->ic_conn_ops.icb_keepalive)(ic);
1127 					mutex_enter(&idm.idm_global_mutex);
1128 					mutex_enter(&ic->ic_state_mutex);
1129 				}
1130 			} else if ((TICK_TO_SEC(idle_time) <
1131 			    IDM_TRANSPORT_KEEPALIVE_IDLE_TIMEOUT)) {
1132 				/* Reset keepalive */
1133 				ic->ic_keepalive = B_FALSE;
1134 			}
1135 
1136 			/*
1137 			 * If there hasn't been any activity on this
1138 			 * connection for the failure timeout period then
1139 			 * drop the connection.  We expect the initiator
1140 			 * to keep the connection alive if it wants the
1141 			 * connection to stay open.
1142 			 *
1143 			 * If it turns out to be desirable to take a
1144 			 * more active role in maintaining the connection
1145 			 * we could add a client callback to send
1146 			 * a "keepalive" kind of message (no doubt a nop)
1147 			 * and fire that on a shorter timer.
1148 			 */
1149 			if (TICK_TO_SEC(idle_time) >
1150 			    IDM_TRANSPORT_FAIL_IDLE_TIMEOUT) {
1151 				mutex_exit(&ic->ic_state_mutex);
1152 				mutex_exit(&idm.idm_global_mutex);
1153 				IDM_SM_LOG(CE_WARN, "idm_wd_thread: "
1154 				    "conn %p idle for %d seconds, "
1155 				    "sending CE_TRANSPORT_FAIL",
1156 				    (void *)ic, (int)idle_time);
1157 				idm_conn_event(ic, CE_TRANSPORT_FAIL, NULL);
1158 				mutex_enter(&idm.idm_global_mutex);
1159 				mutex_enter(&ic->ic_state_mutex);
1160 			}
1161 
1162 			idm_conn_rele(ic);
1163 
1164 			mutex_exit(&ic->ic_state_mutex);
1165 		}
1166 
1167 		wake_time = ddi_get_lbolt() + SEC_TO_TICK(IDM_WD_INTERVAL);
1168 		(void) cv_timedwait(&idm.idm_wd_cv, &idm.idm_global_mutex,
1169 		    wake_time);
1170 	}
1171 	mutex_exit(&idm.idm_global_mutex);
1172 
1173 	thread_exit();
1174 }
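/*
 * Editorial sketch (not referenced by the driver): the failure policy applied
 * by the watchdog above, expressed as a standalone predicate using the same
 * timestamp and threshold.
 */
static boolean_t
example_wd_conn_failed(idm_conn_t *ic)
{
	clock_t	idle_time = ddi_get_lbolt() - ic->ic_timestamp;

	return (TICK_TO_SEC(idle_time) > IDM_TRANSPORT_FAIL_IDLE_TIMEOUT ?
	    B_TRUE : B_FALSE);
}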
1175