xref: /illumos-gate/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_fcp.c (revision a3170057524922242772a15fbeb3e91f5f8d4744)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2012 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2020 RackTop Systems, Inc.
26  */
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_FCP_C);
32 
33 #define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
34 	PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow))
35 
36 static void	emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
37     Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
38 
39 #define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
40 #define	SCSI_INQUIRY			0x12
41 #define	SCSI_RX_DIAG    		0x1C
42 
43 
44 /*
45  *  emlxs_handle_fcp_event
46  *
47  *  Description: Process an FCP Rsp Ring completion
48  *
49  */
50 /* ARGSUSED */
51 extern void
52 emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
53 {
54 	emlxs_port_t *port = &PPORT;
55 	emlxs_config_t	*cfg = &CFG;
56 	IOCB *cmd;
57 	emlxs_buf_t *sbp;
58 	fc_packet_t *pkt = NULL;
59 #ifdef SAN_DIAG_SUPPORT
60 	NODELIST *ndlp;
61 #endif
62 	uint32_t iostat;
63 	uint8_t localstat;
64 	fcp_rsp_t *rsp;
65 	uint32_t rsp_data_resid;
66 	uint32_t check_underrun;
67 	uint8_t asc;
68 	uint8_t ascq;
69 	uint8_t scsi_status;
70 	uint8_t sense;
71 	uint32_t did;
72 	uint32_t fix_it;
73 	uint8_t *scsi_cmd;
74 	uint8_t scsi_opcode;
75 	uint16_t scsi_dl;
76 	uint32_t data_rx;
77 	uint32_t length;
78 
79 	cmd = &iocbq->iocb;
80 
81 	/* Initialize the status */
82 	iostat = cmd->ULPSTATUS;
83 	localstat = 0;
84 	scsi_status = 0;
85 	asc = 0;
86 	ascq = 0;
87 	sense = 0;
88 	check_underrun = 0;
89 	fix_it = 0;
90 
91 	HBASTATS.FcpEvent++;
92 
93 	sbp = (emlxs_buf_t *)iocbq->sbp;
94 
95 	if (!sbp) {
96 		/* completion with missing xmit command */
97 		HBASTATS.FcpStray++;
98 
99 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
100 		    "cmd=%x iotag=%d", cmd->ULPCOMMAND, cmd->ULPIOTAG);
101 
102 		return;
103 	}
104 
105 	HBASTATS.FcpCompleted++;
106 
107 #ifdef SAN_DIAG_SUPPORT
108 	emlxs_update_sd_bucket(sbp);
109 #endif /* SAN_DIAG_SUPPORT */
110 
111 	pkt = PRIV2PKT(sbp);
112 
113 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
114 	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
115 	scsi_opcode = scsi_cmd[12];
116 	data_rx = 0;
117 
118 	/* Sync data in data buffer only on FC_PKT_FCP_READ */
119 	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
120 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
121 		    DDI_DMA_SYNC_FORKERNEL);
122 
123 #ifdef TEST_SUPPORT
124 		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
125 		    (pkt->pkt_datalen >= 512)) {
126 			hba->underrun_counter--;
127 			iostat = IOSTAT_FCP_RSP_ERROR;
128 
129 			/* Report 512 bytes missing by adapter */
130 			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
131 
132 			/* Corrupt 512 bytes of Data buffer */
133 			bzero((uint8_t *)pkt->pkt_data, 512);
134 
135 			/* Set FCP response to STATUS_GOOD */
136 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
137 		}
138 #endif /* TEST_SUPPORT */
139 	}
140 
141 	/* Process the pkt */
142 	mutex_enter(&sbp->mtx);
143 
144 	/* Check for immediate return */
145 	if ((iostat == IOSTAT_SUCCESS) &&
146 	    (pkt->pkt_comp) &&
147 	    !(sbp->pkt_flags &
148 	    (PACKET_ULP_OWNED | PACKET_COMPLETED |
149 	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
150 	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
151 	    PACKET_IN_ABORT | PACKET_POLLED))) {
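		/*
		 * Fast path: a successful, uncontended IO is completed
		 * inline here, bypassing the emlxs_pkt_complete() path
		 * used by the error cases below.
		 */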
152 		HBASTATS.FcpGood++;
153 
154 		sbp->pkt_flags |=
155 		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
156 		    PACKET_COMPLETED | PACKET_ULP_OWNED);
157 		mutex_exit(&sbp->mtx);
158 
159 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
160 		emlxs_unswap_pkt(sbp);
161 #endif /* EMLXS_MODREV2X */
162 
163 #ifdef FMA_SUPPORT
164 		emlxs_check_dma(hba, sbp);
165 #endif  /* FMA_SUPPORT */
166 
167 		cp->ulpCmplCmd++;
168 		(*pkt->pkt_comp) (pkt);
169 
170 #ifdef FMA_SUPPORT
171 		if (hba->flag & FC_DMA_CHECK_ERROR) {
172 			emlxs_thread_spawn(hba, emlxs_restart_thread,
173 			    NULL, NULL);
174 		}
175 #endif  /* FMA_SUPPORT */
176 
177 		return;
178 	}
179 
180 	/*
181 	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
182 	 * is reported.
183 	 */
184 
185 	/* Skip response processing if none was reported or provided */
186 	if ((iostat != IOSTAT_FCP_RSP_ERROR) || (pkt->pkt_rsplen == 0)) {
187 		goto done;
188 	}
189 
190 	EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
191 	    DDI_DMA_SYNC_FORKERNEL);
192 
193 	/* Get the response buffer pointer */
194 	rsp = (fcp_rsp_t *)pkt->pkt_resp;
195 
196 	/* Validate the response payload */
197 	if (!rsp->fcp_u.fcp_status.resid_under &&
198 	    !rsp->fcp_u.fcp_status.resid_over) {
199 		rsp->fcp_resid = 0;
200 	}
201 
202 	if (!rsp->fcp_u.fcp_status.rsp_len_set) {
203 		rsp->fcp_response_len = 0;
204 	}
205 
206 	if (!rsp->fcp_u.fcp_status.sense_len_set) {
207 		rsp->fcp_sense_len = 0;
208 	}
209 
210 	length = sizeof (fcp_rsp_t) + LE_SWAP32(rsp->fcp_response_len) +
211 	    LE_SWAP32(rsp->fcp_sense_len);
212 
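	/*
	 * Illustrative sizing (hypothetical values): with an 8-byte FCP
	 * response payload and 18 bytes of fixed-format sense data, the
	 * reply occupies sizeof (fcp_rsp_t) + 8 + 18 bytes. Anything
	 * larger than pkt_rsplen cannot fit in the buffer we posted, so
	 * it is rejected below as an invalid response.
	 */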
213 	if (length > pkt->pkt_rsplen) {
214 		iostat = IOSTAT_RSP_INVALID;
215 		pkt->pkt_data_resid = pkt->pkt_datalen;
216 		goto done;
217 	}
218 
219 	/* Set the valid response flag */
220 	sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
221 
222 	scsi_status = rsp->fcp_u.fcp_status.scsi_status;
223 
224 #ifdef SAN_DIAG_SUPPORT
225 	ndlp = (NODELIST *)iocbq->node;
226 	if (scsi_status == SCSI_STAT_QUE_FULL) {
227 		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
228 		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
229 	} else if (scsi_status == SCSI_STAT_BUSY) {
230 		emlxs_log_sd_scsi_event(port,
231 		    SD_SCSI_SUBCATEGORY_DEVBSY,
232 		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
233 	}
234 #endif
235 
236 	/*
237 	 * Convert a task abort to a check condition with no data
238 	 * transferred. We saw a data corruption when Solaris received
239 	 * a Task Abort from a tape.
240 	 */
241 
242 	if (scsi_status == SCSI_STAT_TASK_ABORT) {
243 		EMLXS_MSGF(EMLXS_CONTEXT,
244 		    &emlxs_fcp_completion_error_msg,
245 		    "Task Abort. "
246 		    "Fixed. did=0x%06x sbp=%p cmd=%02x dl=%d",
247 		    did, sbp, scsi_opcode, pkt->pkt_datalen);
248 
249 		rsp->fcp_u.fcp_status.scsi_status =
250 		    SCSI_STAT_CHECK_COND;
251 		rsp->fcp_u.fcp_status.rsp_len_set = 0;
252 		rsp->fcp_u.fcp_status.sense_len_set = 0;
253 		rsp->fcp_u.fcp_status.resid_over = 0;
254 
255 		if (pkt->pkt_datalen) {
256 			rsp->fcp_u.fcp_status.resid_under = 1;
257 			rsp->fcp_resid =
258 			    LE_SWAP32(pkt->pkt_datalen);
259 		} else {
260 			rsp->fcp_u.fcp_status.resid_under = 0;
261 			rsp->fcp_resid = 0;
262 		}
263 
264 		scsi_status = SCSI_STAT_CHECK_COND;
265 	}
266 
267 	/*
268 	 * We only need to check underrun if data could
269 	 * have been sent
270 	 */
271 
272 	/* Always check underrun if status is good */
273 	if (scsi_status == SCSI_STAT_GOOD) {
274 		check_underrun = 1;
275 	}
276 	/* Check the sense codes if this is a check condition */
277 	else if (scsi_status == SCSI_STAT_CHECK_COND) {
278 		check_underrun = 1;
279 
280 		/* Check if sense data was provided */
281 		if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
282 			sense = *((uint8_t *)rsp + 32 + 2);
283 			asc = *((uint8_t *)rsp + 32 + 12);
284 			ascq = *((uint8_t *)rsp + 32 + 13);
285 		}
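
		/*
		 * The offsets above assume fixed-format SCSI sense data
		 * beginning 32 bytes into the FCP_RSP payload: byte 2 of
		 * the sense data holds the sense key, and bytes 12 and 13
		 * hold the additional sense code (asc) and its qualifier
		 * (ascq).
		 */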
286 
287 #ifdef SAN_DIAG_SUPPORT
288 		emlxs_log_sd_scsi_check_event(port,
289 		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
290 		    scsi_opcode, sense, asc, ascq);
291 #endif
292 	}
293 	/* Status is not good and this is not a check condition */
294 	/* No data should have been sent */
295 	else {
296 		check_underrun = 0;
297 	}
298 
299 	/* Initialize the resids */
300 	pkt->pkt_resp_resid = 0;
301 	pkt->pkt_data_resid = 0;
302 
303 	/* Check if no data was to be transferred */
304 	if (pkt->pkt_datalen == 0) {
305 		goto done;
306 	}
307 
308 	/* Get the residual underrun count reported by the SCSI reply */
309 	rsp_data_resid = (rsp->fcp_u.fcp_status.resid_under) ?
310 	    LE_SWAP32(rsp->fcp_resid) : 0;
311 
312 	/* Set pkt_data_resid to the residual from the SCSI response */
313 	pkt->pkt_data_resid = rsp_data_resid;
314 
315 	/* Adjust the pkt_data_resid field if needed */
316 	if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
317 		/*
318 		 * Get the residual underrun count reported by
319 		 * our adapter
320 		 */
321 		pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
322 
323 #ifdef SAN_DIAG_SUPPORT
324 		if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
325 			emlxs_log_sd_fc_rdchk_event(port,
326 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
327 			    scsi_opcode, pkt->pkt_data_resid);
328 		}
329 #endif
330 
331 		/* Get the actual amount of data transferred */
332 		data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
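
		/*
		 * Worked example (hypothetical values): if pkt_datalen is
		 * 4096 and the adapter reports fcpi_parm = 1024, then
		 * data_rx = 3072. If the target's reply claimed
		 * rsp_data_resid = 0, the adapter saw an underrun that the
		 * target never reported; the checks below reconcile the
		 * two views.
		 */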
333 
334 		/*
335 		 * If the residual being reported by the adapter is
336 		 * greater than the residual being reported in the
337 		 * reply, then we have a true underrun.
338 		 */
339 		if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
340 			switch (scsi_opcode) {
341 			case SCSI_INQUIRY:
342 				scsi_dl = scsi_cmd[16];
343 				break;
344 
345 			case SCSI_RX_DIAG:
346 				scsi_dl =
347 				    (scsi_cmd[15] * 0x100) +
348 				    scsi_cmd[16];
349 				break;
350 
351 			default:
352 				scsi_dl = pkt->pkt_datalen;
353 			}
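
			/*
			 * Note: scsi_cmd[] is the FCP_CMND payload, so the
			 * CDB begins at offset 12 (scsi_opcode above is
			 * scsi_cmd[12]). For a 6-byte INQUIRY, CDB byte 4
			 * (scsi_cmd[16]) is the allocation length; for
			 * RECEIVE DIAGNOSTIC RESULTS (0x1C), CDB bytes 3
			 * and 4 form the 16-bit allocation length.
			 */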
354 
355 #ifdef FCP_UNDERRUN_PATCH1
356 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
357 			/*
358 			 * If status is not good and no data was
359 			 * actually transferred, then we must fix
360 			 * the issue
361 			 */
362 			if ((scsi_status != SCSI_STAT_GOOD) && (data_rx == 0)) {
363 				fix_it = 1;
364 
365 				EMLXS_MSGF(EMLXS_CONTEXT,
366 				    &emlxs_fcp_completion_error_msg,
367 				    "Underrun(1). Fixed. "
368 				    "did=0x%06x sbp=%p cmd=%02x "
369 				    "dl=%d,%d rx=%d rsp=%d",
370 				    did, sbp, scsi_opcode,
371 				    pkt->pkt_datalen, scsi_dl,
372 				    (pkt->pkt_datalen -
373 				    pkt->pkt_data_resid),
374 				    rsp_data_resid);
375 
376 			}
377 }
378 #endif /* FCP_UNDERRUN_PATCH1 */
379 
380 
381 #ifdef FCP_UNDERRUN_PATCH2
382 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH2) {
383 			if (scsi_status == SCSI_STAT_GOOD) {
384 				emlxs_msg_t	*msg;
385 
386 				msg = &emlxs_fcp_completion_error_msg;
387 				/*
388 				 * If status is good and this is
389 				 * an inquiry request and the
390 				 * amount of data requested is
391 				 * less than or equal to the
392 				 * amount of data actually
393 				 * received, then we must fix
394 				 * the issue.
395 				 */
396 
397 				if ((scsi_opcode == SCSI_INQUIRY) &&
398 				    (pkt->pkt_datalen >= data_rx) &&
399 				    (scsi_dl <= data_rx)) {
400 					fix_it = 1;
401 
402 					EMLXS_MSGF(EMLXS_CONTEXT, msg,
403 					    "Underrun(2). Fixed. "
404 					    "did=0x%06x sbp=%p "
405 					    "cmd=%02x dl=%d,%d "
406 					    "rx=%d rsp=%d",
407 					    did, sbp, scsi_opcode,
408 					    pkt->pkt_datalen, scsi_dl,
409 					    data_rx, rsp_data_resid);
410 
411 				}
412 
413 				/*
414 				 * If status is good and this is an
415 				 * inquiry request and the amount of
416 				 * data requested >= 128 bytes, but
417 				 * only 128 bytes were received,
418 				 * then we must fix the issue.
419 				 */
420 				else if ((scsi_opcode == SCSI_INQUIRY) &&
421 				    (pkt->pkt_datalen >= 128) &&
422 				    (scsi_dl >= 128) && (data_rx == 128)) {
423 					fix_it = 1;
424 
425 					EMLXS_MSGF(EMLXS_CONTEXT, msg,
426 					    "Underrun(3). Fixed. "
427 					    "did=0x%06x sbp=%p "
428 					    "cmd=%02x dl=%d,%d "
429 					    "rx=%d rsp=%d",
430 					    did, sbp, scsi_opcode,
431 					    pkt->pkt_datalen, scsi_dl,
432 					    data_rx, rsp_data_resid);
433 
434 				}
435 			}
436 }
437 #endif /* FCP_UNDERRUN_PATCH2 */
438 
439 			/*
440 			 * Check if SCSI response payload should be
441 			 * fixed or if a DATA_UNDERRUN should be
442 			 * reported
443 			 */
444 			if (fix_it) {
445 				/*
446 				 * Fix the SCSI response payload itself
447 				 */
448 				rsp->fcp_u.fcp_status.resid_under = 1;
449 				rsp->fcp_resid =
450 				    LE_SWAP32(pkt->pkt_data_resid);
451 			} else {
452 				/*
453 				 * Change the status from
454 				 * IOSTAT_FCP_RSP_ERROR to
455 				 * IOSTAT_DATA_UNDERRUN
456 				 */
457 				iostat = IOSTAT_DATA_UNDERRUN;
458 				pkt->pkt_data_resid =
459 				    pkt->pkt_datalen;
460 			}
461 		}
462 
463 		/*
464 		 * If the residual being reported by the adapter is
465 		 * less than the residual being reported in the reply,
466 		 * then we have a true overrun. Since we don't know
467 		 * where the extra data came from or went to then we
468 		 * cannot trust anything we received
469 		 */
470 		else if (rsp_data_resid > pkt->pkt_data_resid) {
471 			/*
472 			 * Change the status from
473 			 * IOSTAT_FCP_RSP_ERROR to
474 			 * IOSTAT_DATA_OVERRUN
475 			 */
476 			iostat = IOSTAT_DATA_OVERRUN;
477 			pkt->pkt_data_resid = pkt->pkt_datalen;
478 		}
479 
480 	} else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
481 	    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
482 		/*
483 		 * Get the residual underrun count reported by
484 		 * our adapter
485 		 */
486 		pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
487 
488 #ifdef SAN_DIAG_SUPPORT
489 		if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
490 			emlxs_log_sd_fc_rdchk_event(port,
491 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
492 			    scsi_opcode, pkt->pkt_data_resid);
493 		}
494 #endif /* SAN_DIAG_SUPPORT */
495 
496 		/* Get the actual amount of data transferred */
497 		data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
498 
499 		/*
500 		 * If the residual being reported by the adapter is
501 		 * greater than the residual being reported in the
502 		 * reply, then we have a true underrun.
503 		 */
504 		if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
505 
506 			scsi_dl = pkt->pkt_datalen;
507 
508 #ifdef FCP_UNDERRUN_PATCH1
509 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
510 			/*
511 			 * If status is not good and no data was
512 			 * actually transferred, then we must fix
513 			 * the issue
514 			 */
515 			if ((scsi_status != SCSI_STAT_GOOD) && (data_rx == 0)) {
516 				fix_it = 1;
517 
518 				EMLXS_MSGF(EMLXS_CONTEXT,
519 				    &emlxs_fcp_completion_error_msg,
520 				    "Underrun(1). Fixed. "
521 				    "did=0x%06x sbp=%p cmd=%02x "
522 				    "dl=%d,%d rx=%d rsp=%d",
523 				    did, sbp, scsi_opcode,
524 				    pkt->pkt_datalen, scsi_dl,
525 				    (pkt->pkt_datalen -
526 				    pkt->pkt_data_resid),
527 				    rsp_data_resid);
528 
529 			}
530 }
531 #endif /* FCP_UNDERRUN_PATCH1 */
532 
533 			/*
534 			 * Check if SCSI response payload should be
535 			 * fixed or if a DATA_UNDERRUN should be
536 			 * reported
537 			 */
538 			if (fix_it) {
539 				/*
540 				 * Fix the SCSI response payload itself
541 				 */
542 				rsp->fcp_u.fcp_status.resid_under = 1;
543 				rsp->fcp_resid =
544 				    LE_SWAP32(pkt->pkt_data_resid);
545 			} else {
546 				/*
547 				 * Change the status from
548 				 * IOSTAT_FCP_RSP_ERROR to
549 				 * IOSTAT_DATA_UNDERRUN
550 				 */
551 				iostat = IOSTAT_DATA_UNDERRUN;
552 				pkt->pkt_data_resid =
553 				    pkt->pkt_datalen;
554 			}
555 		}
556 
557 		/*
558 		 * If the residual being reported by the adapter is
559 		 * less than the residual being reported in the reply,
560 		 * then we have a true overrun. Since we don't know
561 		 * where the extra data came from or went to then we
562 		 * cannot trust anything we received
563 		 */
564 		else if (rsp_data_resid > pkt->pkt_data_resid) {
565 			/*
566 			 * Change the status from
567 			 * IOSTAT_FCP_RSP_ERROR to
568 			 * IOSTAT_DATA_OVERRUN
569 			 */
570 			iostat = IOSTAT_DATA_OVERRUN;
571 			pkt->pkt_data_resid = pkt->pkt_datalen;
572 		}
573 	}
574 
575 done:
576 
577 	/* Print completion message */
578 	switch (iostat) {
579 	case IOSTAT_SUCCESS:
580 		/* Build SCSI GOOD status */
581 		if (pkt->pkt_rsplen) {
582 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
583 		}
584 		break;
585 
586 	case IOSTAT_FCP_RSP_ERROR:
587 		break;
588 
589 	case IOSTAT_REMOTE_STOP:
590 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
591 		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
592 		    scsi_opcode);
593 		break;
594 
595 	case IOSTAT_LOCAL_REJECT:
596 		localstat = cmd->un.grsp.perr.statLocalError;
597 
598 		switch (localstat) {
599 		case IOERR_SEQUENCE_TIMEOUT:
600 			EMLXS_MSGF(EMLXS_CONTEXT,
601 			    &emlxs_fcp_completion_error_msg,
602 			    "Local reject. "
603 			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
604 			    emlxs_error_xlate(localstat), did, sbp,
605 			    scsi_opcode, pkt->pkt_timeout);
606 			break;
607 
608 		default:
609 			EMLXS_MSGF(EMLXS_CONTEXT,
610 			    &emlxs_fcp_completion_error_msg,
611 			    "Local reject. %s 0x%06x %p %02x (%x)(%x)",
612 			    emlxs_error_xlate(localstat), did, sbp,
613 			    scsi_opcode, (uint16_t)cmd->ULPIOTAG,
614 			    (uint16_t)cmd->ULPCONTEXT);
615 		}
616 
617 		break;
618 
619 	case IOSTAT_NPORT_RJT:
620 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
621 		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
622 		    scsi_opcode);
623 		break;
624 
625 	case IOSTAT_FABRIC_RJT:
626 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
627 		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
628 		    scsi_opcode);
629 		break;
630 
631 	case IOSTAT_NPORT_BSY:
632 #ifdef SAN_DIAG_SUPPORT
633 		ndlp = (NODELIST *)iocbq->node;
634 		emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
635 #endif
636 
637 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
638 		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
639 		    scsi_opcode);
640 		break;
641 
642 	case IOSTAT_FABRIC_BSY:
643 #ifdef SAN_DIAG_SUPPORT
644 		ndlp = (NODELIST *)iocbq->node;
645 		emlxs_log_sd_fc_bsy_event(port, NULL);
646 #endif
647 
648 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
649 		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
650 		    scsi_opcode);
651 		break;
652 
653 	case IOSTAT_INTERMED_RSP:
654 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
655 		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
656 		    sbp, scsi_opcode);
657 		break;
658 
659 	case IOSTAT_LS_RJT:
660 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
661 		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
662 		    scsi_opcode);
663 		break;
664 
665 	case IOSTAT_DATA_UNDERRUN:
666 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
667 		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
668 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
669 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
670 		    rsp_data_resid, scsi_status, sense, asc, ascq);
671 		break;
672 
673 	case IOSTAT_DATA_OVERRUN:
674 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
675 		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
676 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
677 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
678 		    rsp_data_resid, scsi_status, sense, asc, ascq);
679 		break;
680 
681 	case IOSTAT_RSP_INVALID:
682 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
683 		    "Rsp Invalid. did=0x%06x sbp=%p cmd=%02x dl=%d rl=%d "
684 		    "(%d, %d, %d)",
685 		    did, sbp, scsi_opcode, pkt->pkt_datalen, pkt->pkt_rsplen,
686 		    LE_SWAP32(rsp->fcp_resid),
687 		    LE_SWAP32(rsp->fcp_sense_len),
688 		    LE_SWAP32(rsp->fcp_response_len));
689 		break;
690 
691 	default:
692 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
693 		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
694 		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
695 		    scsi_opcode);
696 		break;
697 	}
698 
699 	if (iostat == IOSTAT_SUCCESS) {
700 		HBASTATS.FcpGood++;
701 	} else {
702 		HBASTATS.FcpError++;
703 	}
704 
705 	mutex_exit(&sbp->mtx);
706 
707 	emlxs_pkt_complete(sbp, iostat, localstat, 0);
708 
709 	return;
710 
711 } /* emlxs_handle_fcp_event() */
712 
713 
714 /*
715  *  emlxs_post_buffer
716  *
717  *  This routine posts "cnt" buffers to the
718  *  ring with the QUE_RING_BUF64_CN command.
719  *  Up to 2 buffers may be posted per command.
720  *  Returns the number of buffers NOT posted.
721  */
722 /* SLI3 */
723 extern int
724 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
725 {
726 	emlxs_port_t *port = &PPORT;
727 	IOCB *icmd;
728 	IOCBQ *iocbq;
729 	MATCHMAP *mp;
730 	uint16_t tag;
731 	uint32_t maxqbuf;
732 	int32_t i;
733 	int32_t j;
734 	uint32_t seg;
735 	uint32_t size;
736 
737 	mp = 0;
738 	maxqbuf = 2;
739 	tag = (uint16_t)cnt;
740 	cnt += rp->fc_missbufcnt;
741 
742 	if (rp->ringno == hba->channel_els) {
743 		seg = MEM_BUF;
744 		size = MEM_ELSBUF_SIZE;
745 	} else if (rp->ringno == hba->channel_ip) {
746 		seg = MEM_IPBUF;
747 		size = MEM_IPBUF_SIZE;
748 	} else if (rp->ringno == hba->channel_ct) {
749 		seg = MEM_CTBUF;
750 		size = MEM_CTBUF_SIZE;
751 	}
752 #ifdef SFCT_SUPPORT
753 	else if (rp->ringno == hba->CHANNEL_FCT) {
754 		seg = MEM_FCTBUF;
755 		size = MEM_FCTBUF_SIZE;
756 	}
757 #endif /* SFCT_SUPPORT */
758 	else {
759 		return (0);
760 	}
761 
762 	/*
763 	 * While there are buffers to post
764 	 */
765 	while (cnt) {
766 		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
767 			rp->fc_missbufcnt = cnt;
768 			return (cnt);
769 		}
770 
771 		iocbq->channel = (void *)&hba->chan[rp->ringno];
772 		iocbq->port = (void *)port;
773 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
774 
775 		icmd = &iocbq->iocb;
776 
777 		/*
778 		 * Max buffers can be posted per command
779 		 */
780 		for (i = 0; i < maxqbuf; i++) {
781 			if (cnt <= 0)
782 				break;
783 
784 			/* fill in BDEs for command */
785 			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg))
786 			    == 0) {
787 				icmd->ULPBDECOUNT = i;
788 				for (j = 0; j < i; j++) {
789 					mp = EMLXS_GET_VADDR(hba, rp, icmd);
790 					if (mp) {
791 						emlxs_mem_put(hba, seg,
792 						    (void *)mp);
793 					}
794 				}
795 
796 				rp->fc_missbufcnt = cnt + i;
797 
798 				emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
799 
800 				return (cnt + i);
801 			}
802 
803 			/*
804 			 * map that page and save the address pair for lookup
805 			 * later
806 			 */
807 			emlxs_mem_map_vaddr(hba,
808 			    rp,
809 			    mp,
810 			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
811 			    (uint32_t *)&icmd->un.cont64[i].addrLow);
812 
813 			icmd->un.cont64[i].tus.f.bdeSize = size;
814 			icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;
815 
816 			/*
817 			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
818 			 *    "UB Post: ring=%d addr=%08x%08x size=%d",
819 			 *    rp->ringno, icmd->un.cont64[i].addrHigh,
820 			 *    icmd->un.cont64[i].addrLow, size);
821 			 */
822 
823 			cnt--;
824 		}
825 
826 		icmd->ULPIOTAG = tag;
827 		icmd->ULPBDECOUNT = i;
828 		icmd->ULPLE = 1;
829 		icmd->ULPOWNER = OWN_CHIP;
830 		/* used for delimiter between commands */
831 		iocbq->bp = (void *)mp;
832 
833 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
834 	}
835 
836 	rp->fc_missbufcnt = 0;
837 
838 	return (0);
839 
840 } /* emlxs_post_buffer() */
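
/*
 * Usage sketch (hypothetical caller): a completion handler that has
 * consumed an unsolicited ELS buffer might repost it with
 *
 *	if (emlxs_post_buffer(hba, rp, 1) != 0) {
 *		... shortfall is recorded in rp->fc_missbufcnt and can
 *		    be retried on a later completion ...
 *	}
 *
 * where a nonzero return is the number of buffers NOT posted.
 */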
841 
842 
843 static void
844 emlxs_fcp_tag_nodes(emlxs_port_t *port)
845 {
846 	NODELIST *nlp;
847 	int i;
848 
849 	/* We will process all nodes with this tag later */
850 	rw_enter(&port->node_rwlock, RW_READER);
851 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
852 		nlp = port->node_table[i];
853 		while (nlp != NULL) {
854 			nlp->nlp_tag = 1;
855 			nlp = nlp->nlp_list_next;
856 		}
857 	}
858 	rw_exit(&port->node_rwlock);
859 }
860 
861 
862 static NODELIST *
863 emlxs_find_tagged_node(emlxs_port_t *port)
864 {
865 	NODELIST *nlp;
866 	NODELIST *tagged;
867 	int i;
868 
869 	/* Find first node */
870 	rw_enter(&port->node_rwlock, RW_READER);
871 	tagged = 0;
872 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
873 		nlp = port->node_table[i];
874 		while (nlp != NULL) {
875 			if (!nlp->nlp_tag) {
876 				nlp = nlp->nlp_list_next;
877 				continue;
878 			}
879 			nlp->nlp_tag = 0;
880 
881 			if (nlp->nlp_Rpi == FABRIC_RPI) {
882 				nlp = nlp->nlp_list_next;
883 				continue;
884 			}
885 			tagged = nlp;
886 			break;
887 		}
888 		if (tagged) {
889 			break;
890 		}
891 	}
892 	rw_exit(&port->node_rwlock);
893 	return (tagged);
894 }
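
/*
 * These two helpers implement a simple sweep: tag every node under
 * node_rwlock, then pull one tagged node at a time so the caller can
 * act on it without holding the lock across the work. A minimal
 * sketch of the pattern (used by the routines below):
 *
 *	emlxs_fcp_tag_nodes(port);
 *	while ((nlp = emlxs_find_tagged_node(port)) != NULL) {
 *		(void) emlxs_rpi_pause_notify(port, nlp->rpip);
 *	}
 *
 * Nodes holding the fabric RPI are skipped by the finder.
 */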
895 
896 
897 extern int
898 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
899 {
900 	emlxs_hba_t *hba = HBA;
901 	emlxs_config_t *cfg;
902 	NODELIST *nlp;
903 	fc_affected_id_t *aid;
904 	uint32_t mask;
905 	uint32_t aff_d_id;
906 	uint32_t linkdown;
907 	uint32_t vlinkdown;
908 	uint32_t action;
909 	int i;
910 	uint32_t unreg_vpi;
911 	uint32_t update;
912 	uint32_t adisc_support;
913 	uint32_t clear_all;
914 	uint8_t format;
915 
916 	/* Target mode only uses this routine for linkdowns */
917 	if ((port->mode == MODE_TARGET) && (scope != 0xffffffff) &&
918 	    (scope != 0xfeffffff) && (scope != 0xfdffffff)) {
919 		return (0);
920 	}
921 
922 	cfg = &CFG;
923 	aid = (fc_affected_id_t *)&scope;
924 	linkdown = 0;
925 	vlinkdown = 0;
926 	unreg_vpi = 0;
927 	update = 0;
928 	clear_all = 0;
929 
930 	if (!(port->flag & EMLXS_PORT_BOUND)) {
931 		return (0);
932 	}
933 
934 	format = aid->aff_format;
935 
936 	switch (format) {
937 	case 0:	/* Port */
938 		mask = 0x00ffffff;
939 		break;
940 
941 	case 1:	/* Area */
942 		mask = 0x00ffff00;
943 		break;
944 
945 	case 2:	/* Domain */
946 		mask = 0x00ff0000;
947 		break;
948 
949 	case 3:	/* Network */
950 		mask = 0x00000000;
951 		break;
952 
953 #ifdef DHCHAP_SUPPORT
954 	case 0xfe:	/* Virtual link down */
955 		mask = 0x00000000;
956 		vlinkdown = 1;
957 		break;
958 #endif /* DHCHAP_SUPPORT */
959 
960 	case 0xff:	/* link is down */
961 		mask = 0x00000000;
962 		linkdown = 1;
963 		break;
964 
965 	case 0xfd:	/* New fabric */
966 	default:
967 		mask = 0x00000000;
968 		linkdown = 1;
969 		clear_all = 1;
970 		break;
971 	}
972 
973 	aff_d_id = aid->aff_d_id & mask;
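
	/*
	 * Example (hypothetical scope): an RSCN carrying an area-format
	 * affected ID of 0x01AB00 arrives as format 1, so mask becomes
	 * 0x00ffff00 and aff_d_id becomes 0x01AB00; every node whose
	 * nlp_DID matches aff_d_id under that mask is processed below.
	 */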
974 
975 
976 	/*
977 	 * If the link is down, this is a hard shutdown and flush.
978 	 * If the link is not down, this is a soft shutdown and flush
979 	 * (e.g. an RSCN).
980 	 */
981 	if (linkdown) {
982 		mutex_enter(&EMLXS_PORT_LOCK);
983 
984 		port->flag &= EMLXS_PORT_LINKDOWN_MASK;
985 
986 		if (port->ulp_statec != FC_STATE_OFFLINE) {
987 			port->ulp_statec = FC_STATE_OFFLINE;
988 
989 			port->prev_did = port->did;
990 			port->did = 0;
991 			port->rdid = 0;
992 
993 			bcopy(&port->fabric_sparam, &port->prev_fabric_sparam,
994 			    sizeof (SERV_PARM));
995 			bzero(&port->fabric_sparam, sizeof (SERV_PARM));
996 
997 			update = 1;
998 		}
999 
1000 		mutex_exit(&EMLXS_PORT_LOCK);
1001 
1002 		emlxs_timer_cancel_clean_address(port);
1003 
1004 		/* Tell ULP about it */
1005 		if (update) {
1006 			if (port->flag & EMLXS_PORT_BOUND) {
1007 				if (port->vpi == 0) {
1008 					EMLXS_MSGF(EMLXS_CONTEXT,
1009 					    &emlxs_link_down_msg, NULL);
1010 				}
1011 
1012 				if (port->mode == MODE_INITIATOR) {
1013 					emlxs_fca_link_down(port);
1014 				}
1015 #ifdef SFCT_SUPPORT
1016 				else if (port->mode == MODE_TARGET) {
1017 					emlxs_fct_link_down(port);
1018 				}
1019 #endif /* SFCT_SUPPORT */
1020 
1021 			} else {
1022 				if (port->vpi == 0) {
1023 					EMLXS_MSGF(EMLXS_CONTEXT,
1024 					    &emlxs_link_down_msg, "*");
1025 				}
1026 			}
1027 
1028 
1029 		}
1030 
1031 		unreg_vpi = 1;
1032 
1033 #ifdef DHCHAP_SUPPORT
1034 		/* Stop authentication with all nodes */
1035 		emlxs_dhc_auth_stop(port, NULL);
1036 #endif /* DHCHAP_SUPPORT */
1037 
1038 		/* Flush the base node */
1039 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
1040 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
1041 
1042 		/* Flush any pending ub buffers */
1043 		emlxs_ub_flush(port);
1044 	}
1045 #ifdef DHCHAP_SUPPORT
1046 	/* virtual link down */
1047 	else if (vlinkdown) {
1048 		mutex_enter(&EMLXS_PORT_LOCK);
1049 
1050 		if (port->ulp_statec != FC_STATE_OFFLINE) {
1051 			port->ulp_statec = FC_STATE_OFFLINE;
1052 			update = 1;
1053 		}
1054 
1055 		mutex_exit(&EMLXS_PORT_LOCK);
1056 
1057 		emlxs_timer_cancel_clean_address(port);
1058 
1059 		/* Tell ULP about it */
1060 		if (update) {
1061 			if (port->flag & EMLXS_PORT_BOUND) {
1062 				if (port->vpi == 0) {
1063 					EMLXS_MSGF(EMLXS_CONTEXT,
1064 					    &emlxs_link_down_msg,
1065 					    "Switch authentication failed.");
1066 				}
1067 
1068 				if (port->mode == MODE_INITIATOR) {
1069 					emlxs_fca_link_down(port);
1070 				}
1071 #ifdef SFCT_SUPPORT
1072 				else if (port->mode == MODE_TARGET) {
1073 					emlxs_fct_link_down(port);
1074 				}
1075 #endif /* SFCT_SUPPORT */
1076 			} else {
1077 				if (port->vpi == 0) {
1078 					EMLXS_MSGF(EMLXS_CONTEXT,
1079 					    &emlxs_link_down_msg,
1080 					    "Switch authentication failed. *");
1081 				}
1082 			}
1083 
1084 
1085 		}
1086 
1087 		/* Flush the base node */
1088 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
1089 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
1090 	}
1091 #endif /* DHCHAP_SUPPORT */
1092 	else {
1093 		emlxs_timer_cancel_clean_address(port);
1094 	}
1095 
1096 	if (port->mode == MODE_TARGET) {
1097 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1098 			/* Set the node tags */
1099 			emlxs_fcp_tag_nodes(port);
1100 			unreg_vpi = 0;
1101 			while ((nlp = emlxs_find_tagged_node(port))) {
1102 				(void) emlxs_rpi_pause_notify(port,
1103 				    nlp->rpip);
1104 				/*
1105 				 * In port_online we need to resume
1106 				 * these RPIs before we can use them.
1107 				 */
1108 			}
1109 		}
1110 		goto done;
1111 	}
1112 
1113 	/* Set the node tags */
1114 	emlxs_fcp_tag_nodes(port);
1115 
1116 	if (!clear_all && (hba->flag & FC_ONLINE_MODE)) {
1117 		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
1118 	} else {
1119 		adisc_support = 0;
1120 	}
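
	/*
	 * Summary of the adisc_support levels handled below:
	 *   0 - no support: flush IO to every matching node,
	 *   1 - partial: spare FCP2 target nodes from the FCP ring flush,
	 *   2 - full: on (v)link down, hold FCP IO for all FCP targets.
	 */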
1121 
1122 	/* Check ADISC support level */
1123 	switch (adisc_support) {
1124 	case 0:	/* No support - Flush all IO to all matching nodes */
1125 
1126 		for (;;) {
1127 			/*
1128 			 * We need to hold the locks this way because
1129 			 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1130 			 * same locks. Also, when we release the lock the list
1131 			 * can change out from under us.
1132 			 */
1133 
1134 			/* Find first node */
1135 			rw_enter(&port->node_rwlock, RW_READER);
1136 			action = 0;
1137 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1138 				nlp = port->node_table[i];
1139 				while (nlp != NULL) {
1140 					if (!nlp->nlp_tag) {
1141 						nlp = nlp->nlp_list_next;
1142 						continue;
1143 					}
1144 					nlp->nlp_tag = 0;
1145 
1146 					/*
1147 					 * Check for any device that matches
1148 					 * our mask
1149 					 */
1150 					if ((nlp->nlp_DID & mask) == aff_d_id) {
1151 						if (linkdown) {
1152 							action = 1;
1153 							break;
1154 						} else { /* Must be an RSCN */
1155 
1156 							action = 2;
1157 							break;
1158 						}
1159 					}
1160 					nlp = nlp->nlp_list_next;
1161 				}
1162 
1163 				if (action) {
1164 					break;
1165 				}
1166 			}
1167 			rw_exit(&port->node_rwlock);
1168 
1169 
1170 			/* Check if nothing was found */
1171 			if (action == 0) {
1172 				break;
1173 			} else if (action == 1) {
1174 				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
1175 				    NULL, NULL, NULL);
1176 			} else if (action == 2) {
1177 				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1178 
1179 #ifdef DHCHAP_SUPPORT
1180 				emlxs_dhc_auth_stop(port, nlp);
1181 #endif /* DHCHAP_SUPPORT */
1182 
1183 				/*
1184 				 * Close the node for any further normal IO
1185 				 * A PLOGI will reopen the node
1186 				 */
1187 				emlxs_node_close(port, nlp,
1188 				    hba->channel_fcp, 60);
1189 				emlxs_node_close(port, nlp,
1190 				    hba->channel_ip, 60);
1191 
1192 				/* Flush tx queue */
1193 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1194 
1195 				/* Flush chip queue */
1196 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1197 			}
1198 
1199 		}
1200 
1201 		break;
1202 
1203 	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */
1204 
1205 		for (;;) {
1206 
1207 			/*
1208 			 * We need to hold the locks this way because
1209 			 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1210 			 * same locks. Also, when we release the lock the list
1211 			 * can change out from under us.
1212 			 */
1213 			rw_enter(&port->node_rwlock, RW_READER);
1214 			action = 0;
1215 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1216 				nlp = port->node_table[i];
1217 				while (nlp != NULL) {
1218 					if (!nlp->nlp_tag) {
1219 						nlp = nlp->nlp_list_next;
1220 						continue;
1221 					}
1222 					nlp->nlp_tag = 0;
1223 
1224 					/*
1225 					 * Check for special FCP2 target device
1226 					 * that matches our mask
1227 					 */
1228 					if ((nlp->nlp_fcp_info &
1229 					    NLP_FCP_TGT_DEVICE) &&
1230 					    (nlp->nlp_fcp_info &
1231 					    NLP_FCP_2_DEVICE) &&
1232 					    (nlp->nlp_DID & mask) ==
1233 					    aff_d_id) {
1234 						action = 3;
1235 						break;
1236 					}
1237 
1238 					/*
1239 					 * Check for any other device that
1240 					 * matches our mask
1241 					 */
1242 					else if ((nlp->nlp_DID & mask) ==
1243 					    aff_d_id) {
1244 						if (linkdown) {
1245 							action = 1;
1246 							break;
1247 						} else { /* Must be an RSCN */
1248 
1249 							action = 2;
1250 							break;
1251 						}
1252 					}
1253 
1254 					nlp = nlp->nlp_list_next;
1255 				}
1256 
1257 				if (action) {
1258 					break;
1259 				}
1260 			}
1261 			rw_exit(&port->node_rwlock);
1262 
1263 			/* Check if nothing was found */
1264 			if (action == 0) {
1265 				break;
1266 			} else if (action == 1) {
1267 				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
1268 				    NULL, NULL, NULL);
1269 			} else if (action == 2) {
1270 				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1271 
1272 #ifdef DHCHAP_SUPPORT
1273 				emlxs_dhc_auth_stop(port, nlp);
1274 #endif /* DHCHAP_SUPPORT */
1275 
1276 				/*
1277 				 * Close the node for any further normal IO
1278 				 * A PLOGI will reopen the node
1279 				 */
1280 				emlxs_node_close(port, nlp,
1281 				    hba->channel_fcp, 60);
1282 				emlxs_node_close(port, nlp,
1283 				    hba->channel_ip, 60);
1284 
1285 				/* Flush tx queue */
1286 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1287 
1288 				/* Flush chip queue */
1289 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1290 
1291 			} else if (action == 3) {	/* FCP2 devices */
1292 				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1293 
1294 				unreg_vpi = 0;
1295 
1296 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1297 					(void) emlxs_rpi_pause_notify(port,
1298 					    nlp->rpip);
1299 				}
1300 
1301 #ifdef DHCHAP_SUPPORT
1302 				emlxs_dhc_auth_stop(port, nlp);
1303 #endif /* DHCHAP_SUPPORT */
1304 
1305 				/*
1306 				 * Close the node for any further normal IO
1307 				 * An ADISC or a PLOGI will reopen the node
1308 				 */
1309 				emlxs_node_close(port, nlp,
1310 				    hba->channel_fcp, -1);
1311 				emlxs_node_close(port, nlp, hba->channel_ip,
1312 				    ((linkdown) ? 0 : 60));
1313 
1314 				/* Flush tx queues except for FCP ring */
1315 				(void) emlxs_tx_node_flush(port, nlp,
1316 				    &hba->chan[hba->channel_ct], 0, 0);
1317 				(void) emlxs_tx_node_flush(port, nlp,
1318 				    &hba->chan[hba->channel_els], 0, 0);
1319 				(void) emlxs_tx_node_flush(port, nlp,
1320 				    &hba->chan[hba->channel_ip], 0, 0);
1321 
1322 				/* Flush chip queues except for FCP ring */
1323 				(void) emlxs_chipq_node_flush(port,
1324 				    &hba->chan[hba->channel_ct], nlp, 0);
1325 				(void) emlxs_chipq_node_flush(port,
1326 				    &hba->chan[hba->channel_els], nlp, 0);
1327 				(void) emlxs_chipq_node_flush(port,
1328 				    &hba->chan[hba->channel_ip], nlp, 0);
1329 			}
1330 		}
1331 		break;
1332 
1333 	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */
1334 
1335 		if (!linkdown && !vlinkdown) {
1336 			break;
1337 		}
1338 
1339 		for (;;) {
1340 			/*
1341 			 * We need to hold the locks this way because
1342 			 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1343 			 * same locks. Also, when we release the lock the list
1344 			 * can change out from under us.
1345 			 */
1346 			rw_enter(&port->node_rwlock, RW_READER);
1347 			action = 0;
1348 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1349 				nlp = port->node_table[i];
1350 				while (nlp != NULL) {
1351 					if (!nlp->nlp_tag) {
1352 						nlp = nlp->nlp_list_next;
1353 						continue;
1354 					}
1355 					nlp->nlp_tag = 0;
1356 
1357 					/*
1358 					 * Check for FCP target device that
1359 					 * matches our mask
1360 					 */
1361 					if ((nlp->nlp_fcp_info &
1362 					    NLP_FCP_TGT_DEVICE) &&
1363 					    (nlp->nlp_DID & mask) ==
1364 					    aff_d_id) {
1365 						action = 3;
1366 						break;
1367 					}
1368 
1369 					/*
1370 					 * Check for any other device that
1371 					 * matches our mask
1372 					 */
1373 					else if ((nlp->nlp_DID & mask) ==
1374 					    aff_d_id) {
1375 						if (linkdown) {
1376 							action = 1;
1377 							break;
1378 						} else { /* Must be an RSCN */
1379 
1380 							action = 2;
1381 							break;
1382 						}
1383 					}
1384 
1385 					nlp = nlp->nlp_list_next;
1386 				}
1387 				if (action) {
1388 					break;
1389 				}
1390 			}
1391 			rw_exit(&port->node_rwlock);
1392 
1393 			/* Check if nothing was found */
1394 			if (action == 0) {
1395 				break;
1396 			} else if (action == 1) {
1397 				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
1398 				    NULL, NULL, NULL);
1399 			} else if (action == 2) {
1400 				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1401 
1402 				/*
1403 				 * Close the node for any further normal IO
1404 				 * A PLOGI will reopen the node
1405 				 */
1406 				emlxs_node_close(port, nlp,
1407 				    hba->channel_fcp, 60);
1408 				emlxs_node_close(port, nlp,
1409 				    hba->channel_ip, 60);
1410 
1411 				/* Flush tx queue */
1412 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1413 
1414 				/* Flush chip queue */
1415 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1416 
1417 			} else if (action == 3) {	/* FCP2 devices */
1418 				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1419 
1420 				unreg_vpi = 0;
1421 
1422 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1423 					(void) emlxs_rpi_pause_notify(port,
1424 					    nlp->rpip);
1425 				}
1426 
1427 				/*
1428 				 * Close the node for any further normal IO
1429 				 * An ADISC or a PLOGI will reopen the node
1430 				 */
1431 				emlxs_node_close(port, nlp,
1432 				    hba->channel_fcp, -1);
1433 				emlxs_node_close(port, nlp, hba->channel_ip,
1434 				    ((linkdown) ? 0 : 60));
1435 
1436 				/* Flush tx queues except for FCP ring */
1437 				(void) emlxs_tx_node_flush(port, nlp,
1438 				    &hba->chan[hba->channel_ct], 0, 0);
1439 				(void) emlxs_tx_node_flush(port, nlp,
1440 				    &hba->chan[hba->channel_els], 0, 0);
1441 				(void) emlxs_tx_node_flush(port, nlp,
1442 				    &hba->chan[hba->channel_ip], 0, 0);
1443 
1444 				/* Flush chip queues except for FCP ring */
1445 				(void) emlxs_chipq_node_flush(port,
1446 				    &hba->chan[hba->channel_ct], nlp, 0);
1447 				(void) emlxs_chipq_node_flush(port,
1448 				    &hba->chan[hba->channel_els], nlp, 0);
1449 				(void) emlxs_chipq_node_flush(port,
1450 				    &hba->chan[hba->channel_ip], nlp, 0);
1451 			}
1452 		}
1453 
1454 		break;
1455 
1456 	}	/* switch() */
1457 
1458 done:
1459 
1460 	if (unreg_vpi) {
1461 		(void) emlxs_mb_unreg_vpi(port);
1462 	}
1463 
1464 	return (0);
1465 
1466 } /* emlxs_port_offline() */
1467 
1468 
1469 extern void
1470 emlxs_port_online(emlxs_port_t *vport)
1471 {
1472 	emlxs_hba_t *hba = vport->hba;
1473 	emlxs_port_t *port = &PPORT;
1474 	NODELIST *nlp;
1475 	uint32_t state;
1476 	uint32_t update;
1477 	uint32_t npiv_linkup;
1478 	char topology[32];
1479 	char linkspeed[32];
1480 	char mode[32];
1481 
1482 	/*
1483 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1484 	 *    "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1485 	 */
1486 
1487 	if ((vport->vpi > 0) &&
1488 	    (!(hba->flag & FC_NPIV_ENABLED) ||
1489 	    !(hba->flag & FC_NPIV_SUPPORTED))) {
1490 		return;
1491 	}
1492 
1493 	if (!(vport->flag & EMLXS_PORT_BOUND) ||
1494 	    !(vport->flag & EMLXS_PORT_ENABLED)) {
1495 		return;
1496 	}
1497 
1498 	/* Check for mode */
1499 	if (port->mode == MODE_TARGET) {
1500 		(void) strlcpy(mode, ", target", sizeof (mode));
1501 
1502 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1503 			/* Set the node tags */
1504 			emlxs_fcp_tag_nodes(vport);
1505 			while ((nlp = emlxs_find_tagged_node(vport))) {
1506 				/* The RPI was paused in port_offline */
1507 				(void) emlxs_rpi_resume_notify(vport,
1508 				    nlp->rpip, 0);
1509 			}
1510 		}
1511 	} else if (port->mode == MODE_INITIATOR) {
1512 		(void) strlcpy(mode, ", initiator", sizeof (mode));
1513 	} else {
1514 		(void) strlcpy(mode, ", unknown", sizeof (mode));
1515 	}
1516 	mutex_enter(&EMLXS_PORT_LOCK);
1517 
1518 	/* Check for loop topology */
1519 	if (hba->topology == TOPOLOGY_LOOP) {
1520 		state = FC_STATE_LOOP;
1521 		(void) strlcpy(topology, ", loop", sizeof (topology));
1522 	} else {
1523 		state = FC_STATE_ONLINE;
1524 		(void) strlcpy(topology, ", fabric", sizeof (topology));
1525 	}
1526 
1527 	/* Set the link speed */
1528 	switch (hba->linkspeed) {
1529 	case 0:
1530 		(void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
1531 		state |= FC_STATE_1GBIT_SPEED;
1532 		break;
1533 
1534 	case LA_1GHZ_LINK:
1535 		(void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
1536 		state |= FC_STATE_1GBIT_SPEED;
1537 		break;
1538 	case LA_2GHZ_LINK:
1539 		(void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
1540 		state |= FC_STATE_2GBIT_SPEED;
1541 		break;
1542 	case LA_4GHZ_LINK:
1543 		(void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
1544 		state |= FC_STATE_4GBIT_SPEED;
1545 		break;
1546 	case LA_8GHZ_LINK:
1547 		(void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
1548 		state |= FC_STATE_8GBIT_SPEED;
1549 		break;
1550 	case LA_10GHZ_LINK:
1551 		(void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
1552 		state |= FC_STATE_10GBIT_SPEED;
1553 		break;
1554 	case LA_16GHZ_LINK:
1555 		(void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
1556 		state |= FC_STATE_16GBIT_SPEED;
1557 		break;
1558 	case LA_32GHZ_LINK:
1559 		(void) strlcpy(linkspeed, "32Gb", sizeof (linkspeed));
1560 		state |= FC_STATE_32GBIT_SPEED;
1561 		break;
1562 	default:
1563 		(void) snprintf(linkspeed, sizeof (linkspeed), "unknown(0x%x)",
1564 		    hba->linkspeed);
1565 		break;
1566 	}
1567 
1568 	npiv_linkup = 0;
1569 	update = 0;
1570 
1571 	if ((hba->state >= FC_LINK_UP) &&
1572 	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
1573 		update = 1;
1574 		vport->ulp_statec = state;
1575 
1576 		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1577 			hba->flag |= FC_NPIV_LINKUP;
1578 			npiv_linkup = 1;
1579 		}
1580 	}
1581 
1582 	mutex_exit(&EMLXS_PORT_LOCK);
1583 
1584 	if (update) {
1585 		if (vport->flag & EMLXS_PORT_BOUND) {
1586 			if (vport->vpi == 0) {
1587 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1588 				    "%s%s%s", linkspeed, topology, mode);
1589 
1590 			} else if (npiv_linkup) {
1591 				EMLXS_MSGF(EMLXS_CONTEXT,
1592 				    &emlxs_npiv_link_up_msg, "%s%s%s",
1593 				    linkspeed, topology, mode);
1594 			}
1595 
1596 			if (vport->mode == MODE_INITIATOR) {
1597 				emlxs_fca_link_up(vport);
1598 			}
1599 #ifdef SFCT_SUPPORT
1600 			else if (vport->mode == MODE_TARGET) {
1601 				emlxs_fct_link_up(vport);
1602 			}
1603 #endif /* SFCT_SUPPORT */
1604 		} else {
1605 			if (vport->vpi == 0) {
1606 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1607 				    "%s%s%s *", linkspeed, topology, mode);
1608 
1609 			} else if (npiv_linkup) {
1610 				EMLXS_MSGF(EMLXS_CONTEXT,
1611 				    &emlxs_npiv_link_up_msg, "%s%s%s *",
1612 				    linkspeed, topology, mode);
1613 			}
1614 		}
1615 
1616 		/* Check for waiting threads */
1617 		if (vport->vpi == 0) {
1618 			mutex_enter(&EMLXS_LINKUP_LOCK);
1619 			if (hba->linkup_wait_flag == TRUE) {
1620 				hba->linkup_wait_flag = FALSE;
1621 				cv_broadcast(&EMLXS_LINKUP_CV);
1622 			}
1623 			mutex_exit(&EMLXS_LINKUP_LOCK);
1624 		}
1625 
1626 		/* Flush any pending ub buffers */
1627 		emlxs_ub_flush(vport);
1628 	}
1629 
1630 	return;
1631 
1632 } /* emlxs_port_online() */
1633 
1634 
1635 /* SLI3 */
1636 extern void
1637 emlxs_linkdown(emlxs_hba_t *hba)
1638 {
1639 	emlxs_port_t *port = &PPORT;
1640 	int i;
1641 	uint32_t scope;
1642 
1643 	mutex_enter(&EMLXS_PORT_LOCK);
1644 
1645 	if (hba->state > FC_LINK_DOWN) {
1646 		HBASTATS.LinkDown++;
1647 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
1648 	}
1649 
1650 	/* Set scope */
1651 	scope = (hba->flag & FC_NEW_FABRIC)? 0xFDFFFFFF:0xFFFFFFFF;
1652 
1653 	/* Filter hba flags */
1654 	hba->flag &= FC_LINKDOWN_MASK;
1655 	hba->discovery_timer = 0;
1656 	hba->linkup_timer = 0;
1657 
1658 	mutex_exit(&EMLXS_PORT_LOCK);
1659 
1660 	for (i = 0; i < MAX_VPORTS; i++) {
1661 		port = &VPORT(i);
1662 
1663 		if (!(port->flag & EMLXS_PORT_BOUND)) {
1664 			continue;
1665 		}
1666 
1667 		(void) emlxs_port_offline(port, scope);
1668 
1669 	}
1670 
1671 	emlxs_log_link_event(port);
1672 
1673 	return;
1674 
1675 } /* emlxs_linkdown() */
1676 
1677 
1678 /* SLI3 */
1679 extern void
1680 emlxs_linkup(emlxs_hba_t *hba)
1681 {
1682 	emlxs_port_t *port = &PPORT;
1683 	emlxs_config_t *cfg = &CFG;
1684 
1685 	mutex_enter(&EMLXS_PORT_LOCK);
1686 
1687 	/* Check for any mode changes */
1688 	emlxs_mode_set(hba);
1689 
1690 	HBASTATS.LinkUp++;
1691 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);
1692 
1693 #ifdef MENLO_SUPPORT
1694 	if (hba->flag & FC_MENLO_MODE) {
1695 		mutex_exit(&EMLXS_PORT_LOCK);
1696 
1697 		/*
1698 		 * Trigger linkup CV and don't start linkup & discovery
1699 		 * timers
1700 		 */
1701 		mutex_enter(&EMLXS_LINKUP_LOCK);
1702 		cv_broadcast(&EMLXS_LINKUP_CV);
1703 		mutex_exit(&EMLXS_LINKUP_LOCK);
1704 
1705 		emlxs_log_link_event(port);
1706 
1707 		return;
1708 	}
1709 #endif /* MENLO_SUPPORT */
1710 
1711 	/* Set the linkup & discovery timers */
1712 	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
1713 	hba->discovery_timer =
1714 	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1715 	    cfg[CFG_DISC_TIMEOUT].current;
1716 
1717 	mutex_exit(&EMLXS_PORT_LOCK);
1718 
1719 	emlxs_log_link_event(port);
1720 
1721 	return;
1722 
1723 } /* emlxs_linkup() */
1724 
1725 
1726 /*
1727  *  emlxs_reset_link
1728  *
1729  *  Description:
1730  *  Called to reset the link with an init_link
1731  *
1732  *    Returns:
1733  *
1734  */
1735 extern int
1736 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
1737 {
1738 	emlxs_port_t *port = &PPORT;
1739 	emlxs_config_t *cfg;
1740 	MAILBOXQ *mbq = NULL;
1741 	MAILBOX *mb = NULL;
1742 	int rval = 0;
1743 	int tmo;
1744 	int rc;
1745 
1746 	/*
1747 	 * Get a buffer to use for the mailbox command
1748 	 */
1749 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
1750 	    == NULL) {
1751 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1752 		    "Unable to allocate mailbox buffer.");
1753 		rval = 1;
1754 		goto reset_link_fail;
1755 	}
1756 
1757 	if (linkup) {
1758 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1759 		    "Resetting link...");
1760 	} else {
1761 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1762 		    "Disabling link...");
1763 	}
1764 
1765 	mb = (MAILBOX *)mbq;
1766 
1767 	/* Bring link down first */
1768 	emlxs_mb_down_link(hba, mbq);
1769 
1770 #define	MBXERR_LINK_DOWN	0x33
1771 
1772 	if (wait) {
1773 		wait = MBX_WAIT;
1774 	} else {
1775 		wait = MBX_NOWAIT;
1776 	}
1777 	rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1778 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
1779 	    (rc != MBXERR_LINK_DOWN)) {
1780 		rval = 1;
1781 		goto reset_link_fail;
1782 	}
1783 
1784 	tmo = 120;
1785 	do {
1786 		delay(drv_usectohz(500000));
1787 		tmo--;
1788 
1789 		if (!tmo)   {
1790 			rval = 1;
1791 
1792 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1793 			    "Linkdown timeout.");
1794 
1795 			goto reset_link_fail;
1796 		}
1797 	} while ((hba->state >= FC_LINK_UP) && (hba->state != FC_ERROR));
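
	/*
	 * The loop above polls every 500 ms for up to 120 iterations,
	 * allowing roughly 60 seconds for the link to drop before the
	 * linkdown timeout fires.
	 */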
1798 
1799 	if (linkup) {
1800 		/*
1801 		 * Setup and issue mailbox INITIALIZE LINK command
1802 		 */
1803 
1804 		if (wait == MBX_NOWAIT) {
1805 			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
1806 			    == NULL) {
1807 				EMLXS_MSGF(EMLXS_CONTEXT,
1808 				    &emlxs_link_reset_failed_msg,
1809 				    "Unable to allocate mailbox buffer.");
1810 				rval = 1;
1811 				goto reset_link_fail;
1812 			}
1813 			mb = (MAILBOX *)mbq;
1814 		} else {
1815 			/* Reuse mbq from previous mbox */
1816 			mb = (MAILBOX *)mbq;
1817 		}
1818 		cfg = &CFG;
1819 
1820 		emlxs_mb_init_link(hba, mbq,
1821 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1822 
1823 		mb->un.varInitLnk.lipsr_AL_PA = 0;
1824 
1825 		/* Clear the loopback mode */
1826 		mutex_enter(&EMLXS_PORT_LOCK);
1827 		hba->flag &= ~FC_LOOPBACK_MODE;
1828 		hba->loopback_tics = 0;
1829 		mutex_exit(&EMLXS_PORT_LOCK);
1830 
1831 		rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1832 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1833 			rval = 1;
1834 			goto reset_link_fail;
1835 		}
1836 
1837 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1838 	}
1839 
1840 reset_link_fail:
1841 
1842 	if ((wait == MBX_WAIT) && mbq) {
1843 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
1844 	}
1845 
1846 	return (rval);
1847 } /* emlxs_reset_link() */
1848 
1849 
1850 extern int
1851 emlxs_online(emlxs_hba_t *hba)
1852 {
1853 	emlxs_port_t *port = &PPORT;
1854 	int32_t rval = 0;
1855 	uint32_t i = 0;
1856 
1857 	/* Make sure adapter is offline or exit trying (30 seconds) */
1858 	while (i++ < 30) {
1859 		/* Check if adapter is already going online */
1860 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1861 			return (0);
1862 		}
1863 
1864 		mutex_enter(&EMLXS_PORT_LOCK);
1865 
1866 		/* Check again */
1867 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1868 			mutex_exit(&EMLXS_PORT_LOCK);
1869 			return (0);
1870 		}
1871 
1872 		/* Check if adapter is offline */
1873 		if (hba->flag & FC_OFFLINE_MODE) {
1874 			/* Mark it going online */
1875 			hba->flag &= ~FC_OFFLINE_MODE;
1876 			hba->flag |= FC_ONLINING_MODE;
1877 
1878 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1879 			mutex_exit(&EMLXS_PORT_LOCK);
1880 			break;
1881 		}
1882 
1883 		mutex_exit(&EMLXS_PORT_LOCK);
1884 
1885 		BUSYWAIT_MS(1000);
1886 	}
1887 
1888 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1889 	    "Going online...");
1890 
1891 	if (rval = EMLXS_SLI_ONLINE(hba)) {
1892 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
1893 		    rval);
1894 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1895 
1896 		/* Set FC_OFFLINE_MODE */
1897 		mutex_enter(&EMLXS_PORT_LOCK);
1898 		hba->flag |= FC_OFFLINE_MODE;
1899 		hba->flag &= ~FC_ONLINING_MODE;
1900 		mutex_exit(&EMLXS_PORT_LOCK);
1901 
1902 		return (rval);
1903 	}
1904 
1905 	/* Start the timer */
1906 	emlxs_timer_start(hba);
1907 
1908 	/* Set FC_ONLINE_MODE */
1909 	mutex_enter(&EMLXS_PORT_LOCK);
1910 	hba->flag |= FC_ONLINE_MODE;
1911 	hba->flag &= ~FC_ONLINING_MODE;
1912 	mutex_exit(&EMLXS_PORT_LOCK);
1913 
1914 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1915 
1916 #ifdef SFCT_SUPPORT
1917 	if (port->flag & EMLXS_TGT_ENABLED) {
1918 		(void) emlxs_fct_port_initialize(port);
1919 	}
1920 #endif /* SFCT_SUPPORT */
1921 
1922 	return (rval);
1923 
1924 } /* emlxs_online() */
1925 
1926 
1927 extern int
1928 emlxs_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1929 {
1930 	emlxs_port_t *port = &PPORT;
1931 	uint32_t i = 0;
1932 	int rval = 1;
1933 
1934 	/* Make sure adapter is online or exit trying (30 seconds) */
1935 	while (i++ < 30) {
1936 		/* Check if adapter is already going offline */
1937 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1938 			return (0);
1939 		}
1940 
1941 		mutex_enter(&EMLXS_PORT_LOCK);
1942 
1943 		/* Check again */
1944 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1945 			mutex_exit(&EMLXS_PORT_LOCK);
1946 			return (0);
1947 		}
1948 
1949 		/* Check if adapter is online */
1950 		if (hba->flag & FC_ONLINE_MODE) {
1951 			/* Mark it going offline */
1952 			hba->flag &= ~FC_ONLINE_MODE;
1953 			hba->flag |= FC_OFFLINING_MODE;
1954 
1955 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1956 			mutex_exit(&EMLXS_PORT_LOCK);
1957 			break;
1958 		}
1959 
1960 		mutex_exit(&EMLXS_PORT_LOCK);
1961 
1962 		BUSYWAIT_MS(1000);
1963 	}
1964 
1965 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1966 	    "Going offline...");
1967 
1968 	/* Declare link down */
1969 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1970 		(void) emlxs_fcf_shutdown_notify(port, 1);
1971 	} else {
1972 		emlxs_linkdown(hba);
1973 	}
1974 
1975 #ifdef SFCT_SUPPORT
1976 	if (port->flag & EMLXS_TGT_ENABLED) {
1977 		(void) emlxs_fct_port_shutdown(port);
1978 	}
1979 #endif /* SFCT_SUPPORT */
1980 
1981 	/* Check if adapter was shutdown */
1982 	if (hba->flag & FC_HARDWARE_ERROR) {
1983 		/*
1984 		 * Force mailbox cleanup
1985 		 * This will wake any sleeping or polling threads
1986 		 */
1987 		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1988 	}
1989 
1990 	/* Pause here for the IO to settle */
1991 	delay(drv_usectohz(1000000));	/* 1 sec */
1992 
1993 	/* Unregister all nodes */
1994 	emlxs_ffcleanup(hba);
1995 
1996 	if (hba->bus_type == SBUS_FC) {
1997 		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
1998 #ifdef FMA_SUPPORT
1999 		/* Access handle validation */
2000 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
2001 #endif  /* FMA_SUPPORT */
2002 	}
2003 
2004 	/* Stop the timer */
2005 	emlxs_timer_stop(hba);
2006 
2007 	/* For safety flush every iotag list */
2008 	if (emlxs_iotag_flush(hba)) {
2009 		/* Pause here for the IO to flush */
2010 		delay(drv_usectohz(1000));
2011 	}
2012 
2013 	/* Wait for poll command request to settle */
2014 	while (hba->io_poll_count > 0) {
2015 		delay(drv_usectohz(2000000));   /* 2 sec */
2016 	}
2017 
2018 	/* Shutdown the adapter interface */
2019 	EMLXS_SLI_OFFLINE(hba, reset_requested);
2020 
2021 	mutex_enter(&EMLXS_PORT_LOCK);
2022 	hba->flag |= FC_OFFLINE_MODE;
2023 	hba->flag &= ~FC_OFFLINING_MODE;
2024 	mutex_exit(&EMLXS_PORT_LOCK);
2025 
2026 	rval = 0;
2027 
2028 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
2029 
2030 done:
2031 
2032 	return (rval);
2033 
2034 } /* emlxs_offline() */
2035 
2036 
2037 
2038 extern int
2039 emlxs_power_down(emlxs_hba_t *hba)
2040 {
2041 #ifdef FMA_SUPPORT
2042 	emlxs_port_t *port = &PPORT;
2043 #endif  /* FMA_SUPPORT */
2044 	int32_t rval = 0;
2045 
2046 	if ((rval = emlxs_offline(hba, 0))) {
2047 		return (rval);
2048 	}
2049 	EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);
2050 
2051 
2052 #ifdef FMA_SUPPORT
2053 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2054 	    != DDI_FM_OK) {
2055 		EMLXS_MSGF(EMLXS_CONTEXT,
2056 		    &emlxs_invalid_access_handle_msg, NULL);
2057 		return (1);
2058 	}
2059 #endif  /* FMA_SUPPORT */
2060 
2061 	return (0);
2062 
2063 } /* End emlxs_power_down */
2064 
2065 
2066 extern int
2067 emlxs_power_up(emlxs_hba_t *hba)
2068 {
2069 #ifdef FMA_SUPPORT
2070 	emlxs_port_t *port = &PPORT;
2071 #endif  /* FMA_SUPPORT */
2072 	int32_t rval = 0;
2073 
2074 
2075 #ifdef FMA_SUPPORT
2076 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2077 	    != DDI_FM_OK) {
2078 		EMLXS_MSGF(EMLXS_CONTEXT,
2079 		    &emlxs_invalid_access_handle_msg, NULL);
2080 		return (1);
2081 	}
2082 #endif  /* FMA_SUPPORT */
2083 
2084 	/* Bring adapter online */
2085 	if ((rval = emlxs_online(hba))) {
2086 		if (hba->pci_cap_offset[PCI_CAP_ID_PM]) {
2087 			/* Put chip in D3 state */
2088 			(void) ddi_put8(hba->pci_acc_handle,
2089 			    (uint8_t *)(hba->pci_addr +
2090 			    hba->pci_cap_offset[PCI_CAP_ID_PM] +
2091 			    PCI_PMCSR),
2092 			    (uint8_t)PCI_PMCSR_D3HOT);
2093 		}
2094 		return (rval);
2095 	}
2096 
2097 	return (rval);
2098 
2099 } /* emlxs_power_up() */
2100 
2101 
2102 /*
2103  *
2104  * NAME:     emlxs_ffcleanup
2105  *
2106  * FUNCTION: Clean up all the Firefly resources used to configure the adapter
2107  *
2108  * EXECUTION ENVIRONMENT: process only
2109  *
2110  * CALLED FROM: CFG_TERM
2111  *
2112  * INPUT: hba       - pointer to the dev_ctl area.
2113  *
2114  * RETURNS: none
2115  */
2116 extern void
2117 emlxs_ffcleanup(emlxs_hba_t *hba)
2118 {
2119 	emlxs_port_t *port = &PPORT;
2120 	uint32_t i;
2121 
2122 	/* Disable all but the mailbox interrupt */
2123 	EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);
2124 
2125 	/* Make sure all port nodes are destroyed */
2126 	for (i = 0; i < MAX_VPORTS; i++) {
2127 		port = &VPORT(i);
2128 
2129 		if (port->node_count) {
2130 			(void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
2131 		}
2132 	}
2133 
2134 	/* Clear all interrupt enable conditions */
2135 	EMLXS_SLI_DISABLE_INTR(hba, 0);
2136 
2137 	return;
2138 
2139 } /* emlxs_ffcleanup() */
2140 
2141 
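/*
 * emlxs_register_pkt
 *
 * Allocates an iotag for sbp and registers it in hba->fc_table.
 * Iotags are assigned round-robin starting at hba->fc_iotag, skipping
 * slots that still hold an active packet. If sbp already has an iotag,
 * a warning is logged but a new tag is still assigned. Returns the
 * assigned iotag, or 0 if the table is full.
 */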
2142 extern uint16_t
2143 emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
2144 {
2145 	emlxs_hba_t *hba;
2146 	emlxs_port_t *port;
2147 	uint16_t iotag;
2148 	uint32_t i;
2149 
2150 	hba = cp->hba;
2151 
2152 	mutex_enter(&EMLXS_FCTAB_LOCK);
2153 
2154 	if (sbp->iotag != 0) {
2155 		port = &PPORT;
2156 
2157 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2158 		    "Pkt already registered! channel=%d iotag=%d sbp=%p",
2159 		    sbp->channel, sbp->iotag, sbp);
2160 	}
2161 
2162 	iotag = 0;
2163 	for (i = 0; i < hba->max_iotag; i++) {
2164 		if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
2165 			hba->fc_iotag = 1;
2166 		}
2167 		iotag = hba->fc_iotag++;
2168 
2169 		if (hba->fc_table[iotag] == 0 ||
2170 		    hba->fc_table[iotag] == STALE_PACKET) {
2171 			hba->io_count++;
2172 			hba->fc_table[iotag] = sbp;
2173 
2174 			sbp->iotag = iotag;
2175 			sbp->channel = cp;
2176 
2177 			break;
2178 		}
2179 		iotag = 0;
2180 	}
2181 
2182 	mutex_exit(&EMLXS_FCTAB_LOCK);
2183 
2184 	/*
2185 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2186 	 *    "register_pkt: channel=%d iotag=%d sbp=%p",
2187 	 *    cp->channelno, iotag, sbp);
2188 	 */
2189 
2190 	return (iotag);
2191 
2192 } /* emlxs_register_pkt() */
2193 
2194 
2195 
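/*
 * emlxs_unregister_pkt
 *
 * Releases the given iotag and removes its sbp from hba->fc_table.
 * If 'forced' is set, the slot is marked STALE_PACKET so a late
 * completion for this iotag can still be recognized. Returns the sbp
 * that was registered; NULL if the iotag is out of range or the slot
 * is empty; STALE_PACKET if the slot was already stale.
 */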
2196 extern emlxs_buf_t *
2197 emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
2198 {
2199 	emlxs_hba_t *hba;
2200 	emlxs_buf_t *sbp;
2201 
2202 	sbp = NULL;
2203 	hba = cp->hba;
2204 
2205 	/* Check the iotag range */
2206 	if ((iotag == 0) || (iotag >= hba->max_iotag)) {
2207 		return (NULL);
2208 	}
2209 
2210 	/* Remove the sbp from the table */
2211 	mutex_enter(&EMLXS_FCTAB_LOCK);
2212 	sbp = hba->fc_table[iotag];
2213 
2214 	if (!sbp || (sbp == STALE_PACKET)) {
2215 		mutex_exit(&EMLXS_FCTAB_LOCK);
2216 		return (sbp);
2217 	}
2218 
2219 	hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
2220 	hba->io_count--;
2221 	sbp->iotag = 0;
2222 
2223 	mutex_exit(&EMLXS_FCTAB_LOCK);
2224 
2225 
2226 	/* Clean up the sbp */
2227 	mutex_enter(&sbp->mtx);
2228 
2229 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
2230 		sbp->pkt_flags &= ~PACKET_IN_TXQ;
2231 		hba->channel_tx_count--;
2232 	}
2233 
2234 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
2235 		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
2236 	}
2237 
2238 	if (sbp->bmp) {
2239 		emlxs_mem_put(hba, MEM_BPL, (void *)sbp->bmp);
2240 		sbp->bmp = 0;
2241 	}
2242 
2243 	mutex_exit(&sbp->mtx);
2244 
2245 	return (sbp);
2246 
2247 } /* emlxs_unregister_pkt() */
2248 
2249 
2250 
2251 /* Flush all IO's to all nodes for a given IO Channel */
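/*
 * The flush runs in two phases: with the TX_CHANNEL lock held, every
 * iocbq is moved from the node tx/ptx queues to a local abort queue
 * and its iotag (or SLI4 XRI) is released; the packets are then
 * completed outside the lock with IOSTAT_LOCAL_REJECT and either
 * IOERR_ABORT_REQUESTED or IOERR_LINK_DOWN, depending on link state.
 * Returns the number of iocbq's flushed.
 */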
2252 extern uint32_t
2253 emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
2254 {
2255 	emlxs_port_t *port = &PPORT;
2256 	emlxs_buf_t *sbp;
2257 	IOCBQ *iocbq;
2258 	IOCBQ *next;
2259 	IOCB *iocb;
2260 	uint32_t channelno;
2261 	Q abort;
2262 	NODELIST *ndlp;
2263 	IOCB *icmd;
2264 	MATCHMAP *mp;
2265 	uint32_t i;
2266 	uint8_t flag[MAX_CHANNEL];
2267 
2268 	channelno = cp->channelno;
2269 	bzero((void *)&abort, sizeof (Q));
2270 	bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));
2271 
2272 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2273 
2274 	/* While a node needs servicing */
2275 	while (cp->nodeq.q_first) {
2276 		ndlp = (NODELIST *) cp->nodeq.q_first;
2277 
2278 		/* Check if priority queue is not empty */
2279 		if (ndlp->nlp_ptx[channelno].q_first) {
2280 			/* Transfer all iocb's to local queue */
2281 			if (abort.q_first == 0) {
2282 				abort.q_first =
2283 				    ndlp->nlp_ptx[channelno].q_first;
2284 			} else {
2285 				((IOCBQ *)abort.q_last)->next =
2286 				    (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
2287 			}
2288 			flag[channelno] = 1;
2289 
2290 			abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2291 			abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2292 		}
2293 
2294 		/* Check if tx queue is not empty */
2295 		if (ndlp->nlp_tx[channelno].q_first) {
2296 			/* Transfer all iocb's to local queue */
2297 			if (abort.q_first == 0) {
2298 				abort.q_first = ndlp->nlp_tx[channelno].q_first;
2299 			} else {
2300 				((IOCBQ *)abort.q_last)->next =
2301 				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2302 			}
2303 
2304 			abort.q_last = ndlp->nlp_tx[channelno].q_last;
2305 			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2306 		}
2307 
2308 		/* Clear the queue pointers */
2309 		ndlp->nlp_ptx[channelno].q_first = NULL;
2310 		ndlp->nlp_ptx[channelno].q_last = NULL;
2311 		ndlp->nlp_ptx[channelno].q_cnt = 0;
2312 
2313 		ndlp->nlp_tx[channelno].q_first = NULL;
2314 		ndlp->nlp_tx[channelno].q_last = NULL;
2315 		ndlp->nlp_tx[channelno].q_cnt = 0;
2316 
2317 		/* Remove node from service queue */
2318 
2319 		/* If this is the last node on list */
2320 		if (cp->nodeq.q_last == (void *)ndlp) {
2321 			cp->nodeq.q_last = NULL;
2322 			cp->nodeq.q_first = NULL;
2323 			cp->nodeq.q_cnt = 0;
2324 		} else {
2325 			/* Remove node from head */
2326 			cp->nodeq.q_first = ndlp->nlp_next[channelno];
2327 			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
2328 			    cp->nodeq.q_first;
2329 			cp->nodeq.q_cnt--;
2330 		}
2331 
2332 		/* Clear node */
2333 		ndlp->nlp_next[channelno] = NULL;
2334 	}
2335 
2336 	/* First cleanup the iocb's while still holding the lock */
2337 	iocbq = (IOCBQ *) abort.q_first;
2338 	while (iocbq) {
2339 		/* Free the IoTag and the bmp */
2340 		iocb = &iocbq->iocb;
2341 
2342 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2343 			sbp = iocbq->sbp;
2344 			if (sbp) {
2345 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2346 			}
2347 		} else {
2348 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2349 			    iocb->ULPIOTAG, 0);
2350 		}
2351 
2352 		if (sbp && (sbp != STALE_PACKET)) {
2353 			mutex_enter(&sbp->mtx);
2354 
2355 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2356 			/*
2357 			 * If the fpkt is already set, then we will leave it
2358 			 * alone. This ensures that this pkt is only accounted
2359 			 * for on one fpkt->flush_count
2360 			 */
2361 			if (!sbp->fpkt && fpkt) {
2362 				mutex_enter(&fpkt->mtx);
2363 				sbp->fpkt = fpkt;
2364 				fpkt->flush_count++;
2365 				mutex_exit(&fpkt->mtx);
2366 			}
2367 
2368 			mutex_exit(&sbp->mtx);
2369 		}
2370 
2371 		iocbq = (IOCBQ *)iocbq->next;
2372 	}	/* end of while */
2373 
2374 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2375 
2376 	/* Now abort the iocb's */
2377 	iocbq = (IOCBQ *)abort.q_first;
2378 	while (iocbq) {
2379 		/* Save the next iocbq for now */
2380 		next = (IOCBQ *)iocbq->next;
2381 
2382 		/* Unlink this iocbq */
2383 		iocbq->next = NULL;
2384 
2385 		/* Get the pkt */
2386 		sbp = (emlxs_buf_t *)iocbq->sbp;
2387 
2388 		if (sbp) {
2389 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2390 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2391 
2392 			if (hba->state >= FC_LINK_UP) {
2393 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2394 				    IOERR_ABORT_REQUESTED, 1);
2395 			} else {
2396 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2397 				    IOERR_LINK_DOWN, 1);
2398 			}
2399 
2400 		}
2401 		/* Free the iocb and its associated buffers */
2402 		else {
2403 			icmd = &iocbq->iocb;
2404 
2405 			/* SLI3 */
2406 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2407 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2408 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2409 				if ((hba->flag &
2410 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2411 					/* HBA is detaching or offlining */
2412 					if (icmd->ULPCOMMAND !=
2413 					    CMD_QUE_RING_LIST64_CN) {
2414 						void	*tmp;
2415 						RING *rp;
2416 
2417 						rp = &hba->sli.sli3.
2418 						    ring[channelno];
2419 						for (i = 0;
2420 						    i < icmd->ULPBDECOUNT;
2421 						    i++) {
2422 							mp = EMLXS_GET_VADDR(
2423 							    hba, rp, icmd);
2424 
2425 							tmp = (void *)mp;
2426 							if (mp) {
2427 							emlxs_mem_put(
2428 							    hba, MEM_BUF, tmp);
2429 							}
2430 						}
2431 					}
2432 
2433 					emlxs_mem_put(hba, MEM_IOCB,
2434 					    (void *)iocbq);
2435 				} else {
2436 					/* repost the unsolicited buffer */
2437 					EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
2438 					    iocbq);
2439 				}
2440 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2441 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2442 
2443 				emlxs_tx_put(iocbq, 1);
2444 			}
2445 		}
2446 
2447 		iocbq = next;
2448 
2449 	}	/* end of while */
2450 
2451 	/* Now trigger channel service */
2452 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2453 		if (!flag[channelno]) {
2454 			continue;
2455 		}
2456 
2457 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2458 	}
2459 
2460 	return (abort.q_cnt);
2461 
2462 } /* emlxs_tx_channel_flush() */
2463 
2464 
2465 /* Flush all IO's on all or a given ring for a given node */
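/*
 * If 'shutdown' is set, the node (unless it is the base node) is also
 * marked inactive, which prevents further IO from being queued to it.
 */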
2466 extern uint32_t
2467 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
2468     uint32_t shutdown, emlxs_buf_t *fpkt)
2469 {
2470 	emlxs_hba_t *hba = HBA;
2471 	emlxs_buf_t *sbp;
2472 	uint32_t channelno;
2473 	CHANNEL *cp;
2474 	IOCB *icmd;
2475 	IOCBQ *iocbq;
2476 	NODELIST *prev;
2477 	IOCBQ *next;
2478 	IOCB *iocb;
2479 	Q abort;
2480 	uint32_t i;
2481 	MATCHMAP *mp;
2482 	uint8_t flag[MAX_CHANNEL];
2483 
2484 	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));
2485 
2486 	/* Flush all I/O's on tx queue to this target */
2487 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2488 
2489 	if (!ndlp->nlp_base && shutdown) {
2490 		ndlp->nlp_active = 0;
2491 	}
2492 
2493 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2494 		cp = &hba->chan[channelno];
2495 
2496 		if (chan && cp != chan) {
2497 			continue;
2498 		}
2499 
2500 		if (!ndlp->nlp_base || shutdown) {
2501 			/* Check if priority queue is not empty */
2502 			if (ndlp->nlp_ptx[channelno].q_first) {
2503 				/* Transfer all iocb's to local queue */
2504 				if (abort.q_first == 0) {
2505 					abort.q_first =
2506 					    ndlp->nlp_ptx[channelno].q_first;
2507 				} else {
2508 					((IOCBQ *)(abort.q_last))->next =
2509 					    (IOCBQ *)ndlp->nlp_ptx[channelno].
2510 					    q_first;
2511 				}
2512 
2513 				flag[channelno] = 1;
2514 
2515 				abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2516 				abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2517 			}
2518 		}
2519 
2520 		/* Check if tx queue is not empty */
2521 		if (ndlp->nlp_tx[channelno].q_first) {
2522 
2523 			/* Transfer all iocb's to local queue */
2524 			if (abort.q_first == 0) {
2525 				abort.q_first = ndlp->nlp_tx[channelno].q_first;
2526 			} else {
2527 				((IOCBQ *)abort.q_last)->next =
2528 				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2529 			}
2530 
2531 			abort.q_last = ndlp->nlp_tx[channelno].q_last;
2532 			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2533 		}
2534 
2535 		/* Clear the queue pointers */
2536 		ndlp->nlp_ptx[channelno].q_first = NULL;
2537 		ndlp->nlp_ptx[channelno].q_last = NULL;
2538 		ndlp->nlp_ptx[channelno].q_cnt = 0;
2539 
2540 		ndlp->nlp_tx[channelno].q_first = NULL;
2541 		ndlp->nlp_tx[channelno].q_last = NULL;
2542 		ndlp->nlp_tx[channelno].q_cnt = 0;
2543 
2544 		/* If this node was on the channel queue, remove it */
2545 		if (ndlp->nlp_next[channelno]) {
2546 			/* If this is the only node on list */
2547 			if (cp->nodeq.q_first == (void *)ndlp &&
2548 			    cp->nodeq.q_last == (void *)ndlp) {
2549 				cp->nodeq.q_last = NULL;
2550 				cp->nodeq.q_first = NULL;
2551 				cp->nodeq.q_cnt = 0;
2552 			} else if (cp->nodeq.q_first == (void *)ndlp) {
2553 				cp->nodeq.q_first = ndlp->nlp_next[channelno];
2554 				((NODELIST *) cp->nodeq.q_last)->
2555 				    nlp_next[channelno] = cp->nodeq.q_first;
2556 				cp->nodeq.q_cnt--;
2557 			} else {
2558 				/*
2559 				 * This is a little more difficult: find the
2560 				 * previous node in the circular channel queue
2561 				 */
2562 				prev = ndlp;
2563 				while (prev->nlp_next[channelno] != ndlp) {
2564 					prev = prev->nlp_next[channelno];
2565 				}
2566 
2567 				prev->nlp_next[channelno] =
2568 				    ndlp->nlp_next[channelno];
2569 
2570 				if (cp->nodeq.q_last == (void *)ndlp) {
2571 					cp->nodeq.q_last = (void *)prev;
2572 				}
2573 				cp->nodeq.q_cnt--;
2574 
2575 			}
2576 
2577 			/* Clear node */
2578 			ndlp->nlp_next[channelno] = NULL;
2579 		}
2580 
2581 	}
2582 
2583 	/* First cleanup the iocb's while still holding the lock */
2584 	iocbq = (IOCBQ *) abort.q_first;
2585 	while (iocbq) {
2586 		/* Free the IoTag and the bmp */
2587 		iocb = &iocbq->iocb;
2588 
2589 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2590 			sbp = iocbq->sbp;
2591 			if (sbp) {
2592 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2593 			}
2594 		} else {
2595 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2596 			    iocb->ULPIOTAG, 0);
2597 		}
2598 
2599 		if (sbp && (sbp != STALE_PACKET)) {
2600 			mutex_enter(&sbp->mtx);
2601 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2602 			/*
2603 			 * If the fpkt is already set, then we will leave it
2604 			 * alone. This ensures that this pkt is only accounted
2605 			 * for on one fpkt->flush_count
2606 			 */
2607 			if (!sbp->fpkt && fpkt) {
2608 				mutex_enter(&fpkt->mtx);
2609 				sbp->fpkt = fpkt;
2610 				fpkt->flush_count++;
2611 				mutex_exit(&fpkt->mtx);
2612 			}
2613 
2614 			mutex_exit(&sbp->mtx);
2615 		}
2616 
2617 		iocbq = (IOCBQ *) iocbq->next;
2618 
2619 	}	/* end of while */
2620 
2621 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2622 
2623 	/* Now abort the iocb's outside the locks */
2624 	iocbq = (IOCBQ *)abort.q_first;
2625 	while (iocbq) {
2626 		/* Save the next iocbq for now */
2627 		next = (IOCBQ *)iocbq->next;
2628 
2629 		/* Unlink this iocbq */
2630 		iocbq->next = NULL;
2631 
2632 		/* Get the pkt */
2633 		sbp = (emlxs_buf_t *)iocbq->sbp;
2634 
2635 		if (sbp) {
2636 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2637 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2638 
2639 			if (hba->state >= FC_LINK_UP) {
2640 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2641 				    IOERR_ABORT_REQUESTED, 1);
2642 			} else {
2643 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2644 				    IOERR_LINK_DOWN, 1);
2645 			}
2646 
2647 		}
2648 		/* Free the iocb and its associated buffers */
2649 		else {
2650 			/* CMD_CLOSE_XRI_CN should also free the memory */
2651 			icmd = &iocbq->iocb;
2652 
2653 			/* SLI3 */
2654 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2655 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2656 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2657 				if ((hba->flag &
2658 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2659 					/* HBA is detaching or offlining */
2660 					if (icmd->ULPCOMMAND !=
2661 					    CMD_QUE_RING_LIST64_CN) {
2662 						void	*tmp;
2663 						RING *rp;
2664 						int ch;
2665 
2666 						ch = ((CHANNEL *)
2667 						    iocbq->channel)->channelno;
2668 						rp = &hba->sli.sli3.ring[ch];
2669 						for (i = 0;
2670 						    i < icmd->ULPBDECOUNT;
2671 						    i++) {
2672 							mp = EMLXS_GET_VADDR(
2673 							    hba, rp, icmd);
2674 
2675 							tmp = (void *)mp;
2676 							if (mp) {
2677 							emlxs_mem_put(
2678 							    hba, MEM_BUF, tmp);
2679 							}
2680 						}
2681 					}
2682 
2683 					emlxs_mem_put(hba, MEM_IOCB,
2684 					    (void *)iocbq);
2685 				} else {
2686 					/* repost the unsolicited buffer */
2687 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2688 					    (CHANNEL *)iocbq->channel, iocbq);
2689 				}
2690 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2691 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2692 				/*
2693 				 * Resend the abort iocbq if any
2694 				 */
2695 				emlxs_tx_put(iocbq, 1);
2696 			}
2697 		}
2698 
2699 		iocbq = next;
2700 
2701 	}	/* end of while */
2702 
2703 	/* Now trigger channel service */
2704 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2705 		if (!flag[channelno]) {
2706 			continue;
2707 		}
2708 
2709 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2710 	}
2711 
2712 	return (abort.q_cnt);
2713 
2714 } /* emlxs_tx_node_flush() */
2715 
2716 
2717 /* Check for IO's on all or a given ring for a given node */
2718 extern uint32_t
2719 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
2720 {
2721 	emlxs_hba_t *hba = HBA;
2722 	uint32_t channelno;
2723 	CHANNEL *cp;
2724 	uint32_t count;
2725 
2726 	count = 0;
2727 
2728 	/* Count the I/O's on the tx queues for this target */
2729 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2730 
2731 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2732 		cp = &hba->chan[channelno];
2733 
2734 		if (chan && cp != chan) {
2735 			continue;
2736 		}
2737 
2738 		/* Check if priority queue is not empty */
2739 		if (ndlp->nlp_ptx[channelno].q_first) {
2740 			count += ndlp->nlp_ptx[channelno].q_cnt;
2741 		}
2742 
2743 		/* Check if tx queue is not empty */
2744 		if (ndlp->nlp_tx[channelno].q_first) {
2745 			count += ndlp->nlp_tx[channelno].q_cnt;
2746 		}
2747 
2748 	}
2749 
2750 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2751 
2752 	return (count);
2753 
2754 } /* emlxs_tx_node_check() */
2755 
2756 
2757 
2758 /* Flush all IO's on any ring for a given node's lun */
2759 extern uint32_t
2760 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2761     emlxs_buf_t *fpkt)
2762 {
2763 	emlxs_hba_t *hba = HBA;
2764 	emlxs_buf_t *sbp;
2765 	uint32_t channelno;
2766 	IOCBQ *iocbq;
2767 	IOCBQ *prev;
2768 	IOCBQ *next;
2769 	IOCB *iocb;
2770 	IOCB *icmd;
2771 	Q abort;
2772 	uint32_t i;
2773 	MATCHMAP *mp;
2774 	uint8_t flag[MAX_CHANNEL];
2775 
2776 	if (lun == EMLXS_LUN_NONE) {
2777 		return (0);
2778 	}
2779 
2780 	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));
2781 
2782 	/* Flush I/O's on txQ to this target's lun */
2783 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2784 
2785 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2786 
2787 		/* Scan the priority queue first */
2788 		prev = NULL;
2789 		iocbq = (IOCBQ *) ndlp->nlp_ptx[channelno].q_first;
2790 
2791 		while (iocbq) {
2792 			next = (IOCBQ *)iocbq->next;
2793 			iocb = &iocbq->iocb;
2794 			sbp = (emlxs_buf_t *)iocbq->sbp;
2795 
2796 			/* Check if this IO is for our lun */
2797 			if (sbp && (sbp->lun == lun)) {
2798 				/* Remove iocb from the node's ptx queue */
2799 				if (next == 0) {
2800 					ndlp->nlp_ptx[channelno].q_last =
2801 					    (uint8_t *)prev;
2802 				}
2803 
2804 				if (prev == 0) {
2805 					ndlp->nlp_ptx[channelno].q_first =
2806 					    (uint8_t *)next;
2807 				} else {
2808 					prev->next = next;
2809 				}
2810 
2811 				iocbq->next = NULL;
2812 				ndlp->nlp_ptx[channelno].q_cnt--;
2813 
2814 				/*
2815 				 * Add this iocb to our local abort Q
2816 				 */
2817 				if (abort.q_first) {
2818 					((IOCBQ *)abort.q_last)->next = iocbq;
2819 					abort.q_last = (uint8_t *)iocbq;
2820 					abort.q_cnt++;
2821 				} else {
2822 					abort.q_first = (uint8_t *)iocbq;
2823 					abort.q_last = (uint8_t *)iocbq;
2824 					abort.q_cnt = 1;
2825 				}
2826 				iocbq->next = NULL;
2827 				flag[channelno] = 1;
2828 
2829 			} else {
2830 				prev = iocbq;
2831 			}
2832 
2833 			iocbq = next;
2834 
2835 		}	/* while (iocbq) */
2836 
2837 
2838 		/* Scan the regular queue */
2839 		prev = NULL;
2840 		iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2841 
2842 		while (iocbq) {
2843 			next = (IOCBQ *)iocbq->next;
2844 			iocb = &iocbq->iocb;
2845 			sbp = (emlxs_buf_t *)iocbq->sbp;
2846 
2847 			/* Check if this IO is for our lun */
2848 			if (sbp && (sbp->lun == lun)) {
2849 				/* Remove iocb from the node's tx queue */
2850 				if (next == 0) {
2851 					ndlp->nlp_tx[channelno].q_last =
2852 					    (uint8_t *)prev;
2853 				}
2854 
2855 				if (prev == 0) {
2856 					ndlp->nlp_tx[channelno].q_first =
2857 					    (uint8_t *)next;
2858 				} else {
2859 					prev->next = next;
2860 				}
2861 
2862 				iocbq->next = NULL;
2863 				ndlp->nlp_tx[channelno].q_cnt--;
2864 
2865 				/*
2866 				 * Add this iocb to our local abort Q
2867 				 */
2868 				if (abort.q_first) {
2869 					((IOCBQ *) abort.q_last)->next = iocbq;
2870 					abort.q_last = (uint8_t *)iocbq;
2871 					abort.q_cnt++;
2872 				} else {
2873 					abort.q_first = (uint8_t *)iocbq;
2874 					abort.q_last = (uint8_t *)iocbq;
2875 					abort.q_cnt = 1;
2876 				}
2877 				iocbq->next = NULL;
2878 			} else {
2879 				prev = iocbq;
2880 			}
2881 
2882 			iocbq = next;
2883 
2884 		}	/* while (iocbq) */
2885 	}	/* for loop */
2886 
2887 	/* First cleanup the iocb's while still holding the lock */
2888 	iocbq = (IOCBQ *)abort.q_first;
2889 	while (iocbq) {
2890 		/* Free the IoTag and the bmp */
2891 		iocb = &iocbq->iocb;
2892 
2893 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2894 			sbp = iocbq->sbp;
2895 			if (sbp) {
2896 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2897 			}
2898 		} else {
2899 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2900 			    iocb->ULPIOTAG, 0);
2901 		}
2902 
2903 		if (sbp && (sbp != STALE_PACKET)) {
2904 			mutex_enter(&sbp->mtx);
2905 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2906 			/*
2907 			 * If the fpkt is already set, then we will leave it
2908 			 * alone. This ensures that this pkt is only accounted
2909 			 * for on one fpkt->flush_count
2910 			 */
2911 			if (!sbp->fpkt && fpkt) {
2912 				mutex_enter(&fpkt->mtx);
2913 				sbp->fpkt = fpkt;
2914 				fpkt->flush_count++;
2915 				mutex_exit(&fpkt->mtx);
2916 			}
2917 
2918 			mutex_exit(&sbp->mtx);
2919 		}
2920 
2921 		iocbq = (IOCBQ *) iocbq->next;
2922 
2923 	}	/* end of while */
2924 
2925 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2926 
2927 	/* Now abort the iocb's outside the locks */
2928 	iocbq = (IOCBQ *)abort.q_first;
2929 	while (iocbq) {
2930 		/* Save the next iocbq for now */
2931 		next = (IOCBQ *)iocbq->next;
2932 
2933 		/* Unlink this iocbq */
2934 		iocbq->next = NULL;
2935 
2936 		/* Get the pkt */
2937 		sbp = (emlxs_buf_t *)iocbq->sbp;
2938 
2939 		if (sbp) {
2940 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2941 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2942 
2943 			if (hba->state >= FC_LINK_UP) {
2944 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2945 				    IOERR_ABORT_REQUESTED, 1);
2946 			} else {
2947 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2948 				    IOERR_LINK_DOWN, 1);
2949 			}
2950 		}
2951 
2952 		/* Free the iocb and its associated buffers */
2953 		else {
2954 			/* Should never happen! */
2955 			icmd = &iocbq->iocb;
2956 
2957 			/* SLI3 */
2958 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2959 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2960 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2961 				if ((hba->flag &
2962 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2963 					/* HBA is detaching or offlining */
2964 					if (icmd->ULPCOMMAND !=
2965 					    CMD_QUE_RING_LIST64_CN) {
2966 						void	*tmp;
2967 						RING *rp;
2968 						int ch;
2969 
2970 						ch = ((CHANNEL *)
2971 						    iocbq->channel)->channelno;
2972 						rp = &hba->sli.sli3.ring[ch];
2973 						for (i = 0;
2974 						    i < icmd->ULPBDECOUNT;
2975 						    i++) {
2976 							mp = EMLXS_GET_VADDR(
2977 							    hba, rp, icmd);
2978 
2979 							tmp = (void *)mp;
2980 							if (mp) {
2981 							emlxs_mem_put(
2982 							    hba, MEM_BUF, tmp);
2983 							}
2984 						}
2985 					}
2986 
2987 					emlxs_mem_put(hba, MEM_IOCB,
2988 					    (void *)iocbq);
2989 				} else {
2990 					/* repost the unsolicited buffer */
2991 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2992 					    (CHANNEL *)iocbq->channel, iocbq);
2993 				}
2994 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2995 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2996 				/*
2997 				 * Resend the abort iocbq if any
2998 				 */
2999 				emlxs_tx_put(iocbq, 1);
3000 			}
3001 		}
3002 
3003 		iocbq = next;
3004 
3005 	}	/* end of while */
3006 
3007 	/* Now trigger channel service */
3008 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3009 		if (!flag[channelno]) {
3010 			continue;
3011 		}
3012 
3013 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3014 	}
3015 
3016 	return (abort.q_cnt);
3017 
3018 } /* emlxs_tx_lun_flush() */
3019 
3020 
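/*
 * emlxs_tx_put
 *
 * Queues an iocbq on its node's tx queue, or on the priority ptx
 * queue for IOCB_PRIORITY requests, and links the node onto the
 * channel's service queue if it is not already there. If the node is
 * inactive, or the packet is being aborted, the packet is instead
 * completed immediately with IOSTAT_LOCAL_REJECT.
 */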
3021 extern void
3022 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
3023 {
3024 	emlxs_hba_t *hba;
3025 	emlxs_port_t *port;
3026 	uint32_t channelno;
3027 	NODELIST *nlp;
3028 	CHANNEL *cp;
3029 	emlxs_buf_t *sbp;
3030 
3031 	port = (emlxs_port_t *)iocbq->port;
3032 	hba = HBA;
3033 	cp = (CHANNEL *)iocbq->channel;
3034 	nlp = (NODELIST *)iocbq->node;
3035 	channelno = cp->channelno;
3036 	sbp = (emlxs_buf_t *)iocbq->sbp;
3037 
3038 	if (nlp == NULL) {
3039 		/* Set node to base node by default */
3040 		nlp = &port->node_base;
3041 
3042 		iocbq->node = (void *)nlp;
3043 
3044 		if (sbp) {
3045 			sbp->node = (void *)nlp;
3046 		}
3047 	}
3048 
3049 	if (lock) {
3050 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3051 	}
3052 
3053 	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
3054 		if (sbp) {
3055 			mutex_enter(&sbp->mtx);
3056 			sbp->pkt_flags |= PACKET_IN_FLUSH;
3057 			mutex_exit(&sbp->mtx);
3058 
3059 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3060 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3061 			} else {
3062 				(void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
3063 			}
3064 
3065 			if (lock) {
3066 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3067 			}
3068 
3069 			if (hba->state >= FC_LINK_UP) {
3070 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3071 				    IOERR_ABORT_REQUESTED, 1);
3072 			} else {
3073 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3074 				    IOERR_LINK_DOWN, 1);
3075 			}
3076 			return;
3077 		} else {
3078 			if (lock) {
3079 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3080 			}
3081 
3082 			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
3083 		}
3084 
3085 		return;
3086 	}
3087 
3088 	if (sbp) {
3089 
3090 		mutex_enter(&sbp->mtx);
3091 
3092 		if (sbp->pkt_flags &
3093 		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
3094 			mutex_exit(&sbp->mtx);
3095 			if (lock) {
3096 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3097 			}
3098 			return;
3099 		}
3100 
3101 		sbp->pkt_flags |= PACKET_IN_TXQ;
3102 		hba->channel_tx_count++;
3103 
3104 		mutex_exit(&sbp->mtx);
3105 	}
3106 
3107 
3108 	/* Check iocbq priority */
3109 	/* Some IOCBs have high priority, e.g. reset/close XRI */
3110 	if (iocbq->flag & IOCB_PRIORITY) {
3111 		/* Add the iocb to the bottom of the node's ptx queue */
3112 		if (nlp->nlp_ptx[channelno].q_first) {
3113 			((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
3114 			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
3115 			nlp->nlp_ptx[channelno].q_cnt++;
3116 		} else {
3117 			nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
3118 			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
3119 			nlp->nlp_ptx[channelno].q_cnt = 1;
3120 		}
3121 
3122 		iocbq->next = NULL;
3123 	} else {	/* Normal priority */
3124 
3125 
3126 		/* Add the iocb to the bottom of the node's tx queue */
3127 		if (nlp->nlp_tx[channelno].q_first) {
3128 			((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
3129 			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
3130 			nlp->nlp_tx[channelno].q_cnt++;
3131 		} else {
3132 			nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
3133 			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
3134 			nlp->nlp_tx[channelno].q_cnt = 1;
3135 		}
3136 
3137 		iocbq->next = NULL;
3138 	}
3139 
3140 
3141 	/*
3142 	 * Check if the node is not already on the channel queue and
3143 	 * (is not closed, or this is a priority request)
3144 	 */
3145 	if (!nlp->nlp_next[channelno] &&
3146 	    (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
3147 	    (iocbq->flag & IOCB_PRIORITY))) {
3148 		/* If so, then add it to the channel queue */
3149 		if (cp->nodeq.q_first) {
3150 			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
3151 			    (uint8_t *)nlp;
3152 			nlp->nlp_next[channelno] = cp->nodeq.q_first;
3153 
3154 			/*
3155 			 * If this is not the base node then add it
3156 			 * to the tail
3157 			 */
3158 			if (!nlp->nlp_base) {
3159 				cp->nodeq.q_last = (uint8_t *)nlp;
3160 			} else {	/* Otherwise, add it to the head */
3161 
3162 				/* The command node always gets priority */
3163 				cp->nodeq.q_first = (uint8_t *)nlp;
3164 			}
3165 
3166 			cp->nodeq.q_cnt++;
3167 		} else {
3168 			cp->nodeq.q_first = (uint8_t *)nlp;
3169 			cp->nodeq.q_last = (uint8_t *)nlp;
3170 			nlp->nlp_next[channelno] = nlp;
3171 			cp->nodeq.q_cnt = 1;
3172 		}
3173 	}
3174 
3175 	HBASTATS.IocbTxPut[channelno]++;
3176 
3177 	/* Adjust the channel timeout timer */
3178 	cp->timeout = hba->timer_tics + 5;
3179 
3180 	if (lock) {
3181 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3182 	}
3183 
3184 	return;
3185 
3186 } /* emlxs_tx_put() */
3187 
3188 
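/*
 * emlxs_tx_get
 *
 * Dequeues the next iocbq to transmit on the given channel. Nodes are
 * serviced round-robin from cp->nodeq; each node's priority queue is
 * drained before its regular tx queue, closed (NLP_CLOSED) nodes have
 * only their priority queue serviced, and the base node is drained
 * completely before the rotation advances. Returns NULL when nothing
 * is pending.
 */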
3189 extern IOCBQ *
3190 emlxs_tx_get(CHANNEL *cp, uint32_t lock)
3191 {
3192 	emlxs_hba_t *hba;
3193 	uint32_t channelno;
3194 	IOCBQ *iocbq;
3195 	NODELIST *nlp;
3196 	emlxs_buf_t *sbp;
3197 
3198 	hba = cp->hba;
3199 	channelno = cp->channelno;
3200 
3201 	if (lock) {
3202 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3203 	}
3204 
3205 begin:
3206 
3207 	iocbq = NULL;
3208 
3209 	/* Check if a node needs servicing */
3210 	if (cp->nodeq.q_first) {
3211 		nlp = (NODELIST *)cp->nodeq.q_first;
3212 
3213 		/* Get next iocb from node's priority queue */
3214 
3215 		if (nlp->nlp_ptx[channelno].q_first) {
3216 			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;
3217 
3218 			/* Check if this is last entry */
3219 			if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
3220 				nlp->nlp_ptx[channelno].q_first = NULL;
3221 				nlp->nlp_ptx[channelno].q_last = NULL;
3222 				nlp->nlp_ptx[channelno].q_cnt = 0;
3223 			} else {
3224 				/* Remove iocb from head */
3225 				nlp->nlp_ptx[channelno].q_first =
3226 				    (void *)iocbq->next;
3227 				nlp->nlp_ptx[channelno].q_cnt--;
3228 			}
3229 
3230 			iocbq->next = NULL;
3231 		}
3232 
3233 		/* Get next iocb from node tx queue if node not closed */
3234 		else if (nlp->nlp_tx[channelno].q_first &&
3235 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
3236 			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;
3237 
3238 			/* Check if this is last entry */
3239 			if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
3240 				nlp->nlp_tx[channelno].q_first = NULL;
3241 				nlp->nlp_tx[channelno].q_last = NULL;
3242 				nlp->nlp_tx[channelno].q_cnt = 0;
3243 			} else {
3244 				/* Remove iocb from head */
3245 				nlp->nlp_tx[channelno].q_first =
3246 				    (void *)iocbq->next;
3247 				nlp->nlp_tx[channelno].q_cnt--;
3248 			}
3249 
3250 			iocbq->next = NULL;
3251 		}
3252 
3253 		/* Now deal with node itself */
3254 
3255 		/* Check if node still needs servicing */
3256 		if ((nlp->nlp_ptx[channelno].q_first) ||
3257 		    (nlp->nlp_tx[channelno].q_first &&
3258 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3259 
3260 			/*
3261 			 * If this is the base node, then don't shift the
3262 			 * pointers. We want to drain the base node before
3263 			 * moving on
3264 			 */
3265 			if (!nlp->nlp_base) {
3266 				/*
3267 				 * Just shift channel queue pointers to next
3268 				 * node
3269 				 */
3270 				cp->nodeq.q_last = (void *)nlp;
3271 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3272 			}
3273 		} else {
3274 			/* Remove node from channel queue */
3275 
3276 			/* If this is the last node on list */
3277 			if (cp->nodeq.q_last == (void *)nlp) {
3278 				cp->nodeq.q_last = NULL;
3279 				cp->nodeq.q_first = NULL;
3280 				cp->nodeq.q_cnt = 0;
3281 			} else {
3282 				/* Remove node from head */
3283 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3284 				((NODELIST *)cp->nodeq.q_last)->
3285 				    nlp_next[channelno] = cp->nodeq.q_first;
3286 				cp->nodeq.q_cnt--;
3287 
3288 			}
3289 
3290 			/* Clear node */
3291 			nlp->nlp_next[channelno] = NULL;
3292 		}
3293 
3294 		/*
3295 		 * If no iocbq was found on this node, then it will have
3296 		 * been removed. So try again.
3297 		 */
3298 		if (!iocbq) {
3299 			goto begin;
3300 		}
3301 
3302 		sbp = (emlxs_buf_t *)iocbq->sbp;
3303 
3304 		if (sbp) {
3305 			/*
3306 			 * Check flags before we enter mutex in case this
3307 			 * has been flushed and destroyed
3308 			 */
3309 			if ((sbp->pkt_flags &
3310 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3311 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3312 				goto begin;
3313 			}
3314 
3315 			mutex_enter(&sbp->mtx);
3316 
3317 			if ((sbp->pkt_flags &
3318 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3319 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3320 				mutex_exit(&sbp->mtx);
3321 				goto begin;
3322 			}
3323 
3324 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
3325 			hba->channel_tx_count--;
3326 
3327 			mutex_exit(&sbp->mtx);
3328 		}
3329 	}
3330 
3331 	if (iocbq) {
3332 		HBASTATS.IocbTxGet[channelno]++;
3333 	}
3334 
3335 	/* Adjust the ring timeout timer */
3336 	cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
3337 
3338 	if (lock) {
3339 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3340 	}
3341 
3342 	return (iocbq);
3343 
3344 } /* emlxs_tx_get() */
3345 
3346 
3347 /*
3348  * Move all FCP commands for ndlp from from_chan's txq to to_chan's txq.
3349  * The old IoTag must be released and a new one allocated; everything
3350  * else is left unchanged.
3351  * The TX_CHANNEL lock must be held by the caller unless 'lock' is set.
3352  */
3353 extern void
3354 emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
3355     uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
3356 {
3357 	emlxs_hba_t *hba;
3358 	emlxs_port_t *port;
3359 	uint32_t fchanno, tchanno, i;
3360 
3361 	IOCBQ *iocbq;
3362 	IOCBQ *prev;
3363 	IOCBQ *next;
3364 	IOCB *iocb, *icmd;
3365 	Q tbm;		/* To Be Moved Q */
3366 	MATCHMAP *mp;
3367 
3368 	NODELIST *nlp = ndlp;
3369 	emlxs_buf_t *sbp;
3370 
3371 	NODELIST *n_prev = NULL;
3372 	NODELIST *n_next = NULL;
3373 	uint16_t count = 0;
3374 
3375 	hba = from_chan->hba;
3376 	port = &PPORT;
3377 	cmd = cmd; /* To pass lint */
3378 
3379 	fchanno = from_chan->channelno;
3380 	tchanno = to_chan->channelno;
3381 
3382 	if (lock) {
3383 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3384 	}
3385 
3386 	bzero((void *)&tbm, sizeof (Q));
3387 
3388 	/* Scan ndlp's txq on fchanno for FCP cmd iocbs */
3389 	prev = NULL;
3390 	iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;
3391 
3392 	while (iocbq) {
3393 		next = (IOCBQ *)iocbq->next;
3394 		/* Check if this iocb is fcp cmd */
3395 		iocb = &iocbq->iocb;
3396 
3397 		switch (iocb->ULPCOMMAND) {
3398 		/* FCP commands */
3399 		case CMD_FCP_ICMND_CR:
3400 		case CMD_FCP_ICMND_CX:
3401 		case CMD_FCP_IREAD_CR:
3402 		case CMD_FCP_IREAD_CX:
3403 		case CMD_FCP_IWRITE_CR:
3404 		case CMD_FCP_IWRITE_CX:
3405 		case CMD_FCP_ICMND64_CR:
3406 		case CMD_FCP_ICMND64_CX:
3407 		case CMD_FCP_IREAD64_CR:
3408 		case CMD_FCP_IREAD64_CX:
3409 		case CMD_FCP_IWRITE64_CR:
3410 		case CMD_FCP_IWRITE64_CX:
3411 			/* We found an FCP cmd */
3412 			break;
3413 		default:
3414 			/* Not an FCP cmd; keep scanning */
3415 			prev = iocbq;
3416 			iocbq = next;
3417 			continue;
3418 		}
3419 
3420 		/* Found an FCP cmd iocb in fchanno's txq; now dequeue it */
3421 		if (next == NULL) {
3422 			/* This is the last iocbq */
3423 			nlp->nlp_tx[fchanno].q_last =
3424 			    (uint8_t *)prev;
3425 		}
3426 
3427 		if (prev == NULL) {
3428 			/* This is the first one then remove it from head */
3429 			nlp->nlp_tx[fchanno].q_first =
3430 			    (uint8_t *)next;
3431 		} else {
3432 			prev->next = next;
3433 		}
3434 
3435 		iocbq->next = NULL;
3436 		nlp->nlp_tx[fchanno].q_cnt--;
3437 
3438 		/* Add this iocb to our local to-be-moved queue so that */
3439 		/* we do not hold the TX_CHANNEL lock for too long */
3440 
3441 		if (tbm.q_first) {
3442 			((IOCBQ *)tbm.q_last)->next = iocbq;
3443 			tbm.q_last = (uint8_t *)iocbq;
3444 			tbm.q_cnt++;
3445 		} else {
3446 			tbm.q_first = (uint8_t *)iocbq;
3447 			tbm.q_last = (uint8_t *)iocbq;
3448 			tbm.q_cnt = 1;
3449 		}
3450 
3451 		iocbq = next;
3452 
3453 	}	/* While (iocbq) */
3454 
3455 	if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {
3456 
3457 		/* from_chan->nodeq.q_first must be non-NULL */
3458 		if (from_chan->nodeq.q_first) {
3459 
3460 			/* nodeq is not empty, now deal with the node itself */
3461 			if ((nlp->nlp_tx[fchanno].q_first)) {
3462 
3463 				if (!nlp->nlp_base) {
3464 					from_chan->nodeq.q_last =
3465 					    (void *)nlp;
3466 					from_chan->nodeq.q_first =
3467 					    nlp->nlp_next[fchanno];
3468 				}
3469 
3470 			} else {
3471 				n_prev = (NODELIST *)from_chan->nodeq.q_first;
3472 				count = from_chan->nodeq.q_cnt;
3473 
3474 				if (n_prev == nlp) {
3475 
3476 					/* If this is the only node on list */
3477 					if (from_chan->nodeq.q_last ==
3478 					    (void *)nlp) {
3479 						from_chan->nodeq.q_last =
3480 						    NULL;
3481 						from_chan->nodeq.q_first =
3482 						    NULL;
3483 						from_chan->nodeq.q_cnt = 0;
3484 					} else {
3485 						from_chan->nodeq.q_first =
3486 						    nlp->nlp_next[fchanno];
3487 						((NODELIST *)from_chan->
3488 						    nodeq.q_last)->
3489 						    nlp_next[fchanno] =
3490 						    from_chan->nodeq.q_first;
3491 						from_chan->nodeq.q_cnt--;
3492 					}
3493 					/* Clear node */
3494 					nlp->nlp_next[fchanno] = NULL;
3495 				} else {
3496 					count--;
3497 					do {
3498 						n_next =
3499 						    n_prev->nlp_next[fchanno];
3500 						if (n_next == nlp) {
3501 							break;
3502 						}
3503 						n_prev = n_next;
3504 					} while (count--);
3505 
3506 					if (count != 0) {
3507 
3508 						if (n_next ==
3509 						    (NODELIST *)from_chan->
3510 						    nodeq.q_last) {
3511 							n_prev->
3512 							    nlp_next[fchanno]
3513 							    =
3514 							    ((NODELIST *)
3515 							    from_chan->
3516 							    nodeq.q_last)->
3517 							    nlp_next
3518 							    [fchanno];
3519 							from_chan->nodeq.q_last
3520 							    = (uint8_t *)n_prev;
3521 						} else {
3522 
3523 							n_prev->
3524 							    nlp_next[fchanno]
3525 							    =
3526 							    n_next->nlp_next
3527 							    [fchanno];
3528 						}
3529 						from_chan->nodeq.q_cnt--;
3530 						/* Clear node */
3531 						nlp->nlp_next[fchanno] =
3532 						    NULL;
3533 					}
3534 				}
3535 			}
3536 		}
3537 	}
3538 
3539 	/* Now cleanup the iocb's */
3540 	prev = NULL;
3541 	iocbq = (IOCBQ *)tbm.q_first;
3542 
3543 	while (iocbq) {
3544 
3545 		next = (IOCBQ *)iocbq->next;
3546 
3547 		/* Free the IoTag and the bmp */
3548 		iocb = &iocbq->iocb;
3549 
3550 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3551 			sbp = iocbq->sbp;
3552 			if (sbp) {
3553 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3554 			}
3555 		} else {
3556 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
3557 			    iocb->ULPIOTAG, 0);
3558 		}
3559 
3560 		if (sbp && (sbp != STALE_PACKET)) {
3561 			mutex_enter(&sbp->mtx);
3562 			sbp->pkt_flags |= PACKET_IN_FLUSH;
3563 
3564 			/*
3565 			 * If the fpkt is already set, then we will leave it
3566 			 * alone. This ensures that this pkt is only accounted
3567 			 * for on one fpkt->flush_count
3568 			 */
3569 			if (!sbp->fpkt && fpkt) {
3570 				mutex_enter(&fpkt->mtx);
3571 				sbp->fpkt = fpkt;
3572 				fpkt->flush_count++;
3573 				mutex_exit(&fpkt->mtx);
3574 			}
3575 			mutex_exit(&sbp->mtx);
3576 		}
3577 		iocbq = next;
3578 
3579 	}	/* end of while */
3580 
3581 	iocbq = (IOCBQ *)tbm.q_first;
3582 	while (iocbq) {
3583 		/* Save the next iocbq for now */
3584 		next = (IOCBQ *)iocbq->next;
3585 
3586 		/* Unlink this iocbq */
3587 		iocbq->next = NULL;
3588 
3589 		/* Get the pkt */
3590 		sbp = (emlxs_buf_t *)iocbq->sbp;
3591 
3592 		if (sbp) {
3593 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3594 			    "tx: sbp=%p node=%p", sbp, sbp->node);
3595 
3596 			if (hba->state >= FC_LINK_UP) {
3597 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3598 				    IOERR_ABORT_REQUESTED, 1);
3599 			} else {
3600 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3601 				    IOERR_LINK_DOWN, 1);
3602 			}
3603 
3604 		}
3605 		/* Free the iocb and its associated buffers */
3606 		else {
3607 			icmd = &iocbq->iocb;
3608 
3609 			/* SLI3 */
3610 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
3611 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
3612 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
3613 				if ((hba->flag &
3614 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
3615 					/* HBA is detaching or offlining */
3616 					if (icmd->ULPCOMMAND !=
3617 					    CMD_QUE_RING_LIST64_CN) {
3618 						void *tmp;
3619 						RING *rp;
3620 						int ch;
3621 
3622 						ch = from_chan->channelno;
3623 						rp = &hba->sli.sli3.ring[ch];
3624 
3625 						for (i = 0;
3626 						    i < icmd->ULPBDECOUNT;
3627 						    i++) {
3628 							mp = EMLXS_GET_VADDR(
3629 							    hba, rp, icmd);
3630 
3631 							tmp = (void *)mp;
3632 							if (mp) {
3633 							emlxs_mem_put(
3634 							    hba,
3635 							    MEM_BUF,
3636 							    tmp);
3637 							}
3638 						}
3639 
3640 					}
3641 
3642 					emlxs_mem_put(hba, MEM_IOCB,
3643 					    (void *)iocbq);
3644 				} else {
3645 					/* repost the unsolicited buffer */
3646 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
3647 					    from_chan, iocbq);
3648 				}
3649 			}
3650 		}
3651 
3652 		iocbq = next;
3653 
3654 	}	/* end of while */
3655 
3656 	/* Now flush the chipq if any */
3657 	if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {
3658 
3659 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3660 
3661 		(void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);
3662 
3663 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3664 	}
3665 
3666 	if (lock) {
3667 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3668 	}
3669 
3670 	return;
3671 
3672 } /* emlxs_tx_move */
3673 
3674 
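/*
 * Abort all of a node's IO already on the chip, optionally restricted
 * to one channel. Matching fc_table entries are queued for abort via
 * emlxs_sbp_abort_add() and channel service is then triggered.
 * Returns the number of IO's queued for abort.
 */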
3675 extern uint32_t
3676 emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
3677     emlxs_buf_t *fpkt)
3678 {
3679 	emlxs_hba_t *hba = HBA;
3680 	emlxs_buf_t *sbp;
3681 	IOCBQ *iocbq;
3682 	IOCBQ *next;
3683 	Q abort;
3684 	CHANNEL *cp;
3685 	uint32_t channelno;
3686 	uint8_t flag[MAX_CHANNEL];
3687 	uint32_t iotag;
3688 
3689 	bzero((void *)&abort, sizeof (Q));
3690 	bzero((void *)flag, sizeof (flag));
3691 
3692 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3693 		cp = &hba->chan[channelno];
3694 
3695 		if (chan && cp != chan) {
3696 			continue;
3697 		}
3698 
3699 		mutex_enter(&EMLXS_FCTAB_LOCK);
3700 
3701 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3702 			sbp = hba->fc_table[iotag];
3703 
3704 			if (sbp && (sbp != STALE_PACKET) &&
3705 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3706 			    (sbp->node == ndlp) &&
3707 			    (sbp->channel == cp) &&
3708 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3709 				emlxs_sbp_abort_add(port, sbp, &abort, flag,
3710 				    fpkt);
3711 			}
3712 
3713 		}
3714 		mutex_exit(&EMLXS_FCTAB_LOCK);
3715 
3716 	}	/* for */
3717 
3718 	/* Now put the iocb's on the tx queue */
3719 	iocbq = (IOCBQ *)abort.q_first;
3720 	while (iocbq) {
3721 		/* Save the next iocbq for now */
3722 		next = (IOCBQ *)iocbq->next;
3723 
3724 		/* Unlink this iocbq */
3725 		iocbq->next = NULL;
3726 
3727 		/* Send this iocbq */
3728 		emlxs_tx_put(iocbq, 1);
3729 
3730 		iocbq = next;
3731 	}
3732 
3733 	/* Now trigger channel service */
3734 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3735 		if (!flag[channelno]) {
3736 			continue;
3737 		}
3738 
3739 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3740 	}
3741 
3742 	return (abort.q_cnt);
3743 
3744 } /* emlxs_chipq_node_flush() */
3745 
3746 
3747 /* Flush all IO's left on all iotag lists */
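/*
 * Each outstanding fc_table entry is completed with
 * IOSTAT_LOCAL_REJECT/IOERR_LINK_DOWN by handing it to the channel's
 * deferred response queue and triggering the channel's interrupt
 * thread. Returns the total number of IO's flushed.
 */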
3748 extern uint32_t
3749 emlxs_iotag_flush(emlxs_hba_t *hba)
3750 {
3751 	emlxs_port_t *port = &PPORT;
3752 	emlxs_buf_t *sbp;
3753 	IOCBQ *iocbq;
3754 	IOCB *iocb;
3755 	Q abort;
3756 	CHANNEL *cp;
3757 	uint32_t channelno;
3758 	uint32_t iotag;
3759 	uint32_t count;
3760 
3761 	count = 0;
3762 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3763 		cp = &hba->chan[channelno];
3764 
3765 		bzero((void *)&abort, sizeof (Q));
3766 
3767 		mutex_enter(&EMLXS_FCTAB_LOCK);
3768 
3769 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3770 			sbp = hba->fc_table[iotag];
3771 
3772 			/* Check if the slot is empty */
3773 			if (!sbp || (sbp == STALE_PACKET)) {
3774 				continue;
3775 			}
3776 
3777 			/* We are building an abort list per channel */
3778 			if (sbp->channel != cp) {
3779 				continue;
3780 			}
3781 
3782 			hba->fc_table[iotag] = STALE_PACKET;
3783 			hba->io_count--;
3784 
3785 			/* Check if IO is valid */
3786 			if (!(sbp->pkt_flags & PACKET_VALID) ||
3787 			    (sbp->pkt_flags & (PACKET_ULP_OWNED|
3788 			    PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
3789 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3790 				    "iotag_flush: Invalid IO found. iotag=%d",
3791 				    iotag);
3792 
3793 				continue;
3794 			}
3795 
3796 			sbp->iotag = 0;
3797 
3798 			/* Set IOCB status */
3799 			iocbq = &sbp->iocbq;
3800 			iocb = &iocbq->iocb;
3801 
3802 			iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
3803 			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3804 			iocb->ULPLE = 1;
3805 			iocbq->next = NULL;
3806 
3807 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3808 				if (sbp->xrip) {
3809 					EMLXS_MSGF(EMLXS_CONTEXT,
3810 					    &emlxs_sli_debug_msg,
3811 					    "iotag_flush: iotag=%d sbp=%p "
3812 					    "xrip=%p state=%x flag=%x",
3813 					    iotag, sbp, sbp->xrip,
3814 					    sbp->xrip->state, sbp->xrip->flag);
3815 				} else {
3816 					EMLXS_MSGF(EMLXS_CONTEXT,
3817 					    &emlxs_sli_debug_msg,
3818 					    "iotag_flush: iotag=%d sbp=%p "
3819 					    "xrip=NULL", iotag, sbp);
3820 				}
3821 
3822 				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
3823 			} else {
3824 				/* Clean up the sbp */
3825 				mutex_enter(&sbp->mtx);
3826 
3827 				if (sbp->pkt_flags & PACKET_IN_TXQ) {
3828 					sbp->pkt_flags &= ~PACKET_IN_TXQ;
3829 					hba->channel_tx_count --;
3830 				}
3831 
3832 				if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3833 					sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3834 				}
3835 
3836 				if (sbp->bmp) {
3837 					emlxs_mem_put(hba, MEM_BPL,
3838 					    (void *)sbp->bmp);
3839 					sbp->bmp = 0;
3840 				}
3841 
3842 				mutex_exit(&sbp->mtx);
3843 			}
3844 
3845 			/* At this point all nodes are assumed destroyed */
3846 			mutex_enter(&sbp->mtx);
3847 			sbp->node = 0;
3848 			mutex_exit(&sbp->mtx);
3849 
3850 			/* Add this iocb to our local abort Q */
3851 			if (abort.q_first) {
3852 				((IOCBQ *)abort.q_last)->next = iocbq;
3853 				abort.q_last = (uint8_t *)iocbq;
3854 				abort.q_cnt++;
3855 			} else {
3856 				abort.q_first = (uint8_t *)iocbq;
3857 				abort.q_last = (uint8_t *)iocbq;
3858 				abort.q_cnt = 1;
3859 			}
3860 		}
3861 
3862 		mutex_exit(&EMLXS_FCTAB_LOCK);
3863 
3864 		/* Trigger deferred completion */
3865 		if (abort.q_first) {
3866 			mutex_enter(&cp->rsp_lock);
3867 			if (cp->rsp_head == NULL) {
3868 				cp->rsp_head = (IOCBQ *)abort.q_first;
3869 				cp->rsp_tail = (IOCBQ *)abort.q_last;
3870 			} else {
3871 				cp->rsp_tail->next = (IOCBQ *)abort.q_first;
3872 				cp->rsp_tail = (IOCBQ *)abort.q_last;
3873 			}
3874 			mutex_exit(&cp->rsp_lock);
3875 
3876 			emlxs_thread_trigger2(&cp->intr_thread,
3877 			    emlxs_proc_channel, cp);
3878 
3879 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3880 			    "iotag_flush: channel=%d count=%d",
3881 			    channelno, abort.q_cnt);
3882 
3883 			count += abort.q_cnt;
3884 		}
3885 	}
3886 
3887 	return (count);
3888 
3889 } /* emlxs_iotag_flush() */
3890 
3891 
3892 
3893 /* Checks for IO's on all or a given channel for a given node */
3894 extern uint32_t
3895 emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
3896 {
3897 	emlxs_hba_t *hba = HBA;
3898 	emlxs_buf_t *sbp;
3899 	CHANNEL *cp;
3900 	uint32_t channelno;
3901 	uint32_t count;
3902 	uint32_t iotag;
3903 
3904 	count = 0;
3905 
3906 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3907 		cp = &hba->chan[channelno];
3908 
3909 		if (chan && cp != chan) {
3910 			continue;
3911 		}
3912 
3913 		mutex_enter(&EMLXS_FCTAB_LOCK);
3914 
3915 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3916 			sbp = hba->fc_table[iotag];
3917 
3918 			if (sbp && (sbp != STALE_PACKET) &&
3919 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3920 			    (sbp->node == ndlp) &&
3921 			    (sbp->channel == cp) &&
3922 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3923 				count++;
3924 			}
3925 
3926 		}
3927 		mutex_exit(&EMLXS_FCTAB_LOCK);
3928 
3929 	}	/* for */
3930 
3931 	return (count);
3932 
3933 } /* emlxs_chipq_node_check() */
3934 
3935 
3936 
3937 /* Flush all IO's for a given node's lun (on any channel) */
3938 extern uint32_t
3939 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3940     uint32_t lun, emlxs_buf_t *fpkt)
3941 {
3942 	emlxs_hba_t *hba = HBA;
3943 	emlxs_buf_t *sbp;
3944 	IOCBQ *iocbq;
3945 	IOCBQ *next;
3946 	Q abort;
3947 	uint32_t iotag;
3948 	uint8_t flag[MAX_CHANNEL];
3949 	uint32_t channelno;
3950 
3951 	if (lun == EMLXS_LUN_NONE) {
3952 		return (0);
3953 	}
3954 
3955 	bzero((void *)flag, sizeof (flag));
3956 	bzero((void *)&abort, sizeof (Q));
3957 
3958 	mutex_enter(&EMLXS_FCTAB_LOCK);
3959 	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3960 		sbp = hba->fc_table[iotag];
3961 
3962 		if (sbp && (sbp != STALE_PACKET) &&
3963 		    sbp->pkt_flags & PACKET_IN_CHIPQ &&
3964 		    sbp->node == ndlp &&
3965 		    sbp->lun == lun &&
3966 		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3967 			emlxs_sbp_abort_add(port, sbp,
3968 			    &abort, flag, fpkt);
3969 		}
3970 	}
3971 	mutex_exit(&EMLXS_FCTAB_LOCK);
3972 
3973 	/* Now put the iocb's on the tx queue */
3974 	iocbq = (IOCBQ *)abort.q_first;
3975 	while (iocbq) {
3976 		/* Save the next iocbq for now */
3977 		next = (IOCBQ *)iocbq->next;
3978 
3979 		/* Unlink this iocbq */
3980 		iocbq->next = NULL;
3981 
3982 		/* Send this iocbq */
3983 		emlxs_tx_put(iocbq, 1);
3984 
3985 		iocbq = next;
3986 	}
3987 
3988 	/* Now trigger channel service */
3989 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3990 		if (!flag[channelno]) {
3991 			continue;
3992 		}
3993 
3994 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3995 	}
3996 
3997 	return (abort.q_cnt);
3998 
3999 } /* emlxs_chipq_lun_flush() */
4000 
4001 
4002 
4003 /*
4004  * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
4005  * This must be called while holding the EMLXS_FCTAB_LOCK
4006  */
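/*
 * A minimal usage sketch (illustrative only; see
 * emlxs_close_els_exchange() below for the analogous close sequence):
 *
 *	mutex_enter(&EMLXS_FCTAB_LOCK);
 *	iocbq = emlxs_create_abort_xri_cn(port, ndlp, iotag, cp,
 *	    CLASS3, ABORT_TYPE_ABTS);
 *	mutex_exit(&EMLXS_FCTAB_LOCK);
 *
 *	if (iocbq) {
 *		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
 *	}
 */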
4007 extern IOCBQ *
4008 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4009     uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
4010 {
4011 	emlxs_hba_t *hba = HBA;
4012 	IOCBQ *iocbq;
4013 	IOCB *iocb;
4014 	emlxs_wqe_t *wqe;
4015 	emlxs_buf_t *sbp;
4016 	uint16_t abort_iotag;
4017 
4018 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4019 		return (NULL);
4020 	}
4021 
4022 	iocbq->channel = (void *)cp;
4023 	iocbq->port = (void *)port;
4024 	iocbq->node = (void *)ndlp;
4025 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4026 
4027 	/*
4028 	 * set up an iotag using special Abort iotags
4029 	 */
4030 	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4031 		hba->fc_oor_iotag = hba->max_iotag;
4032 	}
4033 	abort_iotag = hba->fc_oor_iotag++;
4034 
4035 
4036 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4037 		wqe = &iocbq->wqe;
4038 		sbp = hba->fc_table[iotag];
4039 
4040 		/* Try to issue abort by XRI if possible */
4041 		if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4042 			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4043 			wqe->AbortTag = iotag;
4044 		} else {
4045 			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4046 			wqe->AbortTag = sbp->xrip->XRI;
4047 		}
4048 		wqe->un.Abort.IA = 0;
4049 		wqe->RequestTag = abort_iotag;
4050 		wqe->Command = CMD_ABORT_XRI_CX;
4051 		wqe->Class = CLASS3;
4052 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
4053 		wqe->CmdType = WQE_TYPE_ABORT;
4054 	} else {
4055 		iocb = &iocbq->iocb;
4056 		iocb->ULPIOTAG = abort_iotag;
4057 		iocb->un.acxri.abortType = flag;
4058 		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4059 		iocb->un.acxri.abortIoTag = iotag;
4060 		iocb->ULPLE = 1;
4061 		iocb->ULPCLASS = class;
4062 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
4063 		iocb->ULPOWNER = OWN_CHIP;
4064 	}
4065 
4066 	return (iocbq);
4067 
4068 } /* emlxs_create_abort_xri_cn() */
4069 
4070 
4071 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4072 extern IOCBQ *
4073 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4074     CHANNEL *cp, uint8_t class, int32_t flag)
4075 {
4076 	emlxs_hba_t *hba = HBA;
4077 	IOCBQ *iocbq;
4078 	IOCB *iocb;
4079 	emlxs_wqe_t *wqe;
4080 	uint16_t abort_iotag;
4081 
4082 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4083 		return (NULL);
4084 	}
4085 
4086 	iocbq->channel = (void *)cp;
4087 	iocbq->port = (void *)port;
4088 	iocbq->node = (void *)ndlp;
4089 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4090 
4091 	/*
4092 	 * set up an iotag using special Abort iotags
4093 	 */
4094 	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4095 		hba->fc_oor_iotag = hba->max_iotag;
4096 	}
4097 	abort_iotag = hba->fc_oor_iotag++;
4098 
4099 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4100 		wqe = &iocbq->wqe;
4101 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4102 		wqe->un.Abort.IA = 0;
4103 		wqe->RequestTag = abort_iotag;
4104 		wqe->AbortTag = xid;
4105 		wqe->Command = CMD_ABORT_XRI_CX;
4106 		wqe->Class = CLASS3;
4107 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
4108 		wqe->CmdType = WQE_TYPE_ABORT;
4109 	} else {
4110 		iocb = &iocbq->iocb;
4111 		iocb->ULPCONTEXT = xid;
4112 		iocb->ULPIOTAG = abort_iotag;
4113 		iocb->un.acxri.abortType = flag;
4114 		iocb->ULPLE = 1;
4115 		iocb->ULPCLASS = class;
4116 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
4117 		iocb->ULPOWNER = OWN_CHIP;
4118 	}
4119 
4120 	return (iocbq);
4121 
4122 } /* emlxs_create_abort_xri_cx() */
4123 
4124 
4125 
4126 /* This must be called while holding the EMLXS_FCTAB_LOCK */
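/*
 * The close variants differ from the abort variants above in that no
 * ABTS is sent on the wire: the exchange is simply closed (the IA bit
 * in the SLI4 abort WQE, or CMD_CLOSE_XRI_CN/_CX for SLI3).
 */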
4127 extern IOCBQ *
4128 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4129     uint16_t iotag, CHANNEL *cp)
4130 {
4131 	emlxs_hba_t *hba = HBA;
4132 	IOCBQ *iocbq;
4133 	IOCB *iocb;
4134 	emlxs_wqe_t *wqe;
4135 	emlxs_buf_t *sbp;
4136 	uint16_t abort_iotag;
4137 
4138 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4139 		return (NULL);
4140 	}
4141 
4142 	iocbq->channel = (void *)cp;
4143 	iocbq->port = (void *)port;
4144 	iocbq->node = (void *)ndlp;
4145 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4146 
4147 	/*
4148 	 * set up an iotag using special Abort iotags
4149 	 */
4150 	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4151 		hba->fc_oor_iotag = hba->max_iotag;
4152 	}
4153 	abort_iotag = hba->fc_oor_iotag++;
4154 
4155 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4156 		wqe = &iocbq->wqe;
4157 		sbp = hba->fc_table[iotag];
4158 
4159 		/* Try to issue close by XRI if possible */
4160 		if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4161 			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4162 			wqe->AbortTag = iotag;
4163 		} else {
4164 			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4165 			wqe->AbortTag = sbp->xrip->XRI;
4166 		}
4167 		wqe->un.Abort.IA = 1;
4168 		wqe->RequestTag = abort_iotag;
4169 		wqe->Command = CMD_ABORT_XRI_CX;
4170 		wqe->Class = CLASS3;
4171 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
4172 		wqe->CmdType = WQE_TYPE_ABORT;
4173 	} else {
4174 		iocb = &iocbq->iocb;
4175 		iocb->ULPIOTAG = abort_iotag;
4176 		iocb->un.acxri.abortType = 0;
4177 		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4178 		iocb->un.acxri.abortIoTag = iotag;
4179 		iocb->ULPLE = 1;
4180 		iocb->ULPCLASS = 0;
4181 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
4182 		iocb->ULPOWNER = OWN_CHIP;
4183 	}
4184 
4185 	return (iocbq);
4186 
4187 } /* emlxs_create_close_xri_cn() */
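
/*
 * The _cn and _cx variants differ chiefly in how the victim exchange
 * is addressed (sketch of the two call forms, using names from this
 * file):
 *
 *	CN - by the driver iotag of a known command, qualified by the
 *	node's RPI:
 *		iocbq = emlxs_create_close_xri_cn(port, ndlp,
 *		    sbp->iotag, cp);
 *
 *	CX - by the exchange id itself, as used below for unsolicited
 *	exchanges that have no associated sbp:
 *		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
 */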
4188 
4189 
4190 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4191 extern IOCBQ *
4192 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4193     CHANNEL *cp)
4194 {
4195 	emlxs_hba_t *hba = HBA;
4196 	IOCBQ *iocbq;
4197 	IOCB *iocb;
4198 	emlxs_wqe_t *wqe;
4199 	uint16_t abort_iotag;
4200 
4201 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4202 		return (NULL);
4203 	}
4204 
4205 	iocbq->channel = (void *)cp;
4206 	iocbq->port = (void *)port;
4207 	iocbq->node = (void *)ndlp;
4208 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4209 
4210 	/*
4211 	 * Allocate an abort iotag from the special out-of-range pool
4212 	 */
4213 	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4214 		hba->fc_oor_iotag = hba->max_iotag;
4215 	}
4216 	abort_iotag = hba->fc_oor_iotag++;
4217 
4218 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4219 		wqe = &iocbq->wqe;
4220 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4221 		wqe->un.Abort.IA = 1;
4222 		wqe->RequestTag = abort_iotag;
4223 		wqe->AbortTag = xid;
4224 		wqe->Command = CMD_ABORT_XRI_CX;
4225 		wqe->Class = CLASS3;
4226 		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */
4227 		wqe->CmdType = WQE_TYPE_ABORT;
4228 	} else {
4229 		iocb = &iocbq->iocb;
4230 		iocb->ULPCONTEXT = xid;
4231 		iocb->ULPIOTAG = abort_iotag;
4232 		iocb->ULPLE = 1;
4233 		iocb->ULPCLASS = 0;
4234 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
4235 		iocb->ULPOWNER = OWN_CHIP;
4236 	}
4237 
4238 	return (iocbq);
4239 
4240 } /* emlxs_create_close_xri_cx() */
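
/*
 * Abort vs. close: the abort forms (IA = 0 in the SLI4 WQE) cause an
 * ABTS to be sent on the wire, while the close forms (IA = 1) appear
 * to tear the exchange down locally without ABTS traffic.  The
 * callers below therefore select a variant based on link state:
 *
 *	if (hba->state >= FC_LINK_UP) {
 *		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
 *		    CLASS3, ABORT_TYPE_ABTS);
 *	} else {
 *		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
 *	}
 */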
4241 
4242 
4243 void
4244 emlxs_close_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4245 {
4246 	CHANNEL *cp;
4247 	IOCBQ *iocbq;
4248 	IOCB *iocb;
4249 
4250 	if (rxid == 0 || rxid == 0xFFFF) {
4251 		return;
4252 	}
4253 
4254 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4255 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4256 		    "Closing ELS exchange: xid=%x", rxid);
4257 
4258 		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4259 			return;
4260 		}
4261 	}
4262 
4263 	cp = &hba->chan[hba->channel_els];
4264 
4265 	mutex_enter(&EMLXS_FCTAB_LOCK);
4266 
4267 	/* Create the close IOCB */
4268 	iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4269 
4270 	mutex_exit(&EMLXS_FCTAB_LOCK);
4271 
4272 	if (iocbq) {
4273 		iocb = &iocbq->iocb;
4274 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4275 		    "Closing ELS exchange: xid=%x iotag=%d", rxid,
4276 		    iocb->ULPIOTAG);
4277 
4278 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4279 	}
4280 
4281 } /* emlxs_close_els_exchange() */
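
/*
 * On SLI4 the common case needs no IOCB at all: when
 * emlxs_sli4_unreserve_xri() succeeds, the reserved exchange has
 * already been released and the routine returns early.  Caller
 * sketch (rx_id, taken from the unsolicited frame header, is an
 * assumed name):
 *
 *	emlxs_close_els_exchange(hba, port, rx_id);
 */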
4282 
4283 
4284 void
4285 emlxs_abort_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4286 {
4287 	CHANNEL *cp;
4288 	IOCBQ *iocbq;
4289 	IOCB *iocb;
4290 
4291 	if (rxid == 0 || rxid == 0xFFFF) {
4292 		return;
4293 	}
4294 
4295 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4296 
4297 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4298 		    "Aborting ELS exchange: xid=%x", rxid);
4299 
4300 		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4301 			/* There is currently no way to abort an */
4302 			/* unsolicited exchange that has not yet been */
4303 			/* responded to, so simply return */
4304 			return;
4305 		}
4306 	}
4307 
4308 	cp = &hba->chan[hba->channel_els];
4309 
4310 	mutex_enter(&EMLXS_FCTAB_LOCK);
4311 
4312 	/* Create the abort or close IOCB, based on link state */
4313 	if (hba->state >= FC_LINK_UP) {
4314 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4315 		    CLASS3, ABORT_TYPE_ABTS);
4316 	} else {
4317 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4318 	}
4319 
4320 	mutex_exit(&EMLXS_FCTAB_LOCK);
4321 
4322 	if (iocbq) {
4323 		iocb = &iocbq->iocb;
4324 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4325 		    "Aborting ELS exchange: xid=%x iotag=%d", rxid,
4326 		    iocb->ULPIOTAG);
4327 
4328 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4329 	}
4330 
4331 } /* emlxs_abort_els_exchange() */
4332 
4333 
4334 void
4335 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4336 {
4337 	CHANNEL *cp;
4338 	IOCBQ *iocbq;
4339 	IOCB *iocb;
4340 
4341 	if (rxid == 0 || rxid == 0xFFFF) {
4342 		return;
4343 	}
4344 
4345 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4346 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4347 		    "Aborting CT exchange: xid=%x", rxid);
4348 
4349 		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4350 			/* There is currently no way to abort an */
4351 			/* unsolicited exchange that has not yet been */
4352 			/* responded to, so simply return */
4353 			return;
4354 		}
4355 	}
4356 
4357 	cp = &hba->chan[hba->channel_ct];
4358 
4359 	mutex_enter(&EMLXS_FCTAB_LOCK);
4360 
4361 	/* Create the abort or close IOCB, based on link state */
4362 	if (hba->state >= FC_LINK_UP) {
4363 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4364 		    CLASS3, ABORT_TYPE_ABTS);
4365 	} else {
4366 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4367 	}
4368 
4369 	mutex_exit(&EMLXS_FCTAB_LOCK);
4370 
4371 	if (iocbq) {
4372 		iocb = &iocbq->iocb;
4373 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4374 		    "Aborting CT exchange: xid=%x iotag=%d", rxid,
4375 		    iocb->ULPIOTAG);
4376 
4377 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4378 	}
4379 
4380 } /* emlxs_abort_ct_exchange() */
4381 
4382 
4383 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4384 static void
4385 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
4386     uint8_t *flag, emlxs_buf_t *fpkt)
4387 {
4388 	emlxs_hba_t *hba = HBA;
4389 	IOCBQ *iocbq;
4390 	CHANNEL *cp;
4391 	NODELIST *ndlp;
4392 
4393 	cp = (CHANNEL *)sbp->channel;
4394 	ndlp = sbp->node;
4395 
4396 	/* Create the abort or close XRI IOCB, based on link state */
4397 	if (hba->state >= FC_LINK_UP) {
4398 		iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, cp,
4399 		    CLASS3, ABORT_TYPE_ABTS);
4400 	} else {
4401 		iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
4402 	}
4403 	/*
4404 	 * Add this IOCB to our local abort queue so that we do not
4405 	 * hold the CHIPQ lock too long.
4406 	 */
4407 	if (iocbq) {
4408 		if (abort->q_first) {
4409 			((IOCBQ *)abort->q_last)->next = iocbq;
4410 			abort->q_last = (uint8_t *)iocbq;
4411 			abort->q_cnt++;
4412 		} else {
4413 			abort->q_first = (uint8_t *)iocbq;
4414 			abort->q_last = (uint8_t *)iocbq;
4415 			abort->q_cnt = 1;
4416 		}
4417 		iocbq->next = NULL;
4418 	}
4419 
4420 	/* set the flags */
4421 	mutex_enter(&sbp->mtx);
4422 
4423 	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4424 
4425 	sbp->ticks = hba->timer_tics + 10;
4426 	sbp->abort_attempts++;
4427 
4428 	flag[cp->channelno] = 1;
4429 
4430 	/*
4431 	 * If fpkt is already set, leave it alone.  This ensures
4432 	 * that this packet is accounted for on only one
4433 	 * fpkt->flush_count.
4434 	 */
4435 	if (!sbp->fpkt && fpkt) {
4436 		mutex_enter(&fpkt->mtx);
4437 		sbp->fpkt = fpkt;
4438 		fpkt->flush_count++;
4439 		mutex_exit(&fpkt->mtx);
4440 	}
4441 
4442 	mutex_exit(&sbp->mtx);
4443 
4444 	return;
4445 
4446 }	/* emlxs_sbp_abort_add() */
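
/*
 * Hypothetical sketch (not the verbatim flush code) of draining the
 * local abort queue built by emlxs_sbp_abort_add() once
 * EMLXS_FCTAB_LOCK has been dropped; `abort' is the caller's local Q
 * passed in above:
 *
 *	IOCBQ *iocbq = (IOCBQ *)abort.q_first;
 *	IOCBQ *next;
 *
 *	while (iocbq) {
 *		next = (IOCBQ *)iocbq->next;
 *		EMLXS_SLI_ISSUE_IOCB_CMD(hba,
 *		    (CHANNEL *)iocbq->channel, iocbq);
 *		iocbq = next;
 *	}
 */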
4447