1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2011 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_FCP_C);
32 
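/*
 * Retrieves the driver virtual address previously saved (via
 * emlxs_mem_map_vaddr) for the DMA address pair found in BDE 'i' of
 * the given iocb.
 */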
#define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
	PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow))
35 
36 static void	emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
37     Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
38 
39 #define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
40 #define	SCSI_INQUIRY			0x12
#define	SCSI_RX_DIAG			0x1C
42 
43 
44 /*
45  *  emlxs_handle_fcp_event
46  *
47  *  Description: Process an FCP Rsp Ring completion
48  *
49  */
50 /* ARGSUSED */
51 extern void
52 emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
53 {
54 	emlxs_port_t *port = &PPORT;
55 	emlxs_config_t	*cfg = &CFG;
56 	IOCB *cmd;
57 	emlxs_buf_t *sbp;
58 	fc_packet_t *pkt = NULL;
59 #ifdef SAN_DIAG_SUPPORT
60 	NODELIST *ndlp;
61 #endif
62 	uint32_t iostat;
63 	uint8_t localstat;
64 	fcp_rsp_t *rsp;
65 	uint32_t rsp_data_resid;
66 	uint32_t check_underrun;
67 	uint8_t asc;
68 	uint8_t ascq;
69 	uint8_t scsi_status;
70 	uint8_t sense;
71 	uint32_t did;
72 	uint32_t fix_it;
73 	uint8_t *scsi_cmd;
74 	uint8_t scsi_opcode;
75 	uint16_t scsi_dl;
76 	uint32_t data_rx;
77 
78 	cmd = &iocbq->iocb;
79 
80 	/* Initialize the status */
81 	iostat = cmd->ULPSTATUS;
82 	localstat = 0;
83 	scsi_status = 0;
84 	asc = 0;
85 	ascq = 0;
86 	sense = 0;
87 	check_underrun = 0;
88 	fix_it = 0;
89 
90 	HBASTATS.FcpEvent++;
91 
92 	sbp = (emlxs_buf_t *)iocbq->sbp;
93 
94 	if (!sbp) {
95 		/* completion with missing xmit command */
96 		HBASTATS.FcpStray++;
97 
98 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
99 		    "cmd=%x iotag=%x", cmd->ULPCOMMAND, cmd->ULPIOTAG);
100 
101 		return;
102 	}
103 
104 	HBASTATS.FcpCompleted++;
105 
106 #ifdef SAN_DIAG_SUPPORT
107 	emlxs_update_sd_bucket(sbp);
108 #endif /* SAN_DIAG_SUPPORT */
109 
110 	pkt = PRIV2PKT(sbp);
111 
112 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
113 	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
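	/*
	 * The FCP_CMND payload begins with an 8-byte FCP_LUN followed by
	 * 4 bytes of task attribute/management fields, so the SCSI CDB
	 * starts at byte offset 12.
	 */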
114 	scsi_opcode = scsi_cmd[12];
115 	data_rx = 0;
116 
117 	/* Sync data in data buffer only on FC_PKT_FCP_READ */
118 	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
119 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
120 		    DDI_DMA_SYNC_FORKERNEL);
121 
122 #ifdef TEST_SUPPORT
123 		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
124 		    (pkt->pkt_datalen >= 512)) {
125 			hba->underrun_counter--;
126 			iostat = IOSTAT_FCP_RSP_ERROR;
127 
128 			/* Report 512 bytes missing by adapter */
129 			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
130 
131 			/* Corrupt 512 bytes of Data buffer */
132 			bzero((uint8_t *)pkt->pkt_data, 512);
133 
134 			/* Set FCP response to STATUS_GOOD */
135 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
136 		}
137 #endif /* TEST_SUPPORT */
138 	}
139 
140 	/* Process the pkt */
141 	mutex_enter(&sbp->mtx);
142 
143 	/* Check for immediate return */
144 	if ((iostat == IOSTAT_SUCCESS) &&
145 	    (pkt->pkt_comp) &&
146 	    !(sbp->pkt_flags &
147 	    (PACKET_ULP_OWNED | PACKET_COMPLETED |
148 	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
149 	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
150 	    PACKET_IN_ABORT | PACKET_POLLED))) {
151 		HBASTATS.FcpGood++;
152 
153 		sbp->pkt_flags |=
154 		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
155 		    PACKET_COMPLETED | PACKET_ULP_OWNED);
156 		mutex_exit(&sbp->mtx);
157 
158 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
159 		emlxs_unswap_pkt(sbp);
160 #endif /* EMLXS_MODREV2X */
161 
162 #ifdef FMA_SUPPORT
163 		emlxs_check_dma(hba, sbp);
164 #endif  /* FMA_SUPPORT */
165 
166 		cp->ulpCmplCmd++;
167 		(*pkt->pkt_comp) (pkt);
168 
169 #ifdef FMA_SUPPORT
170 		if (hba->flag & FC_DMA_CHECK_ERROR) {
171 			emlxs_thread_spawn(hba, emlxs_restart_thread,
172 			    NULL, NULL);
173 		}
174 #endif  /* FMA_SUPPORT */
175 
176 		return;
177 	}
178 
179 	/*
180 	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
181 	 * is reported.
182 	 */
183 
184 	/* Check if a response buffer was provided */
185 	if ((iostat == IOSTAT_FCP_RSP_ERROR) && pkt->pkt_rsplen) {
186 		EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
187 		    DDI_DMA_SYNC_FORKERNEL);
188 
189 		/* Get the response buffer pointer */
190 		rsp = (fcp_rsp_t *)pkt->pkt_resp;
191 
192 		/* Set the valid response flag */
193 		sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
194 
195 		scsi_status = rsp->fcp_u.fcp_status.scsi_status;
196 
197 #ifdef SAN_DIAG_SUPPORT
198 		ndlp = (NODELIST *)iocbq->node;
199 		if (scsi_status == SCSI_STAT_QUE_FULL) {
200 			emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
201 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
202 		} else if (scsi_status == SCSI_STAT_BUSY) {
203 			emlxs_log_sd_scsi_event(port,
204 			    SD_SCSI_SUBCATEGORY_DEVBSY,
205 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
206 		}
207 #endif
208 
209 		/*
210 		 * Convert a task abort to a check condition with no data
211 		 * transferred. We saw a data corruption when Solaris received
212 		 * a Task Abort from a tape.
213 		 */
214 
215 		if (scsi_status == SCSI_STAT_TASK_ABORT) {
216 			EMLXS_MSGF(EMLXS_CONTEXT,
217 			    &emlxs_fcp_completion_error_msg,
			    "Task Abort. Fixed. "
			    "did=0x%06x sbp=%p cmd=%02x dl=%d",
220 			    did, sbp, scsi_opcode, pkt->pkt_datalen);
221 
222 			rsp->fcp_u.fcp_status.scsi_status =
223 			    SCSI_STAT_CHECK_COND;
224 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
225 			rsp->fcp_u.fcp_status.sense_len_set = 0;
226 			rsp->fcp_u.fcp_status.resid_over = 0;
227 
228 			if (pkt->pkt_datalen) {
229 				rsp->fcp_u.fcp_status.resid_under = 1;
230 				rsp->fcp_resid =
231 				    LE_SWAP32(pkt->pkt_datalen);
232 			} else {
233 				rsp->fcp_u.fcp_status.resid_under = 0;
234 				rsp->fcp_resid = 0;
235 			}
236 
237 			scsi_status = SCSI_STAT_CHECK_COND;
238 		}
239 
240 		/*
241 		 * We only need to check underrun if data could
242 		 * have been sent
243 		 */
244 
245 		/* Always check underrun if status is good */
246 		if (scsi_status == SCSI_STAT_GOOD) {
247 			check_underrun = 1;
248 		}
249 		/* Check the sense codes if this is a check condition */
250 		else if (scsi_status == SCSI_STAT_CHECK_COND) {
251 			check_underrun = 1;
252 
253 			/* Check if sense data was provided */
254 			if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
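				/*
				 * Fixed-format sense data (per SPC): byte 2
				 * holds the sense key, bytes 12 and 13 hold
				 * the ASC and ASCQ. The sense area begins
				 * 32 bytes into the FCP_RSP payload here,
				 * and the >= 14 check above guarantees the
				 * ASCQ byte is present.
				 */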
255 				sense = *((uint8_t *)rsp + 32 + 2);
256 				asc = *((uint8_t *)rsp + 32 + 12);
257 				ascq = *((uint8_t *)rsp + 32 + 13);
258 			}
259 
260 #ifdef SAN_DIAG_SUPPORT
261 			emlxs_log_sd_scsi_check_event(port,
262 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
263 			    scsi_opcode, sense, asc, ascq);
264 #endif
265 		}
266 		/* Status is not good and this is not a check condition */
267 		/* No data should have been sent */
268 		else {
269 			check_underrun = 0;
270 		}
271 
272 		/* Get the residual underrun count reported by the SCSI reply */
273 		rsp_data_resid = (pkt->pkt_datalen &&
274 		    rsp->fcp_u.fcp_status.resid_under) ? LE_SWAP32(rsp->
275 		    fcp_resid) : 0;
276 
277 		/* Set the pkt resp_resid field */
278 		pkt->pkt_resp_resid = 0;
279 
280 		/* Set the pkt data_resid field */
281 		if (pkt->pkt_datalen &&
282 		    (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
283 			/*
284 			 * Get the residual underrun count reported by
285 			 * our adapter
286 			 */
287 			pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
288 
289 #ifdef SAN_DIAG_SUPPORT
290 			if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
291 				emlxs_log_sd_fc_rdchk_event(port,
292 				    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
293 				    scsi_opcode, pkt->pkt_data_resid);
294 			}
295 #endif
296 
297 			/* Get the actual amount of data transferred */
298 			data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
299 
300 			/*
301 			 * If the residual being reported by the adapter is
302 			 * greater than the residual being reported in the
303 			 * reply, then we have a true underrun.
304 			 */
305 			if (check_underrun &&
306 			    (pkt->pkt_data_resid > rsp_data_resid)) {
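				/*
				 * Derive the expected data length from the
				 * CDB (which starts at scsi_cmd[12]): for
				 * INQUIRY the driver reads the allocation
				 * length from CDB byte 4; for RECEIVE
				 * DIAGNOSTIC RESULTS (0x1C) from the
				 * two-byte field in CDB bytes 3-4.
				 */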
307 				switch (scsi_opcode) {
308 				case SCSI_INQUIRY:
309 					scsi_dl = scsi_cmd[16];
310 					break;
311 
312 				case SCSI_RX_DIAG:
313 					scsi_dl =
314 					    (scsi_cmd[15] * 0x100) +
315 					    scsi_cmd[16];
316 					break;
317 
318 				default:
319 					scsi_dl = pkt->pkt_datalen;
320 				}
321 
322 #ifdef FCP_UNDERRUN_PATCH1
323 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
324 				/*
325 				 * If status is not good and no data was
326 				 * actually transferred, then we must fix
327 				 * the issue
328 				 */
329 				if ((scsi_status != SCSI_STAT_GOOD) &&
330 				    (data_rx == 0)) {
331 					fix_it = 1;
332 
333 					EMLXS_MSGF(EMLXS_CONTEXT,
334 					    &emlxs_fcp_completion_error_msg,
335 					    "Underrun(1). Fixed. "
336 					    "did=0x%06x sbp=%p cmd=%02x "
337 					    "dl=%d,%d rx=%d rsp=%d",
338 					    did, sbp, scsi_opcode,
339 					    pkt->pkt_datalen, scsi_dl,
340 					    (pkt->pkt_datalen -
341 					    cmd->un.fcpi.fcpi_parm),
342 					    rsp_data_resid);
343 
344 				}
345 }
346 #endif /* FCP_UNDERRUN_PATCH1 */
347 
348 
349 #ifdef FCP_UNDERRUN_PATCH2
350 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH2) {
				if (scsi_status == SCSI_STAT_GOOD) {
352 					emlxs_msg_t	*msg;
353 
354 					msg = &emlxs_fcp_completion_error_msg;
					/*
					 * If status is good, this is an
					 * inquiry request, and the amount
					 * of data requested <= data
					 * received, then we must fix the
					 * issue.
					 */
364 
365 					if ((scsi_opcode == SCSI_INQUIRY) &&
366 					    (pkt->pkt_datalen >= data_rx) &&
367 					    (scsi_dl <= data_rx)) {
368 						fix_it = 1;
369 
370 						EMLXS_MSGF(EMLXS_CONTEXT, msg,
371 						    "Underrun(2). Fixed. "
372 						    "did=0x%06x sbp=%p "
373 						    "cmd=%02x dl=%d,%d "
374 						    "rx=%d rsp=%d",
375 						    did, sbp, scsi_opcode,
376 						    pkt->pkt_datalen, scsi_dl,
377 						    data_rx, rsp_data_resid);
378 
379 					}
380 
381 					/*
382 					 * If status is good and this is an
383 					 * inquiry request and the amount of
384 					 * data requested >= 128 bytes, but
385 					 * only 128 bytes were received,
386 					 * then we must fix the issue.
387 					 */
388 					else if ((scsi_opcode ==
389 					    SCSI_INQUIRY) &&
390 					    (pkt->pkt_datalen >= 128) &&
391 					    (scsi_dl >= 128) &&
392 					    (data_rx == 128)) {
393 						fix_it = 1;
394 
395 						EMLXS_MSGF(EMLXS_CONTEXT, msg,
396 						    "Underrun(3). Fixed. "
397 						    "did=0x%06x sbp=%p "
398 						    "cmd=%02x dl=%d,%d "
399 						    "rx=%d rsp=%d",
400 						    did, sbp, scsi_opcode,
401 						    pkt->pkt_datalen, scsi_dl,
402 						    data_rx, rsp_data_resid);
403 
404 					}
405 
406 				}
407 }
408 #endif /* FCP_UNDERRUN_PATCH2 */
409 
410 				/*
411 				 * Check if SCSI response payload should be
412 				 * fixed or if a DATA_UNDERRUN should be
413 				 * reported
414 				 */
415 				if (fix_it) {
416 					/*
417 					 * Fix the SCSI response payload itself
418 					 */
419 					rsp->fcp_u.fcp_status.resid_under = 1;
420 					rsp->fcp_resid =
421 					    LE_SWAP32(pkt->pkt_data_resid);
422 				} else {
423 					/*
424 					 * Change the status from
425 					 * IOSTAT_FCP_RSP_ERROR to
426 					 * IOSTAT_DATA_UNDERRUN
427 					 */
428 					iostat = IOSTAT_DATA_UNDERRUN;
429 					pkt->pkt_data_resid =
430 					    pkt->pkt_datalen;
431 				}
432 			}
433 
434 			/*
435 			 * If the residual being reported by the adapter is
436 			 * less than the residual being reported in the reply,
437 			 * then we have a true overrun. Since we don't know
			 * where the extra data came from or went to, we
			 * cannot trust anything we received.
440 			 */
441 			else if (rsp_data_resid > pkt->pkt_data_resid) {
442 				/*
443 				 * Change the status from
444 				 * IOSTAT_FCP_RSP_ERROR to
445 				 * IOSTAT_DATA_OVERRUN
446 				 */
447 				iostat = IOSTAT_DATA_OVERRUN;
448 				pkt->pkt_data_resid = pkt->pkt_datalen;
449 			}
450 		} else {	/* pkt->pkt_datalen==0 or FC_PKT_FCP_WRITE */
451 
452 			/* Report whatever the target reported */
453 			pkt->pkt_data_resid = rsp_data_resid;
454 		}
455 	}
456 
457 	/* Print completion message */
458 	switch (iostat) {
459 	case IOSTAT_SUCCESS:
460 		/* Build SCSI GOOD status */
461 		if (pkt->pkt_rsplen) {
462 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
463 		}
464 		break;
465 
466 	case IOSTAT_FCP_RSP_ERROR:
467 		break;
468 
469 	case IOSTAT_REMOTE_STOP:
470 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
471 		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
472 		    scsi_opcode);
473 		break;
474 
475 	case IOSTAT_LOCAL_REJECT:
476 		localstat = cmd->un.grsp.perr.statLocalError;
477 
478 		switch (localstat) {
479 		case IOERR_SEQUENCE_TIMEOUT:
480 			EMLXS_MSGF(EMLXS_CONTEXT,
481 			    &emlxs_fcp_completion_error_msg,
482 			    "Local reject. "
483 			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
484 			    emlxs_error_xlate(localstat), did, sbp,
485 			    scsi_opcode, pkt->pkt_timeout);
486 			break;
487 
488 		default:
489 			EMLXS_MSGF(EMLXS_CONTEXT,
490 			    &emlxs_fcp_completion_error_msg,
491 			    "Local reject. %s 0x%06x %p %02x (%x)(%x)",
492 			    emlxs_error_xlate(localstat), did, sbp,
493 			    scsi_opcode, (uint16_t)cmd->ULPIOTAG,
494 			    (uint16_t)cmd->ULPCONTEXT);
495 		}
496 
497 		break;
498 
499 	case IOSTAT_NPORT_RJT:
500 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
501 		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
502 		    scsi_opcode);
503 		break;
504 
505 	case IOSTAT_FABRIC_RJT:
506 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
507 		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
508 		    scsi_opcode);
509 		break;
510 
511 	case IOSTAT_NPORT_BSY:
512 #ifdef SAN_DIAG_SUPPORT
513 		ndlp = (NODELIST *)iocbq->node;
514 		emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
515 #endif
516 
517 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
518 		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
519 		    scsi_opcode);
520 		break;
521 
522 	case IOSTAT_FABRIC_BSY:
523 #ifdef SAN_DIAG_SUPPORT
524 		ndlp = (NODELIST *)iocbq->node;
525 		emlxs_log_sd_fc_bsy_event(port, NULL);
526 #endif
527 
528 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
529 		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
530 		    scsi_opcode);
531 		break;
532 
533 	case IOSTAT_INTERMED_RSP:
534 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
535 		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
536 		    sbp, scsi_opcode);
537 		break;
538 
539 	case IOSTAT_LS_RJT:
540 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
541 		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
542 		    scsi_opcode);
543 		break;
544 
545 	case IOSTAT_DATA_UNDERRUN:
546 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
547 		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
548 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
549 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
550 		    rsp_data_resid, scsi_status, sense, asc, ascq);
551 		break;
552 
553 	case IOSTAT_DATA_OVERRUN:
554 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
555 		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
556 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
557 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
558 		    rsp_data_resid, scsi_status, sense, asc, ascq);
559 		break;
560 
561 	default:
562 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
563 		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
564 		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
565 		    scsi_opcode);
566 		break;
567 	}
568 
569 done:
570 
571 	if (iostat == IOSTAT_SUCCESS) {
572 		HBASTATS.FcpGood++;
573 	} else {
574 		HBASTATS.FcpError++;
575 	}
576 
577 	mutex_exit(&sbp->mtx);
578 
579 	emlxs_pkt_complete(sbp, iostat, localstat, 0);
580 
581 	return;
582 
583 } /* emlxs_handle_fcp_event() */
584 
585 
586 
/*
 *  emlxs_post_buffer
 *
 *  This routine posts cnt buffers to the ring with the
 *  QUE_RING_BUF64_CN command, allowing up to 2 buffers
 *  per command to be posted.
 *  Returns the number of buffers NOT posted.
 */
595 /* SLI3 */
596 extern int
597 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
598 {
599 	emlxs_port_t *port = &PPORT;
600 	IOCB *icmd;
601 	IOCBQ *iocbq;
602 	MATCHMAP *mp;
603 	uint16_t tag;
604 	uint32_t maxqbuf;
605 	int32_t i;
606 	int32_t j;
607 	uint32_t seg;
608 	uint32_t size;
609 
610 	mp = 0;
611 	maxqbuf = 2;
612 	tag = (uint16_t)cnt;
613 	cnt += rp->fc_missbufcnt;
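	/*
	 * 'tag' preserves the caller's original count and is stored in
	 * ULPIOTAG below, presumably so unsolicited-buffer completions can
	 * be associated with this post; 'cnt' now also includes any
	 * buffers a previous attempt failed to post (fc_missbufcnt).
	 */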
614 
615 	if (rp->ringno == hba->channel_els) {
616 		seg = MEM_BUF;
617 		size = MEM_ELSBUF_SIZE;
618 	} else if (rp->ringno == hba->channel_ip) {
619 		seg = MEM_IPBUF;
620 		size = MEM_IPBUF_SIZE;
621 	} else if (rp->ringno == hba->channel_ct) {
622 		seg = MEM_CTBUF;
623 		size = MEM_CTBUF_SIZE;
624 	}
625 #ifdef SFCT_SUPPORT
626 	else if (rp->ringno == hba->CHANNEL_FCT) {
627 		seg = MEM_FCTBUF;
628 		size = MEM_FCTBUF_SIZE;
629 	}
630 #endif /* SFCT_SUPPORT */
631 	else {
632 		return (0);
633 	}
634 
635 	/*
636 	 * While there are buffers to post
637 	 */
638 	while (cnt) {
639 		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == 0) {
640 			rp->fc_missbufcnt = cnt;
641 			return (cnt);
642 		}
643 
644 		iocbq->channel = (void *)&hba->chan[rp->ringno];
645 		iocbq->port = (void *)port;
646 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
647 
648 		icmd = &iocbq->iocb;
649 
		/*
		 * Post up to maxqbuf buffers per command
		 */
653 		for (i = 0; i < maxqbuf; i++) {
654 			if (cnt <= 0)
655 				break;
656 
657 			/* fill in BDEs for command */
658 			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg, 1))
659 			    == 0) {
660 				icmd->ULPBDECOUNT = i;
661 				for (j = 0; j < i; j++) {
662 					mp = EMLXS_GET_VADDR(hba, rp, icmd);
663 					if (mp) {
664 						emlxs_mem_put(hba, seg,
665 						    (void *)mp);
666 					}
667 				}
668 
669 				rp->fc_missbufcnt = cnt + i;
670 
671 				emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
672 
673 				return (cnt + i);
674 			}
675 
676 			/*
677 			 * map that page and save the address pair for lookup
678 			 * later
679 			 */
680 			emlxs_mem_map_vaddr(hba,
681 			    rp,
682 			    mp,
683 			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
684 			    (uint32_t *)&icmd->un.cont64[i].addrLow);
685 
686 			icmd->un.cont64[i].tus.f.bdeSize = size;
687 			icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;
688 
689 			/*
690 			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
691 			 *    "UB Post: ring=%d addr=%08x%08x size=%d",
692 			 *    rp->ringno, icmd->un.cont64[i].addrHigh,
693 			 *    icmd->un.cont64[i].addrLow, size);
694 			 */
695 
696 			cnt--;
697 		}
698 
699 		icmd->ULPIOTAG = tag;
700 		icmd->ULPBDECOUNT = i;
701 		icmd->ULPLE = 1;
702 		icmd->ULPOWNER = OWN_CHIP;
		/* Save the last mp; used as a delimiter between commands */
704 		iocbq->bp = (void *)mp;
705 
706 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
707 	}
708 
709 	rp->fc_missbufcnt = 0;
710 
711 	return (0);
712 
713 } /* emlxs_post_buffer() */
714 
715 
716 extern int
717 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
718 {
719 	emlxs_hba_t *hba = HBA;
720 	emlxs_config_t *cfg;
721 	NODELIST *nlp;
722 	fc_affected_id_t *aid;
723 	uint32_t mask;
724 	uint32_t aff_d_id;
725 	uint32_t linkdown;
726 	uint32_t vlinkdown;
727 	uint32_t action;
728 	int i;
729 	uint32_t unreg_vpi;
730 	uint32_t update;
731 	uint32_t adisc_support;
732 	uint32_t clear_all;
733 	uint8_t format;
734 
735 	/* Target mode only uses this routine for linkdowns */
736 	if (port->tgt_mode && (scope != 0xffffffff) && (scope != 0xfeffffff)) {
737 		return (0);
738 	}
739 
740 	cfg = &CFG;
741 	aid = (fc_affected_id_t *)&scope;
742 	linkdown = 0;
743 	vlinkdown = 0;
744 	unreg_vpi = 0;
745 	update = 0;
746 	clear_all = 0;
747 
748 	if (!(port->flag & EMLXS_PORT_BOUND)) {
749 		return (0);
750 	}
751 
752 	format = aid->aff_format;
753 
754 	switch (format) {
755 	case 0:	/* Port */
756 		mask = 0x00ffffff;
757 		break;
758 
759 	case 1:	/* Area */
760 		mask = 0x00ffff00;
761 		break;
762 
763 	case 2:	/* Domain */
764 		mask = 0x00ff0000;
765 		break;
766 
767 	case 3:	/* Network */
768 		mask = 0x00000000;
769 		break;
770 
771 	case 0xfd:	/* New fabric */
772 		mask = 0x00000000;
773 		linkdown = 1;
774 		clear_all = 1;
775 		break;
776 
777 #ifdef DHCHAP_SUPPORT
778 	case 0xfe:	/* Virtual link down */
779 		mask = 0x00000000;
780 		vlinkdown = 1;
781 		break;
782 #endif /* DHCHAP_SUPPORT */
783 
784 	case 0xff:	/* link is down */
785 		mask = 0x00000000;
786 		linkdown = 1;
787 		break;
788 
789 	}
790 
791 	aff_d_id = aid->aff_d_id & mask;
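	/*
	 * Example: an RSCN with format 1 (area) and an affected D_ID of
	 * 0x012345 masks down to aff_d_id = 0x012300, so every node whose
	 * D_ID matches 0x0123xx is processed below.
	 */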
792 
793 
	/*
	 * If the link is down, then this is a hard shutdown and flush.
	 * If the link is not down, then this is a soft shutdown and flush
	 * (e.g. RSCN).
	 */
799 	if (linkdown) {
800 		mutex_enter(&EMLXS_PORT_LOCK);
801 
802 		port->flag &= EMLXS_PORT_LINKDOWN_MASK;
803 
804 		if (port->ulp_statec != FC_STATE_OFFLINE) {
805 			port->ulp_statec = FC_STATE_OFFLINE;
806 			port->prev_did = port->did;
807 			bcopy(&port->fabric_sparam, &port->prev_fabric_sparam,
808 			    sizeof (SERV_PARM));
809 			port->did = 0;
810 			update = 1;
811 		}
812 
813 		mutex_exit(&EMLXS_PORT_LOCK);
814 
815 		/* Tell ULP about it */
816 		if (update) {
817 			if (port->flag & EMLXS_PORT_BOUND) {
818 				if (port->vpi == 0) {
819 					EMLXS_MSGF(EMLXS_CONTEXT,
820 					    &emlxs_link_down_msg, NULL);
821 				}
822 
823 				if (port->ini_mode) {
824 					port->ulp_statec_cb(port->ulp_handle,
825 					    FC_STATE_OFFLINE);
826 				}
827 #ifdef SFCT_SUPPORT
828 				else if (port->tgt_mode) {
829 					emlxs_fct_link_down(port);
830 				}
831 #endif /* SFCT_SUPPORT */
832 
833 			} else {
834 				if (port->vpi == 0) {
835 					EMLXS_MSGF(EMLXS_CONTEXT,
836 					    &emlxs_link_down_msg, "*");
837 				}
838 			}
839 
840 
841 		}
842 
843 		unreg_vpi = 1;
844 
845 #ifdef DHCHAP_SUPPORT
846 		/* Stop authentication with all nodes */
847 		emlxs_dhc_auth_stop(port, NULL);
848 #endif /* DHCHAP_SUPPORT */
849 
850 		/* Flush the base node */
851 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
852 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
853 
854 		/* Flush any pending ub buffers */
855 		emlxs_ub_flush(port);
856 	}
857 #ifdef DHCHAP_SUPPORT
858 	/* virtual link down */
859 	else if (vlinkdown) {
860 		mutex_enter(&EMLXS_PORT_LOCK);
861 
862 		if (port->ulp_statec != FC_STATE_OFFLINE) {
863 			port->ulp_statec = FC_STATE_OFFLINE;
864 			update = 1;
865 		}
866 
867 		mutex_exit(&EMLXS_PORT_LOCK);
868 
869 		/* Tell ULP about it */
870 		if (update) {
871 			if (port->flag & EMLXS_PORT_BOUND) {
872 				if (port->vpi == 0) {
873 					EMLXS_MSGF(EMLXS_CONTEXT,
874 					    &emlxs_link_down_msg,
875 					    "Switch authentication failed.");
876 				}
877 
878 #ifdef SFCT_SUPPORT
879 				if (port->tgt_mode) {
880 					emlxs_fct_link_down(port);
881 
882 				} else if (port->ini_mode) {
883 					port->ulp_statec_cb(port->ulp_handle,
884 					    FC_STATE_OFFLINE);
885 				}
886 #else
887 				port->ulp_statec_cb(port->ulp_handle,
888 				    FC_STATE_OFFLINE);
889 #endif	/* SFCT_SUPPORT */
890 			} else {
891 				if (port->vpi == 0) {
892 					EMLXS_MSGF(EMLXS_CONTEXT,
893 					    &emlxs_link_down_msg,
894 					    "Switch authentication failed. *");
895 				}
896 			}
897 
898 
899 		}
900 
901 		/* Flush the base node */
902 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
903 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
904 	}
905 #endif /* DHCHAP_SUPPORT */
906 
907 	if (port->tgt_mode) {
908 		goto done;
909 	}
910 
911 	/* Set the node tags */
912 	/* We will process all nodes with this tag */
913 	rw_enter(&port->node_rwlock, RW_READER);
914 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
915 		nlp = port->node_table[i];
916 		while (nlp != NULL) {
917 			nlp->nlp_tag = 1;
918 			nlp = nlp->nlp_list_next;
919 		}
920 	}
921 	rw_exit(&port->node_rwlock);
922 
923 	if (!clear_all && (hba->flag & FC_ONLINE_MODE)) {
924 		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
925 	} else {
926 		adisc_support = 0;
927 	}
928 
929 	/* Check ADISC support level */
930 	switch (adisc_support) {
931 	case 0:	/* No support - Flush all IO to all matching nodes */
932 
933 		for (;;) {
934 			/*
935 			 * We need to hold the locks this way because
936 			 * emlxs_mb_unreg_node and the flush routines enter the
937 			 * same locks. Also, when we release the lock the list
938 			 * can change out from under us.
939 			 */
940 
941 			/* Find first node */
942 			rw_enter(&port->node_rwlock, RW_READER);
943 			action = 0;
944 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
945 				nlp = port->node_table[i];
946 				while (nlp != NULL) {
947 					if (!nlp->nlp_tag) {
948 						nlp = nlp->nlp_list_next;
949 						continue;
950 					}
951 					nlp->nlp_tag = 0;
952 
953 					/*
954 					 * Check for any device that matches
955 					 * our mask
956 					 */
957 					if ((nlp->nlp_DID & mask) == aff_d_id) {
958 						if (linkdown) {
959 							action = 1;
960 							break;
						} else { /* Must be an RSCN */
962 
963 							action = 2;
964 							break;
965 						}
966 					}
967 					nlp = nlp->nlp_list_next;
968 				}
969 
970 				if (action) {
971 					break;
972 				}
973 			}
974 			rw_exit(&port->node_rwlock);
975 
976 
977 			/* Check if nothing was found */
978 			if (action == 0) {
979 				break;
980 			} else if (action == 1) {
981 				(void) emlxs_mb_unreg_node(port, nlp,
982 				    NULL, NULL, NULL);
983 			} else if (action == 2) {
984 #ifdef DHCHAP_SUPPORT
985 				emlxs_dhc_auth_stop(port, nlp);
986 #endif /* DHCHAP_SUPPORT */
987 
				/*
				 * Close the node for any further normal IO.
				 * A PLOGI will reopen the node.
				 */
992 				emlxs_node_close(port, nlp,
993 				    hba->channel_fcp, 60);
994 				emlxs_node_close(port, nlp,
995 				    hba->channel_ip, 60);
996 
997 				/* Flush tx queue */
998 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
999 
1000 				/* Flush chip queue */
1001 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1002 			}
1003 
1004 		}
1005 
1006 		break;
1007 
1008 	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */
1009 
1010 		for (;;) {
1011 
1012 			/*
1013 			 * We need to hold the locks this way because
1014 			 * emlxs_mb_unreg_node and the flush routines enter the
1015 			 * same locks. Also, when we release the lock the list
1016 			 * can change out from under us.
1017 			 */
1018 			rw_enter(&port->node_rwlock, RW_READER);
1019 			action = 0;
1020 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1021 				nlp = port->node_table[i];
1022 				while (nlp != NULL) {
1023 					if (!nlp->nlp_tag) {
1024 						nlp = nlp->nlp_list_next;
1025 						continue;
1026 					}
1027 					nlp->nlp_tag = 0;
1028 
1029 					/*
1030 					 * Check for special FCP2 target device
1031 					 * that matches our mask
1032 					 */
1033 					if ((nlp->nlp_fcp_info &
1034 					    NLP_FCP_TGT_DEVICE) &&
					    (nlp->nlp_fcp_info &
1036 					    NLP_FCP_2_DEVICE) &&
1037 					    (nlp->nlp_DID & mask) ==
1038 					    aff_d_id) {
1039 						action = 3;
1040 						break;
1041 					}
1042 
1043 					/*
1044 					 * Check for any other device that
1045 					 * matches our mask
1046 					 */
1047 					else if ((nlp->nlp_DID & mask) ==
1048 					    aff_d_id) {
1049 						if (linkdown) {
1050 							action = 1;
1051 							break;
1052 						} else { /* Must be an RSCN */
1053 
1054 							action = 2;
1055 							break;
1056 						}
1057 					}
1058 
1059 					nlp = nlp->nlp_list_next;
1060 				}
1061 
1062 				if (action) {
1063 					break;
1064 				}
1065 			}
1066 			rw_exit(&port->node_rwlock);
1067 
1068 			/* Check if nothing was found */
1069 			if (action == 0) {
1070 				break;
1071 			} else if (action == 1) {
1072 				(void) emlxs_mb_unreg_node(port, nlp,
1073 				    NULL, NULL, NULL);
1074 			} else if (action == 2) {
1075 #ifdef DHCHAP_SUPPORT
1076 				emlxs_dhc_auth_stop(port, nlp);
1077 #endif /* DHCHAP_SUPPORT */
1078 
				/*
				 * Close the node for any further normal IO.
				 * A PLOGI will reopen the node.
				 */
1083 				emlxs_node_close(port, nlp,
1084 				    hba->channel_fcp, 60);
1085 				emlxs_node_close(port, nlp,
1086 				    hba->channel_ip, 60);
1087 
1088 				/* Flush tx queue */
1089 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1090 
1091 				/* Flush chip queue */
1092 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1093 
1094 			} else if (action == 3) {	/* FCP2 devices */
1095 				unreg_vpi = 0;
1096 
1097 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1098 					(void) emlxs_rpi_pause_notify(port,
1099 					    nlp->rpip);
1100 				}
1101 
1102 #ifdef DHCHAP_SUPPORT
1103 				emlxs_dhc_auth_stop(port, nlp);
1104 #endif /* DHCHAP_SUPPORT */
1105 
				/*
				 * Close the node for any further normal IO.
				 * An ADISC or a PLOGI will reopen the node.
				 */
1110 				emlxs_node_close(port, nlp,
1111 				    hba->channel_fcp, -1);
1112 				emlxs_node_close(port, nlp, hba->channel_ip,
1113 				    ((linkdown) ? 0 : 60));
1114 
1115 				/* Flush tx queues except for FCP ring */
1116 				(void) emlxs_tx_node_flush(port, nlp,
1117 				    &hba->chan[hba->channel_ct], 0, 0);
1118 				(void) emlxs_tx_node_flush(port, nlp,
1119 				    &hba->chan[hba->channel_els], 0, 0);
1120 				(void) emlxs_tx_node_flush(port, nlp,
1121 				    &hba->chan[hba->channel_ip], 0, 0);
1122 
1123 				/* Flush chip queues except for FCP ring */
1124 				(void) emlxs_chipq_node_flush(port,
1125 				    &hba->chan[hba->channel_ct], nlp, 0);
1126 				(void) emlxs_chipq_node_flush(port,
1127 				    &hba->chan[hba->channel_els], nlp, 0);
1128 				(void) emlxs_chipq_node_flush(port,
1129 				    &hba->chan[hba->channel_ip], nlp, 0);
1130 			}
1131 		}
1132 		break;
1133 
1134 	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */
1135 
1136 		if (!linkdown && !vlinkdown) {
1137 			break;
1138 		}
1139 
1140 		for (;;) {
1141 			/*
1142 			 * We need to hold the locks this way because
1143 			 * emlxs_mb_unreg_node and the flush routines enter the
1144 			 * same locks. Also, when we release the lock the list
1145 			 * can change out from under us.
1146 			 */
1147 			rw_enter(&port->node_rwlock, RW_READER);
1148 			action = 0;
1149 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1150 				nlp = port->node_table[i];
1151 				while (nlp != NULL) {
1152 					if (!nlp->nlp_tag) {
1153 						nlp = nlp->nlp_list_next;
1154 						continue;
1155 					}
1156 					nlp->nlp_tag = 0;
1157 
1158 					/*
1159 					 * Check for FCP target device that
1160 					 * matches our mask
1161 					 */
					if ((nlp->nlp_fcp_info &
1163 					    NLP_FCP_TGT_DEVICE) &&
1164 					    (nlp->nlp_DID & mask) ==
1165 					    aff_d_id) {
1166 						action = 3;
1167 						break;
1168 					}
1169 
1170 					/*
1171 					 * Check for any other device that
1172 					 * matches our mask
1173 					 */
1174 					else if ((nlp->nlp_DID & mask) ==
1175 					    aff_d_id) {
1176 						if (linkdown) {
1177 							action = 1;
1178 							break;
1179 						} else { /* Must be an RSCN */
1180 
1181 							action = 2;
1182 							break;
1183 						}
1184 					}
1185 
1186 					nlp = nlp->nlp_list_next;
1187 				}
1188 				if (action) {
1189 					break;
1190 				}
1191 			}
1192 			rw_exit(&port->node_rwlock);
1193 
1194 			/* Check if nothing was found */
1195 			if (action == 0) {
1196 				break;
1197 			} else if (action == 1) {
1198 				(void) emlxs_mb_unreg_node(port, nlp,
1199 				    NULL, NULL, NULL);
1200 			} else if (action == 2) {
				/*
				 * Close the node for any further normal IO.
				 * A PLOGI will reopen the node.
				 */
1205 				emlxs_node_close(port, nlp,
1206 				    hba->channel_fcp, 60);
1207 				emlxs_node_close(port, nlp,
1208 				    hba->channel_ip, 60);
1209 
1210 				/* Flush tx queue */
1211 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1212 
1213 				/* Flush chip queue */
1214 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1215 
			} else if (action == 3) {	/* FCP target devices */
1217 				unreg_vpi = 0;
1218 
1219 				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1220 					(void) emlxs_rpi_pause_notify(port,
1221 					    nlp->rpip);
1222 				}
1223 
				/*
				 * Close the node for any further normal IO.
				 * An ADISC or a PLOGI will reopen the node.
				 */
1228 				emlxs_node_close(port, nlp,
1229 				    hba->channel_fcp, -1);
1230 				emlxs_node_close(port, nlp, hba->channel_ip,
1231 				    ((linkdown) ? 0 : 60));
1232 
1233 				/* Flush tx queues except for FCP ring */
1234 				(void) emlxs_tx_node_flush(port, nlp,
1235 				    &hba->chan[hba->channel_ct], 0, 0);
1236 				(void) emlxs_tx_node_flush(port, nlp,
1237 				    &hba->chan[hba->channel_els], 0, 0);
1238 				(void) emlxs_tx_node_flush(port, nlp,
1239 				    &hba->chan[hba->channel_ip], 0, 0);
1240 
1241 				/* Flush chip queues except for FCP ring */
1242 				(void) emlxs_chipq_node_flush(port,
1243 				    &hba->chan[hba->channel_ct], nlp, 0);
1244 				(void) emlxs_chipq_node_flush(port,
1245 				    &hba->chan[hba->channel_els], nlp, 0);
1246 				(void) emlxs_chipq_node_flush(port,
1247 				    &hba->chan[hba->channel_ip], nlp, 0);
1248 			}
1249 		}
1250 
1251 		break;
1252 
1253 	}	/* switch() */
1254 
1255 done:
1256 
1257 	if (unreg_vpi) {
1258 		(void) emlxs_mb_unreg_vpi(port);
1259 	}
1260 
1261 	return (0);
1262 
1263 } /* emlxs_port_offline() */
1264 
1265 
1266 extern void
1267 emlxs_port_online(emlxs_port_t *vport)
1268 {
1269 	emlxs_hba_t *hba = vport->hba;
1270 	emlxs_port_t *port = &PPORT;
1271 	uint32_t state;
1272 	uint32_t update;
1273 	uint32_t npiv_linkup;
1274 	char topology[32];
1275 	char linkspeed[32];
1276 	char mode[32];
1277 
1278 	/*
1279 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1280 	 *    "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1281 	 */
1282 
1283 	if ((vport->vpi > 0) &&
1284 	    (!(hba->flag & FC_NPIV_ENABLED) ||
1285 	    !(hba->flag & FC_NPIV_SUPPORTED))) {
1286 		return;
1287 	}
1288 
1289 	if (!(vport->flag & EMLXS_PORT_BOUND) ||
1290 	    !(vport->flag & EMLXS_PORT_ENABLE)) {
1291 		return;
1292 	}
1293 
1294 	mutex_enter(&EMLXS_PORT_LOCK);
1295 
1296 	/* Check for mode */
1297 	if (port->tgt_mode) {
1298 		(void) strcpy(mode, ", target");
1299 	} else if (port->ini_mode) {
1300 		(void) strcpy(mode, ", initiator");
1301 	} else {
1302 		(void) strcpy(mode, "");
1303 	}
1304 
1305 	/* Check for loop topology */
1306 	if (hba->topology == TOPOLOGY_LOOP) {
1307 		state = FC_STATE_LOOP;
1308 		(void) strcpy(topology, ", loop");
1309 	} else {
1310 		state = FC_STATE_ONLINE;
1311 		(void) strcpy(topology, ", fabric");
1312 	}
1313 
1314 	/* Set the link speed */
1315 	switch (hba->linkspeed) {
1316 	case 0:
1317 		(void) strcpy(linkspeed, "Gb");
1318 		state |= FC_STATE_1GBIT_SPEED;
1319 		break;
1320 
1321 	case LA_1GHZ_LINK:
1322 		(void) strcpy(linkspeed, "1Gb");
1323 		state |= FC_STATE_1GBIT_SPEED;
1324 		break;
1325 	case LA_2GHZ_LINK:
1326 		(void) strcpy(linkspeed, "2Gb");
1327 		state |= FC_STATE_2GBIT_SPEED;
1328 		break;
1329 	case LA_4GHZ_LINK:
1330 		(void) strcpy(linkspeed, "4Gb");
1331 		state |= FC_STATE_4GBIT_SPEED;
1332 		break;
1333 	case LA_8GHZ_LINK:
1334 		(void) strcpy(linkspeed, "8Gb");
1335 		state |= FC_STATE_8GBIT_SPEED;
1336 		break;
1337 	case LA_10GHZ_LINK:
1338 		(void) strcpy(linkspeed, "10Gb");
1339 		state |= FC_STATE_10GBIT_SPEED;
1340 		break;
1341 	default:
1342 		(void) sprintf(linkspeed, "unknown(0x%x)", hba->linkspeed);
1343 		break;
1344 	}
1345 
1346 	npiv_linkup = 0;
1347 	update = 0;
1348 
1349 	if ((hba->state >= FC_LINK_UP) &&
1350 	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
1351 		update = 1;
1352 		vport->ulp_statec = state;
1353 
1354 		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1355 			hba->flag |= FC_NPIV_LINKUP;
1356 			npiv_linkup = 1;
1357 		}
1358 	}
1359 
1360 	mutex_exit(&EMLXS_PORT_LOCK);
1361 
1362 
1363 	/*
1364 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1365 	 *    "linkup_callback: update=%d vpi=%d flag=%d fc_flag=%x state=%x"
1366 	 *    "statec=%x", update, vport->vpi, npiv_linkup, hba->flag,
1367 	 *    hba->state, vport->ulp_statec);
1368 	 */
1369 
1370 	if (update) {
1371 		if (vport->flag & EMLXS_PORT_BOUND) {
1372 			if (vport->vpi == 0) {
1373 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1374 				    "%s%s%s", linkspeed, topology, mode);
1375 
1376 			} else if (npiv_linkup) {
1377 				EMLXS_MSGF(EMLXS_CONTEXT,
1378 				    &emlxs_npiv_link_up_msg, "%s%s%s",
1379 				    linkspeed, topology, mode);
1380 			}
1381 
1382 			if (vport->ini_mode) {
1383 				vport->ulp_statec_cb(vport->ulp_handle,
1384 				    state);
1385 			}
1386 #ifdef SFCT_SUPPORT
1387 			else if (vport->tgt_mode) {
1388 				emlxs_fct_link_up(vport);
1389 			}
1390 #endif /* SFCT_SUPPORT */
1391 		} else {
1392 			if (vport->vpi == 0) {
1393 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1394 				    "%s%s%s *", linkspeed, topology, mode);
1395 
1396 			} else if (npiv_linkup) {
1397 				EMLXS_MSGF(EMLXS_CONTEXT,
1398 				    &emlxs_npiv_link_up_msg, "%s%s%s *",
1399 				    linkspeed, topology, mode);
1400 			}
1401 		}
1402 
1403 		/* Check for waiting threads */
1404 		if (vport->vpi == 0) {
1405 			mutex_enter(&EMLXS_LINKUP_LOCK);
1406 			if (hba->linkup_wait_flag == TRUE) {
1407 				hba->linkup_wait_flag = FALSE;
1408 				cv_broadcast(&EMLXS_LINKUP_CV);
1409 			}
1410 			mutex_exit(&EMLXS_LINKUP_LOCK);
1411 		}
1412 
1413 		/* Flush any pending ub buffers */
1414 		emlxs_ub_flush(vport);
1415 	}
1416 
1417 	return;
1418 
1419 } /* emlxs_port_online() */
1420 
1421 
1422 /* SLI3 */
1423 extern void
1424 emlxs_linkdown(emlxs_hba_t *hba)
1425 {
1426 	emlxs_port_t *port = &PPORT;
1427 	int i;
1428 	uint32_t scope;
1429 
1430 	mutex_enter(&EMLXS_PORT_LOCK);
1431 
1432 	if (hba->state > FC_LINK_DOWN) {
1433 		HBASTATS.LinkDown++;
1434 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
1435 	}
1436 
1437 	/* Set scope */
	scope = (hba->flag & FC_NEW_FABRIC) ? 0xFDFFFFFF : 0xFFFFFFFF;
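	/*
	 * The top byte of 'scope' is the affected-ID format consumed by
	 * emlxs_port_offline(): 0xfd means a new fabric, 0xff means the
	 * link is down.
	 */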
1439 
1440 	/* Filter hba flags */
1441 	hba->flag &= FC_LINKDOWN_MASK;
1442 	hba->discovery_timer = 0;
1443 	hba->linkup_timer = 0;
1444 
1445 	mutex_exit(&EMLXS_PORT_LOCK);
1446 
1447 	for (i = 0; i < MAX_VPORTS; i++) {
1448 		port = &VPORT(i);
1449 
1450 		if (!(port->flag & EMLXS_PORT_BOUND)) {
1451 			continue;
1452 		}
1453 
1454 		(void) emlxs_port_offline(port, scope);
1455 
1456 	}
1457 
1458 	emlxs_log_link_event(port);
1459 
1460 	return;
1461 
1462 } /* emlxs_linkdown() */
1463 
1464 
1465 /* SLI3 */
1466 extern void
1467 emlxs_linkup(emlxs_hba_t *hba)
1468 {
1469 	emlxs_port_t *port = &PPORT;
1470 	emlxs_config_t *cfg = &CFG;
1471 
1472 	mutex_enter(&EMLXS_PORT_LOCK);
1473 
1474 	HBASTATS.LinkUp++;
1475 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);
1476 
1477 #ifdef MENLO_SUPPORT
1478 	if (hba->flag & FC_MENLO_MODE) {
1479 		mutex_exit(&EMLXS_PORT_LOCK);
1480 
1481 		/*
1482 		 * Trigger linkup CV and don't start linkup & discovery
1483 		 * timers
1484 		 */
1485 		mutex_enter(&EMLXS_LINKUP_LOCK);
1486 		cv_broadcast(&EMLXS_LINKUP_CV);
1487 		mutex_exit(&EMLXS_LINKUP_LOCK);
1488 
1489 		emlxs_log_link_event(port);
1490 
1491 		return;
1492 	}
1493 #endif /* MENLO_SUPPORT */
1494 
1495 	/* Set the linkup & discovery timers */
1496 	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
1497 	hba->discovery_timer =
1498 	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1499 	    cfg[CFG_DISC_TIMEOUT].current;
1500 
1501 	mutex_exit(&EMLXS_PORT_LOCK);
1502 
1503 	emlxs_log_link_event(port);
1504 
1505 	return;
1506 
1507 } /* emlxs_linkup() */
1508 
1509 
/*
 *  emlxs_reset_link
 *
 *  Description:
 *  Called to reset the link with an init_link command.
 *
 *  Returns: 0 on success; 1 on failure.
 */
1519 extern int
1520 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
1521 {
1522 	emlxs_port_t *port = &PPORT;
1523 	emlxs_config_t *cfg;
1524 	MAILBOXQ *mbq = NULL;
1525 	MAILBOX *mb = NULL;
1526 	int rval = 0;
1527 	int rc;
1528 
1529 	/*
1530 	 * Get a buffer to use for the mailbox command
1531 	 */
1532 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))
1533 	    == NULL) {
1534 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1535 		    "Unable to allocate mailbox buffer.");
1536 		rval = 1;
1537 		goto reset_link_fail;
1538 	}
1539 
1540 	if (linkup) {
1541 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1542 		    "Resetting link...");
1543 	} else {
1544 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1545 		    "Disabling link...");
1546 	}
1547 
1548 	mb = (MAILBOX *)mbq;
1549 
1550 	/* Bring link down first */
1551 	emlxs_mb_down_link(hba, mbq);
1552 
1553 #define	MBXERR_LINK_DOWN	0x33
1554 
1555 	if (wait) {
1556 		wait = MBX_WAIT;
1557 	} else {
1558 		wait = MBX_NOWAIT;
1559 	}
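	/*
	 * The down-link command may complete with MBXERR_LINK_DOWN when
	 * the link is already down; treat that status as success.
	 */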
	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1561 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
1562 	    (rc != MBXERR_LINK_DOWN)) {
1563 		rval = 1;
1564 		goto reset_link_fail;
1565 	}
1566 
1567 	if (linkup) {
1568 		/*
1569 		 * Setup and issue mailbox INITIALIZE LINK command
1570 		 */
1571 
1572 		if (wait == MBX_NOWAIT) {
1573 			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))
1574 			    == NULL) {
1575 				EMLXS_MSGF(EMLXS_CONTEXT,
1576 				    &emlxs_link_reset_failed_msg,
1577 				    "Unable to allocate mailbox buffer.");
1578 				rval = 1;
1579 				goto reset_link_fail;
1580 			}
1581 			mb = (MAILBOX *)mbq;
1582 		} else {
1583 			/* Reuse mbq from previous mbox */
1584 			mb = (MAILBOX *)mbq;
1585 		}
1586 		cfg = &CFG;
1587 
1588 		emlxs_mb_init_link(hba, mbq,
1589 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1590 
1591 		mb->un.varInitLnk.lipsr_AL_PA = 0;
1592 
1593 		/* Clear the loopback mode */
1594 		mutex_enter(&EMLXS_PORT_LOCK);
1595 		hba->flag &= ~FC_LOOPBACK_MODE;
1596 		hba->loopback_tics = 0;
1597 		mutex_exit(&EMLXS_PORT_LOCK);
1598 
		rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1600 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1601 			rval = 1;
1602 			goto reset_link_fail;
1603 		}
1604 
1605 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1606 	}
1607 
1608 reset_link_fail:
1609 
1610 	if ((wait == MBX_WAIT) && mbq) {
1611 		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
1612 	}
1613 
1614 	return (rval);
1615 } /* emlxs_reset_link() */
1616 
1617 
1618 extern int
1619 emlxs_online(emlxs_hba_t *hba)
1620 {
1621 	emlxs_port_t *port = &PPORT;
1622 	int32_t rval = 0;
1623 	uint32_t i = 0;
1624 
1625 	/* Make sure adapter is offline or exit trying (30 seconds) */
1626 	while (i++ < 30) {
1627 		/* Check if adapter is already going online */
1628 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1629 			return (0);
1630 		}
1631 
1632 		mutex_enter(&EMLXS_PORT_LOCK);
1633 
1634 		/* Check again */
1635 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1636 			mutex_exit(&EMLXS_PORT_LOCK);
1637 			return (0);
1638 		}
1639 
1640 		/* Check if adapter is offline */
1641 		if (hba->flag & FC_OFFLINE_MODE) {
1642 			/* Mark it going online */
1643 			hba->flag &= ~FC_OFFLINE_MODE;
1644 			hba->flag |= FC_ONLINING_MODE;
1645 
1646 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1647 			mutex_exit(&EMLXS_PORT_LOCK);
1648 			break;
1649 		}
1650 
1651 		mutex_exit(&EMLXS_PORT_LOCK);
1652 
1653 		DELAYMS(1000);
1654 	}
1655 
1656 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1657 	    "Going online...");
1658 
	if ((rval = EMLXS_SLI_ONLINE(hba))) {
1660 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
1661 		    rval);
1662 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1663 
1664 		/* Set FC_OFFLINE_MODE */
1665 		mutex_enter(&EMLXS_PORT_LOCK);
1666 		emlxs_diag_state = DDI_OFFDI;
1667 		hba->flag |= FC_OFFLINE_MODE;
1668 		hba->flag &= ~FC_ONLINING_MODE;
1669 		mutex_exit(&EMLXS_PORT_LOCK);
1670 
1671 		return (rval);
1672 	}
1673 
1674 	/* Start the timer */
1675 	emlxs_timer_start(hba);
1676 
1677 	/* Set FC_ONLINE_MODE */
1678 	mutex_enter(&EMLXS_PORT_LOCK);
1679 	emlxs_diag_state = DDI_ONDI;
1680 	hba->flag |= FC_ONLINE_MODE;
1681 	hba->flag &= ~FC_ONLINING_MODE;
1682 	mutex_exit(&EMLXS_PORT_LOCK);
1683 
1684 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1685 
1686 #ifdef SFCT_SUPPORT
1687 	(void) emlxs_fct_port_initialize(port);
1688 #endif /* SFCT_SUPPORT */
1689 
1690 	return (rval);
1691 
1692 } /* emlxs_online() */
1693 
1694 
1695 extern int
1696 emlxs_offline(emlxs_hba_t *hba)
1697 {
1698 	emlxs_port_t *port = &PPORT;
1699 	uint32_t i = 0;
1700 	int rval = 1;
1701 
1702 	/* Make sure adapter is online or exit trying (30 seconds) */
1703 	while (i++ < 30) {
1704 		/* Check if adapter is already going offline */
1705 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1706 			return (0);
1707 		}
1708 
1709 		mutex_enter(&EMLXS_PORT_LOCK);
1710 
1711 		/* Check again */
1712 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1713 			mutex_exit(&EMLXS_PORT_LOCK);
1714 			return (0);
1715 		}
1716 
1717 		/* Check if adapter is online */
1718 		if (hba->flag & FC_ONLINE_MODE) {
1719 			/* Mark it going offline */
1720 			hba->flag &= ~FC_ONLINE_MODE;
1721 			hba->flag |= FC_OFFLINING_MODE;
1722 
1723 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1724 			mutex_exit(&EMLXS_PORT_LOCK);
1725 			break;
1726 		}
1727 
1728 		mutex_exit(&EMLXS_PORT_LOCK);
1729 
1730 		DELAYMS(1000);
1731 	}
1732 
1733 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1734 	    "Going offline...");
1735 
1736 	if (port->ini_mode) {
1737 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1738 			(void) emlxs_fcf_shutdown_notify(port, 1);
1739 		} else {
1740 			emlxs_linkdown(hba);
1741 		}
1742 	}
1743 #ifdef SFCT_SUPPORT
1744 	else {
1745 		(void) emlxs_fct_port_shutdown(port);
1746 	}
1747 #endif /* SFCT_SUPPORT */
1748 
1749 	/* Check if adapter was shutdown */
1750 	if (hba->flag & FC_HARDWARE_ERROR) {
1751 		/*
1752 		 * Force mailbox cleanup
1753 		 * This will wake any sleeping or polling threads
1754 		 */
1755 		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1756 	}
1757 
1758 	/* Pause here for the IO to settle */
1759 	delay(drv_usectohz(1000000));	/* 1 sec */
1760 
1761 	/* Unregister all nodes */
1762 	emlxs_ffcleanup(hba);
1763 
1764 	if (hba->bus_type == SBUS_FC) {
1765 		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
1766 #ifdef FMA_SUPPORT
1767 		/* Access handle validation */
1768 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
1769 #endif  /* FMA_SUPPORT */
1770 	}
1771 
1772 	/* Stop the timer */
1773 	emlxs_timer_stop(hba);
1774 
1775 	/* For safety flush every iotag list */
1776 	if (emlxs_iotag_flush(hba)) {
1777 		/* Pause here for the IO to flush */
1778 		delay(drv_usectohz(1000));
1779 	}
1780 
1781 	/* Wait for poll command request to settle */
1782 	while (hba->io_poll_count > 0) {
1783 		delay(drv_usectohz(2000000));   /* 2 sec */
1784 	}
1785 
1786 	/* Shutdown the adapter interface */
1787 	EMLXS_SLI_OFFLINE(hba);
1788 
1789 	mutex_enter(&EMLXS_PORT_LOCK);
1790 	hba->flag |= FC_OFFLINE_MODE;
1791 	hba->flag &= ~FC_OFFLINING_MODE;
1792 	emlxs_diag_state = DDI_OFFDI;
1793 	mutex_exit(&EMLXS_PORT_LOCK);
1794 
1795 	rval = 0;
1796 
1797 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1798 
1799 done:
1800 
1801 	return (rval);
1802 
1803 } /* emlxs_offline() */
1804 
1805 
1806 
1807 extern int
1808 emlxs_power_down(emlxs_hba_t *hba)
1809 {
1810 #ifdef FMA_SUPPORT
1811 	emlxs_port_t *port = &PPORT;
1812 #endif  /* FMA_SUPPORT */
1813 	int32_t rval = 0;
1814 
1815 	if ((rval = emlxs_offline(hba))) {
1816 		return (rval);
1817 	}
1818 	EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);
1819 
1820 
1821 #ifdef FMA_SUPPORT
1822 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
1823 	    != DDI_FM_OK) {
1824 		EMLXS_MSGF(EMLXS_CONTEXT,
1825 		    &emlxs_invalid_access_handle_msg, NULL);
1826 		return (1);
1827 	}
1828 #endif  /* FMA_SUPPORT */
1829 
1830 	return (0);
1831 
} /* emlxs_power_down() */
1833 
1834 
1835 extern int
1836 emlxs_power_up(emlxs_hba_t *hba)
1837 {
1838 #ifdef FMA_SUPPORT
1839 	emlxs_port_t *port = &PPORT;
1840 #endif  /* FMA_SUPPORT */
1841 	int32_t rval = 0;
1842 
1843 
1844 #ifdef FMA_SUPPORT
1845 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
1846 	    != DDI_FM_OK) {
1847 		EMLXS_MSGF(EMLXS_CONTEXT,
1848 		    &emlxs_invalid_access_handle_msg, NULL);
1849 		return (1);
1850 	}
1851 #endif  /* FMA_SUPPORT */
1852 
1853 	/* Bring adapter online */
1854 	if ((rval = emlxs_online(hba))) {
1855 		if (hba->pci_cap_offset[PCI_CAP_ID_PM]) {
1856 			/* Put chip in D3 state */
1857 			(void) ddi_put8(hba->pci_acc_handle,
1858 			    (uint8_t *)(hba->pci_addr +
1859 			    hba->pci_cap_offset[PCI_CAP_ID_PM] +
1860 			    PCI_PMCSR),
1861 			    (uint8_t)PCI_PMCSR_D3HOT);
1862 		}
1863 		return (rval);
1864 	}
1865 
1866 	return (rval);
1867 
} /* emlxs_power_up() */
1869 
1870 
1871 /*
1872  *
1873  * NAME:     emlxs_ffcleanup
1874  *
1875  * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
1876  *
1877  * EXECUTION ENVIRONMENT: process only
1878  *
1879  * CALLED FROM: CFG_TERM
1880  *
1881  * INPUT: hba       - pointer to the dev_ctl area.
1882  *
1883  * RETURNS: none
1884  */
1885 extern void
1886 emlxs_ffcleanup(emlxs_hba_t *hba)
1887 {
1888 	emlxs_port_t *port = &PPORT;
1889 	uint32_t i;
1890 
1891 	/* Disable all but the mailbox interrupt */
1892 	EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);
1893 
1894 	/* Make sure all port nodes are destroyed */
1895 	for (i = 0; i < MAX_VPORTS; i++) {
1896 		port = &VPORT(i);
1897 
1898 		if (port->node_count) {
1899 			(void) emlxs_mb_unreg_node(port, 0, 0, 0, 0);
1900 		}
1901 	}
1902 
1903 	/* Clear all interrupt enable conditions */
1904 	EMLXS_SLI_DISABLE_INTR(hba, 0);
1905 
1906 	return;
1907 
1908 } /* emlxs_ffcleanup() */
1909 
1910 
1911 extern uint16_t
1912 emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
1913 {
1914 	emlxs_hba_t *hba;
1915 	emlxs_port_t *port;
1916 	uint16_t iotag;
1917 	uint32_t i;
1918 
1919 	hba = cp->hba;
1920 
1921 	mutex_enter(&EMLXS_FCTAB_LOCK);
1922 
1923 	if (sbp->iotag != 0) {
1924 		port = &PPORT;
1925 
1926 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1927 		    "Pkt already registered! channel=%d iotag=%d sbp=%p",
1928 		    sbp->channel, sbp->iotag, sbp);
1929 	}
1930 
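
	/*
	 * Allocate the next iotag with a simple rotor: scan at most
	 * max_iotag slots starting at fc_iotag (wrapping back to 1, since
	 * iotag 0 means unregistered) and claim the first slot that is
	 * empty or holds a STALE_PACKET.
	 */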
1931 	iotag = 0;
1932 	for (i = 0; i < hba->max_iotag; i++) {
1933 		if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
1934 			hba->fc_iotag = 1;
1935 		}
1936 		iotag = hba->fc_iotag++;
1937 
1938 		if (hba->fc_table[iotag] == 0 ||
1939 		    hba->fc_table[iotag] == STALE_PACKET) {
1940 			hba->io_count++;
1941 			hba->fc_table[iotag] = sbp;
1942 
1943 			sbp->iotag = iotag;
1944 			sbp->channel = cp;
1945 
1946 			break;
1947 		}
1948 		iotag = 0;
1949 	}
1950 
1951 	mutex_exit(&EMLXS_FCTAB_LOCK);
1952 
1953 	/*
1954 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1955 	 *    "emlxs_register_pkt: channel=%d iotag=%d sbp=%p",
1956 	 *    cp->channelno, iotag, sbp);
1957 	 */
1958 
1959 	return (iotag);
1960 
1961 } /* emlxs_register_pkt() */
1962 
1963 
1964 
1965 extern emlxs_buf_t *
1966 emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
1967 {
1968 	emlxs_hba_t *hba;
1969 	emlxs_buf_t *sbp;
1970 
1971 	sbp = NULL;
1972 	hba = cp->hba;
1973 
1974 	/* Check the iotag range */
1975 	if ((iotag == 0) || (iotag >= hba->max_iotag)) {
1976 		return (NULL);
1977 	}
1978 
1979 	/* Remove the sbp from the table */
1980 	mutex_enter(&EMLXS_FCTAB_LOCK);
1981 	sbp = hba->fc_table[iotag];
1982 
1983 	if (!sbp || (sbp == STALE_PACKET)) {
1984 		mutex_exit(&EMLXS_FCTAB_LOCK);
1985 		return (sbp);
1986 	}
1987 
1988 	hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
1989 	hba->io_count--;
1990 	sbp->iotag = 0;
1991 
1992 	mutex_exit(&EMLXS_FCTAB_LOCK);
1993 
1994 
1995 	/* Clean up the sbp */
1996 	mutex_enter(&sbp->mtx);
1997 
1998 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
1999 		sbp->pkt_flags &= ~PACKET_IN_TXQ;
2000 		hba->channel_tx_count--;
2001 	}
2002 
2003 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
2004 		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
2005 	}
2006 
2007 	if (sbp->bmp) {
2008 		emlxs_mem_put(hba, MEM_BPL, (void *)sbp->bmp);
2009 		sbp->bmp = 0;
2010 	}
2011 
2012 	mutex_exit(&sbp->mtx);
2013 
2014 	return (sbp);
2015 
2016 } /* emlxs_unregister_pkt() */
2017 
2018 
2019 
2020 /* Flush all IO's to all nodes for a given IO Channel */
2021 extern uint32_t
2022 emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
2023 {
2024 	emlxs_port_t *port = &PPORT;
2025 	emlxs_buf_t *sbp;
2026 	IOCBQ *iocbq;
2027 	IOCBQ *next;
2028 	IOCB *iocb;
2029 	uint32_t channelno;
2030 	Q abort;
2031 	NODELIST *ndlp;
2032 	IOCB *icmd;
2033 	MATCHMAP *mp;
2034 	uint32_t i;
2035 	uint8_t flag[MAX_CHANNEL];
2036 
2037 	channelno = cp->channelno;
2038 	bzero((void *)&abort, sizeof (Q));
2039 	bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));
2040 
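	/*
	 * Flush in two phases: first, while holding the TX channel lock,
	 * splice every iocb from each node's priority and normal tx queues
	 * onto a local 'abort' queue and mark each packet PACKET_IN_FLUSH;
	 * then, after dropping the lock, complete or free each iocb.
	 */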
2041 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2042 
2043 	/* While a node needs servicing */
2044 	while (cp->nodeq.q_first) {
2045 		ndlp = (NODELIST *) cp->nodeq.q_first;
2046 
2047 		/* Check if priority queue is not empty */
2048 		if (ndlp->nlp_ptx[channelno].q_first) {
2049 			/* Transfer all iocb's to local queue */
2050 			if (abort.q_first == 0) {
2051 				abort.q_first =
2052 				    ndlp->nlp_ptx[channelno].q_first;
2053 			} else {
2054 				((IOCBQ *)abort.q_last)->next =
2055 				    (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
2056 			}
2057 			flag[channelno] = 1;
2058 
2059 			abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2060 			abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2061 		}
2062 
2063 		/* Check if tx queue is not empty */
2064 		if (ndlp->nlp_tx[channelno].q_first) {
2065 			/* Transfer all iocb's to local queue */
2066 			if (abort.q_first == 0) {
2067 				abort.q_first = ndlp->nlp_tx[channelno].q_first;
2068 			} else {
2069 				((IOCBQ *)abort.q_last)->next =
2070 				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2071 			}
2072 
2073 			abort.q_last = ndlp->nlp_tx[channelno].q_last;
2074 			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2075 		}
2076 
2077 		/* Clear the queue pointers */
2078 		ndlp->nlp_ptx[channelno].q_first = NULL;
2079 		ndlp->nlp_ptx[channelno].q_last = NULL;
2080 		ndlp->nlp_ptx[channelno].q_cnt = 0;
2081 
2082 		ndlp->nlp_tx[channelno].q_first = NULL;
2083 		ndlp->nlp_tx[channelno].q_last = NULL;
2084 		ndlp->nlp_tx[channelno].q_cnt = 0;
2085 
2086 		/* Remove node from service queue */
2087 
2088 		/* If this is the last node on list */
2089 		if (cp->nodeq.q_last == (void *)ndlp) {
2090 			cp->nodeq.q_last = NULL;
2091 			cp->nodeq.q_first = NULL;
2092 			cp->nodeq.q_cnt = 0;
2093 		} else {
2094 			/* Remove node from head */
2095 			cp->nodeq.q_first = ndlp->nlp_next[channelno];
2096 			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
2097 			    cp->nodeq.q_first;
2098 			cp->nodeq.q_cnt--;
2099 		}
2100 
2101 		/* Clear node */
2102 		ndlp->nlp_next[channelno] = NULL;
2103 	}
2104 
2105 	/* First cleanup the iocb's while still holding the lock */
2106 	iocbq = (IOCBQ *) abort.q_first;
2107 	while (iocbq) {
2108 		/* Free the IoTag and the bmp */
2109 		iocb = &iocbq->iocb;
2110 
2111 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2112 			sbp = iocbq->sbp;
2113 			if (sbp) {
2114 				emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
2115 			}
2116 		} else {
2117 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2118 			    iocb->ULPIOTAG, 0);
2119 		}
2120 
2121 		if (sbp && (sbp != STALE_PACKET)) {
2122 			mutex_enter(&sbp->mtx);
2123 
2124 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2125 			/*
2126 			 * If the fpkt is already set, then we will leave it
2127 			 * alone. This ensures that this pkt is only accounted
2128 			 * for on one fpkt->flush_count
2129 			 */
2130 			if (!sbp->fpkt && fpkt) {
2131 				mutex_enter(&fpkt->mtx);
2132 				sbp->fpkt = fpkt;
2133 				fpkt->flush_count++;
2134 				mutex_exit(&fpkt->mtx);
2135 			}
2136 
2137 			mutex_exit(&sbp->mtx);
2138 		}
2139 
2140 		iocbq = (IOCBQ *)iocbq->next;
2141 	}	/* end of while */
2142 
2143 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2144 
2145 	/* Now abort the iocb's */
2146 	iocbq = (IOCBQ *)abort.q_first;
2147 	while (iocbq) {
2148 		/* Save the next iocbq for now */
2149 		next = (IOCBQ *)iocbq->next;
2150 
2151 		/* Unlink this iocbq */
2152 		iocbq->next = NULL;
2153 
2154 		/* Get the pkt */
2155 		sbp = (emlxs_buf_t *)iocbq->sbp;
2156 
2157 		if (sbp) {
2158 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2159 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2160 
2161 			if (hba->state >= FC_LINK_UP) {
2162 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2163 				    IOERR_ABORT_REQUESTED, 1);
2164 			} else {
2165 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2166 				    IOERR_LINK_DOWN, 1);
2167 			}
2168 
2169 		}
2170 		/* Free the iocb and its associated buffers */
2171 		else {
2172 			icmd = &iocbq->iocb;
2173 
2174 			/* SLI3 */
2175 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2176 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2177 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2178 				if ((hba->flag &
2179 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2180 					/* HBA is detaching or offlining */
2181 					if (icmd->ULPCOMMAND !=
2182 					    CMD_QUE_RING_LIST64_CN) {
2183 						void	*tmp;
2184 						RING *rp;
2185 
2186 						rp = &hba->sli.sli3.
2187 						    ring[channelno];
2188 						for (i = 0;
2189 						    i < icmd->ULPBDECOUNT;
2190 						    i++) {
2191 							mp = EMLXS_GET_VADDR(
2192 							    hba, rp, icmd);
2193 
2194 							tmp = (void *)mp;
2195 							if (mp) {
2196 							emlxs_mem_put(
2197 							    hba, MEM_BUF, tmp);
2198 							}
2199 						}
2200 					}
2201 
2202 					emlxs_mem_put(hba, MEM_IOCB,
2203 					    (void *)iocbq);
2204 				} else {
2205 					/* repost the unsolicited buffer */
2206 					EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
2207 					    iocbq);
2208 				}
2209 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2210 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2211 
2212 				emlxs_tx_put(iocbq, 1);
2213 			}
2214 		}
2215 
2216 		iocbq = next;
2217 
2218 	}	/* end of while */
2219 
2220 	/* Now trigger channel service */
2221 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2222 		if (!flag[channelno]) {
2223 			continue;
2224 		}
2225 
2226 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2227 	}
2228 
2229 	return (abort.q_cnt);
2230 
2231 } /* emlxs_tx_channel_flush() */
2232 
2233 
2234 /* Flush all IO's on all or a given ring for a given node */
2235 extern uint32_t
2236 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
2237     uint32_t shutdown, emlxs_buf_t *fpkt)
2238 {
2239 	emlxs_hba_t *hba = HBA;
2240 	emlxs_buf_t *sbp;
2241 	uint32_t channelno;
2242 	CHANNEL *cp;
2243 	IOCB *icmd;
2244 	IOCBQ *iocbq;
2245 	NODELIST *prev;
2246 	IOCBQ *next;
2247 	IOCB *iocb;
2248 	Q abort;
2249 	uint32_t i;
2250 	MATCHMAP *mp;
2251 	uint8_t flag[MAX_CHANNEL];
2252 
	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));
2254 
2255 	/* Flush all I/O's on tx queue to this target */
2256 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2257 
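	/*
	 * On a shutdown of a non-base node, mark it inactive so that
	 * emlxs_tx_put() rejects any new iocbs queued to it while the
	 * flush is in progress.
	 */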
2258 	if (!ndlp->nlp_base && shutdown) {
2259 		ndlp->nlp_active = 0;
2260 	}
2261 
2262 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2263 		cp = &hba->chan[channelno];
2264 
2265 		if (chan && cp != chan) {
2266 			continue;
2267 		}
2268 
2269 		if (!ndlp->nlp_base || shutdown) {
2270 			/* Check if priority queue is not empty */
2271 			if (ndlp->nlp_ptx[channelno].q_first) {
2272 				/* Transfer all iocb's to local queue */
2273 				if (abort.q_first == 0) {
2274 					abort.q_first =
2275 					    ndlp->nlp_ptx[channelno].q_first;
2276 				} else {
2277 					((IOCBQ *)(abort.q_last))->next =
2278 					    (IOCBQ *)ndlp->nlp_ptx[channelno].
2279 					    q_first;
2280 				}
2281 
2282 				flag[channelno] = 1;
2283 
2284 				abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2285 				abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2286 			}
2287 		}
2288 
2289 		/* Check if tx queue is not empty */
2290 		if (ndlp->nlp_tx[channelno].q_first) {
2291 
2292 			/* Transfer all iocb's to local queue */
2293 			if (abort.q_first == 0) {
2294 				abort.q_first = ndlp->nlp_tx[channelno].q_first;
2295 			} else {
2296 				((IOCBQ *)abort.q_last)->next =
2297 				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2298 			}
2299 
2300 			abort.q_last = ndlp->nlp_tx[channelno].q_last;
2301 			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2302 		}
2303 
2304 		/* Clear the queue pointers */
2305 		ndlp->nlp_ptx[channelno].q_first = NULL;
2306 		ndlp->nlp_ptx[channelno].q_last = NULL;
2307 		ndlp->nlp_ptx[channelno].q_cnt = 0;
2308 
2309 		ndlp->nlp_tx[channelno].q_first = NULL;
2310 		ndlp->nlp_tx[channelno].q_last = NULL;
2311 		ndlp->nlp_tx[channelno].q_cnt = 0;
2312 
2313 		/* If this node was on the channel queue, remove it */
2314 		if (ndlp->nlp_next[channelno]) {
2315 			/* If this is the only node on list */
2316 			if (cp->nodeq.q_first == (void *)ndlp &&
2317 			    cp->nodeq.q_last == (void *)ndlp) {
2318 				cp->nodeq.q_last = NULL;
2319 				cp->nodeq.q_first = NULL;
2320 				cp->nodeq.q_cnt = 0;
2321 			} else if (cp->nodeq.q_first == (void *)ndlp) {
2322 				cp->nodeq.q_first = ndlp->nlp_next[channelno];
2323 				((NODELIST *) cp->nodeq.q_last)->
2324 				    nlp_next[channelno] = cp->nodeq.q_first;
2325 				cp->nodeq.q_cnt--;
2326 			} else {
2327 				/*
				 * This is a little more difficult: find the
				 * previous node in the circular channel queue.
2330 				 */
2331 				prev = ndlp;
2332 				while (prev->nlp_next[channelno] != ndlp) {
2333 					prev = prev->nlp_next[channelno];
2334 				}
2335 
2336 				prev->nlp_next[channelno] =
2337 				    ndlp->nlp_next[channelno];
2338 
2339 				if (cp->nodeq.q_last == (void *)ndlp) {
2340 					cp->nodeq.q_last = (void *)prev;
2341 				}
2342 				cp->nodeq.q_cnt--;
2343 
2344 			}
2345 
2346 			/* Clear node */
2347 			ndlp->nlp_next[channelno] = NULL;
2348 		}
2349 
2350 	}
2351 
2352 	/* First cleanup the iocb's while still holding the lock */
2353 	iocbq = (IOCBQ *) abort.q_first;
2354 	while (iocbq) {
2355 		/* Free the IoTag and the bmp */
2356 		iocb = &iocbq->iocb;
2357 
2358 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2359 			sbp = iocbq->sbp;
2360 			if (sbp) {
2361 				emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
2362 			}
2363 		} else {
2364 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2365 			    iocb->ULPIOTAG, 0);
2366 		}
2367 
2368 		if (sbp && (sbp != STALE_PACKET)) {
2369 			mutex_enter(&sbp->mtx);
2370 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2371 			/*
2372 			 * If the fpkt is already set, then we will leave it
2373 			 * alone. This ensures that this pkt is only accounted
2374 			 * for on one fpkt->flush_count
2375 			 */
2376 			if (!sbp->fpkt && fpkt) {
2377 				mutex_enter(&fpkt->mtx);
2378 				sbp->fpkt = fpkt;
2379 				fpkt->flush_count++;
2380 				mutex_exit(&fpkt->mtx);
2381 			}
2382 
2383 			mutex_exit(&sbp->mtx);
2384 		}
2385 
2386 		iocbq = (IOCBQ *) iocbq->next;
2387 
2388 	}	/* end of while */
2389 
2390 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2391 
2392 	/* Now abort the iocb's outside the locks */
2393 	iocbq = (IOCBQ *)abort.q_first;
2394 	while (iocbq) {
2395 		/* Save the next iocbq for now */
2396 		next = (IOCBQ *)iocbq->next;
2397 
2398 		/* Unlink this iocbq */
2399 		iocbq->next = NULL;
2400 
2401 		/* Get the pkt */
2402 		sbp = (emlxs_buf_t *)iocbq->sbp;
2403 
2404 		if (sbp) {
2405 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2406 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2407 
2408 			if (hba->state >= FC_LINK_UP) {
2409 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2410 				    IOERR_ABORT_REQUESTED, 1);
2411 			} else {
2412 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2413 				    IOERR_LINK_DOWN, 1);
2414 			}
2415 
2416 		}
2417 		/* Free the iocb and its associated buffers */
2418 		else {
2419 			/* CMD_CLOSE_XRI_CN should also free the memory */
2420 			icmd = &iocbq->iocb;
2421 
2422 			/* SLI3 */
2423 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2424 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2425 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2426 				if ((hba->flag &
2427 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2428 					/* HBA is detaching or offlining */
2429 					if (icmd->ULPCOMMAND !=
2430 					    CMD_QUE_RING_LIST64_CN) {
2431 						void	*tmp;
2432 						RING *rp;
2433 						int ch;
2434 
2435 						ch = ((CHANNEL *)
2436 						    iocbq->channel)->channelno;
2437 						rp = &hba->sli.sli3.ring[ch];
2438 						for (i = 0;
2439 						    i < icmd->ULPBDECOUNT;
2440 						    i++) {
2441 							mp = EMLXS_GET_VADDR(
2442 							    hba, rp, icmd);
2443 
2444 							tmp = (void *)mp;
2445 							if (mp) {
2446 							emlxs_mem_put(
2447 							    hba, MEM_BUF, tmp);
2448 							}
2449 						}
2450 					}
2451 
2452 					emlxs_mem_put(hba, MEM_IOCB,
2453 					    (void *)iocbq);
2454 				} else {
2455 					/* repost the unsolicited buffer */
2456 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2457 					    (CHANNEL *)iocbq->channel, iocbq);
2458 				}
2459 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2460 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2461 				/*
2462 				 * Resend the abort iocbq if any
2463 				 */
2464 				emlxs_tx_put(iocbq, 1);
2465 			}
2466 		}
2467 
2468 		iocbq = next;
2469 
2470 	}	/* end of while */
2471 
2472 	/* Now trigger channel service */
2473 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2474 		if (!flag[channelno]) {
2475 			continue;
2476 		}
2477 
2478 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2479 	}
2480 
2481 	return (abort.q_cnt);
2482 
2483 } /* emlxs_tx_node_flush() */
2484 
2485 
2486 /* Check for IO's on all or a given ring for a given node */
2487 extern uint32_t
2488 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
2489 {
2490 	emlxs_hba_t *hba = HBA;
2491 	uint32_t channelno;
2492 	CHANNEL *cp;
2493 	uint32_t count;
2494 
2495 	count = 0;
2496 
2497 	/* Flush all I/O's on tx queue to this target */
2498 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2499 
2500 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2501 		cp = &hba->chan[channelno];
2502 
2503 		if (chan && cp != chan) {
2504 			continue;
2505 		}
2506 
2507 		/* Check if priority queue is not empty */
2508 		if (ndlp->nlp_ptx[channelno].q_first) {
2509 			count += ndlp->nlp_ptx[channelno].q_cnt;
2510 		}
2511 
2512 		/* Check if tx queue is not empty */
2513 		if (ndlp->nlp_tx[channelno].q_first) {
2514 			count += ndlp->nlp_tx[channelno].q_cnt;
2515 		}
2516 
2517 	}
2518 
2519 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2520 
2521 	return (count);
2522 
2523 } /* emlxs_tx_node_check() */
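
/*
 * Usage sketch (hypothetical caller, for illustration only): a teardown
 * path could flush a node and then poll emlxs_tx_node_check() until its
 * transmit queues drain:
 *
 *	(void) emlxs_tx_node_flush(port, ndlp, NULL, 0, NULL);
 *	while (emlxs_tx_node_check(port, ndlp, NULL) != 0) {
 *		delay(drv_usectohz(10000));
 *	}
 */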
2524 
2525 
2526 
/* Flush all IO's on any ring for a given node's lun */
2528 extern uint32_t
2529 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2530     emlxs_buf_t *fpkt)
2531 {
2532 	emlxs_hba_t *hba = HBA;
2533 	emlxs_buf_t *sbp;
2534 	uint32_t channelno;
2535 	IOCBQ *iocbq;
2536 	IOCBQ *prev;
2537 	IOCBQ *next;
2538 	IOCB *iocb;
2539 	IOCB *icmd;
2540 	Q abort;
2541 	uint32_t i;
2542 	MATCHMAP *mp;
2543 	uint8_t flag[MAX_CHANNEL];
2544 
2545 	if (lun == EMLXS_LUN_NONE) {
2546 		return (0);
2547 	}
2548 
	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));
2550 
2551 	/* Flush I/O's on txQ to this target's lun */
2552 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2553 
2554 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2555 
2556 		/* Scan the priority queue first */
2557 		prev = NULL;
2558 		iocbq = (IOCBQ *) ndlp->nlp_ptx[channelno].q_first;
2559 
2560 		while (iocbq) {
2561 			next = (IOCBQ *)iocbq->next;
2562 			iocb = &iocbq->iocb;
2563 			sbp = (emlxs_buf_t *)iocbq->sbp;
2564 
2565 			/* Check if this IO is for our lun */
2566 			if (sbp && (sbp->lun == lun)) {
2567 				/* Remove iocb from the node's ptx queue */
2568 				if (next == 0) {
2569 					ndlp->nlp_ptx[channelno].q_last =
2570 					    (uint8_t *)prev;
2571 				}
2572 
2573 				if (prev == 0) {
2574 					ndlp->nlp_ptx[channelno].q_first =
2575 					    (uint8_t *)next;
2576 				} else {
2577 					prev->next = next;
2578 				}
2579 
2580 				iocbq->next = NULL;
2581 				ndlp->nlp_ptx[channelno].q_cnt--;
2582 
2583 				/*
2584 				 * Add this iocb to our local abort Q
2585 				 */
2586 				if (abort.q_first) {
2587 					((IOCBQ *)abort.q_last)->next = iocbq;
2588 					abort.q_last = (uint8_t *)iocbq;
2589 					abort.q_cnt++;
2590 				} else {
2591 					abort.q_first = (uint8_t *)iocbq;
2592 					abort.q_last = (uint8_t *)iocbq;
2593 					abort.q_cnt = 1;
2594 				}
2595 				iocbq->next = NULL;
2596 				flag[channelno] = 1;
2597 
2598 			} else {
2599 				prev = iocbq;
2600 			}
2601 
2602 			iocbq = next;
2603 
2604 		}	/* while (iocbq) */
2605 
2606 
2607 		/* Scan the regular queue */
2608 		prev = NULL;
2609 		iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2610 
2611 		while (iocbq) {
2612 			next = (IOCBQ *)iocbq->next;
2613 			iocb = &iocbq->iocb;
2614 			sbp = (emlxs_buf_t *)iocbq->sbp;
2615 
2616 			/* Check if this IO is for our lun */
2617 			if (sbp && (sbp->lun == lun)) {
2618 				/* Remove iocb from the node's tx queue */
2619 				if (next == 0) {
2620 					ndlp->nlp_tx[channelno].q_last =
2621 					    (uint8_t *)prev;
2622 				}
2623 
2624 				if (prev == 0) {
2625 					ndlp->nlp_tx[channelno].q_first =
2626 					    (uint8_t *)next;
2627 				} else {
2628 					prev->next = next;
2629 				}
2630 
2631 				iocbq->next = NULL;
2632 				ndlp->nlp_tx[channelno].q_cnt--;
2633 
2634 				/*
2635 				 * Add this iocb to our local abort Q
2636 				 */
2637 				if (abort.q_first) {
2638 					((IOCBQ *) abort.q_last)->next = iocbq;
2639 					abort.q_last = (uint8_t *)iocbq;
2640 					abort.q_cnt++;
2641 				} else {
2642 					abort.q_first = (uint8_t *)iocbq;
2643 					abort.q_last = (uint8_t *)iocbq;
2644 					abort.q_cnt = 1;
2645 				}
2646 				iocbq->next = NULL;
2647 			} else {
2648 				prev = iocbq;
2649 			}
2650 
2651 			iocbq = next;
2652 
2653 		}	/* while (iocbq) */
2654 	}	/* for loop */
2655 
2656 	/* First cleanup the iocb's while still holding the lock */
2657 	iocbq = (IOCBQ *)abort.q_first;
2658 	while (iocbq) {
2659 		/* Free the IoTag and the bmp */
2660 		iocb = &iocbq->iocb;
2661 
2662 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2663 			sbp = iocbq->sbp;
2664 			if (sbp) {
2665 				emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
2666 			}
2667 		} else {
2668 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2669 			    iocb->ULPIOTAG, 0);
2670 		}
2671 
2672 		if (sbp && (sbp != STALE_PACKET)) {
2673 			mutex_enter(&sbp->mtx);
2674 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2675 			/*
2676 			 * If the fpkt is already set, then we will leave it
2677 			 * alone. This ensures that this pkt is only accounted
2678 			 * for on one fpkt->flush_count
2679 			 */
2680 			if (!sbp->fpkt && fpkt) {
2681 				mutex_enter(&fpkt->mtx);
2682 				sbp->fpkt = fpkt;
2683 				fpkt->flush_count++;
2684 				mutex_exit(&fpkt->mtx);
2685 			}
2686 
2687 			mutex_exit(&sbp->mtx);
2688 		}
2689 
2690 		iocbq = (IOCBQ *) iocbq->next;
2691 
2692 	}	/* end of while */
2693 
2694 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2695 
2696 	/* Now abort the iocb's outside the locks */
2697 	iocbq = (IOCBQ *)abort.q_first;
2698 	while (iocbq) {
2699 		/* Save the next iocbq for now */
2700 		next = (IOCBQ *)iocbq->next;
2701 
2702 		/* Unlink this iocbq */
2703 		iocbq->next = NULL;
2704 
2705 		/* Get the pkt */
2706 		sbp = (emlxs_buf_t *)iocbq->sbp;
2707 
2708 		if (sbp) {
2709 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2710 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2711 
2712 			if (hba->state >= FC_LINK_UP) {
2713 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2714 				    IOERR_ABORT_REQUESTED, 1);
2715 			} else {
2716 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2717 				    IOERR_LINK_DOWN, 1);
2718 			}
2719 		}
2720 
2721 		/* Free the iocb and its associated buffers */
2722 		else {
2723 			/* Should never happen! */
2724 			icmd = &iocbq->iocb;
2725 
2726 			/* SLI3 */
2727 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2728 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2729 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2730 				if ((hba->flag &
2731 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2732 					/* HBA is detaching or offlining */
2733 					if (icmd->ULPCOMMAND !=
2734 					    CMD_QUE_RING_LIST64_CN) {
2735 						void	*tmp;
2736 						RING *rp;
2737 						int ch;
2738 
2739 						ch = ((CHANNEL *)
2740 						    iocbq->channel)->channelno;
2741 						rp = &hba->sli.sli3.ring[ch];
2742 						for (i = 0;
2743 						    i < icmd->ULPBDECOUNT;
2744 						    i++) {
2745 							mp = EMLXS_GET_VADDR(
2746 							    hba, rp, icmd);
2747 
2748 							tmp = (void *)mp;
2749 							if (mp) {
2750 							emlxs_mem_put(
2751 							    hba, MEM_BUF, tmp);
2752 							}
2753 						}
2754 					}
2755 
2756 					emlxs_mem_put(hba, MEM_IOCB,
2757 					    (void *)iocbq);
2758 				} else {
2759 					/* repost the unsolicited buffer */
2760 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2761 					    (CHANNEL *)iocbq->channel, iocbq);
2762 				}
2763 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2764 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2765 				/*
2766 				 * Resend the abort iocbq if any
2767 				 */
2768 				emlxs_tx_put(iocbq, 1);
2769 			}
2770 		}
2771 
2772 		iocbq = next;
2773 
2774 	}	/* end of while */
2775 
2776 	/* Now trigger channel service */
2777 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2778 		if (!flag[channelno]) {
2779 			continue;
2780 		}
2781 
2782 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2783 	}
2784 
2785 	return (abort.q_cnt);
2786 
2787 } /* emlxs_tx_lun_flush() */
2788 
2789 
2790 extern void
2791 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
2792 {
2793 	emlxs_hba_t *hba;
2794 	emlxs_port_t *port;
2795 	uint32_t channelno;
2796 	NODELIST *nlp;
2797 	CHANNEL *cp;
2798 	emlxs_buf_t *sbp;
2799 
2800 	port = (emlxs_port_t *)iocbq->port;
2801 	hba = HBA;
2802 	cp = (CHANNEL *)iocbq->channel;
2803 	nlp = (NODELIST *)iocbq->node;
2804 	channelno = cp->channelno;
2805 	sbp = (emlxs_buf_t *)iocbq->sbp;
2806 
	/* An iocb may arrive without a node attached */
2808 	if (nlp == NULL) {
2809 		/* Set node to base node by default */
2810 		nlp = &port->node_base;
2811 
2812 		iocbq->node = (void *)nlp;
2813 
2814 		if (sbp) {
2815 			sbp->node = (void *)nlp;
2816 		}
2817 	}
2818 
2819 	if (lock) {
2820 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2821 	}
2822 
2823 	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
2824 		if (sbp) {
2825 			mutex_enter(&sbp->mtx);
2826 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2827 			mutex_exit(&sbp->mtx);
2828 
2829 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2830 				emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
2831 			} else {
2832 				(void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
2833 			}
2834 
2835 			if (lock) {
2836 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2837 			}
2838 
2839 			if (hba->state >= FC_LINK_UP) {
2840 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2841 				    IOERR_ABORT_REQUESTED, 1);
2842 			} else {
2843 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2844 				    IOERR_LINK_DOWN, 1);
2845 			}
2846 			return;
2847 		} else {
2848 			if (lock) {
2849 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2850 			}
2851 
2852 			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
2853 		}
2854 
2855 		return;
2856 	}
2857 
2858 	if (sbp) {
2859 
2860 		mutex_enter(&sbp->mtx);
2861 
2862 		if (sbp->pkt_flags &
2863 		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
2864 			mutex_exit(&sbp->mtx);
2865 			if (lock) {
2866 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2867 			}
2868 			return;
2869 		}
2870 
2871 		sbp->pkt_flags |= PACKET_IN_TXQ;
2872 		hba->channel_tx_count++;
2873 
2874 		mutex_exit(&sbp->mtx);
2875 	}
2876 
2877 
2878 	/* Check iocbq priority */
	/* Some IOCBs, such as reset/close XRI, have high priority */
2880 	if (iocbq->flag & IOCB_PRIORITY) {
2881 		/* Add the iocb to the bottom of the node's ptx queue */
2882 		if (nlp->nlp_ptx[channelno].q_first) {
2883 			((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
2884 			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
2885 			nlp->nlp_ptx[channelno].q_cnt++;
2886 		} else {
2887 			nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
2888 			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
2889 			nlp->nlp_ptx[channelno].q_cnt = 1;
2890 		}
2891 
2892 		iocbq->next = NULL;
2893 	} else {	/* Normal priority */
2894 
2895 
2896 		/* Add the iocb to the bottom of the node's tx queue */
2897 		if (nlp->nlp_tx[channelno].q_first) {
2898 			((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
2899 			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
2900 			nlp->nlp_tx[channelno].q_cnt++;
2901 		} else {
2902 			nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
2903 			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
2904 			nlp->nlp_tx[channelno].q_cnt = 1;
2905 		}
2906 
2907 		iocbq->next = NULL;
2908 	}
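
	/*
	 * Both branches append at the tail of their queue; emlxs_tx_get()
	 * always drains the ptx (priority) queue before the tx queue.
	 */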
2909 
2910 
2911 	/*
	 * Check if the node is not already on the channel queue and
	 * (is not closed or is a priority request)
	 */
2915 	if (!nlp->nlp_next[channelno] &&
2916 	    (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
2917 	    (iocbq->flag & IOCB_PRIORITY))) {
2918 		/* If so, then add it to the channel queue */
2919 		if (cp->nodeq.q_first) {
2920 			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
2921 			    (uint8_t *)nlp;
2922 			nlp->nlp_next[channelno] = cp->nodeq.q_first;
2923 
2924 			/*
2925 			 * If this is not the base node then add it
2926 			 * to the tail
2927 			 */
2928 			if (!nlp->nlp_base) {
2929 				cp->nodeq.q_last = (uint8_t *)nlp;
2930 			} else {	/* Otherwise, add it to the head */
2931 
2932 				/* The command node always gets priority */
2933 				cp->nodeq.q_first = (uint8_t *)nlp;
2934 			}
2935 
2936 			cp->nodeq.q_cnt++;
2937 		} else {
2938 			cp->nodeq.q_first = (uint8_t *)nlp;
2939 			cp->nodeq.q_last = (uint8_t *)nlp;
2940 			nlp->nlp_next[channelno] = nlp;
2941 			cp->nodeq.q_cnt = 1;
2942 		}
2943 	}
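
	/*
	 * The channel's node queue is circular through nlp_next[channelno]:
	 * q_last always points back to q_first. The base (command) node is
	 * inserted at the head so it is serviced first; all other nodes
	 * are appended at the tail.
	 */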
2944 
2945 	HBASTATS.IocbTxPut[channelno]++;
2946 
2947 	/* Adjust the channel timeout timer */
2948 	cp->timeout = hba->timer_tics + 5;
2949 
2950 	if (lock) {
2951 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2952 	}
2953 
2954 	return;
2955 
2956 } /* emlxs_tx_put() */
2957 
2958 
2959 extern IOCBQ *
2960 emlxs_tx_get(CHANNEL *cp, uint32_t lock)
2961 {
2962 	emlxs_hba_t *hba;
2963 	uint32_t channelno;
2964 	IOCBQ *iocbq;
2965 	NODELIST *nlp;
2966 	emlxs_buf_t *sbp;
2967 
2968 	hba = cp->hba;
2969 	channelno = cp->channelno;
2970 
2971 	if (lock) {
2972 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2973 	}
2974 
2975 begin:
2976 
2977 	iocbq = NULL;
2978 
2979 	/* Check if a node needs servicing */
2980 	if (cp->nodeq.q_first) {
2981 		nlp = (NODELIST *)cp->nodeq.q_first;
2982 
2983 		/* Get next iocb from node's priority queue */
2984 
2985 		if (nlp->nlp_ptx[channelno].q_first) {
2986 			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;
2987 
2988 			/* Check if this is last entry */
2989 			if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
2990 				nlp->nlp_ptx[channelno].q_first = NULL;
2991 				nlp->nlp_ptx[channelno].q_last = NULL;
2992 				nlp->nlp_ptx[channelno].q_cnt = 0;
2993 			} else {
2994 				/* Remove iocb from head */
2995 				nlp->nlp_ptx[channelno].q_first =
2996 				    (void *)iocbq->next;
2997 				nlp->nlp_ptx[channelno].q_cnt--;
2998 			}
2999 
3000 			iocbq->next = NULL;
3001 		}
3002 
3003 		/* Get next iocb from node tx queue if node not closed */
3004 		else if (nlp->nlp_tx[channelno].q_first &&
3005 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
3006 			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;
3007 
3008 			/* Check if this is last entry */
3009 			if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
3010 				nlp->nlp_tx[channelno].q_first = NULL;
3011 				nlp->nlp_tx[channelno].q_last = NULL;
3012 				nlp->nlp_tx[channelno].q_cnt = 0;
3013 			} else {
3014 				/* Remove iocb from head */
3015 				nlp->nlp_tx[channelno].q_first =
3016 				    (void *)iocbq->next;
3017 				nlp->nlp_tx[channelno].q_cnt--;
3018 			}
3019 
3020 			iocbq->next = NULL;
3021 		}
3022 
3023 		/* Now deal with node itself */
3024 
3025 		/* Check if node still needs servicing */
3026 		if ((nlp->nlp_ptx[channelno].q_first) ||
3027 		    (nlp->nlp_tx[channelno].q_first &&
3028 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3029 
3030 			/*
3031 			 * If this is the base node, then don't shift the
3032 			 * pointers. We want to drain the base node before
3033 			 * moving on
3034 			 */
3035 			if (!nlp->nlp_base) {
3036 				/*
3037 				 * Just shift channel queue pointers to next
3038 				 * node
3039 				 */
3040 				cp->nodeq.q_last = (void *)nlp;
3041 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3042 			}
3043 		} else {
3044 			/* Remove node from channel queue */
3045 
3046 			/* If this is the last node on list */
3047 			if (cp->nodeq.q_last == (void *)nlp) {
3048 				cp->nodeq.q_last = NULL;
3049 				cp->nodeq.q_first = NULL;
3050 				cp->nodeq.q_cnt = 0;
3051 			} else {
3052 				/* Remove node from head */
3053 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3054 				((NODELIST *)cp->nodeq.q_last)->
3055 				    nlp_next[channelno] = cp->nodeq.q_first;
3056 				cp->nodeq.q_cnt--;
3057 
3058 			}
3059 
3060 			/* Clear node */
3061 			nlp->nlp_next[channelno] = NULL;
3062 		}
3063 
3064 		/*
3065 		 * If no iocbq was found on this node, then it will have
3066 		 * been removed. So try again.
3067 		 */
3068 		if (!iocbq) {
3069 			goto begin;
3070 		}
3071 
3072 		sbp = (emlxs_buf_t *)iocbq->sbp;
3073 
3074 		if (sbp) {
3075 			/*
3076 			 * Check flags before we enter mutex in case this
3077 			 * has been flushed and destroyed
3078 			 */
3079 			if ((sbp->pkt_flags &
3080 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3081 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3082 				goto begin;
3083 			}
3084 
3085 			mutex_enter(&sbp->mtx);
3086 
3087 			if ((sbp->pkt_flags &
3088 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3089 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3090 				mutex_exit(&sbp->mtx);
3091 				goto begin;
3092 			}
3093 
3094 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
3095 			hba->channel_tx_count--;
3096 
3097 			mutex_exit(&sbp->mtx);
3098 		}
3099 	}
3100 
3101 	if (iocbq) {
3102 		HBASTATS.IocbTxGet[channelno]++;
3103 	}
3104 
3105 	/* Adjust the ring timeout timer */
3106 	cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
3107 
3108 	if (lock) {
3109 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3110 	}
3111 
3112 	return (iocbq);
3113 
3114 } /* emlxs_tx_get() */
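
/*
 * Pairing sketch (illustrative only): a channel service routine would
 * typically drain the queue that emlxs_tx_put() fills:
 *
 *	IOCBQ *iocbq;
 *
 *	while ((iocbq = emlxs_tx_get(cp, 1)) != NULL) {
 *		... issue iocbq to the adapter ...
 *	}
 */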
3115 
3116 
3117 /*
 * Move all FCP commands for ndlp from from_chan's txq to to_chan's txq.
 * The old IoTag has to be released and a new one allocated; everything
 * else is unchanged.
 * The TX_CHANNEL lock must be held by the caller unless 'lock' is set.
3122  */
3123 extern void
3124 emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
3125     uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
3126 {
3127 	emlxs_hba_t *hba;
3128 	emlxs_port_t *port;
3129 	uint32_t fchanno, tchanno, i;
3130 
3131 	IOCBQ *iocbq;
3132 	IOCBQ *prev;
3133 	IOCBQ *next;
3134 	IOCB *iocb, *icmd;
3135 	Q tbm;		/* To Be Moved Q */
3136 	MATCHMAP *mp;
3137 
3138 	NODELIST *nlp = ndlp;
3139 	emlxs_buf_t *sbp;
3140 
3141 	NODELIST *n_prev = NULL;
3142 	NODELIST *n_next = NULL;
3143 	uint16_t count = 0;
3144 
3145 	hba = from_chan->hba;
3146 	port = &PPORT;
3147 	cmd = cmd; /* To pass lint */
3148 
3149 	fchanno = from_chan->channelno;
3150 	tchanno = to_chan->channelno;
3151 
3152 	if (lock) {
3153 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3154 	}
3155 
3156 	bzero((void *)&tbm, sizeof (Q));
3157 
3158 	/* Scan the ndlp's fchanno txq to get the iocb of fcp cmd */
3159 	prev = NULL;
3160 	iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;
3161 
3162 	while (iocbq) {
3163 		next = (IOCBQ *)iocbq->next;
3164 		/* Check if this iocb is fcp cmd */
3165 		iocb = &iocbq->iocb;
3166 
3167 		switch (iocb->ULPCOMMAND) {
3168 		/* FCP commands */
3169 		case CMD_FCP_ICMND_CR:
3170 		case CMD_FCP_ICMND_CX:
3171 		case CMD_FCP_IREAD_CR:
3172 		case CMD_FCP_IREAD_CX:
3173 		case CMD_FCP_IWRITE_CR:
3174 		case CMD_FCP_IWRITE_CX:
3175 		case CMD_FCP_ICMND64_CR:
3176 		case CMD_FCP_ICMND64_CX:
3177 		case CMD_FCP_IREAD64_CR:
3178 		case CMD_FCP_IREAD64_CX:
3179 		case CMD_FCP_IWRITE64_CR:
3180 		case CMD_FCP_IWRITE64_CX:
3181 			/* We found a fcp cmd */
3182 			break;
3183 		default:
			/* Not an FCP cmd; move on to the next iocb */
3185 			prev = iocbq;
3186 			iocbq = next;
3187 			continue;
3188 		}
3189 
		/* Found an FCP cmd iocb in fchanno's txq; now dequeue it */
3191 		if (next == NULL) {
3192 			/* This is the last iocbq */
3193 			nlp->nlp_tx[fchanno].q_last =
3194 			    (uint8_t *)prev;
3195 		}
3196 
3197 		if (prev == NULL) {
3198 			/* This is the first one then remove it from head */
3199 			nlp->nlp_tx[fchanno].q_first =
3200 			    (uint8_t *)next;
3201 		} else {
3202 			prev->next = next;
3203 		}
3204 
3205 		iocbq->next = NULL;
3206 		nlp->nlp_tx[fchanno].q_cnt--;
3207 
		/* Add this iocb to our local to-be-moved queue */
		/* This way we do not hold the TX_CHANNEL lock too long */
3210 
3211 		if (tbm.q_first) {
3212 			((IOCBQ *)tbm.q_last)->next = iocbq;
3213 			tbm.q_last = (uint8_t *)iocbq;
3214 			tbm.q_cnt++;
3215 		} else {
3216 			tbm.q_first = (uint8_t *)iocbq;
3217 			tbm.q_last = (uint8_t *)iocbq;
3218 			tbm.q_cnt = 1;
3219 		}
3220 
3221 		iocbq = next;
3222 
3223 	}	/* While (iocbq) */
3224 
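	/*
	 * If any FCP iocbs were moved to the FCP channel, repair
	 * from_chan's circular node queue: rotate past this node if its
	 * txq still has entries, or unlink the node if its txq is now
	 * empty.
	 */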
3225 	if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {
3226 
		/* from_chan->nodeq.q_first must be non-NULL */
3228 		if (from_chan->nodeq.q_first) {
3229 
3230 			/* nodeq is not empty, now deal with the node itself */
3231 			if ((nlp->nlp_tx[fchanno].q_first)) {
3232 
3233 				if (!nlp->nlp_base) {
3234 					from_chan->nodeq.q_last =
3235 					    (void *)nlp;
3236 					from_chan->nodeq.q_first =
3237 					    nlp->nlp_next[fchanno];
3238 				}
3239 
3240 			} else {
3241 				n_prev = (NODELIST *)from_chan->nodeq.q_first;
3242 				count = from_chan->nodeq.q_cnt;
3243 
3244 				if (n_prev == nlp) {
3245 
3246 					/* If this is the only node on list */
3247 					if (from_chan->nodeq.q_last ==
3248 					    (void *)nlp) {
3249 						from_chan->nodeq.q_last =
3250 						    NULL;
3251 						from_chan->nodeq.q_first =
3252 						    NULL;
3253 						from_chan->nodeq.q_cnt = 0;
3254 					} else {
3255 						from_chan->nodeq.q_first =
3256 						    nlp->nlp_next[fchanno];
3257 						((NODELIST *)from_chan->
3258 						    nodeq.q_last)->
3259 						    nlp_next[fchanno] =
3260 						    from_chan->nodeq.q_first;
3261 						from_chan->nodeq.q_cnt--;
3262 					}
3263 					/* Clear node */
3264 					nlp->nlp_next[fchanno] = NULL;
3265 				} else {
3266 					count--;
3267 					do {
3268 						n_next =
3269 						    n_prev->nlp_next[fchanno];
3270 						if (n_next == nlp) {
3271 							break;
3272 						}
3273 						n_prev = n_next;
3274 					} while (count--);
3275 
					if (count != 0) {
						/* Splice this node out */
						n_prev->nlp_next[fchanno] =
						    n_next->nlp_next[fchanno];

						if (n_next == (NODELIST *)
						    from_chan->nodeq.q_last) {
							from_chan->nodeq.q_last
							    = (uint8_t *)n_prev;
						}
3299 						from_chan->nodeq.q_cnt--;
3300 						/* Clear node */
3301 						nlp->nlp_next[fchanno] =
3302 						    NULL;
3303 					}
3304 				}
3305 			}
3306 		}
3307 	}
3308 
3309 	/* Now cleanup the iocb's */
3310 	prev = NULL;
3311 	iocbq = (IOCBQ *)tbm.q_first;
3312 
3313 	while (iocbq) {
3314 
3315 		next = (IOCBQ *)iocbq->next;
3316 
3317 		/* Free the IoTag and the bmp */
3318 		iocb = &iocbq->iocb;
3319 
3320 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3321 			sbp = iocbq->sbp;
3322 			if (sbp) {
3323 				emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 1);
3324 			}
3325 		} else {
3326 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
3327 			    iocb->ULPIOTAG, 0);
3328 		}
3329 
3330 		if (sbp && (sbp != STALE_PACKET)) {
3331 			mutex_enter(&sbp->mtx);
3332 			sbp->pkt_flags |= PACKET_IN_FLUSH;
3333 
3334 			/*
3335 			 * If the fpkt is already set, then we will leave it
3336 			 * alone. This ensures that this pkt is only accounted
3337 			 * for on one fpkt->flush_count
3338 			 */
3339 			if (!sbp->fpkt && fpkt) {
3340 				mutex_enter(&fpkt->mtx);
3341 				sbp->fpkt = fpkt;
3342 				fpkt->flush_count++;
3343 				mutex_exit(&fpkt->mtx);
3344 			}
3345 			mutex_exit(&sbp->mtx);
3346 		}
3347 		iocbq = next;
3348 
3349 	}	/* end of while */
3350 
3351 	iocbq = (IOCBQ *)tbm.q_first;
3352 	while (iocbq) {
3353 		/* Save the next iocbq for now */
3354 		next = (IOCBQ *)iocbq->next;
3355 
3356 		/* Unlink this iocbq */
3357 		iocbq->next = NULL;
3358 
3359 		/* Get the pkt */
3360 		sbp = (emlxs_buf_t *)iocbq->sbp;
3361 
3362 		if (sbp) {
3363 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);
3365 
3366 			if (hba->state >= FC_LINK_UP) {
3367 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3368 				    IOERR_ABORT_REQUESTED, 1);
3369 			} else {
3370 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3371 				    IOERR_LINK_DOWN, 1);
3372 			}
3373 
3374 		}
3375 		/* Free the iocb and its associated buffers */
3376 		else {
3377 			icmd = &iocbq->iocb;
3378 
3379 			/* SLI3 */
3380 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
3381 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
3382 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
3383 				if ((hba->flag &
3384 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
3385 					/* HBA is detaching or offlining */
3386 					if (icmd->ULPCOMMAND !=
3387 					    CMD_QUE_RING_LIST64_CN) {
3388 						void *tmp;
3389 						RING *rp;
3390 						int ch;
3391 
3392 						ch = from_chan->channelno;
3393 						rp = &hba->sli.sli3.ring[ch];
3394 
3395 						for (i = 0;
3396 						    i < icmd->ULPBDECOUNT;
3397 						    i++) {
3398 							mp = EMLXS_GET_VADDR(
3399 							    hba, rp, icmd);
3400 
3401 							tmp = (void *)mp;
3402 							if (mp) {
3403 							emlxs_mem_put(
3404 							    hba,
3405 							    MEM_BUF,
3406 							    tmp);
3407 							}
3408 						}
3409 
3410 					}
3411 
3412 					emlxs_mem_put(hba, MEM_IOCB,
3413 					    (void *)iocbq);
3414 				} else {
3415 					/* repost the unsolicited buffer */
3416 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
3417 					    from_chan, iocbq);
3418 				}
3419 			}
3420 		}
3421 
3422 		iocbq = next;
3423 
3424 	}	/* end of while */
3425 
3426 	/* Now flush the chipq if any */
3427 	if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {
3428 
3429 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3430 
3431 		(void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);
3432 
3433 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3434 	}
3435 
3436 	if (lock) {
3437 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3438 	}
3439 
3440 	return;
3441 
} /* emlxs_tx_move() */
3443 
3444 
3445 extern uint32_t
3446 emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
3447     emlxs_buf_t *fpkt)
3448 {
3449 	emlxs_hba_t *hba = HBA;
3450 	emlxs_buf_t *sbp;
3451 	IOCBQ *iocbq;
3452 	IOCBQ *next;
3453 	Q abort;
3454 	CHANNEL *cp;
3455 	uint32_t channelno;
3456 	uint8_t flag[MAX_CHANNEL];
3457 	uint32_t iotag;
3458 
3459 	bzero((void *)&abort, sizeof (Q));
3460 	bzero((void *)flag, sizeof (flag));
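
	/*
	 * Unlike the tx queue flushes above, this routine scans the active
	 * iotag table for iocbs already on the chip and queues an abort
	 * (or close) XRI request for each one; those requests are then
	 * sent through the normal tx path below.
	 */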
3461 
3462 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3463 		cp = &hba->chan[channelno];
3464 
3465 		if (chan && cp != chan) {
3466 			continue;
3467 		}
3468 
3469 		mutex_enter(&EMLXS_FCTAB_LOCK);
3470 
3471 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3472 			sbp = hba->fc_table[iotag];
3473 
3474 			if (sbp && (sbp != STALE_PACKET) &&
3475 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3476 			    (sbp->node == ndlp) &&
3477 			    (sbp->channel == cp) &&
3478 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3479 				emlxs_sbp_abort_add(port, sbp, &abort, flag,
3480 				    fpkt);
3481 			}
3482 
3483 		}
3484 		mutex_exit(&EMLXS_FCTAB_LOCK);
3485 
3486 	}	/* for */
3487 
3488 	/* Now put the iocb's on the tx queue */
3489 	iocbq = (IOCBQ *)abort.q_first;
3490 	while (iocbq) {
3491 		/* Save the next iocbq for now */
3492 		next = (IOCBQ *)iocbq->next;
3493 
3494 		/* Unlink this iocbq */
3495 		iocbq->next = NULL;
3496 
3497 		/* Send this iocbq */
3498 		emlxs_tx_put(iocbq, 1);
3499 
3500 		iocbq = next;
3501 	}
3502 
3503 	/* Now trigger channel service */
3504 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3505 		if (!flag[channelno]) {
3506 			continue;
3507 		}
3508 
3509 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3510 	}
3511 
3512 	return (abort.q_cnt);
3513 
3514 } /* emlxs_chipq_node_flush() */
3515 
3516 
3517 /* Flush all IO's left on all iotag lists */
3518 extern uint32_t
3519 emlxs_iotag_flush(emlxs_hba_t *hba)
3520 {
3521 	emlxs_port_t *port = &PPORT;
3522 	emlxs_buf_t *sbp;
3523 	IOCBQ *iocbq;
3524 	IOCB *iocb;
3525 	Q abort;
3526 	CHANNEL *cp;
3527 	uint32_t channelno;
3528 	uint32_t iotag;
3529 	uint32_t count;
3530 
3531 	count = 0;
3532 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3533 		cp = &hba->chan[channelno];
3534 
3535 		bzero((void *)&abort, sizeof (Q));
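
		/*
		 * Claim every iotag on this channel: mark the fc_table slot
		 * stale, fake a LOCAL_REJECT/LINK_DOWN completion in the
		 * iocb, and chain it on a local abort queue that is handed
		 * to the channel's response thread below.
		 */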
3536 
3537 		mutex_enter(&EMLXS_FCTAB_LOCK);
3538 
3539 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3540 			sbp = hba->fc_table[iotag];
3541 
3542 			/* Check if the slot is empty */
3543 			if (!sbp || (sbp == STALE_PACKET)) {
3544 				continue;
3545 			}
3546 
3547 			/* We are building an abort list per channel */
3548 			if (sbp->channel != cp) {
3549 				continue;
3550 			}
3551 
3552 			hba->fc_table[iotag] = STALE_PACKET;
3553 			hba->io_count--;
3554 
3555 			/* Check if IO is valid */
3556 			if (!(sbp->pkt_flags & PACKET_VALID) ||
3557 			    (sbp->pkt_flags & (PACKET_ULP_OWNED|
3558 			    PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
3559 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3560 				    "iotag_flush: Invalid IO found. iotag=%x",
3561 				    iotag);
3562 
3563 				continue;
3564 			}
3565 
3566 			sbp->iotag = 0;
3567 
3568 			/* Set IOCB status */
3569 			iocbq = &sbp->iocbq;
3570 			iocb = &iocbq->iocb;
3571 
3572 			iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
3573 			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3574 			iocb->ULPLE = 1;
3575 			iocbq->next = NULL;
3576 
3577 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3578 				if (sbp->xrip) {
3579 					EMLXS_MSGF(EMLXS_CONTEXT,
3580 					    &emlxs_sli_debug_msg,
3581 					    "iotag_flush: iotag=%x sbp=%p "
3582 					    "xrip=%p state=%x flag=%x",
3583 					    iotag, sbp, sbp->xrip,
3584 					    sbp->xrip->state, sbp->xrip->flag);
3585 				} else {
3586 					EMLXS_MSGF(EMLXS_CONTEXT,
3587 					    &emlxs_sli_debug_msg,
3588 					    "iotag_flush: iotag=%x sbp=%p "
3589 					    "xrip=NULL",
3590 					    iotag, sbp);
3591 				}
3592 
3593 				emlxs_sli4_free_xri(hba, sbp, sbp->xrip, 0);
3594 			} else {
3595 				/* Clean up the sbp */
3596 				mutex_enter(&sbp->mtx);
3597 
3598 				if (sbp->pkt_flags & PACKET_IN_TXQ) {
3599 					sbp->pkt_flags &= ~PACKET_IN_TXQ;
					hba->channel_tx_count--;
3601 				}
3602 
3603 				if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3604 					sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3605 				}
3606 
3607 				if (sbp->bmp) {
3608 					emlxs_mem_put(hba, MEM_BPL,
3609 					    (void *)sbp->bmp);
3610 					sbp->bmp = 0;
3611 				}
3612 
3613 				mutex_exit(&sbp->mtx);
3614 			}
3615 
3616 			/* At this point all nodes are assumed destroyed */
3617 			mutex_enter(&sbp->mtx);
3618 			sbp->node = 0;
3619 			mutex_exit(&sbp->mtx);
3620 
3621 			/* Add this iocb to our local abort Q */
3622 			if (abort.q_first) {
3623 				((IOCBQ *)abort.q_last)->next = iocbq;
3624 				abort.q_last = (uint8_t *)iocbq;
3625 				abort.q_cnt++;
3626 			} else {
3627 				abort.q_first = (uint8_t *)iocbq;
3628 				abort.q_last = (uint8_t *)iocbq;
3629 				abort.q_cnt = 1;
3630 			}
3631 		}
3632 
3633 		mutex_exit(&EMLXS_FCTAB_LOCK);
3634 
3635 		/* Trigger deferred completion */
3636 		if (abort.q_first) {
3637 			mutex_enter(&cp->rsp_lock);
3638 			if (cp->rsp_head == NULL) {
3639 				cp->rsp_head = (IOCBQ *)abort.q_first;
3640 				cp->rsp_tail = (IOCBQ *)abort.q_last;
3641 			} else {
3642 				cp->rsp_tail->next = (IOCBQ *)abort.q_first;
3643 				cp->rsp_tail = (IOCBQ *)abort.q_last;
3644 			}
3645 			mutex_exit(&cp->rsp_lock);
3646 
3647 			emlxs_thread_trigger2(&cp->intr_thread,
3648 			    emlxs_proc_channel, cp);
3649 
3650 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3651 			    "iotag_flush: channel=%d count=%d",
3652 			    channelno, abort.q_cnt);
3653 
3654 			count += abort.q_cnt;
3655 		}
3656 	}
3657 
3658 	return (count);
3659 
3660 } /* emlxs_iotag_flush() */
3661 
3662 
3663 
3664 /* Checks for IO's on all or a given channel for a given node */
3665 extern uint32_t
3666 emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
3667 {
3668 	emlxs_hba_t *hba = HBA;
3669 	emlxs_buf_t *sbp;
3670 	CHANNEL *cp;
3671 	uint32_t channelno;
3672 	uint32_t count;
3673 	uint32_t iotag;
3674 
3675 	count = 0;
3676 
3677 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3678 		cp = &hba->chan[channelno];
3679 
3680 		if (chan && cp != chan) {
3681 			continue;
3682 		}
3683 
3684 		mutex_enter(&EMLXS_FCTAB_LOCK);
3685 
3686 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3687 			sbp = hba->fc_table[iotag];
3688 
3689 			if (sbp && (sbp != STALE_PACKET) &&
3690 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3691 			    (sbp->node == ndlp) &&
3692 			    (sbp->channel == cp) &&
3693 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3694 				count++;
3695 			}
3696 
3697 		}
3698 		mutex_exit(&EMLXS_FCTAB_LOCK);
3699 
3700 	}	/* for */
3701 
3702 	return (count);
3703 
3704 } /* emlxs_chipq_node_check() */
3705 
3706 
3707 
3708 /* Flush all IO's for a given node's lun (on any channel) */
3709 extern uint32_t
3710 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3711     uint32_t lun, emlxs_buf_t *fpkt)
3712 {
3713 	emlxs_hba_t *hba = HBA;
3714 	emlxs_buf_t *sbp;
3715 	IOCBQ *iocbq;
3716 	IOCBQ *next;
3717 	Q abort;
3718 	uint32_t iotag;
3719 	uint8_t flag[MAX_CHANNEL];
3720 	uint32_t channelno;
3721 
3722 	if (lun == EMLXS_LUN_NONE) {
3723 		return (0);
3724 	}
3725 
3726 	bzero((void *)flag, sizeof (flag));
3727 	bzero((void *)&abort, sizeof (Q));
3728 
3729 	mutex_enter(&EMLXS_FCTAB_LOCK);
3730 	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3731 		sbp = hba->fc_table[iotag];
3732 
3733 		if (sbp && (sbp != STALE_PACKET) &&
3734 		    sbp->pkt_flags & PACKET_IN_CHIPQ &&
3735 		    sbp->node == ndlp &&
3736 		    sbp->lun == lun &&
3737 		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3738 			emlxs_sbp_abort_add(port, sbp,
3739 			    &abort, flag, fpkt);
3740 		}
3741 	}
3742 	mutex_exit(&EMLXS_FCTAB_LOCK);
3743 
3744 	/* Now put the iocb's on the tx queue */
3745 	iocbq = (IOCBQ *)abort.q_first;
3746 	while (iocbq) {
3747 		/* Save the next iocbq for now */
3748 		next = (IOCBQ *)iocbq->next;
3749 
3750 		/* Unlink this iocbq */
3751 		iocbq->next = NULL;
3752 
3753 		/* Send this iocbq */
3754 		emlxs_tx_put(iocbq, 1);
3755 
3756 		iocbq = next;
3757 	}
3758 
3759 	/* Now trigger channel service */
3760 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3761 		if (!flag[channelno]) {
3762 			continue;
3763 		}
3764 
3765 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3766 	}
3767 
3768 	return (abort.q_cnt);
3769 
3770 } /* emlxs_chipq_lun_flush() */
3771 
3772 
3773 
3774 /*
3775  * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
3776  * This must be called while holding the EMLXS_FCTAB_LOCK
3777  */
3778 extern IOCBQ *
3779 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3780     uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
3781 {
3782 	emlxs_hba_t *hba = HBA;
3783 	IOCBQ *iocbq;
3784 	IOCB *iocb;
3785 	emlxs_wqe_t *wqe;
3786 	emlxs_buf_t *sbp;
3787 	uint16_t abort_iotag;
3788 
3789 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3790 		return (NULL);
3791 	}
3792 
3793 	iocbq->channel = (void *)cp;
3794 	iocbq->port = (void *)port;
3795 	iocbq->node = (void *)ndlp;
3796 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3797 
3798 	/*
3799 	 * set up an iotag using special Abort iotags
3800 	 */
	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
3802 		hba->fc_oor_iotag = hba->max_iotag;
3803 	}
3804 	abort_iotag = hba->fc_oor_iotag++;
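
	/*
	 * Abort iotags are drawn from the out-of-range pool
	 * [max_iotag, EMLXS_MAX_ABORT_TAG) so they can never collide with
	 * a normal iotag allocated from fc_table.
	 */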
3805 
3806 
3807 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3808 		wqe = &iocbq->wqe;
3809 		sbp = hba->fc_table[iotag];
3810 
3811 		/* Try to issue abort by XRI if possible */
3812 		if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
3813 			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
3814 			wqe->AbortTag = iotag;
3815 		} else {
3816 			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3817 			wqe->AbortTag = sbp->xrip->XRI;
3818 		}
3819 		wqe->un.Abort.IA = 0;
3820 		wqe->RequestTag = abort_iotag;
3821 		wqe->Command = CMD_ABORT_XRI_CX;
3822 		wqe->Class = CLASS3;
3823 		wqe->CQId = 0x3ff;
3824 		wqe->CmdType = WQE_TYPE_ABORT;
3825 	} else {
3826 		iocb = &iocbq->iocb;
3827 		iocb->ULPIOTAG = abort_iotag;
3828 		iocb->un.acxri.abortType = flag;
3829 		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3830 		iocb->un.acxri.abortIoTag = iotag;
3831 		iocb->ULPLE = 1;
3832 		iocb->ULPCLASS = class;
3833 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
3834 		iocb->ULPOWNER = OWN_CHIP;
3835 	}
3836 
3837 	return (iocbq);
3838 
3839 } /* emlxs_create_abort_xri_cn() */
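
/*
 * Usage sketch (hypothetical, mirroring emlxs_sbp_abort_add() below):
 *
 *	mutex_enter(&EMLXS_FCTAB_LOCK);
 *	iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, cp,
 *	    CLASS3, ABORT_TYPE_ABTS);
 *	mutex_exit(&EMLXS_FCTAB_LOCK);
 *
 *	if (iocbq) {
 *		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
 *	}
 */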
3840 
3841 
3842 /* This must be called while holding the EMLXS_FCTAB_LOCK */
3843 extern IOCBQ *
3844 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3845     CHANNEL *cp, uint8_t class, int32_t flag)
3846 {
3847 	emlxs_hba_t *hba = HBA;
3848 	IOCBQ *iocbq;
3849 	IOCB *iocb;
3850 	emlxs_wqe_t *wqe;
3851 	uint16_t abort_iotag;
3852 
3853 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3854 		return (NULL);
3855 	}
3856 
3857 	iocbq->channel = (void *)cp;
3858 	iocbq->port = (void *)port;
3859 	iocbq->node = (void *)ndlp;
3860 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3861 
3862 	/*
3863 	 * set up an iotag using special Abort iotags
3864 	 */
	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
3866 		hba->fc_oor_iotag = hba->max_iotag;
3867 	}
3868 	abort_iotag = hba->fc_oor_iotag++;
3869 
3870 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3871 		wqe = &iocbq->wqe;
3872 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3873 		wqe->un.Abort.IA = 0;
3874 		wqe->RequestTag = abort_iotag;
3875 		wqe->AbortTag = xid;
3876 		wqe->Command = CMD_ABORT_XRI_CX;
3877 		wqe->Class = CLASS3;
3878 		wqe->CQId = 0x3ff;
3879 		wqe->CmdType = WQE_TYPE_ABORT;
3880 	} else {
3881 		iocb = &iocbq->iocb;
3882 		iocb->ULPCONTEXT = xid;
3883 		iocb->ULPIOTAG = abort_iotag;
3884 		iocb->un.acxri.abortType = flag;
3885 		iocb->ULPLE = 1;
3886 		iocb->ULPCLASS = class;
3887 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
3888 		iocb->ULPOWNER = OWN_CHIP;
3889 	}
3890 
3891 	return (iocbq);
3892 
3893 } /* emlxs_create_abort_xri_cx() */
3894 
3895 
3896 
3897 /* This must be called while holding the EMLXS_FCTAB_LOCK */
3898 extern IOCBQ *
3899 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3900     uint16_t iotag, CHANNEL *cp)
3901 {
3902 	emlxs_hba_t *hba = HBA;
3903 	IOCBQ *iocbq;
3904 	IOCB *iocb;
3905 	emlxs_wqe_t *wqe;
3906 	emlxs_buf_t *sbp;
3907 	uint16_t abort_iotag;
3908 
3909 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3910 		return (NULL);
3911 	}
3912 
3913 	iocbq->channel = (void *)cp;
3914 	iocbq->port = (void *)port;
3915 	iocbq->node = (void *)ndlp;
3916 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3917 
3918 	/*
3919 	 * set up an iotag using special Abort iotags
3920 	 */
	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
3922 		hba->fc_oor_iotag = hba->max_iotag;
3923 	}
3924 	abort_iotag = hba->fc_oor_iotag++;
3925 
3926 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3927 		wqe = &iocbq->wqe;
3928 		sbp = hba->fc_table[iotag];
3929 
3930 		/* Try to issue close by XRI if possible */
3931 		if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
3932 			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
3933 			wqe->AbortTag = iotag;
3934 		} else {
3935 			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3936 			wqe->AbortTag = sbp->xrip->XRI;
3937 		}
3938 		wqe->un.Abort.IA = 1;
3939 		wqe->RequestTag = abort_iotag;
3940 		wqe->Command = CMD_ABORT_XRI_CX;
3941 		wqe->Class = CLASS3;
3942 		wqe->CQId = 0x3ff;
3943 		wqe->CmdType = WQE_TYPE_ABORT;
3944 	} else {
3945 		iocb = &iocbq->iocb;
3946 		iocb->ULPIOTAG = abort_iotag;
3947 		iocb->un.acxri.abortType = 0;
3948 		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3949 		iocb->un.acxri.abortIoTag = iotag;
3950 		iocb->ULPLE = 1;
3951 		iocb->ULPCLASS = 0;
3952 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
3953 		iocb->ULPOWNER = OWN_CHIP;
3954 	}
3955 
3956 	return (iocbq);
3957 
3958 } /* emlxs_create_close_xri_cn() */
3959 
3960 
3961 /* This must be called while holding the EMLXS_FCTAB_LOCK */
3962 extern IOCBQ *
3963 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3964     CHANNEL *cp)
3965 {
3966 	emlxs_hba_t *hba = HBA;
3967 	IOCBQ *iocbq;
3968 	IOCB *iocb;
3969 	emlxs_wqe_t *wqe;
3970 	uint16_t abort_iotag;
3971 
3972 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3973 		return (NULL);
3974 	}
3975 
3976 	iocbq->channel = (void *)cp;
3977 	iocbq->port = (void *)port;
3978 	iocbq->node = (void *)ndlp;
3979 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3980 
3981 	/*
3982 	 * set up an iotag using special Abort iotags
3983 	 */
	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
3985 		hba->fc_oor_iotag = hba->max_iotag;
3986 	}
3987 	abort_iotag = hba->fc_oor_iotag++;
3988 
3989 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3990 		wqe = &iocbq->wqe;
3991 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3992 		wqe->un.Abort.IA = 1;
3993 		wqe->RequestTag = abort_iotag;
3994 		wqe->AbortTag = xid;
3995 		wqe->Command = CMD_ABORT_XRI_CX;
3996 		wqe->Class = CLASS3;
3997 		wqe->CQId = 0x3ff;
3998 		wqe->CmdType = WQE_TYPE_ABORT;
3999 	} else {
4000 		iocb = &iocbq->iocb;
4001 		iocb->ULPCONTEXT = xid;
4002 		iocb->ULPIOTAG = abort_iotag;
4003 		iocb->ULPLE = 1;
4004 		iocb->ULPCLASS = 0;
4005 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
4006 		iocb->ULPOWNER = OWN_CHIP;
4007 	}
4008 
4009 	return (iocbq);
4010 
4011 } /* emlxs_create_close_xri_cx() */
4012 
4013 
4014 #ifdef SFCT_SUPPORT
4015 void
4016 emlxs_abort_fct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4017 {
4018 	CHANNEL *cp;
4019 	IOCBQ *iocbq;
4020 	IOCB *iocb;
4021 
4022 	if (rxid == 0 || rxid == 0xFFFF) {
4023 		return;
4024 	}
4025 
4026 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4027 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_detail_msg,
4028 		    "Aborting FCT exchange: xid=%x", rxid);
4029 
4030 		if (emlxs_sli4_unreserve_xri(hba, rxid, 1) == 0) {
4031 			/* We have no way to abort unsolicited exchanges */
4032 			/* that we have not responded to at this time */
4033 			/* So we will return for now */
4034 			return;
4035 		}
4036 	}
4037 
4038 	cp = &hba->chan[hba->channel_fcp];
4039 
4040 	mutex_enter(&EMLXS_FCTAB_LOCK);
4041 
4042 	/* Create the abort IOCB */
4043 	if (hba->state >= FC_LINK_UP) {
4044 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4045 		    CLASS3, ABORT_TYPE_ABTS);
4046 	} else {
4047 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4048 	}
4049 
4050 	mutex_exit(&EMLXS_FCTAB_LOCK);
4051 
4052 	if (iocbq) {
4053 		iocb = &iocbq->iocb;
4054 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_detail_msg,
4055 		    "Aborting FCT exchange: xid=%x iotag=%x", rxid,
4056 		    iocb->ULPIOTAG);
4057 
4058 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4059 	}
4060 
4061 } /* emlxs_abort_fct_exchange() */
4062 #endif /* SFCT_SUPPORT */
4063 
4064 
4065 void
4066 emlxs_close_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4067 {
4068 	CHANNEL *cp;
4069 	IOCBQ *iocbq;
4070 	IOCB *iocb;
4071 
4072 	if (rxid == 0 || rxid == 0xFFFF) {
4073 		return;
4074 	}
4075 
4076 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4077 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4078 		    "Closing ELS exchange: xid=%x", rxid);
4079 
4080 		if (emlxs_sli4_unreserve_xri(hba, rxid, 1) == 0) {
4081 			return;
4082 		}
4083 	}
4084 
4085 	cp = &hba->chan[hba->channel_els];
4086 
4087 	mutex_enter(&EMLXS_FCTAB_LOCK);
4088 
4089 	/* Create the abort IOCB */
4090 	iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4091 
4092 	mutex_exit(&EMLXS_FCTAB_LOCK);
4093 
4094 	if (iocbq) {
4095 		iocb = &iocbq->iocb;
4096 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4097 		    "Closing ELS exchange: xid=%x iotag=%x", rxid,
4098 		    iocb->ULPIOTAG);
4099 
4100 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4101 	}
4102 
4103 } /* emlxs_close_els_exchange() */
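
/*
 * Close vs. abort in the exchange helpers: a "close" always uses
 * CLOSE_XRI (SLI4: IA=1) and releases the exchange without putting an
 * ABTS on the wire; an "abort" sends ABTS only while the link is up and
 * falls back to a close when it is down.
 */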
4104 
4105 
4106 void
4107 emlxs_abort_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4108 {
4109 	CHANNEL *cp;
4110 	IOCBQ *iocbq;
4111 	IOCB *iocb;
4112 
4113 	if (rxid == 0 || rxid == 0xFFFF) {
4114 		return;
4115 	}
4116 
4117 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4118 
4119 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4120 		    "Aborting ELS exchange: xid=%x", rxid);
4121 
4122 		if (emlxs_sli4_unreserve_xri(hba, rxid, 1) == 0) {
4123 			/* We have no way to abort unsolicited exchanges */
4124 			/* that we have not responded to at this time */
4125 			/* So we will return for now */
4126 			return;
4127 		}
4128 	}
4129 
4130 	cp = &hba->chan[hba->channel_els];
4131 
4132 	mutex_enter(&EMLXS_FCTAB_LOCK);
4133 
4134 	/* Create the abort IOCB */
4135 	if (hba->state >= FC_LINK_UP) {
4136 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4137 		    CLASS3, ABORT_TYPE_ABTS);
4138 	} else {
4139 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4140 	}
4141 
4142 	mutex_exit(&EMLXS_FCTAB_LOCK);
4143 
4144 	if (iocbq) {
4145 		iocb = &iocbq->iocb;
4146 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4147 		    "Aborting ELS exchange: xid=%x iotag=%x", rxid,
4148 		    iocb->ULPIOTAG);
4149 
4150 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4151 	}
4152 
4153 } /* emlxs_abort_els_exchange() */
4154 
4155 
4156 void
4157 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4158 {
4159 	CHANNEL *cp;
4160 	IOCBQ *iocbq;
4161 	IOCB *iocb;
4162 
4163 	if (rxid == 0 || rxid == 0xFFFF) {
4164 		return;
4165 	}
4166 
4167 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4168 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4169 		    "Aborting CT exchange: xid=%x", rxid);
4170 
4171 		if (emlxs_sli4_unreserve_xri(hba, rxid, 1) == 0) {
4172 			/* We have no way to abort unsolicited exchanges */
4173 			/* that we have not responded to at this time */
4174 			/* So we will return for now */
4175 			return;
4176 		}
4177 	}
4178 
4179 	cp = &hba->chan[hba->channel_ct];
4180 
4181 	mutex_enter(&EMLXS_FCTAB_LOCK);
4182 
4183 	/* Create the abort IOCB */
4184 	if (hba->state >= FC_LINK_UP) {
4185 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4186 		    CLASS3, ABORT_TYPE_ABTS);
4187 	} else {
4188 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4189 	}
4190 
4191 	mutex_exit(&EMLXS_FCTAB_LOCK);
4192 
4193 	if (iocbq) {
4194 		iocb = &iocbq->iocb;
4195 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4196 		    "Aborting CT exchange: xid=%x iotag=%x", rxid,
4197 		    iocb->ULPIOTAG);
4198 
4199 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4200 	}
4201 
4202 } /* emlxs_abort_ct_exchange() */
4203 
4204 
4205 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4206 static void
4207 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
4208     uint8_t *flag, emlxs_buf_t *fpkt)
4209 {
4210 	emlxs_hba_t *hba = HBA;
4211 	IOCBQ *iocbq;
4212 	CHANNEL *cp;
4213 	NODELIST *ndlp;
4214 
4215 	cp = (CHANNEL *)sbp->channel;
4216 	ndlp = sbp->node;
4217 
	/* Create the abort or close XRI IOCB */
4219 	if (hba->state >= FC_LINK_UP) {
4220 		iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, cp,
4221 		    CLASS3, ABORT_TYPE_ABTS);
4222 	} else {
4223 		iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
4224 	}
4225 	/*
4226 	 * Add this iocb to our local abort Q
	 * This way we don't hold the EMLXS_FCTAB_LOCK too long
4228 	 */
4229 	if (iocbq) {
4230 		if (abort->q_first) {
4231 			((IOCBQ *)abort->q_last)->next = iocbq;
4232 			abort->q_last = (uint8_t *)iocbq;
4233 			abort->q_cnt++;
4234 		} else {
4235 			abort->q_first = (uint8_t *)iocbq;
4236 			abort->q_last = (uint8_t *)iocbq;
4237 			abort->q_cnt = 1;
4238 		}
4239 		iocbq->next = NULL;
4240 	}
4241 
4242 	/* set the flags */
4243 	mutex_enter(&sbp->mtx);
4244 
4245 	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4246 
4247 	sbp->ticks = hba->timer_tics + 10;
4248 	sbp->abort_attempts++;
4249 
4250 	flag[cp->channelno] = 1;
4251 
4252 	/*
4253 	 * If the fpkt is already set, then we will leave it alone
4254 	 * This ensures that this pkt is only accounted for on one
4255 	 * fpkt->flush_count
4256 	 */
4257 	if (!sbp->fpkt && fpkt) {
4258 		mutex_enter(&fpkt->mtx);
4259 		sbp->fpkt = fpkt;
4260 		fpkt->flush_count++;
4261 		mutex_exit(&fpkt->mtx);
4262 	}
4263 
4264 	mutex_exit(&sbp->mtx);
4265 
4266 	return;
4267 
4268 }	/* emlxs_sbp_abort_add() */
4269