emlxs_fcp.c: diff of revision 3be114ed (old) against revision 82527734 (new)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 7 unchanged lines hidden (view full) ---

 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Emulex. All rights reserved.
- * Use is subject to License terms.
+ * Use is subject to license terms.
 */

#include <emlxs.h>

/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_FCP_C);

#define EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
-    getPaddr(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));
+    PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));

static void emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
    Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
-static uint32_t emlxs_iotag_flush(emlxs_hba_t *hba);

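The getPaddr to PADDR rename above is mechanical; both rebuild a 64-bit DMA address from the two 32-bit halves stored in a buffer-descriptor entry. A minimal sketch of the idea (illustrative only; the real macro lives in the emlxs headers and its exact form may differ):

static uint64_t
ex_paddr(uint32_t addrHigh, uint32_t addrLow)
{
    /* Recombine the split 64-bit physical address of a BDE */
    return (((uint64_t)addrHigh << 32) | addrLow);
}
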
-/*
- * This routine copies data from src then potentially swaps the destination to
- * big endian. Assumes cnt is a multiple of sizeof(uint32_t).
- */
-extern void
-emlxs_pcimem_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
-{
-    uint32_t ldata;
-    int32_t i;
-
-    for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
-        ldata = *src++;
-        ldata = PCIMEM_LONG(ldata);
-        *dest++ = ldata;
-    }
-} /* emlxs_pcimem_bcopy */
-
-
-/*
- * This routine copies data from src then swaps the destination to big endian.
- * Assumes cnt is a multiple of sizeof(uint32_t).
- */
-extern void
-emlxs_swap_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
-{
-    uint32_t ldata;
-    int32_t i;
-
-    for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
-        ldata = *src++;
-        ldata = SWAP_DATA32(ldata);
-        *dest++ = ldata;
-    }
-} /* End fc_swap_bcopy */

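Both helpers deleted above implement the same word-copy-with-byteswap pattern; per their own comments, only the swap macro differs (PCIMEM_LONG swaps "potentially", that is host-endianness dependent, while SWAP_DATA32 always byte-reverses). A self-contained sketch of the pattern, assuming a generic 32-bit byte-reversal macro:

#include <stdint.h>

#define EX_BSWAP32(x)   ((((x) & 0xff000000u) >> 24) | \
                        (((x) & 0x00ff0000u) >> 8) | \
                        (((x) & 0x0000ff00u) << 8) | \
                        (((x) & 0x000000ffu) << 24))

/* Copy cnt bytes (a multiple of 4) as 32-bit words, byte-reversing each */
static void
ex_swap_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
{
    uint32_t i;

    for (i = 0; i < cnt; i += sizeof (uint32_t)) {
        uint32_t ldata = *src++;
        *dest++ = EX_BSWAP32(ldata);
    }
}
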
#define SCSI3_PERSISTENT_RESERVE_IN 0x5e
#define SCSI_INQUIRY                0x12
#define SCSI_RX_DIAG                0x1C


/*
 * emlxs_handle_fcp_event
 *
 * Description: Process an FCP Rsp Ring completion
 *
 */
/* ARGSUSED */
extern void
-emlxs_handle_fcp_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
+emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
    emlxs_port_t *port = &PPORT;
+   emlxs_config_t *cfg = &CFG;
    IOCB *cmd;
    emlxs_buf_t *sbp;
    fc_packet_t *pkt = NULL;
#ifdef SAN_DIAG_SUPPORT
    NODELIST *ndlp;
#endif
    uint32_t iostat;
    uint8_t localstat;

--- 9 unchanged lines hidden (view full) ---

    uint8_t *scsi_cmd;
    uint8_t scsi_opcode;
    uint16_t scsi_dl;
    uint32_t data_rx;

    cmd = &iocbq->iocb;

    /* Initialize the status */
-   iostat = cmd->ulpStatus;
+   iostat = cmd->ULPSTATUS;
    localstat = 0;
    scsi_status = 0;
    asc = 0;
    ascq = 0;
    sense = 0;
    check_underrun = 0;
    fix_it = 0;

    HBASTATS.FcpEvent++;

    sbp = (emlxs_buf_t *)iocbq->sbp;

    if (!sbp) {
        /* completion with missing xmit command */
        HBASTATS.FcpStray++;

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
-           "cmd=%x iotag=%x", cmd->ulpCommand, cmd->ulpIoTag);
+           "cmd=%x iotag=%x", cmd->ULPCOMMAND, cmd->ULPIOTAG);

        return;
    }

    HBASTATS.FcpCompleted++;

#ifdef SAN_DIAG_SUPPORT
    emlxs_update_sd_bucket(sbp);
#endif /* SAN_DIAG_SUPPORT */

    pkt = PRIV2PKT(sbp);

-   did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
+   did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
    scsi_cmd = (uint8_t *)pkt->pkt_cmd;
    scsi_opcode = scsi_cmd[12];
    data_rx = 0;

    /* Sync data in data buffer only on FC_PKT_FCP_READ */
    if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
-       emlxs_mpdata_sync(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
+       EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
            DDI_DMA_SYNC_FORKERNEL);

#ifdef TEST_SUPPORT
        if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
            (pkt->pkt_datalen >= 512)) {
            hba->underrun_counter--;
            iostat = IOSTAT_FCP_RSP_ERROR;

--- 11 unchanged lines hidden (view full) ---

    /* Process the pkt */
    mutex_enter(&sbp->mtx);

    /* Check for immediate return */
    if ((iostat == IOSTAT_SUCCESS) &&
        (pkt->pkt_comp) &&
        !(sbp->pkt_flags &
-       (PACKET_RETURNED | PACKET_COMPLETED |
+       (PACKET_ULP_OWNED | PACKET_COMPLETED |
        PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
        PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
        PACKET_IN_ABORT | PACKET_POLLED))) {
        HBASTATS.FcpGood++;

        sbp->pkt_flags |=
            (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
-           PACKET_COMPLETED | PACKET_RETURNED);
+           PACKET_COMPLETED | PACKET_ULP_OWNED);
        mutex_exit(&sbp->mtx);

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
        emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

+       cp->ulpCmplCmd++;
        (*pkt->pkt_comp) (pkt);

        return;
    }

    /*
     * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
     * is reported.
     */

    /* Check if a response buffer was provided */
    if ((iostat == IOSTAT_FCP_RSP_ERROR) && pkt->pkt_rsplen) {
-       emlxs_mpdata_sync(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
+       EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
            DDI_DMA_SYNC_FORKERNEL);

        /* Get the response buffer pointer */
        rsp = (fcp_rsp_t *)pkt->pkt_resp;

        /* Set the valid response flag */
        sbp->pkt_flags |= PACKET_FCP_RSP_VALID;

--- 27 unchanged lines hidden (view full) ---

                SCSI_STAT_CHECK_COND;
            rsp->fcp_u.fcp_status.rsp_len_set = 0;
            rsp->fcp_u.fcp_status.sense_len_set = 0;
            rsp->fcp_u.fcp_status.resid_over = 0;

            if (pkt->pkt_datalen) {
                rsp->fcp_u.fcp_status.resid_under = 1;
                rsp->fcp_resid =
-                   SWAP_DATA32(pkt->pkt_datalen);
+                   LE_SWAP32(pkt->pkt_datalen);
            } else {
                rsp->fcp_u.fcp_status.resid_under = 0;
                rsp->fcp_resid = 0;
            }

            scsi_status = SCSI_STAT_CHECK_COND;
        }

--- 6 unchanged lines hidden (view full) ---

        if (scsi_status == SCSI_STAT_GOOD) {
            check_underrun = 1;
        }
        /* Check the sense codes if this is a check condition */
        else if (scsi_status == SCSI_STAT_CHECK_COND) {
            check_underrun = 1;

            /* Check if sense data was provided */
-           if (SWAP_DATA32(rsp->fcp_sense_len) >= 14) {
+           if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
                sense = *((uint8_t *)rsp + 32 + 2);
                asc = *((uint8_t *)rsp + 32 + 12);
                ascq = *((uint8_t *)rsp + 32 + 13);
            }

#ifdef SAN_DIAG_SUPPORT
            emlxs_log_sd_scsi_check_event(port,
                (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
                scsi_opcode, sense, asc, ascq);
#endif
        }
        /* Status is not good and this is not a check condition */
        /* No data should have been sent */
        else {
            check_underrun = 0;
        }

        /* Get the residual underrun count reported by the SCSI reply */
        rsp_data_resid = (pkt->pkt_datalen &&
-           rsp->fcp_u.fcp_status.resid_under) ? SWAP_DATA32(rsp->
+           rsp->fcp_u.fcp_status.resid_under) ? LE_SWAP32(rsp->
            fcp_resid) : 0;

        /* Set the pkt resp_resid field */
        pkt->pkt_resp_resid = 0;

        /* Set the pkt data_resid field */
        if (pkt->pkt_datalen &&
            (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {

--- 32 unchanged lines hidden (view full) ---

                    scsi_cmd[16];
                break;

            default:
                scsi_dl = pkt->pkt_datalen;
            }

#ifdef FCP_UNDERRUN_PATCH1
+if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
            /*
             * If status is not good and no data was
             * actually transferred, then we must fix
             * the issue
             */
            if ((scsi_status != SCSI_STAT_GOOD) &&
                (data_rx == 0)) {
                fix_it = 1;

--- 5 unchanged lines hidden (view full) ---

                    "dl=%d,%d rx=%d rsp=%d",
                    did, sbp, scsi_opcode,
                    pkt->pkt_datalen, scsi_dl,
                    (pkt->pkt_datalen -
                    cmd->un.fcpi.fcpi_parm),
                    rsp_data_resid);

            }
+}
#endif /* FCP_UNDERRUN_PATCH1 */


#ifdef FCP_UNDERRUN_PATCH2
+if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH2) {
            if ((scsi_status == SCSI_STAT_GOOD)) {
                emlxs_msg_t *msg;

                msg = &emlxs_fcp_completion_error_msg;
                /*
                 * If status is good and this is an
                 * inquiry request and the amount of
                 * data

--- 40 unchanged lines hidden (view full) ---

                    "rx=%d rsp=%d",
                    did, sbp, scsi_opcode,
                    pkt->pkt_datalen, scsi_dl,
                    data_rx, rsp_data_resid);

                }

            }
+}
#endif /* FCP_UNDERRUN_PATCH2 */

            /*
             * Check if SCSI response payload should be
             * fixed or if a DATA_UNDERRUN should be
             * reported
             */
            if (fix_it) {
                /*
                 * Fix the SCSI response payload itself
                 */
                rsp->fcp_u.fcp_status.resid_under = 1;
                rsp->fcp_resid =
-                   SWAP_DATA32(pkt->pkt_data_resid);
+                   LE_SWAP32(pkt->pkt_data_resid);
            } else {
                /*
                 * Change the status from
                 * IOSTAT_FCP_RSP_ERROR to
                 * IOSTAT_DATA_UNDERRUN
                 */
                iostat = IOSTAT_DATA_UNDERRUN;
                pkt->pkt_data_resid =

--- 19 unchanged lines hidden (view full) ---

            }
        } else {    /* pkt->pkt_datalen==0 or FC_PKT_FCP_WRITE */

            /* Report whatever the target reported */
            pkt->pkt_data_resid = rsp_data_resid;
        }
    }

-   /*
-    * If pkt is tagged for timeout then set the return codes
-    * appropriately
-    */
-   if (sbp->pkt_flags & PACKET_IN_TIMEOUT) {
-       iostat = IOSTAT_LOCAL_REJECT;
-       localstat = IOERR_ABORT_TIMEOUT;
-       goto done;
-   }
-
-   /* If pkt is tagged for abort then set the return codes appropriately */
-   if (sbp->pkt_flags & (PACKET_IN_FLUSH | PACKET_IN_ABORT)) {
-       iostat = IOSTAT_LOCAL_REJECT;
-       localstat = IOERR_ABORT_REQUESTED;
-       goto done;
-   }
-
    /* Print completion message */
    switch (iostat) {
    case IOSTAT_SUCCESS:
        /* Build SCSI GOOD status */
        if (pkt->pkt_rsplen) {
            bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
        }
        break;

--- 109 unchanged lines hidden (view full) ---

    }

    mutex_exit(&sbp->mtx);

    emlxs_pkt_complete(sbp, iostat, localstat, 0);

    return;

} /* emlxs_handle_fcp_event() */
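Two details of the handler above, condensed into standalone helpers (illustrative sketches, not part of the driver): fixed-format sense bytes are read at offsets 2, 12 and 13 of the sense area that begins 32 bytes into the response, and the fix_it path patches the FCP_RSP residual fields in place.

/* Extract SCSI sense key / ASC / ASCQ from the FCP_RSP payload */
static void
ex_get_sense(uint8_t *rsp, uint8_t *sense, uint8_t *asc, uint8_t *ascq)
{
    uint8_t *sns = rsp + 32;    /* fixed-format sense data */

    *sense = sns[2];
    *asc = sns[12];
    *ascq = sns[13];
}

/*
 * Patch a response so ULPs see the data underrun the target failed to
 * report; this is what the fix_it path above does.
 */
static void
ex_fix_underrun(fcp_rsp_t *rsp, uint32_t resid)
{
    rsp->fcp_u.fcp_status.resid_under = 1;
    rsp->fcp_resid = LE_SWAP32(resid);
}
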


/*
 * emlxs_post_buffer
 *
 * This routine will post count buffers to the
 * ring with the QUE_RING_BUF_CN command. This
 * allows 2 buffers / command to be posted.
 * Returns the number of buffers NOT posted.
 */
+/* SLI3 */
extern int
emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
{
    emlxs_port_t *port = &PPORT;
    IOCB *icmd;
    IOCBQ *iocbq;
    MATCHMAP *mp;
    uint16_t tag;
    uint32_t maxqbuf;
    int32_t i;
    int32_t j;
    uint32_t seg;
    uint32_t size;

    mp = 0;
    maxqbuf = 2;
    tag = (uint16_t)cnt;
    cnt += rp->fc_missbufcnt;

-   if (rp->ringno == FC_ELS_RING) {
+   if (rp->ringno == hba->channel_els) {
        seg = MEM_BUF;
        size = MEM_ELSBUF_SIZE;
-   } else if (rp->ringno == FC_IP_RING) {
+   } else if (rp->ringno == hba->channel_ip) {
        seg = MEM_IPBUF;
        size = MEM_IPBUF_SIZE;
-   } else if (rp->ringno == FC_CT_RING) {
+   } else if (rp->ringno == hba->channel_ct) {
        seg = MEM_CTBUF;
        size = MEM_CTBUF_SIZE;
    }
#ifdef SFCT_SUPPORT
-   else if (rp->ringno == FC_FCT_RING) {
+   else if (rp->ringno == hba->CHANNEL_FCT) {
        seg = MEM_FCTBUF;
        size = MEM_FCTBUF_SIZE;
    }
#endif /* SFCT_SUPPORT */
    else {
        return (0);
    }

    /*
     * While there are buffers to post
     */
    while (cnt) {
-       if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
+       if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == 0) {
            rp->fc_missbufcnt = cnt;
            return (cnt);
        }

-       iocbq->ring = (void *)rp;
+       iocbq->channel = (void *)&hba->chan[rp->ringno];
        iocbq->port = (void *)port;
        iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

        icmd = &iocbq->iocb;

        /*
         * Max buffers can be posted per command
         */
        for (i = 0; i < maxqbuf; i++) {
            if (cnt <= 0)
                break;

            /* fill in BDEs for command */
-           if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg)) == 0) {
-               icmd->ulpBdeCount = i;
+           if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg, 1))
+               == 0) {
+               icmd->ULPBDECOUNT = i;
                for (j = 0; j < i; j++) {
                    mp = EMLXS_GET_VADDR(hba, rp, icmd);
                    if (mp) {
                        (void) emlxs_mem_put(hba, seg,
                            (uint8_t *)mp);
                    }
                }

--- 11 unchanged lines hidden (view full) ---

             */
            emlxs_mem_map_vaddr(hba,
                rp,
                mp,
                (uint32_t *)&icmd->un.cont64[i].addrHigh,
                (uint32_t *)&icmd->un.cont64[i].addrLow);

            icmd->un.cont64[i].tus.f.bdeSize = size;
-           icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
+           icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;

            /*
             * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
             * "UB Post: ring=%d addr=%08x%08x size=%d",
             * rp->ringno, icmd->un.cont64[i].addrHigh,
             * icmd->un.cont64[i].addrLow, size);
             */

            cnt--;
        }

-       icmd->ulpIoTag = tag;
-       icmd->ulpBdeCount = i;
-       icmd->ulpLe = 1;
-       icmd->ulpOwner = OWN_CHIP;
+       icmd->ULPIOTAG = tag;
+       icmd->ULPBDECOUNT = i;
+       icmd->ULPLE = 1;
+       icmd->ULPOWNER = OWN_CHIP;
        /* used for delimiter between commands */
        iocbq->bp = (uint8_t *)mp;

-       emlxs_sli_issue_iocb_cmd(hba, rp, iocbq);
+       EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
    }

    rp->fc_missbufcnt = 0;

    return (0);

} /* emlxs_post_buffer() */
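The EMLXS_SLI_* macros that replace direct emlxs_sli_* calls throughout this revision suggest dispatch through a per-HBA ops vector, so SLI3 and SLI4 back ends can coexist behind one call site. A hypothetical sketch of that shape (the names here are invented for illustration; the driver's actual structure and member names may differ):

typedef struct ex_sli_ops {
    void    (*issue_iocb_cmd)(emlxs_hba_t *hba, CHANNEL *cp,
                IOCBQ *iocbq);
    void    (*offline)(emlxs_hba_t *hba);
    void    (*disable_intr)(emlxs_hba_t *hba, uint32_t att);
} ex_sli_ops_t;

/* Call sites stay generic; the mode-specific functions are bound once */
#define EX_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq) \
    ((hba)->ex_ops->issue_iocb_cmd((hba), (cp), (iocbq)))
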


extern int
emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
{
    emlxs_hba_t *hba = HBA;
    emlxs_config_t *cfg;
    NODELIST *nlp;

--- 252 unchanged lines hidden (view full) ---

#ifdef DHCHAP_SUPPORT
            emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

            /*
             * Close the node for any further normal IO
             * A PLOGI with reopen the node
             */
-           emlxs_node_close(port, nlp, FC_FCP_RING, 60);
-           emlxs_node_close(port, nlp, FC_IP_RING, 60);
+           emlxs_node_close(port, nlp,
+               hba->channel_fcp, 60);
+           emlxs_node_close(port, nlp,
+               hba->channel_ip, 60);

            /* Flush tx queue */
            (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

            /* Flush chip queue */
            (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
        }

--- 71 unchanged lines hidden (view full) ---

#ifdef DHCHAP_SUPPORT
            emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

            /*
             * Close the node for any further normal IO
             * A PLOGI with reopen the node
             */
-           emlxs_node_close(port, nlp, FC_FCP_RING, 60);
-           emlxs_node_close(port, nlp, FC_IP_RING, 60);
+           emlxs_node_close(port, nlp,
+               hba->channel_fcp, 60);
+           emlxs_node_close(port, nlp,
+               hba->channel_ip, 60);

            /* Flush tx queue */
            (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

            /* Flush chip queue */
            (void) emlxs_chipq_node_flush(port, 0, nlp, 0);

        } else if (action == 3) {  /* FCP2 devices */
            unreg_vpi = 0;

#ifdef DHCHAP_SUPPORT
            emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

            /*
             * Close the node for any further normal IO
             * An ADISC or a PLOGI with reopen the node
             */
-           emlxs_node_close(port, nlp, FC_FCP_RING, -1);
-           emlxs_node_close(port, nlp, FC_IP_RING,
+           emlxs_node_close(port, nlp,
+               hba->channel_fcp, -1);
+           emlxs_node_close(port, nlp, hba->channel_ip,
                ((linkdown) ? 0 : 60));

            /* Flush tx queues except for FCP ring */
            (void) emlxs_tx_node_flush(port, nlp,
-               &hba->ring[FC_CT_RING], 0, 0);
+               &hba->chan[hba->channel_ct], 0, 0);
            (void) emlxs_tx_node_flush(port, nlp,
-               &hba->ring[FC_ELS_RING], 0, 0);
+               &hba->chan[hba->channel_els], 0, 0);
            (void) emlxs_tx_node_flush(port, nlp,
-               &hba->ring[FC_IP_RING], 0, 0);
+               &hba->chan[hba->channel_ip], 0, 0);

            /* Flush chip queues except for FCP ring */
            (void) emlxs_chipq_node_flush(port,
-               &hba->ring[FC_CT_RING], nlp, 0);
+               &hba->chan[hba->channel_ct], nlp, 0);
            (void) emlxs_chipq_node_flush(port,
-               &hba->ring[FC_ELS_RING], nlp, 0);
+               &hba->chan[hba->channel_els], nlp, 0);
            (void) emlxs_chipq_node_flush(port,
-               &hba->ring[FC_IP_RING], nlp, 0);
+               &hba->chan[hba->channel_ip], nlp, 0);
        }
    }
    break;

    case 2:  /* Full support - Hold FCP IO to FCP target matching nodes */

    if (!linkdown && !vlinkdown) {
        break;

--- 59 unchanged lines hidden (view full) ---

        } else if (action == 1) {
            (void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
                NULL, NULL, NULL);
        } else if (action == 2) {
            /*
             * Close the node for any further normal IO
             * A PLOGI with reopen the node
             */
-           emlxs_node_close(port, nlp, FC_FCP_RING, 60);
-           emlxs_node_close(port, nlp, FC_IP_RING, 60);
+           emlxs_node_close(port, nlp,
+               hba->channel_fcp, 60);
+           emlxs_node_close(port, nlp,
+               hba->channel_ip, 60);

            /* Flush tx queue */
            (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

            /* Flush chip queue */
            (void) emlxs_chipq_node_flush(port, 0, nlp, 0);

        } else if (action == 3) {  /* FCP2 devices */
            unreg_vpi = 0;

            /*
             * Close the node for any further normal IO
             * An ADISC or a PLOGI with reopen the node
             */
-           emlxs_node_close(port, nlp, FC_FCP_RING, -1);
-           emlxs_node_close(port, nlp, FC_IP_RING,
+           emlxs_node_close(port, nlp,
+               hba->channel_fcp, -1);
+           emlxs_node_close(port, nlp, hba->channel_ip,
                ((linkdown) ? 0 : 60));

            /* Flush tx queues except for FCP ring */
            (void) emlxs_tx_node_flush(port, nlp,
-               &hba->ring[FC_CT_RING], 0, 0);
+               &hba->chan[hba->channel_ct], 0, 0);
            (void) emlxs_tx_node_flush(port, nlp,
-               &hba->ring[FC_ELS_RING], 0, 0);
+               &hba->chan[hba->channel_els], 0, 0);
            (void) emlxs_tx_node_flush(port, nlp,
-               &hba->ring[FC_IP_RING], 0, 0);
+               &hba->chan[hba->channel_ip], 0, 0);

            /* Flush chip queues except for FCP ring */
            (void) emlxs_chipq_node_flush(port,
-               &hba->ring[FC_CT_RING], nlp, 0);
+               &hba->chan[hba->channel_ct], nlp, 0);
            (void) emlxs_chipq_node_flush(port,
-               &hba->ring[FC_ELS_RING], nlp, 0);
+               &hba->chan[hba->channel_els], nlp, 0);
            (void) emlxs_chipq_node_flush(port,
-               &hba->ring[FC_IP_RING], nlp, 0);
+               &hba->chan[hba->channel_ip], nlp, 0);
        }
    }

    break;

    } /* switch() */

done:

-   if (unreg_vpi) {
-       (void) emlxs_mb_unreg_vpi(port);
-   }
+   if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
+       if (unreg_vpi) {
+           (void) emlxs_mb_unreg_vpi(port);
+       }
+   }

    return (0);

} /* emlxs_port_offline() */
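The per-action branches above repeat one quiesce pattern. Condensed into a sketch using the driver's own calls, with the timeout semantics the code shows (60 seconds lets a PLOGI reopen the node; -1 holds the FCP channel closed for FCP-2 style recovery):

static void
ex_quiesce_node(emlxs_port_t *port, emlxs_hba_t *hba, NODELIST *nlp)
{
    /* Stop normal IO; a later PLOGI reopens the node */
    emlxs_node_close(port, nlp, hba->channel_fcp, 60);
    emlxs_node_close(port, nlp, hba->channel_ip, 60);

    /* Drain anything queued for transmit or already on the chip */
    (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
    (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
}
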


extern void
emlxs_port_online(emlxs_port_t *vport)
{
    emlxs_hba_t *hba = vport->hba;
    emlxs_port_t *port = &PPORT;
    uint32_t state;

--- 82 unchanged lines hidden (view full) ---

        if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
            hba->flag |= FC_NPIV_LINKUP;
            npiv_linkup = 1;
        }
    }

    mutex_exit(&EMLXS_PORT_LOCK);

    /*
     * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
-    * "linkup_callback: update=%d vpi=%d flag=%d fc_flag=%x state=%x
-    * statec=%x", update, vport->vpi, npiv_linkup, hba->flag,
+    * "linkup_callback: update=%d vpi=%d flag=%d fc_flag=%x state=%x"
+    * "statec=%x", update, vport->vpi, npiv_linkup, hba->flag,
     * hba->state, vport->ulp_statec);
     */

    if (update) {
        if (vport->flag & EMLXS_PORT_BOUND) {
            if (vport->vpi == 0) {
                EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
                    "%s%s%s", linkspeed, topology, mode);
            } else if (npiv_linkup) {
                EMLXS_MSGF(EMLXS_CONTEXT,
                    &emlxs_npiv_link_up_msg, "%s%s%s",

--- 31 unchanged lines hidden (view full) ---

    }

    /* Flush any pending ub buffers */
    emlxs_ub_flush(vport);
    }

    return;

} /* emlxs_port_online() */


extern void
emlxs_linkdown(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
+   RPIobj_t *rp;
    int i;

    mutex_enter(&EMLXS_PORT_LOCK);

-   HBASTATS.LinkDown++;
-   emlxs_ffstate_change_locked(hba, FC_LINK_DOWN);
+   if (hba->state > FC_LINK_DOWN) {
+       HBASTATS.LinkDown++;
+       EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
+   }

    /* Filter hba flags */
    hba->flag &= FC_LINKDOWN_MASK;
    hba->discovery_timer = 0;
    hba->linkup_timer = 0;

+   if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
+       rp = hba->sli.sli4.RPIp;
+       for (i = 0; i < hba->sli.sli4.RPICount; i++) {
+           if (rp->state & RESOURCE_ALLOCATED) {
+               rp->state |= RESOURCE_RPI_PAUSED;
+           }
+           rp++;
+       }
+   }

    mutex_exit(&EMLXS_PORT_LOCK);

    for (i = 0; i < MAX_VPORTS; i++) {
        port = &VPORT(i);

        if (!(port->flag & EMLXS_PORT_BOUND)) {
            continue;
        }

        (void) emlxs_port_offline(port, 0xffffffff);

    }

    return;

} /* emlxs_linkdown() */
1456
1457extern void
1458emlxs_linkup(emlxs_hba_t *hba)
1459{
1460 emlxs_port_t *port = &PPORT;
1461 emlxs_config_t *cfg = &CFG;
1462
1463 mutex_enter(&EMLXS_PORT_LOCK);
1464
1465 HBASTATS.LinkUp++;
1434
1435
1436extern void
1437emlxs_linkup(emlxs_hba_t *hba)
1438{
1439 emlxs_port_t *port = &PPORT;
1440 emlxs_config_t *cfg = &CFG;
1441
1442 mutex_enter(&EMLXS_PORT_LOCK);
1443
1444 HBASTATS.LinkUp++;
1466 emlxs_ffstate_change_locked(hba, FC_LINK_UP);
1445 EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);
1467
1468#ifdef MENLO_SUPPORT
1469 if (hba->flag & FC_MENLO_MODE) {
1470 mutex_exit(&EMLXS_PORT_LOCK);
1471
1472 /*
1473 * Trigger linkup CV and don't start linkup & discovery
1474 * timers

--- 11 unchanged lines hidden (view full) ---

1486 hba->discovery_timer =
1487 hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1488 cfg[CFG_DISC_TIMEOUT].current;
1489
1490 mutex_exit(&EMLXS_PORT_LOCK);
1491
1492 return;
1493
1446
1447#ifdef MENLO_SUPPORT
1448 if (hba->flag & FC_MENLO_MODE) {
1449 mutex_exit(&EMLXS_PORT_LOCK);
1450
1451 /*
1452 * Trigger linkup CV and don't start linkup & discovery
1453 * timers

--- 11 unchanged lines hidden (view full) ---

1465 hba->discovery_timer =
1466 hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1467 cfg[CFG_DISC_TIMEOUT].current;
1468
1469 mutex_exit(&EMLXS_PORT_LOCK);
1470
1471 return;
1472
1494} /* emlxs_linkup() */
1473} /* emlxs_linkup() */


/*
 * emlxs_reset_link
 *
 * Description:
 * Called to reset the link with an init_link
 *
 * Returns:
 *
 */
extern int
-emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup)
+emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
{
    emlxs_port_t *port = &PPORT;
    emlxs_config_t *cfg;
-   MAILBOX *mb;
+   MAILBOXQ *mbq = NULL;
+   MAILBOX *mb = NULL;
+   int rval = 0;
+   int rc;

    /*
     * Get a buffer to use for the mailbox command
     */
-   if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == NULL) {
+   if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))
+       == NULL) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
            "Unable to allocate mailbox buffer.");
+       rval = 1;
+       goto reset_link_fail;
+   }

-       return (1);
-   }
+   mb = (MAILBOX *)mbq;

+   /* Bring link down first */
+   emlxs_mb_down_link(hba, mbq);

+#define MBXERR_LINK_DOWN    0x33

+   if (wait) {
+       wait = MBX_WAIT;
+   } else {
+       wait = MBX_NOWAIT;
+   }
+   rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
+   if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
+       (rc != MBXERR_LINK_DOWN)) {
+       rval = 1;
+       goto reset_link_fail;
+   }

-   cfg = &CFG;
+   EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
+       "Disabling link...");

    if (linkup) {
        /*
         * Setup and issue mailbox INITIALIZE LINK command
         */

-       emlxs_mb_init_link(hba,
-           (MAILBOX *) mb,
+       if (wait == MBX_NOWAIT) {
+           if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))
+               == NULL) {
+               EMLXS_MSGF(EMLXS_CONTEXT,
+                   &emlxs_link_reset_failed_msg,
+                   "Unable to allocate mailbox buffer.");
+               rval = 1;
+               goto reset_link_fail;
+           }
+           mb = (MAILBOX *)mbq;
+       } else {
+           /* Reuse mbq from previous mbox */
+           mb = (MAILBOX *)mbq;
+       }
+       cfg = &CFG;

+       emlxs_mb_init_link(hba, mbq,
            cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);

        mb->un.varInitLnk.lipsr_AL_PA = 0;

        /* Clear the loopback mode */
        mutex_enter(&EMLXS_PORT_LOCK);
        hba->flag &= ~FC_LOOPBACK_MODE;
        hba->loopback_tics = 0;
        mutex_exit(&EMLXS_PORT_LOCK);

-       if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mb, MBX_NOWAIT,
-           0) != MBX_BUSY) {
-           (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
+       rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
+       if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
+           rval = 1;
+           goto reset_link_fail;
        }

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
    }

-   } else {    /* hold link down */
-
-       emlxs_mb_down_link(hba, (MAILBOX *)mb);
-
-       if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mb, MBX_NOWAIT,
-           0) != MBX_BUSY) {
-           (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
-       }
-
-       EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
-           "Disabling link...");
-   }
+reset_link_fail:

+   if ((wait == MBX_WAIT) && mbq) {
+       (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
+   }

-   return (0);
+   return (rval);

} /* emlxs_reset_link() */

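The reworked function's cleanup hinges on mailbox ownership: with MBX_NOWAIT the mailbox layer keeps the MAILBOXQ and frees it on completion, so only the synchronous (MBX_WAIT) path may release it, which is what the reset_link_fail label does above. A minimal sketch of that rule, assuming this ownership convention:

static int
ex_issue_mbox(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t wait)
{
    int rc;

    rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);

    /* NOWAIT submissions are freed by the completion path, not here */
    if (wait == MBX_WAIT) {
        (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
    }
    return (rc);
}
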
extern int
emlxs_online(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    int32_t rval = 0;
    uint32_t i = 0;

    /* Make sure adapter is offline or exit trying (30 seconds) */

--- 25 unchanged lines hidden (view full) ---

        mutex_exit(&EMLXS_PORT_LOCK);

        DELAYMS(1000);
    }

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
        "Going online...");

-   if (rval = emlxs_ffinit(hba)) {
+   if (rval = EMLXS_SLI_ONLINE(hba)) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
            rval);
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

        /* Set FC_OFFLINE_MODE */
        mutex_enter(&EMLXS_PORT_LOCK);
        emlxs_diag_state = DDI_OFFDI;
        hba->flag |= FC_OFFLINE_MODE;

--- 16 unchanged lines hidden (view full) ---

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);

#ifdef SFCT_SUPPORT
    (void) emlxs_fct_port_initialize(port);
#endif /* SFCT_SUPPORT */

    return (rval);

} /* emlxs_online() */


extern int
emlxs_offline(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    uint32_t i = 0;
    int rval = 1;

--- 30 unchanged lines hidden (view full) ---

    }

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
        "Going offline...");

    if (port->ini_mode) {
        /* Flush all IO */
        emlxs_linkdown(hba);
    }
#ifdef SFCT_SUPPORT
    else {
        (void) emlxs_fct_port_shutdown(port);
    }
#endif /* SFCT_SUPPORT */

    /* Check if adapter was shutdown */

--- 6 unchanged lines hidden (view full) ---

    }

    /* Pause here for the IO to settle */
    delay(drv_usectohz(1000000));   /* 1 sec */

    /* Unregister all nodes */
    emlxs_ffcleanup(hba);

    if (hba->bus_type == SBUS_FC) {
-       WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba, hba->sbus_csr_addr),
-           0x9A);
+       WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
#ifdef FMA_SUPPORT
-       if (emlxs_fm_check_acc_handle(hba, hba->sbus_csr_handle)
-           != DDI_FM_OK) {
-           EMLXS_MSGF(EMLXS_CONTEXT,
-               &emlxs_invalid_access_handle_msg, NULL);
-       }
+       /* Access handle validation */
+       EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
#endif /* FMA_SUPPORT */
    }

    /* Stop the timer */
    emlxs_timer_stop(hba);

    /* For safety flush every iotag list */
    if (emlxs_iotag_flush(hba)) {
        /* Pause here for the IO to flush */
        delay(drv_usectohz(1000));
    }

    /* Wait for poll command request to settle */
    while (hba->io_poll_count > 0) {
        delay(drv_usectohz(2000000));   /* 2 sec */
    }

-   emlxs_sli_offline(hba);
+   /* Shutdown the adapter interface */
+   EMLXS_SLI_OFFLINE(hba);

-   /* Free all the shared memory */
-   (void) emlxs_mem_free_buffer(hba);

    mutex_enter(&EMLXS_PORT_LOCK);
    hba->flag |= FC_OFFLINE_MODE;
    hba->flag &= ~FC_OFFLINING_MODE;
    emlxs_diag_state = DDI_OFFDI;
    mutex_exit(&EMLXS_PORT_LOCK);

    rval = 0;

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

done:

    return (rval);

} /* emlxs_offline() */
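A condensed view of the teardown ordering emlxs_offline() follows, using the driver's own calls in source order (delays, mode checks and error handling elided):

static void
ex_offline_sequence(emlxs_hba_t *hba)
{
    emlxs_linkdown(hba);            /* flush logins and outstanding IO */
    emlxs_ffcleanup(hba);           /* unregister nodes, mask interrupts */
    emlxs_timer_stop(hba);          /* no new timeout activity */
    (void) emlxs_iotag_flush(hba);  /* complete every tagged IO */
    EMLXS_SLI_OFFLINE(hba);         /* shut down the adapter interface */
}
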



extern int
emlxs_power_down(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
    emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
    int32_t rval = 0;
    uint32_t *ptr;
    uint32_t i;

    if ((rval = emlxs_offline(hba))) {
        return (rval);
    }
+   EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);

    /* Save pci config space */
    ptr = (uint32_t *)hba->pm_config;
    for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
        *ptr =
            ddi_get32(hba->pci_acc_handle,
            (uint32_t *)(hba->pci_addr + i));
    }

--- 9 unchanged lines hidden (view full) ---

        EMLXS_MSGF(EMLXS_CONTEXT,
            &emlxs_invalid_access_handle_msg, NULL);
        return (1);
    }
#endif /* FMA_SUPPORT */

    return (0);

} /* End emlxs_power_down */
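The save loop above has an obvious inverse on resume. A sketch of the restore direction using the same DDI accessors (illustrative; the body of the driver's emlxs_power_up is hidden in this view and may differ):

static void
ex_restore_pci_config(emlxs_hba_t *hba)
{
    uint32_t *ptr = (uint32_t *)hba->pm_config;
    uint32_t i;

    /* Write the saved words back into PCI configuration space */
    for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
        ddi_put32(hba->pci_acc_handle,
            (uint32_t *)(hba->pci_addr + i), *ptr);
    }
}
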


extern int
emlxs_power_up(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
    emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */

--- 32 unchanged lines hidden (view full) ---

            (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
            (uint8_t)PCI_PM_D3_STATE);

        return (rval);
    }

    return (rval);

} /* End emlxs_power_up */


/*
 *
 * NAME: emlxs_ffcleanup
 *
 * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
 *

--- 7 unchanged lines hidden (view full) ---

 */
extern void
emlxs_ffcleanup(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    uint32_t i;

    /* Disable all but the mailbox interrupt */
-   emlxs_disable_intr(hba, HC_MBINT_ENA);
+   EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);

    /* Make sure all port nodes are destroyed */
    for (i = 0; i < MAX_VPORTS; i++) {
        port = &VPORT(i);

        if (port->node_count) {
-           (void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
+           if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
+               (void) emlxs_sli4_unreg_all_rpi_by_port(port);
+           } else {
+               (void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0,
+                   0);
+           }
        }
    }

    /* Clear all interrupt enable conditions */
-   emlxs_disable_intr(hba, 0);
+   EMLXS_SLI_DISABLE_INTR(hba, 0);

    return;

} /* emlxs_ffcleanup() */


extern uint16_t
-emlxs_register_pkt(RING *rp, emlxs_buf_t *sbp)
+emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
{
    emlxs_hba_t *hba;
    emlxs_port_t *port;
    uint16_t iotag;
    uint32_t i;

-   hba = rp->hba;
+   hba = cp->hba;

-   mutex_enter(&EMLXS_FCTAB_LOCK(rp->ringno));
+   mutex_enter(&EMLXS_FCTAB_LOCK);

    if (sbp->iotag != 0) {
        port = &PPORT;

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
-           "Pkt already registered! ringo=%d iotag=%d sbp=%p",
-           sbp->ring, sbp->iotag, sbp);
+           "Pkt already registered! channel=%d iotag=%d sbp=%p",
+           sbp->channel, sbp->iotag, sbp);
    }

    iotag = 0;
-   for (i = 0; i < rp->max_iotag; i++) {
-       if (!rp->fc_iotag || rp->fc_iotag >= rp->max_iotag) {
-           rp->fc_iotag = 1;
+   for (i = 0; i < hba->max_iotag; i++) {
+       if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
+           hba->fc_iotag = 1;
        }
-       iotag = rp->fc_iotag++;
+       iotag = hba->fc_iotag++;

-       if (rp->fc_table[iotag] == 0 ||
-           rp->fc_table[iotag] == STALE_PACKET) {
-           hba->io_count[rp->ringno]++;
-           rp->fc_table[iotag] = sbp;
+       if (hba->fc_table[iotag] == 0 ||
+           hba->fc_table[iotag] == STALE_PACKET) {
+           hba->io_count++;
+           hba->fc_table[iotag] = sbp;

            sbp->iotag = iotag;
-           sbp->ring = rp;
+           sbp->channel = cp;

            break;
        }
        iotag = 0;
    }

-   mutex_exit(&EMLXS_FCTAB_LOCK(rp->ringno));
+   mutex_exit(&EMLXS_FCTAB_LOCK);

    /*
     * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
-    * "emlxs_register_pkt: ringo=%d iotag=%d sbp=%p",
-    * rp->ringno, iotag, sbp);
+    * "emlxs_register_pkt: channel=%d iotag=%d sbp=%p",
+    * cp->channelno, iotag, sbp);
     */

    return (iotag);

} /* emlxs_register_pkt() */
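The allocation scan above, isolated: a cursor walks the now per-HBA iotag table round-robin, never hands out tag 0, and claims the first free or stale slot. Condensed from the function body, with locking elided:

static uint16_t
ex_alloc_iotag(emlxs_hba_t *hba, emlxs_buf_t *sbp)
{
    uint16_t iotag;
    uint32_t i;

    for (i = 0; i < hba->max_iotag; i++) {
        if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
            hba->fc_iotag = 1;  /* wrap; slot 0 is reserved */
        }
        iotag = hba->fc_iotag++;

        if (hba->fc_table[iotag] == 0 ||
            hba->fc_table[iotag] == STALE_PACKET) {
            hba->io_count++;
            hba->fc_table[iotag] = sbp;
            return (iotag);
        }
    }
    return (0); /* table exhausted */
}
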



extern emlxs_buf_t *
-emlxs_unregister_pkt(RING *rp, uint16_t iotag, uint32_t forced)
+emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
{
    emlxs_hba_t *hba;
    emlxs_buf_t *sbp;
-   uint32_t ringno;

+   sbp = NULL;
+   hba = cp->hba;

    /* Check the iotag range */
-   if ((iotag == 0) || (iotag >= rp->max_iotag)) {
+   if ((iotag == 0) || (iotag >= hba->max_iotag)) {
        return (NULL);
    }

-   sbp = NULL;
-   hba = rp->hba;
-   ringno = rp->ringno;

    /* Remove the sbp from the table */
-   mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
-   sbp = rp->fc_table[iotag];
+   mutex_enter(&EMLXS_FCTAB_LOCK);
+   sbp = hba->fc_table[iotag];

    if (!sbp || (sbp == STALE_PACKET)) {
-       mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
+       mutex_exit(&EMLXS_FCTAB_LOCK);
        return (sbp);
    }

-   rp->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
-   hba->io_count[ringno]--;
+   hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
+   hba->io_count--;
    sbp->iotag = 0;

-   mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
+   mutex_exit(&EMLXS_FCTAB_LOCK);


    /* Clean up the sbp */
    mutex_enter(&sbp->mtx);

    if (sbp->pkt_flags & PACKET_IN_TXQ) {
        sbp->pkt_flags &= ~PACKET_IN_TXQ;
-       hba->ring_tx_count[ringno]--;
+       hba->channel_tx_count--;
    }

    if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
        sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
    }

    if (sbp->bmp) {
        (void) emlxs_mem_put(hba, MEM_BPL, (uint8_t *)sbp->bmp);
        sbp->bmp = 0;
    }

    mutex_exit(&sbp->mtx);

    return (sbp);

} /* emlxs_unregister_pkt() */
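A plausible reading of the STALE_PACKET sentinel above: a late hardware completion can still arrive carrying a forcibly retired iotag, and the sentinel lets the completion path (see the FcpStray accounting earlier) recognize and drop it instead of dereferencing a recycled pointer. The table-side core of the function, condensed with locking elided:

static emlxs_buf_t *
ex_take_iotag(emlxs_hba_t *hba, uint16_t iotag, uint32_t forced)
{
    emlxs_buf_t *sbp;

    if ((iotag == 0) || (iotag >= hba->max_iotag)) {
        return (NULL);
    }

    sbp = hba->fc_table[iotag];
    if (!sbp || (sbp == STALE_PACKET)) {
        return (sbp);
    }

    /* Forced removals leave a sentinel to absorb late completions */
    hba->fc_table[iotag] = (forced) ? STALE_PACKET : NULL;
    hba->io_count--;
    sbp->iotag = 0;

    return (sbp);
}
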
1999
2000
2001
2007
2008
2009
/* Flush all IO's to all nodes for a given IO Channel */
extern uint32_t
emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
{
    emlxs_port_t *port = &PPORT;
    emlxs_buf_t *sbp;
    IOCBQ *iocbq;
    IOCBQ *next;
    IOCB *iocb;
    uint32_t channelno;
    Q abort;
    NODELIST *ndlp;
    IOCB *icmd;
    MATCHMAP *mp;
    uint32_t i;
    uint8_t flag[MAX_CHANNEL];

    channelno = cp->channelno;
    bzero((void *)&abort, sizeof (Q));
    bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));

    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

    /* While a node needs servicing */
    while (cp->nodeq.q_first) {
        ndlp = (NODELIST *) cp->nodeq.q_first;

        /* Check if priority queue is not empty */
        if (ndlp->nlp_ptx[channelno].q_first) {
            /* Transfer all iocb's to local queue */
            if (abort.q_first == 0) {
                abort.q_first =
                    ndlp->nlp_ptx[channelno].q_first;
            } else {
                ((IOCBQ *)abort.q_last)->next =
                    (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
            }
            flag[channelno] = 1;

            abort.q_last = ndlp->nlp_ptx[channelno].q_last;
            abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
        }

        /* Check if tx queue is not empty */
        if (ndlp->nlp_tx[channelno].q_first) {
            /* Transfer all iocb's to local queue */
            if (abort.q_first == 0) {
                abort.q_first = ndlp->nlp_tx[channelno].q_first;
            } else {
                ((IOCBQ *)abort.q_last)->next =
                    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
            }

            abort.q_last = ndlp->nlp_tx[channelno].q_last;
            abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
        }

        /* Clear the queue pointers */
        ndlp->nlp_ptx[channelno].q_first = NULL;
        ndlp->nlp_ptx[channelno].q_last = NULL;
        ndlp->nlp_ptx[channelno].q_cnt = 0;

        ndlp->nlp_tx[channelno].q_first = NULL;
        ndlp->nlp_tx[channelno].q_last = NULL;
        ndlp->nlp_tx[channelno].q_cnt = 0;

        /* Remove node from service queue */

        /* If this is the last node on list */
        if (cp->nodeq.q_last == (void *)ndlp) {
            cp->nodeq.q_last = NULL;
            cp->nodeq.q_first = NULL;
            cp->nodeq.q_cnt = 0;
        } else {
            /* Remove node from head */
            cp->nodeq.q_first = ndlp->nlp_next[channelno];
            ((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
                cp->nodeq.q_first;
            cp->nodeq.q_cnt--;
        }

        /* Clear node */
        ndlp->nlp_next[channelno] = NULL;
    }

    /* First cleanup the iocb's while still holding the lock */
    iocbq = (IOCBQ *) abort.q_first;
    while (iocbq) {
        /* Free the IoTag and the bmp */
        iocb = &iocbq->iocb;

        if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
            sbp = iocbq->sbp;
            if (sbp) {
                hba->fc_table[sbp->iotag] = NULL;
                emlxs_sli4_free_xri(hba, sbp, sbp->xp);
            }
        } else {
            sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
                iocb->ULPIOTAG, 0);
        }

        if (sbp && (sbp != STALE_PACKET)) {
            mutex_enter(&sbp->mtx);

            sbp->pkt_flags |= PACKET_IN_FLUSH;

            /*
             * If the fpkt is already set, then we will leave it
             * alone. This ensures that this pkt is only accounted
             * for on one fpkt->flush_count
             */
            if (!sbp->fpkt && fpkt) {
                mutex_enter(&fpkt->mtx);
                sbp->fpkt = fpkt;
                fpkt->flush_count++;
                mutex_exit(&fpkt->mtx);
            }

            mutex_exit(&sbp->mtx);
        }

        iocbq = (IOCBQ *)iocbq->next;

    } /* end of while */

    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    /* Now abort the iocb's */
    iocbq = (IOCBQ *)abort.q_first;
    while (iocbq) {
        /* Save the next iocbq for now */
        next = (IOCBQ *)iocbq->next;

        /* Unlink this iocbq */

--- 13 unchanged lines hidden ---

                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_LINK_DOWN, 1);
            }

        }
        /* Free the iocb and its associated buffers */
        else {
            icmd = &iocbq->iocb;

            /* SLI3 */
            if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
                icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
                icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
                if ((hba->flag &
                    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
                    /* HBA is detaching or offlining */
                    if (icmd->ULPCOMMAND !=
                        CMD_QUE_RING_LIST64_CN) {
                        uint8_t *tmp;
                        RING *rp;

                        rp = &hba->sli.sli3.ring[channelno];
                        for (i = 0; i < icmd->ULPBDECOUNT; i++) {
                            mp = EMLXS_GET_VADDR(hba, rp, icmd);

                            tmp = (uint8_t *)mp;
                            if (mp) {
                                (void) emlxs_mem_put(hba,
                                    MEM_BUF, tmp);
                            }
                        }
                    }

                    (void) emlxs_mem_put(hba, MEM_IOCB,
                        (uint8_t *)iocbq);
                } else {
                    /* repost the unsolicited buffer */
                    EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
                }
            } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
                icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {

                emlxs_tx_put(iocbq, 1);
            }
        }

        iocbq = next;

    } /* end of while */

    /* Now trigger channel service */
    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        if (!flag[channelno]) {
            continue;
        }

        EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
    }

    return (abort.q_cnt);

} /* emlxs_tx_channel_flush() */

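/*
 * Usage sketch (hypothetical caller, for illustration only; the actual
 * call sites live elsewhere in the driver): flush every pending
 * transmit I/O on the FCP channel and account the flushed packets
 * against a flush packet 'fpkt':
 *
 *	uint32_t cnt;
 *
 *	cnt = emlxs_tx_channel_flush(hba, &hba->chan[hba->channel_fcp],
 *	    fpkt);
 *
 * The return value is the number of IOCBs removed from the TX queues.
 */
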
/* Flush all IO's on all or a given channel for a given node */
extern uint32_t
emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
    uint32_t shutdown, emlxs_buf_t *fpkt)
{
    emlxs_hba_t *hba = HBA;
    emlxs_buf_t *sbp;
    uint32_t channelno;
    CHANNEL *cp;
    IOCB *icmd;
    IOCBQ *iocbq;
    NODELIST *prev;
    IOCBQ *next;
    IOCB *iocb;
    Q abort;
    uint32_t i;
    MATCHMAP *mp;
    uint8_t flag[MAX_CHANNEL];

    bzero((void *)&abort, sizeof (Q));
    bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));

    /* Flush all I/O's on tx queue to this target */
    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

    if (!ndlp->nlp_base && shutdown) {
        ndlp->nlp_active = 0;
    }

    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        cp = &hba->chan[channelno];

        if (chan && cp != chan) {
            continue;
        }

        if (!ndlp->nlp_base || shutdown) {
            /* Check if priority queue is not empty */
            if (ndlp->nlp_ptx[channelno].q_first) {
                /* Transfer all iocb's to local queue */
                if (abort.q_first == 0) {
                    abort.q_first =
                        ndlp->nlp_ptx[channelno].q_first;
                } else {
                    ((IOCBQ *)(abort.q_last))->next =
                        (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
                }

                flag[channelno] = 1;

                abort.q_last = ndlp->nlp_ptx[channelno].q_last;
                abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
            }
        }

        /* Check if tx queue is not empty */
        if (ndlp->nlp_tx[channelno].q_first) {

            /* Transfer all iocb's to local queue */
            if (abort.q_first == 0) {
                abort.q_first = ndlp->nlp_tx[channelno].q_first;
            } else {
                ((IOCBQ *)abort.q_last)->next =
                    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
            }

            abort.q_last = ndlp->nlp_tx[channelno].q_last;
            abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
        }

        /* Clear the queue pointers */
        ndlp->nlp_ptx[channelno].q_first = NULL;
        ndlp->nlp_ptx[channelno].q_last = NULL;
        ndlp->nlp_ptx[channelno].q_cnt = 0;

        ndlp->nlp_tx[channelno].q_first = NULL;
        ndlp->nlp_tx[channelno].q_last = NULL;
        ndlp->nlp_tx[channelno].q_cnt = 0;

        /* If this node was on the channel queue, remove it */
        if (ndlp->nlp_next[channelno]) {
            /* If this is the only node on list */
            if (cp->nodeq.q_first == (void *)ndlp &&
                cp->nodeq.q_last == (void *)ndlp) {
                cp->nodeq.q_last = NULL;
                cp->nodeq.q_first = NULL;
                cp->nodeq.q_cnt = 0;
            } else if (cp->nodeq.q_first == (void *)ndlp) {
                cp->nodeq.q_first = ndlp->nlp_next[channelno];
                ((NODELIST *) cp->nodeq.q_last)->
                    nlp_next[channelno] = cp->nodeq.q_first;
                cp->nodeq.q_cnt--;
            } else {
                /*
                 * This is a little more difficult: find the
                 * previous node in the circular channel queue
                 */
                prev = ndlp;
                while (prev->nlp_next[channelno] != ndlp) {
                    prev = prev->nlp_next[channelno];
                }

                prev->nlp_next[channelno] =
                    ndlp->nlp_next[channelno];

                if (cp->nodeq.q_last == (void *)ndlp) {
                    cp->nodeq.q_last = (void *)prev;
                }
                cp->nodeq.q_cnt--;

            }

            /* Clear node */
            ndlp->nlp_next[channelno] = NULL;
        }

    }

    /* First cleanup the iocb's while still holding the lock */
    iocbq = (IOCBQ *) abort.q_first;
    while (iocbq) {
        /* Free the IoTag and the bmp */
        iocb = &iocbq->iocb;

        if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
            sbp = iocbq->sbp;
            if (sbp) {
                hba->fc_table[sbp->iotag] = NULL;
                emlxs_sli4_free_xri(hba, sbp, sbp->xp);
            }
        } else {
            sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
                iocb->ULPIOTAG, 0);
        }

        if (sbp && (sbp != STALE_PACKET)) {
            mutex_enter(&sbp->mtx);
            sbp->pkt_flags |= PACKET_IN_FLUSH;

            /*
             * If the fpkt is already set, then we will leave it
             * alone. This ensures that this pkt is only accounted
             * for on one fpkt->flush_count
             */
            if (!sbp->fpkt && fpkt) {
                mutex_enter(&fpkt->mtx);
                sbp->fpkt = fpkt;
                fpkt->flush_count++;
                mutex_exit(&fpkt->mtx);
            }

            mutex_exit(&sbp->mtx);
        }

        iocbq = (IOCBQ *) iocbq->next;

    } /* end of while */

    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    /* Now abort the iocb's outside the locks */
    iocbq = (IOCBQ *)abort.q_first;
    while (iocbq) {
        /* Save the next iocbq for now */
        next = (IOCBQ *)iocbq->next;

        /* Unlink this iocbq */

--- 12 unchanged lines hidden ---

            } else {
                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_LINK_DOWN, 1);
            }

        }
        /* Free the iocb and its associated buffers */
        else {
            /* CMD_CLOSE_XRI_CN should also free the memory */
            icmd = &iocbq->iocb;

            /* SLI3 */
            if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
                icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
                icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
                if ((hba->flag &
                    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
                    /* HBA is detaching or offlining */
                    if (icmd->ULPCOMMAND !=
                        CMD_QUE_RING_LIST64_CN) {
                        uint8_t *tmp;
                        RING *rp;
                        int ch;

                        ch = ((CHANNEL *)
                            iocbq->channel)->channelno;
                        rp = &hba->sli.sli3.ring[ch];
                        for (i = 0; i < icmd->ULPBDECOUNT; i++) {
                            mp = EMLXS_GET_VADDR(hba, rp, icmd);

                            tmp = (uint8_t *)mp;
                            if (mp) {
                                (void) emlxs_mem_put(hba,
                                    MEM_BUF, tmp);
                            }
                        }
                    }

                    (void) emlxs_mem_put(hba, MEM_IOCB,
                        (uint8_t *)iocbq);
                } else {
                    /* repost the unsolicited buffer */
                    EMLXS_SLI_ISSUE_IOCB_CMD(hba,
                        (CHANNEL *)iocbq->channel, iocbq);
                }
            } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
                icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
                /*
                 * Resend the abort iocbq if any
                 */
                emlxs_tx_put(iocbq, 1);
            }
        }

        iocbq = next;

    } /* end of while */

    /* Now trigger channel service */
    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        if (!flag[channelno]) {
            continue;
        }

        EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
    }

    return (abort.q_cnt);

} /* emlxs_tx_node_flush() */

/* Check for IO's on all or a given channel for a given node */
extern uint32_t
emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
{
    emlxs_hba_t *hba = HBA;
    uint32_t channelno;
    CHANNEL *cp;
    uint32_t count;

    count = 0;

    /* Count all I/O's on the tx queues to this target */
    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        cp = &hba->chan[channelno];

        if (chan && cp != chan) {
            continue;
        }

        /* Check if priority queue is not empty */
        if (ndlp->nlp_ptx[channelno].q_first) {
            count += ndlp->nlp_ptx[channelno].q_cnt;
        }

        /* Check if tx queue is not empty */
        if (ndlp->nlp_tx[channelno].q_first) {
            count += ndlp->nlp_tx[channelno].q_cnt;
        }

    }

    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    return (count);

} /* emlxs_tx_node_check() */

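/*
 * Usage sketch (hypothetical caller, for illustration only): poll until
 * a node's TX queues drain on all channels before tearing the node down:
 *
 *	while (emlxs_tx_node_check(port, ndlp, NULL)) {
 *		delay(drv_usectohz(10000));
 *	}
 */
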
/* Flush all IO's on any channel for a given node's lun */
extern uint32_t
emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
    emlxs_buf_t *fpkt)
{
    emlxs_hba_t *hba = HBA;
    emlxs_buf_t *sbp;
    uint32_t channelno;
    IOCBQ *iocbq;
    IOCBQ *prev;
    IOCBQ *next;
    IOCB *iocb;
    IOCB *icmd;
    Q abort;
    uint32_t i;
    MATCHMAP *mp;
    CHANNEL *cp;
    CHANNEL *channel;
    uint8_t flag[MAX_CHANNEL];

    bzero((void *)&abort, sizeof (Q));
    bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));

    /* Flush I/O's on txQ to this target's lun */
    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

    channel = &hba->chan[hba->channel_fcp];

    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        cp = &hba->chan[channelno];

        if (channel && cp != channel) {
            continue;
        }

        /* Scan the priority queue first */
        prev = NULL;
        iocbq = (IOCBQ *) ndlp->nlp_ptx[channelno].q_first;

        while (iocbq) {
            next = (IOCBQ *)iocbq->next;
            iocb = &iocbq->iocb;
            sbp = (emlxs_buf_t *)iocbq->sbp;

            /* Check if this IO is for our lun */
            if (sbp && (sbp->lun == lun)) {
                /* Remove iocb from the node's ptx queue */
                if (next == 0) {
                    ndlp->nlp_ptx[channelno].q_last =
                        (uint8_t *)prev;
                }

                if (prev == 0) {
                    ndlp->nlp_ptx[channelno].q_first =
                        (uint8_t *)next;
                } else {
                    prev->next = next;
                }

                iocbq->next = NULL;
                ndlp->nlp_ptx[channelno].q_cnt--;

                /* Add this iocb to our local abort Q */
                if (abort.q_first) {
                    ((IOCBQ *)abort.q_last)->next = iocbq;
                    abort.q_last = (uint8_t *)iocbq;
                    abort.q_cnt++;
                } else {
                    abort.q_first = (uint8_t *)iocbq;
                    abort.q_last = (uint8_t *)iocbq;
                    abort.q_cnt = 1;
                }
                iocbq->next = NULL;
                flag[channelno] = 1;

            } else {
                prev = iocbq;
            }

            iocbq = next;

        } /* while (iocbq) */


        /* Scan the regular queue */
        prev = NULL;
        iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;

        while (iocbq) {
            next = (IOCBQ *)iocbq->next;
            iocb = &iocbq->iocb;
            sbp = (emlxs_buf_t *)iocbq->sbp;

            /* Check if this IO is for our lun */
            if (sbp && (sbp->lun == lun)) {
                /* Remove iocb from the node's tx queue */
                if (next == 0) {
                    ndlp->nlp_tx[channelno].q_last =
                        (uint8_t *)prev;
                }

                if (prev == 0) {
                    ndlp->nlp_tx[channelno].q_first =
                        (uint8_t *)next;
                } else {
                    prev->next = next;
                }

                iocbq->next = NULL;
                ndlp->nlp_tx[channelno].q_cnt--;

                /* Add this iocb to our local abort Q */
                if (abort.q_first) {
                    ((IOCBQ *) abort.q_last)->next = iocbq;
                    abort.q_last = (uint8_t *)iocbq;
                    abort.q_cnt++;
                } else {
                    abort.q_first = (uint8_t *)iocbq;
                    abort.q_last = (uint8_t *)iocbq;
                    abort.q_cnt = 1;
                }
                iocbq->next = NULL;
            } else {
                prev = iocbq;
            }

            iocbq = next;

        } /* while (iocbq) */
    } /* for loop */

    /* First cleanup the iocb's while still holding the lock */
    iocbq = (IOCBQ *)abort.q_first;
    while (iocbq) {
        /* Free the IoTag and the bmp */
        iocb = &iocbq->iocb;

        if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
            sbp = iocbq->sbp;
            if (sbp) {
                hba->fc_table[sbp->iotag] = NULL;
                emlxs_sli4_free_xri(hba, sbp, sbp->xp);
            }
        } else {
            sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
                iocb->ULPIOTAG, 0);
        }

        if (sbp && (sbp != STALE_PACKET)) {
            mutex_enter(&sbp->mtx);
            sbp->pkt_flags |= PACKET_IN_FLUSH;

            /*
             * If the fpkt is already set, then we will leave it
             * alone. This ensures that this pkt is only accounted
             * for on one fpkt->flush_count
             */
            if (!sbp->fpkt && fpkt) {
                mutex_enter(&fpkt->mtx);
                sbp->fpkt = fpkt;
                fpkt->flush_count++;
                mutex_exit(&fpkt->mtx);
            }

            mutex_exit(&sbp->mtx);
        }

        iocbq = (IOCBQ *) iocbq->next;

    } /* end of while */

    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    /* Now abort the iocb's outside the locks */
    iocbq = (IOCBQ *)abort.q_first;
    while (iocbq) {
        /* Save the next iocbq for now */
        next = (IOCBQ *)iocbq->next;

        /* Unlink this iocbq */

--- 12 unchanged lines hidden ---

            } else {
                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_LINK_DOWN, 1);
            }
        }

        /* Free the iocb and its associated buffers */
        else {
            /* Should never happen! */
            icmd = &iocbq->iocb;

            /* SLI3 */
            if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
                icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
                icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
                if ((hba->flag &
                    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
                    /* HBA is detaching or offlining */
                    if (icmd->ULPCOMMAND !=
                        CMD_QUE_RING_LIST64_CN) {
                        uint8_t *tmp;
                        RING *rp;
                        int ch;

                        ch = ((CHANNEL *)
                            iocbq->channel)->channelno;
                        rp = &hba->sli.sli3.ring[ch];
                        for (i = 0; i < icmd->ULPBDECOUNT; i++) {
                            mp = EMLXS_GET_VADDR(hba, rp, icmd);

                            tmp = (uint8_t *)mp;
                            if (mp) {
                                (void) emlxs_mem_put(hba,
                                    MEM_BUF, tmp);
                            }
                        }
                    }

                    (void) emlxs_mem_put(hba, MEM_IOCB,
                        (uint8_t *)iocbq);
                } else {
                    /* repost the unsolicited buffer */
                    EMLXS_SLI_ISSUE_IOCB_CMD(hba,
                        (CHANNEL *)iocbq->channel, iocbq);
                }
            } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
                icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
                /*
                 * Resend the abort iocbq if any
                 */
                emlxs_tx_put(iocbq, 1);
            }
        }

        iocbq = next;

    } /* end of while */

    /* Now trigger channel service */
    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        if (!flag[channelno]) {
            continue;
        }

        EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
    }

    return (abort.q_cnt);

} /* emlxs_tx_lun_flush() */

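/*
 * Design note (a summary of the flush routines above, not new
 * behavior): when a flushed queue entry carries no packet (sbp == NULL)
 * it is a driver-internal IOCB. Unsolicited buffer IOCBs
 * (CMD_QUE_RING_BUF*_CN / CMD_QUE_RING_LIST64_CN) are freed when the
 * HBA is detaching or offlining, and reposted otherwise; CMD_CLOSE_XRI_CN
 * and CMD_CLOSE_XRI_CX abort IOCBs are requeued via emlxs_tx_put() so
 * the XRI close is still sent once the channel is serviced again.
 */
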
extern void
emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
{
    emlxs_hba_t *hba;
    emlxs_port_t *port;
    uint32_t channelno;
    NODELIST *nlp;
    CHANNEL *cp;
    emlxs_buf_t *sbp;

    port = (emlxs_port_t *)iocbq->port;
    hba = HBA;
    cp = (CHANNEL *)iocbq->channel;
    nlp = (NODELIST *)iocbq->node;
    channelno = cp->channelno;
    sbp = (emlxs_buf_t *)iocbq->sbp;

    /* nlp may be NULL in some cases; default to the base node */
    if (nlp == NULL) {
        /* Set node to base node by default */
        nlp = &port->node_base;

        iocbq->node = (void *)nlp;

        if (sbp) {
            sbp->node = (void *)nlp;
        }
    }

    if (lock) {
        mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
    }

    if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
        if (sbp) {
            mutex_enter(&sbp->mtx);
            sbp->pkt_flags |= PACKET_IN_FLUSH;
            mutex_exit(&sbp->mtx);

            if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
                hba->fc_table[sbp->iotag] = NULL;
                emlxs_sli4_free_xri(hba, sbp, sbp->xp);
            } else {
                (void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
            }

            if (lock) {
                mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
            }

            if (hba->state >= FC_LINK_UP) {
                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_ABORT_REQUESTED, 1);
            } else {
                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_LINK_DOWN, 1);
            }
            return;
        } else {
            if (lock) {
                mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
            }

            (void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
        }

        return;
    }

    if (sbp) {

        mutex_enter(&sbp->mtx);

        if (sbp->pkt_flags &
            (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
            mutex_exit(&sbp->mtx);
            if (lock) {
                mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
            }
            return;
        }

        sbp->pkt_flags |= PACKET_IN_TXQ;
        hba->channel_tx_count++;

        mutex_exit(&sbp->mtx);
    }


    /* Check iocbq priority */
    /* Some IOCBs, such as reset/close XRI, have high priority */
    if (iocbq->flag & IOCB_PRIORITY) {
        /* Add the iocb to the bottom of the node's ptx queue */
        if (nlp->nlp_ptx[channelno].q_first) {
            ((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
            nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
            nlp->nlp_ptx[channelno].q_cnt++;
        } else {
            nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
            nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
            nlp->nlp_ptx[channelno].q_cnt = 1;
        }

        iocbq->next = NULL;
    } else {    /* Normal priority */

        /* Add the iocb to the bottom of the node's tx queue */
        if (nlp->nlp_tx[channelno].q_first) {
            ((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
            nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
            nlp->nlp_tx[channelno].q_cnt++;
        } else {
            nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
            nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
            nlp->nlp_tx[channelno].q_cnt = 1;
        }

        iocbq->next = NULL;
    }


    /*
     * Check if the node is not already on channel queue and
     * (is not closed or is a priority request)
     */
    if (!nlp->nlp_next[channelno] &&
        (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
        (iocbq->flag & IOCB_PRIORITY))) {
        /* If so, then add it to the channel queue */
        if (cp->nodeq.q_first) {
            ((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
                (uint8_t *)nlp;
            nlp->nlp_next[channelno] = cp->nodeq.q_first;

            /*
             * If this is not the base node then add it
             * to the tail
             */
            if (!nlp->nlp_base) {
                cp->nodeq.q_last = (uint8_t *)nlp;
            } else {    /* Otherwise, add it to the head */

                /* The command node always gets priority */
                cp->nodeq.q_first = (uint8_t *)nlp;
            }

            cp->nodeq.q_cnt++;
        } else {
            cp->nodeq.q_first = (uint8_t *)nlp;
            cp->nodeq.q_last = (uint8_t *)nlp;
            nlp->nlp_next[channelno] = nlp;
            cp->nodeq.q_cnt = 1;
        }
    }

    HBASTATS.IocbTxPut[channelno]++;

    /* Adjust the channel timeout timer */
    cp->timeout = hba->timer_tics + 5;

    if (lock) {
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
    }

    return;

} /* emlxs_tx_put() */

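/*
 * Queueing semantics (a summary of emlxs_tx_put() above, not new
 * behavior): IOCB_PRIORITY requests (e.g. reset/close XRI) go to the
 * node's ptx queue and may be queued even when the node is marked
 * NLP_CLOSED; normal requests go to the tx queue. When a node is
 * linked onto the channel's circular service list, the base (command)
 * node is inserted at the head so it drains first, while all other
 * nodes are appended at the tail.
 */
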

extern IOCBQ *
emlxs_tx_get(CHANNEL *cp, uint32_t lock)
{
    emlxs_hba_t *hba;
    uint32_t channelno;
    IOCBQ *iocbq;
    NODELIST *nlp;
    emlxs_buf_t *sbp;

    hba = cp->hba;
    channelno = cp->channelno;

    if (lock) {
        mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
    }

begin:

    iocbq = NULL;

    /* Check if a node needs servicing */
    if (cp->nodeq.q_first) {
        nlp = (NODELIST *)cp->nodeq.q_first;

        /* Get next iocb from node's priority queue */

        if (nlp->nlp_ptx[channelno].q_first) {
            iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;

            /* Check if this is last entry */
            if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
                nlp->nlp_ptx[channelno].q_first = NULL;
                nlp->nlp_ptx[channelno].q_last = NULL;
                nlp->nlp_ptx[channelno].q_cnt = 0;
            } else {
                /* Remove iocb from head */
                nlp->nlp_ptx[channelno].q_first =
                    (void *)iocbq->next;
                nlp->nlp_ptx[channelno].q_cnt--;
            }

            iocbq->next = NULL;
        }

        /* Get next iocb from node tx queue if node not closed */
        else if (nlp->nlp_tx[channelno].q_first &&
            !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
            iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;

            /* Check if this is last entry */
            if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
                nlp->nlp_tx[channelno].q_first = NULL;
                nlp->nlp_tx[channelno].q_last = NULL;
                nlp->nlp_tx[channelno].q_cnt = 0;
            } else {
                /* Remove iocb from head */
                nlp->nlp_tx[channelno].q_first =
                    (void *)iocbq->next;
                nlp->nlp_tx[channelno].q_cnt--;
            }

            iocbq->next = NULL;
        }

        /* Now deal with node itself */

        /* Check if node still needs servicing */
        if ((nlp->nlp_ptx[channelno].q_first) ||
            (nlp->nlp_tx[channelno].q_first &&
            !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {

            /*
             * If this is the base node, then don't shift the
             * pointers. We want to drain the base node before
             * moving on
             */
            if (!nlp->nlp_base) {
                /*
                 * Just shift channel queue pointers to next
                 * node
                 */
                cp->nodeq.q_last = (void *)nlp;
                cp->nodeq.q_first = nlp->nlp_next[channelno];
            }
        } else {
            /* Remove node from channel queue */

            /* If this is the last node on list */
            if (cp->nodeq.q_last == (void *)nlp) {
                cp->nodeq.q_last = NULL;
                cp->nodeq.q_first = NULL;
                cp->nodeq.q_cnt = 0;
            } else {
                /* Remove node from head */
                cp->nodeq.q_first = nlp->nlp_next[channelno];
                ((NODELIST *)cp->nodeq.q_last)->
                    nlp_next[channelno] = cp->nodeq.q_first;
                cp->nodeq.q_cnt--;

            }

            /* Clear node */
            nlp->nlp_next[channelno] = NULL;
        }

        /*
         * If no iocbq was found on this node, then it will have
         * been removed. So try again.
         */
        if (!iocbq) {
            goto begin;

--- 17 unchanged lines hidden ---

            if ((sbp->pkt_flags &
                (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
                !(sbp->pkt_flags & PACKET_IN_TXQ)) {
                mutex_exit(&sbp->mtx);
                goto begin;
            }

            sbp->pkt_flags &= ~PACKET_IN_TXQ;
            hba->channel_tx_count--;

            mutex_exit(&sbp->mtx);
        }
    }

    if (iocbq) {
        HBASTATS.IocbTxGet[channelno]++;
    }

    /* Adjust the channel timeout timer */
    cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;

    if (lock) {
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
    }

    return (iocbq);

} /* emlxs_tx_get() */

/*
 * Move all cmds for ndlp from from_chan's txq to to_chan's txq.
 * The old IoTag has to be released and a new one allocated; nothing
 * else changes. The TX_CHANNEL lock is taken here unless the caller
 * already holds it (lock == 0).
 */
extern void
emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
    uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
{
    emlxs_hba_t *hba;
    emlxs_port_t *port;
    uint32_t fchanno, tchanno, i;

    IOCBQ *iocbq;
    IOCBQ *prev;
    IOCBQ *next;
    IOCB *iocb, *icmd;
    Q tbm;		/* To Be Moved Q */
    MATCHMAP *mp;

    NODELIST *nlp = ndlp;
    emlxs_buf_t *sbp;

    NODELIST *n_prev = NULL;
    NODELIST *n_next = NULL;
    uint16_t count = 0;

    hba = from_chan->hba;
    port = &PPORT;
    cmd = cmd; /* To pass lint */

    fchanno = from_chan->channelno;
    tchanno = to_chan->channelno;

    if (lock) {
        mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
    }

    bzero((void *)&tbm, sizeof (Q));

    /* Scan the ndlp's fchanno txq for fcp cmd iocbs */
    prev = NULL;
    iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;

    while (iocbq) {
        next = (IOCBQ *)iocbq->next;
        /* Check if this iocb is a fcp cmd */
        iocb = &iocbq->iocb;

        switch (iocb->ULPCOMMAND) {
        /* FCP commands */
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
            /* We found a fcp cmd */
            break;
        default:
            /* This is not a fcp cmd, continue */
            prev = iocbq;
            iocbq = next;
            continue;
        }

        /* Found a fcp cmd iocb in fchanno txq, now dequeue it */
        if (next == NULL) {
            /* This is the last iocbq */
            nlp->nlp_tx[fchanno].q_last = (uint8_t *)prev;
        }

        if (prev == NULL) {
            /* This is the first one, remove it from the head */
            nlp->nlp_tx[fchanno].q_first = (uint8_t *)next;
        } else {
            prev->next = next;
        }

        iocbq->next = NULL;
        nlp->nlp_tx[fchanno].q_cnt--;

        /* Add this iocb to our local to-be-moved queue */
        /* This way we do not hold the TX_CHANNEL lock too long */

        if (tbm.q_first) {
            ((IOCBQ *)tbm.q_last)->next = iocbq;
            tbm.q_last = (uint8_t *)iocbq;
            tbm.q_cnt++;
        } else {
            tbm.q_first = (uint8_t *)iocbq;
            tbm.q_last = (uint8_t *)iocbq;
            tbm.q_cnt = 1;
        }

        iocbq = next;

    } /* While (iocbq) */

    if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {

        /* from_chan->nodeq.q_first must be non NULL */
        if (from_chan->nodeq.q_first) {

            /* nodeq is not empty, now deal with the node itself */
            if ((nlp->nlp_tx[fchanno].q_first)) {

                if (!nlp->nlp_base) {
                    from_chan->nodeq.q_last = (void *)nlp;
                    from_chan->nodeq.q_first =
                        nlp->nlp_next[fchanno];
                }

            } else {
                n_prev = (NODELIST *)from_chan->nodeq.q_first;
                count = from_chan->nodeq.q_cnt;

                if (n_prev == nlp) {

                    /* If this is the only node on list */
                    if (from_chan->nodeq.q_last == (void *)nlp) {
                        from_chan->nodeq.q_last = NULL;
                        from_chan->nodeq.q_first = NULL;
                        from_chan->nodeq.q_cnt = 0;
                    } else {
                        from_chan->nodeq.q_first =
                            nlp->nlp_next[fchanno];
                        ((NODELIST *)from_chan->nodeq.q_last)->
                            nlp_next[fchanno] =
                            from_chan->nodeq.q_first;
                        from_chan->nodeq.q_cnt--;
                    }
                    /* Clear node */
                    nlp->nlp_next[fchanno] = NULL;
                } else {
                    count--;
                    do {
                        n_next = n_prev->nlp_next[fchanno];
                        if (n_next == nlp) {
                            break;
                        }
                        n_prev = n_next;
                    } while (count--);

                    if (count != 0) {

                        if (n_next ==
                            (NODELIST *)from_chan->nodeq.q_last) {
                            n_prev->nlp_next[fchanno] =
                                ((NODELIST *)
                                from_chan->nodeq.q_last)->
                                nlp_next[fchanno];
                            from_chan->nodeq.q_last =
                                (uint8_t *)n_prev;
                        } else {
                            n_prev->nlp_next[fchanno] =
                                n_next->nlp_next[fchanno];
                        }
                        from_chan->nodeq.q_cnt--;
                        /* Clear node */
                        nlp->nlp_next[fchanno] = NULL;
                    }
                }
            }
        }
    }

    /* Now cleanup the iocb's */
    prev = NULL;
    iocbq = (IOCBQ *)tbm.q_first;

    while (iocbq) {

        next = (IOCBQ *)iocbq->next;

        /* Free the IoTag and the bmp */
        iocb = &iocbq->iocb;

        if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
            sbp = iocbq->sbp;
            if (sbp) {
                hba->fc_table[sbp->iotag] = NULL;
                emlxs_sli4_free_xri(hba, sbp, sbp->xp);
            }
        } else {
            sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
                iocb->ULPIOTAG, 0);
        }

        if (sbp && (sbp != STALE_PACKET)) {
            mutex_enter(&sbp->mtx);
            sbp->pkt_flags |= PACKET_IN_FLUSH;

            /*
             * If the fpkt is already set, then we will leave it
             * alone. This ensures that this pkt is only accounted
             * for on one fpkt->flush_count
             */
            if (!sbp->fpkt && fpkt) {
                mutex_enter(&fpkt->mtx);
                sbp->fpkt = fpkt;
                fpkt->flush_count++;
                mutex_exit(&fpkt->mtx);
            }
            mutex_exit(&sbp->mtx);
        }
        iocbq = next;

    } /* end of while */

    iocbq = (IOCBQ *)tbm.q_first;
    while (iocbq) {
        /* Save the next iocbq for now */
        next = (IOCBQ *)iocbq->next;

        /* Unlink this iocbq */
        iocbq->next = NULL;

        /* Get the pkt */
        sbp = (emlxs_buf_t *)iocbq->sbp;

        if (sbp) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
                "tx: sbp=%p node=%p", sbp, sbp->node);

            if (hba->state >= FC_LINK_UP) {
                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_ABORT_REQUESTED, 1);
            } else {
                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_LINK_DOWN, 1);
            }

        }
        /* Free the iocb and its associated buffers */
        else {
            icmd = &iocbq->iocb;

            /* SLI3 */
            if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
                icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
                icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
                if ((hba->flag &
                    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
                    /* HBA is detaching or offlining */
                    if (icmd->ULPCOMMAND !=
                        CMD_QUE_RING_LIST64_CN) {
                        uint8_t *tmp;
                        RING *rp;
                        int ch;

                        ch = from_chan->channelno;
                        rp = &hba->sli.sli3.ring[ch];

                        for (i = 0; i < icmd->ULPBDECOUNT; i++) {
                            mp = EMLXS_GET_VADDR(hba, rp, icmd);

                            tmp = (uint8_t *)mp;
                            if (mp) {
                                (void) emlxs_mem_put(hba,
                                    MEM_BUF, tmp);
                            }
                        }

                    }

                    (void) emlxs_mem_put(hba, MEM_IOCB,
                        (uint8_t *)iocbq);
                } else {
                    /* repost the unsolicited buffer */
                    EMLXS_SLI_ISSUE_IOCB_CMD(hba,
                        from_chan, iocbq);
                }
            }
        }

        iocbq = next;

    } /* end of while */

    /* Now flush the chipq if any */
    if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {

        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

        (void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);

        mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
    }

    if (lock) {
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
    }

    return;

} /* emlxs_tx_move */

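/*
 * Locking note (a summary of emlxs_tx_move() above): the routine
 * briefly drops EMLXS_TX_CHANNEL_LOCK around emlxs_chipq_node_flush()
 * and then reacquires it, so a caller that enters with the lock already
 * held (lock == 0) must be prepared for the channel queues to change
 * across this call.
 */
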
3443
3444
3017extern uint32_t
3445extern uint32_t
3018emlxs_chipq_node_flush(emlxs_port_t *port, RING *ring, NODELIST *ndlp,
3446emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
3019 emlxs_buf_t *fpkt)
3020{
3021 emlxs_hba_t *hba = HBA;
3022 emlxs_buf_t *sbp;
3023 IOCBQ *iocbq;
3024 IOCBQ *next;
3025 Q abort;
3447 emlxs_buf_t *fpkt)
3448{
3449 emlxs_hba_t *hba = HBA;
3450 emlxs_buf_t *sbp;
3451 IOCBQ *iocbq;
3452 IOCBQ *next;
3453 Q abort;
3026 RING *rp;
3027 uint32_t ringno;
3028 uint8_t flag[MAX_RINGS];
3454 CHANNEL *cp;
3455 uint32_t channelno;
3456 uint8_t flag[MAX_CHANNEL];
3029 uint32_t iotag;
3030
3031 bzero((void *)&abort, sizeof (Q));
3032 bzero((void *)flag, sizeof (flag));
3033
3457 uint32_t iotag;
3458
3459 bzero((void *)&abort, sizeof (Q));
3460 bzero((void *)flag, sizeof (flag));
3461
3034 for (ringno = 0; ringno < hba->ring_count; ringno++) {
3035 rp = &hba->ring[ringno];
3462 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3463 cp = &hba->chan[channelno];
3036
3464
3037 if (ring && rp != ring) {
3465 if (chan && cp != chan) {
3038 continue;
3039 }
3040
3466 continue;
3467 }
3468
3041 mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3469 mutex_enter(&EMLXS_FCTAB_LOCK);
3042
3470
3043 for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3044 sbp = rp->fc_table[iotag];
3471 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3472 sbp = hba->fc_table[iotag];
3045
3046 if (sbp && (sbp != STALE_PACKET) &&
3047 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3048 (sbp->node == ndlp) &&
3473
3474 if (sbp && (sbp != STALE_PACKET) &&
3475 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3476 (sbp->node == ndlp) &&
3049 (sbp->ring == rp) &&
3477 (sbp->channel == cp) &&
3050 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3051 emlxs_sbp_abort_add(port, sbp, &abort, flag,
3052 fpkt);
3053 }
3054
3055 }
3478 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3479 emlxs_sbp_abort_add(port, sbp, &abort, flag,
3480 fpkt);
3481 }
3482
3483 }
3056 mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3484 mutex_exit(&EMLXS_FCTAB_LOCK);
3057
3058 } /* for */
3059
3060 /* Now put the iocb's on the tx queue */
3061 iocbq = (IOCBQ *)abort.q_first;
3062 while (iocbq) {
3063 /* Save the next iocbq for now */
3064 next = (IOCBQ *)iocbq->next;
3065
3066 /* Unlink this iocbq */
3067 iocbq->next = NULL;
3068
3069 /* Send this iocbq */
3070 emlxs_tx_put(iocbq, 1);
3071
3072 iocbq = next;
3073 }
3074
3485
3486 } /* for */
3487
3488 /* Now put the iocb's on the tx queue */
3489 iocbq = (IOCBQ *)abort.q_first;
3490 while (iocbq) {
3491 /* Save the next iocbq for now */
3492 next = (IOCBQ *)iocbq->next;
3493
3494 /* Unlink this iocbq */
3495 iocbq->next = NULL;
3496
3497 /* Send this iocbq */
3498 emlxs_tx_put(iocbq, 1);
3499
3500 iocbq = next;
3501 }
3502
3075 /* Now trigger ring service */
3076 for (ringno = 0; ringno < hba->ring_count; ringno++) {
3077 if (!flag[ringno]) {
3503 /* Now trigger channel service */
3504 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3505 if (!flag[channelno]) {
3078 continue;
3079 }
3080
3506 continue;
3507 }
3508
3081 rp = &hba->ring[ringno];
3082
3083 emlxs_sli_issue_iocb_cmd(hba, rp, 0);
3509 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3084 }
3085
3086 return (abort.q_cnt);
3087
3510 }
3511
3512 return (abort.q_cnt);
3513
3088} /* emlxs_chipq_node_flush() */
3514} /* emlxs_chipq_node_flush() */
3089
3090
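/*
 * Sketch of the local abort queue idiom used in emlxs_chipq_node_flush()
 * above: entries are collected on a private, intrusively linked Q while
 * the FCTAB lock is held, then drained lock-free afterwards.  The Q layout
 * (q_first/q_last/q_cnt) mirrors the driver's Q, but node_t is a
 * simplified stand-in for IOCBQ.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef struct node {
        struct node *next;
        int id;
} node_t;

typedef struct {
        node_t *q_first;
        node_t *q_last;
        uint32_t q_cnt;
} q_t;

static void
q_append(q_t *q, node_t *n)
{
        n->next = NULL;
        if (q->q_first) {
                q->q_last->next = n;
                q->q_last = n;
                q->q_cnt++;
        } else {
                q->q_first = n;
                q->q_last = n;
                q->q_cnt = 1;
        }
}

int
main(void)
{
        q_t q = { NULL, NULL, 0 };
        node_t a = { NULL, 1 };
        node_t b = { NULL, 2 };
        node_t *n, *next;

        q_append(&q, &a);
        q_append(&q, &b);

        /* Drain: save next, unlink, then process, as the flush loops do */
        for (n = q.q_first; n != NULL; n = next) {
                next = n->next;
                n->next = NULL;
                (void) printf("draining %d\n", n->id);
        }
        return (0);
}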
3091/* Flush all IO's left on all iotag lists */
3515
3516
3517/* Flush all IO's left on all iotag lists */
3092static uint32_t
3518extern uint32_t
3093emlxs_iotag_flush(emlxs_hba_t *hba)
3094{
3095 emlxs_port_t *port = &PPORT;
3096 emlxs_buf_t *sbp;
3097 IOCBQ *iocbq;
3098 IOCB *iocb;
3099 Q abort;
3519emlxs_iotag_flush(emlxs_hba_t *hba)
3520{
3521 emlxs_port_t *port = &PPORT;
3522 emlxs_buf_t *sbp;
3523 IOCBQ *iocbq;
3524 IOCB *iocb;
3525 Q abort;
3100 RING *rp;
3101 uint32_t ringno;
3526 CHANNEL *cp;
3527 uint32_t channelno;
3102 uint32_t iotag;
3103 uint32_t count;
3104
3105 count = 0;
3528 uint32_t iotag;
3529 uint32_t count;
3530
3531 count = 0;
3106 for (ringno = 0; ringno < hba->ring_count; ringno++) {
3107 rp = &hba->ring[ringno];
3532 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3533 cp = &hba->chan[channelno];
3108
3109 bzero((void *)&abort, sizeof (Q));
3110
3534
3535 bzero((void *)&abort, sizeof (Q));
3536
3111 mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3537 mutex_enter(&EMLXS_FCTAB_LOCK);
3112
3538
3113 for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3114 sbp = rp->fc_table[iotag];
3539 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3540 sbp = hba->fc_table[iotag];
3115
3541
3542 /* Check if the slot is empty */
3116 if (!sbp || (sbp == STALE_PACKET)) {
3117 continue;
3118 }
3119
3543 if (!sbp || (sbp == STALE_PACKET)) {
3544 continue;
3545 }
3546
3120 /* Unregister the packet */
3121 rp->fc_table[iotag] = STALE_PACKET;
3122 hba->io_count[ringno]--;
3123 sbp->iotag = 0;
3547 /* We are building an abort list per channel */
3548 if (sbp->channel != cp) {
3549 continue;
3550 }
3124
3551
3125 /* Clean up the sbp */
3126 mutex_enter(&sbp->mtx);
3127
3128 /* Set IOCB status */
3129 iocbq = &sbp->iocbq;
3130 iocb = &iocbq->iocb;
3131
3552 /* Set IOCB status */
3553 iocbq = &sbp->iocbq;
3554 iocb = &iocbq->iocb;
3555
3132 iocb->ulpStatus = IOSTAT_LOCAL_REJECT;
3556 iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
3133 iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3557 iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3134 iocb->ulpLe = 1;
3558 iocb->ULPLE = 1;
3135 iocbq->next = NULL;
3136
3559 iocbq->next = NULL;
3560
3137 if (sbp->pkt_flags & PACKET_IN_TXQ) {
3138 sbp->pkt_flags &= ~PACKET_IN_TXQ;
3139 hba->ring_tx_count[ringno]--;
3140 }
3561 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3562 hba->fc_table[iotag] = NULL;
3563 emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3564 } else {
3565 hba->fc_table[iotag] = STALE_PACKET;
3566 hba->io_count--;
3567 sbp->iotag = 0;
3141
3568
3142 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3143 sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3144 }
3569 /* Clean up the sbp */
3570 mutex_enter(&sbp->mtx);
3145
3571
3146 if (sbp->bmp) {
3147 (void) emlxs_mem_put(hba, MEM_BPL,
3148 (uint8_t *)sbp->bmp);
3149 sbp->bmp = 0;
3572 if (sbp->pkt_flags & PACKET_IN_TXQ) {
3573 sbp->pkt_flags &= ~PACKET_IN_TXQ;
3574 hba->channel_tx_count--;
3575 }
3576
3577 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3578 sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3579 }
3580
3581 if (sbp->bmp) {
3582 (void) emlxs_mem_put(hba, MEM_BPL,
3583 (uint8_t *)sbp->bmp);
3584 sbp->bmp = 0;
3585 }
3586
3587 mutex_exit(&sbp->mtx);
3150 }
3151
3152 /* At this point all nodes are assumed destroyed */
3588 }
3589
3590 /* At this point all nodes are assumed destroyed */
3591 mutex_enter(&sbp->mtx);
3153 sbp->node = 0;
3592 sbp->node = 0;
3154
3155 mutex_exit(&sbp->mtx);
3156
3157 /* Add this iocb to our local abort Q */
3158 if (abort.q_first) {
3159 ((IOCBQ *)abort.q_last)->next = iocbq;
3160 abort.q_last = (uint8_t *)iocbq;
3161 abort.q_cnt++;
3162 } else {
3163 abort.q_first = (uint8_t *)iocbq;
3164 abort.q_last = (uint8_t *)iocbq;
3165 abort.q_cnt = 1;
3166 }
3167 }
3168
3593 mutex_exit(&sbp->mtx);
3594
3595 /* Add this iocb to our local abort Q */
3596 if (abort.q_first) {
3597 ((IOCBQ *)abort.q_last)->next = iocbq;
3598 abort.q_last = (uint8_t *)iocbq;
3599 abort.q_cnt++;
3600 } else {
3601 abort.q_first = (uint8_t *)iocbq;
3602 abort.q_last = (uint8_t *)iocbq;
3603 abort.q_cnt = 1;
3604 }
3605 }
3606
3169 mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3607 mutex_exit(&EMLXS_FCTAB_LOCK);
3170
3171 /* Trigger deferred completion */
3172 if (abort.q_first) {
3608
3609 /* Trigger deferred completion */
3610 if (abort.q_first) {
3173 mutex_enter(&rp->rsp_lock);
3174 if (rp->rsp_head == NULL) {
3175 rp->rsp_head = (IOCBQ *)abort.q_first;
3176 rp->rsp_tail = (IOCBQ *)abort.q_last;
3611 mutex_enter(&cp->rsp_lock);
3612 if (cp->rsp_head == NULL) {
3613 cp->rsp_head = (IOCBQ *)abort.q_first;
3614 cp->rsp_tail = (IOCBQ *)abort.q_last;
3177 } else {
3615 } else {
3178 rp->rsp_tail->next = (IOCBQ *)abort.q_first;
3179 rp->rsp_tail = (IOCBQ *)abort.q_last;
3616 cp->rsp_tail->next = (IOCBQ *)abort.q_first;
3617 cp->rsp_tail = (IOCBQ *)abort.q_last;
3180 }
3618 }
3181 mutex_exit(&rp->rsp_lock);
3619 mutex_exit(&cp->rsp_lock);
3182
3620
3183 emlxs_thread_trigger2(&rp->intr_thread,
3184 emlxs_proc_ring, rp);
3621 emlxs_thread_trigger2(&cp->intr_thread,
3622 emlxs_proc_channel, cp);
3185
3623
3186 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3187 "Forced iotag completion. ring=%d count=%d",
3188 ringno, abort.q_cnt);
3624 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
3625 "Forced iotag completion. channel=%d count=%d",
3626 channelno, abort.q_cnt);
3189
3190 count += abort.q_cnt;
3191 }
3192 }
3193
3194 return (count);
3195
3627
3628 count += abort.q_cnt;
3629 }
3630 }
3631
3632 return (count);
3633
3196} /* emlxs_iotag_flush() */
3634} /* emlxs_iotag_flush() */
3197
3198
3199
3635
3636
3637
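/*
 * Sketch of the deferred-completion handoff in emlxs_iotag_flush() above:
 * the flusher splices its private abort list onto the channel's rsp list
 * under rsp_lock, then wakes the channel's service thread so completions
 * run in thread context instead of under the FCTAB lock.  pthreads stand
 * in for emlxs_thread_trigger2()/emlxs_proc_channel(); item_t stands in
 * for IOCBQ.
 */
#include <pthread.h>
#include <stdio.h>

typedef struct item { struct item *next; int id; } item_t;

static pthread_mutex_t rsp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rsp_cv = PTHREAD_COND_INITIALIZER;
static item_t *rsp_head;
static item_t *rsp_tail;

static void
rsp_splice(item_t *first, item_t *last)
{
        (void) pthread_mutex_lock(&rsp_lock);
        if (rsp_head == NULL) {
                rsp_head = first;
        } else {
                rsp_tail->next = first;
        }
        rsp_tail = last;
        (void) pthread_cond_signal(&rsp_cv);    /* trigger service thread */
        (void) pthread_mutex_unlock(&rsp_lock);
}

static void *
service_thread(void *arg)
{
        item_t *i;

        (void) pthread_mutex_lock(&rsp_lock);
        while (rsp_head == NULL)
                (void) pthread_cond_wait(&rsp_cv, &rsp_lock);
        while ((i = rsp_head) != NULL) {
                rsp_head = i->next;
                (void) printf("completing iotag %d\n", i->id);
        }
        (void) pthread_mutex_unlock(&rsp_lock);
        return (arg);
}

int
main(void)
{
        pthread_t tid;
        item_t a = { NULL, 1 };
        item_t b = { NULL, 2 };

        a.next = &b;
        (void) pthread_create(&tid, NULL, service_thread, NULL);
        rsp_splice(&a, &b);
        (void) pthread_join(tid, NULL);
        return (0);
}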
3200/* Checks for IO's on all or a given ring for a given node */
3638/* Checks for IO's on all or a given channel for a given node */
3201extern uint32_t
3639extern uint32_t
3202emlxs_chipq_node_check(emlxs_port_t *port, RING *ring, NODELIST *ndlp)
3640emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
3203{
3204 emlxs_hba_t *hba = HBA;
3205 emlxs_buf_t *sbp;
3641{
3642 emlxs_hba_t *hba = HBA;
3643 emlxs_buf_t *sbp;
3206 RING *rp;
3207 uint32_t ringno;
3644 CHANNEL *cp;
3645 uint32_t channelno;
3208 uint32_t count;
3209 uint32_t iotag;
3210
3211 count = 0;
3212
3646 uint32_t count;
3647 uint32_t iotag;
3648
3649 count = 0;
3650
3213 for (ringno = 0; ringno < hba->ring_count; ringno++) {
3214 rp = &hba->ring[ringno];
3651 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3652 cp = &hba->chan[channelno];
3215
3653
3216 if (ring && rp != ring) {
3654 if (chan && cp != chan) {
3217 continue;
3218 }
3219
3655 continue;
3656 }
3657
3220 mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3658 mutex_enter(&EMLXS_FCTAB_LOCK);
3221
3659
3222 for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3223 sbp = rp->fc_table[iotag];
3660 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3661 sbp = hba->fc_table[iotag];
3224
3225 if (sbp && (sbp != STALE_PACKET) &&
3226 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3227 (sbp->node == ndlp) &&
3662
3663 if (sbp && (sbp != STALE_PACKET) &&
3664 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3665 (sbp->node == ndlp) &&
3228 (sbp->ring == rp) &&
3666 (sbp->channel == cp) &&
3229 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3230 count++;
3231 }
3232
3233 }
3667 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3668 count++;
3669 }
3670
3671 }
3234 mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3672 mutex_exit(&EMLXS_FCTAB_LOCK);
3235
3236 } /* for */
3237
3238 return (count);
3239
3673
3674 } /* for */
3675
3676 return (count);
3677
3240} /* emlxs_chipq_node_check() */
3678} /* emlxs_chipq_node_check() */
3241
3242
3243
3679
3680
3681
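/*
 * Sketch of the fc_table scan in emlxs_chipq_node_check() above: slot 0 is
 * never used, an empty slot is NULL, and a slot whose I/O was force-flushed
 * is left holding the STALE_PACKET sentinel (see emlxs_iotag_flush()), so a
 * scan can tell "never allocated" from "flushed but possibly still
 * completing".  The sentinel value below is invented for the sketch; the
 * driver defines its own STALE_PACKET.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef struct io { int node; } io_t;

#define STALE_PACKET    ((io_t *)(uintptr_t)-1)
#define MAX_IOTAG       8

static io_t *fc_table[MAX_IOTAG];

static uint32_t
node_io_count(int node)
{
        uint32_t iotag, count = 0;

        for (iotag = 1; iotag < MAX_IOTAG; iotag++) {
                io_t *io = fc_table[iotag];

                if (io && (io != STALE_PACKET) && (io->node == node))
                        count++;
        }
        return (count);
}

int
main(void)
{
        io_t a = { 7 };
        io_t b = { 7 };

        fc_table[1] = &a;
        fc_table[2] = STALE_PACKET;     /* flushed slot: skipped */
        fc_table[3] = &b;
        (void) printf("in flight for node 7: %u\n", node_io_count(7));
        return (0);
}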
3244/* Flush all IO's for a given node's lun (FC_FCP_RING only) */
3682/* Flush all IO's for a given node's lun (on any channel) */
3245extern uint32_t
3683extern uint32_t
3246emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
3247 emlxs_buf_t *fpkt)
3684emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3685 uint32_t lun, emlxs_buf_t *fpkt)
3248{
3249 emlxs_hba_t *hba = HBA;
3250 emlxs_buf_t *sbp;
3686{
3687 emlxs_hba_t *hba = HBA;
3688 emlxs_buf_t *sbp;
3251 RING *rp;
3252 IOCBQ *iocbq;
3253 IOCBQ *next;
3254 Q abort;
3255 uint32_t iotag;
3689 IOCBQ *iocbq;
3690 IOCBQ *next;
3691 Q abort;
3692 uint32_t iotag;
3256 uint8_t flag[MAX_RINGS];
3693 uint8_t flag[MAX_CHANNEL];
3694 uint32_t channelno;
3257
3258 bzero((void *)flag, sizeof (flag));
3259 bzero((void *)&abort, sizeof (Q));
3695
3696 bzero((void *)flag, sizeof (flag));
3697 bzero((void *)&abort, sizeof (Q));
3260 rp = &hba->ring[FC_FCP_RING];
3261
3698
3262 mutex_enter(&EMLXS_FCTAB_LOCK(FC_FCP_RING));
3263 for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3264 sbp = rp->fc_table[iotag];
3699 mutex_enter(&EMLXS_FCTAB_LOCK);
3700 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3701 sbp = hba->fc_table[iotag];
3265
3266 if (sbp && (sbp != STALE_PACKET) &&
3267 sbp->pkt_flags & PACKET_IN_CHIPQ &&
3268 sbp->node == ndlp &&
3702
3703 if (sbp && (sbp != STALE_PACKET) &&
3704 sbp->pkt_flags & PACKET_IN_CHIPQ &&
3705 sbp->node == ndlp &&
3269 sbp->ring == rp &&
3270 sbp->lun == lun &&
3271 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3706 sbp->lun == lun &&
3707 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3272 emlxs_sbp_abort_add(port, sbp, &abort, flag, fpkt);
3708 emlxs_sbp_abort_add(port, sbp,
3709 &abort, flag, fpkt);
3273 }
3274 }
3710 }
3711 }
3275 mutex_exit(&EMLXS_FCTAB_LOCK(FC_FCP_RING));
3712 mutex_exit(&EMLXS_FCTAB_LOCK);
3276
3277 /* Now put the iocb's on the tx queue */
3278 iocbq = (IOCBQ *)abort.q_first;
3279 while (iocbq) {
3280 /* Save the next iocbq for now */
3281 next = (IOCBQ *)iocbq->next;
3282
3283 /* Unlink this iocbq */
3284 iocbq->next = NULL;
3285
3286 /* Send this iocbq */
3287 emlxs_tx_put(iocbq, 1);
3288
3289 iocbq = next;
3290 }
3291
3713
3714 /* Now put the iocb's on the tx queue */
3715 iocbq = (IOCBQ *)abort.q_first;
3716 while (iocbq) {
3717 /* Save the next iocbq for now */
3718 next = (IOCBQ *)iocbq->next;
3719
3720 /* Unlink this iocbq */
3721 iocbq->next = NULL;
3722
3723 /* Send this iocbq */
3724 emlxs_tx_put(iocbq, 1);
3725
3726 iocbq = next;
3727 }
3728
3292 /* Now trigger ring service */
3293 if (abort.q_cnt) {
3294 emlxs_sli_issue_iocb_cmd(hba, rp, 0);
3729 /* Now trigger channel service */
3730 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3731 if (!flag[channelno]) {
3732 continue;
3733 }
3734
3735 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3295 }
3296
3297 return (abort.q_cnt);
3298
3736 }
3737
3738 return (abort.q_cnt);
3739
3299} /* emlxs_chipq_lun_flush() */
3740} /* emlxs_chipq_lun_flush() */
3300
3301
3302
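/*
 * Sketch of the per-channel flag array used by the flush paths above:
 * emlxs_sbp_abort_add() sets flag[channelno] for every abort it queues,
 * and the caller then kicks IOCB processing once per flagged channel
 * rather than once per abort.  MAX_CHANNEL's value here and kick_channel()
 * are invented for the sketch.
 */
#include <stdio.h>
#include <strings.h>
#include <stdint.h>

#define MAX_CHANNEL     4

static void
kick_channel(uint32_t channelno)
{
        (void) printf("servicing channel %u\n", channelno);
}

int
main(void)
{
        uint8_t flag[MAX_CHANNEL];
        uint32_t abort_chan[] = { 2, 2, 0, 2 }; /* channels of queued aborts */
        uint32_t i, channelno;

        bzero((void *)flag, sizeof (flag));
        for (i = 0; i < sizeof (abort_chan) / sizeof (abort_chan[0]); i++)
                flag[abort_chan[i]] = 1;        /* duplicates coalesce */

        for (channelno = 0; channelno < MAX_CHANNEL; channelno++) {
                if (!flag[channelno])
                        continue;
                kick_channel(channelno);        /* once for 0, once for 2 */
        }
        return (0);
}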
3303/*
3304 * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
3305 * This must be called while holding the EMLXS_FCTAB_LOCK
3306 */
3307extern IOCBQ *
3308emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3741
3742
3743
3744/*
3745 * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
3746 * This must be called while holding the EMLXS_FCTAB_LOCK
3747 */
3748extern IOCBQ *
3749emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3309 uint16_t iotag, RING *rp, uint8_t class, int32_t flag)
3750 uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
3310{
3311 emlxs_hba_t *hba = HBA;
3312 IOCBQ *iocbq;
3313 IOCB *iocb;
3751{
3752 emlxs_hba_t *hba = HBA;
3753 IOCBQ *iocbq;
3754 IOCB *iocb;
3755 emlxs_wqe_t *wqe;
3756 emlxs_buf_t *sbp;
3314 uint16_t abort_iotag;
3315
3757 uint16_t abort_iotag;
3758
3316 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3759 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3317 return (NULL);
3318 }
3319
3760 return (NULL);
3761 }
3762
3320 iocbq->ring = (void *)rp;
3763 iocbq->channel = (void *)cp;
3321 iocbq->port = (void *)port;
3322 iocbq->node = (void *)ndlp;
3323 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3764 iocbq->port = (void *)port;
3765 iocbq->node = (void *)ndlp;
3766 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3324 iocb = &iocbq->iocb;
3325
3326 /*
3327 * set up an iotag using special Abort iotags
3328 */
3767
3768 /*
3769 * set up an iotag using special Abort iotags
3770 */
3329 if ((rp->fc_abort_iotag < rp->max_iotag)) {
3330 rp->fc_abort_iotag = rp->max_iotag;
3771 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
3772 hba->fc_oor_iotag = hba->max_iotag;
3331 }
3773 }
3774 abort_iotag = hba->fc_oor_iotag++;
3332
3775
3333 abort_iotag = rp->fc_abort_iotag++;
3334
3776
3777 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3778 wqe = &iocbq->wqe;
3779 sbp = hba->fc_table[iotag];
3335
3780
3336 iocb->ulpIoTag = abort_iotag;
3337 iocb->un.acxri.abortType = flag;
3338 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3339 iocb->un.acxri.abortIoTag = iotag;
3340 iocb->ulpLe = 1;
3341 iocb->ulpClass = class;
3342 iocb->ulpCommand = CMD_ABORT_XRI_CN;
3343 iocb->ulpOwner = OWN_CHIP;
3781 /* Try to issue abort by XRI if possible */
3782 if (sbp == NULL || sbp == STALE_PACKET || sbp->xp == NULL) {
3783 wqe->un.Abort.Criteria = ABORT_REQ_TAG;
3784 wqe->AbortTag = iotag;
3785 } else {
3786 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3787 wqe->AbortTag = sbp->xp->XRI;
3788 }
3789 wqe->un.Abort.IA = 0;
3790 wqe->RequestTag = abort_iotag;
3791 wqe->Command = CMD_ABORT_XRI_CX;
3792 wqe->Class = CLASS3;
3793 wqe->CQId = 0x3ff;
3794 wqe->CmdType = WQE_TYPE_ABORT;
3795 } else {
3796 iocb = &iocbq->iocb;
3797 iocb->ULPIOTAG = abort_iotag;
3798 iocb->un.acxri.abortType = flag;
3799 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3800 iocb->un.acxri.abortIoTag = iotag;
3801 iocb->ULPLE = 1;
3802 iocb->ULPCLASS = class;
3803 iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
3804 iocb->ULPOWNER = OWN_CHIP;
3805 }
3344
3345 return (iocbq);
3346
3806
3807 return (iocbq);
3808
3347} /* emlxs_create_abort_xri_cn() */
3809} /* emlxs_create_abort_xri_cn() */
3348
3349
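/*
 * Sketch of the out-of-range abort iotag allocator used above: normal I/O
 * draws its iotags from [1, max_iotag), so abort/close commands take their
 * request tags from [max_iotag, MAX_ABORT_TAG) and wrap back to max_iotag
 * when that pool is exhausted.  The constant values below are invented so
 * the wrap is visible.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_IOTAG       4096    /* invented: first tag beyond normal I/O */
#define MAX_ABORT_TAG   4100    /* invented: tiny pool to show the wrap */

static uint32_t fc_oor_iotag = MAX_IOTAG;

static uint16_t
alloc_abort_iotag(void)
{
        if (fc_oor_iotag >= MAX_ABORT_TAG)
                fc_oor_iotag = MAX_IOTAG;       /* wrap around */
        return ((uint16_t)fc_oor_iotag++);
}

int
main(void)
{
        int i;

        for (i = 0; i < 6; i++)
                (void) printf("abort iotag %u\n", alloc_abort_iotag());
        /* prints 4096..4099, then wraps to 4096, 4097 */
        return (0);
}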
3350extern IOCBQ *
3351emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3810
3811
3812extern IOCBQ *
3813emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3352 RING *rp, uint8_t class, int32_t flag)
3814 CHANNEL *cp, uint8_t class, int32_t flag)
3353{
3354 emlxs_hba_t *hba = HBA;
3355 IOCBQ *iocbq;
3356 IOCB *iocb;
3815{
3816 emlxs_hba_t *hba = HBA;
3817 IOCBQ *iocbq;
3818 IOCB *iocb;
3819 emlxs_wqe_t *wqe;
3357 uint16_t abort_iotag;
3358
3820 uint16_t abort_iotag;
3821
3359 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3822 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3360 return (NULL);
3361 }
3362
3823 return (NULL);
3824 }
3825
3363 iocbq->ring = (void *)rp;
3826 iocbq->channel = (void *)cp;
3364 iocbq->port = (void *)port;
3365 iocbq->node = (void *)ndlp;
3366 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3827 iocbq->port = (void *)port;
3828 iocbq->node = (void *)ndlp;
3829 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3367 iocb = &iocbq->iocb;
3368
3369 /*
3370 * set up an iotag using special Abort iotags
3371 */
3830
3831 /*
3832 * set up an iotag using special Abort iotags
3833 */
3372 if ((rp->fc_abort_iotag < rp->max_iotag)) {
3373 rp->fc_abort_iotag = rp->max_iotag;
3834 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
3835 hba->fc_oor_iotag = hba->max_iotag;
3374 }
3836 }
3837 abort_iotag = hba->fc_oor_iotag++;
3375
3838
3376 abort_iotag = rp->fc_abort_iotag++;
3839 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3840 wqe = &iocbq->wqe;
3841 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3842 wqe->un.Abort.IA = 0;
3843 wqe->RequestTag = abort_iotag;
3844 wqe->AbortTag = xid;
3845 wqe->Command = CMD_ABORT_XRI_CX;
3846 wqe->Class = CLASS3;
3847 wqe->CQId = 0x3ff;
3848 wqe->CmdType = WQE_TYPE_ABORT;
3849 } else {
3850 iocb = &iocbq->iocb;
3851 iocb->ULPCONTEXT = xid;
3852 iocb->ULPIOTAG = abort_iotag;
3853 iocb->un.acxri.abortType = flag;
3854 iocb->ULPLE = 1;
3855 iocb->ULPCLASS = class;
3856 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
3857 iocb->ULPOWNER = OWN_CHIP;
3858 }
3377
3859
3378 iocb->ulpContext = xid;
3379 iocb->ulpIoTag = abort_iotag;
3380 iocb->un.acxri.abortType = flag;
3381 iocb->ulpLe = 1;
3382 iocb->ulpClass = class;
3383 iocb->ulpCommand = CMD_ABORT_XRI_CX;
3384 iocb->ulpOwner = OWN_CHIP;
3385
3386 return (iocbq);
3387
3860 return (iocbq);
3861
3388} /* emlxs_create_abort_xri_cx() */
3862} /* emlxs_create_abort_xri_cx() */
3389
3390
3391
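/*
 * Sketch of how the two SLI4 abort WQEs built above differ.  The
 * ABTS-style abort (emlxs_create_abort_xri_cn/_cx) sets un.Abort.IA = 0,
 * while the close variants set IA = 1; judging from where each is used
 * (close is chosen when the link is down), IA = 1 appears to tear the
 * exchange down locally without initiating an ABTS on the wire.  That
 * reading is an inference from this code, and wqe_t below is a
 * much-simplified stand-in for emlxs_wqe_t.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct {
        uint32_t criteria;      /* ABORT_XRI_TAG or ABORT_REQ_TAG */
        uint32_t ia;            /* 0 = on-wire ABTS, 1 = local teardown */
        uint16_t request_tag;   /* tag of the abort WQE itself */
        uint16_t abort_tag;     /* XRI (or iotag) being aborted */
} wqe_t;

static void
build_abort_wqe(wqe_t *wqe, uint16_t xri, uint16_t req_tag, int close)
{
        wqe->criteria = 1;              /* the ABORT_XRI_TAG case above */
        wqe->ia = close ? 1 : 0;
        wqe->request_tag = req_tag;
        wqe->abort_tag = xri;
}

int
main(void)
{
        wqe_t abort_wqe, close_wqe;

        build_abort_wqe(&abort_wqe, 0x10, 4096, 0);     /* on-wire ABTS */
        build_abort_wqe(&close_wqe, 0x10, 4097, 1);     /* local close */
        (void) printf("abort IA=%u close IA=%u\n",
            abort_wqe.ia, close_wqe.ia);
        return (0);
}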
3392/* This must be called while holding the EMLXS_FCTAB_LOCK */
3393extern IOCBQ *
3394emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3863
3864
3865
3866/* This must be called while holding the EMLXS_FCTAB_LOCK */
3867extern IOCBQ *
3868emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3395 uint16_t iotag, RING *rp)
3869 uint16_t iotag, CHANNEL *cp)
3396{
3397 emlxs_hba_t *hba = HBA;
3398 IOCBQ *iocbq;
3399 IOCB *iocb;
3870{
3871 emlxs_hba_t *hba = HBA;
3872 IOCBQ *iocbq;
3873 IOCB *iocb;
3874 emlxs_wqe_t *wqe;
3875 emlxs_buf_t *sbp;
3400 uint16_t abort_iotag;
3401
3876 uint16_t abort_iotag;
3877
3402 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3878 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3403 return (NULL);
3404 }
3405
3879 return (NULL);
3880 }
3881
3406 iocbq->ring = (void *)rp;
3882 iocbq->channel = (void *)cp;
3407 iocbq->port = (void *)port;
3408 iocbq->node = (void *)ndlp;
3409 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3883 iocbq->port = (void *)port;
3884 iocbq->node = (void *)ndlp;
3885 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3410 iocb = &iocbq->iocb;
3411
3412 /*
3413 * set up an iotag using special Abort iotags
3414 */
3886
3887 /*
3888 * set up an iotag using special Abort iotags
3889 */
3415 if ((rp->fc_abort_iotag < rp->max_iotag)) {
3416 rp->fc_abort_iotag = rp->max_iotag;
3890 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
3891 hba->fc_oor_iotag = hba->max_iotag;
3417 }
3892 }
3893 abort_iotag = hba->fc_oor_iotag++;
3418
3894
3419 abort_iotag = rp->fc_abort_iotag++;
3895 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3896 wqe = &iocbq->wqe;
3897 sbp = hba->fc_table[iotag];
3420
3898
3421 iocb->ulpIoTag = abort_iotag;
3422 iocb->un.acxri.abortType = 0;
3423 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3424 iocb->un.acxri.abortIoTag = iotag;
3425 iocb->ulpLe = 1;
3426 iocb->ulpClass = 0;
3427 iocb->ulpCommand = CMD_CLOSE_XRI_CN;
3428 iocb->ulpOwner = OWN_CHIP;
3899 /* Try to issue close by XRI if possible */
3900 if (sbp == NULL || sbp == STALE_PACKET || sbp->xp == NULL) {
3901 wqe->un.Abort.Criteria = ABORT_REQ_TAG;
3902 wqe->AbortTag = iotag;
3903 } else {
3904 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3905 wqe->AbortTag = sbp->xp->XRI;
3906 }
3907 wqe->un.Abort.IA = 1;
3908 wqe->RequestTag = abort_iotag;
3909 wqe->Command = CMD_ABORT_XRI_CX;
3910 wqe->Class = CLASS3;
3911 wqe->CQId = 0x3ff;
3912 wqe->CmdType = WQE_TYPE_ABORT;
3913 } else {
3914 iocb = &iocbq->iocb;
3915 iocb->ULPIOTAG = abort_iotag;
3916 iocb->un.acxri.abortType = 0;
3917 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3918 iocb->un.acxri.abortIoTag = iotag;
3919 iocb->ULPLE = 1;
3920 iocb->ULPCLASS = 0;
3921 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
3922 iocb->ULPOWNER = OWN_CHIP;
3923 }
3429
3430 return (iocbq);
3431
3924
3925 return (iocbq);
3926
3432} /* emlxs_create_close_xri_cn() */
3927} /* emlxs_create_close_xri_cn() */
3433
3434
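/*
 * Sketch of the criteria fallback in the SLI4 branches of
 * emlxs_create_abort_xri_cn()/emlxs_create_close_xri_cn() above: if the
 * iotag still resolves to a live packet with an assigned XRI, the abort
 * targets the XRI directly (ABORT_XRI_TAG); otherwise it falls back to
 * matching the original request tag (ABORT_REQ_TAG).  The types and the
 * two criteria values below are simplified stand-ins.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

enum { ABORT_REQ_TAG = 0, ABORT_XRI_TAG = 1 };

typedef struct { uint16_t xri; } xp_t;
typedef struct { xp_t *xp; } buf_t;

#define STALE_PACKET    ((buf_t *)(uintptr_t)-1)

static void
choose_criteria(buf_t *sbp, uint16_t iotag, int *criteria,
    uint16_t *abort_tag)
{
        if (sbp == NULL || sbp == STALE_PACKET || sbp->xp == NULL) {
                *criteria = ABORT_REQ_TAG;      /* fall back to request tag */
                *abort_tag = iotag;
        } else {
                *criteria = ABORT_XRI_TAG;      /* target the exchange */
                *abort_tag = sbp->xp->xri;
        }
}

int
main(void)
{
        xp_t x = { 0x123 };
        buf_t live = { &x };
        int crit;
        uint16_t tag;

        choose_criteria(&live, 42, &crit, &tag);
        (void) printf("live:  criteria=%d tag=0x%x\n", crit, tag);
        choose_criteria(NULL, 42, &crit, &tag);
        (void) printf("stale: criteria=%d tag=0x%x\n", crit, tag);
        return (0);
}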
3435/* This must be called while holding the EMLXS_FCTAB_LOCK */
3436extern IOCBQ *
3437emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3928
3929
3930/* This must be called while holding the EMLXS_FCTAB_LOCK */
3931extern IOCBQ *
3932emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3438 RING *rp)
3933 CHANNEL *cp)
3439{
3440 emlxs_hba_t *hba = HBA;
3441 IOCBQ *iocbq;
3442 IOCB *iocb;
3934{
3935 emlxs_hba_t *hba = HBA;
3936 IOCBQ *iocbq;
3937 IOCB *iocb;
3938 emlxs_wqe_t *wqe;
3443 uint16_t abort_iotag;
3444
3939 uint16_t abort_iotag;
3940
3445 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3941 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3446 return (NULL);
3447 }
3448
3942 return (NULL);
3943 }
3944
3449 iocbq->ring = (void *)rp;
3945 iocbq->channel = (void *)cp;
3450 iocbq->port = (void *)port;
3451 iocbq->node = (void *)ndlp;
3452 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3946 iocbq->port = (void *)port;
3947 iocbq->node = (void *)ndlp;
3948 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3453 iocb = &iocbq->iocb;
3454
3455 /*
3456 * set up an iotag using special Abort iotags
3457 */
3949
3950 /*
3951 * set up an iotag using special Abort iotags
3952 */
3458 if ((rp->fc_abort_iotag < rp->max_iotag)) {
3459 rp->fc_abort_iotag = rp->max_iotag;
3953 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
3954 hba->fc_oor_iotag = hba->max_iotag;
3460 }
3955 }
3956 abort_iotag = hba->fc_oor_iotag++;
3461
3957
3462 abort_iotag = rp->fc_abort_iotag++;
3958 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3959 wqe = &iocbq->wqe;
3960 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3961 wqe->un.Abort.IA = 1;
3962 wqe->RequestTag = abort_iotag;
3963 wqe->AbortTag = xid;
3964 wqe->Command = CMD_ABORT_XRI_CX;
3965 wqe->Class = CLASS3;
3966 wqe->CQId = 0x3ff;
3967 wqe->CmdType = WQE_TYPE_ABORT;
3968 } else {
3969 iocb = &iocbq->iocb;
3970 iocb->ULPCONTEXT = xid;
3971 iocb->ULPIOTAG = abort_iotag;
3972 iocb->ULPLE = 1;
3973 iocb->ULPCLASS = 0;
3974 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
3975 iocb->ULPOWNER = OWN_CHIP;
3976 }
3463
3977
3464 iocb->ulpContext = xid;
3465 iocb->ulpIoTag = abort_iotag;
3466 iocb->ulpLe = 1;
3467 iocb->ulpClass = 0;
3468 iocb->ulpCommand = CMD_CLOSE_XRI_CX;
3469 iocb->ulpOwner = OWN_CHIP;
3470
3471 return (iocbq);
3472
3978 return (iocbq);
3979
3473} /* emlxs_create_close_xri_cx() */
3980} /* emlxs_create_close_xri_cx() */
3474
3475
3476void
3477emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
3478{
3981
3982
3983void
3984emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
3985{
3479 RING *rp;
3986 CHANNEL *cp;
3480 IOCBQ *iocbq;
3481
3987 IOCBQ *iocbq;
3988
3482 rp = &hba->ring[FC_CT_RING];
3989 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
3990 "Aborting CT exchange: xid=%x", rxid);
3483
3991
3992 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3993 if (emlxs_sli4_unreserve_xri(hba, rxid) == 0) {
3994 /* We have no way to abort unsolicited exchanges */
3995 /* that we have not responded to at this time */
3996 /* So we will return for now */
3997 return;
3998 }
3999 }
4000
4001 cp = &hba->chan[hba->channel_ct];
4002
3484 /* Create the abort IOCB */
3485 if (hba->state >= FC_LINK_UP) {
3486 iocbq =
4003 /* Create the abort IOCB */
4004 if (hba->state >= FC_LINK_UP) {
4005 iocbq =
3487 emlxs_create_abort_xri_cx(port, NULL, rxid, rp, CLASS3,
4006 emlxs_create_abort_xri_cx(port, NULL, rxid, cp, CLASS3,
3488 ABORT_TYPE_ABTS);
3489 } else {
4007 ABORT_TYPE_ABTS);
4008 } else {
3490 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, rp);
4009 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
3491 }
4010 }
4011
3492 if (iocbq) {
4012 if (iocbq) {
3493 emlxs_sli_issue_iocb_cmd(hba, rp, iocbq);
4013 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
3494 }
4014 }
3495}
3496
4015
4016} /* emlxs_abort_ct_exchange() */
3497
4017
4018
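/*
 * Sketch of the link-state dispatch in emlxs_abort_ct_exchange() above:
 * with the link up an on-wire ABTS (abort_xri_cx) is appropriate; with the
 * link down there is no wire, so the exchange is simply closed
 * (close_xri_cx).  The state values and the string results are simplified
 * stand-ins for the driver's builders.
 */
#include <stdio.h>

enum { FC_LINK_DOWN = 0, FC_LINK_UP = 5 };

static const char *
choose_teardown(int hba_state)
{
        return ((hba_state >= FC_LINK_UP) ?
            "ABORT_XRI_CX (send ABTS)" : "CLOSE_XRI_CX (local close)");
}

int
main(void)
{
        (void) printf("link up:   %s\n", choose_teardown(FC_LINK_UP));
        (void) printf("link down: %s\n", choose_teardown(FC_LINK_DOWN));
        return (0);
}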
3498/* This must be called while holding the EMLXS_FCTAB_LOCK */
3499static void
3500emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
3501 uint8_t *flag, emlxs_buf_t *fpkt)
3502{
3503 emlxs_hba_t *hba = HBA;
3504 IOCBQ *iocbq;
4019/* This must be called while holding the EMLXS_FCTAB_LOCK */
4020static void
4021emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
4022 uint8_t *flag, emlxs_buf_t *fpkt)
4023{
4024 emlxs_hba_t *hba = HBA;
4025 IOCBQ *iocbq;
3505 RING *rp;
4026 CHANNEL *cp;
3506 NODELIST *ndlp;
3507
4027 NODELIST *ndlp;
4028
3508 rp = (RING *)sbp->ring;
4029 cp = (CHANNEL *)sbp->channel;
3509 ndlp = sbp->node;
3510
3511 /* Create the close XRI IOCB */
4030 ndlp = sbp->node;
4031
4032 /* Create the close XRI IOCB */
3512 iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, rp);
4033 iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
3513
3514 /*
3515 * Add this iocb to our local abort Q
3516 * This way we don't hold the CHIPQ lock too long
3517 */
3518 if (iocbq) {
3519 if (abort->q_first) {
3520 ((IOCBQ *)abort->q_last)->next = iocbq;

--- 6 unchanged lines hidden ---

3527 }
3528 iocbq->next = NULL;
3529 }
3530
3531 /* set the flags */
3532 mutex_enter(&sbp->mtx);
3533
3534 sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4034
4035 /*
4036 * Add this iocb to our local abort Q
4037 * This way we don't hold the CHIPQ lock too long
4038 */
4039 if (iocbq) {
4040 if (abort->q_first) {
4041 ((IOCBQ *)abort->q_last)->next = iocbq;

--- 6 unchanged lines hidden ---

4048 }
4049 iocbq->next = NULL;
4050 }
4051
4052 /* set the flags */
4053 mutex_enter(&sbp->mtx);
4054
4055 sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4056
3535 sbp->ticks = hba->timer_tics + 10;
3536 sbp->abort_attempts++;
3537
4057 sbp->ticks = hba->timer_tics + 10;
4058 sbp->abort_attempts++;
4059
3538 flag[rp->ringno] = 1;
4060 flag[cp->channelno] = 1;
3539
3540 /*
3541 * If the fpkt is already set, then we will leave it alone
3542 * This ensures that this pkt is only accounted for on one
3543 * fpkt->flush_count
3544 */
3545 if (!sbp->fpkt && fpkt) {
3546 mutex_enter(&fpkt->mtx);
3547 sbp->fpkt = fpkt;
3548 fpkt->flush_count++;
3549 mutex_exit(&fpkt->mtx);
3550 }
3551
3552 mutex_exit(&sbp->mtx);
3553
3554 return;
3555
4061
4062 /*
4063 * If the fpkt is already set, then we will leave it alone
4064 * This ensures that this pkt is only accounted for on one
4065 * fpkt->flush_count
4066 */
4067 if (!sbp->fpkt && fpkt) {
4068 mutex_enter(&fpkt->mtx);
4069 sbp->fpkt = fpkt;
4070 fpkt->flush_count++;
4071 mutex_exit(&fpkt->mtx);
4072 }
4073
4074 mutex_exit(&sbp->mtx);
4075
4076 return;
4077
3556} /* emlxs_sbp_abort_add() */
4078} /* emlxs_sbp_abort_add() */
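/*
 * Sketch of the abort watchdog idiom at the end of emlxs_sbp_abort_add()
 * above: each abort attempt stamps a deadline (timer_tics + 10) and bumps
 * abort_attempts, so a periodic timer can notice a close that never
 * completes and escalate.  The field names mirror the driver; the timer
 * loop itself is invented for the sketch.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct {
        uint32_t ticks;         /* deadline, in timer tics */
        uint32_t abort_attempts;
} buf_t;

static void
arm_abort_watchdog(buf_t *sbp, uint32_t timer_tics)
{
        sbp->ticks = timer_tics + 10;   /* revisit in ~10 tics */
        sbp->abort_attempts++;
}

int
main(void)
{
        buf_t sbp = { 0, 0 };
        uint32_t tics;

        arm_abort_watchdog(&sbp, 0);
        for (tics = 0; tics <= 12; tics++) {
                if (tics >= sbp.ticks) {
                        (void) printf(
                            "tic %u: abort timed out after %u attempt(s)\n",
                            tics, sbp.abort_attempts);
                        arm_abort_watchdog(&sbp, tics); /* retry/escalate */
                }
        }
        return (0);
}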