/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright 2010 QLogic Corporation */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
 *
 * ***********************************************************************
 * *									**
 * *				NOTICE					**
 * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
 * *			ALL RIGHTS RESERVED				**
 * *									**
 * ***********************************************************************
 *
 */

#include <ql_apps.h>
#include <ql_api.h>
#include <ql_debug.h>
#include <ql_iocb.h>
#include <ql_isr.h>
#include <ql_xioctl.h>

/*
 * Local Function Prototypes.
 */
static int ql_req_pkt(ql_adapter_state_t *, request_t **);
static void ql_continuation_iocb(ql_adapter_state_t *, ddi_dma_cookie_t *,
    uint16_t, boolean_t);
static void ql_isp24xx_rcvbuf(ql_adapter_state_t *);
static void ql_cmd_24xx_type_6_iocb(ql_adapter_state_t *, ql_srb_t *, void *);

/*
 * ql_start_iocb
 *	The start IOCB is responsible for building request packets
 *	on the request ring and modifying the ISP input pointer.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_start_iocb(ql_adapter_state_t *vha, ql_srb_t *sp)
{
	ql_link_t		*link;
	request_t		*pkt;
	uint64_t		*ptr64;
	uint32_t		cnt;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire ring lock. */
	REQUEST_RING_LOCK(ha);

	if (sp != NULL) {
		/*
		 * If the pending queue is not empty, maintain order
		 * by putting this srb at the tail and getting the head.
		 */
		if ((link = ha->pending_cmds.first) != NULL) {
			ql_add_link_b(&ha->pending_cmds, &sp->cmd);
			/* Remove command from pending command queue */
			sp = link->base_address;
			ql_remove_link(&ha->pending_cmds, &sp->cmd);
		}
	} else {
		/* Get command from pending command queue if not empty. */
		if ((link = ha->pending_cmds.first) == NULL) {
			/* Release ring specific lock */
			REQUEST_RING_UNLOCK(ha);
			QL_PRINT_3(CE_CONT, "(%d): empty done\n",
			    ha->instance);
			return;
		}
		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* start this request and as many others as possible */
	for (;;) {
		if (ha->req_q_cnt < sp->req_cnt) {
			/* Calculate number of free request entries. */
			cnt = RD16_IO_REG(ha, req_out);
			if (ha->req_ring_index < cnt) {
				ha->req_q_cnt = (uint16_t)
				    (cnt - ha->req_ring_index);
			} else {
				ha->req_q_cnt = (uint16_t)(REQUEST_ENTRY_CNT -
				    (ha->req_ring_index - cnt));
			}
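			/*
			 * One entry is held in reserve so that a
			 * completely full ring is never mistaken for an
			 * empty one (in-pointer equal to out-pointer).
			 */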
			if (ha->req_q_cnt != 0) {
				ha->req_q_cnt--;
			}

			/*
			 * If no room in request ring put this srb at
			 * the head of the pending queue and exit.
			 */
			if (ha->req_q_cnt < sp->req_cnt) {
				QL_PRINT_8(CE_CONT, "(%d): request ring full,"
				    " req_q_cnt=%d, req_ring_index=%d\n",
				    ha->instance, ha->req_q_cnt,
				    ha->req_ring_index);
				ql_add_link_t(&ha->pending_cmds, &sp->cmd);
				break;
			}
		}

		/* Check for room in outstanding command list. */
		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			ha->osc_index++;
			if (ha->osc_index == MAX_OUTSTANDING_COMMANDS) {
				ha->osc_index = 1;
			}
			if (ha->outstanding_cmds[ha->osc_index] == NULL) {
				break;
			}
		}
		/*
		 * If no room in outstanding array put this srb at
		 * the head of the pending queue and exit.
		 */
		if (cnt == MAX_OUTSTANDING_COMMANDS) {
			QL_PRINT_8(CE_CONT, "(%d): no room in outstanding "
			    "array\n", ha->instance);
			ql_add_link_t(&ha->pending_cmds, &sp->cmd);
			break;
		}

		/* nothing to stop us now. */
		ha->outstanding_cmds[ha->osc_index] = sp;
		/* create and save a unique response identifier in the srb */
		sp->handle = ha->adapter_stats->ncmds << OSC_INDEX_SHIFT |
		    ha->osc_index;
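		/*
		 * The low bits of the handle locate the outstanding-array
		 * slot; the rolling command count in the upper bits keeps
		 * the value unique across reuse of that slot.
		 */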
		ha->req_q_cnt -= sp->req_cnt;

		/* build the iocb in the request ring */
		pkt = ha->request_ring_ptr;
		sp->request_ring_ptr = pkt;
		sp->flags |= SRB_IN_TOKEN_ARRAY;

		/* Zero out packet. */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/* Setup IOCB common data. */
		pkt->entry_count = (uint8_t)sp->req_cnt;
		pkt->sys_define = (uint8_t)ha->req_ring_index;
		/* mark the iocb with the response identifier */
		ddi_put32(ha->hba_buf.acc_handle, &pkt->handle,
		    (uint32_t)sp->handle);

		/* Setup IOCB unique data. */
		(sp->iocb)(vha, sp, pkt);

		sp->flags |= SRB_ISP_STARTED;

		QL_PRINT_5(CE_CONT, "(%d,%d): req packet, sp=%p\n",
		    ha->instance, vha->vp_index, (void *)sp);
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);

		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
		    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		/* Adjust ring index. */
		ha->req_ring_index++;
		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
			ha->req_ring_index = 0;
			ha->request_ring_ptr = ha->request_ring_bp;
		} else {
			ha->request_ring_ptr++;
		}

		/* Reset watchdog timer */
		sp->wdg_q_time = sp->init_wdg_q_time;

		/*
		 * Send it by setting the new ring index in the ISP Request
		 * Ring In Pointer register.  This is the mechanism
		 * used to notify the isp that a new iocb has been
		 * placed on the request ring.
		 */
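		/*
		 * On the 8021 the update goes through a doorbell: the new
		 * ring index is packed into the upper 16 bits along with
		 * the PCI function number and a doorbell code, and the
		 * write is repeated until the doorbell read-back matches.
		 */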
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			uint32_t	w32;

			w32 = ha->req_ring_index << 16 |
			    ha->function_number << 5 | 4;
			do {
				ddi_put32(ha->db_dev_handle, ha->nx_req_in,
				    w32);
			} while (RD_REG_DWORD(ha, ha->db_read) != w32);

		} else {
			WRT16_IO_REG(ha, req_in, ha->req_ring_index);
		}

		/* Update outstanding command count statistic. */
		ha->adapter_stats->ncmds++;

		/* if there is a pending command, try to start it. */
		if ((link = ha->pending_cmds.first) == NULL) {
			break;
		}

		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* Release ring specific lock */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_req_pkt
 *	Function is responsible for locking the ring and
 *	getting a zeroed-out request packet.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	pkt:	address for packet pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static int
ql_req_pkt(ql_adapter_state_t *vha, request_t **pktp)
{
	uint16_t		cnt;
	uint32_t		*long_ptr;
	uint32_t		timer;
	int			rval = QL_FUNCTION_TIMEOUT;
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for 30 seconds for slot. */
	for (timer = 30000; timer != 0; timer--) {
		/* Acquire ring lock. */
		REQUEST_RING_LOCK(ha);

		if (ha->req_q_cnt == 0) {
			/* Calculate number of free request entries. */
			cnt = RD16_IO_REG(ha, req_out);
			if (ha->req_ring_index < cnt) {
				ha->req_q_cnt = (uint16_t)
				    (cnt - ha->req_ring_index);
			} else {
				ha->req_q_cnt = (uint16_t)
				    (REQUEST_ENTRY_CNT -
				    (ha->req_ring_index - cnt));
			}
			if (ha->req_q_cnt != 0) {
				ha->req_q_cnt--;
			}
		}

		/* Found empty request ring slot? */
		if (ha->req_q_cnt != 0) {
			ha->req_q_cnt--;
			*pktp = ha->request_ring_ptr;

			/* Zero out packet. */
			long_ptr = (uint32_t *)ha->request_ring_ptr;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++) {
				*long_ptr++ = 0;
			}

			/* Setup IOCB common data. */
			ha->request_ring_ptr->entry_count = 1;
			ha->request_ring_ptr->sys_define =
			    (uint8_t)ha->req_ring_index;
			ddi_put32(ha->hba_buf.acc_handle,
			    &ha->request_ring_ptr->handle,
			    (uint32_t)QL_FCA_BRAND);

			rval = QL_SUCCESS;

			break;
		}

		/* Release request queue lock. */
		REQUEST_RING_UNLOCK(ha);

		drv_usecwait(MILLISEC);

		/* Check for pending interrupts. */
		/*
		 * XXX protect interrupt routine from calling itself.
		 * Need to revisit this routine. So far we never
		 * hit this case as req slot was available
		 */
		if ((!(curthread->t_flag & T_INTR_THREAD)) &&
		    INTERRUPT_PENDING(ha)) {
			(void) ql_isr((caddr_t)ha);
			INTR_LOCK(ha);
			ha->intr_claimed = TRUE;
			INTR_UNLOCK(ha);
		}
	}

	if (rval != QL_SUCCESS) {
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
		EL(ha, "failed, rval = %xh, isp_abort_needed\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_isp_cmd
 *	Function is responsible for modifying ISP input pointer.
 *	This action notifies the isp that a new request has been
 *	added to the request ring.
 *
 *	Releases ring lock.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_isp_cmd(ql_adapter_state_t *vha)
{
	ql_adapter_state_t	*ha = vha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_PRINT_5(CE_CONT, "(%d): req packet:\n", ha->instance);
	QL_DUMP_5((uint8_t *)ha->request_ring_ptr, 8, REQUEST_ENTRY_SIZE);

	/* Sync DMA buffer. */
	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
	    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
	    REQUEST_Q_BUFFER_OFFSET), (size_t)REQUEST_ENTRY_SIZE,
	    DDI_DMA_SYNC_FORDEV);

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring_bp;
	} else {
		ha->request_ring_ptr++;
	}

	/* Set chip new ring index. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		uint32_t	w32;

		w32 = ha->req_ring_index << 16 |
		    ha->function_number << 5 | 4;
		do {
			ddi_put32(ha->db_dev_handle, ha->nx_req_in, w32);
		} while (RD_REG_DWORD(ha, ha->db_read) != w32);

	} else {
		WRT16_IO_REG(ha, req_in, ha->req_ring_index);
	}

	/* Release ring lock. */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_command_iocb
 *	Setup of command IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_command_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set LUN number */
	pkt->lun_l = LSB(sp->lun_queue->lun_no);
	pkt->lun_h = MSB(sp->lun_queue->lun_no);

	/* Set target ID */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->target_l = LSB(tq->loop_id);
		pkt->target_h = MSB(tq->loop_id);
	} else {
		pkt->target_h = LSB(tq->loop_id);
	}

	/* Set tag queue control flags */
	if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_HTAG);
	} else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_OTAG);
	/* else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) */
	} else {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_STAG);
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Load SCSI CDB */
	ddi_rep_put8(ha->hba_buf.acc_handle, fcp->fcp_cdb,
	    pkt->scsi_cdb, MAX_CMDSZ, DDI_DEV_AUTOINCR);

	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
		pkt->entry_type = IOCB_CMD_TYPE_3;
		cnt = CMD_TYPE_3_DATA_SEGMENTS;
	} else {
		pkt->entry_type = IOCB_CMD_TYPE_2;
		cnt = CMD_TYPE_2_DATA_SEGMENTS;
	}

	if (fcp->fcp_data_len == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->IOControlRequests++;
		return;
	}

	/*
	 * Set transfer direction. Load Data segments.
	 */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_OUT);
		ha->xioctl->IOOutputRequests++;
		ha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_IN);
		ha->xioctl->IOInputRequests++;
		ha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count, fcp->fcp_data_len);

	/* Load command data segment. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_data_cookie;
	while (cnt && seg_cnt) {
		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
			    cp->dmac_notused);
		}
		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
		    (uint32_t)cp->dmac_size);
		seg_cnt--;
		cnt--;
		cp++;
	}

	/*
	 * Build continuation packets.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt,
		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_continuation_iocb
 *	Setup of continuation IOCB.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	cp:		cookie list pointer.
 *	seg_cnt:	number of segments.
 *	addr64:		64 bit addresses.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_continuation_iocb(ql_adapter_state_t *ha, ddi_dma_cookie_t *cp,
    uint16_t seg_cnt, boolean_t addr64)
{
	cont_entry_t	*pkt;
	uint64_t	*ptr64;
	uint32_t	*ptr32, cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Build continuation packets.
	 */
	while (seg_cnt) {
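		/*
		 * The current slot still holds the IOCB built by the
		 * caller (or the previous continuation entry); flush it,
		 * then advance to the next slot for this entry.
		 */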
		/* Sync DMA buffer. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    (off_t)(ha->req_ring_index * REQUEST_ENTRY_SIZE +
		    REQUEST_Q_BUFFER_OFFSET), REQUEST_ENTRY_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		/* Adjust ring pointer, and deal with wrap. */
		ha->req_ring_index++;
		if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
			ha->req_ring_index = 0;
			ha->request_ring_ptr = ha->request_ring_bp;
		} else {
			ha->request_ring_ptr++;
		}
		pkt = (cont_entry_t *)ha->request_ring_ptr;

		/* Zero out packet. */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/*
		 * Build continuation packet.
		 */
		pkt->entry_count = 1;
		pkt->sys_define = (uint8_t)ha->req_ring_index;
		if (addr64) {
			pkt->entry_type = CONTINUATION_TYPE_1;
			cnt = CONT_TYPE_1_DATA_SEGMENTS;
			ptr32 = (uint32_t *)
			    &((cont_type_1_entry_t *)pkt)->dseg_0_address;
			while (cnt && seg_cnt) {
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_notused);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		} else {
			pkt->entry_type = CONTINUATION_TYPE_0;
			cnt = CONT_TYPE_0_DATA_SEGMENTS;
			ptr32 = (uint32_t *)&pkt->dseg_0_address;
			while (cnt && seg_cnt) {
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(ha->hba_buf.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		}

		QL_PRINT_5(CE_CONT, "(%d): packet:\n", ha->instance);
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_command_24xx_iocb
 *	Setup of ISP24xx command IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_command_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd7_24xx_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (fcp->fcp_data_len != 0 && sp->sg_dma.dma_handle != NULL &&
	    sp->pkt->pkt_data_cookie_cnt > 1) {
		ql_cmd_24xx_type_6_iocb(ha, sp, arg);
		QL_PRINT_3(CE_CONT, "(%d): cmd6 exit\n", ha->instance);
		return;
	}

	pkt->entry_type = IOCB_CMD_TYPE_7;

	/* Set LUN number */
	pkt->fcp_lun[2] = LSB(sp->lun_queue->lun_no);
	pkt->fcp_lun[3] = MSB(sp->lun_queue->lun_no);

	/* Set N_port handle */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	/* Set target ID */
	pkt->target_id[0] = tq->d_id.b.al_pa;
	pkt->target_id[1] = tq->d_id.b.area;
	pkt->target_id[2] = tq->d_id.b.domain;

	pkt->vp_index = ha->vp_index;

	/* Set ISP command timeout. */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Load SCSI CDB */
	ddi_rep_put8(pha->hba_buf.acc_handle, fcp->fcp_cdb, pkt->scsi_cdb,
	    MAX_CMDSZ, DDI_DEV_AUTOINCR);
	for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
		ql_chg_endian((uint8_t *)&pkt->scsi_cdb + cnt, 4);
	}

	/*
	 * Set tag queue control flags
	 * Note:
	 *	Cannot copy fcp->fcp_cntl.cntl_qtype directly,
	 *	problem with x86 in 32bit kernel mode
	 */
	switch (fcp->fcp_cntl.cntl_qtype) {
	case FCP_QTYPE_SIMPLE:
		pkt->task = TA_STAG;
		break;
	case FCP_QTYPE_HEAD_OF_Q:
		pkt->task = TA_HTAG;
		break;
	case FCP_QTYPE_ORDERED:
		pkt->task = TA_OTAG;
		break;
	case FCP_QTYPE_ACA_Q_TAG:
		pkt->task = TA_ACA;
		break;
	case FCP_QTYPE_UNTAGGED:
		pkt->task = TA_UNTAGGED;
		break;
	default:
		break;
	}

	if (fcp->fcp_data_len == 0) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		pha->xioctl->IOControlRequests++;
		return;
	}

	/* Set transfer direction. */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags = CF_WR;
		pha->xioctl->IOOutputRequests++;
		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags = CF_RD;
		pha->xioctl->IOInputRequests++;
		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(pha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->total_byte_count,
	    fcp->fcp_data_len);

	/* Load command data segment. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_data_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_cmd_24xx_type_6_iocb
 *	Setup of ISP24xx command type 6 IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_cmd_24xx_type_6_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	uint64_t		addr;
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	fcp_cmd_t		*fcp = sp->fcp;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	cmd6_24xx_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;
	dma_mem_t		*cmem = &sp->sg_dma;
	cmd6_2400_dma_t		*cdma = cmem->bp;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	pkt->entry_type = IOCB_CMD_TYPE_6;

	bzero(cdma, sizeof (cmd6_2400_dma_t));
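	/*
	 * A type 6 IOCB does not embed the CDB or the scatter/gather
	 * list; both are placed in the per-command DMA buffer (sg_dma)
	 * and the IOCB carries their DMA addresses instead.
	 */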

	/* Set LUN number */
	pkt->fcp_lun[2] = cdma->cmd.fcp_lun[1] = LSB(sp->lun_queue->lun_no);
	pkt->fcp_lun[3] = cdma->cmd.fcp_lun[0] = MSB(sp->lun_queue->lun_no);

	/* Set N_port handle */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	/* Set target ID */
	pkt->target_id[0] = tq->d_id.b.al_pa;
	pkt->target_id[1] = tq->d_id.b.area;
	pkt->target_id[2] = tq->d_id.b.domain;

	pkt->vp_index = ha->vp_index;

	/* Set ISP command timeout. */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Load SCSI CDB */
	ddi_rep_put8(cmem->acc_handle, fcp->fcp_cdb, cdma->cmd.scsi_cdb,
	    MAX_CMDSZ, DDI_DEV_AUTOINCR);

	/*
	 * Set tag queue control flags
	 * Note:
	 *	Cannot copy fcp->fcp_cntl.cntl_qtype directly,
	 *	problem with x86 in 32bit kernel mode
	 */
	switch (fcp->fcp_cntl.cntl_qtype) {
	case FCP_QTYPE_SIMPLE:
		cdma->cmd.task = TA_STAG;
		break;
	case FCP_QTYPE_HEAD_OF_Q:
		cdma->cmd.task = TA_HTAG;
		break;
	case FCP_QTYPE_ORDERED:
		cdma->cmd.task = TA_OTAG;
		break;
	case FCP_QTYPE_ACA_Q_TAG:
		cdma->cmd.task = TA_ACA;
		break;
	case FCP_QTYPE_UNTAGGED:
		cdma->cmd.task = TA_UNTAGGED;
		break;
	default:
		break;
	}

	/*
	 * FCP_CMND Payload Data Segment
	 */
	cp = cmem->cookies;
	ddi_put16(pha->hba_buf.acc_handle, &pkt->cmnd_length,
	    sizeof (fcp_cmnd_t));
	ddi_put32(pha->hba_buf.acc_handle, &pkt->cmnd_address[0],
	    cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, &pkt->cmnd_address[1],
	    cp->dmac_notused);

	/* Set transfer direction. */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags = (uint8_t)(CF_DSD_PTR | CF_WR);
		cdma->cmd.control_flags = CF_WR;
		pha->xioctl->IOOutputRequests++;
		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags = (uint8_t)(CF_DSD_PTR | CF_RD);
		cdma->cmd.control_flags = CF_RD;
		pha->xioctl->IOInputRequests++;
		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/*
	 * FCP_DATA Data Segment Descriptor.
	 */
	addr = cp->dmac_laddress + sizeof (fcp_cmnd_t);
	ddi_put32(pha->hba_buf.acc_handle, &pkt->dseg_0_address[0], LSD(addr));
	ddi_put32(pha->hba_buf.acc_handle, &pkt->dseg_0_address[1], MSD(addr));

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(pha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);
	ddi_put32(pha->hba_buf.acc_handle, &pkt->dseg_0_length,
	    seg_cnt * 12 + 12);

	/* Load total byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->total_byte_count,
	    fcp->fcp_data_len);
	ddi_put32(cmem->acc_handle, &cdma->cmd.dl, (uint32_t)fcp->fcp_data_len);
	ql_chg_endian((uint8_t *)&cdma->cmd.dl, 4);

	/* Load command data segments. */
	ptr32 = (uint32_t *)cdma->cookie_list;
	cp = sp->pkt->pkt_data_cookie;
	while (seg_cnt--) {
		ddi_put32(cmem->acc_handle, ptr32++, cp->dmac_address);
		ddi_put32(cmem->acc_handle, ptr32++, cp->dmac_notused);
		ddi_put32(cmem->acc_handle, ptr32++, (uint32_t)cp->dmac_size);
		cp++;
	}

	/* Sync DMA buffer. */
	(void) ddi_dma_sync(cmem->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_marker
 *	Function issues marker IOCB.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	loop_id:	device loop ID
 *	lun:		device LUN
 *	type:		marker modifier
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
int
ql_marker(ql_adapter_state_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
	mrk_entry_t	*pkt;
	int		rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	rval = ql_req_pkt(ha, (request_t **)&pkt);
	if (rval == QL_SUCCESS) {
		pkt->entry_type = MARKER_TYPE;

		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			marker_24xx_entry_t	*pkt24 =
			    (marker_24xx_entry_t *)pkt;

			pkt24->modifier = type;

			/* Set LUN number */
			pkt24->fcp_lun[2] = LSB(lun);
			pkt24->fcp_lun[3] = MSB(lun);

			pkt24->vp_index = ha->vp_index;

			/* Set N_port handle */
			ddi_put16(ha->pha->hba_buf.acc_handle,
			    &pkt24->n_port_hdl, loop_id);

		} else {
			pkt->modifier = type;

			pkt->lun_l = LSB(lun);
			pkt->lun_h = MSB(lun);

			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->target_l = LSB(loop_id);
				pkt->target_h = MSB(loop_id);
			} else {
				pkt->target_h = LSB(loop_id);
			}
		}

		/* Issue command to ISP */
		ql_isp_cmd(ha);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}

/*
 * ql_ms_iocb
 *	Setup of name/management server IOCB.
 *
 * Input:
 *	ha = adapter state pointer.
 *	sp = srb structure pointer.
 *	arg = request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ms_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ms_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
	/*
	 * Build command packet.
	 */
	pkt->entry_type = MS_TYPE;

	/* Set loop ID */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->loop_id_l = LSB(tq->loop_id);
		pkt->loop_id_h = MSB(tq->loop_id);
	} else {
		pkt->loop_id_h = LSB(tq->loop_id);
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Set cmd data segment count. */
	pkt->cmd_dseg_count_l = 1;

	/* Set total data segment count */
	seg_cnt = (uint16_t)(sp->pkt->pkt_resp_cookie_cnt + 1);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->total_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/* Load MS command data segments. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
	seg_cnt--;

	/* Load MS response entry data segments. */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ms_24xx_iocb
 *	Setup of name/management server IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ms_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ct_passthru_entry_t	*pkt = arg;
	ql_adapter_state_t	*pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
	/*
	 * Build command packet.
	 */
	pkt->entry_type = CT_PASSTHRU_TYPE;

	/* Set loop ID */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	pkt->vp_index = ha->vp_index;

	/* Set ISP command timeout. */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(pha->hba_buf.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Set cmd/response data segment counts. */
	ddi_put16(pha->hba_buf.acc_handle, &pkt->cmd_dseg_count, 1);
	seg_cnt = (uint16_t)sp->pkt->pkt_resp_cookie_cnt;
	ddi_put16(pha->hba_buf.acc_handle, &pkt->resp_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(pha->hba_buf.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/* Load MS command entry data segments. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, (uint32_t)cp->dmac_size);

	/* Load MS response entry data segments. */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(pha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(pha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_iocb
 *	Setup of IP IOCB.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ip_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32, cnt;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ip_entry_t		*pkt = arg;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set loop ID */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->loop_id_l = LSB(tq->loop_id);
		pkt->loop_id_h = MSB(tq->loop_id);
	} else {
		pkt->loop_id_h = LSB(tq->loop_id);
	}

	/* Set control flags */
	pkt->control_flags_l = BIT_6;
	if (sp->pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
		pkt->control_flags_h = BIT_7;
	}

	/* Set ISP command timeout. */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/*
	 * Build command packet.
	 */
	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
		pkt->entry_type = IP_A64_TYPE;
		cnt = IP_A64_DATA_SEGMENTS;
	} else {
		pkt->entry_type = IP_TYPE;
		cnt = IP_DATA_SEGMENTS;
	}

	/* Load command entry data segments. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	while (cnt && seg_cnt) {
		ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
		if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			ddi_put32(ha->hba_buf.acc_handle, ptr32++,
			    cp->dmac_notused);
		}
		ddi_put32(ha->hba_buf.acc_handle, ptr32++,
		    (uint32_t)cp->dmac_size);
		seg_cnt--;
		cnt--;
		cp++;
	}

	/*
	 * Build continuation packets.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt,
		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_ip_24xx_iocb
 *	Setup of IP IOCB for ISP24xx.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	sp:	srb structure pointer.
 *	arg:	request queue packet.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_ip_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t	*cp;
	uint32_t		*ptr32;
	uint16_t		seg_cnt;
	ql_tgt_t		*tq = sp->lun_queue->target_queue;
	ip_cmd_entry_t		*pkt = arg;

	pkt->entry_type = IP_CMD_TYPE;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Set N_port handle */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->hdl_status, tq->loop_id);

	/* Set ISP command timeout. */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(ha->hba_buf.acc_handle, &pkt->timeout_hdl,
		    sp->isp_timeout);
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
	/* Load total byte count. */
	ddi_put32(ha->hba_buf.acc_handle, &pkt->byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);
	ddi_put16(ha->hba_buf.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Set control flags */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->control_flags,
	    (uint16_t)(BIT_0));

	/* Set frame header control flags */
	ddi_put16(ha->hba_buf.acc_handle, &pkt->frame_hdr_cntrl_flgs,
	    (uint16_t)(IPCF_LAST_SEQ | IPCF_FIRST_SEQ));

	/* Load command data segment. */
	ptr32 = (uint32_t *)&pkt->dseg_0_address;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(ha->hba_buf.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(ha->hba_buf.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_isp_rcvbuf
 *	Locates free buffers and places them on the receive buffer queue.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_isp_rcvbuf(ql_adapter_state_t *ha)
{
	rcvbuf_t	*container;
	uint16_t	rcv_q_cnt;
	uint16_t	index = 0;
	uint16_t	index1 = 1;
	int		debounce_count = QL_MAX_DEBOUNCE;
	ql_srb_t	*sp;
	fc_unsol_buf_t	*ubp;
	int		ring_updated = FALSE;

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		ql_isp24xx_rcvbuf(ha);
		return;
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire adapter state lock. */
	ADAPTER_STATE_LOCK(ha);

	/* Calculate number of free receive buffer entries. */
	index = RD16_IO_REG(ha, mailbox_out[8]);
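	/*
	 * Mailbox 8 is updated by the firmware; re-read it until two
	 * consecutive reads agree, or the debounce limit is reached,
	 * so that a value caught mid-update is not used.
	 */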
	do {
		index1 = RD16_IO_REG(ha, mailbox_out[8]);
		if (index1 == index) {
			break;
		} else {
			index = index1;
		}
	} while (debounce_count--);

	if (debounce_count < 0) {
		/* This should never happen */
		EL(ha, "max mb8 debounce retries exceeded\n");
	}

	rcv_q_cnt = (uint16_t)(ha->rcvbuf_ring_index < index ?
	    index - ha->rcvbuf_ring_index : RCVBUF_CONTAINER_CNT -
	    (ha->rcvbuf_ring_index - index));

	if (rcv_q_cnt == RCVBUF_CONTAINER_CNT) {
		rcv_q_cnt--;
	}

	/* Load all free buffers in ISP receive buffer ring. */
	index = 0;
	while (rcv_q_cnt > (uint16_t)0 && index < QL_UB_LIMIT) {
		/* Locate a buffer to give. */
		QL_UB_LOCK(ha);
		while (index < QL_UB_LIMIT) {
			ubp = ha->ub_array[index];
			if (ubp != NULL) {
				sp = ubp->ub_fca_private;
				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
				    (ha->flags & IP_INITIALIZED) &&
				    (sp->flags & SRB_UB_IN_FCA) &&
				    (!(sp->flags & (SRB_UB_IN_ISP |
				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED)))) {
					sp->flags |= SRB_UB_IN_ISP;
					break;
				}
			}
			index++;
		}

		if (index < QL_UB_LIMIT) {
			rcv_q_cnt--;
			index++;
			container = ha->rcvbuf_ring_ptr;

			/*
			 * Build container.
			 */
			ddi_put32(ha->hba_buf.acc_handle,
			    (uint32_t *)(void *)&container->bufp[0],
			    sp->ub_buffer.cookie.dmac_address);

			ddi_put32(ha->hba_buf.acc_handle,
			    (uint32_t *)(void *)&container->bufp[1],
			    sp->ub_buffer.cookie.dmac_notused);

			ddi_put16(ha->hba_buf.acc_handle, &container->handle,
			    LSW(sp->handle));

			ha->ub_outcnt++;

			/* Adjust ring index. */
			ha->rcvbuf_ring_index++;
			if (ha->rcvbuf_ring_index == RCVBUF_CONTAINER_CNT) {
				ha->rcvbuf_ring_index = 0;
				ha->rcvbuf_ring_ptr = ha->rcvbuf_ring_bp;
			} else {
				ha->rcvbuf_ring_ptr++;
			}

			ring_updated = TRUE;
		}
		QL_UB_UNLOCK(ha);
	}

	if (ring_updated) {
		/* Sync queue. */
		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
		    (off_t)RCVBUF_Q_BUFFER_OFFSET, (size_t)RCVBUF_QUEUE_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		/* Set chip new ring index. */
		WRT16_IO_REG(ha, mailbox_in[8], ha->rcvbuf_ring_index);
	}

	/* Release adapter state lock. */
	ADAPTER_STATE_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}

/*
 * ql_isp24xx_rcvbuf
 *	Locates free buffers and sends them to the adapter.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_isp24xx_rcvbuf(ql_adapter_state_t *ha)
{
	rcvbuf_t		*container;
	uint16_t		index;
	ql_srb_t		*sp;
	fc_unsol_buf_t		*ubp;
	int			rval;
	ip_buf_pool_entry_t	*pkt = NULL;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	for (;;) {
		/* Locate a buffer to give. */
		QL_UB_LOCK(ha);
		for (index = 0; index < QL_UB_LIMIT; index++) {
			ubp = ha->ub_array[index];
			if (ubp != NULL) {
				sp = ubp->ub_fca_private;
				if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
				    (ha->flags & IP_INITIALIZED) &&
				    (sp->flags & SRB_UB_IN_FCA) &&
				    (!(sp->flags & (SRB_UB_IN_ISP |
				    SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED)))) {
					ha->ub_outcnt++;
					sp->flags |= SRB_UB_IN_ISP;
					break;
				}
			}
		}
		QL_UB_UNLOCK(ha);
		if (index == QL_UB_LIMIT) {
			break;
		}

		/* Get IOCB packet for buffers. */
		if (pkt == NULL) {
			rval = ql_req_pkt(ha, (request_t **)&pkt);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed, ql_req_pkt=%x\n", rval);
				QL_UB_LOCK(ha);
				ha->ub_outcnt--;
				sp->flags &= ~SRB_UB_IN_ISP;
				QL_UB_UNLOCK(ha);
				break;
			}
			pkt->entry_type = IP_BUF_POOL_TYPE;
			container = &pkt->buffers[0];
		}
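		/*
		 * Containers are batched into one pool IOCB; it is sent
		 * when IP_POOL_BUFFERS entries have been filled, or after
		 * the loop ends if it is only partially full.
		 */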

		/*
		 * Build container.
		 */
		ddi_put32(ha->hba_buf.acc_handle, &container->bufp[0],
		    sp->ub_buffer.cookie.dmac_address);
		ddi_put32(ha->hba_buf.acc_handle, &container->bufp[1],
		    sp->ub_buffer.cookie.dmac_notused);
		ddi_put16(ha->hba_buf.acc_handle, &container->handle,
		    LSW(sp->handle));

		pkt->buffer_count++;
		container++;

		if (pkt->buffer_count == IP_POOL_BUFFERS) {
			ql_isp_cmd(ha);
			pkt = NULL;
		}
	}

	if (pkt != NULL) {
		ql_isp_cmd(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}