1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 #include <emlxs.h>
28 
29 /* Timer period in seconds */
30 #define	EMLXS_TIMER_PERIOD		1	/* secs */
31 #define	EMLXS_PKT_PERIOD		5	/* secs */
32 #define	EMLXS_UB_PERIOD			60	/* secs */
33 
34 EMLXS_MSG_DEF(EMLXS_CLOCK_C);
35 
36 
37 #ifdef DFC_SUPPORT
38 static void emlxs_timer_check_loopback(emlxs_hba_t *hba);
39 #endif /* DFC_SUPPORT */
40 
41 #ifdef DHCHAP_SUPPORT
42 static void emlxs_timer_check_dhchap(emlxs_port_t *port);
43 #endif /* DHCHAP_SUPPORT */
44 
45 static void	emlxs_timer(void *arg);
46 static void	emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
47 static uint32_t	emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
48 static void	emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
49 static void	emlxs_timer_check_linkup(emlxs_hba_t *hba);
50 static void	emlxs_timer_check_discovery(emlxs_port_t *port);
51 static void	emlxs_timer_check_ub(emlxs_port_t *port);
52 static void	emlxs_timer_check_rings(emlxs_hba_t *hba, uint8_t *flag);
53 static uint32_t	emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
54 			Q *abortq, uint8_t *flag);
55 
56 #ifdef TX_WATCHDOG
57 static void	emlxs_tx_watchdog(emlxs_hba_t *hba);
58 #endif /* TX_WATCHDOG */
59 
60 extern clock_t
61 emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
62 {
63 	emlxs_config_t *cfg = &CFG;
64 	clock_t time;
65 
66 	/* Set thread timeout */
67 	if (cfg[CFG_TIMEOUT_ENABLE].current) {
68 		(void) drv_getparm(LBOLT, &time);
69 		time += (timeout * drv_usectohz(1000000));
70 	} else {
71 		time = -1;
72 	}
73 
74 	return (time);
75 
76 }  /* emlxs_timeout() */
77 
78 
/*
 * Periodic watchdog timer callback, scheduled via timeout(9F).
 *
 * Runs once per EMLXS_TIMER_PERIOD second while armed.  Serializes
 * itself with the EMLXS_TIMER_BUSY flag, honors kill requests set by
 * emlxs_timer_stop(), performs the standard watchdog checks, and then
 * re-arms itself unless a kill was requested while it ran.
 */
static void
emlxs_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;

	/* Timer was disarmed after this callback was already dispatched */
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);

	/* Only one timer thread is allowed */
	if (hba->timer_flags & EMLXS_TIMER_BUSY) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	/* Check if a kill request has been made */
	if (hba->timer_flags & EMLXS_TIMER_KILL) {
		/* Acknowledge the kill; emlxs_timer_stop() polls timer_id */
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;

		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
	/* Snapshot the current driver time used by all checks this pass */
	hba->timer_tics = DRV_TIME;

	mutex_exit(&EMLXS_TIMER_LOCK);

	/* Perform standard checks (run without the timer lock held) */
	emlxs_timer_checks(hba);

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);

	hba->timer_flags &= ~EMLXS_TIMER_BUSY;

	/* If timer is still enabled, restart it */
	if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
		hba->timer_id =
		    timeout(emlxs_timer, (void *)hba,
		    (EMLXS_TIMER_PERIOD * drv_usectohz(1000000)));
	} else {
		/* A kill arrived while the checks ran; do not re-arm */
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

}  /* emlxs_timer() */
133 
134 
135 extern void
136 emlxs_timer_checks(emlxs_hba_t *hba)
137 {
138 	emlxs_port_t *port = &PPORT;
139 	uint8_t flag[MAX_RINGS];
140 	uint32_t i;
141 	uint32_t rc;
142 
143 	/* Exit if we are still initializing */
144 	if (hba->state < FC_LINK_DOWN) {
145 		return;
146 	}
147 
148 	bzero((void *)flag, sizeof (flag));
149 
150 	/* Check for mailbox timeout */
151 	emlxs_timer_check_mbox(hba);
152 
153 	/* Check heartbeat timer */
154 	emlxs_timer_check_heartbeat(hba);
155 
156 #ifdef IDLE_TIMER
157 	emlxs_pm_idle_timer(hba);
158 #endif /* IDLE_TIMER */
159 
160 #ifdef DFC_SUPPORT
161 	/* Check for loopback timeouts */
162 	emlxs_timer_check_loopback(hba);
163 #endif /* DFC_SUPPORT */
164 
165 	/* Check for packet timeouts */
166 	rc = emlxs_timer_check_pkts(hba, flag);
167 
168 	if (rc) {
169 		/* Link or adapter is being reset */
170 		return;
171 	}
172 
173 	/* Check for linkup timeout */
174 	emlxs_timer_check_linkup(hba);
175 
176 	/* Check the ports */
177 	for (i = 0; i < MAX_VPORTS; i++) {
178 		port = &VPORT(i);
179 
180 		if (!(port->flag & EMLXS_PORT_BOUND)) {
181 			continue;
182 		}
183 
184 		/* Check for node gate timeouts */
185 		emlxs_timer_check_nodes(port, flag);
186 
187 		/* Check for tape discovery timeout */
188 		emlxs_timer_check_discovery(port);
189 
190 		/* Check for UB timeouts */
191 		emlxs_timer_check_ub(port);
192 
193 #ifdef DHCHAP_SUPPORT
194 		/* Check for DHCHAP authentication timeouts */
195 		emlxs_timer_check_dhchap(port);
196 #endif /* DHCHAP_SUPPORT */
197 
198 	}
199 
200 	/* Check for ring service timeouts */
201 	/* Always do this last */
202 	emlxs_timer_check_rings(hba, flag);
203 
204 	return;
205 
206 }  /* emlxs_timer_checks() */
207 
208 
209 extern void
210 emlxs_timer_start(emlxs_hba_t *hba)
211 {
212 	if (hba->timer_id) {
213 		return;
214 	}
215 
216 	/* Restart the timer */
217 	mutex_enter(&EMLXS_TIMER_LOCK);
218 	if (!hba->timer_id) {
219 		hba->timer_flags = 0;
220 		hba->timer_id =
221 		    timeout(emlxs_timer, (void *)hba, drv_usectohz(1000000));
222 	}
223 	mutex_exit(&EMLXS_TIMER_LOCK);
224 
225 }  /* emlxs_timer_start() */
226 
227 
/*
 * Stop the periodic driver timer and wait for the callback to finish.
 *
 * Sets EMLXS_TIMER_KILL and then polls (dropping the lock between
 * checks) until emlxs_timer() observes the flag and clears timer_id.
 */
extern void
emlxs_timer_stop(emlxs_hba_t *hba)
{
	/* Timer never started or already stopped */
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);
	hba->timer_flags |= EMLXS_TIMER_KILL;

	/* timer_id is cleared by emlxs_timer() when it sees the kill */
	while (hba->timer_id) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		delay(drv_usectohz(500000));
		mutex_enter(&EMLXS_TIMER_LOCK);
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

}  /* emlxs_timer_stop() */
248 
249 
250 static uint32_t
251 emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
252 {
253 	emlxs_port_t *port = &PPORT;
254 	emlxs_config_t *cfg = &CFG;
255 	Q tmo;
256 	int32_t ringno;
257 	RING *rp;
258 	NODELIST *nlp;
259 	IOCBQ *prev;
260 	IOCBQ *next;
261 	IOCB *iocb;
262 	IOCBQ *iocbq;
263 	emlxs_buf_t *sbp;
264 	fc_packet_t *pkt;
265 	Q abort;
266 	uint32_t iotag;
267 	uint32_t rc;
268 
269 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
270 		return (0);
271 	}
272 
273 	if (hba->pkt_timer > hba->timer_tics) {
274 		return (0);
275 	}
276 
277 	hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;
278 
279 
280 	bzero((void *)&tmo, sizeof (Q));
281 
282 	/*
283 	 * We must hold the locks here because we never know when an iocb
284 	 * will be removed out from under us
285 	 */
286 
287 	mutex_enter(&EMLXS_RINGTX_LOCK);
288 
289 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
290 		rp = &hba->ring[ringno];
291 
292 		/* Scan the tx queues for each active node on the ring */
293 
294 		/* Get the first node */
295 		nlp = (NODELIST *)rp->nodeq.q_first;
296 
297 		while (nlp) {
298 			/* Scan the node's priority tx queue */
299 			prev = NULL;
300 			iocbq = (IOCBQ *)nlp->nlp_ptx[ringno].q_first;
301 
302 			while (iocbq) {
303 				next = (IOCBQ *)iocbq->next;
304 				iocb = &iocbq->iocb;
305 				sbp = (emlxs_buf_t *)iocbq->sbp;
306 
307 				/* Check if iocb has timed out */
308 				if (sbp && hba->timer_tics >= sbp->ticks) {
309 					/* iocb timed out, now deque it */
310 					if (next == NULL) {
311 						nlp->nlp_ptx[ringno].q_last =
312 						    (uint8_t *)prev;
313 					}
314 
315 					if (prev == NULL) {
316 						nlp->nlp_ptx[ringno].q_first =
317 						    (uint8_t *)next;
318 					} else {
319 						prev->next = next;
320 					}
321 
322 					iocbq->next = NULL;
323 					nlp->nlp_ptx[ringno].q_cnt--;
324 
325 					/* Add this iocb to our local */
326 					/* timout queue */
327 
328 					/*
329 					 * This way we don't hold the RINGTX
330 					 * lock too long
331 					 */
332 
333 					if (tmo.q_first) {
334 						((IOCBQ *)tmo.q_last)->next =
335 						    iocbq;
336 						tmo.q_last =
337 						    (uint8_t *)iocbq;
338 						tmo.q_cnt++;
339 					} else {
340 						tmo.q_first =
341 						    (uint8_t *)iocbq;
342 						tmo.q_last =
343 						    (uint8_t *)iocbq;
344 						tmo.q_cnt = 1;
345 					}
346 					iocbq->next = NULL;
347 
348 				} else {
349 					prev = iocbq;
350 				}
351 
352 				iocbq = next;
353 
354 			}	/* while (iocbq) */
355 
356 
357 			/* Scan the node's tx queue */
358 			prev = NULL;
359 			iocbq = (IOCBQ *)nlp->nlp_tx[ringno].q_first;
360 
361 			while (iocbq) {
362 				next = (IOCBQ *)iocbq->next;
363 				iocb = &iocbq->iocb;
364 				sbp = (emlxs_buf_t *)iocbq->sbp;
365 
366 				/* Check if iocb has timed out */
367 				if (sbp && hba->timer_tics >= sbp->ticks) {
368 					/* iocb timed out, now deque it */
369 					if (next == NULL) {
370 						nlp->nlp_tx[ringno].q_last =
371 						    (uint8_t *)prev;
372 					}
373 
374 					if (prev == NULL) {
375 						nlp->nlp_tx[ringno].q_first =
376 						    (uint8_t *)next;
377 					} else {
378 						prev->next = next;
379 					}
380 
381 					iocbq->next = NULL;
382 					nlp->nlp_tx[ringno].q_cnt--;
383 
384 					/* Add this iocb to our local */
385 					/* timout queue */
386 
387 					/*
388 					 * This way we don't hold the RINGTX
389 					 * lock too long
390 					 */
391 
392 					if (tmo.q_first) {
393 						((IOCBQ *)tmo.q_last)->next =
394 						    iocbq;
395 						tmo.q_last =
396 						    (uint8_t *)iocbq;
397 						tmo.q_cnt++;
398 					} else {
399 						tmo.q_first =
400 						    (uint8_t *)iocbq;
401 						tmo.q_last =
402 						    (uint8_t *)iocbq;
403 						tmo.q_cnt = 1;
404 					}
405 					iocbq->next = NULL;
406 
407 				} else {
408 					prev = iocbq;
409 				}
410 
411 				iocbq = next;
412 
413 			}	/* while (iocbq) */
414 
415 			if (nlp == (NODELIST *)rp->nodeq.q_last) {
416 				nlp = NULL;
417 			} else {
418 				nlp = nlp->nlp_next[ringno];
419 			}
420 
421 		}	/* while(nlp) */
422 
423 	}	/* end of for */
424 
425 	/* Now cleanup the iocb's */
426 	iocbq = (IOCBQ *)tmo.q_first;
427 	while (iocbq) {
428 		/* Free the IoTag and the bmp */
429 		iocb = &iocbq->iocb;
430 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
431 		ringno = ((RING *)iocbq->ring)->ringno;
432 
433 		if (sbp && (sbp != STALE_PACKET)) {
434 			mutex_enter(&sbp->mtx);
435 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
436 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
437 				hba->ring_tx_count[ringno]--;
438 			}
439 			sbp->pkt_flags |= PACKET_IN_TIMEOUT;
440 			mutex_exit(&sbp->mtx);
441 		}
442 
443 		iocbq = (IOCBQ *)iocbq->next;
444 
445 	}	/* end of while */
446 
447 	mutex_exit(&EMLXS_RINGTX_LOCK);
448 
449 	/* Now complete the transmit timeouts outside the locks */
450 	iocbq = (IOCBQ *)tmo.q_first;
451 	while (iocbq) {
452 		/* Save the next iocbq for now */
453 		next = (IOCBQ *)iocbq->next;
454 
455 		/* Unlink this iocbq */
456 		iocbq->next = NULL;
457 
458 		/* Get the pkt */
459 		sbp = (emlxs_buf_t *)iocbq->sbp;
460 
461 		if (sbp) {
462 			/* Warning: Some FCT sbp's don't have */
463 			/* fc_packet objects */
464 			pkt = PRIV2PKT(sbp);
465 
466 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
467 			    "TXQ abort: sbp=%p iotag=%x tmo=%d", sbp,
468 			    sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);
469 
470 			if (hba->state >= FC_LINK_UP) {
471 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
472 				    IOERR_ABORT_TIMEOUT, 1);
473 			} else {
474 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
475 				    IOERR_LINK_DOWN, 1);
476 			}
477 
478 		}
479 
480 		iocbq = next;
481 
482 	}	/* end of while */
483 
484 
485 
486 	/* Now check the chip */
487 	bzero((void *)&abort, sizeof (Q));
488 
489 	/* Check the rings */
490 	rc = 0;
491 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
492 		rp = &hba->ring[ringno];
493 
494 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
495 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
496 			sbp = rp->fc_table[iotag];
497 			if (sbp && (sbp != STALE_PACKET) &&
498 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
499 			    (hba->timer_tics >= sbp->ticks)) {
500 				rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
501 				    sbp, &abort, flag);
502 
503 				if (rc) {
504 					break;
505 				}
506 			}
507 		}
508 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
509 
510 		if (rc) {
511 			break;
512 		}
513 	}
514 
515 	/* Now put the iocb's on the tx queue */
516 	iocbq = (IOCBQ *)abort.q_first;
517 	while (iocbq) {
518 		/* Save the next iocbq for now */
519 		next = (IOCBQ *)iocbq->next;
520 
521 		/* Unlink this iocbq */
522 		iocbq->next = NULL;
523 
524 		/* Send this iocbq */
525 		emlxs_tx_put(iocbq, 1);
526 
527 		iocbq = next;
528 	}
529 
530 	if (rc == 1) {
531 		/* Spawn a thread to reset the link */
532 		emlxs_thread_spawn(hba, emlxs_reset_link_thread, NULL, NULL);
533 	} else if (rc == 2) {
534 		/* Spawn a thread to reset the adapter */
535 		emlxs_thread_spawn(hba, emlxs_restart_thread, NULL, NULL);
536 	}
537 
538 	return (rc);
539 
540 }  /* emlxs_timer_check_pkts() */
541 
542 
543 
544 static void
545 emlxs_timer_check_rings(emlxs_hba_t *hba, uint8_t *flag)
546 {
547 	emlxs_port_t *port = &PPORT;
548 	emlxs_config_t *cfg = &CFG;
549 	int32_t ringno;
550 	RING *rp;
551 	uint32_t logit = 0;
552 
553 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
554 		return;
555 	}
556 
557 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
558 		rp = &hba->ring[ringno];
559 
560 		/* Check for ring timeout now */
561 		mutex_enter(&EMLXS_RINGTX_LOCK);
562 		if (rp->timeout && (hba->timer_tics >= rp->timeout)) {
563 			/* Check if there is still work to do on the ring and */
564 			/* the link is still up */
565 			if (rp->nodeq.q_first) {
566 				flag[ringno] = 1;
567 				rp->timeout = hba->timer_tics + 10;
568 
569 				if (hba->state >= FC_LINK_UP) {
570 					logit = 1;
571 				}
572 			} else {
573 				rp->timeout = 0;
574 			}
575 		}
576 		mutex_exit(&EMLXS_RINGTX_LOCK);
577 
578 		if (logit) {
579 			EMLXS_MSGF(EMLXS_CONTEXT,
580 			    &emlxs_ring_watchdog_msg,
581 			    "%s host=%d port=%d cnt=%d,%d",
582 			    emlxs_ring_xlate(ringno),
583 			    rp->fc_cmdidx, rp->fc_port_cmdidx,
584 			    hba->ring_tx_count[ringno],
585 			    hba->io_count[ringno]);
586 		}
587 
588 		/*
589 		 * If ring flag is set, request iocb servicing here to send any
590 		 * iocb's that may still be queued
591 		 */
592 		if (flag[ringno]) {
593 			emlxs_sli_issue_iocb_cmd(hba, rp, 0);
594 		}
595 	}
596 
597 	return;
598 
599 }  /* emlxs_timer_check_rings() */
600 
601 
602 static void
603 emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
604 {
605 	emlxs_hba_t *hba = HBA;
606 	uint32_t found;
607 	uint32_t i;
608 	NODELIST *nlp;
609 	int32_t ringno;
610 
611 	for (;;) {
612 		/* Check node gate flag for expiration */
613 		found = 0;
614 
615 		/*
616 		 * We need to lock, scan, and unlock because we can't hold the
617 		 * lock while we call node_open
618 		 */
619 		rw_enter(&port->node_rwlock, RW_READER);
620 		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
621 			nlp = port->node_table[i];
622 			while (nlp != NULL) {
623 				for (ringno = 0; ringno < hba->ring_count;
624 				    ringno++) {
625 					/* Check if the node timer is active */
626 					/* and if timer has expired */
627 					if (nlp->nlp_tics[ringno] &&
628 					    (hba->timer_tics >=
629 					    nlp->nlp_tics[ringno])) {
630 						/* If so, set the flag and */
631 						/* break out */
632 						found = 1;
633 						flag[ringno] = 1;
634 						break;
635 					}
636 				}
637 
638 				if (found) {
639 					break;
640 				}
641 
642 				nlp = nlp->nlp_list_next;
643 			}
644 
645 			if (found) {
646 				break;
647 			}
648 
649 		}
650 		rw_exit(&port->node_rwlock);
651 
652 		if (!found) {
653 			break;
654 		}
655 
656 		emlxs_node_timeout(port, nlp, ringno);
657 	}
658 
659 }  /* emlxs_timer_check_nodes() */
660 
661 
#ifdef DFC_SUPPORT
/*
 * Expire a pending diagnostic loopback session.
 *
 * When the loopback timer fires and the adapter is still in loopback
 * mode, reset the link to return it to normal operation.
 */
static void
emlxs_timer_check_loopback(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t do_reset;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Bail out if the timer is disarmed or has not yet expired */
	if ((hba->loopback_tics == 0) ||
	    (hba->timer_tics < hba->loopback_tics)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Expired; disarm the timer */
	hba->loopback_tics = 0;

	/* Only reset if the adapter is still in loopback mode */
	do_reset = (hba->flag & FC_LOOPBACK_MODE) ? 1 : 0;

	mutex_exit(&EMLXS_PORT_LOCK);

	if (do_reset) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
		    "LOOPBACK_MODE: Expired. Resetting...");
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
	}

	return;

}  /* emlxs_timer_check_loopback() */
#endif /* DFC_SUPPORT  */
700 
701 
702 static void
703 emlxs_timer_check_linkup(emlxs_hba_t *hba)
704 {
705 	emlxs_port_t *port = &PPORT;
706 	uint32_t linkup;
707 
708 	/* Check the linkup timer for expiration */
709 	mutex_enter(&EMLXS_PORT_LOCK);
710 	linkup = 0;
711 	if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
712 		hba->linkup_timer = 0;
713 
714 		/* Make sure link is still ready */
715 		if (hba->state >= FC_LINK_UP) {
716 			linkup = 1;
717 		}
718 	}
719 	mutex_exit(&EMLXS_PORT_LOCK);
720 
721 	/* Make the linkup callback */
722 	if (linkup) {
723 		emlxs_port_online(port);
724 	}
725 
726 	return;
727 
728 }  /* emlxs_timer_check_linkup() */
729 
730 
731 static void
732 emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
733 {
734 	emlxs_port_t *port = &PPORT;
735 	MAILBOX *mb;
736 	emlxs_config_t *cfg = &CFG;
737 
738 	if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
739 		return;
740 	}
741 
742 	if (hba->timer_tics < hba->heartbeat_timer) {
743 		return;
744 	}
745 
746 	hba->heartbeat_timer = hba->timer_tics + 5;
747 
748 	/* Return if adapter interrupts have occurred */
749 	if (hba->heartbeat_flag) {
750 		hba->heartbeat_flag = 0;
751 		return;
752 	}
753 	/* No adapter interrupts have occured for 5 seconds now */
754 
755 	/* Return if mailbox is busy */
756 	/* This means the mailbox timer routine is watching for problems */
757 	if (hba->mbox_timer) {
758 		return;
759 	}
760 
761 	/* Return if heartbeat is still outstanding */
762 	if (hba->heartbeat_active) {
763 		return;
764 	}
765 
766 	if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
767 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
768 		    "Unable to allocate heartbeat mailbox.");
769 		return;
770 	}
771 
772 	emlxs_mb_heartbeat(hba, mb);
773 	hba->heartbeat_active = 1;
774 
775 	if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_NOWAIT, 0) != MBX_BUSY) {
776 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
777 	}
778 
779 	return;
780 
781 }  /* emlxs_timer_check_heartbeat() */
782 
783 
/*
 * Check the tape (FCP-2) discovery timer and, when it expires, flush
 * any FCP-2 nodes whose FCP ring is still closed, then issue CLEAR_LA
 * to complete link bring-up.
 *
 * NOTE(review): EMLXS_PORT_LOCK is dropped and reacquired around
 * emlxs_mb_unreg_did(), and 'nlp' is dereferenced after node_rwlock is
 * released -- presumably safe because nodes are not freed from this
 * path, but confirm against the node teardown code.
 */
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	int32_t send_clear_la;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	MAILBOXQ *mbox;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Check the discovery timer for expiration */
	send_clear_la = 0;
	mutex_enter(&EMLXS_PORT_LOCK);
	while (hba->discovery_timer &&
	    (hba->timer_tics >= hba->discovery_timer) &&
	    (hba->state == FC_LINK_UP)) {
		send_clear_la = 1;

		/* Perform a flush on fcp2 nodes that are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED)) {
					found = 1;
					break;

				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		/* No closed FCP-2 nodes remain; stop flushing */
		if (!found) {
			break;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
		    "FCP2 device (did=%06x) missing. Flushing...",
		    nlp->nlp_DID);

		/* Drop the port lock across the unregister call */
		mutex_exit(&EMLXS_PORT_LOCK);

		(void) emlxs_mb_unreg_did(port, nlp->nlp_DID, NULL, NULL, NULL);

		mutex_enter(&EMLXS_PORT_LOCK);

	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Try to send clear link attention, if needed */
	if ((send_clear_la == 1) &&
	    (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
		mutex_enter(&EMLXS_PORT_LOCK);

		/*
		 * If state is not FC_LINK_UP, then either the link has gone
		 * down or a FC_CLEAR_LA has already been issued
		 */
		if (hba->state != FC_LINK_UP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbox);
		} else {
			/* Change state and clear discovery timer */
			emlxs_ffstate_change_locked(hba, FC_CLEAR_LA);

			hba->discovery_timer = 0;

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Prepare and send the CLEAR_LA command */
			emlxs_mb_clear_la(hba, (MAILBOX *)mbox);

			/* If not queued as busy, the mailbox is ours to free */
			if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mbox,
			    MBX_NOWAIT, 0) != MBX_BUSY) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
	}

	return;

}  /* emlxs_timer_check_discovery()  */
879 
880 
881 static void
882 emlxs_timer_check_ub(emlxs_port_t *port)
883 {
884 	emlxs_hba_t *hba = HBA;
885 	emlxs_unsol_buf_t *ulistp;
886 	fc_unsol_buf_t *ubp;
887 	emlxs_ub_priv_t *ub_priv;
888 	uint32_t i;
889 
890 	if (port->ub_timer > hba->timer_tics) {
891 		return;
892 	}
893 
894 	port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;
895 
896 	/* Check the unsolicited buffers */
897 	mutex_enter(&EMLXS_UB_LOCK);
898 
899 	ulistp = port->ub_pool;
900 	while (ulistp) {
901 		/* Check buffers in this pool */
902 		for (i = 0; i < ulistp->pool_nentries; i++) {
903 			ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
904 			ub_priv = ubp->ub_fca_private;
905 
906 			if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
907 				continue;
908 			}
909 
910 			/* If buffer has timed out, print message and */
911 			/* increase timeout */
912 			if ((ub_priv->time + ub_priv->timeout) <=
913 			    hba->timer_tics) {
914 				ub_priv->flags |= EMLXS_UB_TIMEOUT;
915 
916 				EMLXS_MSGF(EMLXS_CONTEXT,
917 				    &emlxs_sfs_debug_msg,
918 				    "Stale UB buffer detected (%d mins): "
919 				    "buffer=%p (%x,%x,%x,%x)",
920 				    (ub_priv->timeout / 60), ubp,
921 				    ubp->ub_frame.type, ubp->ub_frame.s_id,
922 				    ubp->ub_frame.ox_id, ubp->ub_frame.rx_id);
923 
924 				/* Increase timeout period */
925 
926 				/* If timeout was 5 mins or less, */
927 				/* increase it to 10 mins */
928 				if (ub_priv->timeout <= (5 * 60)) {
929 					ub_priv->timeout = (10 * 60);
930 				}
931 				/* If timeout was 10 mins or less, */
932 				/* increase it to 30 mins */
933 				else if (ub_priv->timeout <= (10 * 60)) {
934 					ub_priv->timeout = (30 * 60);
935 				}
936 				/* Otherwise double it. */
937 				else {
938 					ub_priv->timeout *= 2;
939 				}
940 			}
941 		}
942 
943 		ulistp = ulistp->pool_next;
944 	}
945 
946 	mutex_exit(&EMLXS_UB_LOCK);
947 
948 	return;
949 
950 }  /* emlxs_timer_check_ub()  */
951 
952 
/* EMLXS_FCTAB_LOCK must be held to call this */
/*
 * Escalating timeout handler for a packet stuck on the chip queue.
 *
 * sbp->abort_attempts selects the escalation step:
 *   0:  send an ABTS abort (link up) or close the XRI (link down)
 *   1:  force-close the XRI
 *   2:  request a link reset (returns 1)
 *   3+: request an adapter reset (returns 2)
 * Any abort/close IOCB built here is appended to the caller's abortq
 * for transmission after the locks are dropped.
 *
 * Returns 0 (handled), 1 (reset link), or 2 (reset adapter).
 */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abortq,
    uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	RING *rp = (RING *)sbp->ring;
	IOCBQ *iocbq = NULL;
	fc_packet_t *pkt;
	uint32_t rc = 0;

	mutex_enter(&sbp->mtx);

	/* Warning: Some FCT sbp's don't have fc_packet objects */
	pkt = PRIV2PKT(sbp);

	switch (sbp->abort_attempts) {
	case 0:

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Aborting. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS);

			/* The adapter will make 2 attempts to send ABTS */
			/* with 2*ratov timeout each time */
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
		} else {
			/* Link is down; just close the exchange locally */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Closing. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, rp);

			sbp->ticks = hba->timer_tics + 30;
		}

		/* set the flags */
		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

		/* Flag the ring so it gets serviced */
		flag[rp->ringno] = 1;
		rc = 0;

		break;

	case 1:

		/* The abort did not complete in time; force-close the XRI */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 2:Closing. sbp=%p iotag=%x", sbp, sbp->iotag);

		iocbq =
		    emlxs_create_close_xri_cn(port, sbp->node, sbp->iotag,
		    rp);

		sbp->ticks = hba->timer_tics + 30;

		flag[rp->ringno] = 1;
		rc = 0;

		break;

	case 2:

		/* The close did not complete either; escalate to link reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 3:Resetting link. sbp=%p iotag=%x", sbp,
		    sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 1;

		break;

	default:

		/* Nothing has worked; escalate to a full adapter reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: %d:Resetting adapter. sbp=%p iotag=%x",
		    sbp->abort_attempts, sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 2;

		break;
	}

	sbp->abort_attempts++;
	mutex_exit(&sbp->mtx);

	/* Append any abort/close iocb to the caller's abort queue */
	if (iocbq) {
		if (abortq->q_first) {
			((IOCBQ *)abortq->q_last)->next = iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt++;
		} else {
			abortq->q_first = (uint8_t *)iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt = 1;
		}
		iocbq->next = NULL;
	}

	return (rc);

}  /* emlxs_pkt_chip_timeout() */
1066 
1067 
#ifdef TX_WATCHDOG

/*
 * Debug-only (TX_WATCHDOG) consistency check of the transmit queues.
 *
 * For every packet marked PACKET_IN_TXQ, verify its IOCB is actually
 * present on the owning node's priority or normal tx queue.  A packet
 * missing from its queue is marked PACKET_STALE on the first pass; if
 * it is still missing on the next pass, its IOCB is re-queued via
 * emlxs_tx_put().
 *
 * NOTE(review): EMLXS_FCTAB_LOCK is acquired while EMLXS_RINGTX_LOCK
 * is held -- confirm this matches the lock order used elsewhere.
 */
static void
emlxs_tx_watchdog(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	NODELIST *nlp;
	uint32_t ringno;
	RING *rp;
	IOCBQ *next;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t found;
	MATCHMAP *bmp;
	Q abort;
	uint32_t iotag;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt = NULL;
	uint32_t cmd;
	uint32_t did;

	bzero((void *)&abort, sizeof (Q));

	mutex_enter(&EMLXS_RINGTX_LOCK);

	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
			sbp = rp->fc_table[iotag];
			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_TXQ)) {
				nlp = sbp->node;
				iocbq = &sbp->iocbq;

				/* Pick the queue the iocb should be on */
				if (iocbq->flag & IOCB_PRIORITY) {
					iocbq =
					    (IOCBQ *)nlp->nlp_ptx[ringno].
					    q_first;
				} else {
					iocbq =
					    (IOCBQ *)nlp->nlp_tx[ringno].
					    q_first;
				}

				/* Find a matching entry */
				found = 0;
				while (iocbq) {
					if (iocbq == &sbp->iocbq) {
						found = 1;
						break;
					}

					iocbq = (IOCBQ *)iocbq->next;
				}

				if (!found) {
					/* Missing: mark stale on the first */
					/* pass; collect for re-queue when */
					/* still stale on the next pass */
					if (!(sbp->pkt_flags & PACKET_STALE)) {
						mutex_enter(&sbp->mtx);
						sbp->pkt_flags |=
						    PACKET_STALE;
						mutex_exit(&sbp->mtx);
					} else {
						if (abort.q_first == 0) {
							abort.q_first =
							    &sbp->iocbq;
						} else {
							((IOCBQ *)abort.
							    q_last)->next =
							    &sbp->iocbq;
						}

						abort.q_last = &sbp->iocbq;
						abort.q_cnt++;
					}

				} else {
					/* Present again; clear stale mark */
					if ((sbp->pkt_flags & PACKET_STALE)) {
						mutex_enter(&sbp->mtx);
						sbp->pkt_flags &=
						    ~PACKET_STALE;
						mutex_exit(&sbp->mtx);
					}
				}
			}
		}
		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
	}

	/* Re-queue every iocb that stayed stale across two passes */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		next = (IOCBQ *)iocbq->next;
		iocbq->next = NULL;
		sbp = (emlxs_buf_t *)iocbq->sbp;

		pkt = PRIV2PKT(sbp);
		if (pkt) {
			/* NOTE(review): did/cmd are computed but unused -- */
			/* presumably left over from a removed debug message */
			did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = SWAP_DATA32(cmd);
		}


		emlxs_tx_put(iocbq, 0);

		iocbq = next;

	}	/* end of while */

	mutex_exit(&EMLXS_RINGTX_LOCK);

	return;

}  /* emlxs_tx_watchdog() */

#endif /* TX_WATCHDOG */
1185 
1186 
#ifdef DHCHAP_SUPPORT

/*
 * Check every node for expired DHCHAP authentication timers and invoke
 * the appropriate timeout handler.
 *
 * Fix: walk the full collision chain of each hash bucket via
 * nlp_list_next (as emlxs_timer_check_nodes() does).  Previously only
 * the first node of each bucket was examined, so authentication
 * timeouts on chained nodes were never detected.
 *
 * NOTE(review): like the original, this walks the table without taking
 * port->node_rwlock -- confirm the timeout handlers require that.
 */
static void
emlxs_timer_check_dhchap(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint32_t i;
	NODELIST *ndlp;

	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		for (ndlp = port->node_table[i]; ndlp != NULL;
		    ndlp = ndlp->nlp_list_next) {

			/* Check authentication response timeout */
			if (ndlp->node_dhc.nlp_authrsp_tmo &&
			    (hba->timer_tics >=
			    ndlp->node_dhc.nlp_authrsp_tmo)) {
				/* Trigger authresp timeout handler */
				(void) emlxs_dhc_authrsp_timeout(port, ndlp,
				    NULL);
			}

			/* Check reauthentication timeout */
			if (ndlp->node_dhc.nlp_reauth_tmo &&
			    (hba->timer_tics >=
			    ndlp->node_dhc.nlp_reauth_tmo)) {
				/* Trigger reauth timeout handler */
				emlxs_dhc_reauth_timeout(port, NULL, ndlp);
			}
		}
	}
	return;

}  /* emlxs_timer_check_dhchap */

#endif /* DHCHAP_SUPPORT */
1222