1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 #include <emlxs.h>
28 
29 /* Timer period in seconds */
30 #define	EMLXS_TIMER_PERIOD		1	/* secs */
31 #define	EMLXS_PKT_PERIOD		5	/* secs */
32 #define	EMLXS_UB_PERIOD			60	/* secs */
33 
34 EMLXS_MSG_DEF(EMLXS_CLOCK_C);
35 
36 
37 #ifdef DFC_SUPPORT
38 static void emlxs_timer_check_loopback(emlxs_hba_t *hba);
39 #endif /* DFC_SUPPORT */
40 
41 #ifdef DHCHAP_SUPPORT
42 static void emlxs_timer_check_dhchap(emlxs_port_t *port);
43 #endif /* DHCHAP_SUPPORT */
44 
45 static void	emlxs_timer(void *arg);
46 static void	emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
47 static uint32_t	emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
48 static void	emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
49 static void	emlxs_timer_check_linkup(emlxs_hba_t *hba);
50 static void	emlxs_timer_check_discovery(emlxs_port_t *port);
51 static void	emlxs_timer_check_ub(emlxs_port_t *port);
52 static void	emlxs_timer_check_rings(emlxs_hba_t *hba, uint8_t *flag);
53 static uint32_t	emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
54 			Q *abortq, uint8_t *flag);
55 
56 #ifdef TX_WATCHDOG
57 static void	emlxs_tx_watchdog(emlxs_hba_t *hba);
58 #endif /* TX_WATCHDOG */
59 
60 extern clock_t
61 emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
62 {
63 	emlxs_config_t *cfg = &CFG;
64 	clock_t time;
65 
66 	/* Set thread timeout */
67 	if (cfg[CFG_TIMEOUT_ENABLE].current) {
68 		(void) drv_getparm(LBOLT, &time);
69 		time += (timeout * drv_usectohz(1000000));
70 	} else {
71 		time = -1;
72 	}
73 
74 	return (time);
75 
76 }  /* emlxs_timeout() */
77 
78 
/*
 * Periodic watchdog entry point, scheduled via timeout(9F) every
 * EMLXS_TIMER_PERIOD seconds.  Runs all standard checks through
 * emlxs_timer_checks() and then re-arms itself, unless a kill
 * request (EMLXS_TIMER_KILL) has been posted by emlxs_timer_stop().
 */
static void
emlxs_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;

	/* Timer has already been disarmed */
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);

	/* Only one timer thread is allowed */
	if (hba->timer_flags & EMLXS_TIMER_BUSY) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	/* Check if a kill request has been made */
	if (hba->timer_flags & EMLXS_TIMER_KILL) {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;

		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
	hba->timer_tics = DRV_TIME;

	/* Drop the lock while running the checks; BUSY guards reentry */
	mutex_exit(&EMLXS_TIMER_LOCK);

	/* Perform standard checks */
	emlxs_timer_checks(hba);

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);

	hba->timer_flags &= ~EMLXS_TIMER_BUSY;

	/* If timer is still enabled, restart it */
	if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
		hba->timer_id =
		    timeout(emlxs_timer, (void *)hba,
		    (EMLXS_TIMER_PERIOD * drv_usectohz(1000000)));
	} else {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

}  /* emlxs_timer() */
133 
134 
/*
 * Run all per-tick watchdog checks in order: mailbox, heartbeat,
 * loopback, packet timeouts, linkup, then per-port checks (nodes,
 * discovery, unsolicited buffers, DHCHAP), and finally ring service.
 * Called from emlxs_timer() once per EMLXS_TIMER_PERIOD.
 */
extern void
emlxs_timer_checks(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t flag[MAX_RINGS];
	uint32_t i;
	uint32_t rc;

	/* Exit if we are still initializing */
	if (hba->state < FC_LINK_DOWN) {
		return;
	}

	/* flag[ringno] is set by the checks below when that ring needs */
	/* IOCB servicing; emlxs_timer_check_rings() acts on it last */
	bzero((void *)flag, sizeof (flag));

	/* Check for mailbox timeout */
	emlxs_timer_check_mbox(hba);

	/* Check heartbeat timer */
	emlxs_timer_check_heartbeat(hba);

#ifdef IDLE_TIMER
	emlxs_pm_idle_timer(hba);
#endif /* IDLE_TIMER */

#ifdef DFC_SUPPORT
	/* Check for loopback timeouts */
	emlxs_timer_check_loopback(hba);
#endif /* DFC_SUPPORT */

	/* Check for packet timeouts */
	rc = emlxs_timer_check_pkts(hba, flag);

	if (rc) {
		/* Link or adapter is being reset; skip remaining checks */
		return;
	}

	/* Check for linkup timeout */
	emlxs_timer_check_linkup(hba);

	/* Check the ports */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		/* Check for node gate timeouts */
		emlxs_timer_check_nodes(port, flag);

		/* Check for tape discovery timeout */
		emlxs_timer_check_discovery(port);

		/* Check for UB timeouts */
		emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
		/* Check for DHCHAP authentication timeouts */
		emlxs_timer_check_dhchap(port);
#endif /* DHCHAP_SUPPORT */

	}

	/* Check for ring service timeouts */
	/* Always do this last */
	emlxs_timer_check_rings(hba, flag);

	return;

}  /* emlxs_timer_checks() */
207 
208 
209 extern void
210 emlxs_timer_start(emlxs_hba_t *hba)
211 {
212 	if (hba->timer_id) {
213 		return;
214 	}
215 
216 	/* Restart the timer */
217 	mutex_enter(&EMLXS_TIMER_LOCK);
218 	if (!hba->timer_id) {
219 		hba->timer_flags = 0;
220 		hba->timer_id =
221 		    timeout(emlxs_timer, (void *)hba, drv_usectohz(1000000));
222 	}
223 	mutex_exit(&EMLXS_TIMER_LOCK);
224 
225 }  /* emlxs_timer_start() */
226 
227 
228 extern void
229 emlxs_timer_stop(emlxs_hba_t *hba)
230 {
231 	if (!hba->timer_id) {
232 		return;
233 	}
234 
235 	mutex_enter(&EMLXS_TIMER_LOCK);
236 	hba->timer_flags |= EMLXS_TIMER_KILL;
237 
238 	while (hba->timer_id) {
239 		mutex_exit(&EMLXS_TIMER_LOCK);
240 		delay(drv_usectohz(500000));
241 		mutex_enter(&EMLXS_TIMER_LOCK);
242 	}
243 	mutex_exit(&EMLXS_TIMER_LOCK);
244 
245 	return;
246 
247 }  /* emlxs_timer_stop() */
248 
249 
/*
 * Scan the driver transmit queues and the adapter's active I/O tables
 * for packets whose timeout tick has passed.  Timed-out TXQ packets
 * are dequeued and completed with an error; timed-out packets already
 * on the chip are escalated via emlxs_pkt_chip_timeout().  Returns
 * nonzero when a link reset (1) or adapter reset (2) thread has been
 * spawned, in which case the caller skips its remaining timer checks.
 */
static uint32_t
emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	Q tmo;
	int32_t ringno;
	RING *rp;
	NODELIST *nlp;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb;
	IOCBQ *iocbq;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt;
	Q abort;
	uint32_t iotag;
	uint32_t rc;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return (0);
	}

	/* Only scan once every EMLXS_PKT_PERIOD seconds */
	if (hba->pkt_timer > hba->timer_tics) {
		return (0);
	}

	hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;


	bzero((void *)&tmo, sizeof (Q));

	/*
	 * We must hold the locks here because we never know when an iocb
	 * will be removed out from under us
	 */

	mutex_enter(&EMLXS_RINGTX_LOCK);

	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		/* Scan the tx queues for each active node on the ring */

		/* Get the first node */
		nlp = (NODELIST *)rp->nodeq.q_first;

		while (nlp) {
			/* Scan the node's priority tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_ptx[ringno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now dequeue it */
					if (next == NULL) {
						nlp->nlp_ptx[ringno].q_last =
						    (uint8_t *)prev;
					}

					if (prev == NULL) {
						nlp->nlp_ptx[ringno].q_first =
						    (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_ptx[ringno].q_cnt--;

					/* Add this iocb to our local */
					/* timeout queue */

					/*
					 * This way we don't hold the RINGTX
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first =
						    (uint8_t *)iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */


			/* Scan the node's tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_tx[ringno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now dequeue it */
					if (next == NULL) {
						nlp->nlp_tx[ringno].q_last =
						    (uint8_t *)prev;
					}

					if (prev == NULL) {
						nlp->nlp_tx[ringno].q_first =
						    (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_tx[ringno].q_cnt--;

					/* Add this iocb to our local */
					/* timeout queue */

					/*
					 * This way we don't hold the RINGTX
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first =
						    (uint8_t *)iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */

			/* The node queue is circular; stop at the last node */
			if (nlp == (NODELIST *)rp->nodeq.q_last) {
				nlp = NULL;
			} else {
				nlp = nlp->nlp_next[ringno];
			}

		}	/* while(nlp) */

	}	/* end of for */

	/* Now cleanup the iocb's (still under EMLXS_RINGTX_LOCK) */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;
		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
		ringno = ((RING *)iocbq->ring)->ringno;

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			if (sbp->pkt_flags & PACKET_IN_TXQ) {
				sbp->pkt_flags &= ~PACKET_IN_TXQ;
				hba->ring_tx_count[ringno]--;
			}
			sbp->pkt_flags |= PACKET_IN_TIMEOUT;
			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;

	}	/* end of while */

	mutex_exit(&EMLXS_RINGTX_LOCK);

	/* Now complete the transmit timeouts outside the locks */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/* Warning: Some FCT sbp's don't have */
			/* fc_packet objects */
			pkt = PRIV2PKT(sbp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "TXQ abort: sbp=%p iotag=%x tmo=%d", sbp,
			    sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			/* Complete with an error that reflects link state */
			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_TIMEOUT, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}

		iocbq = next;

	}	/* end of while */



	/* Now check the chip */
	bzero((void *)&abort, sizeof (Q));

	/* Check the rings for timed-out I/Os active on the adapter */
	rc = 0;
	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
			sbp = rp->fc_table[iotag];
			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
			    (hba->timer_tics >= sbp->ticks)) {
				rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
				    sbp, &abort, flag);

				/* Nonzero rc means a reset is required; */
				/* stop scanning */
				if (rc) {
					break;
				}
			}
		}
		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));

		if (rc) {
			break;
		}
	}

	/* Now put the abort/close iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	if (rc == 1) {
		/* Spawn a thread to reset the link */
		thread_create(NULL, 0, emlxs_reset_link_thread, (char *)hba,
		    0, &p0, TS_RUN, v.v_maxsyspri - 2);
	} else if (rc == 2) {
		/* Spawn a thread to reset the adapter */
		thread_create(NULL, 0, emlxs_restart_thread, (char *)hba, 0,
		    &p0, TS_RUN, v.v_maxsyspri - 2);
	}

	return (rc);

}  /* emlxs_timer_check_pkts() */
543 
544 
545 
546 static void
547 emlxs_timer_check_rings(emlxs_hba_t *hba, uint8_t *flag)
548 {
549 	emlxs_port_t *port = &PPORT;
550 	emlxs_config_t *cfg = &CFG;
551 	int32_t ringno;
552 	RING *rp;
553 	uint32_t logit = 0;
554 
555 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
556 		return;
557 	}
558 
559 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
560 		rp = &hba->ring[ringno];
561 
562 		/* Check for ring timeout now */
563 		mutex_enter(&EMLXS_RINGTX_LOCK);
564 		if (rp->timeout && (hba->timer_tics >= rp->timeout)) {
565 			/* Check if there is still work to do on the ring and */
566 			/* the link is still up */
567 			if (rp->nodeq.q_first) {
568 				flag[ringno] = 1;
569 				rp->timeout = hba->timer_tics + 10;
570 
571 				if (hba->state >= FC_LINK_UP) {
572 					logit = 1;
573 				}
574 			} else {
575 				rp->timeout = 0;
576 			}
577 		}
578 		mutex_exit(&EMLXS_RINGTX_LOCK);
579 
580 		if (logit) {
581 			EMLXS_MSGF(EMLXS_CONTEXT,
582 			    &emlxs_ring_watchdog_msg,
583 			    "%s host=%d port=%d cnt=%d,%d",
584 			    emlxs_ring_xlate(ringno),
585 			    rp->fc_cmdidx, rp->fc_port_cmdidx,
586 			    hba->ring_tx_count[ringno],
587 			    hba->io_count[ringno]);
588 		}
589 
590 		/*
591 		 * If ring flag is set, request iocb servicing here to send any
592 		 * iocb's that may still be queued
593 		 */
594 		if (flag[ringno]) {
595 			emlxs_sli_issue_iocb_cmd(hba, rp, 0);
596 		}
597 	}
598 
599 	return;
600 
601 }  /* emlxs_timer_check_rings() */
602 
603 
/*
 * Scan this port's node hash table for nodes whose per-ring timer
 * (nlp_tics) has expired and call emlxs_node_timeout() for each one.
 * After every handler call the table is re-scanned from the top,
 * because node_rwlock cannot be held across the handler.
 */
static void
emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	int32_t ringno;

	for (;;) {
		/* Check node gate flag for expiration */
		found = 0;

		/*
		 * We need to lock, scan, and unlock because we can't hold
		 * the lock while we call emlxs_node_timeout()
		 */
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				for (ringno = 0; ringno < hba->ring_count;
				    ringno++) {
					/* Check if the node timer is active */
					/* and if timer has expired */
					if (nlp->nlp_tics[ringno] &&
					    (hba->timer_tics >=
					    nlp->nlp_tics[ringno])) {
						/* If so, set the flag and */
						/* break out */
						found = 1;
						flag[ringno] = 1;
						break;
					}
				}

				if (found) {
					break;
				}

				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}

		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}

		/* NOTE(review): nlp is used here after node_rwlock was */
		/* dropped -- assumes the node stays valid; confirm */
		emlxs_node_timeout(port, nlp, ringno);
	}

}  /* emlxs_timer_check_nodes() */
662 
663 
#ifdef DFC_SUPPORT
/*
 * Check the loopback timer for expiration.  When it fires and the
 * adapter is still in loopback mode, request a link reset to take it
 * back out of loopback.
 */
static void
emlxs_timer_check_loopback(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t reset;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Test the loopback timer under the port lock */
	mutex_enter(&EMLXS_PORT_LOCK);

	if ((hba->loopback_tics == 0) ||
	    (hba->timer_tics < hba->loopback_tics)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Timer expired; disarm it */
	hba->loopback_tics = 0;

	reset = (hba->flag & FC_LOOPBACK_MODE) ? 1 : 0;

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform the reset outside the lock */
	if (reset) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
		    "LOOPBACK_MODE: Expired. Resetting...");
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
	}

	return;

}  /* emlxs_timer_check_loopback() */
#endif /* DFC_SUPPORT  */
702 
703 
704 static void
705 emlxs_timer_check_linkup(emlxs_hba_t *hba)
706 {
707 	emlxs_port_t *port = &PPORT;
708 	uint32_t linkup;
709 
710 	/* Check the linkup timer for expiration */
711 	mutex_enter(&EMLXS_PORT_LOCK);
712 	linkup = 0;
713 	if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
714 		hba->linkup_timer = 0;
715 
716 		/* Make sure link is still ready */
717 		if (hba->state >= FC_LINK_UP) {
718 			linkup = 1;
719 		}
720 	}
721 	mutex_exit(&EMLXS_PORT_LOCK);
722 
723 	/* Make the linkup callback */
724 	if (linkup) {
725 		emlxs_port_online(port);
726 	}
727 
728 	return;
729 
730 }  /* emlxs_timer_check_linkup() */
731 
732 
/*
 * Every 5 seconds, verify that the adapter is still generating
 * interrupts.  If none have been seen since the last pass and no
 * mailbox activity is pending, issue a HEARTBEAT mailbox command so
 * that the mailbox timeout logic can detect a non-responsive adapter.
 */
static void
emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *mb;
	emlxs_config_t *cfg = &CFG;

	if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
		return;
	}

	/* Only check once every 5 seconds */
	if (hba->timer_tics < hba->heartbeat_timer) {
		return;
	}

	hba->heartbeat_timer = hba->timer_tics + 5;

	/* Return if adapter interrupts have occurred */
	if (hba->heartbeat_flag) {
		hba->heartbeat_flag = 0;
		return;
	}
	/* No adapter interrupts have occurred for 5 seconds now */

	/* Return if mailbox is busy */
	/* This means the mailbox timer routine is watching for problems */
	if (hba->mbox_timer) {
		return;
	}

	/* Return if heartbeat is still outstanding */
	if (hba->heartbeat_active) {
		return;
	}

	if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Unable to allocate heartbeat mailbox.");
		return;
	}

	emlxs_mb_heartbeat(hba, mb);
	hba->heartbeat_active = 1;

	/* Free the mailbox buffer unless the command was left queued */
	if (emlxs_sli_issue_mbox_cmd(hba, mb, MBX_NOWAIT, 0) != MBX_BUSY) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
	}

	return;

}  /* emlxs_timer_check_heartbeat() */
784 
785 
/*
 * Check the (tape) discovery timer.  While it has expired and the
 * link is still up, flush any FCP2 device whose FCP ring is still
 * closed (one node per pass, rescanning after each flush), and then
 * issue a CLEAR_LA mailbox command to complete discovery.
 */
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	int32_t send_clear_la;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	MAILBOXQ *mbox;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Check the discovery timer for expiration */
	send_clear_la = 0;
	mutex_enter(&EMLXS_PORT_LOCK);
	while (hba->discovery_timer &&
	    (hba->timer_tics >= hba->discovery_timer) &&
	    (hba->state == FC_LINK_UP)) {
		send_clear_la = 1;

		/* Perform a flush on fcp2 nodes that are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[FC_FCP_RING] & NLP_CLOSED)) {
					found = 1;
					break;

				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		/* No more closed FCP2 nodes; exit and send CLEAR_LA */
		if (!found) {
			break;
		}

		/* NOTE(review): nlp is used below after node_rwlock was */
		/* dropped -- assumes the node stays valid; confirm */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
		    "FCP2 device (did=%06x) missing. Flushing...",
		    nlp->nlp_DID);

		mutex_exit(&EMLXS_PORT_LOCK);

		(void) emlxs_mb_unreg_did(port, nlp->nlp_DID, NULL, NULL, NULL);

		mutex_enter(&EMLXS_PORT_LOCK);

	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Try to send clear link attention, if needed */
	if ((send_clear_la == 1) &&
	    (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
		mutex_enter(&EMLXS_PORT_LOCK);

		/*
		 * If state is not FC_LINK_UP, then either the link has gone
		 * down or a FC_CLEAR_LA has already been issued
		 */
		if (hba->state != FC_LINK_UP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbox);
		} else {
			/* Change state and clear discovery timer */
			emlxs_ffstate_change_locked(hba, FC_CLEAR_LA);

			hba->discovery_timer = 0;

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Prepare and send the CLEAR_LA command */
			emlxs_mb_clear_la(hba, (MAILBOX *)mbox);

			/* Free the mailbox unless the command was queued */
			if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mbox,
			    MBX_NOWAIT, 0) != MBX_BUSY) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
	}

	return;

}  /* emlxs_timer_check_discovery()  */
881 
882 
883 static void
884 emlxs_timer_check_ub(emlxs_port_t *port)
885 {
886 	emlxs_hba_t *hba = HBA;
887 	emlxs_unsol_buf_t *ulistp;
888 	fc_unsol_buf_t *ubp;
889 	emlxs_ub_priv_t *ub_priv;
890 	uint32_t i;
891 
892 	if (port->ub_timer > hba->timer_tics) {
893 		return;
894 	}
895 
896 	port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;
897 
898 	/* Check the unsolicited buffers */
899 	mutex_enter(&EMLXS_UB_LOCK);
900 
901 	ulistp = port->ub_pool;
902 	while (ulistp) {
903 		/* Check buffers in this pool */
904 		for (i = 0; i < ulistp->pool_nentries; i++) {
905 			ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
906 			ub_priv = ubp->ub_fca_private;
907 
908 			if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
909 				continue;
910 			}
911 
912 			/* If buffer has timed out, print message and */
913 			/* increase timeout */
914 			if ((ub_priv->time + ub_priv->timeout) <=
915 			    hba->timer_tics) {
916 				ub_priv->flags |= EMLXS_UB_TIMEOUT;
917 
918 				EMLXS_MSGF(EMLXS_CONTEXT,
919 				    &emlxs_sfs_debug_msg,
920 				    "Stale UB buffer detected (%d mins): "
921 				    "buffer=%p (%x,%x,%x,%x)",
922 				    (ub_priv->timeout / 60), ubp,
923 				    ubp->ub_frame.type, ubp->ub_frame.s_id,
924 				    ubp->ub_frame.ox_id, ubp->ub_frame.rx_id);
925 
926 				/* Increase timeout period */
927 
928 				/* If timeout was 5 mins or less, */
929 				/* increase it to 10 mins */
930 				if (ub_priv->timeout <= (5 * 60)) {
931 					ub_priv->timeout = (10 * 60);
932 				}
933 				/* If timeout was 10 mins or less, */
934 				/* increase it to 30 mins */
935 				else if (ub_priv->timeout <= (10 * 60)) {
936 					ub_priv->timeout = (30 * 60);
937 				}
938 				/* Otherwise double it. */
939 				else {
940 					ub_priv->timeout *= 2;
941 				}
942 			}
943 		}
944 
945 		ulistp = ulistp->pool_next;
946 	}
947 
948 	mutex_exit(&EMLXS_UB_LOCK);
949 
950 	return;
951 
952 }  /* emlxs_timer_check_ub()  */
953 
954 
/*
 * Escalate a timed-out I/O that is still active on the chip, based on
 * how many abort attempts have already been made:
 *   attempt 0:  abort the exchange (ABTS) if the link is up,
 *               otherwise close the XRI;
 *   attempt 1:  close the XRI;
 *   attempt 2:  request a link reset (returns 1);
 *   attempt 3+: request an adapter reset (returns 2).
 * Any abort/close IOCB built here is appended to abortq; the caller
 * transmits that queue after dropping the lock.
 */
/* EMLXS_FCTAB_LOCK must be held to call this */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abortq,
    uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	RING *rp = (RING *)sbp->ring;
	IOCBQ *iocbq = NULL;
	fc_packet_t *pkt;
	uint32_t rc = 0;

	mutex_enter(&sbp->mtx);

	/* Warning: Some FCT sbp's don't have fc_packet objects */
	pkt = PRIV2PKT(sbp);

	switch (sbp->abort_attempts) {
	case 0:

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Aborting. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS);

			/* The adapter will make 2 attempts to send ABTS */
			/* with 2*ratov timeout each time */
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Closing. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, rp);

			sbp->ticks = hba->timer_tics + 30;
		}

		/* set the flags */
		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

		/* Request ring servicing so the abort gets sent */
		flag[rp->ringno] = 1;
		rc = 0;

		break;

	case 1:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 2:Closing. sbp=%p iotag=%x", sbp, sbp->iotag);

		iocbq =
		    emlxs_create_close_xri_cn(port, sbp->node, sbp->iotag,
		    rp);

		sbp->ticks = hba->timer_tics + 30;

		flag[rp->ringno] = 1;
		rc = 0;

		break;

	case 2:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 3:Resetting link. sbp=%p iotag=%x", sbp,
		    sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 1;

		break;

	default:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: %d:Resetting adapter. sbp=%p iotag=%x",
		    sbp->abort_attempts, sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 2;

		break;
	}

	sbp->abort_attempts++;
	mutex_exit(&sbp->mtx);

	/* Append the abort/close IOCB (if any) to the caller's queue */
	if (iocbq) {
		if (abortq->q_first) {
			((IOCBQ *)abortq->q_last)->next = iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt++;
		} else {
			abortq->q_first = (uint8_t *)iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt = 1;
		}
		iocbq->next = NULL;
	}

	return (rc);

}  /* emlxs_pkt_chip_timeout() */
1068 
1069 
#ifdef TX_WATCHDOG

/*
 * (Compiled only with TX_WATCHDOG.)  Cross-check every packet marked
 * PACKET_IN_TXQ against the node tx/ptx queues it should be on.  A
 * packet not found on its queue is first marked PACKET_STALE; if it
 * is still missing on a later pass it is re-queued via emlxs_tx_put().
 */
static void
emlxs_tx_watchdog(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	NODELIST *nlp;
	uint32_t ringno;
	RING *rp;
	IOCBQ *next;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t found;
	MATCHMAP *bmp;
	Q abort;
	uint32_t iotag;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt = NULL;
	uint32_t cmd;
	uint32_t did;

	bzero((void *)&abort, sizeof (Q));

	mutex_enter(&EMLXS_RINGTX_LOCK);

	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
			sbp = rp->fc_table[iotag];
			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_TXQ)) {
				nlp = sbp->node;
				iocbq = &sbp->iocbq;

				/* Pick the queue this packet should be on */
				if (iocbq->flag & IOCB_PRIORITY) {
					iocbq =
					    (IOCBQ *)nlp->nlp_ptx[ringno].
					    q_first;
				} else {
					iocbq =
					    (IOCBQ *)nlp->nlp_tx[ringno].
					    q_first;
				}

				/* Find a matching entry */
				found = 0;
				while (iocbq) {
					if (iocbq == &sbp->iocbq) {
						found = 1;
						break;
					}

					iocbq = (IOCBQ *)iocbq->next;
				}

				if (!found) {
					/* First miss: mark stale; second */
					/* miss: queue for re-transmit */
					if (!(sbp->pkt_flags & PACKET_STALE)) {
						mutex_enter(&sbp->mtx);
						sbp->pkt_flags |=
						    PACKET_STALE;
						mutex_exit(&sbp->mtx);
					} else {
						if (abort.q_first == 0) {
							abort.q_first =
							    &sbp->iocbq;
						} else {
							((IOCBQ *)abort.
							    q_last)->next =
							    &sbp->iocbq;
						}

						abort.q_last = &sbp->iocbq;
						abort.q_cnt++;
					}

				} else {
					/* Packet found on queue; clear any */
					/* stale marking */
					if ((sbp->pkt_flags & PACKET_STALE)) {
						mutex_enter(&sbp->mtx);
						sbp->pkt_flags &=
						    ~PACKET_STALE;
						mutex_exit(&sbp->mtx);
					}
				}
			}
		}
		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
	}

	/* Re-queue every packet that was missing twice in a row */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		next = (IOCBQ *)iocbq->next;
		iocbq->next = NULL;
		sbp = (emlxs_buf_t *)iocbq->sbp;

		pkt = PRIV2PKT(sbp);
		if (pkt) {
			did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = SWAP_DATA32(cmd);
		}
		/* NOTE(review): did/cmd are computed above but not */
		/* referenced further -- presumably debug leftovers */


		emlxs_tx_put(iocbq, 0);

		iocbq = next;

	}	/* end of while */

	mutex_exit(&EMLXS_RINGTX_LOCK);

	return;

}  /* emlxs_tx_watchdog() */

#endif /* TX_WATCHDOG */
1187 
1188 
#ifdef DHCHAP_SUPPORT

/*
 * Check every node on this port for expired DHCHAP authentication
 * timers and trigger the corresponding timeout handlers.
 *
 * Fix: walk the full hash chain of each bucket.  Previously only the
 * head node of each bucket (port->node_table[i]) was examined, so
 * nodes deeper in a chain never had their authrsp/reauth timers
 * checked -- unlike the sibling scans in emlxs_timer_check_nodes()
 * and emlxs_timer_check_discovery(), which follow nlp_list_next.
 */
static void
emlxs_timer_check_dhchap(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint32_t i;
	NODELIST *ndlp;
	NODELIST *next;

	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		ndlp = port->node_table[i];

		while (ndlp) {
			/* Save the successor first; NOTE(review): this */
			/* assumes the handlers below may unlink ndlp but */
			/* do not free other nodes on this chain -- confirm */
			next = ndlp->nlp_list_next;

			/* Check authentication response timeout */
			if (ndlp->node_dhc.nlp_authrsp_tmo &&
			    (hba->timer_tics >=
			    ndlp->node_dhc.nlp_authrsp_tmo)) {
				/* Trigger authresp timeout handler */
				(void) emlxs_dhc_authrsp_timeout(port, ndlp,
				    NULL);
			}

			/* Check reauthentication timeout */
			if (ndlp->node_dhc.nlp_reauth_tmo &&
			    (hba->timer_tics >=
			    ndlp->node_dhc.nlp_reauth_tmo)) {
				/* Trigger reauth timeout handler */
				emlxs_dhc_reauth_timeout(port, NULL, ndlp);
			}

			ndlp = next;
		}
	}
	return;

}  /* emlxs_timer_check_dhchap */

#endif /* DHCHAP_SUPPORT */
1224