1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Timer period in seconds */
31 #define	EMLXS_TIMER_PERIOD		1	/* secs */
32 #define	EMLXS_PKT_PERIOD		5	/* secs */
33 #define	EMLXS_UB_PERIOD			60	/* secs */
34 
35 EMLXS_MSG_DEF(EMLXS_CLOCK_C);
36 
37 
38 static void emlxs_timer_check_loopback(emlxs_hba_t *hba);
39 
40 #ifdef DHCHAP_SUPPORT
41 static void emlxs_timer_check_dhchap(emlxs_port_t *port);
42 #endif /* DHCHAP_SUPPORT */
43 
44 static void	emlxs_timer(void *arg);
45 static void	emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
46 static uint32_t	emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
47 static void	emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
48 static void	emlxs_timer_check_linkup(emlxs_hba_t *hba);
49 static void	emlxs_timer_check_discovery(emlxs_port_t *port);
50 static void	emlxs_timer_check_ub(emlxs_port_t *port);
51 static void	emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag);
52 static uint32_t	emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
53 			Q *abortq, uint8_t *flag);
54 
55 #ifdef TX_WATCHDOG
56 static void	emlxs_tx_watchdog(emlxs_hba_t *hba);
57 #endif /* TX_WATCHDOG */
58 
59 extern clock_t
60 emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
61 {
62 	emlxs_config_t *cfg = &CFG;
63 	clock_t time;
64 
65 	/* Set thread timeout */
66 	if (cfg[CFG_TIMEOUT_ENABLE].current) {
67 		(void) drv_getparm(LBOLT, &time);
68 		time += (timeout * drv_usectohz(1000000));
69 	} else {
70 		time = -1;
71 	}
72 
73 	return (time);
74 
75 } /* emlxs_timeout() */
76 
77 
/*
 * Periodic watchdog callback (scheduled via timeout(9F)).  Runs the
 * standard driver checks once per EMLXS_TIMER_PERIOD seconds and then
 * reschedules itself unless a kill request has been posted.
 */
static void
emlxs_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;

	/* Timer has been cancelled; nothing to do */
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);

	/* Poll for adapter error attention on every tick */
	EMLXS_SLI_POLL_ERRATT(hba);

	/* Only one timer thread is allowed */
	if (hba->timer_flags & EMLXS_TIMER_BUSY) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	/* Check if a kill request has been made */
	if (hba->timer_flags & EMLXS_TIMER_KILL) {
		/* Acknowledge the kill; emlxs_timer_stop() polls timer_id */
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;

		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
	/* Snapshot the current driver time for this pass */
	hba->timer_tics = DRV_TIME;

	mutex_exit(&EMLXS_TIMER_LOCK);

	/* Perform standard checks (lock dropped; BUSY flag guards reentry) */
	emlxs_timer_checks(hba);

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);

	hba->timer_flags &= ~EMLXS_TIMER_BUSY;

	/* If timer is still enabled, restart it */
	if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
		hba->timer_id =
		    timeout(emlxs_timer, (void *)hba,
		    (EMLXS_TIMER_PERIOD * drv_usectohz(1000000)));
	} else {
		/* A kill request arrived while we were busy; acknowledge it */
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer() */
134 
135 
/*
 * Run one pass of all periodic driver checks.  Called from emlxs_timer()
 * with no locks held.  The per-channel 'flag' array is filled in by the
 * packet and node checks and consumed last by the channel check.
 */
extern void
emlxs_timer_checks(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t flag[MAX_CHANNEL];
	uint32_t i;
	uint32_t rc;

	/* Exit if we are still initializing */
	if (hba->state < FC_LINK_DOWN) {
		return;
	}

	/* DEBUG - re-examine this path for SLI4 later */
	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
		/* Check for linkup timeout */
		emlxs_timer_check_linkup(hba);

		/* NOTE: all other checks below are skipped for SLI4 */
		return;
	}

	bzero((void *)flag, sizeof (flag));

	/* Check SLI level timeouts */
	EMLXS_SLI_TIMER(hba);

	/* Check event queue */
	emlxs_timer_check_events(hba);

	/* Check heartbeat timer */
	emlxs_timer_check_heartbeat(hba);

#ifdef IDLE_TIMER
	emlxs_pm_idle_timer(hba);
#endif /* IDLE_TIMER */

	/* Check for loopback timeouts */
	emlxs_timer_check_loopback(hba);

	/* Check for packet timeouts */
	rc = emlxs_timer_check_pkts(hba, flag);

	if (rc) {
		/* Link or adapter is being reset; skip remaining checks */
		return;
	}

	/* Check for linkup timeout */
	emlxs_timer_check_linkup(hba);

	/* Check the ports */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		/* Skip ports not bound to the transport */
		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		/* Check for node gate timeouts */
		emlxs_timer_check_nodes(port, flag);

		/* Check for tape discovery timeout */
		emlxs_timer_check_discovery(port);

		/* Check for UB timeouts */
		emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
		/* Check for DHCHAP authentication timeouts */
		emlxs_timer_check_dhchap(port);
#endif /* DHCHAP_SUPPORT */

	}

	/* Check for IO channel service timeouts */
	/* Always do this last */
	emlxs_timer_check_channels(hba, flag);

	return;

} /* emlxs_timer_checks() */
217 
218 
219 extern void
220 emlxs_timer_start(emlxs_hba_t *hba)
221 {
222 	if (hba->timer_id) {
223 		return;
224 	}
225 
226 	/* Restart the timer */
227 	mutex_enter(&EMLXS_TIMER_LOCK);
228 	if (!hba->timer_id) {
229 		hba->timer_flags = 0;
230 		hba->timer_id =
231 		    timeout(emlxs_timer, (void *)hba, drv_usectohz(1000000));
232 	}
233 	mutex_exit(&EMLXS_TIMER_LOCK);
234 
235 } /* emlxs_timer_start() */
236 
237 
/*
 * Synchronously stop the watchdog timer.  Posts a kill request and
 * then polls (500ms intervals) until the timer callback acknowledges
 * it by clearing timer_id.  Must not be called from the timer thread
 * itself (it would deadlock waiting for its own acknowledgement).
 */
extern void
emlxs_timer_stop(emlxs_hba_t *hba)
{
	/* Already stopped */
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);
	hba->timer_flags |= EMLXS_TIMER_KILL;

	/* Wait for emlxs_timer() to see the kill and clear timer_id */
	while (hba->timer_id) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		delay(drv_usectohz(500000));
		mutex_enter(&EMLXS_TIMER_LOCK);
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer_stop() */
258 
259 
/*
 * Scan for timed-out packets, both on the software transmit queues and
 * outstanding on the chip.  Runs at most once per EMLXS_PKT_PERIOD.
 *
 * Returns:
 *	0 - normal; caller may continue its checks
 *	1 - a link reset thread was spawned
 *	2 - an adapter restart thread was spawned
 * (Nonzero makes emlxs_timer_checks() abandon the rest of its pass.)
 *
 * 'flag' is a per-channel array; entries are set to 1 for channels that
 * need IOCB servicing.
 */
static uint32_t
emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	Q tmo;		/* local list of timed-out TX queue iocbs */
	int32_t channelno;
	CHANNEL *cp;
	NODELIST *nlp;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb;
	IOCBQ *iocbq;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt;
	Q abort;	/* abort/close iocbs built for chip-queue timeouts */
	uint32_t iotag;
	uint32_t rc;

	/* Packet timeouts disabled by configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return (0);
	}

	/* Not time for another scan yet */
	if (hba->pkt_timer > hba->timer_tics) {
		return (0);
	}

	hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;


	bzero((void *)&tmo, sizeof (Q));

	/*
	 * We must hold the locks here because we never know when an iocb
	 * will be removed out from under us
	 */

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		/* Scan the tx queues for each active node on the channel */

		/* Get the first node */
		nlp = (NODELIST *)cp->nodeq.q_first;

		while (nlp) {
			/* Scan the node's priority tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now deque it */
					if (next == NULL) {
						nlp->nlp_ptx[channelno].q_last =
						    (uint8_t *)prev;
					}

					if (prev == NULL) {
						nlp->nlp_ptx[channelno].
						    q_first = (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_ptx[channelno].q_cnt--;

					/* Add this iocb to our local */
					/* timeout queue */

					/*
					 * This way we don't hold the TX_CHANNEL
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first =
						    (uint8_t *)iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					/* Not timed out; keep as 'prev' */
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */


			/* Scan the node's tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now deque it */
					if (next == NULL) {
						nlp->nlp_tx[channelno].q_last =
						    (uint8_t *)prev;
					}

					if (prev == NULL) {
						nlp->nlp_tx[channelno].q_first =
						    (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_tx[channelno].q_cnt--;

					/* Add this iocb to our local */
					/* timeout queue */

					/*
					 * This way we don't hold the TX_CHANNEL
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first =
						    (uint8_t *)iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					/* Not timed out; keep as 'prev' */
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */

			/* The node queue is circular; stop at the last node */
			if (nlp == (NODELIST *)cp->nodeq.q_last) {
				nlp = NULL;
			} else {
				nlp = nlp->nlp_next[channelno];
			}

		}	/* while (nlp) */

	}	/* end of for */

	/* Now cleanup the iocb's */
	/* (still under TX_CHANNEL_LOCK: release iotags/XRIs and mark pkts) */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;
		channelno = ((CHANNEL *)iocbq->channel)->channelno;
		sbp = iocbq->sbp;
		if (sbp && (sbp != STALE_PACKET)) {
			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				hba->fc_table[sbp->iotag] = NULL;
				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
			} else {
				(void) emlxs_unregister_pkt(
				    (CHANNEL *)iocbq->channel,
				    iocb->ULPIOTAG, 0);
			}

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_TIMEOUT;
			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;

	}	/* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now complete the transmit timeouts outside the locks */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/* Warning: Some FCT sbp's don't have */
			/* fc_packet objects */
			pkt = PRIV2PKT(sbp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "TXQ abort: sbp=%p iotag=%x tmo=%d", sbp,
			    sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			/* Complete with ABORT_TIMEOUT while the link is */
			/* up, LINK_DOWN otherwise */
			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_TIMEOUT, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}

		iocbq = next;

	}	/* end of while */



	/* Now check the chip */
	bzero((void *)&abort, sizeof (Q));

	/* Check the HBA for outstanding IOs */
	rc = 0;
	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
		sbp = hba->fc_table[iotag];
		/* Only consider live packets on the chip queue that are */
		/* not already being flushed/closed and have expired */
		if (sbp && (sbp != STALE_PACKET) &&
		    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
		    !(sbp->pkt_flags & (PACKET_IN_FLUSH |
		    PACKET_XRI_CLOSED)) &&
		    (hba->timer_tics >= sbp->ticks)) {
			rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
			    sbp, &abort, flag);

			/* Nonzero rc means a reset is needed; stop scanning */
			if (rc) {
				break;
			}
		}
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Now put the iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	/* Now trigger IO channel service to send these abort iocbq */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}
		cp = &hba->chan[channelno];

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
	}

	if (rc == 1) {
		/* Spawn a thread to reset the link */
		emlxs_thread_spawn(hba, emlxs_reset_link_thread, NULL, NULL);
	} else if (rc == 2) {
		/* Spawn a thread to reset the adapter */
		emlxs_thread_spawn(hba, emlxs_restart_thread, NULL, NULL);
	}

	return (rc);

} /* emlxs_timer_check_pkts() */
559 
560 
/*
 * Check each IO channel for a service timeout and kick IOCB servicing
 * where needed.  'flag' entries may already be set by earlier checks
 * (packets/nodes); those channels are serviced here as well.
 */
static void
emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t channelno;
	CHANNEL *cp;
	uint32_t logit;

	/* Timeouts disabled by configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		logit = 0;

		/* Check for channel timeout now */
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
		if (cp->timeout && (hba->timer_tics >= cp->timeout)) {
			/* Check if there is work to do on channel and */
			/* the link is still up */
			if (cp->nodeq.q_first) {
				flag[channelno] = 1;
				/* Re-arm: check again in 10 seconds */
				cp->timeout = hba->timer_tics + 10;

				/* Only log while the link is up */
				if (hba->state >= FC_LINK_UP) {
					logit = 1;
				}
			} else {
				/* No pending work; disarm the timeout */
				cp->timeout = 0;
			}
		}
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

		/* Log outside the lock */
		if (logit) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_chan_watchdog_msg,
			    "IO Channel %d cnt=%d,%d",
			    channelno,
			    hba->channel_tx_count,
			    hba->io_count);
		}

		/*
		 * If IO channel flag is set, request iocb servicing
		 * here to send any iocb's that may still be queued
		 */
		if (flag[channelno]) {
			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
		}
	}

	return;

} /* emlxs_timer_check_channels() */
618 
619 
/*
 * Check every node on the port for expired per-channel gate timers and
 * pending forced RSCNs.  Each expired node is handled by releasing the
 * node_rwlock first (emlxs_node_timeout() may reopen the node), then
 * the whole table is rescanned from the top.  Sets flag[channelno]
 * for channels that need IOCB servicing.
 */
static void
emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	int32_t channelno;

	for (;;) {
		/* Check node gate flag for expiration */
		found = 0;

		/*
		 * We need to lock, scan, and unlock because we can't hold the
		 * lock while we call node_open
		 */
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				for (channelno = 0;
				    channelno < hba->chan_count;
				    channelno++) {
					/* Check if the node timer is active */
					/* and if timer has expired */
					if (nlp->nlp_tics[channelno] &&
					    (hba->timer_tics >=
					    nlp->nlp_tics[channelno])) {
						/* If so, set the flag and */
						/* break out */
						/* (channelno is used below) */
						found = 1;
						flag[channelno] = 1;
						break;
					}
				}

				if (nlp->nlp_force_rscn &&
				    (hba->timer_tics >= nlp->nlp_force_rscn)) {
					nlp->nlp_force_rscn = 0;
					/*
					 * Generate an RSCN to
					 * wakeup ULP
					 */
					(void) emlxs_generate_rscn(port,
					    nlp->nlp_DID);
				}

				if (found) {
					break;
				}

				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}

		}
		rw_exit(&port->node_rwlock);

		/* No more expired node timers; done */
		if (!found) {
			break;
		}

		/* Handle the expired node/channel outside the rwlock, */
		/* then rescan from the top */
		emlxs_node_timeout(port, nlp, channelno);
	}

} /* emlxs_timer_check_nodes() */
690 
691 
692 static void
693 emlxs_timer_check_loopback(emlxs_hba_t *hba)
694 {
695 	emlxs_port_t *port = &PPORT;
696 	emlxs_config_t *cfg = &CFG;
697 	int32_t reset = 0;
698 
699 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
700 		return;
701 	}
702 
703 	/* Check the loopback timer for expiration */
704 	mutex_enter(&EMLXS_PORT_LOCK);
705 
706 	if (!hba->loopback_tics || (hba->timer_tics < hba->loopback_tics)) {
707 		mutex_exit(&EMLXS_PORT_LOCK);
708 		return;
709 	}
710 
711 	hba->loopback_tics = 0;
712 
713 	if (hba->flag & FC_LOOPBACK_MODE) {
714 		reset = 1;
715 	}
716 
717 	mutex_exit(&EMLXS_PORT_LOCK);
718 
719 	if (reset) {
720 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
721 		    "LOOPBACK_MODE: Expired. Resetting...");
722 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
723 	}
724 
725 	return;
726 
727 } /* emlxs_timer_check_loopback() */
728 
729 
730 static void
731 emlxs_timer_check_linkup(emlxs_hba_t *hba)
732 {
733 	emlxs_port_t *port = &PPORT;
734 	uint32_t linkup;
735 
736 	/* Check if all mbox commands from previous activity are processed */
737 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
738 		mutex_enter(&EMLXS_MBOX_LOCK);
739 		if (hba->mbox_queue.q_first) {
740 			mutex_exit(&EMLXS_MBOX_LOCK);
741 			return;
742 		}
743 		mutex_exit(&EMLXS_MBOX_LOCK);
744 	}
745 
746 	/* Check the linkup timer for expiration */
747 	mutex_enter(&EMLXS_PORT_LOCK);
748 	linkup = 0;
749 	if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
750 		hba->linkup_timer = 0;
751 
752 		/* Make sure link is still ready */
753 		if (hba->state >= FC_LINK_UP) {
754 			linkup = 1;
755 		}
756 	}
757 	mutex_exit(&EMLXS_PORT_LOCK);
758 
759 	/* Make the linkup callback */
760 	if (linkup) {
761 		emlxs_port_online(port);
762 	}
763 	return;
764 
765 } /* emlxs_timer_check_linkup() */
766 
767 
/*
 * Issue a heartbeat mailbox command if no adapter interrupts have been
 * seen for 5 seconds.  The heartbeat proves the adapter can still
 * process commands; its completion sets heartbeat_flag.
 */
static void
emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	emlxs_config_t *cfg = &CFG;
	int rc;

	/* Heartbeat disabled by configuration */
	if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
		return;
	}

	/* Only check once every 5 seconds */
	if (hba->timer_tics < hba->heartbeat_timer) {
		return;
	}

	hba->heartbeat_timer = hba->timer_tics + 5;

	/* Return if adapter interrupts have occurred */
	if (hba->heartbeat_flag) {
		hba->heartbeat_flag = 0;
		return;
	}
	/* No adapter interrupts have occurred for 5 seconds now */

	/* Return if mailbox is busy */
	/* This means the mailbox timer routine is watching for problems */
	if (hba->mbox_timer) {
		return;
	}

	/* Return if heartbeat is still outstanding */
	if (hba->heartbeat_active) {
		return;
	}

	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Unable to allocate heartbeat mailbox.");
		return;
	}

	emlxs_mb_heartbeat(hba, mbq);
	hba->heartbeat_active = 1;

	/* On MBX_BUSY/MBX_SUCCESS the SLI layer owns the mailbox; */
	/* otherwise free it here */
	rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
	}

	return;

} /* emlxs_timer_check_heartbeat() */
821 
822 
/*
 * Handle discovery (tape/FCP-2) timeout.  When the discovery timer has
 * expired with the link still up, flush any FCP-2 nodes that are still
 * closed, then issue a CLEAR_LA to finish link bring-up.
 * Note: EMLXS_PORT_LOCK is dropped and reacquired around the unreg_did
 * call inside the loop, so the timer/state conditions are re-evaluated
 * on every iteration.
 */
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	int32_t send_clear_la;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	MAILBOXQ *mbox;
	int rc;

	/* Timeouts disabled by configuration */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Check the discovery timer for expiration */
	send_clear_la = 0;
	mutex_enter(&EMLXS_PORT_LOCK);
	while (hba->discovery_timer &&
	    (hba->timer_tics >= hba->discovery_timer) &&
	    (hba->state == FC_LINK_UP)) {
		send_clear_la = 1;

		/* Perform a flush on fcp2 nodes that are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[hba->channel_fcp] &
				    NLP_CLOSED)) {
					found = 1;
					break;

				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		/* No closed FCP-2 nodes left; proceed to CLEAR_LA */
		if (!found) {
			break;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
		    "FCP2 device (did=%06x) missing. Flushing...",
		    nlp->nlp_DID);

		/* Drop the port lock across the unregister call */
		mutex_exit(&EMLXS_PORT_LOCK);

		(void) emlxs_mb_unreg_did(port, nlp->nlp_DID, NULL, NULL, NULL);

		mutex_enter(&EMLXS_PORT_LOCK);

	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Try to send clear link attention, if needed */
	if ((send_clear_la == 1) &&
	    (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
		mutex_enter(&EMLXS_PORT_LOCK);

		/*
		 * If state is not FC_LINK_UP, then either the link has gone
		 * down or a FC_CLEAR_LA has already been issued
		 */
		if (hba->state != FC_LINK_UP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbox);
		} else {
			/* Change state and clear discovery timer */
			EMLXS_STATE_CHANGE_LOCKED(hba, FC_CLEAR_LA);

			hba->discovery_timer = 0;

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Prepare and send the CLEAR_LA command */
			emlxs_mb_clear_la(hba, mbox);

			/* On MBX_BUSY/MBX_SUCCESS the SLI layer owns the */
			/* mailbox; otherwise free it here */
			rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
	}

	return;

} /* emlxs_timer_check_discovery()  */
920 
921 
922 static void
923 emlxs_timer_check_ub(emlxs_port_t *port)
924 {
925 	emlxs_hba_t *hba = HBA;
926 	emlxs_unsol_buf_t *ulistp;
927 	fc_unsol_buf_t *ubp;
928 	emlxs_ub_priv_t *ub_priv;
929 	uint32_t i;
930 
931 	if (port->ub_timer > hba->timer_tics) {
932 		return;
933 	}
934 
935 	port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;
936 
937 	/* Check the unsolicited buffers */
938 	mutex_enter(&EMLXS_UB_LOCK);
939 
940 	ulistp = port->ub_pool;
941 	while (ulistp) {
942 		/* Check buffers in this pool */
943 		for (i = 0; i < ulistp->pool_nentries; i++) {
944 			ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
945 			ub_priv = ubp->ub_fca_private;
946 
947 			if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
948 				continue;
949 			}
950 
951 			/* If buffer has timed out, print message and */
952 			/* increase timeout */
953 			if ((ub_priv->time + ub_priv->timeout) <=
954 			    hba->timer_tics) {
955 				ub_priv->flags |= EMLXS_UB_TIMEOUT;
956 
957 				EMLXS_MSGF(EMLXS_CONTEXT,
958 				    &emlxs_sfs_debug_msg,
959 				    "Stale UB buffer detected (%d mins): "
960 				    "buffer=%p (%x,%x,%x,%x)",
961 				    (ub_priv->timeout / 60), ubp,
962 				    ubp->ub_frame.type, ubp->ub_frame.s_id,
963 				    ubp->ub_frame.ox_id, ubp->ub_frame.rx_id);
964 
965 				/* Increase timeout period */
966 
967 				/* If timeout was 5 mins or less, */
968 				/* increase it to 10 mins */
969 				if (ub_priv->timeout <= (5 * 60)) {
970 					ub_priv->timeout = (10 * 60);
971 				}
972 				/* If timeout was 10 mins or less, */
973 				/* increase it to 30 mins */
974 				else if (ub_priv->timeout <= (10 * 60)) {
975 					ub_priv->timeout = (30 * 60);
976 				}
977 				/* Otherwise double it. */
978 				else {
979 					ub_priv->timeout *= 2;
980 				}
981 			}
982 		}
983 
984 		ulistp = ulistp->pool_next;
985 	}
986 
987 	mutex_exit(&EMLXS_UB_LOCK);
988 
989 	return;
990 
991 } /* emlxs_timer_check_ub()  */
992 
993 
/* EMLXS_FCTAB_LOCK must be held to call this */
/*
 * Escalating recovery for a packet that timed out on the chip.  Each
 * call advances sbp->abort_attempts through the ladder:
 *	attempt 0: send ABTS abort (link up) or close the XRI (link down)
 *	attempt 1: close the XRI
 *	attempt 2: request a link reset   (return 1)
 *	attempt 3+: request adapter reset (return 2)
 * Any abort/close iocb built here is appended to 'abortq' for the
 * caller to queue, and flag[channelno] is set so the channel gets
 * serviced.  Returns 0 when the caller may continue scanning.
 */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abortq,
    uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	CHANNEL *cp = (CHANNEL *)sbp->channel;
	IOCBQ *iocbq = NULL;
	fc_packet_t *pkt;
	uint32_t rc = 0;

	mutex_enter(&sbp->mtx);

	/* Warning: Some FCT sbp's don't have fc_packet objects */
	pkt = PRIV2PKT(sbp);

	switch (sbp->abort_attempts) {
	case 0:

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Aborting. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

			/* The adapter will make 2 attempts to send ABTS */
			/* with 2*ratov timeout each time */
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Closing. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, cp);

			/* Give the close 30 seconds to complete */
			sbp->ticks = hba->timer_tics + 30;
		}

		/* set the flags */
		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 1:

		/* The abort did not complete in time; close the XRI */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 2:Closing. sbp=%p iotag=%x", sbp, sbp->iotag);

		iocbq =
		    emlxs_create_close_xri_cn(port, sbp->node, sbp->iotag,
		    cp);

		sbp->ticks = hba->timer_tics + 30;

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 2:

		/* The close did not complete either; escalate to a */
		/* link reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 3:Resetting link. sbp=%p iotag=%x", sbp,
		    sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 1;

		break;

	default:

		/* Last resort: escalate to a full adapter reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: %d:Resetting adapter. sbp=%p iotag=%x",
		    sbp->abort_attempts, sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 2;

		break;
	}

	sbp->abort_attempts++;
	mutex_exit(&sbp->mtx);

	/* Append any abort/close iocb to the caller's queue */
	if (iocbq) {
		if (abortq->q_first) {
			((IOCBQ *)abortq->q_last)->next = iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt++;
		} else {
			abortq->q_first = (uint8_t *)iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt = 1;
		}
		iocbq->next = NULL;
	}

	return (rc);

} /* emlxs_pkt_chip_timeout() */
1107 
1108 
1109 #ifdef TX_WATCHDOG
1110 
1111 static void
1112 emlxs_tx_watchdog(emlxs_hba_t *hba)
1113 {
1114 	emlxs_port_t *port = &PPORT;
1115 	NODELIST *nlp;
1116 	uint32_t channelno;
1117 	CHANNEL *cp;
1118 	IOCBQ *next;
1119 	IOCBQ *iocbq;
1120 	IOCB *iocb;
1121 	uint32_t found;
1122 	MATCHMAP *bmp;
1123 	Q abort;
1124 	uint32_t iotag;
1125 	emlxs_buf_t *sbp;
1126 	fc_packet_t *pkt = NULL;
1127 	uint32_t cmd;
1128 	uint32_t did;
1129 
1130 	bzero((void *)&abort, sizeof (Q));
1131 
1132 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1133 
1134 	mutex_enter(&EMLXS_FCTAB_LOCK);
1135 	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
1136 		sbp = hba->fc_table[iotag];
1137 		if (sbp && (sbp != STALE_PACKET) &&
1138 		    (sbp->pkt_flags & PACKET_IN_TXQ)) {
1139 			nlp = sbp->node;
1140 			iocbq = &sbp->iocbq;
1141 
1142 			channelno = (CHANNEL *)(sbp->channel)->channelno;
1143 			if (iocbq->flag & IOCB_PRIORITY) {
1144 				iocbq =
1145 				    (IOCBQ *)nlp->nlp_ptx[channelno].
1146 				    q_first;
1147 			} else {
1148 				iocbq =
1149 				    (IOCBQ *)nlp->nlp_tx[channelno].
1150 				    q_first;
1151 			}
1152 
1153 			/* Find a matching entry */
1154 			found = 0;
1155 			while (iocbq) {
1156 				if (iocbq == &sbp->iocbq) {
1157 					found = 1;
1158 					break;
1159 				}
1160 
1161 				iocbq = (IOCBQ *)iocbq->next;
1162 			}
1163 
1164 			if (!found) {
1165 				if (!(sbp->pkt_flags & PACKET_STALE)) {
1166 					mutex_enter(&sbp->mtx);
1167 					sbp->pkt_flags |=
1168 					    PACKET_STALE;
1169 					mutex_exit(&sbp->mtx);
1170 				} else {
1171 					if (abort.q_first == 0) {
1172 						abort.q_first =
1173 						    &sbp->iocbq;
1174 					} else {
1175 						((IOCBQ *)abort.
1176 						    q_last)->next =
1177 						    &sbp->iocbq;
1178 					}
1179 
1180 					abort.q_last = &sbp->iocbq;
1181 					abort.q_cnt++;
1182 				}
1183 
1184 			} else {
1185 				if ((sbp->pkt_flags & PACKET_STALE)) {
1186 					mutex_enter(&sbp->mtx);
1187 					sbp->pkt_flags &=
1188 					    ~PACKET_STALE;
1189 					mutex_exit(&sbp->mtx);
1190 				}
1191 			}
1192 		}
1193 	}
1194 	mutex_exit(&EMLXS_FCTAB_LOCK);
1195 
1196 	iocbq = (IOCBQ *)abort.q_first;
1197 	while (iocbq) {
1198 		next = (IOCBQ *)iocbq->next;
1199 		iocbq->next = NULL;
1200 		sbp = (emlxs_buf_t *)iocbq->sbp;
1201 
1202 		pkt = PRIV2PKT(sbp);
1203 		if (pkt) {
1204 			did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
1205 			cmd = *((uint32_t *)pkt->pkt_cmd);
1206 			cmd = LE_SWAP32(cmd);
1207 		}
1208 
1209 
1210 		emlxs_tx_put(iocbq, 0);
1211 
1212 		iocbq = next;
1213 
1214 	}	/* end of while */
1215 
1216 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1217 
1218 	return;
1219 
1220 } /* emlxs_tx_watchdog() */
1221 
1222 #endif /* TX_WATCHDOG */
1223 
1224 
1225 #ifdef DHCHAP_SUPPORT
1226 
1227 static void
1228 emlxs_timer_check_dhchap(emlxs_port_t *port)
1229 {
1230 	emlxs_hba_t *hba = HBA;
1231 	uint32_t i;
1232 	NODELIST *ndlp = NULL;
1233 
1234 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1235 		ndlp = port->node_table[i];
1236 
1237 		if (!ndlp) {
1238 			continue;
1239 		}
1240 
1241 		/* Check authentication response timeout */
1242 		if (ndlp->node_dhc.nlp_authrsp_tmo &&
1243 		    (hba->timer_tics >= ndlp->node_dhc.nlp_authrsp_tmo)) {
1244 			/* Trigger authresp timeout handler */
1245 			(void) emlxs_dhc_authrsp_timeout(port, ndlp, NULL);
1246 		}
1247 
1248 		/* Check reauthentication timeout */
1249 		if (ndlp->node_dhc.nlp_reauth_tmo &&
1250 		    (hba->timer_tics >= ndlp->node_dhc.nlp_reauth_tmo)) {
1251 			/* Trigger reauth timeout handler */
1252 			emlxs_dhc_reauth_timeout(port, NULL, ndlp);
1253 		}
1254 	}
1255 	return;
1256 
1257 } /* emlxs_timer_check_dhchap */
1258 
1259 #endif /* DHCHAP_SUPPORT */
1260