1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Timer period in seconds */
31 #define	EMLXS_TIMER_PERIOD		1	/* secs */
32 #define	EMLXS_PKT_PERIOD		5	/* secs */
33 #define	EMLXS_UB_PERIOD			60	/* secs */
34 
35 EMLXS_MSG_DEF(EMLXS_CLOCK_C);
36 
37 
38 static void emlxs_timer_check_loopback(emlxs_hba_t *hba);
39 
40 #ifdef DHCHAP_SUPPORT
41 static void emlxs_timer_check_dhchap(emlxs_port_t *port);
42 #endif /* DHCHAP_SUPPORT */
43 
44 static void	emlxs_timer(void *arg);
45 static void	emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
46 static uint32_t	emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
47 static void	emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
48 static void	emlxs_timer_check_linkup(emlxs_hba_t *hba);
49 static void	emlxs_timer_check_discovery(emlxs_port_t *port);
50 static void	emlxs_timer_check_ub(emlxs_port_t *port);
51 static void	emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag);
52 static uint32_t	emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
53 			Q *abortq, uint8_t *flag);
54 
55 #ifdef TX_WATCHDOG
56 static void	emlxs_tx_watchdog(emlxs_hba_t *hba);
57 #endif /* TX_WATCHDOG */
58 
59 extern clock_t
60 emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
61 {
62 	emlxs_config_t *cfg = &CFG;
63 	clock_t time;
64 
65 	/* Set thread timeout */
66 	if (cfg[CFG_TIMEOUT_ENABLE].current) {
67 		(void) drv_getparm(LBOLT, &time);
68 		time += (timeout * drv_usectohz(1000000));
69 	} else {
70 		time = -1;
71 	}
72 
73 	return (time);
74 
75 } /* emlxs_timeout() */
76 
77 
/*
 * emlxs_timer
 *
 * Periodic watchdog callback scheduled via timeout(9F).  Polls for SLI
 * error attention, runs all standard timer checks, and reschedules
 * itself every EMLXS_TIMER_PERIOD seconds until a kill is requested.
 * The BUSY/KILL/ENDED flags in hba->timer_flags coordinate with
 * emlxs_timer_start()/emlxs_timer_stop() under EMLXS_TIMER_LOCK.
 */
static void
emlxs_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;

	/* Timer was cancelled; nothing to do */
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);

	/* Poll for error attention conditions at the SLI level */
	EMLXS_SLI_POLL_ERRATT(hba);

	/* Only one timer thread is allowed */
	if (hba->timer_flags & EMLXS_TIMER_BUSY) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	/* Check if a kill request has been made */
	if (hba->timer_flags & EMLXS_TIMER_KILL) {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;

		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
	hba->timer_tics = DRV_TIME;

	/* Drop the lock; the checks below take their own locks */
	mutex_exit(&EMLXS_TIMER_LOCK);

	/* Perform standard checks */
	emlxs_timer_checks(hba);

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);

	hba->timer_flags &= ~EMLXS_TIMER_BUSY;

	/* If timer is still enabled, restart it */
	if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
		hba->timer_id =
		    timeout(emlxs_timer, (void *)hba,
		    (EMLXS_TIMER_PERIOD * drv_usectohz(1000000)));
	} else {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer() */
134 
135 
/*
 * emlxs_timer_checks
 *
 * Run all periodic driver checks for one watchdog tick: SLI-level
 * timers, event queue, heartbeat, loopback, packet timeouts, linkup,
 * and the per-port node/discovery/UB/DHCHAP timers.  The flag[] array
 * collects per-channel "needs servicing" requests which are acted on
 * last by emlxs_timer_check_channels().
 */
extern void
emlxs_timer_checks(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t flag[MAX_CHANNEL];
	uint32_t i;
	uint32_t rc;

	/* Exit if we are still initializing */
	if (hba->state < FC_LINK_DOWN) {
		return;
	}

	bzero((void *)flag, sizeof (flag));

	/* Check SLI level timeouts */
	EMLXS_SLI_TIMER(hba);

	/* Check event queue */
	emlxs_timer_check_events(hba);

	/* Check heartbeat timer */
	emlxs_timer_check_heartbeat(hba);

#ifdef IDLE_TIMER
	emlxs_pm_idle_timer(hba);
#endif /* IDLE_TIMER */

	/* Check for loopback timeouts */
	emlxs_timer_check_loopback(hba);

	/* Check for packet timeouts */
	rc = emlxs_timer_check_pkts(hba, flag);

	if (rc) {
		/* Link or adapter is being reset; skip remaining checks */
		return;
	}

	/* Check for linkup timeout */
	emlxs_timer_check_linkup(hba);

	/* Check the ports */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		/* Skip ports not bound to the transport */
		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		/* Check for node gate timeouts */
		emlxs_timer_check_nodes(port, flag);

		/* Check for tape discovery timeout */
		emlxs_timer_check_discovery(port);

		/* Check for UB timeouts */
		emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
		/* Check for DHCHAP authentication timeouts */
		emlxs_timer_check_dhchap(port);
#endif /* DHCHAP_SUPPORT */

	}

	/* Check for IO channel service timeouts */
	/* Always do this last */
	emlxs_timer_check_channels(hba, flag);

	return;

} /* emlxs_timer_checks() */
209 
210 
211 extern void
212 emlxs_timer_start(emlxs_hba_t *hba)
213 {
214 	if (hba->timer_id) {
215 		return;
216 	}
217 
218 	/* Restart the timer */
219 	mutex_enter(&EMLXS_TIMER_LOCK);
220 	if (!hba->timer_id) {
221 		hba->timer_flags = 0;
222 		hba->timer_id =
223 		    timeout(emlxs_timer, (void *)hba, drv_usectohz(1000000));
224 	}
225 	mutex_exit(&EMLXS_TIMER_LOCK);
226 
227 } /* emlxs_timer_start() */
228 
229 
/*
 * emlxs_timer_stop
 *
 * Request termination of the watchdog and wait (polling in 500ms
 * steps) until the timer callback acknowledges the kill by clearing
 * hba->timer_id.  Safe to call when the timer is not running.
 */
extern void
emlxs_timer_stop(emlxs_hba_t *hba)
{
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);
	hba->timer_flags |= EMLXS_TIMER_KILL;

	/* Wait for the timer callback to see the kill flag and exit */
	while (hba->timer_id) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		delay(drv_usectohz(500000));
		mutex_enter(&EMLXS_TIMER_LOCK);
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer_stop() */
250 
251 
/*
 * emlxs_timer_check_pkts
 *
 * Scan for timed-out I/O, once every EMLXS_PKT_PERIOD seconds.  First
 * the software tx queues of every node on every channel are walked and
 * expired iocbs are moved to a local queue, unregistered, and completed
 * with an abort/link-down status outside the locks.  Then the chip's
 * outstanding-I/O table (fc_table) is checked and abort/close iocbs are
 * built for expired commands via emlxs_pkt_chip_timeout().
 *
 * Returns 0 normally, 1 if a link reset thread was spawned, or 2 if an
 * adapter restart thread was spawned; nonzero tells the caller to skip
 * the remaining timer checks this cycle.
 */
static uint32_t
emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	Q tmo;
	int32_t channelno;
	CHANNEL *cp;
	NODELIST *nlp;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb;
	IOCBQ *iocbq;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt;
	Q abort;
	uint32_t iotag;
	uint32_t rc;

	/* Nothing to do when driver timeouts are disabled */
	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return (0);
	}

	/* Only run once every EMLXS_PKT_PERIOD seconds */
	if (hba->pkt_timer > hba->timer_tics) {
		return (0);
	}

	hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;


	bzero((void *)&tmo, sizeof (Q));

	/*
	 * We must hold the locks here because we never know when an iocb
	 * will be removed out from under us
	 */

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		/* Scan the tx queues for each active node on the channel */

		/* Get the first node */
		nlp = (NODELIST *)cp->nodeq.q_first;

		while (nlp) {
			/* Scan the node's priority tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now deque it */
					if (next == NULL) {
						nlp->nlp_ptx[channelno].q_last =
						    (uint8_t *)prev;
					}

					if (prev == NULL) {
						nlp->nlp_ptx[channelno].
						    q_first = (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_ptx[channelno].q_cnt--;

					/* Add this iocb to our local */
					/* timout queue */

					/*
					 * This way we don't hold the TX_CHANNEL
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first =
						    (uint8_t *)iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */


			/* Scan the node's tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now deque it */
					if (next == NULL) {
						nlp->nlp_tx[channelno].q_last =
						    (uint8_t *)prev;
					}

					if (prev == NULL) {
						nlp->nlp_tx[channelno].q_first =
						    (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_tx[channelno].q_cnt--;

					/* Add this iocb to our local */
					/* timout queue */

					/*
					 * This way we don't hold the TX_CHANNEL
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first =
						    (uint8_t *)iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */

			/* The node queue is circular; stop at the tail */
			if (nlp == (NODELIST *)cp->nodeq.q_last) {
				nlp = NULL;
			} else {
				nlp = nlp->nlp_next[channelno];
			}

		}	/* while (nlp) */

	}	/* end of for */

	/* Now cleanup the iocb's */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;
		channelno = ((CHANNEL *)iocbq->channel)->channelno;
		sbp = iocbq->sbp;
		if (sbp && (sbp != STALE_PACKET)) {
			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				/*
				 * NOTE(review): fc_table is cleared here
				 * while holding EMLXS_TX_CHANNEL_LOCK; the
				 * chip scan below uses EMLXS_FCTAB_LOCK for
				 * the same table -- confirm this is safe.
				 */
				hba->fc_table[sbp->iotag] = NULL;
				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
			} else {
				(void) emlxs_unregister_pkt(
				    (CHANNEL *)iocbq->channel,
				    iocb->ULPIOTAG, 0);
			}

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_TIMEOUT;
			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;

	}	/* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now complete the transmit timeouts outside the locks */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/* Warning: Some FCT sbp's don't have */
			/* fc_packet objects */
			pkt = PRIV2PKT(sbp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "TXQ abort: sbp=%p iotag=%x tmo=%d", sbp,
			    sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			/* Completion status depends on link state */
			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_TIMEOUT, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}

		iocbq = next;

	}	/* end of while */



	/* Now check the chip */
	bzero((void *)&abort, sizeof (Q));

	/* Check the HBA for outstanding IOs */
	rc = 0;
	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
		sbp = hba->fc_table[iotag];
		if (sbp && (sbp != STALE_PACKET) &&
		    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
		    !(sbp->pkt_flags & (PACKET_IN_FLUSH |
		    PACKET_XRI_CLOSED)) &&
		    (hba->timer_tics >= sbp->ticks)) {
			rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
			    sbp, &abort, flag);

			/* Nonzero rc means a reset is required; stop */
			if (rc) {
				break;
			}
		}
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Now put the iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	/* Now trigger IO channel service to send these abort iocbq */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}
		cp = &hba->chan[channelno];

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
	}

	if (rc == 1) {
		/* Spawn a thread to reset the link */
		emlxs_thread_spawn(hba, emlxs_reset_link_thread, NULL, NULL);
	} else if (rc == 2) {
		/* Spawn a thread to reset the adapter */
		emlxs_thread_spawn(hba, emlxs_restart_thread, NULL, NULL);
	}

	return (rc);

} /* emlxs_timer_check_pkts() */
551 
552 
/*
 * emlxs_timer_check_channels
 *
 * For each IO channel whose service timeout has expired while work is
 * still queued, set the caller's per-channel flag, rearm the timeout
 * 10 seconds out, and request iocb servicing to push out queued iocbs.
 * Channels with no queued work get their timeout cleared.
 */
static void
emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t channelno;
	CHANNEL *cp;
	uint32_t logit;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		logit = 0;

		/* Check for channel timeout now */
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
		if (cp->timeout && (hba->timer_tics >= cp->timeout)) {
			/* Check if there is work to do on channel and */
			/* the link is still up */
			if (cp->nodeq.q_first) {
				flag[channelno] = 1;
				cp->timeout = hba->timer_tics + 10;

				/* Only log watchdog hits while link is up */
				if (hba->state >= FC_LINK_UP) {
					logit = 1;
				}
			} else {
				cp->timeout = 0;
			}
		}
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

		/* Log outside the lock */
		if (logit) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_chan_watchdog_msg,
			    "IO Channel %d cnt=%d,%d",
			    channelno,
			    hba->channel_tx_count,
			    hba->io_count);
		}

		/*
		 * If IO channel flag is set, request iocb servicing
		 * here to send any iocb's that may still be queued
		 */
		if (flag[channelno]) {
			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
		}
	}

	return;

} /* emlxs_timer_check_channels() */
610 
611 
/*
 * emlxs_timer_check_nodes
 *
 * Scan the port's node hash table for expired per-channel node timers
 * and expired forced-RSCN timers.  Each expired node timer is handled
 * by emlxs_node_timeout() with the rwlock dropped, after which the
 * whole table is rescanned from the top; the loop ends when a full
 * scan finds no expired timers.
 */
static void
emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	int32_t channelno;

	for (;;) {
		/* Check node gate flag for expiration */
		found = 0;

		/*
		 * We need to lock, scan, and unlock because we can't hold the
		 * lock while we call node_open
		 */
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				for (channelno = 0;
				    channelno < hba->chan_count;
				    channelno++) {
					/* Check if the node timer is active */
					/* and if timer has expired */
					if (nlp->nlp_tics[channelno] &&
					    (hba->timer_tics >=
					    nlp->nlp_tics[channelno])) {
						/* If so, set the flag and */
						/* break out */
						found = 1;
						flag[channelno] = 1;
						break;
					}
				}

				if (nlp->nlp_force_rscn &&
				    (hba->timer_tics >= nlp->nlp_force_rscn)) {
					nlp->nlp_force_rscn = 0;
					/*
					 * Generate an RSCN to
					 * wakeup ULP
					 */
					(void) emlxs_generate_rscn(port,
					    nlp->nlp_DID);
				}

				if (found) {
					break;
				}

				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}

		}
		rw_exit(&port->node_rwlock);

		/* Full scan with no expirations; we are done */
		if (!found) {
			break;
		}

		/* Handle the expired timer with the rwlock dropped */
		emlxs_node_timeout(port, nlp, channelno);
	}

} /* emlxs_timer_check_nodes() */
682 
683 
/*
 * emlxs_timer_check_loopback
 *
 * If the loopback-mode timer has expired while the adapter is still in
 * loopback mode, log and reset the link to restore normal operation.
 */
static void
emlxs_timer_check_loopback(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t reset = 0;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Check the loopback timer for expiration */
	mutex_enter(&EMLXS_PORT_LOCK);

	if (!hba->loopback_tics || (hba->timer_tics < hba->loopback_tics)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->loopback_tics = 0;

	if (hba->flag & FC_LOOPBACK_MODE) {
		reset = 1;
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Issue the reset outside EMLXS_PORT_LOCK */
	if (reset) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
		    "LOOPBACK_MODE: Expired. Resetting...");
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
	}

	return;

} /* emlxs_timer_check_loopback() */
720 
721 
/*
 * emlxs_timer_check_linkup
 *
 * If the linkup delay timer has expired and the link is still up,
 * make the port-online callback.  For SLI4-capable adapters the
 * callback is deferred while mailbox commands from previous activity
 * are still queued.
 */
static void
emlxs_timer_check_linkup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t linkup;

	/* Check if all mbox commands from previous activity are processed */
	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
		mutex_enter(&EMLXS_MBOX_LOCK);
		if (hba->mbox_queue.q_first) {
			mutex_exit(&EMLXS_MBOX_LOCK);
			return;
		}
		mutex_exit(&EMLXS_MBOX_LOCK);
	}

	/* Check the linkup timer for expiration */
	mutex_enter(&EMLXS_PORT_LOCK);
	linkup = 0;
	if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
		hba->linkup_timer = 0;

		/* Make sure link is still ready */
		if (hba->state >= FC_LINK_UP) {
			linkup = 1;
		}
	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Make the linkup callback outside EMLXS_PORT_LOCK */
	if (linkup) {
		emlxs_port_online(port);
	}
	return;

} /* emlxs_timer_check_linkup() */
758 
759 
/*
 * emlxs_timer_check_heartbeat
 *
 * Every 5 seconds, verify the adapter is still generating interrupts
 * (heartbeat_flag is set by the interrupt path).  If none occurred
 * since the last check, issue a HEARTBEAT mailbox command so that a
 * hung adapter can be detected via the mailbox timeout machinery.
 */
static void
emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	emlxs_config_t *cfg = &CFG;
	int rc;

	if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
		return;
	}

	if (hba->timer_tics < hba->heartbeat_timer) {
		return;
	}

	hba->heartbeat_timer = hba->timer_tics + 5;

	/* Return if adapter interrupts have occurred */
	if (hba->heartbeat_flag) {
		hba->heartbeat_flag = 0;
		return;
	}
	/* No adapter interrupts have occurred for 5 seconds now */

	/* Return if mailbox is busy */
	/* This means the mailbox timer routine is watching for problems */
	if (hba->mbox_timer) {
		return;
	}

	/* Return if heartbeat is still outstanding */
	if (hba->heartbeat_active) {
		return;
	}

	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Unable to allocate heartbeat mailbox.");
		return;
	}

	emlxs_mb_heartbeat(hba, mbq);
	hba->heartbeat_active = 1;

	/* Free the mailbox unless the SLI layer queued or accepted it */
	rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
	}

	return;

} /* emlxs_timer_check_heartbeat() */
813 
814 
/*
 * emlxs_timer_check_discovery
 *
 * Handle expiration of the link discovery timer.  While the timer has
 * expired and the link is up, flush (unregister the DID of) any FCP2
 * device whose FCP channel is still closed.  Afterwards, on pre-SLI4
 * adapters, issue a CLEAR_LA mailbox command to complete discovery and
 * transition the HBA to FC_CLEAR_LA state.
 */
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	int32_t send_clear_la;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	MAILBOXQ *mbox;
	int rc;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Check the discovery timer for expiration */
	send_clear_la = 0;
	mutex_enter(&EMLXS_PORT_LOCK);
	while (hba->discovery_timer &&
	    (hba->timer_tics >= hba->discovery_timer) &&
	    (hba->state == FC_LINK_UP)) {
		send_clear_la = 1;

		/* Perform a flush on fcp2 nodes that are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[hba->channel_fcp] &
				    NLP_CLOSED)) {
					found = 1;
					break;

				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		/* No more closed FCP2 nodes; stop flushing */
		if (!found) {
			break;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
		    "FCP2 device (did=%06x) missing. Flushing...",
		    nlp->nlp_DID);

		/* Drop EMLXS_PORT_LOCK across the unregister call */
		mutex_exit(&EMLXS_PORT_LOCK);

		(void) emlxs_mb_unreg_did(port, nlp->nlp_DID, NULL, NULL, NULL);

		mutex_enter(&EMLXS_PORT_LOCK);

	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Try to send clear link attention, if needed */
	if ((hba->sli_mode < EMLXS_HBA_SLI4_MODE) && (send_clear_la == 1) &&
	    (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
		mutex_enter(&EMLXS_PORT_LOCK);

		/*
		 * If state is not FC_LINK_UP, then either the link has gone
		 * down or a FC_CLEAR_LA has already been issued
		 */
		if (hba->state != FC_LINK_UP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbox);
		} else {
			/* Change state and clear discovery timer */
			EMLXS_STATE_CHANGE_LOCKED(hba, FC_CLEAR_LA);

			hba->discovery_timer = 0;

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Prepare and send the CLEAR_LA command */
			emlxs_mb_clear_la(hba, mbox);

			rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
	}

	return;

} /* emlxs_timer_check_discovery()  */
912 
913 
/*
 * emlxs_timer_check_ub
 *
 * Every EMLXS_UB_PERIOD seconds, scan the port's unsolicited buffer
 * pools for buffers that have been in use longer than their timeout.
 * Stale buffers are flagged EMLXS_UB_TIMEOUT, logged, and given a
 * progressively longer timeout (10 min, then 30 min, then doubling)
 * to throttle repeated reports.
 */
static void
emlxs_timer_check_ub(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_unsol_buf_t *ulistp;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;

	/* Only run once every EMLXS_UB_PERIOD seconds */
	if (port->ub_timer > hba->timer_tics) {
		return;
	}

	port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;

	/* Check the unsolicited buffers */
	mutex_enter(&EMLXS_UB_LOCK);

	ulistp = port->ub_pool;
	while (ulistp) {
		/* Check buffers in this pool */
		for (i = 0; i < ulistp->pool_nentries; i++) {
			ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
			ub_priv = ubp->ub_fca_private;

			if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
				continue;
			}

			/* If buffer has timed out, print message and */
			/* increase timeout */
			if ((ub_priv->time + ub_priv->timeout) <=
			    hba->timer_tics) {
				ub_priv->flags |= EMLXS_UB_TIMEOUT;

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "Stale UB buffer detected (%d mins): "
				    "buffer=%p (%x,%x,%x,%x)",
				    (ub_priv->timeout / 60), ubp,
				    ubp->ub_frame.type, ubp->ub_frame.s_id,
				    ubp->ub_frame.ox_id, ubp->ub_frame.rx_id);

				/* Increase timeout period */

				/* If timeout was 5 mins or less, */
				/* increase it to 10 mins */
				if (ub_priv->timeout <= (5 * 60)) {
					ub_priv->timeout = (10 * 60);
				}
				/* If timeout was 10 mins or less, */
				/* increase it to 30 mins */
				else if (ub_priv->timeout <= (10 * 60)) {
					ub_priv->timeout = (30 * 60);
				}
				/* Otherwise double it. */
				else {
					ub_priv->timeout *= 2;
				}
			}
		}

		ulistp = ulistp->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return;

} /* emlxs_timer_check_ub()  */
984 
985 
/* EMLXS_FCTAB_LOCK must be held to call this */
/*
 * emlxs_pkt_chip_timeout
 *
 * Escalating timeout handling for a packet outstanding on the chip:
 * attempt 0 issues an ABTS abort (or a close if the link is down),
 * attempt 1 issues a close, attempt 2 requests a link reset (rc=1),
 * and any further attempt requests an adapter reset (rc=2).  Any
 * abort/close iocb built here is appended to the caller's abortq, and
 * the packet's channel flag is set so the caller kicks servicing.
 */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abortq,
    uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	CHANNEL *cp = (CHANNEL *)sbp->channel;
	IOCBQ *iocbq = NULL;
	fc_packet_t *pkt;
	uint32_t rc = 0;

	mutex_enter(&sbp->mtx);

	/* Warning: Some FCT sbp's don't have fc_packet objects */
	pkt = PRIV2PKT(sbp);

	switch (sbp->abort_attempts) {
	case 0:

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Aborting. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

			/* The adapter will make 2 attempts to send ABTS */
			/* with 2*ratov timeout each time */
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Closing. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, cp);

			sbp->ticks = hba->timer_tics + 30;
		}

		/* set the flags */
		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 1:

		/* Abort did not complete in time; try a close */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 2:Closing. sbp=%p iotag=%x", sbp, sbp->iotag);

		iocbq =
		    emlxs_create_close_xri_cn(port, sbp->node, sbp->iotag,
		    cp);

		sbp->ticks = hba->timer_tics + 30;

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 2:

		/* Close also failed; escalate to a link reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 3:Resetting link. sbp=%p iotag=%x", sbp,
		    sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 1;

		break;

	default:

		/* Last resort: escalate to an adapter reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: %d:Resetting adapter. sbp=%p iotag=%x",
		    sbp->abort_attempts, sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 2;

		break;
	}

	sbp->abort_attempts++;
	mutex_exit(&sbp->mtx);

	/* Append any abort/close iocb to the caller's abort queue */
	if (iocbq) {
		if (abortq->q_first) {
			((IOCBQ *)abortq->q_last)->next = iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt++;
		} else {
			abortq->q_first = (uint8_t *)iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt = 1;
		}
		iocbq->next = NULL;
	}

	return (rc);

} /* emlxs_pkt_chip_timeout() */
1099 
1100 
1101 #ifdef TX_WATCHDOG
1102 
1103 static void
1104 emlxs_tx_watchdog(emlxs_hba_t *hba)
1105 {
1106 	emlxs_port_t *port = &PPORT;
1107 	NODELIST *nlp;
1108 	uint32_t channelno;
1109 	CHANNEL *cp;
1110 	IOCBQ *next;
1111 	IOCBQ *iocbq;
1112 	IOCB *iocb;
1113 	uint32_t found;
1114 	MATCHMAP *bmp;
1115 	Q abort;
1116 	uint32_t iotag;
1117 	emlxs_buf_t *sbp;
1118 	fc_packet_t *pkt = NULL;
1119 	uint32_t cmd;
1120 	uint32_t did;
1121 
1122 	bzero((void *)&abort, sizeof (Q));
1123 
1124 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
1125 
1126 	mutex_enter(&EMLXS_FCTAB_LOCK);
1127 	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
1128 		sbp = hba->fc_table[iotag];
1129 		if (sbp && (sbp != STALE_PACKET) &&
1130 		    (sbp->pkt_flags & PACKET_IN_TXQ)) {
1131 			nlp = sbp->node;
1132 			iocbq = &sbp->iocbq;
1133 
1134 			channelno = (CHANNEL *)(sbp->channel)->channelno;
1135 			if (iocbq->flag & IOCB_PRIORITY) {
1136 				iocbq =
1137 				    (IOCBQ *)nlp->nlp_ptx[channelno].
1138 				    q_first;
1139 			} else {
1140 				iocbq =
1141 				    (IOCBQ *)nlp->nlp_tx[channelno].
1142 				    q_first;
1143 			}
1144 
1145 			/* Find a matching entry */
1146 			found = 0;
1147 			while (iocbq) {
1148 				if (iocbq == &sbp->iocbq) {
1149 					found = 1;
1150 					break;
1151 				}
1152 
1153 				iocbq = (IOCBQ *)iocbq->next;
1154 			}
1155 
1156 			if (!found) {
1157 				if (!(sbp->pkt_flags & PACKET_STALE)) {
1158 					mutex_enter(&sbp->mtx);
1159 					sbp->pkt_flags |=
1160 					    PACKET_STALE;
1161 					mutex_exit(&sbp->mtx);
1162 				} else {
1163 					if (abort.q_first == 0) {
1164 						abort.q_first =
1165 						    &sbp->iocbq;
1166 					} else {
1167 						((IOCBQ *)abort.
1168 						    q_last)->next =
1169 						    &sbp->iocbq;
1170 					}
1171 
1172 					abort.q_last = &sbp->iocbq;
1173 					abort.q_cnt++;
1174 				}
1175 
1176 			} else {
1177 				if ((sbp->pkt_flags & PACKET_STALE)) {
1178 					mutex_enter(&sbp->mtx);
1179 					sbp->pkt_flags &=
1180 					    ~PACKET_STALE;
1181 					mutex_exit(&sbp->mtx);
1182 				}
1183 			}
1184 		}
1185 	}
1186 	mutex_exit(&EMLXS_FCTAB_LOCK);
1187 
1188 	iocbq = (IOCBQ *)abort.q_first;
1189 	while (iocbq) {
1190 		next = (IOCBQ *)iocbq->next;
1191 		iocbq->next = NULL;
1192 		sbp = (emlxs_buf_t *)iocbq->sbp;
1193 
1194 		pkt = PRIV2PKT(sbp);
1195 		if (pkt) {
1196 			did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
1197 			cmd = *((uint32_t *)pkt->pkt_cmd);
1198 			cmd = LE_SWAP32(cmd);
1199 		}
1200 
1201 
1202 		emlxs_tx_put(iocbq, 0);
1203 
1204 		iocbq = next;
1205 
1206 	}	/* end of while */
1207 
1208 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
1209 
1210 	return;
1211 
1212 } /* emlxs_tx_watchdog() */
1213 
1214 #endif /* TX_WATCHDOG */
1215 
1216 
1217 #ifdef DHCHAP_SUPPORT
1218 
/*
 * emlxs_timer_check_dhchap
 *
 * Check nodes for expired DHCHAP authentication-response and
 * reauthentication timers and invoke the corresponding handlers.
 *
 * NOTE(review): only the head node of each hash bucket is examined;
 * nodes chained via nlp_list_next are never checked here, unlike the
 * other node scans (e.g. emlxs_timer_check_nodes).  The scan also runs
 * without holding port->node_rwlock.  Confirm both are intended.
 */
static void
emlxs_timer_check_dhchap(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint32_t i;
	NODELIST *ndlp = NULL;

	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		ndlp = port->node_table[i];

		if (!ndlp) {
			continue;
		}

		/* Check authentication response timeout */
		if (ndlp->node_dhc.nlp_authrsp_tmo &&
		    (hba->timer_tics >= ndlp->node_dhc.nlp_authrsp_tmo)) {
			/* Trigger authresp timeout handler */
			(void) emlxs_dhc_authrsp_timeout(port, ndlp, NULL);
		}

		/* Check reauthentication timeout */
		if (ndlp->node_dhc.nlp_reauth_tmo &&
		    (hba->timer_tics >= ndlp->node_dhc.nlp_reauth_tmo)) {
			/* Trigger reauth timeout handler */
			emlxs_dhc_reauth_timeout(port, NULL, ndlp);
		}
	}
	return;

} /* emlxs_timer_check_dhchap */
1250 
1251 #endif /* DHCHAP_SUPPORT */
1252