1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Timer period in seconds */
31 #define	EMLXS_TIMER_PERIOD		1	/* secs */
32 #define	EMLXS_PKT_PERIOD		5	/* secs */
33 #define	EMLXS_UB_PERIOD			60	/* secs */
34 
35 EMLXS_MSG_DEF(EMLXS_CLOCK_C);
36 
37 
38 static void emlxs_timer_check_loopback(emlxs_hba_t *hba);
39 
40 #ifdef DHCHAP_SUPPORT
41 static void emlxs_timer_check_dhchap(emlxs_port_t *port);
42 #endif /* DHCHAP_SUPPORT */
43 
44 static void	emlxs_timer(void *arg);
45 static void	emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
46 static uint32_t	emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
47 static void	emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
48 static void	emlxs_timer_check_linkup(emlxs_hba_t *hba);
49 static void	emlxs_timer_check_discovery(emlxs_port_t *port);
50 static void	emlxs_timer_check_ub(emlxs_port_t *port);
51 static void	emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag);
52 static uint32_t	emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
53 			Q *abortq, uint8_t *flag);
54 
55 #ifdef TX_WATCHDOG
56 static void	emlxs_tx_watchdog(emlxs_hba_t *hba);
57 #endif /* TX_WATCHDOG */
58 
59 extern clock_t
60 emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
61 {
62 	emlxs_config_t *cfg = &CFG;
63 	clock_t time;
64 
65 	/* Set thread timeout */
66 	if (cfg[CFG_TIMEOUT_ENABLE].current) {
67 		(void) drv_getparm(LBOLT, &time);
68 		time += (timeout * drv_usectohz(1000000));
69 	} else {
70 		time = -1;
71 	}
72 
73 	return (time);
74 
75 } /* emlxs_timeout() */
76 
77 
/*
 * System timeout(9F) callback; fires every EMLXS_TIMER_PERIOD seconds.
 *
 * Runs the standard periodic checks and then re-arms itself, unless a
 * kill request (EMLXS_TIMER_KILL) has been posted by emlxs_timer_stop(),
 * in which case it marks itself ended and does not reschedule.
 */
static void
emlxs_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;

	/* Timer was cancelled before this callback got to run */
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);

	EMLXS_SLI_POLL_ERRATT(hba);

	/* Only one timer thread is allowed */
	if (hba->timer_flags & EMLXS_TIMER_BUSY) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	/* Check if a kill request has been made */
	if (hba->timer_flags & EMLXS_TIMER_KILL) {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;

		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
	hba->timer_tics = DRV_TIME;

	/* Drop the lock while the (potentially long) checks run; */
	/* EMLXS_TIMER_BUSY keeps a second instance out */
	mutex_exit(&EMLXS_TIMER_LOCK);

	/* Perform standard checks */
	emlxs_timer_checks(hba);

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);

	hba->timer_flags &= ~EMLXS_TIMER_BUSY;

	/* If timer is still enabled, restart it */
	if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
		hba->timer_id =
		    timeout(emlxs_timer, (void *)hba,
		    (EMLXS_TIMER_PERIOD * drv_usectohz(1000000)));
	} else {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer() */
134 
135 
/*
 * Run all periodic checks for one adapter.  Called from emlxs_timer()
 * once per tick with no locks held.
 *
 * flag[] records which IO channels need servicing; it is filled in by
 * the packet/node checks and consumed by emlxs_timer_check_channels(),
 * which must therefore run last.
 */
extern void
emlxs_timer_checks(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t flag[MAX_CHANNEL];
	uint32_t i;
	uint32_t rc;

	/* Exit if we are still initializing */
	if (hba->state < FC_LINK_DOWN) {
		return;
	}

	bzero((void *)flag, sizeof (flag));

	/* Check SLI level timeouts */
	EMLXS_SLI_TIMER(hba);

	/* Check event queue */
	emlxs_timer_check_events(hba);

	/* Check heartbeat timer */
	emlxs_timer_check_heartbeat(hba);

#ifdef IDLE_TIMER
	emlxs_pm_idle_timer(hba);
#endif /* IDLE_TIMER */

	/* Check for loopback timeouts */
	emlxs_timer_check_loopback(hba);

	/* Check for packet timeouts */
	rc = emlxs_timer_check_pkts(hba, flag);

	if (rc) {
		/* Link or adapter is being reset; skip remaining checks */
		return;
	}

	/* Check for linkup timeout */
	emlxs_timer_check_linkup(hba);

	/* Check the ports */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		/* Check for node gate timeouts */
		emlxs_timer_check_nodes(port, flag);

		/* Check for tape discovery timeout */
		emlxs_timer_check_discovery(port);

		/* Check for UB timeouts */
		emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
		/* Check for DHCHAP authentication timeouts */
		emlxs_timer_check_dhchap(port);
#endif /* DHCHAP_SUPPORT */

	}

	/* Check for IO channel service timeouts */
	/* Always do this last */
	emlxs_timer_check_channels(hba, flag);

	return;

} /* emlxs_timer_checks() */
209 
210 
211 extern void
212 emlxs_timer_start(emlxs_hba_t *hba)
213 {
214 	if (hba->timer_id) {
215 		return;
216 	}
217 
218 	/* Restart the timer */
219 	mutex_enter(&EMLXS_TIMER_LOCK);
220 	if (!hba->timer_id) {
221 		hba->timer_flags = 0;
222 		hba->timer_id =
223 		    timeout(emlxs_timer, (void *)hba, drv_usectohz(1000000));
224 	}
225 	mutex_exit(&EMLXS_TIMER_LOCK);
226 
227 } /* emlxs_timer_start() */
228 
229 
230 extern void
231 emlxs_timer_stop(emlxs_hba_t *hba)
232 {
233 	if (!hba->timer_id) {
234 		return;
235 	}
236 
237 	mutex_enter(&EMLXS_TIMER_LOCK);
238 	hba->timer_flags |= EMLXS_TIMER_KILL;
239 
240 	while (hba->timer_id) {
241 		mutex_exit(&EMLXS_TIMER_LOCK);
242 		delay(drv_usectohz(500000));
243 		mutex_enter(&EMLXS_TIMER_LOCK);
244 	}
245 	mutex_exit(&EMLXS_TIMER_LOCK);
246 
247 	return;
248 
249 } /* emlxs_timer_stop() */
250 
251 
252 static uint32_t
253 emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
254 {
255 	emlxs_port_t *port = &PPORT;
256 	emlxs_config_t *cfg = &CFG;
257 	Q tmo;
258 	int32_t channelno;
259 	CHANNEL *cp;
260 	NODELIST *nlp;
261 	IOCBQ *prev;
262 	IOCBQ *next;
263 	IOCB *iocb;
264 	IOCBQ *iocbq;
265 	emlxs_buf_t *sbp;
266 	fc_packet_t *pkt;
267 	Q abort;
268 	uint32_t iotag;
269 	uint32_t rc;
270 
271 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
272 		return (0);
273 	}
274 
275 	if (hba->pkt_timer > hba->timer_tics) {
276 		return (0);
277 	}
278 
279 	hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;
280 
281 
282 	bzero((void *)&tmo, sizeof (Q));
283 
284 	/*
285 	 * We must hold the locks here because we never know when an iocb
286 	 * will be removed out from under us
287 	 */
288 
289 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
290 
291 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
292 		cp = &hba->chan[channelno];
293 
294 		/* Scan the tx queues for each active node on the channel */
295 
296 		/* Get the first node */
297 		nlp = (NODELIST *)cp->nodeq.q_first;
298 
299 		while (nlp) {
300 			/* Scan the node's priority tx queue */
301 			prev = NULL;
302 			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;
303 
304 			while (iocbq) {
305 				next = (IOCBQ *)iocbq->next;
306 				iocb = &iocbq->iocb;
307 				sbp = (emlxs_buf_t *)iocbq->sbp;
308 
309 				/* Check if iocb has timed out */
310 				if (sbp && hba->timer_tics >= sbp->ticks) {
311 					/* iocb timed out, now deque it */
312 					if (next == NULL) {
313 						nlp->nlp_ptx[channelno].q_last =
314 						    (uint8_t *)prev;
315 					}
316 
317 					if (prev == NULL) {
318 						nlp->nlp_ptx[channelno].
319 						    q_first = (uint8_t *)next;
320 					} else {
321 						prev->next = next;
322 					}
323 
324 					iocbq->next = NULL;
325 					nlp->nlp_ptx[channelno].q_cnt--;
326 
327 					/* Add this iocb to our local */
328 					/* timout queue */
329 
330 					/*
331 					 * This way we don't hold the TX_CHANNEL
332 					 * lock too long
333 					 */
334 
335 					if (tmo.q_first) {
336 						((IOCBQ *)tmo.q_last)->next =
337 						    iocbq;
338 						tmo.q_last =
339 						    (uint8_t *)iocbq;
340 						tmo.q_cnt++;
341 					} else {
342 						tmo.q_first =
343 						    (uint8_t *)iocbq;
344 						tmo.q_last =
345 						    (uint8_t *)iocbq;
346 						tmo.q_cnt = 1;
347 					}
348 					iocbq->next = NULL;
349 
350 				} else {
351 					prev = iocbq;
352 				}
353 
354 				iocbq = next;
355 
356 			}	/* while (iocbq) */
357 
358 
359 			/* Scan the node's tx queue */
360 			prev = NULL;
361 			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;
362 
363 			while (iocbq) {
364 				next = (IOCBQ *)iocbq->next;
365 				iocb = &iocbq->iocb;
366 				sbp = (emlxs_buf_t *)iocbq->sbp;
367 
368 				/* Check if iocb has timed out */
369 				if (sbp && hba->timer_tics >= sbp->ticks) {
370 					/* iocb timed out, now deque it */
371 					if (next == NULL) {
372 						nlp->nlp_tx[channelno].q_last =
373 						    (uint8_t *)prev;
374 					}
375 
376 					if (prev == NULL) {
377 						nlp->nlp_tx[channelno].q_first =
378 						    (uint8_t *)next;
379 					} else {
380 						prev->next = next;
381 					}
382 
383 					iocbq->next = NULL;
384 					nlp->nlp_tx[channelno].q_cnt--;
385 
386 					/* Add this iocb to our local */
387 					/* timout queue */
388 
389 					/*
390 					 * This way we don't hold the TX_CHANNEL
391 					 * lock too long
392 					 */
393 
394 					if (tmo.q_first) {
395 						((IOCBQ *)tmo.q_last)->next =
396 						    iocbq;
397 						tmo.q_last =
398 						    (uint8_t *)iocbq;
399 						tmo.q_cnt++;
400 					} else {
401 						tmo.q_first =
402 						    (uint8_t *)iocbq;
403 						tmo.q_last =
404 						    (uint8_t *)iocbq;
405 						tmo.q_cnt = 1;
406 					}
407 					iocbq->next = NULL;
408 
409 				} else {
410 					prev = iocbq;
411 				}
412 
413 				iocbq = next;
414 
415 			}	/* while (iocbq) */
416 
417 			if (nlp == (NODELIST *)cp->nodeq.q_last) {
418 				nlp = NULL;
419 			} else {
420 				nlp = nlp->nlp_next[channelno];
421 			}
422 
423 		}	/* while (nlp) */
424 
425 	}	/* end of for */
426 
427 	/* Now cleanup the iocb's */
428 	iocbq = (IOCBQ *)tmo.q_first;
429 	while (iocbq) {
430 		/* Free the IoTag and the bmp */
431 		iocb = &iocbq->iocb;
432 		channelno = ((CHANNEL *)iocbq->channel)->channelno;
433 		sbp = iocbq->sbp;
434 		if (sbp && (sbp != STALE_PACKET)) {
435 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
436 				hba->fc_table[sbp->iotag] = NULL;
437 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
438 			} else {
439 				(void) emlxs_unregister_pkt(
440 				    (CHANNEL *)iocbq->channel,
441 				    iocb->ULPIOTAG, 0);
442 			}
443 
444 			mutex_enter(&sbp->mtx);
445 			sbp->pkt_flags |= PACKET_IN_TIMEOUT;
446 			mutex_exit(&sbp->mtx);
447 		}
448 
449 		iocbq = (IOCBQ *)iocbq->next;
450 
451 	}	/* end of while */
452 
453 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
454 
455 	/* Now complete the transmit timeouts outside the locks */
456 	iocbq = (IOCBQ *)tmo.q_first;
457 	while (iocbq) {
458 		/* Save the next iocbq for now */
459 		next = (IOCBQ *)iocbq->next;
460 
461 		/* Unlink this iocbq */
462 		iocbq->next = NULL;
463 
464 		/* Get the pkt */
465 		sbp = (emlxs_buf_t *)iocbq->sbp;
466 
467 		if (sbp) {
468 			/* Warning: Some FCT sbp's don't have */
469 			/* fc_packet objects */
470 			pkt = PRIV2PKT(sbp);
471 
472 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
473 			    "TXQ abort: sbp=%p iotag=%x tmo=%d", sbp,
474 			    sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);
475 
476 			if (hba->state >= FC_LINK_UP) {
477 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
478 				    IOERR_ABORT_TIMEOUT, 1);
479 			} else {
480 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
481 				    IOERR_LINK_DOWN, 1);
482 			}
483 
484 		}
485 
486 		iocbq = next;
487 
488 	}	/* end of while */
489 
490 
491 
492 	/* Now check the chip */
493 	bzero((void *)&abort, sizeof (Q));
494 
495 	/* Check the HBA for outstanding IOs */
496 	rc = 0;
497 	mutex_enter(&EMLXS_FCTAB_LOCK);
498 	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
499 		sbp = hba->fc_table[iotag];
500 		if (sbp && (sbp != STALE_PACKET) &&
501 		    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
502 		    (hba->timer_tics >= sbp->ticks)) {
503 			rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
504 			    sbp, &abort, flag);
505 
506 			if (rc) {
507 				break;
508 			}
509 		}
510 	}
511 	mutex_exit(&EMLXS_FCTAB_LOCK);
512 
513 	/* Now put the iocb's on the tx queue */
514 	iocbq = (IOCBQ *)abort.q_first;
515 	while (iocbq) {
516 		/* Save the next iocbq for now */
517 		next = (IOCBQ *)iocbq->next;
518 
519 		/* Unlink this iocbq */
520 		iocbq->next = NULL;
521 
522 		/* Send this iocbq */
523 		emlxs_tx_put(iocbq, 1);
524 
525 		iocbq = next;
526 	}
527 
528 	/* Now trigger IO channel service to send these abort iocbq */
529 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
530 		if (!flag[channelno]) {
531 			continue;
532 		}
533 		cp = &hba->chan[channelno];
534 
535 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
536 	}
537 
538 	if (rc == 1) {
539 		/* Spawn a thread to reset the link */
540 		emlxs_thread_spawn(hba, emlxs_reset_link_thread, NULL, NULL);
541 	} else if (rc == 2) {
542 		/* Spawn a thread to reset the adapter */
543 		emlxs_thread_spawn(hba, emlxs_restart_thread, NULL, NULL);
544 	}
545 
546 	return (rc);
547 
548 } /* emlxs_timer_check_pkts() */
549 
550 
/*
 * Check each IO channel for a stalled service timer.  If a channel's
 * timeout has expired while work is still queued, mark the channel in
 * flag[] and push its timeout out another 10 tics.  Any channel flagged
 * here (or by an earlier check via flag[]) gets an IOCB service request.
 */
static void
emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t channelno;
	CHANNEL *cp;
	uint32_t logit;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		logit = 0;

		/* Check for channel timeout now */
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
		if (cp->timeout && (hba->timer_tics >= cp->timeout)) {
			/* Check if there is work to do on channel and */
			/* the link is still up */
			if (cp->nodeq.q_first) {
				flag[channelno] = 1;
				cp->timeout = hba->timer_tics + 10;

				/* Only log the stall if the link is up; */
				/* a down link explains the backlog */
				if (hba->state >= FC_LINK_UP) {
					logit = 1;
				}
			} else {
				/* No work pending; disarm the timer */
				cp->timeout = 0;
			}
		}
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

		/* Log outside the lock */
		if (logit) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_chan_watchdog_msg,
			    "IO Channel %d cnt=%d,%d",
			    channelno,
			    hba->channel_tx_count,
			    hba->io_count);
		}

		/*
		 * If IO channel flag is set, request iocb servicing
		 * here to send any iocb's that may still be queued
		 */
		if (flag[channelno]) {
			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
		}
	}

	return;

} /* emlxs_timer_check_channels() */
608 
609 
/*
 * Scan the port's node hash table for expired per-channel node timers
 * and expired forced-RSCN timers.
 *
 * emlxs_node_timeout() cannot be called with node_rwlock held, so each
 * pass scans under the lock, drops it, handles one expired node, and
 * then rescans from the top until no expired timers remain.
 */
static void
emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	int32_t channelno;

	for (;;) {
		/* Check node gate flag for expiration */
		found = 0;

		/*
		 * We need to lock, scan, and unlock because we can't hold the
		 * lock while we call node_open
		 */
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				for (channelno = 0;
				    channelno < hba->chan_count;
				    channelno++) {
					/* Check if the node timer is active */
					/* and if timer has expired */
					if (nlp->nlp_tics[channelno] &&
					    (hba->timer_tics >=
					    nlp->nlp_tics[channelno])) {
						/* If so, set the flag and */
						/* break out */
						found = 1;
						flag[channelno] = 1;
						break;
					}
				}

				if (nlp->nlp_force_rscn &&
				    (hba->timer_tics >= nlp->nlp_force_rscn)) {
					nlp->nlp_force_rscn = 0;
					/*
					 * Generate an RSCN to
					 * wakeup ULP
					 */
					(void) emlxs_generate_rscn(port,
					    nlp->nlp_DID);
				}

				if (found) {
					break;
				}

				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}

		}
		rw_exit(&port->node_rwlock);

		/* No expired node timers left; done */
		if (!found) {
			break;
		}

		/* Handle the expired node found above, then rescan */
		emlxs_node_timeout(port, nlp, channelno);
	}

} /* emlxs_timer_check_nodes() */
680 
681 
682 static void
683 emlxs_timer_check_loopback(emlxs_hba_t *hba)
684 {
685 	emlxs_port_t *port = &PPORT;
686 	emlxs_config_t *cfg = &CFG;
687 	int32_t reset = 0;
688 
689 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
690 		return;
691 	}
692 
693 	/* Check the loopback timer for expiration */
694 	mutex_enter(&EMLXS_PORT_LOCK);
695 
696 	if (!hba->loopback_tics || (hba->timer_tics < hba->loopback_tics)) {
697 		mutex_exit(&EMLXS_PORT_LOCK);
698 		return;
699 	}
700 
701 	hba->loopback_tics = 0;
702 
703 	if (hba->flag & FC_LOOPBACK_MODE) {
704 		reset = 1;
705 	}
706 
707 	mutex_exit(&EMLXS_PORT_LOCK);
708 
709 	if (reset) {
710 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
711 		    "LOOPBACK_MODE: Expired. Resetting...");
712 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
713 	}
714 
715 	return;
716 
717 } /* emlxs_timer_check_loopback() */
718 
719 
720 static void
721 emlxs_timer_check_linkup(emlxs_hba_t *hba)
722 {
723 	emlxs_port_t *port = &PPORT;
724 	uint32_t linkup;
725 
726 	/* Check if all mbox commands from previous activity are processed */
727 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
728 		mutex_enter(&EMLXS_MBOX_LOCK);
729 		if (hba->mbox_queue.q_first) {
730 			mutex_exit(&EMLXS_MBOX_LOCK);
731 			return;
732 		}
733 		mutex_exit(&EMLXS_MBOX_LOCK);
734 	}
735 
736 	/* Check the linkup timer for expiration */
737 	mutex_enter(&EMLXS_PORT_LOCK);
738 	linkup = 0;
739 	if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
740 		hba->linkup_timer = 0;
741 
742 		/* Make sure link is still ready */
743 		if (hba->state >= FC_LINK_UP) {
744 			linkup = 1;
745 		}
746 	}
747 	mutex_exit(&EMLXS_PORT_LOCK);
748 
749 	/* Make the linkup callback */
750 	if (linkup) {
751 		emlxs_port_online(port);
752 	}
753 	return;
754 
755 } /* emlxs_timer_check_linkup() */
756 
757 
/*
 * Verify the adapter is still alive.  If no interrupt activity has been
 * seen for a full heartbeat period (5 secs) and no mailbox traffic is
 * pending, issue a HEARTBEAT mailbox command so the mailbox timeout
 * machinery will catch a dead adapter.
 */
static void
emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	emlxs_config_t *cfg = &CFG;
	int rc;

	if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
		return;
	}

	/* Only check once per heartbeat period */
	if (hba->timer_tics < hba->heartbeat_timer) {
		return;
	}

	hba->heartbeat_timer = hba->timer_tics + 5;

	/* Return if adapter interrupts have occurred */
	if (hba->heartbeat_flag) {
		hba->heartbeat_flag = 0;
		return;
	}
	/* No adapter interrupts have occurred for 5 seconds now */

	/* Return if mailbox is busy */
	/* This means the mailbox timer routine is watching for problems */
	if (hba->mbox_timer) {
		return;
	}

	/* Return if heartbeat is still outstanding */
	if (hba->heartbeat_active) {
		return;
	}

	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Unable to allocate heartbeat mailbox.");
		return;
	}

	emlxs_mb_heartbeat(hba, mbq);
	hba->heartbeat_active = 1;

	/* On MBX_BUSY/MBX_SUCCESS the SLI layer owns mbq; */
	/* otherwise free it here */
	rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
	}

	return;

} /* emlxs_timer_check_heartbeat() */
811 
812 
/*
 * Handle the post-linkup discovery timer.  While the timer has expired
 * and the link is still up, flush FCP-2 devices whose FCP channel is
 * still closed (one node per pass, dropping EMLXS_PORT_LOCK around the
 * unreg_did call).  When no such nodes remain, issue CLEAR_LA (pre-SLI4
 * only) to complete link bring-up.
 */
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	int32_t send_clear_la;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	MAILBOXQ *mbox;
	int rc;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Check the discovery timer for expiration */
	send_clear_la = 0;
	mutex_enter(&EMLXS_PORT_LOCK);
	while (hba->discovery_timer &&
	    (hba->timer_tics >= hba->discovery_timer) &&
	    (hba->state == FC_LINK_UP)) {
		send_clear_la = 1;

		/* Perform a flush on fcp2 nodes that are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[hba->channel_fcp] &
				    NLP_CLOSED)) {
					found = 1;
					break;

				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		/* No closed fcp2 nodes left; exit the flush loop */
		if (!found) {
			break;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
		    "FCP2 device (did=%06x) missing. Flushing...",
		    nlp->nlp_DID);

		/* Drop the port lock around the unregister call */
		mutex_exit(&EMLXS_PORT_LOCK);

		(void) emlxs_mb_unreg_did(port, nlp->nlp_DID, NULL, NULL, NULL);

		mutex_enter(&EMLXS_PORT_LOCK);

	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Try to send clear link attention, if needed */
	if ((hba->sli_mode < EMLXS_HBA_SLI4_MODE) && (send_clear_la == 1) &&
	    (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
		mutex_enter(&EMLXS_PORT_LOCK);

		/*
		 * If state is not FC_LINK_UP, then either the link has gone
		 * down or a FC_CLEAR_LA has already been issued
		 */
		if (hba->state != FC_LINK_UP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbox);
		} else {
			/* Change state and clear discovery timer */
			EMLXS_STATE_CHANGE_LOCKED(hba, FC_CLEAR_LA);

			hba->discovery_timer = 0;

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Prepare and send the CLEAR_LA command */
			emlxs_mb_clear_la(hba, mbox);

			/* On MBX_BUSY/MBX_SUCCESS the SLI layer owns mbox */
			rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
	}

	return;

} /* emlxs_timer_check_discovery()  */
910 
911 
912 static void
913 emlxs_timer_check_ub(emlxs_port_t *port)
914 {
915 	emlxs_hba_t *hba = HBA;
916 	emlxs_unsol_buf_t *ulistp;
917 	fc_unsol_buf_t *ubp;
918 	emlxs_ub_priv_t *ub_priv;
919 	uint32_t i;
920 
921 	if (port->ub_timer > hba->timer_tics) {
922 		return;
923 	}
924 
925 	port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;
926 
927 	/* Check the unsolicited buffers */
928 	mutex_enter(&EMLXS_UB_LOCK);
929 
930 	ulistp = port->ub_pool;
931 	while (ulistp) {
932 		/* Check buffers in this pool */
933 		for (i = 0; i < ulistp->pool_nentries; i++) {
934 			ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
935 			ub_priv = ubp->ub_fca_private;
936 
937 			if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
938 				continue;
939 			}
940 
941 			/* If buffer has timed out, print message and */
942 			/* increase timeout */
943 			if ((ub_priv->time + ub_priv->timeout) <=
944 			    hba->timer_tics) {
945 				ub_priv->flags |= EMLXS_UB_TIMEOUT;
946 
947 				EMLXS_MSGF(EMLXS_CONTEXT,
948 				    &emlxs_sfs_debug_msg,
949 				    "Stale UB buffer detected (%d mins): "
950 				    "buffer=%p (%x,%x,%x,%x)",
951 				    (ub_priv->timeout / 60), ubp,
952 				    ubp->ub_frame.type, ubp->ub_frame.s_id,
953 				    ubp->ub_frame.ox_id, ubp->ub_frame.rx_id);
954 
955 				/* Increase timeout period */
956 
957 				/* If timeout was 5 mins or less, */
958 				/* increase it to 10 mins */
959 				if (ub_priv->timeout <= (5 * 60)) {
960 					ub_priv->timeout = (10 * 60);
961 				}
962 				/* If timeout was 10 mins or less, */
963 				/* increase it to 30 mins */
964 				else if (ub_priv->timeout <= (10 * 60)) {
965 					ub_priv->timeout = (30 * 60);
966 				}
967 				/* Otherwise double it. */
968 				else {
969 					ub_priv->timeout *= 2;
970 				}
971 			}
972 		}
973 
974 		ulistp = ulistp->pool_next;
975 	}
976 
977 	mutex_exit(&EMLXS_UB_LOCK);
978 
979 	return;
980 
981 } /* emlxs_timer_check_ub()  */
982 
983 
/* EMLXS_FCTAB_LOCK must be held to call this */
/*
 * Escalate a packet that has timed out while outstanding on the chip.
 * Each call bumps sbp->abort_attempts and takes the next step:
 *   attempt 0: abort the XRI (link up) or close it (link down)
 *   attempt 1: close the XRI
 *   attempt 2: request a link reset     (returns 1)
 *   later:     request an adapter reset (returns 2)
 * Any abort/close iocb built here is appended to abortq for the caller
 * to transmit, and flag[channel] is set so the channel gets serviced.
 */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abortq,
    uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	CHANNEL *cp = (CHANNEL *)sbp->channel;
	IOCBQ *iocbq = NULL;
	fc_packet_t *pkt;
	uint32_t rc = 0;

	mutex_enter(&sbp->mtx);

	/* Warning: Some FCT sbp's don't have fc_packet objects */
	pkt = PRIV2PKT(sbp);

	switch (sbp->abort_attempts) {
	case 0:

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Aborting. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

			/* The adapter will make 2 attempts to send ABTS */
			/* with 2*ratov timeout each time */
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Closing. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, cp);

			sbp->ticks = hba->timer_tics + 30;
		}

		/* set the flags */
		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 1:

		/* Abort did not complete in time; force-close the XRI */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 2:Closing. sbp=%p iotag=%x", sbp, sbp->iotag);

		iocbq =
		    emlxs_create_close_xri_cn(port, sbp->node, sbp->iotag,
		    cp);

		sbp->ticks = hba->timer_tics + 30;

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 2:

		/* Close also failed; escalate to a link reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 3:Resetting link. sbp=%p iotag=%x", sbp,
		    sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 1;

		break;

	default:

		/* Nothing else worked; escalate to an adapter reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: %d:Resetting adapter. sbp=%p iotag=%x",
		    sbp->abort_attempts, sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 2;

		break;
	}

	sbp->abort_attempts++;
	mutex_exit(&sbp->mtx);

	/* Append the abort/close iocb (if any) to the caller's queue */
	if (iocbq) {
		if (abortq->q_first) {
			((IOCBQ *)abortq->q_last)->next = iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt++;
		} else {
			abortq->q_first = (uint8_t *)iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt = 1;
		}
		iocbq->next = NULL;
	}

	return (rc);

} /* emlxs_pkt_chip_timeout() */
1097 
1098 
#ifdef TX_WATCHDOG

/*
 * Debug-only watchdog (TX_WATCHDOG): verifies that every packet marked
 * PACKET_IN_TXQ can actually be found on its node's transmit queue.  A
 * packet missed once is marked PACKET_STALE; a packet missed twice in a
 * row is collected and requeued via emlxs_tx_put().
 *
 * Fixes vs. the original (which never compiled, being #ifdef'd out):
 *  - the channel cast was misparenthesized as
 *    (CHANNEL *)(sbp->channel)->channelno, applying the cast to the
 *    channelno member instead of the sbp->channel pointer
 *  - Q first/last assignments were missing their (uint8_t *) casts
 *  - dead locals (bmp, cp, pkt/did/cmd decode) removed
 */
static void
emlxs_tx_watchdog(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	NODELIST *nlp;
	uint32_t channelno;
	IOCBQ *next;
	IOCBQ *iocbq;
	uint32_t found;
	Q abort;
	uint32_t iotag;
	emlxs_buf_t *sbp;

	bzero((void *)&abort, sizeof (Q));

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
		sbp = hba->fc_table[iotag];
		if (sbp && (sbp != STALE_PACKET) &&
		    (sbp->pkt_flags & PACKET_IN_TXQ)) {
			nlp = sbp->node;
			channelno = ((CHANNEL *)sbp->channel)->channelno;

			/* Pick the queue this iocb should be sitting on */
			if (sbp->iocbq.flag & IOCB_PRIORITY) {
				iocbq =
				    (IOCBQ *)nlp->nlp_ptx[channelno].
				    q_first;
			} else {
				iocbq =
				    (IOCBQ *)nlp->nlp_tx[channelno].
				    q_first;
			}

			/* Find a matching entry */
			found = 0;
			while (iocbq) {
				if (iocbq == &sbp->iocbq) {
					found = 1;
					break;
				}

				iocbq = (IOCBQ *)iocbq->next;
			}

			if (!found) {
				if (!(sbp->pkt_flags & PACKET_STALE)) {
					/* First miss: just mark it stale */
					mutex_enter(&sbp->mtx);
					sbp->pkt_flags |=
					    PACKET_STALE;
					mutex_exit(&sbp->mtx);
				} else {
					/* Second miss: collect for requeue */
					if (abort.q_first == 0) {
						abort.q_first =
						    (uint8_t *)&sbp->iocbq;
					} else {
						((IOCBQ *)abort.
						    q_last)->next =
						    &sbp->iocbq;
					}

					abort.q_last =
					    (uint8_t *)&sbp->iocbq;
					abort.q_cnt++;
				}

			} else {
				/* Found on its queue; clear any stale mark */
				if ((sbp->pkt_flags & PACKET_STALE)) {
					mutex_enter(&sbp->mtx);
					sbp->pkt_flags &=
					    ~PACKET_STALE;
					mutex_exit(&sbp->mtx);
				}
			}
		}
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Requeue everything collected above */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		next = (IOCBQ *)iocbq->next;
		iocbq->next = NULL;

		emlxs_tx_put(iocbq, 0);

		iocbq = next;

	}	/* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	return;

} /* emlxs_tx_watchdog() */

#endif /* TX_WATCHDOG */
1213 
1214 
#ifdef DHCHAP_SUPPORT

/*
 * Check nodes for expired DHCHAP authentication timers and fire the
 * appropriate timeout handlers.
 *
 * NOTE(review): only the FIRST node of each hash bucket is examined;
 * nodes chained behind it via nlp_list_next are never checked, unlike
 * the other node-table scans in this file.  Also, node_rwlock is not
 * taken here.  Both look intentional-but-fragile (walking the chain
 * while a timeout handler may tear a node down would be unsafe) —
 * confirm against the DHCHAP node lifecycle before changing.
 */
static void
emlxs_timer_check_dhchap(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint32_t i;
	NODELIST *ndlp = NULL;

	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		ndlp = port->node_table[i];

		if (!ndlp) {
			continue;
		}

		/* Check authentication response timeout */
		if (ndlp->node_dhc.nlp_authrsp_tmo &&
		    (hba->timer_tics >= ndlp->node_dhc.nlp_authrsp_tmo)) {
			/* Trigger authresp timeout handler */
			(void) emlxs_dhc_authrsp_timeout(port, ndlp, NULL);
		}

		/* Check reauthentication timeout */
		if (ndlp->node_dhc.nlp_reauth_tmo &&
		    (hba->timer_tics >= ndlp->node_dhc.nlp_reauth_tmo)) {
			/* Trigger reauth timeout handler */
			emlxs_dhc_reauth_timeout(port, NULL, ndlp);
		}
	}
	return;

} /* emlxs_timer_check_dhchap */

#endif /* DHCHAP_SUPPORT */
1250