1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Timer period in seconds */
31 #define	EMLXS_TIMER_PERIOD		1	/* secs */
32 #define	EMLXS_PKT_PERIOD		5	/* secs */
33 #define	EMLXS_UB_PERIOD			60	/* secs */
34 
35 EMLXS_MSG_DEF(EMLXS_CLOCK_C);
36 
37 
38 static void emlxs_timer_check_loopback(emlxs_hba_t *hba);
39 
40 #ifdef DHCHAP_SUPPORT
41 static void emlxs_timer_check_dhchap(emlxs_port_t *port);
42 #endif /* DHCHAP_SUPPORT */
43 
44 static void	emlxs_timer(void *arg);
45 static void	emlxs_timer_check_fw_update(emlxs_hba_t *hba);
46 static void	emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
47 static uint32_t	emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
48 static void	emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
49 static void	emlxs_timer_check_linkup(emlxs_hba_t *hba);
50 static void	emlxs_timer_check_discovery(emlxs_port_t *port);
51 static void	emlxs_timer_check_ub(emlxs_port_t *port);
52 static void	emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag);
53 static uint32_t	emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
54 			Q *abortq, uint8_t *flag);
55 
56 #ifdef TX_WATCHDOG
57 static void	emlxs_tx_watchdog(emlxs_hba_t *hba);
58 #endif /* TX_WATCHDOG */
59 
60 extern clock_t
61 emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
62 {
63 	emlxs_config_t *cfg = &CFG;
64 	clock_t time;
65 
66 	/* Set thread timeout */
67 	if (cfg[CFG_TIMEOUT_ENABLE].current) {
68 		(void) drv_getparm(LBOLT, &time);
69 		time += (timeout * drv_usectohz(1000000));
70 	} else {
71 		time = -1;
72 	}
73 
74 	return (time);
75 
76 } /* emlxs_timeout() */
77 
78 
/*
 * Periodic callout entry point (runs every EMLXS_TIMER_PERIOD seconds).
 * Performs the standard timer checks and rearms itself unless a kill
 * request has been posted via emlxs_timer_stop().
 */
static void
emlxs_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;

	/* Timer was cancelled after this callout fired; do nothing */
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);

	/* Poll SLI layer for error attention conditions */
	EMLXS_SLI_POLL_ERRATT(hba);

	/* Only one timer thread is allowed */
	if (hba->timer_flags & EMLXS_TIMER_BUSY) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	/* Check if a kill request has been made */
	if (hba->timer_flags & EMLXS_TIMER_KILL) {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;

		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}

	/* Mark busy and advance the driver tick count before dropping */
	/* the lock to run the (potentially slow) checks */
	hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
	hba->timer_tics = DRV_TIME;

	mutex_exit(&EMLXS_TIMER_LOCK);

	/* Perform standard checks */
	emlxs_timer_checks(hba);

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);

	hba->timer_flags &= ~EMLXS_TIMER_BUSY;

	/* If timer is still enabled, restart it */
	if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
		hba->timer_id =
		    timeout(emlxs_timer, (void *)hba,
		    (EMLXS_TIMER_PERIOD * drv_usectohz(1000000)));
	} else {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer() */
135 
136 
/*
 * Run all periodic maintenance checks for one adapter: SLI-level
 * timers, events, heartbeat, firmware-update reminder, loopback,
 * packet timeouts, link-up callback, and per-vport node/discovery/UB
 * checks.  Called from emlxs_timer() once per timer tick.
 */
extern void
emlxs_timer_checks(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t flag[MAX_CHANNEL];
	uint32_t i;
	uint32_t rc;

	/* Exit if we are still initializing */
	if (hba->state < FC_LINK_DOWN) {
		return;
	}

	/* flag[] collects which channels need IOCB servicing below */
	bzero((void *)flag, sizeof (flag));

	/* Check SLI level timeouts */
	EMLXS_SLI_TIMER(hba);

	/* Check event queue */
	emlxs_timer_check_events(hba);

	/* Check heartbeat timer */
	emlxs_timer_check_heartbeat(hba);

	/* Check fw update timer */
	emlxs_timer_check_fw_update(hba);

#ifdef IDLE_TIMER
	emlxs_pm_idle_timer(hba);
#endif /* IDLE_TIMER */

	/* Check for loopback timeouts */
	emlxs_timer_check_loopback(hba);

	/* Check for packet timeouts */
	rc = emlxs_timer_check_pkts(hba, flag);

	if (rc) {
		/* Link or adapter is being reset; skip remaining checks */
		return;
	}

	/* Check for linkup timeout */
	emlxs_timer_check_linkup(hba);

	/* Check the ports */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		/* Check for node gate timeouts */
		emlxs_timer_check_nodes(port, flag);

		/* Check for tape discovery timeout */
		emlxs_timer_check_discovery(port);

		/* Check for UB timeouts */
		emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
		/* Check for DHCHAP authentication timeouts */
		emlxs_timer_check_dhchap(port);
#endif /* DHCHAP_SUPPORT */

	}

	/* Check for IO channel service timeouts */
	/* Always do this last */
	emlxs_timer_check_channels(hba, flag);

	return;

} /* emlxs_timer_checks() */
213 
214 
/*
 * Arm the periodic timer for this adapter if it is not already
 * running.  The timer_id is rechecked under EMLXS_TIMER_LOCK
 * (double-checked pattern) to avoid scheduling two callouts.
 */
extern void
emlxs_timer_start(emlxs_hba_t *hba)
{
	/* Fast path: timer already armed */
	if (hba->timer_id) {
		return;
	}

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);
	if (!hba->timer_id) {
		hba->timer_flags = 0;
		hba->timer_id =
		    timeout(emlxs_timer, (void *)hba, drv_usectohz(1000000));
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

} /* emlxs_timer_start() */
232 
233 
/*
 * Post a kill request to the periodic timer and wait (polling every
 * 500ms with the lock dropped) until the running callout observes the
 * request and clears timer_id.  Returns with the timer fully stopped.
 */
extern void
emlxs_timer_stop(emlxs_hba_t *hba)
{
	if (!hba->timer_id) {
		return;
	}

	mutex_enter(&EMLXS_TIMER_LOCK);
	hba->timer_flags |= EMLXS_TIMER_KILL;

	/* The callout itself zeroes timer_id when it sees the kill flag */
	while (hba->timer_id) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		delay(drv_usectohz(500000));
		mutex_enter(&EMLXS_TIMER_LOCK);
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer_stop() */
254 
255 
/*
 * Scan for timed-out packets, first on the software transmit queues
 * (completed locally with abort/link-down status) and then on the
 * chip's outstanding-IO table (escalating abort/close/reset via
 * emlxs_pkt_chip_timeout()).  Runs every EMLXS_PKT_PERIOD seconds.
 *
 * Returns 0 normally; 1 if a link reset was spawned, 2 if an adapter
 * reset was spawned (in which case the caller skips further checks).
 */
static uint32_t
emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	Q tmo;
	int32_t channelno;
	CHANNEL *cp;
	NODELIST *nlp;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb;
	IOCBQ *iocbq;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt;
	Q abort;
	uint32_t iotag;
	uint32_t rc;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return (0);
	}

	/* Only run once every EMLXS_PKT_PERIOD seconds */
	if (hba->pkt_timer > hba->timer_tics) {
		return (0);
	}

	hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;


	bzero((void *)&tmo, sizeof (Q));

	/*
	 * We must hold the locks here because we never know when an iocb
	 * will be removed out from under us
	 */

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		/* Scan the tx queues for each active node on the channel */

		/* Get the first node */
		nlp = (NODELIST *)cp->nodeq.q_first;

		while (nlp) {
			/* Scan the node's priority tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now dequeue it */
					if (next == NULL) {
						nlp->nlp_ptx[channelno].q_last =
						    (uint8_t *)prev;
					}

					if (prev == NULL) {
						nlp->nlp_ptx[channelno].
						    q_first = (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_ptx[channelno].q_cnt--;

					/* Add this iocb to our local */
					/* timeout queue */

					/*
					 * This way we don't hold the TX_CHANNEL
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first =
						    (uint8_t *)iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */


			/* Scan the node's tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now dequeue it */
					if (next == NULL) {
						nlp->nlp_tx[channelno].q_last =
						    (uint8_t *)prev;
					}

					if (prev == NULL) {
						nlp->nlp_tx[channelno].q_first =
						    (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_tx[channelno].q_cnt--;

					/* Add this iocb to our local */
					/* timeout queue */

					/*
					 * This way we don't hold the TX_CHANNEL
					 * lock too long
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first =
						    (uint8_t *)iocbq;
						tmo.q_last =
						    (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */

			/* nodeq is circular; stop after the last node */
			if (nlp == (NODELIST *)cp->nodeq.q_last) {
				nlp = NULL;
			} else {
				nlp = nlp->nlp_next[channelno];
			}

		}	/* while (nlp) */

	}	/* end of for */

	/* Now cleanup the iocb's */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;
		channelno = ((CHANNEL *)iocbq->channel)->channelno;
		sbp = iocbq->sbp;
		if (sbp && (sbp != STALE_PACKET)) {
			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				/* SLI4: release the XRI directly */
				hba->fc_table[sbp->iotag] = NULL;
				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
			} else {
				(void) emlxs_unregister_pkt(
				    (CHANNEL *)iocbq->channel,
				    iocb->ULPIOTAG, 0);
			}

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_TIMEOUT;
			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;

	}	/* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now complete the transmit timeouts outside the locks */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/* Warning: Some FCT sbp's don't have */
			/* fc_packet objects */
			pkt = PRIV2PKT(sbp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "TXQ abort: sbp=%p iotag=%x tmo=%d", sbp,
			    sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			/* Complete with a status reflecting link state */
			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_TIMEOUT, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}

		iocbq = next;

	}	/* end of while */



	/* Now check the chip */
	bzero((void *)&abort, sizeof (Q));

	/* Check the HBA for outstanding IOs */
	rc = 0;
	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
		sbp = hba->fc_table[iotag];
		if (sbp && (sbp != STALE_PACKET) &&
		    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
		    (hba->timer_tics >= sbp->ticks)) {
			/* Escalate abort/close/reset for this packet; */
			/* abort iocbs are collected on the local queue */
			rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
			    sbp, &abort, flag);

			if (rc) {
				break;
			}
		}
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Now put the iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	/* Now trigger IO channel service to send these abort iocbq */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}
		cp = &hba->chan[channelno];

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
	}

	if (rc == 1) {
		/* Spawn a thread to reset the link */
		emlxs_thread_spawn(hba, emlxs_reset_link_thread, NULL, NULL);
	} else if (rc == 2) {
		/* Spawn a thread to reset the adapter */
		emlxs_thread_spawn(hba, emlxs_restart_thread, NULL, NULL);
	}

	return (rc);

} /* emlxs_timer_check_pkts() */
553 
554 
555 static void
556 emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag)
557 {
558 	emlxs_port_t *port = &PPORT;
559 	emlxs_config_t *cfg = &CFG;
560 	int32_t channelno;
561 	CHANNEL *cp;
562 	uint32_t logit;
563 
564 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
565 		return;
566 	}
567 
568 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
569 		cp = &hba->chan[channelno];
570 
571 		logit = 0;
572 
573 		/* Check for channel timeout now */
574 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
575 		if (cp->timeout && (hba->timer_tics >= cp->timeout)) {
576 			/* Check if there is work to do on channel and */
577 			/* the link is still up */
578 			if (cp->nodeq.q_first) {
579 				flag[channelno] = 1;
580 				cp->timeout = hba->timer_tics + 10;
581 
582 				if (hba->state >= FC_LINK_UP) {
583 					logit = 1;
584 				}
585 			} else {
586 				cp->timeout = 0;
587 			}
588 		}
589 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
590 
591 		if (logit) {
592 			EMLXS_MSGF(EMLXS_CONTEXT,
593 			    &emlxs_chan_watchdog_msg,
594 			    "IO Channel %d cnt=%d,%d",
595 			    channelno,
596 			    hba->channel_tx_count,
597 			    hba->io_count);
598 		}
599 
600 		/*
601 		 * If IO channel flag is set, request iocb servicing
602 		 * here to send any iocb's that may still be queued
603 		 */
604 		if (flag[channelno]) {
605 			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
606 		}
607 	}
608 
609 	return;
610 
611 } /* emlxs_timer_check_channels() */
612 
613 
/*
 * Scan all nodes on this port for expired per-channel node gate timers
 * and expired forced-RSCN timers.  Because emlxs_node_timeout() cannot
 * be called with node_rwlock held, the table is rescanned from the top
 * after each timeout is serviced, until no expired timer remains.
 */
static void
emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	int32_t channelno;

	for (;;) {
		/* Check node gate flag for expiration */
		found = 0;

		/*
		 * We need to lock, scan, and unlock because we can't hold the
		 * lock while we call node_open
		 */
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				for (channelno = 0;
				    channelno < hba->chan_count;
				    channelno++) {
					/* Check if the node timer is active */
					/* and if timer has expired */
					if (nlp->nlp_tics[channelno] &&
					    (hba->timer_tics >=
					    nlp->nlp_tics[channelno])) {
						/* If so, set the flag and */
						/* break out */
						found = 1;
						flag[channelno] = 1;
						break;
					}
				}

				if (nlp->nlp_force_rscn &&
				    (hba->timer_tics >= nlp->nlp_force_rscn)) {
					nlp->nlp_force_rscn = 0;
					/*
					 * Generate an RSCN to
					 * wakeup ULP
					 */
					(void) emlxs_generate_rscn(port,
					    nlp->nlp_DID);
				}

				if (found) {
					break;
				}

				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}

		}
		rw_exit(&port->node_rwlock);

		/* No expired node timers left; done */
		if (!found) {
			break;
		}

		/* nlp/channelno identify the expired timer found above */
		emlxs_node_timeout(port, nlp, channelno);
	}

} /* emlxs_timer_check_nodes() */
684 
685 
686 static void
687 emlxs_timer_check_loopback(emlxs_hba_t *hba)
688 {
689 	emlxs_port_t *port = &PPORT;
690 	emlxs_config_t *cfg = &CFG;
691 	int32_t reset = 0;
692 
693 	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
694 		return;
695 	}
696 
697 	/* Check the loopback timer for expiration */
698 	mutex_enter(&EMLXS_PORT_LOCK);
699 
700 	if (!hba->loopback_tics || (hba->timer_tics < hba->loopback_tics)) {
701 		mutex_exit(&EMLXS_PORT_LOCK);
702 		return;
703 	}
704 
705 	hba->loopback_tics = 0;
706 
707 	if (hba->flag & FC_LOOPBACK_MODE) {
708 		reset = 1;
709 	}
710 
711 	mutex_exit(&EMLXS_PORT_LOCK);
712 
713 	if (reset) {
714 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
715 		    "LOOPBACK_MODE: Expired. Resetting...");
716 		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
717 	}
718 
719 	return;
720 
721 } /* emlxs_timer_check_loopback() */
722 
723 
724 static void
725 emlxs_timer_check_linkup(emlxs_hba_t *hba)
726 {
727 	emlxs_port_t *port = &PPORT;
728 	uint32_t linkup;
729 
730 	/* Check if all mbox commands from previous activity are processed */
731 	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
732 		mutex_enter(&EMLXS_MBOX_LOCK);
733 		if (hba->mbox_queue.q_first) {
734 			mutex_exit(&EMLXS_MBOX_LOCK);
735 			return;
736 		}
737 		mutex_exit(&EMLXS_MBOX_LOCK);
738 	}
739 
740 	/* Check the linkup timer for expiration */
741 	mutex_enter(&EMLXS_PORT_LOCK);
742 	linkup = 0;
743 	if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
744 		hba->linkup_timer = 0;
745 
746 		/* Make sure link is still ready */
747 		if (hba->state >= FC_LINK_UP) {
748 			linkup = 1;
749 		}
750 	}
751 	mutex_exit(&EMLXS_PORT_LOCK);
752 
753 	/* Make the linkup callback */
754 	if (linkup) {
755 		emlxs_port_online(port);
756 	}
757 	return;
758 
759 } /* emlxs_timer_check_linkup() */
760 
761 
762 static void
763 emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
764 {
765 	emlxs_port_t *port = &PPORT;
766 	MAILBOXQ *mbq;
767 	emlxs_config_t *cfg = &CFG;
768 	int rc;
769 
770 	if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
771 		return;
772 	}
773 
774 	if (hba->timer_tics < hba->heartbeat_timer) {
775 		return;
776 	}
777 
778 	hba->heartbeat_timer = hba->timer_tics + 5;
779 
780 	/* Return if adapter interrupts have occurred */
781 	if (hba->heartbeat_flag) {
782 		hba->heartbeat_flag = 0;
783 		return;
784 	}
785 	/* No adapter interrupts have occured for 5 seconds now */
786 
787 	/* Return if mailbox is busy */
788 	/* This means the mailbox timer routine is watching for problems */
789 	if (hba->mbox_timer) {
790 		return;
791 	}
792 
793 	/* Return if heartbeat is still outstanding */
794 	if (hba->heartbeat_active) {
795 		return;
796 	}
797 
798 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
799 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
800 		    "Unable to allocate heartbeat mailbox.");
801 		return;
802 	}
803 
804 	emlxs_mb_heartbeat(hba, mbq);
805 	hba->heartbeat_active = 1;
806 
807 	rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
808 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
809 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
810 	}
811 
812 	return;
813 
814 } /* emlxs_timer_check_heartbeat() */
815 
816 
817 static void
818 emlxs_timer_check_fw_update(emlxs_hba_t *hba)
819 {
820 	emlxs_port_t *port = &PPORT;
821 
822 	if (!(hba->fw_flag & FW_UPDATE_NEEDED)) {
823 		hba->fw_timer = 0;
824 		return;
825 	}
826 
827 	if (hba->timer_tics < hba->fw_timer) {
828 		return;
829 	}
830 
831 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fw_update_msg,
832 	"A manual HBA reset or link reset (using luxadm or fcadm) "
833 	"is required.");
834 
835 	/* Set timer for 24 hours */
836 	hba->fw_timer = hba->timer_tics + (60 * 60 * 24);
837 
838 	return;
839 
840 } /* emlxs_timer_check_fw_update() */
841 
842 
/*
 * Check the (tape) discovery timer.  When it expires with the link
 * still up, flush any FCP2 nodes that remain closed (dropping and
 * reacquiring EMLXS_PORT_LOCK around each unreg), then issue a
 * CLEAR_LA mailbox command (pre-SLI4 only) to complete discovery.
 */
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	int32_t send_clear_la;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	MAILBOXQ *mbox;
	int rc;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Check the discovery timer for expiration */
	send_clear_la = 0;
	mutex_enter(&EMLXS_PORT_LOCK);
	while (hba->discovery_timer &&
	    (hba->timer_tics >= hba->discovery_timer) &&
	    (hba->state == FC_LINK_UP)) {
		send_clear_la = 1;

		/* Perform a flush on fcp2 nodes that are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[hba->channel_fcp] &
				    NLP_CLOSED)) {
					found = 1;
					break;

				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		/* No more closed FCP2 nodes; stop flushing */
		if (!found) {
			break;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
		    "FCP2 device (did=%06x) missing. Flushing...",
		    nlp->nlp_DID);

		/* Drop the port lock while unregistering the node */
		mutex_exit(&EMLXS_PORT_LOCK);

		(void) emlxs_mb_unreg_did(port, nlp->nlp_DID, NULL, NULL, NULL);

		mutex_enter(&EMLXS_PORT_LOCK);

	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Try to send clear link attention, if needed */
	if ((hba->sli_mode < EMLXS_HBA_SLI4_MODE) && (send_clear_la == 1) &&
	    (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
		mutex_enter(&EMLXS_PORT_LOCK);

		/*
		 * If state is not FC_LINK_UP, then either the link has gone
		 * down or a FC_CLEAR_LA has already been issued
		 */
		if (hba->state != FC_LINK_UP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbox);
		} else {
			/* Change state and clear discovery timer */
			EMLXS_STATE_CHANGE_LOCKED(hba, FC_CLEAR_LA);

			hba->discovery_timer = 0;

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Prepare and send the CLEAR_LA command */
			emlxs_mb_clear_la(hba, mbox);

			rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
	}

	return;

} /* emlxs_timer_check_discovery()  */
940 
941 
942 static void
943 emlxs_timer_check_ub(emlxs_port_t *port)
944 {
945 	emlxs_hba_t *hba = HBA;
946 	emlxs_unsol_buf_t *ulistp;
947 	fc_unsol_buf_t *ubp;
948 	emlxs_ub_priv_t *ub_priv;
949 	uint32_t i;
950 
951 	if (port->ub_timer > hba->timer_tics) {
952 		return;
953 	}
954 
955 	port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;
956 
957 	/* Check the unsolicited buffers */
958 	mutex_enter(&EMLXS_UB_LOCK);
959 
960 	ulistp = port->ub_pool;
961 	while (ulistp) {
962 		/* Check buffers in this pool */
963 		for (i = 0; i < ulistp->pool_nentries; i++) {
964 			ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
965 			ub_priv = ubp->ub_fca_private;
966 
967 			if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
968 				continue;
969 			}
970 
971 			/* If buffer has timed out, print message and */
972 			/* increase timeout */
973 			if ((ub_priv->time + ub_priv->timeout) <=
974 			    hba->timer_tics) {
975 				ub_priv->flags |= EMLXS_UB_TIMEOUT;
976 
977 				EMLXS_MSGF(EMLXS_CONTEXT,
978 				    &emlxs_sfs_debug_msg,
979 				    "Stale UB buffer detected (%d mins): "
980 				    "buffer=%p (%x,%x,%x,%x)",
981 				    (ub_priv->timeout / 60), ubp,
982 				    ubp->ub_frame.type, ubp->ub_frame.s_id,
983 				    ubp->ub_frame.ox_id, ubp->ub_frame.rx_id);
984 
985 				/* Increase timeout period */
986 
987 				/* If timeout was 5 mins or less, */
988 				/* increase it to 10 mins */
989 				if (ub_priv->timeout <= (5 * 60)) {
990 					ub_priv->timeout = (10 * 60);
991 				}
992 				/* If timeout was 10 mins or less, */
993 				/* increase it to 30 mins */
994 				else if (ub_priv->timeout <= (10 * 60)) {
995 					ub_priv->timeout = (30 * 60);
996 				}
997 				/* Otherwise double it. */
998 				else {
999 					ub_priv->timeout *= 2;
1000 				}
1001 			}
1002 		}
1003 
1004 		ulistp = ulistp->pool_next;
1005 	}
1006 
1007 	mutex_exit(&EMLXS_UB_LOCK);
1008 
1009 	return;
1010 
1011 } /* emlxs_timer_check_ub()  */
1012 
1013 
/*
 * Handle a timed-out packet that is outstanding on the chip.  The
 * response escalates with each attempt: (0) abort or close the XRI,
 * (1) close the XRI, (2) request a link reset, (3+) request an
 * adapter reset.  Any abort/close IOCB built here is appended to
 * *abortq for the caller to queue, and flag[] marks the channel as
 * needing servicing.
 *
 * Returns 0 (handled), 1 (reset link), or 2 (reset adapter).
 *
 * EMLXS_FCTAB_LOCK must be held to call this.
 */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abortq,
    uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	CHANNEL *cp = (CHANNEL *)sbp->channel;
	IOCBQ *iocbq = NULL;
	fc_packet_t *pkt;
	uint32_t rc = 0;

	mutex_enter(&sbp->mtx);

	/* Warning: Some FCT sbp's don't have fc_packet objects */
	pkt = PRIV2PKT(sbp);

	switch (sbp->abort_attempts) {
	case 0:

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Aborting. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

			/* The adapter will make 2 attempts to send ABTS */
			/* with 2*ratov timeout each time */
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
		} else {
			/* Link is down; just close the exchange */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ:1:Closing. sbp=%p iotag=%x tmo=%d flags=%x",
			    sbp, sbp->iotag,
			    (pkt) ? pkt->pkt_timeout : 0, sbp->pkt_flags);

			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, cp);

			sbp->ticks = hba->timer_tics + 30;
		}

		/* set the flags */
		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 1:

		/* Abort did not complete in time; force-close the XRI */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 2:Closing. sbp=%p iotag=%x", sbp, sbp->iotag);

		iocbq =
		    emlxs_create_close_xri_cn(port, sbp->node, sbp->iotag,
		    cp);

		sbp->ticks = hba->timer_tics + 30;

		flag[cp->channelno] = 1;
		rc = 0;

		break;

	case 2:

		/* Close did not complete either; escalate to link reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 3:Resetting link. sbp=%p iotag=%x", sbp,
		    sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 1;

		break;

	default:

		/* Nothing worked; escalate to a full adapter reset */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: %d:Resetting adapter. sbp=%p iotag=%x",
		    sbp->abort_attempts, sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 2;

		break;
	}

	sbp->abort_attempts++;
	mutex_exit(&sbp->mtx);

	/* Append any abort/close IOCB to the caller's queue */
	if (iocbq) {
		if (abortq->q_first) {
			((IOCBQ *)abortq->q_last)->next = iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt++;
		} else {
			abortq->q_first = (uint8_t *)iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt = 1;
		}
		iocbq->next = NULL;
	}

	return (rc);

} /* emlxs_pkt_chip_timeout() */
1127 
1128 
#ifdef TX_WATCHDOG

/*
 * Diagnostic watchdog (built only with TX_WATCHDOG): audit the
 * outstanding-IO table against the per-node transmit queues.  Any
 * packet flagged PACKET_IN_TXQ that cannot be found on its node's TX
 * queue is first marked PACKET_STALE; if it is still missing on the
 * next pass, it is requeued via emlxs_tx_put().
 *
 * Fixes vs. the previous (bit-rotted) version:
 *  - '(CHANNEL *)(sbp->channel)->channelno' applied the cast AFTER the
 *    '->' dereference ('->' binds tighter than a cast); the cast must
 *    wrap sbp->channel itself.
 *  - abort.q_first/q_last assignments now carry the (uint8_t *) cast
 *    used for Q fields everywhere else in this file.
 *  - Removed locals that were computed but never used (pkt/cmd/did,
 *    iocb, bmp, cp).
 */
static void
emlxs_tx_watchdog(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	NODELIST *nlp;
	uint32_t channelno;
	IOCBQ *next;
	IOCBQ *iocbq;
	uint32_t found;
	Q abort;
	uint32_t iotag;
	emlxs_buf_t *sbp;

	bzero((void *)&abort, sizeof (Q));

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
		sbp = hba->fc_table[iotag];
		if (sbp && (sbp != STALE_PACKET) &&
		    (sbp->pkt_flags & PACKET_IN_TXQ)) {
			nlp = sbp->node;

			/* Cast must apply to the pointer, not the member */
			channelno = ((CHANNEL *)sbp->channel)->channelno;

			if (sbp->iocbq.flag & IOCB_PRIORITY) {
				iocbq = (IOCBQ *)
				    nlp->nlp_ptx[channelno].q_first;
			} else {
				iocbq = (IOCBQ *)
				    nlp->nlp_tx[channelno].q_first;
			}

			/* Find a matching entry on the node's queue */
			found = 0;
			while (iocbq) {
				if (iocbq == &sbp->iocbq) {
					found = 1;
					break;
				}

				iocbq = (IOCBQ *)iocbq->next;
			}

			if (!found) {
				if (!(sbp->pkt_flags & PACKET_STALE)) {
					/* First miss: mark stale and */
					/* recheck on the next pass */
					mutex_enter(&sbp->mtx);
					sbp->pkt_flags |= PACKET_STALE;
					mutex_exit(&sbp->mtx);
				} else {
					/* Second miss: collect for requeue */
					if (abort.q_first == 0) {
						abort.q_first =
						    (uint8_t *)&sbp->iocbq;
					} else {
						((IOCBQ *)abort.q_last)->
						    next = &sbp->iocbq;
					}

					abort.q_last =
					    (uint8_t *)&sbp->iocbq;
					abort.q_cnt++;
				}

			} else {
				/* Found on queue; clear any stale mark */
				if ((sbp->pkt_flags & PACKET_STALE)) {
					mutex_enter(&sbp->mtx);
					sbp->pkt_flags &= ~PACKET_STALE;
					mutex_exit(&sbp->mtx);
				}
			}
		}
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Requeue every packet that was stale two passes in a row */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		next = (IOCBQ *)iocbq->next;
		iocbq->next = NULL;

		emlxs_tx_put(iocbq, 0);

		iocbq = next;

	}	/* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	return;

} /* emlxs_tx_watchdog() */

#endif /* TX_WATCHDOG */
1243 
1244 
#ifdef DHCHAP_SUPPORT

/*
 * Check all nodes on this port for expired DHCHAP authentication
 * timers and invoke the matching timeout handler.
 *
 * Fix: walk each hash chain via nlp_list_next.  The previous version
 * only examined the FIRST node in each hash bucket, so nodes deeper in
 * a chain never had their authrsp/reauth timeouts serviced.  Sibling
 * scanners (emlxs_timer_check_nodes, emlxs_timer_check_discovery)
 * walk the full chain the same way.
 *
 * NOTE(review): like the original, this scan runs without
 * port->node_rwlock; confirm the timer context makes that safe.
 */
static void
emlxs_timer_check_dhchap(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint32_t i;
	NODELIST *ndlp;

	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		for (ndlp = port->node_table[i]; ndlp != NULL;
		    ndlp = ndlp->nlp_list_next) {

			/* Check authentication response timeout */
			if (ndlp->node_dhc.nlp_authrsp_tmo &&
			    (hba->timer_tics >=
			    ndlp->node_dhc.nlp_authrsp_tmo)) {
				/* Trigger authresp timeout handler */
				(void) emlxs_dhc_authrsp_timeout(port,
				    ndlp, NULL);
			}

			/* Check reauthentication timeout */
			if (ndlp->node_dhc.nlp_reauth_tmo &&
			    (hba->timer_tics >=
			    ndlp->node_dhc.nlp_reauth_tmo)) {
				/* Trigger reauth timeout handler */
				emlxs_dhc_reauth_timeout(port, NULL, ndlp);
			}
		}
	}
	return;

} /* emlxs_timer_check_dhchap */

#endif /* DHCHAP_SUPPORT */
1280