/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Emulex.  All rights reserved.
 * Use is subject to License terms.
 */


#include "emlxs.h"

/* Timer periods in seconds */
#define	EMLXS_TIMER_PERIOD		1	/* secs */
#define	EMLXS_PKT_PERIOD		5	/* secs */
#define	EMLXS_UB_PERIOD			60	/* secs */

EMLXS_MSG_DEF(EMLXS_CLOCK_C);


#ifdef DFC_SUPPORT
static void emlxs_timer_check_loopback(emlxs_hba_t *hba);
#endif	/* DFC_SUPPORT */

#ifdef DHCHAP_SUPPORT
static void emlxs_timer_check_dhchap(emlxs_port_t *port);
#endif	/* DHCHAP_SUPPORT */

static void emlxs_timer(void *arg);
static void emlxs_timer_check_heartbeat(emlxs_hba_t *hba);
static uint32_t emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag);
static void emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag);
static void emlxs_timer_check_linkup(emlxs_hba_t *hba);
static void emlxs_timer_check_mbox(emlxs_hba_t *hba);
static void emlxs_timer_check_discovery(emlxs_port_t *port);
static void emlxs_timer_check_ub(emlxs_port_t *port);
static void emlxs_timer_check_rings(emlxs_hba_t *hba, uint8_t *flag);
static uint32_t emlxs_pkt_chip_timeout(emlxs_port_t *port,
	emlxs_buf_t *sbp, Q *abortq, uint8_t *flag);

#ifdef TX_WATCHDOG
static void emlxs_tx_watchdog(emlxs_hba_t *hba);
#endif	/* TX_WATCHDOG */

extern clock_t
emlxs_timeout(emlxs_hba_t *hba, uint32_t timeout)
{
	emlxs_config_t *cfg = &CFG;
	clock_t time;

	/* Set thread timeout */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		(void) drv_getparm(LBOLT, &time);
		time += (timeout * drv_usectohz(1000000));
	} else {
		time = -1;
	}

	return (time);

} /* emlxs_timeout() */


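/*
 * Usage sketch for emlxs_timeout() (hypothetical caller, not part of
 * this file): a 30 second deadline for a mailbox wait would be built
 * as
 *
 *	clock_t deadline = emlxs_timeout(hba, 30);
 *
 * which is the current lbolt plus 30 seconds worth of clock ticks
 * (e.g. 3000 ticks at hz=100). A caller would typically hand the
 * result to cv_timedwait(9F), falling back to an untimed cv_wait(9F)
 * when -1 (timeouts disabled) is returned.
 */

/*
 * emlxs_timer() is the driver's periodic housekeeping routine. It
 * rearms itself through timeout(9F) every EMLXS_TIMER_PERIOD second
 * until EMLXS_TIMER_KILL is set. EMLXS_TIMER_BUSY guarantees that
 * only one instance runs at a time, and EMLXS_TIMER_ENDED tells
 * emlxs_timer_stop() that the callback has fully retired.
 */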
static void
emlxs_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;
	emlxs_port_t *port = &PPORT;
	uint8_t flag[MAX_RINGS];
	uint32_t i;
	uint32_t rc;

	if (!hba->timer_id) {
		return;
	}
	mutex_enter(&EMLXS_TIMER_LOCK);

	/* Only one timer thread is allowed */
	if (hba->timer_flags & EMLXS_TIMER_BUSY) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}
	/* Check if a kill request has been made */
	if (hba->timer_flags & EMLXS_TIMER_KILL) {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;

		mutex_exit(&EMLXS_TIMER_LOCK);
		return;
	}
	hba->timer_flags |= (EMLXS_TIMER_BUSY | EMLXS_TIMER_STARTED);
	hba->timer_tics = DRV_TIME;

	mutex_exit(&EMLXS_TIMER_LOCK);

	/* Exit if we are still initializing */
	if (hba->state < FC_LINK_DOWN) {
		goto done;
	}
	bzero((void *) flag, sizeof (flag));

	/* Check for mailbox timeout */
	emlxs_timer_check_mbox(hba);

	/* Check heartbeat timer */
	emlxs_timer_check_heartbeat(hba);

#ifdef IDLE_TIMER
	emlxs_pm_idle_timer(hba);
#endif	/* IDLE_TIMER */

#ifdef DFC_SUPPORT
	/* Check for loopback timeouts */
	emlxs_timer_check_loopback(hba);
#endif	/* DFC_SUPPORT */

	/* Check for packet timeouts */
	rc = emlxs_timer_check_pkts(hba, flag);

	if (rc) {
		/* Link or adapter is being reset */
		goto done;
	}
	/* Check for linkup timeout */
	emlxs_timer_check_linkup(hba);

	/* Check the ports */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}
		/* Check for node gate timeouts */
		emlxs_timer_check_nodes(port, flag);

		/* Check for tape discovery timeout */
		emlxs_timer_check_discovery(port);

		/* Check for UB timeouts */
		emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
		/* Check for DHCHAP authentication timeouts */
		emlxs_timer_check_dhchap(port);
#endif	/* DHCHAP_SUPPORT */

	}

	/* Check for ring service timeouts */
	/* Always do this last */
	emlxs_timer_check_rings(hba, flag);

done:

	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);

	hba->timer_flags &= ~EMLXS_TIMER_BUSY;

	/* If timer is still enabled, restart it */
	if (!(hba->timer_flags & EMLXS_TIMER_KILL)) {
		hba->timer_id = timeout(emlxs_timer, (void *)hba,
		    (EMLXS_TIMER_PERIOD * drv_usectohz(1000000)));
	} else {
		hba->timer_id = 0;
		hba->timer_flags |= EMLXS_TIMER_ENDED;
	}

	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer() */


extern void
emlxs_timer_start(emlxs_hba_t *hba)
{
	if (hba->timer_id) {
		return;
	}
	/* Restart the timer */
	mutex_enter(&EMLXS_TIMER_LOCK);
	if (!hba->timer_id) {
		hba->timer_flags = 0;
		hba->timer_id = timeout(emlxs_timer, (void *)hba,
		    drv_usectohz(1000000));
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

} /* emlxs_timer_start() */


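/*
 * emlxs_timer_stop() does not call untimeout(9F); instead it sets
 * EMLXS_TIMER_KILL and then polls timer_id every 500ms. The timer
 * callback clears timer_id itself once it sees the kill flag, which
 * avoids racing against a callback that is already in flight.
 */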
extern void
emlxs_timer_stop(emlxs_hba_t *hba)
{
	if (!hba->timer_id) {
		return;
	}
	mutex_enter(&EMLXS_TIMER_LOCK);
	hba->timer_flags |= EMLXS_TIMER_KILL;

	while (hba->timer_id) {
		mutex_exit(&EMLXS_TIMER_LOCK);
		delay(drv_usectohz(500000));
		mutex_enter(&EMLXS_TIMER_LOCK);
	}
	mutex_exit(&EMLXS_TIMER_LOCK);

	return;

} /* emlxs_timer_stop() */


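/*
 * emlxs_timer_check_pkts() runs in three phases: (1) under the
 * RINGTX lock, expired IOCBs are unlinked from each node's priority
 * and normal tx queues onto a local timeout queue; (2) outside the
 * lock, those packets are completed with an abort/link-down status;
 * (3) each ring's outstanding-I/O table (fc_table) is scanned and
 * expired entries are escalated via emlxs_pkt_chip_timeout(). A
 * nonzero return means a link (1) or adapter (2) reset thread has
 * been spawned.
 */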
static uint32_t
emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	/* emlxs_port_t *vport; */
	emlxs_config_t *cfg = &CFG;
	Q tmo;
	int32_t ringno;
	RING *rp;
	NODELIST *nlp;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb;
	IOCBQ *iocbq;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt;
	Q abort;
	uint32_t iotag;
	uint32_t rc;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return (0);
	}
	if (hba->pkt_timer > hba->timer_tics) {
		return (0);
	}
	hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;


	bzero((void *) &tmo, sizeof (Q));

	/*
	 * We must hold the locks here because we never know when an iocb
	 * will be removed out from under us
	 */

	mutex_enter(&EMLXS_RINGTX_LOCK);

	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		/* Scan the tx queues for each active node on the ring */

		/* Get the first node */
		nlp = (NODELIST *)rp->nodeq.q_first;

		while (nlp) {
			/* Scan the node's priority tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_ptx[ringno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now dequeue it */
					if (next == NULL) {
						nlp->nlp_ptx[ringno].q_last =
						    (uint8_t *)prev;
					}
					if (prev == NULL) {
						nlp->nlp_ptx[ringno].q_first =
						    (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_ptx[ringno].q_cnt--;

					/*
					 * Add this iocb to our local
					 * timeout Q. This way we don't
					 * hold the RINGTX lock too long.
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last = (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first = (uint8_t *)iocbq;
						tmo.q_last = (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */


			/* Scan the node's tx queue */
			prev = NULL;
			iocbq = (IOCBQ *)nlp->nlp_tx[ringno].q_first;

			while (iocbq) {
				next = (IOCBQ *)iocbq->next;
				iocb = &iocbq->iocb;
				sbp = (emlxs_buf_t *)iocbq->sbp;

				/* Check if iocb has timed out */
				if (sbp && hba->timer_tics >= sbp->ticks) {
					/* iocb timed out, now dequeue it */
					if (next == NULL) {
						nlp->nlp_tx[ringno].q_last =
						    (uint8_t *)prev;
					}
					if (prev == NULL) {
						nlp->nlp_tx[ringno].q_first =
						    (uint8_t *)next;
					} else {
						prev->next = next;
					}

					iocbq->next = NULL;
					nlp->nlp_tx[ringno].q_cnt--;

					/*
					 * Add this iocb to our local
					 * timeout Q. This way we don't
					 * hold the RINGTX lock too long.
					 */

					/*
					 * EMLXS_MSGF(EMLXS_CONTEXT,
					 * &emlxs_pkt_timeout_msg, "TXQ
					 * abort: Removing iotag=%x qcnt=%d
					 * pqcnt=%d", sbp->iotag,
					 * nlp->nlp_tx[ringno].q_cnt,
					 * nlp->nlp_ptx[ringno].q_cnt);
					 */

					if (tmo.q_first) {
						((IOCBQ *)tmo.q_last)->next =
						    iocbq;
						tmo.q_last = (uint8_t *)iocbq;
						tmo.q_cnt++;
					} else {
						tmo.q_first = (uint8_t *)iocbq;
						tmo.q_last = (uint8_t *)iocbq;
						tmo.q_cnt = 1;
					}
					iocbq->next = NULL;

				} else {
					prev = iocbq;
				}

				iocbq = next;

			}	/* while (iocbq) */

			if (nlp == (NODELIST *)rp->nodeq.q_last) {
				nlp = NULL;
			} else {
				nlp = nlp->nlp_next[ringno];
			}

		}	/* while (nlp) */

	}	/* end of for */

	/* Now cleanup the iocb's */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;
		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
		ringno = ((RING *)iocbq->ring)->ringno;

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			if (sbp->pkt_flags & PACKET_IN_TXQ) {
				sbp->pkt_flags &= ~PACKET_IN_TXQ;
				hba->ring_tx_count[ringno]--;
			}
			sbp->pkt_flags |= PACKET_IN_TIMEOUT;
			mutex_exit(&sbp->mtx);
		}
		iocbq = (IOCBQ *)iocbq->next;

	}	/* end of while */

	mutex_exit(&EMLXS_RINGTX_LOCK);

	/* Now complete the transmit timeouts outside the locks */
	iocbq = (IOCBQ *)tmo.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/*
			 * Warning: Some FCT sbp's don't have fc_packet
			 * objects
			 */
			pkt = PRIV2PKT(sbp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "TXQ abort: sbp=%p iotag=%x tmo=%d", sbp,
			    sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_TIMEOUT, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
		}
		iocbq = next;

	}	/* end of while */


	/* Now check the chip */
	bzero((void *) &abort, sizeof (Q));

	/* Check the rings */
	rc = 0;
	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
			sbp = rp->fc_table[iotag];
			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
			    (hba->timer_tics >= sbp->ticks)) {
				rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
				    sbp, &abort, flag);

				if (rc) {
					break;
				}
			}
		}
		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));

		if (rc) {
			break;
		}
	}

	/* Now put the iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	if (rc == 1) {
		/* Spawn a thread to reset the link */
		(void) thread_create(NULL, 0, emlxs_reset_link_thread,
		    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
	} else if (rc == 2) {
		/* Spawn a thread to reset the adapter */
		(void) thread_create(NULL, 0, emlxs_restart_thread,
		    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);
	}
	return (rc);

} /* emlxs_timer_check_pkts() */


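/*
 * The flag[] array carries one entry per ring. The checks above set
 * a ring's flag whenever that ring may still have IOCBs waiting to
 * be serviced; emlxs_timer_check_rings() runs last so that any work
 * queued during this timer pass gets kicked off before the pass ends.
 */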
static void
emlxs_timer_check_rings(emlxs_hba_t *hba, uint8_t *flag)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t ringno;
	RING *rp;
	uint32_t logit;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}
	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];
		logit = 0;

		/* Check for ring timeout now */
		mutex_enter(&EMLXS_RINGTX_LOCK);
		if (rp->timeout && (hba->timer_tics >= rp->timeout)) {
			/*
			 * Check if there is still work to do on the
			 * ring and the link is still up
			 */
			if (rp->nodeq.q_first) {
				flag[ringno] = 1;
				rp->timeout = hba->timer_tics + 10;

				if (hba->state >= FC_LINK_UP) {
					logit = 1;
				}
			} else {
				rp->timeout = 0;
			}
		}
		mutex_exit(&EMLXS_RINGTX_LOCK);

		if (logit) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_ring_watchdog_msg,
			    "%s host=%d port=%d cnt=%d,%d",
			    emlxs_ring_xlate(ringno),
			    rp->fc_cmdidx, rp->fc_port_cmdidx,
			    hba->ring_tx_count[ringno],
			    hba->io_count[ringno]);
		}

		/*
		 * If ring flag is set, request iocb servicing here to send
		 * any iocb's that may still be queued
		 */
		if (flag[ringno]) {
			emlxs_issue_iocb_cmd(hba, rp, 0);
		}
	}

	return;

} /* emlxs_timer_check_rings() */


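/*
 * Node gate timer check: a node whose ring gate timer has expired is
 * reopened with emlxs_node_open(). The hash table is rescanned from
 * the top after every open because node_rwlock cannot be held across
 * that call.
 */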
static void
emlxs_timer_check_nodes(emlxs_port_t *port, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	int32_t ringno;

	for (;;) {
		/* Check node gate flag for expiration */
		found = 0;

		/*
		 * We need to lock, scan, and unlock because we can't hold
		 * the lock while we call node_open
		 */
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				for (ringno = 0;
				    ringno < hba->ring_count;
				    ringno++) {
					/*
					 * Check if the node timer is active
					 * and if timer has expired
					 */
					if ((nlp->nlp_flag[ringno] &
					    NLP_TIMER) &&
					    nlp->nlp_tics[ringno] &&
					    (hba->timer_tics >=
					    nlp->nlp_tics[ringno])) {
						/*
						 * If so, set the flag and
						 * break out
						 */
						found = 1;
						flag[ringno] = 1;
						break;
					}
				}

				if (found) {
					break;
				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}
		emlxs_node_open(port, nlp, ringno);
	}

} /* emlxs_timer_check_nodes() */


#ifdef DFC_SUPPORT
static void
emlxs_timer_check_loopback(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	int32_t reset = 0;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}
	/* Check the loopback timer for expiration */
	mutex_enter(&EMLXS_PORT_LOCK);

	if (!hba->loopback_tics ||
	    (hba->timer_tics < hba->loopback_tics)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	hba->loopback_tics = 0;

	if (hba->flag & FC_LOOPBACK_MODE) {
		reset = 1;
	}
	mutex_exit(&EMLXS_PORT_LOCK);

	if (reset) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
		    "LOOPBACK_MODE: Expired. Resetting...");
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
	}
	return;

} /* emlxs_timer_check_loopback() */
#endif	/* DFC_SUPPORT */


static void
emlxs_timer_check_linkup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t linkup;

	/* Check the linkup timer for expiration */
	mutex_enter(&EMLXS_PORT_LOCK);
	linkup = 0;
	if (hba->linkup_timer && (hba->timer_tics >= hba->linkup_timer)) {
		hba->linkup_timer = 0;

		/* Make sure link is still ready */
		if (hba->state >= FC_LINK_UP) {
			linkup = 1;
		}
	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Make the linkup callback */
	if (linkup) {
		emlxs_port_online(port);
	}
	return;

} /* emlxs_timer_check_linkup() */


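/*
 * Heartbeat check (5 second period): if no adapter interrupt has
 * been seen since the last pass (heartbeat_flag still clear), and no
 * mailbox is already being watched, a HEARTBEAT mailbox command is
 * issued. Its completion shows the adapter and interrupt path are
 * alive; if it hangs, emlxs_timer_check_mbox() catches the timeout.
 */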
static void
emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *mb;
	emlxs_config_t *cfg = &CFG;

	if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
		return;
	}
	if (hba->timer_tics < hba->heartbeat_timer) {
		return;
	}
	hba->heartbeat_timer = hba->timer_tics + 5;

	/* Return if adapter interrupts have occurred */
	if (hba->heartbeat_flag) {
		hba->heartbeat_flag = 0;
		return;
	}
	/* No adapter interrupts have occurred for 5 seconds now */

	/* Return if mailbox is busy */
	/* This means the mailbox timer routine is watching for problems */
	if (hba->mbox_timer) {
		return;
	}
	/* Return if heartbeat is still outstanding */
	if (hba->heartbeat_active) {
		return;
	}
	if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Unable to allocate heartbeat mailbox.");
		return;
	}
	emlxs_mb_heartbeat(hba, mb);
	hba->heartbeat_active = 1;

	if (emlxs_mb_issue_cmd(hba, mb, MBX_NOWAIT, 0) != MBX_BUSY) {
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
	}
	return;

} /* emlxs_timer_check_heartbeat() */



static void
emlxs_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb;
	uint32_t word0;
	uint32_t offset;
	uint32_t ha_copy = 0;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	hba->mbox_timer = 0;

	/* Mailbox timed out, first check for error attention */
	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba, hba->csr_addr));

	if (ha_copy & HA_ERATT) {
		mutex_exit(&EMLXS_PORT_LOCK);
		emlxs_handle_ff_error(hba);
		return;
	}
	if (hba->mbox_queue_flag) {
		/* Get first word of mailbox */
		if (hba->flag & FC_SLIM2_MODE) {
			mb = FC_SLIM2_MAILBOX(hba);
			offset = (off_t)((uint64_t)(unsigned long)mb -
			    (uint64_t)(unsigned long)hba->slim2.virt);

			emlxs_mpdata_sync(hba->slim2.dma_handle,
			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
			word0 = *((volatile uint32_t *)mb);
			word0 = PCIMEM_LONG(word0);
		} else {
			mb = FC_SLIM1_MAILBOX(hba);
			word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
		}

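		/*
		 * Overlay the copied word onto a MAILBOX template so
		 * the mbxOwner field can be tested without reading
		 * the rest of the mailbox
		 */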
		mb = (MAILBOX *)&word0;

		/* Check if mailbox has actually completed */
		if (mb->mbxOwner == OWN_HOST) {
			/*
			 * Read host attention register to determine
			 * interrupt source
			 */
			ha_copy = READ_CSR_REG(hba,
			    FC_HA_REG(hba, hba->csr_addr));

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox attention missed: %s. "
			    "Forcing event. hc = %x ha = %x",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    hba->hc_copy, ha_copy);

			mutex_exit(&EMLXS_PORT_LOCK);

			(void) emlxs_handle_mb_event(hba);

			return;
		}
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}
	switch (hba->mbox_queue_flag) {
	case MBX_NOWAIT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Mailbox Timeout: %s: Nowait.",
		    emlxs_mb_cmd_xlate(mb->mbxCommand));
		break;

	case MBX_SLEEP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Mailbox Timeout: %s: mb=%p Sleep.",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
		break;

	case MBX_POLL:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Mailbox Timeout: %s: mb=%p Polled.",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
		    "Mailbox Timeout.");
		break;
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	emlxs_ffstate_change_locked(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	(void) thread_create(NULL, 0, emlxs_shutdown_thread, (char *)hba, 0,
	    &p0, TS_RUN, v.v_maxsyspri - 2);

	return;

} /* emlxs_timer_check_mbox() */


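/*
 * Discovery watchdog: while the discovery timer has expired and the
 * link is still FC_LINK_UP, any FCP-2 device whose FCP ring is still
 * closed is flushed with an UNREG_DID. Once no such node remains, a
 * CLEAR_LA is issued to move the adapter out of FC_LINK_UP and
 * finish link bring-up.
 */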
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	int32_t send_clear_la;
	uint32_t found;
	uint32_t i;
	NODELIST *nlp;
	MAILBOXQ *mbox;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}
	/* Check the discovery timer for expiration */
	send_clear_la = 0;
	mutex_enter(&EMLXS_PORT_LOCK);
	while (hba->discovery_timer &&
	    (hba->timer_tics >= hba->discovery_timer) &&
	    (hba->state == FC_LINK_UP)) {
		send_clear_la = 1;

		/* Perform a flush on fcp2 nodes that are still closed */
		found = 0;
		rw_enter(&port->node_rwlock, RW_READER);
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
				    (nlp->nlp_flag[FC_FCP_RING] &
				    NLP_CLOSED)) {
					found = 1;
					break;
				}
				nlp = nlp->nlp_list_next;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
		    "FCP2 device (did=%06x) missing. Flushing...",
		    nlp->nlp_DID);

		mutex_exit(&EMLXS_PORT_LOCK);

		(void) emlxs_mb_unreg_did(port, nlp->nlp_DID, NULL, NULL, NULL);

		mutex_enter(&EMLXS_PORT_LOCK);

	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Try to send clear link attention, if needed */
	if ((send_clear_la == 1) &&
	    (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI))) {
		mutex_enter(&EMLXS_PORT_LOCK);

		/*
		 * If state is not FC_LINK_UP, then either the link has gone
		 * down or an FC_CLEAR_LA has already been issued
		 */
		if (hba->state != FC_LINK_UP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbox);
		} else {
			/* Change state and clear discovery timer */
			emlxs_ffstate_change_locked(hba, FC_CLEAR_LA);

			hba->discovery_timer = 0;

			mutex_exit(&EMLXS_PORT_LOCK);

			/* Prepare and send the CLEAR_LA command */
			emlxs_mb_clear_la(hba, (MAILBOX *)mbox);

			if (emlxs_mb_issue_cmd(hba, (MAILBOX *)mbox,
			    MBX_NOWAIT, 0) != MBX_BUSY) {
				(void) emlxs_mem_put(hba, MEM_MBOX,
				    (uint8_t *)mbox);
			}
		}
	}
	return;

} /* emlxs_timer_check_discovery() */


static void
emlxs_timer_check_ub(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	emlxs_unsol_buf_t *ulistp;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;

	if (port->ub_timer > hba->timer_tics) {
		return;
	}
	port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;

	/* Check the unsolicited buffers */
	mutex_enter(&EMLXS_UB_LOCK);

	ulistp = port->ub_pool;
	while (ulistp) {
		/* Check buffers in this pool */
		for (i = 0; i < ulistp->pool_nentries; i++) {
			ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
			ub_priv = ubp->ub_fca_private;

			if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
				continue;
			}
			/*
			 * If buffer has timed out, print message and
			 * increase timeout
			 */
			if ((ub_priv->time + ub_priv->timeout) <=
			    hba->timer_tics) {
				ub_priv->flags |= EMLXS_UB_TIMEOUT;

				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
				    "Stale UB buffer detected (%d mins): "
				    "buffer = %p (%x,%x,%x,%x)",
				    (ub_priv->timeout / 60),
				    ubp, ubp->ub_frame.type,
				    ubp->ub_frame.s_id,
				    ubp->ub_frame.ox_id,
				    ubp->ub_frame.rx_id);

				/* Increase timeout period */

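				/*
				 * Example escalation: a buffer that
				 * started with a 5 minute timeout
				 * moves to 10, then 30, then 60,
				 * 120, ... minutes on successive
				 * expirations.
				 */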
				/*
				 * If timeout was 5 mins or less, increase it
				 * to 10 mins
				 */
				if (ub_priv->timeout <= (5 * 60)) {
					ub_priv->timeout = (10 * 60);
				}
				/*
				 * If timeout was 10 mins or less, increase
				 * it to 30 mins
				 */
				else if (ub_priv->timeout <= (10 * 60)) {
					ub_priv->timeout = (30 * 60);
				}
				/* Otherwise double it. */
				else {
					ub_priv->timeout *= 2;
				}
			}
		}

		ulistp = ulistp->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return;

} /* emlxs_timer_check_ub() */


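/*
 * Escalation ladder for an I/O stuck on the chip: attempt 0 issues
 * an ABORT_XRI (or a CLOSE_XRI if the link is down), attempt 1
 * issues a CLOSE_XRI, attempt 2 requests a link reset (rc=1), and
 * any further attempt requests a full adapter reset (rc=2).
 */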
/* EMLXS_FCTAB_LOCK must be held to call this */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp,
	Q *abortq, uint8_t *flag)
{
	emlxs_hba_t *hba = HBA;
	RING *rp = (RING *)sbp->ring;
	IOCBQ *iocbq = NULL;
	fc_packet_t *pkt;
	uint32_t rc = 0;

	mutex_enter(&sbp->mtx);

	/* Warning: Some FCT sbp's don't have fc_packet objects */
	pkt = PRIV2PKT(sbp);

	switch (sbp->abort_attempts) {
	case 0:

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ: 1:Aborting. sbp=%p iotag=%x tmo=%d",
			    sbp, sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			iocbq = emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, rp, sbp->class, ABORT_TYPE_ABTS);

			/*
			 * The adapter will make 2 attempts to send ABTS with
			 * 2*ratov timeout each time
			 */
			sbp->ticks = hba->timer_tics + (4 * hba->fc_ratov) + 10;
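			/*
			 * Example: assuming a typical RATOV of 10
			 * seconds, this allows 2 * (2 * 10) + 10 =
			 * 50 seconds before the next escalation.
			 */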
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
			    "chipQ: 1:Closing. sbp=%p iotag=%x tmo=%d",
			    sbp, sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

			iocbq = emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, rp);

			sbp->ticks = hba->timer_tics + 30;
		}

		/* set the flags */
		sbp->pkt_flags |= (PACKET_IN_TIMEOUT | PACKET_XRI_CLOSED);

		flag[rp->ringno] = 1;
		rc = 0;

		break;

	case 1:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 2:Closing. sbp=%p iotag=%x",
		    sbp, sbp->iotag);

		iocbq = emlxs_create_close_xri_cn(port, sbp->node,
		    sbp->iotag, rp);

		sbp->ticks = hba->timer_tics + 30;

		flag[rp->ringno] = 1;
		rc = 0;

		break;

	case 2:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: 3:Resetting link. sbp=%p iotag=%x",
		    sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 1;

		break;

	default:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
		    "chipQ: %d:Resetting adapter. sbp=%p iotag=%x",
		    sbp->abort_attempts, sbp, sbp->iotag);

		sbp->ticks = hba->timer_tics + 60;
		rc = 2;

		break;
	}

	sbp->abort_attempts++;
	mutex_exit(&sbp->mtx);

	if (iocbq) {
		if (abortq->q_first) {
			((IOCBQ *)abortq->q_last)->next = iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt++;
		} else {
			abortq->q_first = (uint8_t *)iocbq;
			abortq->q_last = (uint8_t *)iocbq;
			abortq->q_cnt = 1;
		}
		iocbq->next = NULL;
	}
	return (rc);

} /* emlxs_pkt_chip_timeout() */


#ifdef TX_WATCHDOG

static void
emlxs_tx_watchdog(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	NODELIST *nlp;
	uint32_t ringno;
	RING *rp;
	IOCBQ *next;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t found;
	MATCHMAP *bmp;
	Q abort;
	uint32_t iotag;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt = NULL;
	uint32_t cmd;
	uint32_t did;

	bzero((void *) &abort, sizeof (Q));

	mutex_enter(&EMLXS_RINGTX_LOCK);

	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
			sbp = rp->fc_table[iotag];
			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_TXQ)) {
				nlp = sbp->node;
				iocbq = &sbp->iocbq;

				if (iocbq->flag & IOCB_PRIORITY) {
					iocbq = (IOCBQ *)
					    nlp->nlp_ptx[ringno].q_first;
				} else {
					iocbq = (IOCBQ *)
					    nlp->nlp_tx[ringno].q_first;
				}

				/* Find a matching entry */
				found = 0;
				while (iocbq) {
					if (iocbq == &sbp->iocbq) {
						found = 1;
						break;
					}
					iocbq = (IOCBQ *)iocbq->next;
				}

				if (!found) {
					if (!(sbp->pkt_flags & PACKET_STALE)) {
						mutex_enter(&sbp->mtx);
						sbp->pkt_flags |= PACKET_STALE;
						mutex_exit(&sbp->mtx);
					} else {
						if (abort.q_first == 0) {
							abort.q_first =
							    (uint8_t *)
							    &sbp->iocbq;
						} else {
							((IOCBQ *)
							    abort.q_last)->
							    next = &sbp->iocbq;
						}
						abort.q_last = (uint8_t *)
						    &sbp->iocbq;

						abort.q_cnt++;
					}

				} else {
					if ((sbp->pkt_flags & PACKET_STALE)) {
						mutex_enter(&sbp->mtx);
						sbp->pkt_flags &= ~PACKET_STALE;
						mutex_exit(&sbp->mtx);
					}
				}
			}
		}
		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
	}

	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		next = (IOCBQ *)iocbq->next;
		iocbq->next = NULL;
		sbp = (emlxs_buf_t *)iocbq->sbp;

		pkt = PRIV2PKT(sbp);
		if (pkt) {
			did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = SWAP_DATA32(cmd);
		}

		emlxs_tx_put(iocbq, 0);

		iocbq = next;

	}	/* end of while */

	mutex_exit(&EMLXS_RINGTX_LOCK);

	return;

} /* emlxs_tx_watchdog() */

#endif	/* TX_WATCHDOG */


#ifdef DHCHAP_SUPPORT

static void
emlxs_timer_check_dhchap(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint32_t i;
	NODELIST *ndlp = NULL;

	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		ndlp = port->node_table[i];

		if (!ndlp) {
			continue;
		}
		/* Check authentication response timeout */
		if (ndlp->node_dhc.nlp_authrsp_tmo &&
		    (hba->timer_tics >= ndlp->node_dhc.nlp_authrsp_tmo)) {
			/* Trigger authresp timeout handler */
			(void) emlxs_dhc_authrsp_timeout(port, ndlp, NULL);
		}
		/* Check reauthentication timeout */
		if (ndlp->node_dhc.nlp_reauth_tmo &&
		    (hba->timer_tics >= ndlp->node_dhc.nlp_reauth_tmo)) {
			/* Trigger reauth timeout handler */
			emlxs_dhc_reauth_timeout(port, NULL, ndlp);
		}
	}
	return;

} /* emlxs_timer_check_dhchap() */

#endif	/* DHCHAP_SUPPORT */