1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #define	DEF_EVENT_STRUCT  /* Needed for emlxs_events.h in emlxs_event.h */
29 #include <emlxs.h>
30 
31 
32 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
33 EMLXS_MSG_DEF(EMLXS_EVENT_C);
34 
35 
36 static uint32_t emlxs_event_check(emlxs_port_t *port, emlxs_event_t *evt);
37 static void emlxs_event_destroy(emlxs_hba_t *hba, emlxs_event_entry_t *entry);
38 
/* No-op placeholder for function pointers that must be non-NULL */
extern void
emlxs_null_func() {}
41 
42 
43 static uint32_t
44 emlxs_event_check(emlxs_port_t *port, emlxs_event_t *evt)
45 {
46 	emlxs_hba_t *hba = HBA;
47 
48 	/* Check if the event is being requested */
49 	if ((hba->event_mask & evt->mask)) {
50 		return (1);
51 	}
52 
53 #ifdef SAN_DIAG_SUPPORT
54 	if ((port->sd_event_mask & evt->mask)) {
55 		return (1);
56 	}
57 #endif /* SAN_DIAG_SUPPORT */
58 
59 	return (0);
60 
61 } /* emlxs_event_check() */
62 
63 
64 extern uint32_t
65 emlxs_event_queue_create(emlxs_hba_t *hba)
66 {
67 	emlxs_event_queue_t *eventq = &EVENTQ;
68 	char buf[40];
69 	ddi_iblock_cookie_t iblock;
70 
71 	/* Clear the queue */
72 	bzero(eventq, sizeof (emlxs_event_queue_t));
73 
74 	/* Initialize */
75 	(void) sprintf(buf, "?%s%d_evt_lock control variable", DRIVER_NAME,
76 	    hba->ddiinst);
77 	cv_init(&eventq->lock_cv, buf, CV_DRIVER, NULL);
78 
79 	(void) sprintf(buf, "?%s%d_evt_lock mutex", DRIVER_NAME, hba->ddiinst);
80 
81 	if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
82 		/* Get the current interrupt block cookie */
83 		(void) ddi_get_iblock_cookie(hba->dip, (uint_t)EMLXS_INUMBER,
84 		    &iblock);
85 
86 		/* Create the mutex lock */
87 		mutex_init(&eventq->lock, buf, MUTEX_DRIVER, (void *)iblock);
88 	}
89 #ifdef  MSI_SUPPORT
90 	else {
91 		/* Create event mutex lock */
92 		mutex_init(&eventq->lock, buf, MUTEX_DRIVER,
93 		    DDI_INTR_PRI(hba->intr_arg));
94 	}
95 #endif
96 
97 	return (1);
98 
99 } /* emlxs_event_queue_create() */
100 
101 
/*
 * Tear down the adapter's event queue.
 *
 * Clears the adapter-wide and per-vport event masks, wakes any threads
 * sleeping in event-wait routines so they can observe the cleared
 * masks and exit, destroys all events still queued, then releases the
 * queue lock and condition variable and zeroes the structure.
 */
extern void
emlxs_event_queue_destroy(emlxs_hba_t *hba)
{
	emlxs_port_t *vport;
	emlxs_event_queue_t *eventq = &EVENTQ;
	uint32_t i;
	uint32_t wakeup = 0;

	mutex_enter(&eventq->lock);

	/* Clear all event masks and broadcast a wakeup */
	/* to clear any sleeping threads */
	if (hba->event_mask) {
		hba->event_mask = 0;
		hba->event_timer = 0;
		wakeup = 1;
	}

	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		if (vport->sd_event_mask) {
			vport->sd_event_mask = 0;
			wakeup = 1;
		}
	}

	if (wakeup) {
		cv_broadcast(&eventq->lock_cv);

		/* Drop the lock briefly so awakened waiters can */
		/* acquire it, see the cleared masks, and return */
		mutex_exit(&eventq->lock);
		DELAYMS(10);
		mutex_enter(&eventq->lock);
	}

	/* Destroy the remaining events */
	/* (each call unlinks and frees eventq->first) */
	while (eventq->first) {
		emlxs_event_destroy(hba, eventq->first);
	}

	mutex_exit(&eventq->lock);

	/* Destroy the queue lock */
	mutex_destroy(&eventq->lock);
	cv_destroy(&eventq->lock_cv);

	/* Clear the queue */
	bzero(eventq, sizeof (emlxs_event_queue_t));

	return;

} /* emlxs_event_queue_destroy() */
154 
155 
/*
 * Unlink 'entry' from the event queue, notify its destroy handler,
 * and free the entry and its context buffer.
 *
 * Event queue lock must be held.
 */
static void
emlxs_event_destroy(emlxs_hba_t *hba, emlxs_event_entry_t *entry)
{
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_port_t *port;
	uint32_t missed = 0;

	/* port is needed by the EMLXS_CONTEXT logging macro below */
	port = (emlxs_port_t *)entry->port;

	/* Unlink the entry from the doubly linked queue */
	eventq->count--;
	if (eventq->count == 0) {
		eventq->first = NULL;
		eventq->last = NULL;
	} else {
		if (entry->prev) {
			entry->prev->next = entry->next;
		}
		if (entry->next) {
			entry->next->prev = entry->prev;
		}
		if (eventq->first == entry) {
			eventq->first = entry->next;
		}
		if (eventq->last == entry) {
			eventq->last = entry->prev;
		}
	}

	entry->prev = NULL;
	entry->next = NULL;

	/* A link or RSCN event destroyed before DFCLIB retrieved it */
	/* counts as a missed HBA event */
	if ((entry->evt->mask == EVT_LINK) ||
	    (entry->evt->mask == EVT_RSCN)) {
		if (!(entry->flag & EMLXS_DFC_EVENT_DONE)) {
			hba->hba_event.missed++;
			missed = 1;
		}
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_dequeued_msg,
	    "%s[%d]: flag=%x missed=%d cnt=%d",
	    entry->evt->label, entry->id, entry->flag, missed, eventq->count);

	/* Call notification handler */
	entry->evt->destroy(entry);

	/* Free context buffer */
	if (entry->bp && entry->size) {
		kmem_free(entry->bp, entry->size);
	}

	/* Free entry buffer */
	kmem_free(entry, sizeof (emlxs_event_entry_t));

	return;

} /* emlxs_event_destroy() */
214 
215 
216 extern void
217 emlxs_event(emlxs_port_t *port, emlxs_event_t *evt, void *bp, uint32_t size)
218 {
219 	emlxs_hba_t *hba = HBA;
220 	emlxs_event_queue_t *eventq = &EVENTQ;
221 	emlxs_event_entry_t *entry;
222 	uint32_t i;
223 	uint32_t mask;
224 
225 	if (emlxs_event_check(port, evt) == 0) {
226 		goto failed;
227 	}
228 
229 	/* Create event entry */
230 	if (!(entry = (emlxs_event_entry_t *)kmem_alloc(
231 	    sizeof (emlxs_event_entry_t), KM_NOSLEEP))) {
232 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
233 		    "%s: Unable to allocate event entry.", evt->label);
234 
235 		goto failed;
236 	}
237 
238 	/* Initialize */
239 	bzero(entry, sizeof (emlxs_event_entry_t));
240 
241 	entry->evt = evt;
242 	entry->port = (void *)port;
243 	entry->bp = bp;
244 	entry->size = size;
245 
246 	mutex_enter(&eventq->lock);
247 
248 	/* Set the event timer */
249 	entry->timestamp = hba->timer_tics;
250 	if (evt->timeout) {
251 		entry->timer = entry->timestamp + evt->timeout;
252 	}
253 
254 	/* Set the event id */
255 	entry->id = eventq->next_id++;
256 
257 	/* Set last event table */
258 	mask = evt->mask;
259 	for (i = 0; i < 32; i++) {
260 		if (mask & 0x01) {
261 			eventq->last_id[i] = entry->id;
262 		}
263 		mask >>= 1;
264 	}
265 
266 	/* Put event on bottom of queue */
267 	entry->next = NULL;
268 	if (eventq->count == 0) {
269 		entry->prev = NULL;
270 		eventq->first = entry;
271 		eventq->last = entry;
272 	} else {
273 		entry->prev = eventq->last;
274 		entry->prev->next = entry;
275 		eventq->last = entry;
276 	}
277 	eventq->count++;
278 
279 	if ((entry->evt->mask == EVT_LINK) ||
280 	    (entry->evt->mask == EVT_RSCN)) {
281 		hba->hba_event.new++;
282 	}
283 
284 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_queued_msg,
285 	    "%s[%d]: bp=%p size=%d cnt=%d", entry->evt->label,
286 	    entry->id, bp, size, eventq->count);
287 
288 	/* Broadcast the event */
289 	cv_broadcast(&eventq->lock_cv);
290 
291 	mutex_exit(&eventq->lock);
292 
293 	return;
294 
295 failed:
296 
297 	/* Call notification handler */
298 	entry->evt->destroy(entry);
299 
300 	if (entry->bp && entry->size) {
301 		kmem_free(entry->bp, entry->size);
302 	}
303 
304 	return;
305 
306 } /* emlxs_event() */
307 
308 
/*
 * Periodic timer callout: destroy any queued events whose timeout has
 * expired.  Only runs when the timeout config option is enabled, and
 * reschedules itself every EMLXS_EVENT_PERIOD timer ticks via
 * hba->event_timer.
 */
extern void
emlxs_timer_check_events(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	emlxs_event_entry_t *next;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	/* Not yet time for the next check */
	if ((hba->event_timer > hba->timer_tics)) {
		return;
	}

	/* NOTE: count is peeked without the lock; the queue is */
	/* re-examined under the lock before anything is touched */
	if (eventq->count) {
		mutex_enter(&eventq->lock);

		entry = eventq->first;
		while (entry) {
			/* timer == 0 means the event never expires */
			if ((!entry->timer) ||
			    (entry->timer > hba->timer_tics)) {
				entry = entry->next;
				continue;
			}

			/* Event timed out, destroy it */
			/* (save next first; destroy frees the entry) */
			next = entry->next;
			emlxs_event_destroy(hba, entry);
			entry = next;
		}

		mutex_exit(&eventq->lock);
	}

	/* Set next event timer check */
	hba->event_timer = hba->timer_tics + EMLXS_EVENT_PERIOD;

	return;

} /* emlxs_timer_check_events() */
351 
352 
353 extern void
354 emlxs_log_rscn_event(emlxs_port_t *port, uint8_t *payload, uint32_t size)
355 {
356 	uint8_t *bp;
357 	uint32_t *ptr;
358 
359 	/* Check if the event is being requested */
360 	if (emlxs_event_check(port, &emlxs_rscn_event) == 0) {
361 		return;
362 	}
363 
364 	if (size > MAX_RSCN_PAYLOAD) {
365 		size = MAX_RSCN_PAYLOAD;
366 	}
367 
368 	size += sizeof (uint32_t);
369 
370 	/* Save a copy of the payload for the event log */
371 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
372 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
373 		    "%s: Unable to allocate buffer.", emlxs_rscn_event.label);
374 
375 		return;
376 	}
377 
378 	/*
379 	 * Buffer Format:
380 	 *	word[0] = DID of the RSCN
381 	 *	word[1] = RSCN Payload
382 	 */
383 	ptr = (uint32_t *)bp;
384 	*ptr++ = port->did;
385 	bcopy(payload, (char *)ptr, (size - sizeof (uint32_t)));
386 
387 	emlxs_event(port, &emlxs_rscn_event, bp, size);
388 
389 	return;
390 
391 } /* emlxs_log_rscn_event() */
392 
393 
394 extern void
395 emlxs_log_vportrscn_event(emlxs_port_t *port, uint8_t *payload, uint32_t size)
396 {
397 	uint8_t *bp;
398 	uint8_t *ptr;
399 
400 	/* Check if the event is being requested */
401 	if (emlxs_event_check(port, &emlxs_vportrscn_event) == 0) {
402 		return;
403 	}
404 
405 	if (size > MAX_RSCN_PAYLOAD) {
406 		size = MAX_RSCN_PAYLOAD;
407 	}
408 
409 	size += sizeof (NAME_TYPE);
410 
411 	/* Save a copy of the payload for the event log */
412 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
413 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
414 		    "%s: Unable to allocate buffer.",
415 		    emlxs_vportrscn_event.label);
416 
417 		return;
418 	}
419 
420 	/*
421 	 * Buffer Format:
422 	 *	word[0 - 4] = WWPN of the RSCN
423 	 *	word[5] = RSCN Payload
424 	 */
425 	ptr = bp;
426 	bcopy(&port->wwpn, ptr, sizeof (NAME_TYPE));
427 	ptr += sizeof (NAME_TYPE);
428 	bcopy(payload, ptr, (size - sizeof (NAME_TYPE)));
429 
430 	emlxs_event(port, &emlxs_vportrscn_event, bp, size);
431 
432 	return;
433 
434 } /* emlxs_log_vportrscn_event() */
435 
436 
437 extern uint32_t
438 emlxs_log_ct_event(emlxs_port_t *port, uint8_t *payload, uint32_t size,
439     uint32_t rxid)
440 {
441 	uint8_t *bp;
442 	uint32_t *ptr;
443 
444 	/* Check if the event is being requested */
445 	if (emlxs_event_check(port, &emlxs_ct_event) == 0) {
446 		return (1);
447 	}
448 
449 	if (size > MAX_CT_PAYLOAD) {
450 		size = MAX_CT_PAYLOAD;
451 	}
452 
453 	size += sizeof (uint32_t);
454 
455 	/* Save a copy of the payload for the event log */
456 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
457 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
458 		    "%s: Unable to allocate buffer.", emlxs_ct_event.label);
459 
460 		return (1);
461 	}
462 
463 	/*
464 	 * Buffer Format:
465 	 *	word[0] = RXID tag for outgoing reply to this CT request
466 	 *	word[1] = CT Payload
467 	 */
468 	ptr = (uint32_t *)bp;
469 	*ptr++ = rxid;
470 	bcopy(payload, (char *)ptr, (size - sizeof (uint32_t)));
471 
472 	emlxs_event(port, &emlxs_ct_event, bp, size);
473 
474 	return (0);
475 
476 } /* emlxs_log_ct_event() */
477 
478 
/*
 * Destroy handler for CT events.  If the event was never retrieved by
 * DFCLIB (EMLXS_DFC_EVENT_DONE not set), no reply will ever be sent
 * for the CT request, so spawn a thread to abort the exchange
 * identified by the RXID stored in the first word of the context
 * buffer.
 */
extern void
emlxs_ct_event_destroy(emlxs_event_entry_t *entry)
{
	emlxs_port_t *port = (emlxs_port_t *)entry->port;
	emlxs_hba_t *hba = HBA;
	uint32_t rxid;

	if (!(entry->flag & EMLXS_DFC_EVENT_DONE)) {

		/* First word of the event buffer is the RXID */
		rxid = *(uint32_t *)entry->bp;

		/* Abort exchange */
		emlxs_thread_spawn(hba, emlxs_abort_ct_exchange,
		    entry->port, (void *)(unsigned long)rxid);
	}

	return;

} /* emlxs_ct_event_destroy() */
498 
499 
/*
 * Log a link state change event.
 *
 * The event buffer layout is one word of link speed / LIP type /
 * reserved bytes followed by a dfc_linkinfo_t snapshot of the current
 * link state (tag, up/down counters, topology, DID, ALPA map, WWNs).
 */
extern void
emlxs_log_link_event(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint8_t *bp;
	dfc_linkinfo_t *linkinfo;
	uint8_t *byte;
	uint8_t *linkspeed;
	uint8_t *liptype;
	uint8_t *resv1;
	uint8_t *resv2;
	uint32_t size;

	/* Check if the event is being requested */
	if (emlxs_event_check(port, &emlxs_link_event) == 0) {
		return;
	}

	size = sizeof (dfc_linkinfo_t) + sizeof (uint32_t);

	/* Save a copy of the buffer for the event log */
	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
		    "%s: Unable to allocate buffer.", emlxs_link_event.label);

		return;
	}

	/*
	 * Buffer Format:
	 *	word[0] = Linkspeed:8
	 *	word[0] = LIP_type:8
	 *	word[0] = resv1:8
	 *	word[0] = resv2:8
	 *	word[1] = dfc_linkinfo_t data
	 */
	byte = (uint8_t *)bp;
	linkspeed = &byte[0];
	liptype = &byte[1];
	resv1 = &byte[2];
	resv2 = &byte[3];
	linkinfo = (dfc_linkinfo_t *)&byte[4];

	*resv1 = 0;
	*resv2 = 0;

	/* Link down: no speed or LIP type to report */
	if (hba->state <= FC_LINK_DOWN) {
		*linkspeed = 0;
		*liptype = 0;
	} else {
		/* Set linkspeed (HBA_PORTSPEED_1GBIT is the default) */
		if (hba->linkspeed == LA_2GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_2GBIT;
		} else if (hba->linkspeed == LA_4GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_4GBIT;
		} else if (hba->linkspeed == LA_8GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_8GBIT;
		} else if (hba->linkspeed == LA_10GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_10GBIT;
		} else {
			*linkspeed = HBA_PORTSPEED_1GBIT;
		}

		/* Set LIP type */
		*liptype = port->lip_type;
	}

	bzero(linkinfo, sizeof (dfc_linkinfo_t));

	linkinfo->a_linkEventTag = hba->link_event_tag;
	linkinfo->a_linkUp = HBASTATS.LinkUp;
	linkinfo->a_linkDown = HBASTATS.LinkDown;
	linkinfo->a_linkMulti = HBASTATS.LinkMultiEvent;

	/* Map the HBA state onto the DFC link state */
	if (hba->state <= FC_LINK_DOWN) {
		linkinfo->a_linkState = LNK_DOWN;
		linkinfo->a_DID = port->prev_did;
	} else if (hba->state < FC_READY) {
		linkinfo->a_linkState = LNK_DISCOVERY;
	} else {
		linkinfo->a_linkState = LNK_READY;
	}

	if (linkinfo->a_linkState != LNK_DOWN) {
		if (hba->topology == TOPOLOGY_LOOP) {
			if (hba->flag & FC_FABRIC_ATTACHED) {
				linkinfo->a_topology = LNK_PUBLIC_LOOP;
			} else {
				linkinfo->a_topology = LNK_LOOP;
			}

			linkinfo->a_alpa = port->did & 0xff;
			linkinfo->a_DID = linkinfo->a_alpa;
			/* alpa_map[0] holds the entry count */
			linkinfo->a_alpaCnt = port->alpa_map[0];

			if (linkinfo->a_alpaCnt > 127) {
				linkinfo->a_alpaCnt = 127;
			}

			bcopy((void *)&port->alpa_map[1], linkinfo->a_alpaMap,
			    linkinfo->a_alpaCnt);
		} else {
			/* Point-to-point if only one remote node */
			if (port->node_count == 1) {
				linkinfo->a_topology = LNK_PT2PT;
			} else {
				linkinfo->a_topology = LNK_FABRIC;
			}

			linkinfo->a_DID = port->did;
		}
	}

	bcopy(&hba->wwpn, linkinfo->a_wwpName, 8);
	bcopy(&hba->wwnn, linkinfo->a_wwnName, 8);

	emlxs_event(port, &emlxs_link_event, bp, size);

	return;

} /* emlxs_log_link_event() */
620 
621 
622 extern void
623 emlxs_log_dump_event(emlxs_port_t *port, uint8_t *buffer, uint32_t size)
624 {
625 	emlxs_hba_t *hba = HBA;
626 	uint8_t *bp;
627 
628 	/* Check if the event is being requested */
629 	if (emlxs_event_check(port, &emlxs_dump_event) == 0) {
630 #ifdef DUMP_SUPPORT
631 		/* Schedule a dump thread */
632 		emlxs_dump(hba, EMLXS_DRV_DUMP, 0, 0);
633 #endif /* DUMP_SUPPORT */
634 		return;
635 	}
636 
637 	if (buffer && size) {
638 		/* Save a copy of the buffer for the event log */
639 		if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
640 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
641 			    "%s: Unable to allocate buffer.",
642 			    emlxs_dump_event.label);
643 
644 			return;
645 		}
646 
647 		bcopy(buffer, bp, size);
648 	} else {
649 		bp = NULL;
650 		size = 0;
651 	}
652 
653 	emlxs_event(port, &emlxs_dump_event, bp, size);
654 
655 	return;
656 
657 } /* emlxs_log_dump_event() */
658 
659 
660 extern void
661 emlxs_log_temp_event(emlxs_port_t *port, uint32_t type, uint32_t temp)
662 {
663 	emlxs_hba_t *hba = HBA;
664 	uint32_t *bp;
665 	uint32_t size;
666 
667 	/* Check if the event is being requested */
668 	if (emlxs_event_check(port, &emlxs_temp_event) == 0) {
669 #ifdef DUMP_SUPPORT
670 		/* Schedule a dump thread */
671 		emlxs_dump(hba, EMLXS_TEMP_DUMP, type, temp);
672 #endif /* DUMP_SUPPORT */
673 		return;
674 	}
675 
676 	size = 2 * sizeof (uint32_t);
677 
678 	if (!(bp = (uint32_t *)kmem_alloc(size, KM_NOSLEEP))) {
679 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
680 		    "%s: Unable to allocate buffer.", emlxs_temp_event.label);
681 
682 		return;
683 	}
684 
685 	bp[0] = type;
686 	bp[1] = temp;
687 
688 	emlxs_event(port, &emlxs_temp_event, bp, size);
689 
690 	return;
691 
692 } /* emlxs_log_temp_event() */
693 
694 
695 
696 extern void
697 emlxs_log_fcoe_event(emlxs_port_t *port, menlo_init_rsp_t *init_rsp)
698 {
699 	emlxs_hba_t *hba = HBA;
700 	uint8_t *bp;
701 	uint32_t size;
702 
703 	/* Check if the event is being requested */
704 	if (emlxs_event_check(port, &emlxs_fcoe_event) == 0) {
705 		return;
706 	}
707 
708 	/* Check if this is a FCOE adapter */
709 	if (hba->model_info.device_id != PCI_DEVICE_ID_LP21000_M) {
710 		return;
711 	}
712 
713 	size = sizeof (menlo_init_rsp_t);
714 
715 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
716 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
717 		    "%s: Unable to allocate buffer.", emlxs_fcoe_event.label);
718 
719 		return;
720 	}
721 
722 	bcopy((uint8_t *)init_rsp, bp, size);
723 
724 	emlxs_event(port, &emlxs_fcoe_event, bp, size);
725 
726 	return;
727 
728 } /* emlxs_log_fcoe_event() */
729 
730 
731 extern void
732 emlxs_log_async_event(emlxs_port_t *port, IOCB *iocb)
733 {
734 	uint8_t *bp;
735 	uint32_t size;
736 
737 	if (emlxs_event_check(port, &emlxs_async_event) == 0) {
738 		return;
739 	}
740 
741 	/* ASYNC_STATUS_CN response size */
742 	size = 64;
743 
744 	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
745 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
746 		    "%s: Unable to allocate buffer.", emlxs_async_event.label);
747 
748 		return;
749 	}
750 
751 	bcopy((uint8_t *)iocb, bp, size);
752 
753 	emlxs_event(port, &emlxs_async_event, bp, size);
754 
755 	return;
756 
757 } /* emlxs_log_async_event() */
758 
759 
/*
 * Harvest new link and RSCN events into the caller's HBA_EVENTINFO
 * array (HBA API event retrieval).
 *
 * On entry *eventcount is the array capacity; on return it is the
 * number of events copied out.  *missed returns (and clears) the
 * count of events destroyed before they could be retrieved.  Events
 * with id <= hba_event.last_id are considered already delivered.
 *
 * Returns 0 on success or DFC_ARG_NULL for NULL output pointers.
 */
extern uint32_t
emlxs_get_dfc_eventinfo(emlxs_port_t *port, HBA_EVENTINFO *eventinfo,
    uint32_t *eventcount, uint32_t *missed)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t max_events;
	dfc_linkinfo_t *linkinfo;
	uint32_t *word;
	uint8_t *byte;
	uint8_t linkspeed;
	uint8_t liptype;
	fc_affected_id_t *aid;
	uint32_t events;
	uint8_t format;

	if (!eventinfo || !eventcount || !missed) {
		return (DFC_ARG_NULL);
	}

	max_events = *eventcount;
	*eventcount = 0;
	*missed = 0;

	mutex_enter(&eventq->lock);

	/* Account for missed events */
	if (hba->hba_event.new > hba->hba_event.missed) {
		hba->hba_event.new -= hba->hba_event.missed;
	} else {
		hba->hba_event.new = 0;
	}

	*missed = hba->hba_event.missed;
	hba->hba_event.missed = 0;

	if (!hba->hba_event.new) {
		hba->hba_event.last_id = eventq->next_id - 1;
		mutex_exit(&eventq->lock);
		return (0);
	}

	/* A new event has occurred since last acquisition */

	events = 0;
	entry = eventq->first;
	while (entry && (events < max_events)) {

		/* Skip old events */
		if (entry->id <= hba->hba_event.last_id) {
			entry = entry->next;
			continue;
		}

		/* Process this entry */
		/* (buffer layouts produced by emlxs_log_link_event */
		/* and emlxs_log_rscn_event respectively) */
		switch (entry->evt->mask) {
		case EVT_LINK:
			byte = (uint8_t *)entry->bp;
			linkspeed = byte[0];
			liptype = byte[1];
			linkinfo = (dfc_linkinfo_t *)&byte[4];

			if (linkinfo->a_linkState == LNK_DOWN) {
				eventinfo->EventCode =
				    HBA_EVENT_LINK_DOWN;
				eventinfo->Event.Link_EventInfo.
				    PortFcId = linkinfo->a_DID;
				eventinfo->Event.Link_EventInfo.
				    Reserved[0] = 0;
				eventinfo->Event.Link_EventInfo.
				    Reserved[1] = 0;
				eventinfo->Event.Link_EventInfo.
				    Reserved[2] = 0;
			} else {
				eventinfo->EventCode =
				    HBA_EVENT_LINK_UP;
				eventinfo->Event.Link_EventInfo.
				    PortFcId = linkinfo->a_DID;

				/* Reserved[0]: 2 = loop, 1 = non-loop */
				if ((linkinfo->a_topology ==
				    LNK_PUBLIC_LOOP) ||
				    (linkinfo->a_topology ==
				    LNK_LOOP)) {
					eventinfo->Event.
					    Link_EventInfo.
					    Reserved[0] = 2;
				} else {
					eventinfo->Event.
					    Link_EventInfo.
					    Reserved[0] = 1;
				}

				eventinfo->Event.Link_EventInfo.
				    Reserved[1] = liptype;
				eventinfo->Event.Link_EventInfo.
				    Reserved[2] = linkspeed;
			}

			eventinfo++;
			events++;
			hba->hba_event.new--;
			break;

		case EVT_RSCN:
			word = (uint32_t *)entry->bp;
			eventinfo->EventCode = HBA_EVENT_RSCN;
			eventinfo->Event.RSCN_EventInfo.PortFcId =
			    word[0] & 0xFFFFFF;
			/* word[1] is the RSCN payload command */

			aid = (fc_affected_id_t *)&word[2];
			format = aid->aff_format;

			/* Mask the affected DID by address format */
			switch (format) {
			case 0:	/* Port */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ffffff;
				break;

			case 1:	/* Area */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ffff00;
				break;

			case 2:	/* Domain */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ff0000;
				break;

			case 3:	/* Network */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage = 0;
				break;
			}

			eventinfo->Event.RSCN_EventInfo.Reserved[0] =
			    0;
			eventinfo->Event.RSCN_EventInfo.Reserved[1] =
			    0;

			eventinfo++;
			events++;
			hba->hba_event.new--;
			break;
		}

		hba->hba_event.last_id = entry->id;
		entry = entry->next;
	}

	/* Return number of events acquired */
	*eventcount = events;

	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_get_dfc_eventinfo() */
922 
923 
/*
 * Retrieve the next DFC event of the type named by dfc_event->event,
 * optionally sleeping until one arrives.
 *
 * dfc_event->last_id is the id of the last event the caller has seen;
 * on return it is advanced.  If the caller supplied a dataout buffer,
 * the next matching event's context buffer is copied out and the
 * event is marked EMLXS_DFC_EVENT_DONE; otherwise only last_id is
 * refreshed.  A sleeping caller is released when the event is
 * unregistered, a signal arrives, or emlxs_kill_dfc_event() runs.
 *
 * Returns 0 on success or DFC_* error codes.
 */
uint32_t
emlxs_get_dfc_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event,
    uint32_t sleep)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t found;
	uint32_t mask;
	uint32_t i;
	uint32_t size = 0;
	uint32_t rc;

	if (dfc_event->dataout && dfc_event->size) {
		size = dfc_event->size;
	}
	dfc_event->size = 0;

	if (!dfc_event->event) {
		return (DFC_ARG_INVALID);
	}

	/* Calculate the event index */
	/* (bit position of the event in the mask, used to index */
	/* eventq->last_id[]) */
	mask = dfc_event->event;
	for (i = 0; i < 32; i++) {
		if (mask & 0x01) {
			break;
		}

		mask >>= 1;
	}

	mutex_enter(&eventq->lock);

wait_for_event:

	/* Check if no new event has occurred */
	if (dfc_event->last_id == eventq->last_id[i]) {
		if (!sleep) {
			mutex_exit(&eventq->lock);
			return (0);
		}

		/* While event is still active and */
		/* no new event has been logged */
		while ((dfc_event->event & hba->event_mask) &&
		    (dfc_event->last_id == eventq->last_id[i])) {

			rc = cv_wait_sig(&eventq->lock_cv, &eventq->lock);

			/* Check if thread was killed by kernel */
			if (rc == 0) {
				dfc_event->pid = 0;
				dfc_event->event = 0;
				mutex_exit(&eventq->lock);
				return (0);
			}
		}

		/* If the event is no longer registered then */
		/* return immediately */
		if (!(dfc_event->event & hba->event_mask)) {
			mutex_exit(&eventq->lock);
			return (0);
		}
	}

	/* !!! An event has occurred since last_id !!! */

	/* Check if event data is not being requested */
	if (!size) {
		/* If so, then just return the last event id */
		dfc_event->last_id = eventq->last_id[i];

		mutex_exit(&eventq->lock);
		return (0);
	}

	/* !!! The requester wants the next event buffer !!! */

	found = 0;
	entry = eventq->first;
	while (entry) {
		if ((entry->id > dfc_event->last_id) &&
		    (entry->evt->mask == dfc_event->event)) {
			found = 1;
			break;
		}

		entry = entry->next;
	}

	if (!found) {
		/* The event was already destroyed (e.g. timed out); */
		/* update last_id to the last known event */
		dfc_event->last_id = eventq->last_id[i];

		/* Try waiting again if we can */
		goto wait_for_event;
	}

	/* !!! Next event found !!! */

	/* Copy the context buffer to the buffer provided */
	if (entry->bp && entry->size) {
		/* Copy out at most the caller's buffer size */
		if (entry->size < size) {
			size = entry->size;
		}

		if (ddi_copyout((void *)entry->bp, dfc_event->dataout, size,
		    dfc_event->mode) != 0) {
			mutex_exit(&eventq->lock);

			return (DFC_COPYOUT_ERROR);
		}

		/* Event has been retrieved by DFCLIB */
		entry->flag |= EMLXS_DFC_EVENT_DONE;

		dfc_event->size = size;
	}

	dfc_event->last_id = entry->id;

	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_get_dfc_event() */
1052 
1053 
/*
 * Cancel a pending DFC event request: clear its owner pid and event
 * mask, then broadcast on the queue CV so any thread sleeping in
 * emlxs_get_dfc_event() re-evaluates and returns.
 *
 * Always returns 0.
 */
uint32_t
emlxs_kill_dfc_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;

	mutex_enter(&eventq->lock);
	dfc_event->pid = 0;
	dfc_event->event = 0;
	cv_broadcast(&eventq->lock_cv);
	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_kill_dfc_event() */
1069 
1070 
1071 #ifdef SAN_DIAG_SUPPORT
1072 extern void
1073 emlxs_log_sd_basic_els_event(emlxs_port_t *port, uint32_t subcat,
1074     HBA_WWN *portname, HBA_WWN *nodename)
1075 {
1076 	struct sd_plogi_rcv_v0	*bp;
1077 	uint32_t		size;
1078 
1079 	/* Check if the event is being requested */
1080 	if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1081 		return;
1082 	}
1083 
1084 	size = sizeof (struct sd_plogi_rcv_v0);
1085 
1086 	if (!(bp = (struct sd_plogi_rcv_v0 *)kmem_alloc(size, KM_NOSLEEP))) {
1087 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1088 		    "%s: Unable to allocate buffer.", emlxs_sd_els_event.label);
1089 
1090 		return;
1091 	}
1092 
1093 	/*
1094 	 * we are using version field to store subtype, libdfc
1095 	 * will fix this up before returning data to app.
1096 	 */
1097 	bp->sd_plogir_version = subcat;
1098 	bcopy((uint8_t *)portname, (uint8_t *)&bp->sd_plogir_portname,
1099 	    sizeof (HBA_WWN));
1100 	bcopy((uint8_t *)nodename, (uint8_t *)&bp->sd_plogir_nodename,
1101 	    sizeof (HBA_WWN));
1102 
1103 	emlxs_event(port, &emlxs_sd_els_event, bp, size);
1104 
1105 	return;
1106 
1107 } /* emlxs_log_sd_basic_els_event() */
1108 
1109 
1110 extern void
1111 emlxs_log_sd_prlo_event(emlxs_port_t *port, HBA_WWN *remoteport)
1112 {
1113 	struct sd_prlo_rcv_v0	*bp;
1114 	uint32_t		size;
1115 
1116 	/* Check if the event is being requested */
1117 	if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1118 		return;
1119 	}
1120 
1121 	size = sizeof (struct sd_prlo_rcv_v0);
1122 
1123 	if (!(bp = (struct sd_prlo_rcv_v0 *)kmem_alloc(size,
1124 	    KM_NOSLEEP))) {
1125 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1126 		    "%s PRLO: Unable to allocate buffer.",
1127 		    emlxs_sd_els_event.label);
1128 
1129 		return;
1130 	}
1131 
1132 	/*
1133 	 * we are using version field to store subtype, libdfc
1134 	 * will fix this up before returning data to app.
1135 	 */
1136 	bp->sd_prlor_version = SD_ELS_SUBCATEGORY_PRLO_RCV;
1137 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_prlor_remoteport,
1138 	    sizeof (HBA_WWN));
1139 
1140 	emlxs_event(port, &emlxs_sd_els_event, bp, size);
1141 
1142 	return;
1143 
1144 } /* emlxs_log_sd_prlo_event() */
1145 
1146 
1147 extern void
1148 emlxs_log_sd_lsrjt_event(emlxs_port_t *port, HBA_WWN *remoteport,
1149     uint32_t orig_cmd, uint32_t reason, uint32_t reason_expl)
1150 {
1151 	struct sd_lsrjt_rcv_v0	*bp;
1152 	uint32_t		size;
1153 
1154 	/* Check if the event is being requested */
1155 	if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1156 		return;
1157 	}
1158 
1159 	size = sizeof (struct sd_lsrjt_rcv_v0);
1160 
1161 	if (!(bp = (struct sd_lsrjt_rcv_v0 *)kmem_alloc(size,
1162 	    KM_NOSLEEP))) {
1163 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1164 		    "%s LSRJT: Unable to allocate buffer.",
1165 		    emlxs_sd_els_event.label);
1166 
1167 		return;
1168 	}
1169 
1170 	/*
1171 	 * we are using version field to store subtype, libdfc
1172 	 * will fix this up before returning data to app.
1173 	 */
1174 	bp->sd_lsrjtr_version = SD_ELS_SUBCATEGORY_LSRJT_RCV;
1175 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_lsrjtr_remoteport,
1176 	    sizeof (HBA_WWN));
1177 	bp->sd_lsrjtr_original_cmd = orig_cmd;
1178 	bp->sd_lsrjtr_reasoncode = reason;
1179 	bp->sd_lsrjtr_reasoncodeexpl = reason_expl;
1180 
1181 	emlxs_event(port, &emlxs_sd_els_event, bp, size);
1182 
1183 	return;
1184 
1185 } /* emlxs_log_sd_lsrjt_event() */
1186 
1187 
1188 extern void
1189 emlxs_log_sd_fc_bsy_event(emlxs_port_t *port, HBA_WWN *remoteport)
1190 {
1191 	struct sd_pbsy_rcv_v0	*bp;
1192 	uint32_t		size;
1193 
1194 	/* Check if the event is being requested */
1195 	if (emlxs_event_check(port, &emlxs_sd_fabric_event) == 0) {
1196 		return;
1197 	}
1198 
1199 	size = sizeof (struct sd_pbsy_rcv_v0);
1200 
1201 	if (!(bp = (struct sd_pbsy_rcv_v0 *)kmem_alloc(size,
1202 	    KM_NOSLEEP))) {
1203 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1204 		    "%s BSY: Unable to allocate buffer.",
1205 		    emlxs_sd_fabric_event.label);
1206 
1207 		return;
1208 	}
1209 
1210 	/*
1211 	 * we are using version field to store subtype, libdfc
1212 	 * will fix this up before returning data to app.
1213 	 */
1214 	if (remoteport == NULL)
1215 		bp->sd_pbsyr_evt_version = SD_FABRIC_SUBCATEGORY_FABRIC_BUSY;
1216 	else
1217 	{
1218 		bp->sd_pbsyr_evt_version = SD_FABRIC_SUBCATEGORY_PORT_BUSY;
1219 		bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_pbsyr_rport,
1220 		    sizeof (HBA_WWN));
1221 	}
1222 
1223 	emlxs_event(port, &emlxs_sd_fabric_event, bp, size);
1224 
1225 	return;
1226 
1227 } /* emlxs_log_sd_fc_bsy_event() */
1228 
1229 
1230 extern void
1231 emlxs_log_sd_fc_rdchk_event(emlxs_port_t *port, HBA_WWN *remoteport,
1232     uint32_t lun, uint32_t opcode, uint32_t fcp_param)
1233 {
1234 	struct sd_fcprdchkerr_v0	*bp;
1235 	uint32_t			size;
1236 
1237 	/* Check if the event is being requested */
1238 	if (emlxs_event_check(port, &emlxs_sd_fabric_event) == 0) {
1239 		return;
1240 	}
1241 
1242 	size = sizeof (struct sd_fcprdchkerr_v0);
1243 
1244 	if (!(bp = (struct sd_fcprdchkerr_v0 *)kmem_alloc(size,
1245 	    KM_NOSLEEP))) {
1246 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1247 		    "%s RDCHK: Unable to allocate buffer.",
1248 		    emlxs_sd_fabric_event.label);
1249 
1250 		return;
1251 	}
1252 
1253 	/*
1254 	 * we are using version field to store subtype, libdfc
1255 	 * will fix this up before returning data to app.
1256 	 */
1257 	bp->sd_fcprdchkerr_version = SD_FABRIC_SUBCATEGORY_FCPRDCHKERR;
1258 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_fcprdchkerr_rport,
1259 	    sizeof (HBA_WWN));
1260 	bp->sd_fcprdchkerr_lun = lun;
1261 	bp->sd_fcprdchkerr_opcode = opcode;
1262 	bp->sd_fcprdchkerr_fcpiparam = fcp_param;
1263 
1264 	emlxs_event(port, &emlxs_sd_fabric_event, bp, size);
1265 
1266 	return;
1267 
1268 } /* emlxs_log_sd_rdchk_event() */
1269 
1270 
1271 extern void
1272 emlxs_log_sd_scsi_event(emlxs_port_t *port, uint32_t type,
1273     HBA_WWN *remoteport, int32_t lun)
1274 {
1275 	struct sd_scsi_generic_v0	*bp;
1276 	uint32_t			size;
1277 
1278 	/* Check if the event is being requested */
1279 	if (emlxs_event_check(port, &emlxs_sd_scsi_event) == 0) {
1280 		return;
1281 	}
1282 
1283 	size = sizeof (struct sd_scsi_generic_v0);
1284 
1285 	if (!(bp = (struct sd_scsi_generic_v0 *)kmem_alloc(size,
1286 	    KM_NOSLEEP))) {
1287 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1288 		    "%s: Unable to allocate buffer.",
1289 		    emlxs_sd_scsi_event.label);
1290 
1291 		return;
1292 	}
1293 
1294 	/*
1295 	 * we are using version field to store subtype, libdfc
1296 	 * will fix this up before returning data to app.
1297 	 */
1298 	bp->sd_scsi_generic_version = type;
1299 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_scsi_generic_rport,
1300 	    sizeof (HBA_WWN));
1301 	bp->sd_scsi_generic_lun = lun;
1302 
1303 	emlxs_event(port, &emlxs_sd_scsi_event, bp, size);
1304 
1305 	return;
1306 
1307 } /* emlxs_log_sd_scsi_event() */
1308 
1309 
1310 extern void
1311 emlxs_log_sd_scsi_check_event(emlxs_port_t *port, HBA_WWN *remoteport,
1312     uint32_t lun, uint32_t cmdcode, uint32_t sensekey,
1313     uint32_t asc, uint32_t ascq)
1314 {
1315 	struct sd_scsi_checkcond_v0	*bp;
1316 	uint32_t			size;
1317 
1318 	/* Check if the event is being requested */
1319 	if (emlxs_event_check(port, &emlxs_sd_scsi_event) == 0) {
1320 		return;
1321 	}
1322 
1323 	size = sizeof (struct sd_scsi_checkcond_v0);
1324 
1325 	if (!(bp = (struct sd_scsi_checkcond_v0 *)kmem_alloc(size,
1326 	    KM_NOSLEEP))) {
1327 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1328 		    "%s CHECK: Unable to allocate buffer.",
1329 		    emlxs_sd_scsi_event.label);
1330 
1331 		return;
1332 	}
1333 
1334 	/*
1335 	 * we are using version field to store subtype, libdfc
1336 	 * will fix this up before returning data to app.
1337 	 */
1338 	bp->sd_scsi_checkcond_version = SD_SCSI_SUBCATEGORY_CHECKCONDITION;
1339 	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_scsi_checkcond_rport,
1340 	    sizeof (HBA_WWN));
1341 	bp->sd_scsi_checkcond_lun = lun;
1342 	bp->sd_scsi_checkcond_cmdcode = cmdcode;
1343 	bp->sd_scsi_checkcond_sensekey = sensekey;
1344 	bp->sd_scsi_checkcond_asc = asc;
1345 	bp->sd_scsi_checkcond_ascq = ascq;
1346 
1347 	emlxs_event(port, &emlxs_sd_scsi_event, bp, size);
1348 
1349 	return;
1350 
1351 } /* emlxs_log_sd_scsi_check_event() */
1352 
1353 
/*
 * emlxs_get_sd_event
 *
 * Fetch the next SAN-diag event matching the single event mask bit in
 * dfc_event->event.  If dfc_event->dataout/size are set, the event's
 * payload is copied out to the caller; otherwise only the last event id
 * is reported.  When 'sleep' is nonzero the call blocks on the event
 * queue condition variable until a new event arrives, the event is
 * deregistered, or the waiting thread receives a signal.
 *
 * Returns 0 on success (including "no new event" and "wait interrupted"
 * cases), DFC_ARG_INVALID if no event mask was supplied, or
 * DFC_COPYOUT_ERROR if the payload copyout failed.
 */
uint32_t
emlxs_get_sd_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event,
    uint32_t sleep)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t found;
	uint32_t mask;
	uint32_t i;
	uint32_t size = 0;
	uint32_t rc;

	/* Payload copyout happens only if the caller provided a buffer */
	if (dfc_event->dataout && dfc_event->size) {
		size = dfc_event->size;
	}
	dfc_event->size = 0;

	if (!dfc_event->event) {
		return (DFC_ARG_INVALID);
	}

	/* Calculate the event index (bit position of the lowest set bit) */
	mask = dfc_event->event;
	for (i = 0; i < 32; i++) {
		if (mask & 0x01) {
			break;
		}

		mask >>= 1;
	}

	mutex_enter(&eventq->lock);

wait_for_event:

	/* Check if no new event has ocurred */
	if (dfc_event->last_id == eventq->last_id[i]) {
		if (!sleep) {
			mutex_exit(&eventq->lock);
			return (0);
		}

		/* While event is active and no new event has been logged */
		while ((dfc_event->event & port->sd_event_mask) &&
		    (dfc_event->last_id == eventq->last_id[i])) {
			/* cv_wait_sig drops and reacquires eventq->lock */
			rc = cv_wait_sig(&eventq->lock_cv, &eventq->lock);

			/* Check if thread was killed by kernel */
			if (rc == 0) {
				/* Clear the request so the caller sees it */
				dfc_event->pid = 0;
				dfc_event->event = 0;
				mutex_exit(&eventq->lock);
				return (0);
			}
		}

		/* If the event is no longer registered then return */
		if (!(dfc_event->event & port->sd_event_mask)) {
			mutex_exit(&eventq->lock);
			return (0);
		}
	}

	/* !!! An event has occurred since last_id !!! */

	/* Check if event data is not being requested */
	if (!size) {
		/* If so, then just return the last event id */
		dfc_event->last_id = eventq->last_id[i];

		mutex_exit(&eventq->lock);
		return (0);
	}

	/* !!! The requester wants the next event buffer !!! */

	/* Scan oldest-to-newest for the first unconsumed matching entry */
	found = 0;
	entry = eventq->first;
	while (entry) {
		if ((entry->id > dfc_event->last_id) &&
		    (entry->port == (void *)port) &&
		    (entry->evt->mask == dfc_event->event)) {
			found = 1;
			break;
		}

		entry = entry->next;
	}

	if (!found) {
		/*
		 * The event fired but its entry is already gone from the
		 * queue (presumably aged out).  Update last_id to the last
		 * known event and retry the wait.
		 */
		dfc_event->last_id = eventq->last_id[i];

		/* Try waiting again if we can */
		goto wait_for_event;
	}

	/* !!! Next event found !!! */

	/* Copy the context buffer to the buffer provided */
	if (entry->bp && entry->size) {
		/* Truncate to the smaller of entry size and caller buffer */
		if (entry->size < size) {
			size = entry->size;
		}

		/*
		 * NOTE(review): ddi_copyout is called while holding
		 * eventq->lock — assumed acceptable here; confirm the
		 * lock is not a spin/interrupt mutex.
		 */
		if (ddi_copyout((void *) entry->bp, dfc_event->dataout,
		    size, dfc_event->mode) != 0) {
			mutex_exit(&eventq->lock);

			return (DFC_COPYOUT_ERROR);
		}

		/* Event has been retrieved by SANDIAG */
		entry->flag |= EMLXS_SD_EVENT_DONE;

		dfc_event->size = size;
	}

	/* Advance the caller's cursor past the consumed entry */
	dfc_event->last_id = entry->id;

	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_get_sd_event */
1480 #endif /* SAN_DIAG_SUPPORT */
1481