xref: /illumos-gate/usr/src/cmd/fm/fmd/common/fmd_eventq.c (revision bbf21555)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <fmd_alloc.h>
#include <fmd_eventq.h>
#include <fmd_module.h>
#include <fmd_dispq.h>
#include <fmd_subr.h>

#include <fmd.h>

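/*
 * Create a per-module event queue.  The queue is bounded by 'limit' (a limit
 * of zero makes the queue act as /dev/null; see the insert functions below)
 * and obtains a group id from the daemon's dispatch queue (fmd.d_disp) so
 * that dispatched events can be routed to it.
 */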
fmd_eventq_t *
fmd_eventq_create(fmd_module_t *mp, fmd_eventqstat_t *stats,
    pthread_mutex_t *stats_lock, uint_t limit)
{
	fmd_eventq_t *eq = fmd_zalloc(sizeof (fmd_eventq_t), FMD_SLEEP);

	(void) pthread_mutex_init(&eq->eq_lock, NULL);
	(void) pthread_cond_init(&eq->eq_cv, NULL);

	eq->eq_mod = mp;
	eq->eq_stats = stats;
	eq->eq_stats_lock = stats_lock;
	eq->eq_limit = limit;
	eq->eq_sgid = fmd_dispq_getgid(fmd.d_disp, eq);

	return (eq);
}

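/*
 * Destroy an event queue: release any events still on the queue, return the
 * queue's group id to the dispatch queue, and free the queue itself.
 */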
void
fmd_eventq_destroy(fmd_eventq_t *eq)
{
	fmd_eventqelem_t *eqe;

	while ((eqe = fmd_list_next(&eq->eq_list)) != NULL) {
		fmd_list_delete(&eq->eq_list, eqe);
		fmd_event_rele(eqe->eqe_event);
		fmd_free(eqe, sizeof (fmd_eventqelem_t));
	}

	fmd_dispq_delgid(fmd.d_disp, eq->eq_sgid);
	fmd_free(eq, sizeof (fmd_eventq_t));
}

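/*
 * Drop a queued element: bump the owning module's "dropped" statistic and
 * release the hold placed on the event when it was queued.
 */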
static void
fmd_eventq_drop(fmd_eventq_t *eq, fmd_eventqelem_t *eqe)
{
	(void) pthread_mutex_lock(eq->eq_stats_lock);
	eq->eq_stats->eqs_dropped.fmds_value.ui64++;
	(void) pthread_mutex_unlock(eq->eq_stats_lock);

	fmd_event_rele(eqe->eqe_event);
	fmd_free(eqe, sizeof (fmd_eventqelem_t));
}

void
fmd_eventq_drop_topo(fmd_eventq_t *eq)
{
	fmd_eventqelem_t *eqe, *tmp;
	boolean_t got_fm_events = B_FALSE;

	/*
	 * Here we iterate through the per-module event queue in order to remove
	 * redundant FMD_EVT_TOPO events.  The trick is to not drop a given
	 * topo event if there are any FM protocol events in the queue after
	 * it, as those events need to be processed with the correct topology.
	 */
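	/*
	 * For example, with a queue of (head to tail) T1, P, T2, T3, where
	 * T* are topo events and P is a protocol event, the backward scan
	 * below drops T3 and T2 (no protocol events follow them) but keeps
	 * T1, since P still needs the topology that T1 describes.
	 */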
	(void) pthread_mutex_lock(&eq->eq_lock);
	eqe = fmd_list_prev(&eq->eq_list);
	while (eqe) {
		if (FMD_EVENT_TYPE(eqe->eqe_event) == FMD_EVT_TOPO) {
			if (!got_fm_events) {
				tmp = eqe;
				eqe = fmd_list_prev(eqe);
				fmd_list_delete(&eq->eq_list, tmp);
				eq->eq_size--;
				fmd_eventq_drop(eq, tmp);
			} else {
				got_fm_events = B_FALSE;
				eqe = fmd_list_prev(eqe);
			}
		} else if (FMD_EVENT_TYPE(eqe->eqe_event) == FMD_EVT_PROTOCOL) {
			got_fm_events = B_TRUE;
			eqe = fmd_list_prev(eqe);
		} else
			eqe = fmd_list_prev(eqe);
	}
	(void) pthread_mutex_unlock(&eq->eq_lock);
}

/*
 * Update statistics when an event is dispatched and placed on a module's event
 * queue.  This is essentially the same code as kstat_waitq_enter(9F).
 */
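/*
 * As in that wait queue accounting, eqs_wtime accumulates the time during
 * which at least one event was waiting, and eqs_wlentime accumulates the
 * time-weighted queue length (length * time), from which an average queue
 * length and average wait time can later be derived.
 */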
static void
fmd_eventqstat_dispatch(fmd_eventq_t *eq)
{
	fmd_eventqstat_t *eqs = eq->eq_stats;
	hrtime_t new, delta;
	uint32_t wcnt;

	(void) pthread_mutex_lock(eq->eq_stats_lock);

	new = gethrtime();
	delta = new - eqs->eqs_wlastupdate.fmds_value.ui64;
	eqs->eqs_wlastupdate.fmds_value.ui64 = new;
	wcnt = eqs->eqs_wcnt.fmds_value.ui32++;

	if (wcnt != 0) {
		eqs->eqs_wlentime.fmds_value.ui64 += delta * wcnt;
		eqs->eqs_wtime.fmds_value.ui64 += delta;
	}

	eqs->eqs_dispatched.fmds_value.ui64++;
	(void) pthread_mutex_unlock(eq->eq_stats_lock);
}

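/*
 * Insert an event at the head of the queue.  Protocol events are subject to
 * the queue's length limit and are dropped if the queue is already full;
 * other event types are queued regardless of the limit.  Control events are
 * not counted in the dispatch statistics.
 */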
void
fmd_eventq_insert_at_head(fmd_eventq_t *eq, fmd_event_t *ep)
{
	uint_t evt = FMD_EVENT_TYPE(ep);
	fmd_eventqelem_t *eqe;
	int ok;

	/*
	 * If this event queue is acting as /dev/null, bounce the reference
	 * count to free an unreferenced event and just return immediately.
	 */
	if (eq->eq_limit == 0) {
		fmd_event_hold(ep);
		fmd_event_rele(ep);
		return;
	}

	eqe = fmd_alloc(sizeof (fmd_eventqelem_t), FMD_SLEEP);
	fmd_event_hold(ep);
	eqe->eqe_event = ep;

	(void) pthread_mutex_lock(&eq->eq_lock);

	if ((ok = eq->eq_size < eq->eq_limit || evt != FMD_EVT_PROTOCOL) != 0) {
		if (evt != FMD_EVT_CTL)
			fmd_eventqstat_dispatch(eq);

		fmd_list_prepend(&eq->eq_list, eqe);
		eq->eq_size++;
	}

	(void) pthread_cond_broadcast(&eq->eq_cv);
	(void) pthread_mutex_unlock(&eq->eq_lock);

	if (!ok)
		fmd_eventq_drop(eq, eqe);
}

void
fmd_eventq_insert_at_time(fmd_eventq_t *eq, fmd_event_t *ep)
{
	uint_t evt = FMD_EVENT_TYPE(ep);
	hrtime_t hrt = fmd_event_hrtime(ep);
	fmd_eventqelem_t *eqe, *oqe;
	int ok;

	/*
	 * If this event queue is acting as /dev/null, bounce the reference
	 * count to free an unreferenced event and just return immediately.
	 */
	if (eq->eq_limit == 0) {
		fmd_event_hold(ep);
		fmd_event_rele(ep);
		return;
	}

	eqe = fmd_alloc(sizeof (fmd_eventqelem_t), FMD_SLEEP);
	fmd_event_hold(ep);
	eqe->eqe_event = ep;

	(void) pthread_mutex_lock(&eq->eq_lock);

	/*
	 * fmd makes no guarantees that events will be delivered in time order
	 * because its transport can make no such guarantees.  Instead we make
	 * a looser guarantee that an enqueued event will be dequeued before
	 * any newer *pending* events according to event time.  This permits us
	 * to state, for example, that a timer expiry event will be delivered
	 * prior to any enqueued event whose time is after the timer expired.
	 * We use a simple insertion sort for this task, as queue lengths are
	 * typically short and events do *tend* to be received chronologically.
	 */
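	/*
	 * The backward search below stops at the first element whose time is
	 * less than or equal to 'hrt' and inserts the new element after it,
	 * so events with equal times remain in arrival order.
	 */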
	for (oqe = fmd_list_prev(&eq->eq_list); oqe; oqe = fmd_list_prev(oqe)) {
		if (hrt >= fmd_event_hrtime(oqe->eqe_event))
			break; /* 'ep' is newer than the event in 'oqe' */
	}

	if ((ok = eq->eq_size < eq->eq_limit || evt != FMD_EVT_PROTOCOL) != 0) {
		if (evt != FMD_EVT_CTL)
			fmd_eventqstat_dispatch(eq);

		if (oqe == NULL)
			fmd_list_prepend(&eq->eq_list, eqe);
		else
			fmd_list_insert_after(&eq->eq_list, oqe, eqe);
		eq->eq_size++;
	}

	(void) pthread_cond_broadcast(&eq->eq_cv);
	(void) pthread_mutex_unlock(&eq->eq_lock);

	if (!ok)
		fmd_eventq_drop(eq, eqe);
}

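/*
 * Dequeue the next event for the queue's consumer thread.  This blocks while
 * the queue is empty or suspended, and returns NULL once the queue has been
 * aborted.  Control events are released here and never returned to callers.
 */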
fmd_event_t *
fmd_eventq_delete(fmd_eventq_t *eq)
{
	fmd_eventqstat_t *eqs = eq->eq_stats;
	hrtime_t new, delta;
	uint32_t wcnt;

	fmd_eventqelem_t *eqe;
	fmd_event_t *ep;
top:
	(void) pthread_mutex_lock(&eq->eq_lock);

	while (!(eq->eq_flags & FMD_EVENTQ_ABORT) &&
	    (eq->eq_size == 0 || (eq->eq_flags & FMD_EVENTQ_SUSPEND)))
		(void) pthread_cond_wait(&eq->eq_cv, &eq->eq_lock);

	if (eq->eq_flags & FMD_EVENTQ_ABORT) {
		(void) pthread_mutex_unlock(&eq->eq_lock);
		return (NULL);
	}

	eqe = fmd_list_next(&eq->eq_list);
	fmd_list_delete(&eq->eq_list, eqe);
	eq->eq_size--;

	(void) pthread_mutex_unlock(&eq->eq_lock);

	ep = eqe->eqe_event;
	fmd_free(eqe, sizeof (fmd_eventqelem_t));

	/*
	 * If we dequeued a control event, release it and go back to sleep.
	 * fmd_event_rele() on the event will block as described in fmd_ctl.c.
	 * This effectively renders control events invisible to our callers
	 * as well as to statistics and observability tools (e.g. fmstat(8)).
	 */
	if (FMD_EVENT_TYPE(ep) == FMD_EVT_CTL) {
		fmd_event_rele(ep);
		goto top;
	}

	/*
	 * Before returning, update our statistics.  This code is essentially
	 * kstat_waitq_to_runq(9F), except simplified because our queues are
	 * always consumed by a single thread (i.e. runq len == 1).
	 */
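	/*
	 * Note that the post-decrement below samples the wait count before
	 * the event we just removed is subtracted, matching the kstat wait
	 * queue accounting.
	 */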
	(void) pthread_mutex_lock(eq->eq_stats_lock);

	new = gethrtime();
	delta = new - eqs->eqs_wlastupdate.fmds_value.ui64;

	eqs->eqs_wlastupdate.fmds_value.ui64 = new;
	eqs->eqs_dlastupdate.fmds_value.ui64 = new;

	ASSERT(eqs->eqs_wcnt.fmds_value.ui32 != 0);
	wcnt = eqs->eqs_wcnt.fmds_value.ui32--;

	eqs->eqs_wlentime.fmds_value.ui64 += delta * wcnt;
	eqs->eqs_wtime.fmds_value.ui64 += delta;

	if (FMD_EVENT_TYPE(ep) == FMD_EVT_PROTOCOL)
		eqs->eqs_prdequeued.fmds_value.ui64++;

	eqs->eqs_dequeued.fmds_value.ui64++;
	(void) pthread_mutex_unlock(eq->eq_stats_lock);

	return (ep);
}

/*
 * Update statistics when an event is done being processed by the eventq's
 * consumer thread.  This is essentially kstat_runq_exit(9F) simplified for
 * our principle that a single thread consumes the queue (i.e. runq len == 1).
 */
void
fmd_eventq_done(fmd_eventq_t *eq)
{
	fmd_eventqstat_t *eqs = eq->eq_stats;
	hrtime_t new, delta;

	(void) pthread_mutex_lock(eq->eq_stats_lock);

	new = gethrtime();
	delta = new - eqs->eqs_dlastupdate.fmds_value.ui64;

	eqs->eqs_dlastupdate.fmds_value.ui64 = new;
	eqs->eqs_dtime.fmds_value.ui64 += delta;

	(void) pthread_mutex_unlock(eq->eq_stats_lock);
}

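/*
 * Remove and release every queued event matching the specified type and data,
 * as determined by fmd_event_match().
 */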
void
fmd_eventq_cancel(fmd_eventq_t *eq, uint_t type, void *data)
{
	fmd_eventqelem_t *eqe, *nqe;

	(void) pthread_mutex_lock(&eq->eq_lock);

	for (eqe = fmd_list_next(&eq->eq_list); eqe != NULL; eqe = nqe) {
		nqe = fmd_list_next(eqe);

		if (fmd_event_match(eqe->eqe_event, type, data)) {
			fmd_list_delete(&eq->eq_list, eqe);
			eq->eq_size--;
			fmd_event_rele(eqe->eqe_event);
			fmd_free(eqe, sizeof (fmd_eventqelem_t));
		}
	}

	(void) pthread_mutex_unlock(&eq->eq_lock);
}

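/*
 * Suspend and resume toggle FMD_EVENTQ_SUSPEND under eq_lock;
 * fmd_eventq_delete() blocks while the flag is set, and resume broadcasts on
 * eq_cv to wake any blocked consumer.
 */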
void
fmd_eventq_suspend(fmd_eventq_t *eq)
{
	(void) pthread_mutex_lock(&eq->eq_lock);
	eq->eq_flags |= FMD_EVENTQ_SUSPEND;
	(void) pthread_mutex_unlock(&eq->eq_lock);
}

void
fmd_eventq_resume(fmd_eventq_t *eq)
{
	(void) pthread_mutex_lock(&eq->eq_lock);
	eq->eq_flags &= ~FMD_EVENTQ_SUSPEND;
	(void) pthread_cond_broadcast(&eq->eq_cv);
	(void) pthread_mutex_unlock(&eq->eq_lock);
}

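/*
 * Abort the queue: discard any pending events, set FMD_EVENTQ_ABORT, and wake
 * any consumer blocked in fmd_eventq_delete(), which will then return NULL.
 * Nothing in this file ever clears the abort flag.
 */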
void
fmd_eventq_abort(fmd_eventq_t *eq)
{
	fmd_eventqelem_t *eqe;

	(void) pthread_mutex_lock(&eq->eq_lock);

	while ((eqe = fmd_list_next(&eq->eq_list)) != NULL) {
		fmd_list_delete(&eq->eq_list, eqe);
		fmd_event_rele(eqe->eqe_event);
		fmd_free(eqe, sizeof (fmd_eventqelem_t));
	}

	eq->eq_flags |= FMD_EVENTQ_ABORT;
	(void) pthread_cond_broadcast(&eq->eq_cv);
	(void) pthread_mutex_unlock(&eq->eq_lock);
}