txg.c revision ce636f8b38e8c9ff484e880d9abb27251a882860
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/callb.h>

/*
 * Pool-wide transaction groups.
 */

static void txg_sync_thread(dsl_pool_t *dp);
static void txg_quiesce_thread(dsl_pool_t *dp);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;
	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = kmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT(tx->tx_threads == 0);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	kmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT(tx->tx_threads == 0);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 32<<10, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, minclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

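/*
 * CPR (suspend/resume) helpers shared by the sync and quiesce threads:
 * txg_thread_enter() registers the thread as CPR-safe and takes
 * tx_sync_lock; txg_thread_wait() sleeps on a cv (optionally with a
 * timeout in ticks) while remaining CPR-safe; txg_thread_exit() clears
 * the thread pointer, wakes anyone waiting in txg_sync_stop(), drops
 * the lock, and terminates the thread.
 */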
static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time)
		(void) cv_timedwait(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	else
		cv_wait(cv, &tx->tx_sync_lock);

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);
	/*
	 * Finish off any work in progress.
	 */
	ASSERT(tx->tx_threads == 2);

	/*
	 * We need to ensure that we've vacated the deferred space_maps.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT(tx->tx_threads == 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

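/*
 * Get a handle on the currently open txg and charge the caller's
 * activity to it.  Returns with the per-CPU tc_lock held, which keeps
 * the quiesce thread from advancing tx_open_txg until the caller drops
 * it via txg_rele_to_quiesce().  The hold itself (tc_count) keeps the
 * txg from quiescing until txg_rele_to_sync().  A sketch of the
 * expected caller protocol (the DMU's tx assign/commit path is the
 * usual client):
 *
 *	txg = txg_hold_open(dp, &th);
 *	txg_rele_to_quiesce(&th);	-- drop tc_lock
 *	... modify data charged to txg ...
 *	txg_rele_to_sync(&th);		-- allow txg to quiesce
 */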
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc = &tx->tx_cpu[CPU_SEQID];
	uint64_t txg;

	mutex_enter(&tc->tc_lock);

	txg = tx->tx_open_txg;
	tc->tc_count[txg & TXG_MASK]++;

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

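/*
 * Drop the tc_lock acquired by txg_hold_open() so the quiesce thread
 * may open the next txg; our hold still prevents this txg from
 * quiescing until txg_rele_to_sync().
 */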
void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	mutex_exit(&tc->tc_lock);
}

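/*
 * Splice the caller's commit callbacks onto this txg's per-CPU
 * callback list; they will run after the txg syncs (see
 * txg_dispatch_callbacks()).
 */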
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

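/*
 * Release the hold taken in txg_hold_open().  When the last hold on
 * this txg is released, wake the quiesce thread waiting in
 * txg_quiesce().
 */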
void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

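/*
 * Quiesce 'txg': advance tx_open_txg so no new holds can be taken on
 * 'txg', then wait for every existing hold to be released.
 */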
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tx_cpu locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_lock);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}
}

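/*
 * Taskq worker: run one batch of commit callbacks, then free the list.
 */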
static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/* No need to lock tx_cpu_t at this point */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    max_ncpus, minclsyspri, max_ncpus, max_ncpus * 2,
			    TASKQ_PREPOPULATE);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(&tc->tc_callbacks[g], cb_list);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

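/*
 * Sync thread: take each quiesced txg handed off by the quiesce
 * thread and write it out via spa_sync(), pacing itself so that, in
 * the absence of demand, roughly one txg is synced every
 * zfs_txg_timeout seconds.
 */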
static void
txg_sync_thread(dsl_pool_t *dp)
{
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	uint64_t start, delta;

	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		uint64_t timer, timeout = zfs_txg_timeout * hz;
		uint64_t txg;

		/*
		 * We sync when we're scanning, when someone is waiting
		 * on us, when the quiesce thread has handed off a txg
		 * to us, or when we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    tx->tx_quiesced_txg == 0) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}

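/*
 * Quiesce thread: whenever a new txg is wanted, quiesce the currently
 * open txg and hand it off to the sync thread.
 */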
static void
txg_quiesce_thread(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    tx->tx_quiesced_txg != 0))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiesced_txg = txg;
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by 'ticks' if we are still in the open transaction
 * group and there is already a waiting txg quiescing or quiesced.  Abort
 * the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, int ticks)
{
	tx_state_t *tx = &dp->dp_tx;
	clock_t timeout = ddi_get_lbolt() + ticks;

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (ddi_get_lbolt() < timeout &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp))
		(void) cv_timedwait(&tx->tx_quiesce_more_cv, &tx->tx_sync_lock,
		    timeout);

	mutex_exit(&tx->tx_sync_lock);
}

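/*
 * Wait until 'txg' has been synced to disk.  If 'txg' is 0, wait for
 * tx_open_txg + TXG_DEFER_SIZE instead, far enough ahead that the
 * deferred space maps have been vacated as well (cf. txg_sync_stop()).
 */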
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%p\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

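/*
 * Wait until 'txg' is the open txg.  If 'txg' is 0, wait for the txg
 * following the currently open one, prodding the quiesce thread as
 * needed.
 */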
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);
	ASSERT(tx->tx_threads == 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
	}
	mutex_exit(&tx->tx_sync_lock);
}

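/*
 * Returns B_TRUE if someone is waiting for the open txg to quiesce but
 * it has not yet done so.
 */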
boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

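/*
 * Returns B_TRUE if there is sync work outstanding: either someone is
 * waiting on a txg at or beyond the one currently syncing, or a
 * quiesced txg is waiting to be picked up by the sync thread.
 */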
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty(tl, t));

	mutex_destroy(&tl->tl_lock);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

/*
 * Add an entry to the list.
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}

/*
 * Add an entry to the end of the list (walks list to find end).
 * Returns 0 if it's a new entry, 1 if it's already there.
 */
int
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	int already_on_list;

	mutex_enter(&tl->tl_lock);
	already_on_list = tn->tn_member[t];
	if (!already_on_list) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (already_on_list);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

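/*
 * Returns nonzero if 'p' is on the list for the given txg.
 */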
int
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	return (tn->tn_member[t]);
}

/*
 * Walk a txg list -- only safe if you know it's not changing.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = tl->tl_head[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	tn = tn->tn_next[t];

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}