/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/abd.h>
#include <sys/mmp.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/time.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_context.h>
#include <sys/callb.h>

/*
 * Multi-Modifier Protection (MMP) attempts to prevent a user from importing
 * or opening a pool on more than one host at a time.  In particular, it
 * prevents "zpool import -f" on a host from succeeding while the pool is
 * already imported on another host.  There are many other ways in which a
 * device could be used by two hosts for different purposes at the same time
 * resulting in pool damage.  This implementation does not attempt to detect
 * those cases.
 *
 * MMP operates by ensuring there are frequent visible changes on disk (a
 * "heartbeat") at all times, and by altering the import process to check
 * for these changes and fail the import when they are detected.  This
 * functionality is enabled by setting the 'multihost' pool property to on.
 *
 * Uberblocks written by the txg_sync thread always go into the first
 * (N-MMP_BLOCKS_PER_LABEL) slots; the remaining slots are reserved for MMP.
 * They are used to hold uberblocks which are exactly the same as the last
 * synced uberblock except that the ub_timestamp and ub_mmp_config are
 * frequently updated.  Like all other uberblocks, the slot is written with an
 * embedded checksum, and slots with invalid checksums are ignored.  This
 * provides the "heartbeat", with no risk of overwriting good uberblocks that
 * must be preserved, e.g. previous txgs and associated block pointers.
 *
 * Three optional fields are added to the uberblock structure: ub_mmp_magic,
 * ub_mmp_config, and ub_mmp_delay.  The ub_mmp_magic value allows zfs to tell
 * whether the other ub_mmp_* fields are valid.  The ub_mmp_config field tells
 * the importing host the settings of zfs_multihost_interval and
 * zfs_multihost_fail_intervals on the host which last had (or currently has)
 * the pool imported.  These determine how long a host must wait to detect
 * activity in the pool, before concluding the pool is not in use.  The
 * ub_mmp_delay field is a decaying average of the amount of time between
 * completion of successive MMP writes, in nanoseconds.  It indicates whether
 * MMP is enabled.
 *
 * During import an activity test may now be performed to determine if
 * the pool is in use.  The activity test is typically required if the
 * ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is
 * POOL_STATE_ACTIVE, and the pool is not a root pool.
 *
 * The activity test finds the "best" uberblock (highest txg, timestamp, and, if
 * ub_mmp_magic is valid, sequence number from ub_mmp_config).  It then waits
 * some time, and finds the "best" uberblock again.  If any of the mentioned
 * fields have different values in the newly read uberblock, the pool is in use
 * by another host and the import fails.  In order to ensure the accuracy of the
 * activity test, the default values result in an activity test duration of 20x
 * the mmp write interval.
 *
 * The duration of the "zpool import" activity test depends on the information
 * available in the "best" uberblock:
 *
 * 1) If uberblock was written by zfs-0.8 or newer and fail_intervals > 0:
 *    ub_mmp_config.fail_intervals * ub_mmp_config.multihost_interval * 2
 *
 *    In this case, a weak guarantee is provided.  Since the host which last had
 *    the pool imported will suspend the pool if no mmp writes land within
 *    fail_intervals * multihost_interval ms, the absence of writes during that
 *    time means either the pool is not imported, or it is imported but the pool
 *    is suspended and no further writes will occur.
 *
 *    Note that resuming the suspended pool on the remote host would invalidate
 *    this guarantee, and so it is not allowed.
 *
 *    The factor of 2 provides a conservative safety factor and derives from
 *    MMP_IMPORT_SAFETY_FACTOR.
 *
 * 2) If uberblock was written by zfs-0.8 or newer and fail_intervals == 0:
 *    (ub_mmp_config.multihost_interval + ub_mmp_delay) *
 *        zfs_multihost_import_intervals
 *
 *    In this case no guarantee can be provided.  However, as long as some
 *    devices are healthy and connected, it is likely that at least one write
 *    will land within (multihost_interval + mmp_delay) because
 *    multihost_interval is enough time for a write to be attempted to each
 *    leaf vdev, and mmp_delay is enough for one to land, based on past delays.
 *    Multiplying by zfs_multihost_import_intervals provides a conservative
 *    safety factor.
 *
 * 3) If uberblock was written by zfs-0.7:
 *    (zfs_multihost_interval + ub_mmp_delay) * zfs_multihost_import_intervals
 *
 *    The same logic as case #2 applies, but we do not know the remote tunables.
 *
 *    We use the local value for zfs_multihost_interval because the original MMP
 *    did not record this value in the uberblock.
 *
 *    ub_mmp_delay >= (zfs_multihost_interval / leaves), so if the other host
 *    has a much larger zfs_multihost_interval set, ub_mmp_delay will reflect
 *    that.  We will have waited enough time for zfs_multihost_import_intervals
 *    writes to be issued and all but one to land.
 *
 *    single device pool example delays
 *
 *    import_delay = (1 + 1) * 20   =  40s #defaults, no I/O delay
 *    import_delay = (1 + 10) * 20  = 220s #defaults, 10s I/O delay
 *    import_delay = (10 + 10) * 20 = 400s #10s multihost_interval,
 *                                          no I/O delay
 *
 *    100 device pool example delays
 *
 *    import_delay = (1 + .01) * 20 =  20s #defaults, no I/O delay
 *    import_delay = (1 + 10) * 20  = 220s #defaults, 10s I/O delay
 *    import_delay = (10 + .1) * 20 = 202s #10s multihost_interval,
 *                                          no I/O delay
 *
 * 4) Otherwise, this uberblock was written by a pre-MMP zfs:
 *    zfs_multihost_import_intervals * zfs_multihost_interval
 *
 *    In this case local tunables are used.  By default this product is 20s
 *    (20 intervals of 1s), long enough for a pool with any activity at all
 *    to write at least one uberblock.  No guarantee can be provided.
 *
 * Additionally, the duration is then extended by a random 25% to attempt to
 * detect simultaneous imports, for example when both partner hosts are
 * rebooted at the same time and automatically attempt to import the pool.
 */
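
/*
 * For illustration only, a minimal sketch of how the four cases above might
 * translate into code.  This is not the import path, which lives elsewhere;
 * the variable names are hypothetical, and the MMP_INTERVAL(),
 * MMP_FAIL_INT(), and MMP_FAIL_INT_VALID() accessors are assumed to match
 * the MMP_*_SET() macros used in mmp_write_uberblock() below.  The real
 * test additionally enforces the one-second floor and adds the random 25%
 * extension described above.
 *
 *	uint64_t intervals = MAX(zfs_multihost_import_intervals, 1);
 *	hrtime_t import_delay;
 *
 *	if (ub->ub_mmp_magic == MMP_MAGIC && MMP_FAIL_INT_VALID(ub) &&
 *	    MMP_FAIL_INT(ub) > 0)			(case 1)
 *		import_delay = MSEC2NSEC(MMP_FAIL_INT(ub) *
 *		    MMP_INTERVAL(ub)) * 2;
 *	else if (ub->ub_mmp_magic == MMP_MAGIC)		(case 2)
 *		import_delay = (MSEC2NSEC(MMP_INTERVAL(ub)) +
 *		    ub->ub_mmp_delay) * intervals;
 *	else if (ub->ub_mmp_delay != 0)			(case 3)
 *		import_delay = (MSEC2NSEC(zfs_multihost_interval) +
 *		    ub->ub_mmp_delay) * intervals;
 *	else						(case 4)
 *		import_delay = MSEC2NSEC(intervals *
 *		    zfs_multihost_interval);
 */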

/*
 * Used to control the frequency of mmp writes which are performed when the
 * 'multihost' pool property is on.  This is one factor used to determine the
 * length of the activity check during import.
 *
 * On average an mmp write will be issued for each leaf vdev every
 * zfs_multihost_interval milliseconds.  In practice, the observed period can
 * vary with the I/O load; this observed value is the ub_mmp_delay that is
 * stored in the uberblock.  The minimum allowed value is 100 ms.
 */
ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL;
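
/*
 * For intuition, a hedged example of the per-leaf pacing this tunable
 * implies (the values are assumed for illustration, not defaults from this
 * file): with zfs_multihost_interval = 1000 ms and 5 leaf vdevs,
 * mmp_thread() below schedules a write roughly every
 *
 *	mmp_interval / leaves = MSEC2NSEC(1000) / 5 = 200 ms
 *
 * rotating through the leaves, so each individual leaf is written about
 * once per second.
 */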

/*
 * Used to control the duration of the activity test on import.  Smaller values
 * of zfs_multihost_import_intervals will reduce the import time but increase
 * the risk of failing to detect an active pool.  The total activity check time
 * is never allowed to drop below one second.  A value of 0 is ignored and
 * treated as if it were set to 1.
 */
uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;

/*
 * Controls the behavior of the pool when mmp write failures or delays are
 * detected.
 *
 * When zfs_multihost_fail_intervals = 0, mmp write failures or delays are
 * ignored.  The failures will still be reported to the ZED which, depending on
 * its configuration, may take action such as suspending the pool or taking a
 * device offline.
 *
 * When zfs_multihost_fail_intervals > 0, the pool will be suspended if
 * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds pass
 * without a successful mmp write.  This guarantees the activity test will see
 * mmp writes if the pool is imported.  A value of 1 is ignored and treated as
 * if it were set to 2, because a single leaf vdev pool will issue a write once
 * per multihost_interval and thus any variation in latency would cause the
 * pool to be suspended.
 */
uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;
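
/*
 * A hedged worked example (the values are assumed for illustration): with
 * zfs_multihost_fail_intervals = 10 and zfs_multihost_interval = 1000 ms,
 * the failure window computed in mmp_thread() below is
 *
 *	mmp_fail_ns = 10 * MSEC2NSEC(1000) = 10,000,000,000 ns (10 s)
 *
 * and the pool is suspended via zio_suspend() once that much time passes
 * without a successful mmp write.
 */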

char *mmp_tag = "mmp_write_uberblock";
static void mmp_thread(void *arg);

void
mmp_init(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
	mmp->mmp_kstat_id = 1;

	/*
	 * mmp_write_done() calculates mmp_delay based on prior mmp_delay and
	 * the elapsed time since the last write.  For the first mmp write,
	 * there is no "last write", so we start with fake non-zero values.
	 */
	mmp->mmp_last_write = gethrtime();
	mmp->mmp_delay = MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval));
}

void
mmp_fini(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_destroy(&mmp->mmp_thread_lock);
	cv_destroy(&mmp->mmp_thread_cv);
	mutex_destroy(&mmp->mmp_io_lock);
}

static void
mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
	mutex_enter(&mmp->mmp_thread_lock);
}

static void
mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
{
	ASSERT(*mpp != NULL);
	*mpp = NULL;
	cv_broadcast(&mmp->mmp_thread_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &mmp->mmp_thread_lock */
	thread_exit();
}

void
mmp_thread_start(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	if (spa_writeable(spa)) {
		mutex_enter(&mmp->mmp_thread_lock);
		if (!mmp->mmp_thread) {
			mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
			    spa, 0, &p0, TS_RUN, minclsyspri);
			zfs_dbgmsg("MMP thread started pool '%s' "
			    "gethrtime %llu", spa_name(spa), gethrtime());
		}
		mutex_exit(&mmp->mmp_thread_lock);
	}
}

void
mmp_thread_stop(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	mmp->mmp_thread_exiting = 1;
	cv_broadcast(&mmp->mmp_thread_cv);

	while (mmp->mmp_thread) {
		cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
	}
	mutex_exit(&mmp->mmp_thread_lock);
	zfs_dbgmsg("MMP thread stopped pool '%s' gethrtime %llu",
	    spa_name(spa), gethrtime());

	ASSERT(mmp->mmp_thread == NULL);
	mmp->mmp_thread_exiting = 0;
}

typedef enum mmp_vdev_state_flag {
	MMP_FAIL_NOT_WRITABLE	= (1 << 0),
	MMP_FAIL_WRITE_PENDING	= (1 << 1),
} mmp_vdev_state_flag_t;

/*
 * Find a leaf vdev to write an MMP block to.  It must not have an outstanding
 * mmp write (if it does, a new write is likely to block as well).  If there is
 * no usable leaf, a nonzero error value is returned.  The error value returned
 * is a bit field.
 *
 * MMP_FAIL_WRITE_PENDING   One or more leaf vdevs are writeable, but have an
 *                          outstanding MMP write.
 * MMP_FAIL_NOT_WRITABLE    One or more leaf vdevs are not writeable.
 *
 * For example, if some leaves are unwritable and the rest have writes
 * pending, the return value is
 * (MMP_FAIL_NOT_WRITABLE | MMP_FAIL_WRITE_PENDING).
 */

static int
mmp_next_leaf(spa_t *spa)
{
	vdev_t *leaf;
	vdev_t *starting_leaf;
	int fail_mask = 0;

	ASSERT(MUTEX_HELD(&spa->spa_mmp.mmp_io_lock));
	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER));
	ASSERT(list_link_active(&spa->spa_leaf_list.list_head) == B_TRUE);
	ASSERT(!list_is_empty(&spa->spa_leaf_list));

	if (spa->spa_mmp.mmp_leaf_last_gen != spa->spa_leaf_list_gen) {
		spa->spa_mmp.mmp_last_leaf = list_head(&spa->spa_leaf_list);
		spa->spa_mmp.mmp_leaf_last_gen = spa->spa_leaf_list_gen;
	}

	leaf = spa->spa_mmp.mmp_last_leaf;
	if (leaf == NULL)
		leaf = list_head(&spa->spa_leaf_list);
	starting_leaf = leaf;

	do {
		leaf = list_next(&spa->spa_leaf_list, leaf);
		if (leaf == NULL)
			leaf = list_head(&spa->spa_leaf_list);

		if (!vdev_writeable(leaf)) {
			fail_mask |= MMP_FAIL_NOT_WRITABLE;
		} else if (leaf->vdev_mmp_pending != 0) {
			fail_mask |= MMP_FAIL_WRITE_PENDING;
		} else {
			spa->spa_mmp.mmp_last_leaf = leaf;
			return (0);
		}
	} while (leaf != starting_leaf);

	ASSERT(fail_mask);

	return (fail_mask);
}

/*
 * MMP writes are issued on a fixed schedule, but may complete at variable,
 * much longer, intervals.  The mmp_delay captures long periods between
 * successful writes for any reason, including disk latency, scheduling delays,
 * etc.
 *
 * The mmp_delay is usually calculated as a decaying average, but if the latest
 * delay is higher we do not average it, so that we do not hide sudden spikes
 * which the importing host must wait for.
 *
 * If writes are occurring frequently, such as due to a high rate of txg syncs,
 * the mmp_delay could become very small.  Since those short delays depend on
 * activity we cannot count on, we never allow mmp_delay to get lower than the
 * rate expected if only mmp_thread writes occur.
 *
 * If an mmp write was skipped or fails, and we have already waited longer than
 * mmp_delay, we need to update it so the next write reflects the longer delay.
 *
 * Do not set mmp_delay if the multihost property is not on, so as not to
 * trigger an activity check on import.
 */
static void
mmp_delay_update(spa_t *spa, boolean_t write_completed)
{
	mmp_thread_t *mts = &spa->spa_mmp;
	hrtime_t delay = gethrtime() - mts->mmp_last_write;

	ASSERT(MUTEX_HELD(&mts->mmp_io_lock));

	if (spa_multihost(spa) == B_FALSE) {
		mts->mmp_delay = 0;
		return;
	}

	if (delay > mts->mmp_delay)
		mts->mmp_delay = delay;

	if (write_completed == B_FALSE)
		return;

	mts->mmp_last_write = gethrtime();

	/*
	 * strictly less than, in case delay was changed above.
	 */
	if (delay < mts->mmp_delay) {
		hrtime_t min_delay =
		    MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval)) /
		    MAX(1, vdev_count_leaves(spa));
		mts->mmp_delay = MAX(((delay + mts->mmp_delay * 127) / 128),
		    min_delay);
	}
}
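
/*
 * A hedged worked example of the decaying average above (the numbers are
 * assumed for illustration): with zfs_multihost_interval = 1000 ms and 4
 * leaves, min_delay = MSEC2NSEC(1000) / 4 = 250 ms.  If the previous
 * mmp_delay is 300 ms and the latest write landed after 200 ms, then
 *
 *	mmp_delay = MAX((200ms + 300ms * 127) / 128, 250ms) ~= 299.2 ms
 *
 * so a single fast write barely moves the average, while a slower write
 * (delay > mmp_delay) would have replaced it outright.
 */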

static void
mmp_write_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	mmp_thread_t *mts = zio->io_private;

	mutex_enter(&mts->mmp_io_lock);
	uint64_t mmp_kstat_id = vd->vdev_mmp_kstat_id;
	hrtime_t mmp_write_duration = gethrtime() - vd->vdev_mmp_pending;

	mmp_delay_update(spa, (zio->io_error == 0));

	vd->vdev_mmp_pending = 0;
	vd->vdev_mmp_kstat_id = 0;

	mutex_exit(&mts->mmp_io_lock);
	spa_config_exit(spa, SCL_STATE, mmp_tag);

	/*
	 * ZoL porting note: the spa_mmp_history update that would consume
	 * mmp_kstat_id and mmp_write_duration (snapshotted above while the
	 * lock was held) is TBD, as in mmp_write_uberblock() below.
	 */

	abd_free(zio->io_abd);
}

/*
 * When the uberblock on-disk is updated by a spa_sync,
 * creating a new "best" uberblock, update the one stored
 * in the mmp thread state, used for mmp writes.
 */
void
mmp_update_uberblock(spa_t *spa, uberblock_t *ub)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_io_lock);
	mmp->mmp_ub = *ub;
	mmp->mmp_seq = 1;
	mmp->mmp_ub.ub_timestamp = gethrestime_sec();
	mmp_delay_update(spa, B_TRUE);
	mutex_exit(&mmp->mmp_io_lock);
}

/*
 * Choose a random vdev, label, and MMP block, and write over it
 * with a copy of the last-synced uberblock, whose timestamp
 * has been updated to reflect that the pool is in use.
 */
static void
mmp_write_uberblock(spa_t *spa)
{
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
	mmp_thread_t *mmp = &spa->spa_mmp;
	uberblock_t *ub;
	vdev_t *vd = NULL;
	int label, error;
	uint64_t offset;

	hrtime_t lock_acquire_time = gethrtime();
	spa_config_enter(spa, SCL_STATE, mmp_tag, RW_READER);
	lock_acquire_time = gethrtime() - lock_acquire_time;
	if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
		zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns "
		    "gethrtime %llu", spa_name(spa), lock_acquire_time,
		    gethrtime());

	mutex_enter(&mmp->mmp_io_lock);

	error = mmp_next_leaf(spa);

	/*
	 * spa_mmp_history has two types of entries:
	 * Issued MMP write: records time issued, error status, etc.
	 * Skipped MMP write: an MMP write could not be issued because no
	 * suitable leaf vdev was available.  See comment above struct
	 * spa_mmp_history for details.
	 */

	if (error) {
		mmp_delay_update(spa, B_FALSE);
		if (mmp->mmp_skip_error == error) {
			/*
			 * ZoL porting note: the following is TBD
			 * spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1);
			 */
		} else {
			mmp->mmp_skip_error = error;
			/*
			 * ZoL porting note: the following is TBD
			 * spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg,
			 * gethrestime_sec(), mmp->mmp_delay, NULL, 0,
			 * mmp->mmp_kstat_id++, error);
			 */
			zfs_dbgmsg("MMP error choosing leaf pool '%s' "
			    "gethrtime %llu fail_mask %#x", spa_name(spa),
			    gethrtime(), error);
		}
		mutex_exit(&mmp->mmp_io_lock);
		spa_config_exit(spa, SCL_STATE, mmp_tag);
		return;
	}

	vd = spa->spa_mmp.mmp_last_leaf;
	if (mmp->mmp_skip_error != 0) {
		mmp->mmp_skip_error = 0;
		zfs_dbgmsg("MMP write after skipping due to unavailable "
		    "leaves, pool '%s' gethrtime %llu leaf %#llu",
		    spa_name(spa), gethrtime(), vd->vdev_guid);
	}

	if (mmp->mmp_zio_root == NULL)
		mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
		    flags | ZIO_FLAG_GODFATHER);

	if (mmp->mmp_ub.ub_timestamp != gethrestime_sec()) {
		/*
		 * Want to reset mmp_seq when timestamp advances because after
		 * an mmp_seq wrap new values will not be chosen by
		 * uberblock_compare() as the "best".
		 */
		mmp->mmp_ub.ub_timestamp = gethrestime_sec();
		mmp->mmp_seq = 1;
	}

	ub = &mmp->mmp_ub;
	ub->ub_mmp_magic = MMP_MAGIC;
	ub->ub_mmp_delay = mmp->mmp_delay;
	ub->ub_mmp_config = MMP_SEQ_SET(mmp->mmp_seq) |
	    MMP_INTERVAL_SET(MMP_INTERVAL_OK(zfs_multihost_interval)) |
	    MMP_FAIL_INT_SET(MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals));
	vd->vdev_mmp_pending = gethrtime();
	vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id;

	zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
	abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
	abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
	abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));

	mmp->mmp_seq++;
	mmp->mmp_kstat_id++;
	mutex_exit(&mmp->mmp_io_lock);

	offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
	    MMP_BLOCKS_PER_LABEL + spa_get_random(MMP_BLOCKS_PER_LABEL));

	label = spa_get_random(VDEV_LABELS);
	vdev_label_write(zio, vd, label, ub_abd, offset,
	    VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
	    flags | ZIO_FLAG_DONT_PROPAGATE);

	/*
	 * ZoL porting note: the following is TBD
	 * (void) spa_mmp_history_add(spa, ub->ub_txg, ub->ub_timestamp,
	 * ub->ub_mmp_delay, vd, label, vd->vdev_mmp_kstat_id, 0);
	 */

	zio_nowait(zio);
}
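
/*
 * A hedged note on the slot selection above (the concrete numbers are an
 * assumption for illustration, not taken from this file): the MMP slot
 * index is drawn from the last MMP_BLOCKS_PER_LABEL entries of the
 * uberblock ring.  For example, with a 128-entry ring and
 * MMP_BLOCKS_PER_LABEL = 1, the slot index is always
 *
 *	VDEV_UBERBLOCK_COUNT(vd) - 1 + spa_get_random(1) = 127
 *
 * in one of the labels chosen by spa_get_random(VDEV_LABELS), so the
 * txg_sync uberblocks in the earlier slots are never overwritten by MMP.
 */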

static void
mmp_thread(void *arg)
{
	spa_t *spa = (spa_t *)arg;
	mmp_thread_t *mmp = &spa->spa_mmp;
	boolean_t suspended = spa_suspended(spa);
	boolean_t multihost = spa_multihost(spa);
	uint64_t mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
	    zfs_multihost_interval));
	uint32_t mmp_fail_intervals = MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals);
	hrtime_t mmp_fail_ns = mmp_fail_intervals * mmp_interval;
	boolean_t last_spa_suspended = suspended;
	boolean_t last_spa_multihost = multihost;
	uint64_t last_mmp_interval = mmp_interval;
	uint32_t last_mmp_fail_intervals = mmp_fail_intervals;
	hrtime_t last_mmp_fail_ns = mmp_fail_ns;
	callb_cpr_t cpr;
	int skip_wait = 0;

	mmp_thread_enter(mmp, &cpr);

	while (!mmp->mmp_thread_exiting) {
		hrtime_t next_time = gethrtime() +
		    MSEC2NSEC(MMP_DEFAULT_INTERVAL);
		int leaves = MAX(vdev_count_leaves(spa), 1);

		/* Detect changes in tunables or state */

		last_spa_suspended = suspended;
		last_spa_multihost = multihost;
		suspended = spa_suspended(spa);
		multihost = spa_multihost(spa);

		last_mmp_interval = mmp_interval;
		last_mmp_fail_intervals = mmp_fail_intervals;
		last_mmp_fail_ns = mmp_fail_ns;
		mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
		    zfs_multihost_interval));
		mmp_fail_intervals = MMP_FAIL_INTVS_OK(
		    zfs_multihost_fail_intervals);

		/* Smooth so pool is not suspended when reducing tunables */
		if (mmp_fail_intervals * mmp_interval < mmp_fail_ns) {
			mmp_fail_ns = (mmp_fail_ns * 31 +
			    mmp_fail_intervals * mmp_interval) / 32;
		} else {
			mmp_fail_ns = mmp_fail_intervals *
			    mmp_interval;
		}
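
		/*
		 * A hedged worked example of the smoothing above (the
		 * numbers are assumed): if the operator drops the failure
		 * window from 10s to 5s, one pass yields
		 *
		 *	(10s * 31 + 5s) / 32 ~= 9.84s
		 *
		 * so mmp_fail_ns decays toward the new value over many
		 * iterations instead of dropping at once, which could
		 * otherwise suspend a pool whose last successful write
		 * landed, say, 6s ago.
		 */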

		if (mmp_interval != last_mmp_interval ||
		    mmp_fail_intervals != last_mmp_fail_intervals) {
			/*
			 * We want other hosts to see new tunables as quickly as
			 * possible.  Write out at higher frequency than usual.
			 */
			skip_wait += leaves;
		}

		if (multihost)
			next_time = gethrtime() + mmp_interval / leaves;

		if (mmp_fail_ns != last_mmp_fail_ns) {
			zfs_dbgmsg("MMP interval change pool '%s' "
			    "gethrtime %llu last_mmp_interval %llu "
			    "mmp_interval %llu last_mmp_fail_intervals %u "
			    "mmp_fail_intervals %u mmp_fail_ns %llu "
			    "skip_wait %d leaves %d next_time %llu",
			    spa_name(spa), gethrtime(), last_mmp_interval,
			    mmp_interval, last_mmp_fail_intervals,
			    mmp_fail_intervals, mmp_fail_ns, skip_wait, leaves,
			    next_time);
		}

		/*
		 * MMP off => on, or suspended => !suspended:
		 * No writes occurred recently.  Update mmp_last_write to give
		 * us some time to try.
		 */
		if ((!last_spa_multihost && multihost) ||
		    (last_spa_suspended && !suspended)) {
			zfs_dbgmsg("MMP state change pool '%s': gethrtime %llu "
			    "last_spa_multihost %u multihost %u "
			    "last_spa_suspended %u suspended %u",
			    spa_name(spa), gethrtime(), last_spa_multihost,
			    multihost, last_spa_suspended, suspended);
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_last_write = gethrtime();
			mmp->mmp_delay = mmp_interval;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * MMP on => off:
		 * mmp_delay == 0 tells importing node to skip activity check.
		 */
		if (last_spa_multihost && !multihost) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_delay = 0;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * Suspend the pool if no MMP write has succeeded in over
		 * mmp_interval * mmp_fail_intervals nanoseconds.
		 */
		if (multihost && !suspended && mmp_fail_intervals &&
		    (gethrtime() - mmp->mmp_last_write) > mmp_fail_ns) {
			zfs_dbgmsg("MMP suspending pool '%s': gethrtime %llu "
			    "mmp_last_write %llu mmp_interval %llu "
			    "mmp_fail_intervals %llu mmp_fail_ns %llu",
			    spa_name(spa), (u_longlong_t)gethrtime(),
			    (u_longlong_t)mmp->mmp_last_write,
			    (u_longlong_t)mmp_interval,
			    (u_longlong_t)mmp_fail_intervals,
			    (u_longlong_t)mmp_fail_ns);
			cmn_err(CE_WARN, "MMP writes to pool '%s' have not "
			    "succeeded in over %llu ms; suspending pool. "
			    "Hrtime %llu",
			    spa_name(spa),
			    NSEC2MSEC(gethrtime() - mmp->mmp_last_write),
			    gethrtime());
			zio_suspend(spa, NULL, ZIO_SUSPEND_MMP);
		}

		if (multihost && !suspended)
			mmp_write_uberblock(spa);

		if (skip_wait > 0) {
			next_time = gethrtime() + MSEC2NSEC(MMP_MIN_INTERVAL) /
			    leaves;
			skip_wait--;
		}

		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_sig_hrtime(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, next_time);
		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
	}

	/* Outstanding writes are allowed to complete. */
	if (mmp->mmp_zio_root)
		zio_wait(mmp->mmp_zio_root);

	mmp->mmp_zio_root = NULL;
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
}

/*
 * Signal the MMP thread to wake it when it is sleeping on its cv.  Used
 * when a module parameter has changed and we want the thread to know about
 * it.  Only signal if the pool is active and the mmp thread is running;
 * otherwise there is no thread to wake.
 */
static void
mmp_signal_thread(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	if (mmp->mmp_thread)
		cv_broadcast(&mmp->mmp_thread_cv);
	mutex_exit(&mmp->mmp_thread_lock);
}

void
mmp_signal_all_threads(void)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa))) {
		if (spa->spa_state == POOL_STATE_ACTIVE)
			mmp_signal_thread(spa);
	}
	mutex_exit(&spa_namespace_lock);
}