/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright 2019 Joyent, Inc.
 */

#ifndef _SYS_SPA_IMPL_H
#define	_SYS_SPA_IMPL_H

#include <sys/spa.h>
#include <sys/spa_checkpoint.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev.h>
#include <sys/vdev_removal.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/refcount.h>
#include <sys/bplist.h>
#include <sys/bpobj.h>
#include <sys/dsl_crypt.h>
#include <sys/zfeature.h>
#include <sys/zthr.h>
#include <zfeature_common.h>

#ifdef	__cplusplus
extern "C" {
#endif

typedef struct spa_error_entry {
	zbookmark_phys_t	se_bookmark;
	char			*se_name;
	avl_node_t		se_avl;
} spa_error_entry_t;

typedef struct spa_history_phys {
	uint64_t sh_pool_create_len;	/* ending offset of zpool create */
	uint64_t sh_phys_max_off;	/* physical EOF */
	uint64_t sh_bof;		/* logical BOF */
	uint64_t sh_eof;		/* logical EOF */
	uint64_t sh_records_lost;	/* num of records overwritten */
} spa_history_phys_t;

/*
 * All members must be uint64_t, for byteswap purposes.
 */
typedef struct spa_removing_phys {
	uint64_t sr_state; /* dsl_scan_state_t */

	/*
	 * The vdev ID that we most recently attempted to remove,
	 * or -1 if no removal has been attempted.
	 */
	uint64_t sr_removing_vdev;

	/*
	 * The vdev ID that we most recently successfully removed,
	 * or -1 if no devices have been removed.
	 */
	uint64_t sr_prev_indirect_vdev;

	uint64_t sr_start_time;
	uint64_t sr_end_time;

	/*
	 * Note that we can not use the space map's or indirect mapping's
	 * accounting as a substitute for these values, because we need to
	 * count frees of not-yet-copied data as though it did the copy.
	 * Otherwise, we could get into a situation where copied > to_copy,
	 * or we complete before copied == to_copy.
	 */
	uint64_t sr_to_copy; /* bytes that need to be copied */
	uint64_t sr_copied; /* bytes that have been copied or freed */
} spa_removing_phys_t;
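
/*
 * Removal progress can be derived from the two counters above.  The
 * following is an illustrative sketch only; the helper is hypothetical
 * and not part of this header or of any ZFS library:
 *
 *	static uint64_t
 *	removal_progress_pct(const spa_removing_phys_t *srp)
 *	{
 *		if (srp->sr_to_copy == 0)
 *			return (100);
 *		return (srp->sr_copied * 100 / srp->sr_to_copy);
 *	}
 *
 * Per the accounting rule above, a removal is complete precisely when
 * sr_copied == sr_to_copy.
 */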

/*
 * This struct is stored as an entry in the DMU_POOL_DIRECTORY_OBJECT
 * (with key DMU_POOL_CONDENSING_INDIRECT).  It is present if a condense
 * of an indirect vdev's mapping object is in progress.
 */
typedef struct spa_condensing_indirect_phys {
	/*
	 * The vdev ID of the indirect vdev whose indirect mapping is
	 * being condensed.
	 */
	uint64_t	scip_vdev;

	/*
	 * The vdev's old obsolete spacemap.  This spacemap's contents are
	 * being integrated into the new mapping.
	 */
	uint64_t	scip_prev_obsolete_sm_object;

	/*
	 * The new mapping object that is being created.
	 */
	uint64_t	scip_next_mapping_object;
} spa_condensing_indirect_phys_t;

struct spa_aux_vdev {
	uint64_t	sav_object;		/* MOS object for device list */
	nvlist_t	*sav_config;		/* cached device config */
	vdev_t		**sav_vdevs;		/* devices */
	int		sav_count;		/* number of devices */
	boolean_t	sav_sync;		/* sync the device list */
	nvlist_t	**sav_pending;		/* pending device additions */
	uint_t		sav_npending;		/* # pending devices */
};

typedef struct spa_config_lock {
	kmutex_t	scl_lock;
	kthread_t	*scl_writer;
	int		scl_write_wanted;
	kcondvar_t	scl_cv;
	zfs_refcount_t	scl_count;
} spa_config_lock_t;
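
/*
 * Illustrative usage sketch (hedged): each of the SCL_LOCKS locks in
 * spa_config_lock[] below can be held by multiple readers or a single
 * writer, tracked by scl_count and scl_writer.  Callers take them
 * through spa_config_enter()/spa_config_exit() (see spa_misc.c):
 *
 *	spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
 *	... examine the vdev tree ...
 *	spa_config_exit(spa, SCL_ALL, FTAG);
 */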

typedef struct spa_config_dirent {
	list_node_t	scd_link;
	char		*scd_path;
} spa_config_dirent_t;

typedef enum zio_taskq_type {
	ZIO_TASKQ_ISSUE = 0,
	ZIO_TASKQ_ISSUE_HIGH,
	ZIO_TASKQ_INTERRUPT,
	ZIO_TASKQ_INTERRUPT_HIGH,
	ZIO_TASKQ_TYPES
} zio_taskq_type_t;
/*
 * State machine for the zpool-poolname process.  The state transitions
 * are as follows:
 *
 *	From		   To			Routine
 *	PROC_NONE	-> PROC_CREATED		spa_activate()
 *	PROC_CREATED	-> PROC_ACTIVE		spa_thread()
 *	PROC_ACTIVE	-> PROC_DEACTIVATE	spa_deactivate()
 *	PROC_DEACTIVATE	-> PROC_GONE		spa_thread()
 *	PROC_GONE	-> PROC_NONE		spa_deactivate()
 */
typedef enum spa_proc_state {
	SPA_PROC_NONE,		/* spa_proc = &p0, no process created */
	SPA_PROC_CREATED,	/* spa_activate() has proc, is waiting */
	SPA_PROC_ACTIVE,	/* taskqs created, spa_proc set */
	SPA_PROC_DEACTIVATE,	/* spa_deactivate() requests process exit */
	SPA_PROC_GONE		/* spa_thread() is exiting, spa_proc = &p0 */
} spa_proc_state_t;
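
/*
 * Illustrative sketch (hedged; this mirrors the wait loop in
 * spa_activate() in spa.c, and is not itself part of this header):
 * a caller waits under spa_proc_lock for spa_thread() to move the
 * state machine past SPA_PROC_CREATED:
 *
 *	mutex_enter(&spa->spa_proc_lock);
 *	while (spa->spa_proc_state == SPA_PROC_CREATED)
 *		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
 *	mutex_exit(&spa->spa_proc_lock);
 */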

typedef struct spa_taskqs {
	uint_t stqs_count;
	taskq_t **stqs_taskq;
} spa_taskqs_t;

typedef enum spa_all_vdev_zap_action {
	AVZ_ACTION_NONE = 0,
	AVZ_ACTION_DESTROY,	/* Destroy all per-vdev ZAPs and the AVZ. */
	AVZ_ACTION_REBUILD,	/* Populate the new AVZ, see spa_avz_rebuild */
	AVZ_ACTION_INITIALIZE
} spa_avz_action_t;

typedef enum spa_config_source {
	SPA_CONFIG_SRC_NONE = 0,
	SPA_CONFIG_SRC_SCAN,		/* scan of path (default: /dev/dsk) */
	SPA_CONFIG_SRC_CACHEFILE,	/* any cachefile */
	SPA_CONFIG_SRC_TRYIMPORT,	/* returned from call to tryimport */
	SPA_CONFIG_SRC_SPLIT,		/* new pool in a pool split */
	SPA_CONFIG_SRC_MOS		/* MOS, but not always from right txg */
} spa_config_source_t;

struct spa {
	/*
	 * Fields protected by spa_namespace_lock.
	 */
	char		spa_name[ZFS_MAX_DATASET_NAME_LEN];	/* pool name */
	char		*spa_comment;		/* comment */
	avl_node_t	spa_avl;		/* node in spa_namespace_avl */
	nvlist_t	*spa_config;		/* last synced config */
	nvlist_t	*spa_config_syncing;	/* currently syncing config */
	nvlist_t	*spa_config_splitting;	/* config for splitting */
	nvlist_t	*spa_load_info;		/* info and errors from load */
	uint64_t	spa_config_txg;		/* txg of last config change */
	int		spa_sync_pass;		/* iterate-to-convergence */
	pool_state_t	spa_state;		/* pool state */
	int		spa_inject_ref;		/* injection references */
	uint8_t		spa_sync_on;		/* sync threads are running */
	spa_load_state_t spa_load_state;	/* current load operation */
	boolean_t	spa_indirect_vdevs_loaded; /* mappings loaded? */
	boolean_t	spa_trust_config;	/* do we trust vdev tree? */
	spa_config_source_t spa_config_source;	/* where config comes from? */
	uint64_t	spa_import_flags;	/* import specific flags */
	spa_taskqs_t	spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
	dsl_pool_t	*spa_dsl_pool;
	boolean_t	spa_is_initializing;	/* true while opening pool */
	metaslab_class_t *spa_normal_class;	/* normal data class */
	metaslab_class_t *spa_log_class;	/* intent log data class */
	metaslab_class_t *spa_special_class;	/* special allocation class */
	metaslab_class_t *spa_dedup_class;	/* dedup allocation class */
	uint64_t	spa_first_txg;		/* first txg after spa_open() */
	uint64_t	spa_final_txg;		/* txg of export/destroy */
	uint64_t	spa_freeze_txg;		/* freeze pool at this txg */
	uint64_t	spa_load_max_txg;	/* best initial ub_txg */
	uint64_t	spa_claim_max_txg;	/* highest claimed birth txg */
	timespec_t	spa_loaded_ts;		/* 1st successful open time */
	objset_t	*spa_meta_objset;	/* copy of dp->dp_meta_objset */
	kmutex_t	spa_evicting_os_lock;	/* Evicting objset list lock */
	list_t		spa_evicting_os_list;	/* Objsets being evicted. */
	kcondvar_t	spa_evicting_os_cv;	/* Objset Eviction Completion */
	txg_list_t	spa_vdev_txg_list;	/* per-txg dirty vdev list */
	vdev_t		*spa_root_vdev;		/* top-level vdev container */
	int		spa_min_ashift;		/* of vdevs in normal class */
	int		spa_max_ashift;		/* of vdevs in normal class */
	uint64_t	spa_config_guid;	/* config pool guid */
	uint64_t	spa_load_guid;		/* spa_load initialized guid */
	uint64_t	spa_last_synced_guid;	/* last synced guid */
	list_t		spa_config_dirty_list;	/* vdevs with dirty config */
	list_t		spa_state_dirty_list;	/* vdevs with dirty state */
	/*
	 * spa_alloc_locks and spa_alloc_trees are arrays, whose lengths are
	 * stored in spa_alloc_count. There is one tree and one lock for each
	 * allocator, to help improve allocation performance in write-heavy
	 * workloads.
	 */
	kmutex_t	*spa_alloc_locks;
	avl_tree_t	*spa_alloc_trees;
	int		spa_alloc_count;

	spa_aux_vdev_t	spa_spares;		/* hot spares */
	spa_aux_vdev_t	spa_l2cache;		/* L2ARC cache devices */
	hrtime_t	spa_spares_last_polled;	/* time spares last polled */
	nvlist_t	*spa_label_features;	/* Features for reading MOS */
	uint64_t	spa_config_object;	/* MOS object for pool config */
	uint64_t	spa_config_generation;	/* config generation number */
	uint64_t	spa_syncing_txg;	/* txg currently syncing */
	bpobj_t		spa_deferred_bpobj;	/* deferred-free bplist */
	bplist_t	spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
	zio_cksum_salt_t spa_cksum_salt;	/* secret salt for cksum */
	/* checksum context templates */
	kmutex_t	spa_cksum_tmpls_lock;
	void		*spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
	uberblock_t	spa_ubsync;		/* last synced uberblock */
	uberblock_t	spa_uberblock;		/* current uberblock */
	boolean_t	spa_extreme_rewind;	/* rewind past deferred frees */
	kmutex_t	spa_scrub_lock;		/* resilver/scrub lock */
	uint64_t	spa_scrub_inflight;	/* in-flight scrub bytes */
	uint64_t	spa_load_verify_ios;	/* in-flight verification IOs */
	kcondvar_t	spa_scrub_io_cv;	/* scrub I/O completion */
	uint8_t		spa_scrub_active;	/* active or suspended? */
	uint8_t		spa_scrub_type;		/* type of scrub we're doing */
	uint8_t		spa_scrub_finished;	/* indicator to rotate logs */
	uint8_t		spa_scrub_started;	/* started since last boot */
	uint8_t		spa_scrub_reopen;	/* scrub doing vdev_reopen */
	uint64_t	spa_scan_pass_start;	/* start time per pass/reboot */
	uint64_t	spa_scan_pass_scrub_pause; /* scrub pause time */
	uint64_t	spa_scan_pass_scrub_spent_paused; /* total paused */
	uint64_t	spa_scan_pass_exam;	/* examined bytes per pass */
	uint64_t	spa_scan_pass_issued;	/* issued bytes per pass */

	/*
	 * We are in the middle of a resilver, and another resilver
	 * is needed once this one completes. This is set iff any
	 * vdev_resilver_deferred is set.
	 */
	boolean_t	spa_resilver_deferred;
	kmutex_t	spa_async_lock;		/* protect async state */
	kthread_t	*spa_async_thread;	/* thread doing async task */
	int		spa_async_suspended;	/* async tasks suspended */
	kcondvar_t	spa_async_cv;		/* wait for thread_exit() */
	uint16_t	spa_async_tasks;	/* async task mask */
	uint64_t	spa_missing_tvds;	/* unopenable tvds on load */
	uint64_t	spa_missing_tvds_allowed; /* allow loading spa? */

	spa_removing_phys_t spa_removing_phys;
	spa_vdev_removal_t *spa_vdev_removal;

	spa_condensing_indirect_phys_t	spa_condensing_indirect_phys;
	spa_condensing_indirect_t	*spa_condensing_indirect;
	zthr_t		*spa_condense_zthr;	/* zthr doing condense. */

	uint64_t	spa_checkpoint_txg;	/* the txg of the checkpoint */
	spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */
	zthr_t		*spa_checkpoint_discard_zthr;

	space_map_t	*spa_syncing_log_sm;	/* current log space map */
	avl_tree_t	spa_sm_logs_by_txg;
	kmutex_t	spa_flushed_ms_lock;	/* for metaslabs_by_flushed */
	avl_tree_t	spa_metaslabs_by_flushed;
	spa_unflushed_stats_t	spa_unflushed_stats;
	list_t		spa_log_summary;
	uint64_t	spa_log_flushall_txg;

	char		*spa_root;		/* alternate root directory */
	uint64_t	spa_ena;		/* spa-wide ereport ENA */
	int		spa_last_open_failed;	/* error if last open failed */
	uint64_t	spa_last_ubsync_txg;	/* "best" uberblock txg */
	uint64_t	spa_last_ubsync_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_txg;		/* ub txg that loaded */
	uint64_t	spa_load_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_meta_errors;	/* verify metadata err count */
	uint64_t	spa_load_data_errors;	/* verify data err count */
	uint64_t	spa_verify_min_txg;	/* start txg of verify scrub */
	kmutex_t	spa_errlog_lock;	/* error log lock */
	uint64_t	spa_errlog_last;	/* last error log object */
	uint64_t	spa_errlog_scrub;	/* scrub error log object */
	kmutex_t	spa_errlist_lock;	/* error list/ereport lock */
	avl_tree_t	spa_errlist_last;	/* last error list */
	avl_tree_t	spa_errlist_scrub;	/* scrub error list */
	uint64_t	spa_deflate;		/* should we deflate? */
	uint64_t	spa_history;		/* history object */
	kmutex_t	spa_history_lock;	/* history lock */
	vdev_t		*spa_pending_vdev;	/* pending vdev additions */
	kmutex_t	spa_props_lock;		/* property lock */
	uint64_t	spa_pool_props_object;	/* object for properties */
	uint64_t	spa_bootfs;		/* default boot filesystem */
	uint64_t	spa_failmode;		/* failure mode for the pool */
	uint64_t	spa_delegation;		/* delegation on/off */
	list_t		spa_config_list;	/* previous cache file(s) */
	/* per-CPU array of root of async I/O: */
	zio_t		**spa_async_zio_root;
	zio_t		*spa_suspend_zio_root;	/* root of all suspended I/O */
	zio_t		*spa_txg_zio[TXG_SIZE]; /* spa_sync() waits for this */
	kmutex_t	spa_suspend_lock;	/* protects suspend_zio_root */
	kcondvar_t	spa_suspend_cv;		/* notification of resume */
	zio_suspend_reason_t	spa_suspended;	/* pool is suspended */
	uint8_t		spa_claiming;		/* pool is doing zil_claim() */
	boolean_t	spa_is_root;		/* pool is root */
	int		spa_minref;		/* num refs when first opened */
	int		spa_mode;		/* FREAD | FWRITE */
	spa_log_state_t spa_log_state;		/* log state */
	uint64_t	spa_autoexpand;		/* lun expansion on/off */
	uint64_t	spa_bootsize;		/* efi system partition size */
	ddt_t		*spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */
	uint64_t	spa_ddt_stat_object;	/* DDT statistics */
	uint64_t	spa_dedup_ditto;	/* dedup ditto threshold */
	uint64_t	spa_dedup_checksum;	/* default dedup checksum */
	uint64_t	spa_dspace;		/* dspace in normal class */
	kmutex_t	spa_vdev_top_lock;	/* dueling offline/remove */
	kmutex_t	spa_proc_lock;		/* protects spa_proc* */
	kcondvar_t	spa_proc_cv;		/* spa_proc_state transitions */
	spa_proc_state_t spa_proc_state;	/* see definition */
	struct proc	*spa_proc;		/* "zpool-poolname" process */
	uint64_t	spa_did;		/* if procp != p0, did of t1 */
	boolean_t	spa_autoreplace;	/* autoreplace set in open */
	int		spa_vdev_locks;		/* locks grabbed */
	uint64_t	spa_creation_version;	/* version at pool creation */
	uint64_t	spa_prev_software_version; /* See ub_software_version */
	uint64_t	spa_feat_for_write_obj;	/* required to write to pool */
	uint64_t	spa_feat_for_read_obj;	/* required to read from pool */
	uint64_t	spa_feat_desc_obj;	/* Feature descriptions */
	uint64_t	spa_feat_enabled_txg_obj; /* Feature enabled txg */
	/* cache feature refcounts */
	uint64_t	spa_feat_refcount_cache[SPA_FEATURES];
	cyclic_id_t	spa_deadman_cycid;	/* cyclic id */
	uint64_t	spa_deadman_calls;	/* number of deadman calls */
	hrtime_t	spa_sync_starttime;	/* starting time of spa_sync */
	uint64_t	spa_deadman_synctime;	/* deadman expiration timer */
	uint64_t	spa_all_vdev_zaps;	/* ZAP of per-vd ZAP obj #s */
	spa_avz_action_t	spa_avz_action;	/* destroy/rebuild AVZ? */
	uint64_t	spa_autotrim;		/* automatic background trim? */
	spa_keystore_t	spa_keystore;	/* loaded crypto keys */
	uint64_t	spa_errata;	/* errata issues detected */

	/*
	 * spa_iokstat_lock protects spa_iokstat and
	 * spa_queue_stats[].
	 */
	kmutex_t	spa_iokstat_lock;
	struct kstat	*spa_iokstat;		/* kstat of io to this pool */
	struct {
		int spa_active;
		int spa_queued;
	} spa_queue_stats[ZIO_PRIORITY_NUM_QUEUEABLE];

	/*
	 * The following two members diverge from OpenZFS. Upstream import
	 * status is built around the Linux /proc fs. On illumos we use a kstat
	 * to track import status. spa_imp_kstat_lock protects spa_imp_kstat.
	 */
	kmutex_t	spa_imp_kstat_lock;
	struct kstat	*spa_imp_kstat;		/* kstat for import status */

	/* arc_memory_throttle() parameters during low memory condition */
	uint64_t	spa_lowmem_page_load;	/* memory load during txg */
	uint64_t	spa_lowmem_last_txg;	/* txg window start */

	hrtime_t	spa_ccw_fail_time;	/* Conf cache write fail time */

	uint64_t	spa_multihost;		/* multihost aware (mmp) */
	mmp_thread_t	spa_mmp;		/* multihost mmp thread */
	list_t		spa_leaf_list;		/* list of leaf vdevs */
	uint64_t	spa_leaf_list_gen;	/* track leaf_list changes */

	/*
	 * spa_refcount & spa_config_lock must be the last elements
	 * because zfs_refcount_t changes size based on compilation options.
	 * In order for the MDB module to function correctly, the other
	 * fields must remain in the same location.
	 */
	spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
	zfs_refcount_t	spa_refcount;		/* number of opens */

	taskq_t		*spa_upgrade_taskq;	/* taskq for upgrade jobs */
};

extern const char *spa_config_path;

extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent);
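
/*
 * Illustrative dispatch sketch (hedged; zio.c uses this interface to run
 * zio_execute() on the pool taskq selected by the zio's type and taskq
 * type from spa_zio_taskq[][] above):
 *
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_READ, ZIO_TASKQ_ISSUE,
 *	    (task_func_t *)zio_execute, zio, TQ_SLEEP, &zio->io_tqent);
 */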
extern void spa_load_spares(spa_t *spa);
extern void spa_load_l2cache(spa_t *spa);

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_SPA_IMPL_H */