/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 */

#ifndef _SYS_SPA_IMPL_H
#define	_SYS_SPA_IMPL_H

#include <sys/spa.h>
#include <sys/spa_checkpoint.h>
#include <sys/spa_log_spacemap.h>
#include <sys/vdev.h>
#include <sys/vdev_removal.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/refcount.h>
#include <sys/bplist.h>
#include <sys/bpobj.h>
#include <sys/dsl_crypt.h>
#include <sys/zfeature.h>
#include <sys/zthr.h>
#include <zfeature_common.h>

#ifdef	__cplusplus
extern "C" {
#endif

typedef struct spa_error_entry {
	zbookmark_phys_t	se_bookmark;
	char			*se_name;
	avl_node_t		se_avl;
} spa_error_entry_t;

typedef struct spa_history_phys {
	uint64_t sh_pool_create_len;	/* ending offset of zpool create */
	uint64_t sh_phys_max_off;	/* physical EOF */
	uint64_t sh_bof;		/* logical BOF */
	uint64_t sh_eof;		/* logical EOF */
	uint64_t sh_records_lost;	/* num of records overwritten */
} spa_history_phys_t;
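
/*
 * The history object behaves as a ring buffer: the "zpool create" record
 * occupies [0, sh_pool_create_len) and is never overwritten, while later
 * records wrap within [sh_pool_create_len, sh_phys_max_off).  A minimal
 * sketch of the logical-to-physical offset translation (illustrative only;
 * see spa_history.c for the authoritative version):
 *
 *	static uint64_t
 *	spa_history_log_to_phys_sketch(uint64_t log_off,
 *	    spa_history_phys_t *shpp)
 *	{
 *		uint64_t phys_len =
 *		    shpp->sh_phys_max_off - shpp->sh_pool_create_len;
 *		return ((log_off - shpp->sh_pool_create_len) % phys_len +
 *		    shpp->sh_pool_create_len);
 *	}
 */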

/*
 * All members must be uint64_t, for byteswap purposes.
 */
typedef struct spa_removing_phys {
	uint64_t sr_state; /* dsl_scan_state_t */

	/*
	 * The vdev ID that we most recently attempted to remove,
	 * or -1 if no removal has been attempted.
	 */
	uint64_t sr_removing_vdev;

	/*
	 * The vdev ID that we most recently successfully removed,
	 * or -1 if no devices have been removed.
	 */
	uint64_t sr_prev_indirect_vdev;

	uint64_t sr_start_time;
	uint64_t sr_end_time;

	/*
	 * Note that we cannot use the space map's or indirect mapping's
	 * accounting as a substitute for these values, because we need to
	 * count frees of not-yet-copied data as though the copy had already
	 * been done.  Otherwise, we could get into a situation where
	 * copied > to_copy, or we complete before copied == to_copy.
	 */
	uint64_t sr_to_copy; /* bytes that need to be copied */
	uint64_t sr_copied; /* bytes that have been copied or freed */
} spa_removing_phys_t;
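
/*
 * A hedged sketch of how these counters might be consumed: removal progress
 * can be derived directly from sr_copied vs. sr_to_copy.  The helper name
 * below is hypothetical and for illustration only.
 *
 *	static uint_t
 *	spa_removal_progress_pct(const spa_removing_phys_t *srp)
 *	{
 *		if (srp->sr_to_copy == 0)
 *			return (100);
 *		return ((uint_t)((srp->sr_copied * 100) / srp->sr_to_copy));
 *	}
 */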

/*
 * This struct is stored as an entry in the DMU_POOL_DIRECTORY_OBJECT
 * (with key DMU_POOL_CONDENSING_INDIRECT).  It is present if a condense
 * of an indirect vdev's mapping object is in progress.
 */
typedef struct spa_condensing_indirect_phys {
	/*
	 * The vdev ID of the indirect vdev whose indirect mapping is
	 * being condensed.
	 */
	uint64_t	scip_vdev;

	/*
	 * The vdev's old obsolete spacemap.  This spacemap's contents are
	 * being integrated into the new mapping.
	 */
	uint64_t	scip_prev_obsolete_sm_object;

	/*
	 * The new mapping object that is being created.
	 */
	uint64_t	scip_next_mapping_object;
} spa_condensing_indirect_phys_t;
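
/*
 * Whether a condense was in progress as of the last sync can be detected by
 * checking for the DMU_POOL_CONDENSING_INDIRECT entry in the pool directory.
 * A minimal sketch (illustrative only; see spa_condense_init() for the real
 * lookup, which also reads the entry's contents):
 *
 *	static boolean_t
 *	spa_condense_was_in_progress(spa_t *spa)
 *	{
 *		return (zap_contains(spa->spa_meta_objset,
 *		    DMU_POOL_DIRECTORY_OBJECT,
 *		    DMU_POOL_CONDENSING_INDIRECT) == 0);
 *	}
 */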

struct spa_aux_vdev {
	uint64_t	sav_object;		/* MOS object for device list */
	nvlist_t	*sav_config;		/* cached device config */
	vdev_t		**sav_vdevs;		/* devices */
	int		sav_count;		/* number of devices */
	boolean_t	sav_sync;		/* sync the device list */
	nvlist_t	**sav_pending;		/* pending device additions */
	uint_t		sav_npending;		/* # pending devices */
};
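
/*
 * A minimal sketch of walking one of the auxiliary vdev arrays (hot spares
 * or L2ARC devices).  The counting helper is hypothetical and callers are
 * assumed to hold the appropriate config lock:
 *
 *	static int
 *	spa_aux_count_healthy(spa_aux_vdev_t *sav)
 *	{
 *		int n = 0;
 *
 *		for (int i = 0; i < sav->sav_count; i++) {
 *			if (!vdev_is_dead(sav->sav_vdevs[i]))
 *				n++;
 *		}
 *		return (n);
 *	}
 */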

typedef struct spa_config_lock {
	kmutex_t	scl_lock;
	kthread_t	*scl_writer;
	int		scl_write_wanted;
	kcondvar_t	scl_cv;
	zfs_refcount_t	scl_count;
} spa_config_lock_t;
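
/*
 * Each of these is a reader/writer lock: readers take a reference on
 * scl_count, a writer records itself in scl_writer, and scl_write_wanted
 * lets a pending writer block new readers.  A simplified sketch of the
 * reader acquisition path (illustrative; see spa_config_enter() for the
 * real logic):
 *
 *	mutex_enter(&scl->scl_lock);
 *	while (scl->scl_writer != NULL || scl->scl_write_wanted)
 *		cv_wait(&scl->scl_cv, &scl->scl_lock);
 *	(void) zfs_refcount_add(&scl->scl_count, tag);
 *	mutex_exit(&scl->scl_lock);
 */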

typedef struct spa_config_dirent {
	list_node_t	scd_link;
	char		*scd_path;
} spa_config_dirent_t;

typedef enum zio_taskq_type {
	ZIO_TASKQ_ISSUE = 0,
	ZIO_TASKQ_ISSUE_HIGH,
	ZIO_TASKQ_INTERRUPT,
	ZIO_TASKQ_INTERRUPT_HIGH,
	ZIO_TASKQ_TYPES
} zio_taskq_type_t;

/*
 * State machine for the zpool-poolname process.  The state transitions
 * are as follows:
 *
 *	From		   To			Routine
 *	PROC_NONE	-> PROC_CREATED		spa_activate()
 *	PROC_CREATED	-> PROC_ACTIVE		spa_thread()
 *	PROC_ACTIVE	-> PROC_DEACTIVATE	spa_deactivate()
 *	PROC_DEACTIVATE	-> PROC_GONE		spa_thread()
 *	PROC_GONE	-> PROC_NONE		spa_deactivate()
 */
typedef enum spa_proc_state {
	SPA_PROC_NONE,		/* spa_proc = &p0, no process created */
	SPA_PROC_CREATED,	/* spa_activate() has proc, is waiting */
	SPA_PROC_ACTIVE,	/* taskqs created, spa_proc set */
	SPA_PROC_DEACTIVATE,	/* spa_deactivate() requests process exit */
	SPA_PROC_GONE		/* spa_thread() is exiting, spa_proc = &p0 */
} spa_proc_state_t;
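
/*
 * A hedged sketch of the legal transitions above, expressed as a check a
 * caller could assert on (illustrative only; the real code simply asserts
 * the expected state at each transition point):
 *
 *	static boolean_t
 *	spa_proc_transition_ok(spa_proc_state_t from, spa_proc_state_t to)
 *	{
 *		return ((from == SPA_PROC_NONE && to == SPA_PROC_CREATED) ||
 *		    (from == SPA_PROC_CREATED && to == SPA_PROC_ACTIVE) ||
 *		    (from == SPA_PROC_ACTIVE && to == SPA_PROC_DEACTIVATE) ||
 *		    (from == SPA_PROC_DEACTIVATE && to == SPA_PROC_GONE) ||
 *		    (from == SPA_PROC_GONE && to == SPA_PROC_NONE));
 *	}
 */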

typedef struct spa_taskqs {
	uint_t stqs_count;
	taskq_t **stqs_taskq;
} spa_taskqs_t;
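
/*
 * When stqs_count > 1, dispatch is spread across the taskqs in the array.
 * A simplified selection sketch (illustrative; see spa_taskq_dispatch_ent()
 * for the real selection policy):
 *
 *	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
 *	taskq_t *tq;
 *
 *	if (tqs->stqs_count == 1)
 *		tq = tqs->stqs_taskq[0];
 *	else
 *		tq = tqs->stqs_taskq[((uint64_t)gethrtime()) %
 *		    tqs->stqs_count];
 *	taskq_dispatch_ent(tq, func, arg, flags, ent);
 */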

typedef enum spa_all_vdev_zap_action {
	AVZ_ACTION_NONE = 0,
	AVZ_ACTION_DESTROY,	/* Destroy all per-vdev ZAPs and the AVZ. */
	AVZ_ACTION_REBUILD,	/* Populate the new AVZ, see spa_avz_rebuild */
	AVZ_ACTION_INITIALIZE
} spa_avz_action_t;

typedef enum spa_config_source {
	SPA_CONFIG_SRC_NONE = 0,
	SPA_CONFIG_SRC_SCAN,		/* scan of path (default: /dev/dsk) */
	SPA_CONFIG_SRC_CACHEFILE,	/* any cachefile */
	SPA_CONFIG_SRC_TRYIMPORT,	/* returned from call to tryimport */
	SPA_CONFIG_SRC_SPLIT,		/* new pool in a pool split */
	SPA_CONFIG_SRC_MOS		/* MOS, but not always from right txg */
} spa_config_source_t;

struct spa {
	/*
	 * Fields protected by spa_namespace_lock.
	 */
	char		spa_name[ZFS_MAX_DATASET_NAME_LEN];	/* pool name */
	char		*spa_comment;		/* comment */
	avl_node_t	spa_avl;		/* node in spa_namespace_avl */
	nvlist_t	*spa_config;		/* last synced config */
	nvlist_t	*spa_config_syncing;	/* currently syncing config */
	nvlist_t	*spa_config_splitting;	/* config for splitting */
	nvlist_t	*spa_load_info;		/* info and errors from load */
	uint64_t	spa_config_txg;		/* txg of last config change */
	int		spa_sync_pass;		/* iterate-to-convergence */
	pool_state_t	spa_state;		/* pool state */
	int		spa_inject_ref;		/* injection references */
	uint8_t		spa_sync_on;		/* sync threads are running */
	spa_load_state_t spa_load_state;	/* current load operation */
	boolean_t	spa_indirect_vdevs_loaded; /* mappings loaded? */
	boolean_t	spa_trust_config;	/* do we trust vdev tree? */
	spa_config_source_t spa_config_source;	/* where config comes from? */
	uint64_t	spa_import_flags;	/* import specific flags */
	spa_taskqs_t	spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
	dsl_pool_t	*spa_dsl_pool;
	boolean_t	spa_is_initializing;	/* true while opening pool */
	metaslab_class_t *spa_normal_class;	/* normal data class */
	metaslab_class_t *spa_log_class;	/* intent log data class */
	metaslab_class_t *spa_special_class;	/* special allocation class */
	metaslab_class_t *spa_dedup_class;	/* dedup allocation class */
	uint64_t	spa_first_txg;		/* first txg after spa_open() */
	uint64_t	spa_final_txg;		/* txg of export/destroy */
	uint64_t	spa_freeze_txg;		/* freeze pool at this txg */
	uint64_t	spa_load_max_txg;	/* best initial ub_txg */
	uint64_t	spa_claim_max_txg;	/* highest claimed birth txg */
	timespec_t	spa_loaded_ts;		/* 1st successful open time */
	objset_t	*spa_meta_objset;	/* copy of dp->dp_meta_objset */
	kmutex_t	spa_evicting_os_lock;	/* Evicting objset list lock */
	list_t		spa_evicting_os_list;	/* Objsets being evicted. */
	kcondvar_t	spa_evicting_os_cv;	/* Objset Eviction Completion */
	txg_list_t	spa_vdev_txg_list;	/* per-txg dirty vdev list */
	vdev_t		*spa_root_vdev;		/* top-level vdev container */
	int		spa_min_ashift;		/* of vdevs in normal class */
	int		spa_max_ashift;		/* of vdevs in normal class */
	uint64_t	spa_config_guid;	/* config pool guid */
	uint64_t	spa_load_guid;		/* spa_load initialized guid */
	uint64_t	spa_last_synced_guid;	/* last synced guid */
	list_t		spa_config_dirty_list;	/* vdevs with dirty config */
	list_t		spa_state_dirty_list;	/* vdevs with dirty state */
	/*
	 * spa_alloc_locks and spa_alloc_trees are arrays, whose lengths are
	 * stored in spa_alloc_count. There is one tree and one lock for each
	 * allocator, to help improve allocation performance in write-heavy
	 * workloads.
	 */
	kmutex_t	*spa_alloc_locks;
	avl_tree_t	*spa_alloc_trees;
	int		spa_alloc_count;
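
	/*
	 * A simplified sketch of how a writer might be steered to one of
	 * the allocators (illustrative only; the real allocator index is
	 * computed from the zio's bookmark when it is issued):
	 *
	 *	int allocator = zio->io_allocator;  /. hash % spa_alloc_count ./
	 *
	 *	mutex_enter(&spa->spa_alloc_locks[allocator]);
	 *	avl_add(&spa->spa_alloc_trees[allocator], zio);
	 *	mutex_exit(&spa->spa_alloc_locks[allocator]);
	 */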

	spa_aux_vdev_t	spa_spares;		/* hot spares */
	spa_aux_vdev_t	spa_l2cache;		/* L2ARC cache devices */
	nvlist_t	*spa_label_features;	/* Features for reading MOS */
	uint64_t	spa_config_object;	/* MOS object for pool config */
	uint64_t	spa_config_generation;	/* config generation number */
	uint64_t	spa_syncing_txg;	/* txg currently syncing */
	bpobj_t		spa_deferred_bpobj;	/* deferred-free bplist */
	bplist_t	spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
	zio_cksum_salt_t spa_cksum_salt;	/* secret salt for cksum */
	/* checksum context templates */
	kmutex_t	spa_cksum_tmpls_lock;
	void		*spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
	uberblock_t	spa_ubsync;		/* last synced uberblock */
	uberblock_t	spa_uberblock;		/* current uberblock */
	boolean_t	spa_extreme_rewind;	/* rewind past deferred frees */
	kmutex_t	spa_scrub_lock;		/* resilver/scrub lock */
	uint64_t	spa_scrub_inflight;	/* in-flight scrub bytes */
	uint64_t	spa_load_verify_ios;	/* in-flight verification IOs */
	kcondvar_t	spa_scrub_io_cv;	/* scrub I/O completion */
	uint8_t		spa_scrub_active;	/* active or suspended? */
	uint8_t		spa_scrub_type;		/* type of scrub we're doing */
	uint8_t		spa_scrub_finished;	/* indicator to rotate logs */
	uint8_t		spa_scrub_started;	/* started since last boot */
	uint8_t		spa_scrub_reopen;	/* scrub doing vdev_reopen */
	uint64_t	spa_scan_pass_start;	/* start time per pass/reboot */
	uint64_t	spa_scan_pass_scrub_pause; /* scrub pause time */
	uint64_t	spa_scan_pass_scrub_spent_paused; /* total paused */
	uint64_t	spa_scan_pass_exam;	/* examined bytes per pass */
	uint64_t	spa_scan_pass_issued;	/* issued bytes per pass */

	/*
	 * We are in the middle of a resilver, and another resilver
	 * is needed once this one completes. This is set iff any
	 * vdev_resilver_deferred is set.
	 */
	boolean_t	spa_resilver_deferred;
	kmutex_t	spa_async_lock;		/* protect async state */
	kthread_t	*spa_async_thread;	/* thread doing async task */
	int		spa_async_suspended;	/* async tasks suspended */
	kcondvar_t	spa_async_cv;		/* wait for thread_exit() */
	uint16_t	spa_async_tasks;	/* async task mask */
	uint64_t	spa_missing_tvds;	/* unopenable tvds on load */
	uint64_t	spa_missing_tvds_allowed; /* allow loading spa? */

	spa_removing_phys_t spa_removing_phys;
	spa_vdev_removal_t *spa_vdev_removal;

	spa_condensing_indirect_phys_t	spa_condensing_indirect_phys;
	spa_condensing_indirect_t	*spa_condensing_indirect;
	zthr_t		*spa_condense_zthr;	/* zthr doing condense. */

	uint64_t	spa_checkpoint_txg;	/* the txg of the checkpoint */
	spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */
	zthr_t		*spa_checkpoint_discard_zthr;

	space_map_t	*spa_syncing_log_sm;	/* current log space map */
	avl_tree_t	spa_sm_logs_by_txg;
	kmutex_t	spa_flushed_ms_lock;	/* for metaslabs_by_flushed */
	avl_tree_t	spa_metaslabs_by_flushed;
	spa_unflushed_stats_t	spa_unflushed_stats;
	list_t		spa_log_summary;
	uint64_t	spa_log_flushall_txg;

	char		*spa_root;		/* alternate root directory */
	uint64_t	spa_ena;		/* spa-wide ereport ENA */
	int		spa_last_open_failed;	/* error if last open failed */
	uint64_t	spa_last_ubsync_txg;	/* "best" uberblock txg */
	uint64_t	spa_last_ubsync_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_txg;		/* ub txg that loaded */
	uint64_t	spa_load_txg_ts;	/* timestamp from that ub */
	uint64_t	spa_load_meta_errors;	/* verify metadata err count */
	uint64_t	spa_load_data_errors;	/* verify data err count */
	uint64_t	spa_verify_min_txg;	/* start txg of verify scrub */
	kmutex_t	spa_errlog_lock;	/* error log lock */
	uint64_t	spa_errlog_last;	/* last error log object */
	uint64_t	spa_errlog_scrub;	/* scrub error log object */
	kmutex_t	spa_errlist_lock;	/* error list/ereport lock */
	avl_tree_t	spa_errlist_last;	/* last error list */
	avl_tree_t	spa_errlist_scrub;	/* scrub error list */
	uint64_t	spa_deflate;		/* should we deflate? */
	uint64_t	spa_history;		/* history object */
	kmutex_t	spa_history_lock;	/* history lock */
	vdev_t		*spa_pending_vdev;	/* pending vdev additions */
	kmutex_t	spa_props_lock;		/* property lock */
	uint64_t	spa_pool_props_object;	/* object for properties */
	uint64_t	spa_bootfs;		/* default boot filesystem */
	uint64_t	spa_failmode;		/* failure mode for the pool */
	uint64_t	spa_delegation;		/* delegation on/off */
	list_t		spa_config_list;	/* previous cache file(s) */
	/* per-CPU array of root of async I/O: */
	zio_t		**spa_async_zio_root;
	zio_t		*spa_suspend_zio_root;	/* root of all suspended I/O */
	zio_t		*spa_txg_zio[TXG_SIZE]; /* spa_sync() waits for this */
	kmutex_t	spa_suspend_lock;	/* protects suspend_zio_root */
	kcondvar_t	spa_suspend_cv;		/* notification of resume */
	zio_suspend_reason_t	spa_suspended;	/* pool is suspended */
	uint8_t		spa_claiming;		/* pool is doing zil_claim() */
	boolean_t	spa_is_root;		/* pool is root */
	int		spa_minref;		/* num refs when first opened */
	int		spa_mode;		/* FREAD | FWRITE */
	spa_log_state_t spa_log_state;		/* log state */
	uint64_t	spa_autoexpand;		/* lun expansion on/off */
	uint64_t	spa_bootsize;		/* efi system partition size */
	ddt_t		*spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */
	uint64_t	spa_ddt_stat_object;	/* DDT statistics */
	uint64_t	spa_dedup_ditto;	/* dedup ditto threshold */
	uint64_t	spa_dedup_checksum;	/* default dedup checksum */
	uint64_t	spa_dspace;		/* dspace in normal class */
	kmutex_t	spa_vdev_top_lock;	/* dueling offline/remove */
	kmutex_t	spa_proc_lock;		/* protects spa_proc* */
	kcondvar_t	spa_proc_cv;		/* spa_proc_state transitions */
	spa_proc_state_t spa_proc_state;	/* see definition */
	struct proc	*spa_proc;		/* "zpool-poolname" process */
	uint64_t	spa_did;		/* if procp != p0, did of t1 */
	boolean_t	spa_autoreplace;	/* autoreplace set in open */
	int		spa_vdev_locks;		/* locks grabbed */
	uint64_t	spa_creation_version;	/* version at pool creation */
	uint64_t	spa_prev_software_version; /* See ub_software_version */
	uint64_t	spa_feat_for_write_obj;	/* required to write to pool */
	uint64_t	spa_feat_for_read_obj;	/* required to read from pool */
	uint64_t	spa_feat_desc_obj;	/* Feature descriptions */
	uint64_t	spa_feat_enabled_txg_obj; /* Feature enabled txg */
	/* cache feature refcounts */
	uint64_t	spa_feat_refcount_cache[SPA_FEATURES];
	cyclic_id_t	spa_deadman_cycid;	/* cyclic id */
	uint64_t	spa_deadman_calls;	/* number of deadman calls */
	hrtime_t	spa_sync_starttime;	/* starting time of spa_sync */
	uint64_t	spa_deadman_synctime;	/* deadman expiration timer */
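	/*
	 * A hedged sketch of the deadman check driven by spa_deadman_cycid:
	 * if the currently syncing txg has been running longer than
	 * spa_deadman_synctime, the pool is considered hung (illustrative
	 * only; see spa_deadman() for the real handler):
	 *
	 *	if (gethrtime() - spa->spa_sync_starttime >
	 *	    spa->spa_deadman_synctime)
	 *		vdev_deadman(spa->spa_root_vdev);
	 */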
	uint64_t	spa_all_vdev_zaps;	/* ZAP of per-vd ZAP obj #s */
	spa_avz_action_t	spa_avz_action;	/* destroy/rebuild AVZ? */
	uint64_t	spa_autotrim;		/* automatic background trim? */
	spa_keystore_t	spa_keystore;	/* loaded crypto keys */
	uint64_t	spa_errata;	/* errata issues detected */

	/*
	 * spa_iokstat_lock protects spa_iokstat and
	 * spa_queue_stats[].
	 */
	kmutex_t	spa_iokstat_lock;
	struct kstat	*spa_iokstat;		/* kstat of io to this pool */
	struct {
		int spa_active;
		int spa_queued;
	} spa_queue_stats[ZIO_PRIORITY_NUM_QUEUEABLE];

	/* arc_memory_throttle() parameters during low memory condition */
	uint64_t	spa_lowmem_page_load;	/* memory load during txg */
	uint64_t	spa_lowmem_last_txg;	/* txg window start */

	hrtime_t	spa_ccw_fail_time;	/* Conf cache write fail time */

	uint64_t	spa_multihost;		/* multihost aware (mmp) */
	mmp_thread_t	spa_mmp;		/* multihost mmp thread */
	list_t		spa_leaf_list;		/* list of leaf vdevs */
	uint64_t	spa_leaf_list_gen;	/* track leaf_list changes */

	/*
	 * spa_refcount & spa_config_lock must be the last elements
	 * because zfs_refcount_t changes size based on compilation options.
	 * In order for the MDB module to function correctly, the other
	 * fields must remain in the same location.
	 */
	spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
	zfs_refcount_t	spa_refcount;		/* number of opens */

	taskq_t		*spa_upgrade_taskq;	/* taskq for upgrade jobs */
};

extern const char *spa_config_path;

extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent);
extern void spa_load_spares(spa_t *spa);
extern void spa_load_l2cache(spa_t *spa);
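
/*
 * A minimal usage sketch for spa_taskq_dispatch_ent(): hand a zio's execute
 * step to the issue taskq for write zios, using the pre-allocated taskq
 * entry embedded in the zio (illustrative only; see zio_taskq_dispatch()
 * for the real caller):
 *
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_WRITE, ZIO_TASKQ_ISSUE,
 *	    (task_func_t *)zio_execute, zio, TQ_NOSLEEP, &zio->io_tqent);
 */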

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_SPA_IMPL_H */
