xref: /illumos-gate/usr/src/uts/common/os/zone.c (revision 542a813c)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2013, Joyent Inc. All rights reserved.
25  */
26 
27 /*
28  * Zones
29  *
30  *   A zone is a named collection of processes, namespace constraints,
31  *   and other system resources which comprise a secure and manageable
32  *   application containment facility.
33  *
34  *   Zones (represented by the reference counted zone_t) are tracked in
35  *   the kernel in the zonehash.  Elsewhere in the kernel, Zone IDs
36  *   (zoneid_t) are used to track zone association.  Zone IDs are
37  *   dynamically generated when the zone is created; if a persistent
38  *   identifier is needed (core files, accounting logs, audit trail,
39  *   etc.), the zone name should be used.
40  *
41  *
42  *   Global Zone:
43  *
44  *   The global zone (zoneid 0) is automatically associated with all
45  *   system resources that have not been bound to a user-created zone.
46  *   This means that even systems where zones are not in active use
47  *   have a global zone, and all processes, mounts, etc. are
48  *   associated with that zone.  The global zone is generally
49  *   unconstrained in terms of privileges and access, though the usual
50  *   credential and privilege based restrictions apply.
51  *
52  *
53  *   Zone States:
54  *
55  *   The states a zone may be in, and the transitions between them, are
56  *   as follows:
57  *
58  *   ZONE_IS_UNINITIALIZED: primordial state for a zone. The partially
59  *   initialized zone is added to the list of active zones on the system but
60  *   isn't accessible.
61  *
62  *   ZONE_IS_INITIALIZED: Initialization is complete except that the ZSD
63  *   constructor callbacks have not yet run.  It is not possible to enter
64  *   the zone, but attributes can be retrieved.
65  *
66  *   ZONE_IS_READY: zsched (the kernel dummy process for a zone) is
67  *   ready.  The zone is made visible after the ZSD constructor callbacks are
68  *   executed.  A zone remains in this state until it transitions into
69  *   the ZONE_IS_BOOTING state as a result of a call to zone_boot().
70  *
71  *   ZONE_IS_BOOTING: in this short-lived state, zsched attempts to start
72  *   init.  Should that fail, the zone proceeds to the ZONE_IS_SHUTTING_DOWN
73  *   state.
74  *
75  *   ZONE_IS_RUNNING: The zone is open for business: zsched has
76  *   successfully started init.   A zone remains in this state until
77  *   zone_shutdown() is called.
78  *
79  *   ZONE_IS_SHUTTING_DOWN: zone_shutdown() has been called, the system is
80  *   killing all processes running in the zone. The zone remains
81  *   in this state until there are no more user processes running in the zone.
82  *   zone_create(), zone_enter(), and zone_destroy() on this zone will fail.
83  *   Since zone_shutdown() is restartable, it may be called successfully
84  *   multiple times for the same zone_t.  Setting of the zone's state to
85  *   ZONE_IS_SHUTTING_DOWN is synchronized with mounts, so VOP_MOUNT() may check
86  *   the zone's status without worrying about it being a moving target.
87  *
88  *   ZONE_IS_EMPTY: zone_shutdown() has been called, and there
89  *   are no more user processes in the zone.  The zone remains in this
90  *   state until there are no more kernel threads associated with the
91  *   zone.  zone_create(), zone_enter(), and zone_destroy() on this zone will
92  *   fail.
93  *
94  *   ZONE_IS_DOWN: All kernel threads doing work on behalf of the zone
95  *   have exited.  zone_shutdown() returns.  Henceforth it is not possible to
96  *   join the zone or create kernel threads therein.
97  *
98  *   ZONE_IS_DYING: zone_destroy() has been called on the zone; zone
99  *   remains in this state until zsched exits.  Calls to zone_find_by_*()
100  *   return NULL from now on.
101  *
102  *   ZONE_IS_DEAD: zsched has exited (zone_ntasks == 0).  There are no
103  *   processes or threads doing work on behalf of the zone.  The zone is
104  *   removed from the list of active zones.  zone_destroy() returns, and
105  *   the zone can be recreated.
106  *
107  *   ZONE_IS_FREE (internal state): zone_ref goes to 0, ZSD destructor
108  *   callbacks are executed, and all memory associated with the zone is
109  *   freed.
110  *
111  *   Threads can wait for the zone to enter a requested state by using
112  *   zone_status_wait() or zone_status_timedwait() with the desired
113  *   state passed in as an argument.  Zone state transitions are
114  *   uni-directional; it is not possible to move back to an earlier state.
115  *
116  *
117  *   Zone-Specific Data:
118  *
119  *   Subsystems needing to maintain zone-specific data can store that
120  *   data using the ZSD mechanism.  This provides a zone-specific data
121  *   store, similar to thread-specific data (see pthread_getspecific(3C)
122  *   or the TSD code in uts/common/disp/thread.c).  Also, ZSD can be used
123  *   to register callbacks to be invoked when a zone is created, shut
124  *   down, or destroyed.  This can be used to initialize zone-specific
125  *   data for new zones and to clean up when zones go away.
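 *
 *   For illustration only (a sketch, not code from this file; my_key,
 *   my_data_t and the my_zsd_* names are hypothetical), a subsystem might
 *   use ZSD roughly as follows:
 *
 *	typedef struct my_data { uint64_t md_count; } my_data_t;
 *
 *	static zone_key_t my_key;
 *
 *	static void *
 *	my_zsd_create(zoneid_t zoneid)
 *	{
 *		return (kmem_zalloc(sizeof (my_data_t), KM_SLEEP));
 *	}
 *
 *	static void
 *	my_zsd_destroy(zoneid_t zoneid, void *data)
 *	{
 *		kmem_free(data, sizeof (my_data_t));
 *	}
 *
 *	void
 *	my_subsys_init(void)
 *	{
 *		zone_key_create(&my_key, my_zsd_create, NULL,
 *		    my_zsd_destroy);
 *	}
 *
 *	my_data_t *
 *	my_data_for_zone(zone_t *zone)
 *	{
 *		return (zone_getspecific(my_key, zone));
 *	}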
126  *
127  *
128  *   Data Structures:
129  *
130  *   The per-zone structure (zone_t) is reference counted, and freed
131  *   when all references are released.  zone_hold and zone_rele can be
132  *   used to adjust the reference count.  In addition, reference counts
133  *   associated with the cred_t structure are tracked separately using
134  *   zone_cred_hold and zone_cred_rele.
135  *
136  *   Pointers to active zone_t's are stored in two hash tables; one
137  *   for searching by id, the other for searching by name.  Lookups
138  *   can be performed on either basis, using zone_find_by_id and
139  *   zone_find_by_name.  Both return zone_t pointers with the zone
140  *   held, so zone_rele should be called when the pointer is no longer
141  *   needed.  Zones can also be searched by path; zone_find_by_path
142  *   returns the zone with which a path name is associated (global
143  *   zone if the path is not within some other zone's file system
144  *   hierarchy).  This currently requires iterating through each zone,
145  *   so it is slower than an id or name search via a hash table.
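 *
 *   For illustration only (a sketch, not code from this file), a typical
 *   lookup pairs the find with a release once the pointer is no longer
 *   needed:
 *
 *	zone_t *zp;
 *
 *	if ((zp = zone_find_by_id(zoneid)) != NULL) {
 *		... use zp ...
 *		zone_rele(zp);
 *	}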
146  *
147  *
148  *   Locking:
149  *
150  *   zonehash_lock: This is a top-level global lock used to protect the
151  *       zone hash tables and lists.  Zones cannot be created or destroyed
152  *       while this lock is held.
153  *   zone_status_lock: This is a global lock protecting zone state.
154  *       Zones cannot change state while this lock is held.  It also
155  *       protects the list of kernel threads associated with a zone.
156  *   zone_lock: This is a per-zone lock used to protect several fields of
157  *       the zone_t (see <sys/zone.h> for details).  In addition, holding
158  *       this lock means that the zone cannot go away.
159  *   zone_nlwps_lock: This is a per-zone lock used to protect the fields
160  *	 related to the zone.max-lwps rctl.
161  *   zone_mem_lock: This is a per-zone lock used to protect the fields
162  *	 related to the zone.max-locked-memory and zone.max-swap rctls.
163  *   zone_rctl_lock: This is a per-zone lock used to protect other rctls,
164  *       currently just max_lofi
165  *   zsd_key_lock: This is a global lock protecting the key state for ZSD.
166  *   zone_deathrow_lock: This is a global lock protecting the "deathrow"
167  *       list (a list of zones in the ZONE_IS_DEAD state).
168  *
169  *   Ordering requirements:
170  *       pool_lock --> cpu_lock --> zonehash_lock --> zone_status_lock -->
171  *       	zone_lock --> zsd_key_lock --> pidlock --> p_lock
172  *
173  *   When taking zone_mem_lock or zone_nlwps_lock, the lock ordering is:
174  *	zonehash_lock --> a_lock --> pidlock --> p_lock --> zone_mem_lock
175  *	zonehash_lock --> a_lock --> pidlock --> p_lock --> zone_nlwps_lock
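 *
 *   For illustration only (a sketch, not code from this file), a caller
 *   adjusting the zone.max-lwps usage count would take the locks in that
 *   order:
 *
 *	mutex_enter(&p->p_lock);
 *	mutex_enter(&p->p_zone->zone_nlwps_lock);
 *	p->p_zone->zone_nlwps++;
 *	mutex_exit(&p->p_zone->zone_nlwps_lock);
 *	mutex_exit(&p->p_lock);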
176  *
177  *   Blocking memory allocations are permitted while holding any of the
178  *   zone locks.
179  *
180  *
181  *   System Call Interface:
182  *
183  *   The zone subsystem can be managed and queried from user level with
184  *   the following system calls (all subcodes of the primary "zone"
185  *   system call):
186  *   - zone_create: creates a zone with selected attributes (name,
187  *     root path, privileges, resource controls, ZFS datasets)
188  *   - zone_enter: allows the current process to enter a zone
189  *   - zone_getattr: reports attributes of a zone
190  *   - zone_setattr: sets attributes of a zone
191  *   - zone_boot: sets 'init' running for the zone
192  *   - zone_list: lists all zones active in the system
193  *   - zone_lookup: looks up zone id based on name
194  *   - zone_shutdown: initiates shutdown process (see states above)
195  *   - zone_destroy: completes shutdown process (see states above)
196  *
197  */
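
/*
 * For illustration only (a hedged sketch, not part of this file): from user
 * level the libc wrappers in <zone.h>, such as getzoneid(3C) and
 * getzoneidbyname(3C), are built on top of these subcodes.  The zone name
 * "myzone" below is a hypothetical example:
 *
 *	#include <zone.h>
 *
 *	zoneid_t zid;
 *
 *	if ((zid = getzoneidbyname("myzone")) == -1)
 *		... the named zone is not active ...
 */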
198 
199 #include <sys/priv_impl.h>
200 #include <sys/cred.h>
201 #include <c2/audit.h>
202 #include <sys/debug.h>
203 #include <sys/file.h>
204 #include <sys/kmem.h>
205 #include <sys/kstat.h>
206 #include <sys/mutex.h>
207 #include <sys/note.h>
208 #include <sys/pathname.h>
209 #include <sys/proc.h>
210 #include <sys/project.h>
211 #include <sys/sysevent.h>
212 #include <sys/task.h>
213 #include <sys/systm.h>
214 #include <sys/types.h>
215 #include <sys/utsname.h>
216 #include <sys/vnode.h>
217 #include <sys/vfs.h>
218 #include <sys/systeminfo.h>
219 #include <sys/policy.h>
220 #include <sys/cred_impl.h>
221 #include <sys/contract_impl.h>
222 #include <sys/contract/process_impl.h>
223 #include <sys/class.h>
224 #include <sys/pool.h>
225 #include <sys/pool_pset.h>
226 #include <sys/pset.h>
227 #include <sys/strlog.h>
228 #include <sys/sysmacros.h>
229 #include <sys/callb.h>
230 #include <sys/vmparam.h>
231 #include <sys/corectl.h>
232 #include <sys/ipc_impl.h>
233 #include <sys/klpd.h>
234 
235 #include <sys/door.h>
236 #include <sys/cpuvar.h>
237 #include <sys/sdt.h>
238 
239 #include <sys/uadmin.h>
240 #include <sys/session.h>
241 #include <sys/cmn_err.h>
242 #include <sys/modhash.h>
243 #include <sys/sunddi.h>
244 #include <sys/nvpair.h>
245 #include <sys/rctl.h>
246 #include <sys/fss.h>
247 #include <sys/brand.h>
248 #include <sys/zone.h>
249 #include <net/if.h>
250 #include <sys/cpucaps.h>
251 #include <vm/seg.h>
252 #include <sys/mac.h>
253 
254 /*
255  * This constant specifies the number of seconds that threads waiting for
256  * subsystems to release a zone's general-purpose references will wait before
257  * they log the zone's reference counts.  The constant's value shouldn't
258  * be so small that reference counts are unnecessarily reported for zones
259  * whose references are slowly released.  On the other hand, it shouldn't be so
260  * large that users reboot their systems out of frustration over hung zones
261  * before the system logs the zones' reference counts.
262  */
263 #define	ZONE_DESTROY_TIMEOUT_SECS	60
264 
265 /* List of data link IDs which are accessible from the zone */
266 typedef struct zone_dl {
267 	datalink_id_t	zdl_id;
268 	nvlist_t	*zdl_net;
269 	list_node_t	zdl_linkage;
270 } zone_dl_t;
271 
272 /*
273  * cv used to signal that all references to the zone have been released.  This
274  * needs to be global since there may be multiple waiters, and the first to
275  * wake up will free the zone_t, hence we cannot use zone->zone_cv.
276  */
277 static kcondvar_t zone_destroy_cv;
278 /*
279  * Lock used to serialize access to zone_cv.  This could have been per-zone,
280  * but then we'd need another lock for zone_destroy_cv, and why bother?
281  */
282 static kmutex_t zone_status_lock;
283 
284 /*
285  * ZSD-related global variables.
286  */
287 static kmutex_t zsd_key_lock;	/* protects the following two */
288 /*
289  * The next caller of zone_key_create() will be assigned a key of ++zsd_keyval.
290  */
291 static zone_key_t zsd_keyval = 0;
292 /*
293  * Global list of registered keys.  We use this when a new zone is created.
294  */
295 static list_t zsd_registered_keys;
296 
297 int zone_hash_size = 256;
298 static mod_hash_t *zonehashbyname, *zonehashbyid, *zonehashbylabel;
299 static kmutex_t zonehash_lock;
300 static uint_t zonecount;
301 static id_space_t *zoneid_space;
302 
303 /*
304  * The global zone (aka zone0) is the all-seeing, all-knowing zone in which the
305  * kernel proper runs, and which manages all other zones.
306  *
307  * Although not declared as static, the variable "zone0" should not be used
308  * except by code that needs to reference the global zone early in boot,
309  * before it is fully initialized.  All other consumers should use
310  * 'global_zone'.
311  */
312 zone_t zone0;
313 zone_t *global_zone = NULL;	/* Set when the global zone is initialized */
314 
315 /*
316  * List of active zones, protected by zonehash_lock.
317  */
318 static list_t zone_active;
319 
320 /*
321  * List of destroyed zones that still have outstanding cred references.
322  * Used for debugging.  Uses a separate lock to avoid lock ordering
323  * problems in zone_free.
324  */
325 static list_t zone_deathrow;
326 static kmutex_t zone_deathrow_lock;
327 
328 /* number of zones is limited by virtual interface limit in IP */
329 uint_t maxzones = 8192;
330 
331 /* Event channel to send zone state change notifications */
332 evchan_t *zone_event_chan;
333 
334 /*
335  * This table holds the mapping from kernel zone states to
336  * states visible in the state notification API.
337  * The idea is that we only expose "obvious" states and
338  * do not expose states which are just implementation details.
339  */
340 const char  *zone_status_table[] = {
341 	ZONE_EVENT_UNINITIALIZED,	/* uninitialized */
342 	ZONE_EVENT_INITIALIZED,		/* initialized */
343 	ZONE_EVENT_READY,		/* ready */
344 	ZONE_EVENT_READY,		/* booting */
345 	ZONE_EVENT_RUNNING,		/* running */
346 	ZONE_EVENT_SHUTTING_DOWN,	/* shutting_down */
347 	ZONE_EVENT_SHUTTING_DOWN,	/* empty */
348 	ZONE_EVENT_SHUTTING_DOWN,	/* down */
349 	ZONE_EVENT_SHUTTING_DOWN,	/* dying */
350 	ZONE_EVENT_UNINITIALIZED,	/* dead */
351 };
352 
353 /*
354  * This array contains the names of the subsystems listed in zone_ref_subsys_t
355  * (see sys/zone.h).
356  */
357 static char *zone_ref_subsys_names[] = {
358 	"NFS",		/* ZONE_REF_NFS */
359 	"NFSv4",	/* ZONE_REF_NFSV4 */
360 	"SMBFS",	/* ZONE_REF_SMBFS */
361 	"MNTFS",	/* ZONE_REF_MNTFS */
362 	"LOFI",		/* ZONE_REF_LOFI */
363 	"VFS",		/* ZONE_REF_VFS */
364 	"IPC"		/* ZONE_REF_IPC */
365 };
366 
367 /*
368  * This isn't static so lint doesn't complain.
369  */
370 rctl_hndl_t rc_zone_cpu_shares;
371 rctl_hndl_t rc_zone_locked_mem;
372 rctl_hndl_t rc_zone_max_swap;
373 rctl_hndl_t rc_zone_max_lofi;
374 rctl_hndl_t rc_zone_cpu_cap;
375 rctl_hndl_t rc_zone_nlwps;
376 rctl_hndl_t rc_zone_nprocs;
377 rctl_hndl_t rc_zone_shmmax;
378 rctl_hndl_t rc_zone_shmmni;
379 rctl_hndl_t rc_zone_semmni;
380 rctl_hndl_t rc_zone_msgmni;
381 /*
382  * Synchronization primitives used to synchronize between mounts and zone
383  * creation/destruction.
384  */
385 static int mounts_in_progress;
386 static kcondvar_t mount_cv;
387 static kmutex_t mount_lock;
388 
389 const char * const zone_default_initname = "/sbin/init";
390 static char * const zone_prefix = "/zone/";
391 static int zone_shutdown(zoneid_t zoneid);
392 static int zone_add_datalink(zoneid_t, datalink_id_t);
393 static int zone_remove_datalink(zoneid_t, datalink_id_t);
394 static int zone_list_datalink(zoneid_t, int *, datalink_id_t *);
395 static int zone_set_network(zoneid_t, zone_net_data_t *);
396 static int zone_get_network(zoneid_t, zone_net_data_t *);
397 
398 typedef boolean_t zsd_applyfn_t(kmutex_t *, boolean_t, zone_t *, zone_key_t);
399 
400 static void zsd_apply_all_zones(zsd_applyfn_t *, zone_key_t);
401 static void zsd_apply_all_keys(zsd_applyfn_t *, zone_t *);
402 static boolean_t zsd_apply_create(kmutex_t *, boolean_t, zone_t *, zone_key_t);
403 static boolean_t zsd_apply_shutdown(kmutex_t *, boolean_t, zone_t *,
404     zone_key_t);
405 static boolean_t zsd_apply_destroy(kmutex_t *, boolean_t, zone_t *, zone_key_t);
406 static boolean_t zsd_wait_for_creator(zone_t *, struct zsd_entry *,
407     kmutex_t *);
408 static boolean_t zsd_wait_for_inprogress(zone_t *, struct zsd_entry *,
409     kmutex_t *);
410 
411 /*
412  * Bump this number when you alter the zone syscall interfaces; this is
413  * because we need to have support for previous API versions in libc
414  * to support patching; libc calls into the kernel to determine this number.
415  *
416  * Version 1 of the API is the version originally shipped with Solaris 10
417  * Version 2 alters the zone_create system call in order to support more
418  *     arguments by moving the args into a structure; and to do better
419  *     error reporting when zone_create() fails.
420  * Version 3 alters the zone_create system call in order to support the
421  *     import of ZFS datasets to zones.
422  * Version 4 alters the zone_create system call in order to support
423  *     Trusted Extensions.
424  * Version 5 alters the zone_boot system call, and converts its old
425  *     bootargs parameter to be set by the zone_setattr API instead.
426  * Version 6 adds the flag argument to zone_create.
427  */
428 static const int ZONE_SYSCALL_API_VERSION = 6;
429 
430 /*
431  * Certain filesystems (such as NFS and autofs) need to know which zone
432  * the mount is being placed in.  Because of this, we need to be able to
433  * ensure that a zone isn't in the process of being created such that
434  * nfs_mount() thinks it is in the global zone, while by the time it
435  * gets added to the list of mounted zones, it ends up on zoneA's mount
436  * list.
437  *
438  * The following functions: block_mounts()/resume_mounts() and
439  * mount_in_progress()/mount_completed() are used by zones and the VFS
440  * layer (respectively) to synchronize zone creation and new mounts.
441  *
442  * The semantics are like a reader-reader lock such that there may
443  * either be multiple mounts (or zone creations, if that weren't
444  * serialized by zonehash_lock) in progress at the same time, but not
445  * both.
446  *
447  * We use cv's so the user can ctrl-C out of the operation if it's
448  * taking too long.
449  *
450  * The semantics are such that there is unfair bias towards the
451  * "current" operation.  This means that zone creations may starve if
452  * there is a rapid succession of new mounts coming in to the system, or
453  * there is a remote possibility that zones will be created at such a
454  * rate that new mounts will not be able to proceed.
455  */
456 /*
457  * Prevent new mounts from progressing to the point of calling
458  * VFS_MOUNT().  If there are already mounts in this "region", wait for
459  * them to complete.
460  */
461 static int
462 block_mounts(void)
463 {
464 	int retval = 0;
465 
466 	/*
467 	 * Since it may block for a long time, block_mounts() shouldn't be
468 	 * called with zonehash_lock held.
469 	 */
470 	ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
471 	mutex_enter(&mount_lock);
472 	while (mounts_in_progress > 0) {
473 		if (cv_wait_sig(&mount_cv, &mount_lock) == 0)
474 			goto signaled;
475 	}
476 	/*
477 	 * A negative value of mounts_in_progress indicates that mounts
478 	 * have been blocked by (-mounts_in_progress) different callers.
479 	 */
480 	mounts_in_progress--;
481 	retval = 1;
482 signaled:
483 	mutex_exit(&mount_lock);
484 	return (retval);
485 }
486 
487 /*
488  * The VFS layer may progress with new mounts as far as we're concerned.
489  * Allow them to progress if we were the last obstacle.
490  */
491 static void
492 resume_mounts(void)
493 {
494 	mutex_enter(&mount_lock);
495 	if (++mounts_in_progress == 0)
496 		cv_broadcast(&mount_cv);
497 	mutex_exit(&mount_lock);
498 }
499 
500 /*
501  * The VFS layer is busy with a mount; zones should wait until all
502  * mounts are completed to progress.
503  */
504 void
505 mount_in_progress(void)
506 {
507 	mutex_enter(&mount_lock);
508 	while (mounts_in_progress < 0)
509 		cv_wait(&mount_cv, &mount_lock);
510 	mounts_in_progress++;
511 	mutex_exit(&mount_lock);
512 }
513 
514 /*
515  * VFS is done with one mount; wake up any waiting block_mounts()
516  * callers if this is the last mount.
517  */
518 void
519 mount_completed(void)
520 {
521 	mutex_enter(&mount_lock);
522 	if (--mounts_in_progress == 0)
523 		cv_broadcast(&mount_cv);
524 	mutex_exit(&mount_lock);
525 }
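
/*
 * For illustration only (a hedged sketch, not part of this file): the two
 * sides of this synchronization pair up roughly as follows.  The VFS layer
 * brackets the actual mount:
 *
 *	mount_in_progress();
 *	error = VFS_MOUNT(vfsp, mvp, uap, cr);
 *	mount_completed();
 *
 * while zone creation brackets the region in which the zone's root paths
 * are established:
 *
 *	if (block_mounts() == 0)
 *		... interrupted by a signal, fail with EINTR ...
 *	... set up zone_rootvp, zone_rootpath, etc. ...
 *	resume_mounts();
 */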
526 
527 /*
528  * ZSD routines.
529  *
530  * Zone Specific Data (ZSD) is modeled after Thread Specific Data as
531  * defined by the pthread_key_create() and related interfaces.
532  *
533  * Kernel subsystems may register one or more data items and/or
534  * callbacks to be executed when a zone is created, shutdown, or
535  * destroyed.
536  *
537  * Unlike the thread counterpart, destructor callbacks will be executed
538  * even if the data pointer is NULL and/or there are no constructor
539  * callbacks, so it is the responsibility of such callbacks to check for
540  * NULL data values if necessary.
541  *
542  * The locking strategy and overall picture is as follows:
543  *
544  * When someone calls zone_key_create(), a template ZSD entry is added to the
545  * global list "zsd_registered_keys", protected by zsd_key_lock.  While
546  * holding that lock all the existing zones are marked as
547  * ZSD_CREATE_NEEDED and a copy of the ZSD entry added to the per-zone
548  * zone_zsd list (protected by zone_lock). The global list is updated first
549  * (under zone_key_lock) to make sure that newly created zones use the
550  * most recent list of keys. Then under zonehash_lock we walk the zones
551  * and mark them.  Similar locking is used in zone_key_delete().
552  *
553  * The actual create, shutdown, and destroy callbacks are run without
554  * holding any lock, and zsd_flags are used to ensure that the operations
555  * have completed.  Thus by the time zone_key_create (and zone_create) or
556  * zone_key_delete (and zone_destroy) returns, all the necessary callbacks
557  * have been run.
558  *
559  * When new zones are created constructor callbacks for all registered ZSD
560  * entries will be called. That also uses the above two phases of marking
561  * what needs to be done, and then running the callbacks without holding
562  * any locks.
563  *
564  * The framework does not provide any locking around zone_getspecific() and
565  * zone_setspecific() apart from that needed for internal consistency, so
566  * callers interested in atomic "test-and-set" semantics will need to provide
567  * their own locking.
568  */
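
/*
 * For illustration only (a hedged sketch, not part of this file): a consumer
 * that wants atomic "get-or-create" semantics supplies its own lock, e.g.
 * with hypothetical my_lock, my_key and my_alloc():
 *
 *	mutex_enter(&my_lock);
 *	if ((data = zone_getspecific(my_key, zone)) == NULL) {
 *		data = my_alloc(zone);
 *		(void) zone_setspecific(my_key, zone, data);
 *	}
 *	mutex_exit(&my_lock);
 */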
569 
570 /*
571  * Helper function to find the zsd_entry associated with the key in the
572  * given list.
573  */
574 static struct zsd_entry *
575 zsd_find(list_t *l, zone_key_t key)
576 {
577 	struct zsd_entry *zsd;
578 
579 	for (zsd = list_head(l); zsd != NULL; zsd = list_next(l, zsd)) {
580 		if (zsd->zsd_key == key) {
581 			return (zsd);
582 		}
583 	}
584 	return (NULL);
585 }
586 
587 /*
588  * Helper function to find the zsd_entry associated with the key in the
589  * given list. Move it to the front of the list.
590  */
591 static struct zsd_entry *
592 zsd_find_mru(list_t *l, zone_key_t key)
593 {
594 	struct zsd_entry *zsd;
595 
596 	for (zsd = list_head(l); zsd != NULL; zsd = list_next(l, zsd)) {
597 		if (zsd->zsd_key == key) {
598 			/*
599 			 * Move to head of list to keep list in MRU order.
600 			 */
601 			if (zsd != list_head(l)) {
602 				list_remove(l, zsd);
603 				list_insert_head(l, zsd);
604 			}
605 			return (zsd);
606 		}
607 	}
608 	return (NULL);
609 }
610 
611 void
612 zone_key_create(zone_key_t *keyp, void *(*create)(zoneid_t),
613     void (*shutdown)(zoneid_t, void *), void (*destroy)(zoneid_t, void *))
614 {
615 	struct zsd_entry *zsdp;
616 	struct zsd_entry *t;
617 	struct zone *zone;
618 	zone_key_t  key;
619 
620 	zsdp = kmem_zalloc(sizeof (*zsdp), KM_SLEEP);
621 	zsdp->zsd_data = NULL;
622 	zsdp->zsd_create = create;
623 	zsdp->zsd_shutdown = shutdown;
624 	zsdp->zsd_destroy = destroy;
625 
626 	/*
627 	 * Insert in global list of callbacks. Makes future zone creations
628 	 * see it.
629 	 */
630 	mutex_enter(&zsd_key_lock);
631 	key = zsdp->zsd_key = ++zsd_keyval;
632 	ASSERT(zsd_keyval != 0);
633 	list_insert_tail(&zsd_registered_keys, zsdp);
634 	mutex_exit(&zsd_key_lock);
635 
636 	/*
637 	 * Insert for all existing zones and mark them as needing
638 	 * a create callback.
639 	 */
640 	mutex_enter(&zonehash_lock);	/* stop the world */
641 	for (zone = list_head(&zone_active); zone != NULL;
642 	    zone = list_next(&zone_active, zone)) {
643 		zone_status_t status;
644 
645 		mutex_enter(&zone->zone_lock);
646 
647 		/* Skip zones that are on the way down or not yet up */
648 		status = zone_status_get(zone);
649 		if (status >= ZONE_IS_DOWN ||
650 		    status == ZONE_IS_UNINITIALIZED) {
651 			mutex_exit(&zone->zone_lock);
652 			continue;
653 		}
654 
655 		t = zsd_find_mru(&zone->zone_zsd, key);
656 		if (t != NULL) {
657 			/*
658 			 * A zsd_configure already inserted it after
659 			 * we dropped zsd_key_lock above.
660 			 */
661 			mutex_exit(&zone->zone_lock);
662 			continue;
663 		}
664 		t = kmem_zalloc(sizeof (*t), KM_SLEEP);
665 		t->zsd_key = key;
666 		t->zsd_create = create;
667 		t->zsd_shutdown = shutdown;
668 		t->zsd_destroy = destroy;
669 		if (create != NULL) {
670 			t->zsd_flags = ZSD_CREATE_NEEDED;
671 			DTRACE_PROBE2(zsd__create__needed,
672 			    zone_t *, zone, zone_key_t, key);
673 		}
674 		list_insert_tail(&zone->zone_zsd, t);
675 		mutex_exit(&zone->zone_lock);
676 	}
677 	mutex_exit(&zonehash_lock);
678 
679 	if (create != NULL) {
680 		/* Now call the create callback for this key */
681 		zsd_apply_all_zones(zsd_apply_create, key);
682 	}
683 	/*
684 	 * It is safe for consumers to use the key now, so make it
685 	 * globally visible. Specifically zone_getspecific() will
686 	 * always successfully return the zone specific data associated
687 	 * with the key.
688 	 */
689 	*keyp = key;
690 
691 }
692 
693 /*
694  * Function called when a module is being unloaded, or otherwise wishes
695  * to unregister its ZSD key and callbacks.
696  *
697  * Remove from the global list and determine the functions that need to
698  * be called under a global lock. Then call the functions without
699  * holding any locks. Finally free up the zone_zsd entries. (The apply
700  * functions need to access the zone_zsd entries to find zsd_data etc.)
701  */
702 int
703 zone_key_delete(zone_key_t key)
704 {
705 	struct zsd_entry *zsdp = NULL;
706 	zone_t *zone;
707 
708 	mutex_enter(&zsd_key_lock);
709 	zsdp = zsd_find_mru(&zsd_registered_keys, key);
710 	if (zsdp == NULL) {
711 		mutex_exit(&zsd_key_lock);
712 		return (-1);
713 	}
714 	list_remove(&zsd_registered_keys, zsdp);
715 	mutex_exit(&zsd_key_lock);
716 
717 	mutex_enter(&zonehash_lock);
718 	for (zone = list_head(&zone_active); zone != NULL;
719 	    zone = list_next(&zone_active, zone)) {
720 		struct zsd_entry *del;
721 
722 		mutex_enter(&zone->zone_lock);
723 		del = zsd_find_mru(&zone->zone_zsd, key);
724 		if (del == NULL) {
725 			/*
726 			 * Somebody else got here first, e.g. the zone going
727 			 * away.
728 			 */
729 			mutex_exit(&zone->zone_lock);
730 			continue;
731 		}
732 		ASSERT(del->zsd_shutdown == zsdp->zsd_shutdown);
733 		ASSERT(del->zsd_destroy == zsdp->zsd_destroy);
734 		if (del->zsd_shutdown != NULL &&
735 		    (del->zsd_flags & ZSD_SHUTDOWN_ALL) == 0) {
736 			del->zsd_flags |= ZSD_SHUTDOWN_NEEDED;
737 			DTRACE_PROBE2(zsd__shutdown__needed,
738 			    zone_t *, zone, zone_key_t, key);
739 		}
740 		if (del->zsd_destroy != NULL &&
741 		    (del->zsd_flags & ZSD_DESTROY_ALL) == 0) {
742 			del->zsd_flags |= ZSD_DESTROY_NEEDED;
743 			DTRACE_PROBE2(zsd__destroy__needed,
744 			    zone_t *, zone, zone_key_t, key);
745 		}
746 		mutex_exit(&zone->zone_lock);
747 	}
748 	mutex_exit(&zonehash_lock);
749 	kmem_free(zsdp, sizeof (*zsdp));
750 
751 	/* Now call the shutdown and destroy callback for this key */
752 	zsd_apply_all_zones(zsd_apply_shutdown, key);
753 	zsd_apply_all_zones(zsd_apply_destroy, key);
754 
755 	/* Now we can free up the zsdp structures in each zone */
756 	mutex_enter(&zonehash_lock);
757 	for (zone = list_head(&zone_active); zone != NULL;
758 	    zone = list_next(&zone_active, zone)) {
759 		struct zsd_entry *del;
760 
761 		mutex_enter(&zone->zone_lock);
762 		del = zsd_find(&zone->zone_zsd, key);
763 		if (del != NULL) {
764 			list_remove(&zone->zone_zsd, del);
765 			ASSERT(!(del->zsd_flags & ZSD_ALL_INPROGRESS));
766 			kmem_free(del, sizeof (*del));
767 		}
768 		mutex_exit(&zone->zone_lock);
769 	}
770 	mutex_exit(&zonehash_lock);
771 
772 	return (0);
773 }
774 
775 /*
776  * ZSD counterpart of pthread_setspecific().
777  *
778  * Since all zsd callbacks, including those with no create function,
779  * have an entry in zone_zsd, if the key is registered it is part of
780  * the zone_zsd list.
781  * Return an error if the key wasn't registered.
782  */
783 int
784 zone_setspecific(zone_key_t key, zone_t *zone, const void *data)
785 {
786 	struct zsd_entry *t;
787 
788 	mutex_enter(&zone->zone_lock);
789 	t = zsd_find_mru(&zone->zone_zsd, key);
790 	if (t != NULL) {
791 		/*
792 		 * Replace old value with new
793 		 */
794 		t->zsd_data = (void *)data;
795 		mutex_exit(&zone->zone_lock);
796 		return (0);
797 	}
798 	mutex_exit(&zone->zone_lock);
799 	return (-1);
800 }
801 
802 /*
803  * ZSD counterpart of pthread_getspecific().
804  */
805 void *
806 zone_getspecific(zone_key_t key, zone_t *zone)
807 {
808 	struct zsd_entry *t;
809 	void *data;
810 
811 	mutex_enter(&zone->zone_lock);
812 	t = zsd_find_mru(&zone->zone_zsd, key);
813 	data = (t == NULL ? NULL : t->zsd_data);
814 	mutex_exit(&zone->zone_lock);
815 	return (data);
816 }
817 
818 /*
819  * Function used to initialize a zone's list of ZSD callbacks and data
820  * when the zone is being created.  The callbacks are initialized from
821  * the template list (zsd_registered_keys). The constructor callback is
822  * executed later (once the zone exists and with locks dropped).
823  */
824 static void
825 zone_zsd_configure(zone_t *zone)
826 {
827 	struct zsd_entry *zsdp;
828 	struct zsd_entry *t;
829 
830 	ASSERT(MUTEX_HELD(&zonehash_lock));
831 	ASSERT(list_head(&zone->zone_zsd) == NULL);
832 	mutex_enter(&zone->zone_lock);
833 	mutex_enter(&zsd_key_lock);
834 	for (zsdp = list_head(&zsd_registered_keys); zsdp != NULL;
835 	    zsdp = list_next(&zsd_registered_keys, zsdp)) {
836 		/*
837 		 * Since this zone is ZONE_IS_UNINITIALIZED, zone_key_create
838 		 * should not have added anything to it.
839 		 */
840 		ASSERT(zsd_find(&zone->zone_zsd, zsdp->zsd_key) == NULL);
841 
842 		t = kmem_zalloc(sizeof (*t), KM_SLEEP);
843 		t->zsd_key = zsdp->zsd_key;
844 		t->zsd_create = zsdp->zsd_create;
845 		t->zsd_shutdown = zsdp->zsd_shutdown;
846 		t->zsd_destroy = zsdp->zsd_destroy;
847 		if (zsdp->zsd_create != NULL) {
848 			t->zsd_flags = ZSD_CREATE_NEEDED;
849 			DTRACE_PROBE2(zsd__create__needed,
850 			    zone_t *, zone, zone_key_t, zsdp->zsd_key);
851 		}
852 		list_insert_tail(&zone->zone_zsd, t);
853 	}
854 	mutex_exit(&zsd_key_lock);
855 	mutex_exit(&zone->zone_lock);
856 }
857 
858 enum zsd_callback_type { ZSD_CREATE, ZSD_SHUTDOWN, ZSD_DESTROY };
859 
860 /*
861  * Helper function to execute shutdown or destructor callbacks.
862  */
863 static void
864 zone_zsd_callbacks(zone_t *zone, enum zsd_callback_type ct)
865 {
866 	struct zsd_entry *t;
867 
868 	ASSERT(ct == ZSD_SHUTDOWN || ct == ZSD_DESTROY);
869 	ASSERT(ct != ZSD_SHUTDOWN || zone_status_get(zone) >= ZONE_IS_EMPTY);
870 	ASSERT(ct != ZSD_DESTROY || zone_status_get(zone) >= ZONE_IS_DOWN);
871 
872 	/*
873 	 * Run the callback solely based on what is registered for the zone
874 	 * in zone_zsd. The global list can change independently of this
875 	 * as keys are registered and unregistered and we don't register new
876 	 * callbacks for a zone that is in the process of going away.
877 	 */
878 	mutex_enter(&zone->zone_lock);
879 	for (t = list_head(&zone->zone_zsd); t != NULL;
880 	    t = list_next(&zone->zone_zsd, t)) {
881 		zone_key_t key = t->zsd_key;
882 
883 		/* Skip if no callbacks registered */
884 
885 		if (ct == ZSD_SHUTDOWN) {
886 			if (t->zsd_shutdown != NULL &&
887 			    (t->zsd_flags & ZSD_SHUTDOWN_ALL) == 0) {
888 				t->zsd_flags |= ZSD_SHUTDOWN_NEEDED;
889 				DTRACE_PROBE2(zsd__shutdown__needed,
890 				    zone_t *, zone, zone_key_t, key);
891 			}
892 		} else {
893 			if (t->zsd_destroy != NULL &&
894 			    (t->zsd_flags & ZSD_DESTROY_ALL) == 0) {
895 				t->zsd_flags |= ZSD_DESTROY_NEEDED;
896 				DTRACE_PROBE2(zsd__destroy__needed,
897 				    zone_t *, zone, zone_key_t, key);
898 			}
899 		}
900 	}
901 	mutex_exit(&zone->zone_lock);
902 
903 	/* Now call the shutdown and destroy callback for this key */
904 	zsd_apply_all_keys(zsd_apply_shutdown, zone);
905 	zsd_apply_all_keys(zsd_apply_destroy, zone);
906 
907 }
908 
909 /*
910  * Called when the zone is going away; free ZSD-related memory, and
911  * destroy the zone_zsd list.
912  */
913 static void
914 zone_free_zsd(zone_t *zone)
915 {
916 	struct zsd_entry *t, *next;
917 
918 	/*
919 	 * Free all the zsd_entry's we had on this zone.
920 	 */
921 	mutex_enter(&zone->zone_lock);
922 	for (t = list_head(&zone->zone_zsd); t != NULL; t = next) {
923 		next = list_next(&zone->zone_zsd, t);
924 		list_remove(&zone->zone_zsd, t);
925 		ASSERT(!(t->zsd_flags & ZSD_ALL_INPROGRESS));
926 		kmem_free(t, sizeof (*t));
927 	}
928 	list_destroy(&zone->zone_zsd);
929 	mutex_exit(&zone->zone_lock);
930 
931 }
932 
933 /*
934  * Apply a function to all zones for particular key value.
935  *
936  * The applyfn has to drop zonehash_lock if it does some work, and
937  * then reacquire it before it returns.
938  * When the lock is dropped we don't follow list_next even
939  * if it is possible to do so without any hazards. This is
940  * because we want the design to allow for the list of zones
941  * to change in any arbitrary way during the time the
942  * lock was dropped.
943  *
944  * It is safe to restart the loop at list_head since the applyfn
945  * changes the zsd_flags as it does work, so a subsequent
946  * pass through will have no effect in applyfn, hence the loop will terminate
947  * in at worst O(N^2).
948  */
949 static void
950 zsd_apply_all_zones(zsd_applyfn_t *applyfn, zone_key_t key)
951 {
952 	zone_t *zone;
953 
954 	mutex_enter(&zonehash_lock);
955 	zone = list_head(&zone_active);
956 	while (zone != NULL) {
957 		if ((applyfn)(&zonehash_lock, B_FALSE, zone, key)) {
958 			/* Lock dropped - restart at head */
959 			zone = list_head(&zone_active);
960 		} else {
961 			zone = list_next(&zone_active, zone);
962 		}
963 	}
964 	mutex_exit(&zonehash_lock);
965 }
966 
967 /*
968  * Apply a function to all keys for a particular zone.
969  *
970  * The applyfn has to drop zonehash_lock if it does some work, and
971  * then reacquire it before it returns.
972  * When the lock is dropped we don't follow list_next even
973  * if it is possible to do so without any hazards. This is
974  * because we want the design to allow for the list of zsd callbacks
975  * to change in any arbitrary way during the time the
976  * lock was dropped.
977  *
978  * It is safe to restart the loop at list_head since the applyfn
979  * changes the zsd_flags as it does work, so a subsequent
980  * pass through will have no effect in applyfn, hence the loop will terminate
981  * in at worst O(N^2).
982  */
983 static void
984 zsd_apply_all_keys(zsd_applyfn_t *applyfn, zone_t *zone)
985 {
986 	struct zsd_entry *t;
987 
988 	mutex_enter(&zone->zone_lock);
989 	t = list_head(&zone->zone_zsd);
990 	while (t != NULL) {
991 		if ((applyfn)(NULL, B_TRUE, zone, t->zsd_key)) {
992 			/* Lock dropped - restart at head */
993 			t = list_head(&zone->zone_zsd);
994 		} else {
995 			t = list_next(&zone->zone_zsd, t);
996 		}
997 	}
998 	mutex_exit(&zone->zone_lock);
999 }
1000 
1001 /*
1002  * Call the create function for the zone and key if CREATE_NEEDED
1003  * is set.
1004  * If some other thread gets here first and sets CREATE_INPROGRESS, then
1005  * we wait for that thread to complete so that we can ensure that
1006  * all the callbacks are done when we've looped over all zones/keys.
1007  *
1008  * When we call the create function, we drop the global lock held by the
1009  * caller, and return true to tell the caller it needs to re-evaluate the
1010  * state.
1011  * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
1012  * remains held on exit.
1013  */
1014 static boolean_t
1015 zsd_apply_create(kmutex_t *lockp, boolean_t zone_lock_held,
1016     zone_t *zone, zone_key_t key)
1017 {
1018 	void *result;
1019 	struct zsd_entry *t;
1020 	boolean_t dropped;
1021 
1022 	if (lockp != NULL) {
1023 		ASSERT(MUTEX_HELD(lockp));
1024 	}
1025 	if (zone_lock_held) {
1026 		ASSERT(MUTEX_HELD(&zone->zone_lock));
1027 	} else {
1028 		mutex_enter(&zone->zone_lock);
1029 	}
1030 
1031 	t = zsd_find(&zone->zone_zsd, key);
1032 	if (t == NULL) {
1033 		/*
1034 		 * Somebody else got here first, e.g. the zone going
1035 		 * away.
1036 		 */
1037 		if (!zone_lock_held)
1038 			mutex_exit(&zone->zone_lock);
1039 		return (B_FALSE);
1040 	}
1041 	dropped = B_FALSE;
1042 	if (zsd_wait_for_inprogress(zone, t, lockp))
1043 		dropped = B_TRUE;
1044 
1045 	if (t->zsd_flags & ZSD_CREATE_NEEDED) {
1046 		t->zsd_flags &= ~ZSD_CREATE_NEEDED;
1047 		t->zsd_flags |= ZSD_CREATE_INPROGRESS;
1048 		DTRACE_PROBE2(zsd__create__inprogress,
1049 		    zone_t *, zone, zone_key_t, key);
1050 		mutex_exit(&zone->zone_lock);
1051 		if (lockp != NULL)
1052 			mutex_exit(lockp);
1053 
1054 		dropped = B_TRUE;
1055 		ASSERT(t->zsd_create != NULL);
1056 		DTRACE_PROBE2(zsd__create__start,
1057 		    zone_t *, zone, zone_key_t, key);
1058 
1059 		result = (*t->zsd_create)(zone->zone_id);
1060 
1061 		DTRACE_PROBE2(zsd__create__end,
1062 		    zone_t *, zone, void *, result);
1063 
1064 		ASSERT(result != NULL);
1065 		if (lockp != NULL)
1066 			mutex_enter(lockp);
1067 		mutex_enter(&zone->zone_lock);
1068 		t->zsd_data = result;
1069 		t->zsd_flags &= ~ZSD_CREATE_INPROGRESS;
1070 		t->zsd_flags |= ZSD_CREATE_COMPLETED;
1071 		cv_broadcast(&t->zsd_cv);
1072 		DTRACE_PROBE2(zsd__create__completed,
1073 		    zone_t *, zone, zone_key_t, key);
1074 	}
1075 	if (!zone_lock_held)
1076 		mutex_exit(&zone->zone_lock);
1077 	return (dropped);
1078 }
1079 
1080 /*
1081  * Call the shutdown function for the zone and key if SHUTDOWN_NEEDED
1082  * is set.
1083  * If some other thread gets here first and sets *_INPROGRESS, then
1084  * we wait for that thread to complete so that we can ensure that
1085  * all the callbacks are done when we've looped over all zones/keys.
1086  *
1087  * When we call the shutdown function, we drop the global lock held by the
1088  * caller, and return true to tell the caller it needs to re-evaluate the
1089  * state.
1090  * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
1091  * remains held on exit.
1092  */
1093 static boolean_t
1094 zsd_apply_shutdown(kmutex_t *lockp, boolean_t zone_lock_held,
1095     zone_t *zone, zone_key_t key)
1096 {
1097 	struct zsd_entry *t;
1098 	void *data;
1099 	boolean_t dropped;
1100 
1101 	if (lockp != NULL) {
1102 		ASSERT(MUTEX_HELD(lockp));
1103 	}
1104 	if (zone_lock_held) {
1105 		ASSERT(MUTEX_HELD(&zone->zone_lock));
1106 	} else {
1107 		mutex_enter(&zone->zone_lock);
1108 	}
1109 
1110 	t = zsd_find(&zone->zone_zsd, key);
1111 	if (t == NULL) {
1112 		/*
1113 		 * Somebody else got here first, e.g. the zone going
1114 		 * away.
1115 		 */
1116 		if (!zone_lock_held)
1117 			mutex_exit(&zone->zone_lock);
1118 		return (B_FALSE);
1119 	}
1120 	dropped = B_FALSE;
1121 	if (zsd_wait_for_creator(zone, t, lockp))
1122 		dropped = B_TRUE;
1123 
1124 	if (zsd_wait_for_inprogress(zone, t, lockp))
1125 		dropped = B_TRUE;
1126 
1127 	if (t->zsd_flags & ZSD_SHUTDOWN_NEEDED) {
1128 		t->zsd_flags &= ~ZSD_SHUTDOWN_NEEDED;
1129 		t->zsd_flags |= ZSD_SHUTDOWN_INPROGRESS;
1130 		DTRACE_PROBE2(zsd__shutdown__inprogress,
1131 		    zone_t *, zone, zone_key_t, key);
1132 		mutex_exit(&zone->zone_lock);
1133 		if (lockp != NULL)
1134 			mutex_exit(lockp);
1135 		dropped = B_TRUE;
1136 
1137 		ASSERT(t->zsd_shutdown != NULL);
1138 		data = t->zsd_data;
1139 
1140 		DTRACE_PROBE2(zsd__shutdown__start,
1141 		    zone_t *, zone, zone_key_t, key);
1142 
1143 		(t->zsd_shutdown)(zone->zone_id, data);
1144 		DTRACE_PROBE2(zsd__shutdown__end,
1145 		    zone_t *, zone, zone_key_t, key);
1146 
1147 		if (lockp != NULL)
1148 			mutex_enter(lockp);
1149 		mutex_enter(&zone->zone_lock);
1150 		t->zsd_flags &= ~ZSD_SHUTDOWN_INPROGRESS;
1151 		t->zsd_flags |= ZSD_SHUTDOWN_COMPLETED;
1152 		cv_broadcast(&t->zsd_cv);
1153 		DTRACE_PROBE2(zsd__shutdown__completed,
1154 		    zone_t *, zone, zone_key_t, key);
1155 	}
1156 	if (!zone_lock_held)
1157 		mutex_exit(&zone->zone_lock);
1158 	return (dropped);
1159 }
1160 
1161 /*
1162  * Call the destroy function for the zone and key if DESTROY_NEEDED
1163  * is set.
1164  * If some other thread gets here first and sets *_INPROGRESS, then
1165  * we wait for that thread to complete so that we can ensure that
1166  * all the callbacks are done when we've looped over all zones/keys.
1167  *
1168  * When we call the destroy function, we drop the global lock held by the
1169  * caller, and return true to tell the caller it needs to re-evaluate the
1170  * state.
1171  * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
1172  * remains held on exit.
1173  */
1174 static boolean_t
1175 zsd_apply_destroy(kmutex_t *lockp, boolean_t zone_lock_held,
1176     zone_t *zone, zone_key_t key)
1177 {
1178 	struct zsd_entry *t;
1179 	void *data;
1180 	boolean_t dropped;
1181 
1182 	if (lockp != NULL) {
1183 		ASSERT(MUTEX_HELD(lockp));
1184 	}
1185 	if (zone_lock_held) {
1186 		ASSERT(MUTEX_HELD(&zone->zone_lock));
1187 	} else {
1188 		mutex_enter(&zone->zone_lock);
1189 	}
1190 
1191 	t = zsd_find(&zone->zone_zsd, key);
1192 	if (t == NULL) {
1193 		/*
1194 		 * Somebody else got here first, e.g. the zone going
1195 		 * away.
1196 		 */
1197 		if (!zone_lock_held)
1198 			mutex_exit(&zone->zone_lock);
1199 		return (B_FALSE);
1200 	}
1201 	dropped = B_FALSE;
1202 	if (zsd_wait_for_creator(zone, t, lockp))
1203 		dropped = B_TRUE;
1204 
1205 	if (zsd_wait_for_inprogress(zone, t, lockp))
1206 		dropped = B_TRUE;
1207 
1208 	if (t->zsd_flags & ZSD_DESTROY_NEEDED) {
1209 		t->zsd_flags &= ~ZSD_DESTROY_NEEDED;
1210 		t->zsd_flags |= ZSD_DESTROY_INPROGRESS;
1211 		DTRACE_PROBE2(zsd__destroy__inprogress,
1212 		    zone_t *, zone, zone_key_t, key);
1213 		mutex_exit(&zone->zone_lock);
1214 		if (lockp != NULL)
1215 			mutex_exit(lockp);
1216 		dropped = B_TRUE;
1217 
1218 		ASSERT(t->zsd_destroy != NULL);
1219 		data = t->zsd_data;
1220 		DTRACE_PROBE2(zsd__destroy__start,
1221 		    zone_t *, zone, zone_key_t, key);
1222 
1223 		(t->zsd_destroy)(zone->zone_id, data);
1224 		DTRACE_PROBE2(zsd__destroy__end,
1225 		    zone_t *, zone, zone_key_t, key);
1226 
1227 		if (lockp != NULL)
1228 			mutex_enter(lockp);
1229 		mutex_enter(&zone->zone_lock);
1230 		t->zsd_data = NULL;
1231 		t->zsd_flags &= ~ZSD_DESTROY_INPROGRESS;
1232 		t->zsd_flags |= ZSD_DESTROY_COMPLETED;
1233 		cv_broadcast(&t->zsd_cv);
1234 		DTRACE_PROBE2(zsd__destroy__completed,
1235 		    zone_t *, zone, zone_key_t, key);
1236 	}
1237 	if (!zone_lock_held)
1238 		mutex_exit(&zone->zone_lock);
1239 	return (dropped);
1240 }
1241 
1242 /*
1243  * Wait for any CREATE_NEEDED flag to be cleared.
1244  * Returns true if lockp was temporarily dropped while waiting.
1245  */
1246 static boolean_t
1247 zsd_wait_for_creator(zone_t *zone, struct zsd_entry *t, kmutex_t *lockp)
1248 {
1249 	boolean_t dropped = B_FALSE;
1250 
1251 	while (t->zsd_flags & ZSD_CREATE_NEEDED) {
1252 		DTRACE_PROBE2(zsd__wait__for__creator,
1253 		    zone_t *, zone, struct zsd_entry *, t);
1254 		if (lockp != NULL) {
1255 			dropped = B_TRUE;
1256 			mutex_exit(lockp);
1257 		}
1258 		cv_wait(&t->zsd_cv, &zone->zone_lock);
1259 		if (lockp != NULL) {
1260 			/* First drop zone_lock to preserve order */
1261 			mutex_exit(&zone->zone_lock);
1262 			mutex_enter(lockp);
1263 			mutex_enter(&zone->zone_lock);
1264 		}
1265 	}
1266 	return (dropped);
1267 }
1268 
1269 /*
1270  * Wait for any INPROGRESS flag to be cleared.
1271  * Returns true if lockp was temporarily dropped while waiting.
1272  */
1273 static boolean_t
1274 zsd_wait_for_inprogress(zone_t *zone, struct zsd_entry *t, kmutex_t *lockp)
1275 {
1276 	boolean_t dropped = B_FALSE;
1277 
1278 	while (t->zsd_flags & ZSD_ALL_INPROGRESS) {
1279 		DTRACE_PROBE2(zsd__wait__for__inprogress,
1280 		    zone_t *, zone, struct zsd_entry *, t);
1281 		if (lockp != NULL) {
1282 			dropped = B_TRUE;
1283 			mutex_exit(lockp);
1284 		}
1285 		cv_wait(&t->zsd_cv, &zone->zone_lock);
1286 		if (lockp != NULL) {
1287 			/* First drop zone_lock to preserve order */
1288 			mutex_exit(&zone->zone_lock);
1289 			mutex_enter(lockp);
1290 			mutex_enter(&zone->zone_lock);
1291 		}
1292 	}
1293 	return (dropped);
1294 }
1295 
1296 /*
1297  * Frees memory associated with the zone dataset list.
1298  */
1299 static void
1300 zone_free_datasets(zone_t *zone)
1301 {
1302 	zone_dataset_t *t, *next;
1303 
1304 	for (t = list_head(&zone->zone_datasets); t != NULL; t = next) {
1305 		next = list_next(&zone->zone_datasets, t);
1306 		list_remove(&zone->zone_datasets, t);
1307 		kmem_free(t->zd_dataset, strlen(t->zd_dataset) + 1);
1308 		kmem_free(t, sizeof (*t));
1309 	}
1310 	list_destroy(&zone->zone_datasets);
1311 }
1312 
1313 /*
1314  * zone.cpu-shares resource control support.
1315  */
1316 /*ARGSUSED*/
1317 static rctl_qty_t
1318 zone_cpu_shares_usage(rctl_t *rctl, struct proc *p)
1319 {
1320 	ASSERT(MUTEX_HELD(&p->p_lock));
1321 	return (p->p_zone->zone_shares);
1322 }
1323 
1324 /*ARGSUSED*/
1325 static int
1326 zone_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1327     rctl_qty_t nv)
1328 {
1329 	ASSERT(MUTEX_HELD(&p->p_lock));
1330 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1331 	if (e->rcep_p.zone == NULL)
1332 		return (0);
1333 
1334 	e->rcep_p.zone->zone_shares = nv;
1335 	return (0);
1336 }
1337 
1338 static rctl_ops_t zone_cpu_shares_ops = {
1339 	rcop_no_action,
1340 	zone_cpu_shares_usage,
1341 	zone_cpu_shares_set,
1342 	rcop_no_test
1343 };
1344 
1345 /*
1346  * zone.cpu-cap resource control support.
1347  */
1348 /*ARGSUSED*/
1349 static rctl_qty_t
1350 zone_cpu_cap_get(rctl_t *rctl, struct proc *p)
1351 {
1352 	ASSERT(MUTEX_HELD(&p->p_lock));
1353 	return (cpucaps_zone_get(p->p_zone));
1354 }
1355 
1356 /*ARGSUSED*/
1357 static int
1358 zone_cpu_cap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1359     rctl_qty_t nv)
1360 {
1361 	zone_t *zone = e->rcep_p.zone;
1362 
1363 	ASSERT(MUTEX_HELD(&p->p_lock));
1364 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1365 
1366 	if (zone == NULL)
1367 		return (0);
1368 
1369 	/*
1370 	 * set cap to the new value.
1371 	 */
1372 	return (cpucaps_zone_set(zone, nv));
1373 }
1374 
1375 static rctl_ops_t zone_cpu_cap_ops = {
1376 	rcop_no_action,
1377 	zone_cpu_cap_get,
1378 	zone_cpu_cap_set,
1379 	rcop_no_test
1380 };
1381 
1382 /*ARGSUSED*/
1383 static rctl_qty_t
1384 zone_lwps_usage(rctl_t *r, proc_t *p)
1385 {
1386 	rctl_qty_t nlwps;
1387 	zone_t *zone = p->p_zone;
1388 
1389 	ASSERT(MUTEX_HELD(&p->p_lock));
1390 
1391 	mutex_enter(&zone->zone_nlwps_lock);
1392 	nlwps = zone->zone_nlwps;
1393 	mutex_exit(&zone->zone_nlwps_lock);
1394 
1395 	return (nlwps);
1396 }
1397 
1398 /*ARGSUSED*/
1399 static int
1400 zone_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
1401     rctl_qty_t incr, uint_t flags)
1402 {
1403 	rctl_qty_t nlwps;
1404 
1405 	ASSERT(MUTEX_HELD(&p->p_lock));
1406 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1407 	if (e->rcep_p.zone == NULL)
1408 		return (0);
1409 	ASSERT(MUTEX_HELD(&(e->rcep_p.zone->zone_nlwps_lock)));
1410 	nlwps = e->rcep_p.zone->zone_nlwps;
1411 
1412 	if (nlwps + incr > rcntl->rcv_value)
1413 		return (1);
1414 
1415 	return (0);
1416 }
1417 
1418 /*ARGSUSED*/
1419 static int
1420 zone_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv)
1421 {
1422 	ASSERT(MUTEX_HELD(&p->p_lock));
1423 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1424 	if (e->rcep_p.zone == NULL)
1425 		return (0);
1426 	e->rcep_p.zone->zone_nlwps_ctl = nv;
1427 	return (0);
1428 }
1429 
1430 static rctl_ops_t zone_lwps_ops = {
1431 	rcop_no_action,
1432 	zone_lwps_usage,
1433 	zone_lwps_set,
1434 	zone_lwps_test,
1435 };
1436 
1437 /*ARGSUSED*/
1438 static rctl_qty_t
1439 zone_procs_usage(rctl_t *r, proc_t *p)
1440 {
1441 	rctl_qty_t nprocs;
1442 	zone_t *zone = p->p_zone;
1443 
1444 	ASSERT(MUTEX_HELD(&p->p_lock));
1445 
1446 	mutex_enter(&zone->zone_nlwps_lock);
1447 	nprocs = zone->zone_nprocs;
1448 	mutex_exit(&zone->zone_nlwps_lock);
1449 
1450 	return (nprocs);
1451 }
1452 
1453 /*ARGSUSED*/
1454 static int
1455 zone_procs_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
1456     rctl_qty_t incr, uint_t flags)
1457 {
1458 	rctl_qty_t nprocs;
1459 
1460 	ASSERT(MUTEX_HELD(&p->p_lock));
1461 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1462 	if (e->rcep_p.zone == NULL)
1463 		return (0);
1464 	ASSERT(MUTEX_HELD(&(e->rcep_p.zone->zone_nlwps_lock)));
1465 	nprocs = e->rcep_p.zone->zone_nprocs;
1466 
1467 	if (nprocs + incr > rcntl->rcv_value)
1468 		return (1);
1469 
1470 	return (0);
1471 }
1472 
1473 /*ARGSUSED*/
1474 static int
1475 zone_procs_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv)
1476 {
1477 	ASSERT(MUTEX_HELD(&p->p_lock));
1478 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1479 	if (e->rcep_p.zone == NULL)
1480 		return (0);
1481 	e->rcep_p.zone->zone_nprocs_ctl = nv;
1482 	return (0);
1483 }
1484 
1485 static rctl_ops_t zone_procs_ops = {
1486 	rcop_no_action,
1487 	zone_procs_usage,
1488 	zone_procs_set,
1489 	zone_procs_test,
1490 };
1491 
1492 /*ARGSUSED*/
1493 static int
1494 zone_shmmax_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1495     rctl_qty_t incr, uint_t flags)
1496 {
1497 	rctl_qty_t v;
1498 	ASSERT(MUTEX_HELD(&p->p_lock));
1499 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1500 	v = e->rcep_p.zone->zone_shmmax + incr;
1501 	if (v > rval->rcv_value)
1502 		return (1);
1503 	return (0);
1504 }
1505 
1506 static rctl_ops_t zone_shmmax_ops = {
1507 	rcop_no_action,
1508 	rcop_no_usage,
1509 	rcop_no_set,
1510 	zone_shmmax_test
1511 };
1512 
1513 /*ARGSUSED*/
1514 static int
1515 zone_shmmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1516     rctl_qty_t incr, uint_t flags)
1517 {
1518 	rctl_qty_t v;
1519 	ASSERT(MUTEX_HELD(&p->p_lock));
1520 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1521 	v = e->rcep_p.zone->zone_ipc.ipcq_shmmni + incr;
1522 	if (v > rval->rcv_value)
1523 		return (1);
1524 	return (0);
1525 }
1526 
1527 static rctl_ops_t zone_shmmni_ops = {
1528 	rcop_no_action,
1529 	rcop_no_usage,
1530 	rcop_no_set,
1531 	zone_shmmni_test
1532 };
1533 
1534 /*ARGSUSED*/
1535 static int
1536 zone_semmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1537     rctl_qty_t incr, uint_t flags)
1538 {
1539 	rctl_qty_t v;
1540 	ASSERT(MUTEX_HELD(&p->p_lock));
1541 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1542 	v = e->rcep_p.zone->zone_ipc.ipcq_semmni + incr;
1543 	if (v > rval->rcv_value)
1544 		return (1);
1545 	return (0);
1546 }
1547 
1548 static rctl_ops_t zone_semmni_ops = {
1549 	rcop_no_action,
1550 	rcop_no_usage,
1551 	rcop_no_set,
1552 	zone_semmni_test
1553 };
1554 
1555 /*ARGSUSED*/
1556 static int
1557 zone_msgmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1558     rctl_qty_t incr, uint_t flags)
1559 {
1560 	rctl_qty_t v;
1561 	ASSERT(MUTEX_HELD(&p->p_lock));
1562 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1563 	v = e->rcep_p.zone->zone_ipc.ipcq_msgmni + incr;
1564 	if (v > rval->rcv_value)
1565 		return (1);
1566 	return (0);
1567 }
1568 
1569 static rctl_ops_t zone_msgmni_ops = {
1570 	rcop_no_action,
1571 	rcop_no_usage,
1572 	rcop_no_set,
1573 	zone_msgmni_test
1574 };
1575 
1576 /*ARGSUSED*/
1577 static rctl_qty_t
1578 zone_locked_mem_usage(rctl_t *rctl, struct proc *p)
1579 {
1580 	rctl_qty_t q;
1581 	ASSERT(MUTEX_HELD(&p->p_lock));
1582 	mutex_enter(&p->p_zone->zone_mem_lock);
1583 	q = p->p_zone->zone_locked_mem;
1584 	mutex_exit(&p->p_zone->zone_mem_lock);
1585 	return (q);
1586 }
1587 
1588 /*ARGSUSED*/
1589 static int
1590 zone_locked_mem_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
1591     rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
1592 {
1593 	rctl_qty_t q;
1594 	zone_t *z;
1595 
1596 	z = e->rcep_p.zone;
1597 	ASSERT(MUTEX_HELD(&p->p_lock));
1598 	ASSERT(MUTEX_HELD(&z->zone_mem_lock));
1599 	q = z->zone_locked_mem;
1600 	if (q + incr > rcntl->rcv_value)
1601 		return (1);
1602 	return (0);
1603 }
1604 
1605 /*ARGSUSED*/
1606 static int
1607 zone_locked_mem_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1608     rctl_qty_t nv)
1609 {
1610 	ASSERT(MUTEX_HELD(&p->p_lock));
1611 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1612 	if (e->rcep_p.zone == NULL)
1613 		return (0);
1614 	e->rcep_p.zone->zone_locked_mem_ctl = nv;
1615 	return (0);
1616 }
1617 
1618 static rctl_ops_t zone_locked_mem_ops = {
1619 	rcop_no_action,
1620 	zone_locked_mem_usage,
1621 	zone_locked_mem_set,
1622 	zone_locked_mem_test
1623 };
1624 
1625 /*ARGSUSED*/
1626 static rctl_qty_t
1627 zone_max_swap_usage(rctl_t *rctl, struct proc *p)
1628 {
1629 	rctl_qty_t q;
1630 	zone_t *z = p->p_zone;
1631 
1632 	ASSERT(MUTEX_HELD(&p->p_lock));
1633 	mutex_enter(&z->zone_mem_lock);
1634 	q = z->zone_max_swap;
1635 	mutex_exit(&z->zone_mem_lock);
1636 	return (q);
1637 }
1638 
1639 /*ARGSUSED*/
1640 static int
1641 zone_max_swap_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
1642     rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
1643 {
1644 	rctl_qty_t q;
1645 	zone_t *z;
1646 
1647 	z = e->rcep_p.zone;
1648 	ASSERT(MUTEX_HELD(&p->p_lock));
1649 	ASSERT(MUTEX_HELD(&z->zone_mem_lock));
1650 	q = z->zone_max_swap;
1651 	if (q + incr > rcntl->rcv_value)
1652 		return (1);
1653 	return (0);
1654 }
1655 
1656 /*ARGSUSED*/
1657 static int
1658 zone_max_swap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1659     rctl_qty_t nv)
1660 {
1661 	ASSERT(MUTEX_HELD(&p->p_lock));
1662 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1663 	if (e->rcep_p.zone == NULL)
1664 		return (0);
1665 	e->rcep_p.zone->zone_max_swap_ctl = nv;
1666 	return (0);
1667 }
1668 
1669 static rctl_ops_t zone_max_swap_ops = {
1670 	rcop_no_action,
1671 	zone_max_swap_usage,
1672 	zone_max_swap_set,
1673 	zone_max_swap_test
1674 };
1675 
1676 /*ARGSUSED*/
1677 static rctl_qty_t
1678 zone_max_lofi_usage(rctl_t *rctl, struct proc *p)
1679 {
1680 	rctl_qty_t q;
1681 	zone_t *z = p->p_zone;
1682 
1683 	ASSERT(MUTEX_HELD(&p->p_lock));
1684 	mutex_enter(&z->zone_rctl_lock);
1685 	q = z->zone_max_lofi;
1686 	mutex_exit(&z->zone_rctl_lock);
1687 	return (q);
1688 }
1689 
1690 /*ARGSUSED*/
1691 static int
1692 zone_max_lofi_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
1693     rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
1694 {
1695 	rctl_qty_t q;
1696 	zone_t *z;
1697 
1698 	z = e->rcep_p.zone;
1699 	ASSERT(MUTEX_HELD(&p->p_lock));
1700 	ASSERT(MUTEX_HELD(&z->zone_rctl_lock));
1701 	q = z->zone_max_lofi;
1702 	if (q + incr > rcntl->rcv_value)
1703 		return (1);
1704 	return (0);
1705 }
1706 
1707 /*ARGSUSED*/
1708 static int
1709 zone_max_lofi_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1710     rctl_qty_t nv)
1711 {
1712 	ASSERT(MUTEX_HELD(&p->p_lock));
1713 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1714 	if (e->rcep_p.zone == NULL)
1715 		return (0);
1716 	e->rcep_p.zone->zone_max_lofi_ctl = nv;
1717 	return (0);
1718 }
1719 
1720 static rctl_ops_t zone_max_lofi_ops = {
1721 	rcop_no_action,
1722 	zone_max_lofi_usage,
1723 	zone_max_lofi_set,
1724 	zone_max_lofi_test
1725 };
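
/*
 * The usage/test/set triads above share a pattern: the "usage" callback
 * returns current consumption under the zone lock that guards the counter,
 * the "test" callback is invoked with that lock already held and returns
 * non-zero to deny an increment that would exceed the proposed limit, and
 * the "set" callback caches the new limit in the zone_t so consumers can
 * check it without walking the rctl set.  A hedged sketch of a "test"
 * callback for a hypothetical counter (zone_max_widgets is illustrative,
 * not a real zone_t field):
 *
 *	static int
 *	zone_max_widgets_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
 *	    rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
 *	{
 *		zone_t *z = e->rcep_p.zone;
 *
 *		ASSERT(MUTEX_HELD(&p->p_lock));
 *		ASSERT(MUTEX_HELD(&z->zone_rctl_lock));
 *		return (z->zone_max_widgets + incr > rcntl->rcv_value);
 *	}
 */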
1726 
1727 /*
1728  * Helper function to assign the zone a unique ID.
1729  */
1730 static void
1731 zone_uniqid(zone_t *zone)
1732 {
1733 	static uint64_t uniqid = 0;
1734 
1735 	ASSERT(MUTEX_HELD(&zonehash_lock));
1736 	zone->zone_uniqid = uniqid++;
1737 }
1738 
1739 /*
1740  * Returns a held pointer to the "kcred" for the specified zone.
1741  */
1742 struct cred *
1743 zone_get_kcred(zoneid_t zoneid)
1744 {
1745 	zone_t *zone;
1746 	cred_t *cr;
1747 
1748 	if ((zone = zone_find_by_id(zoneid)) == NULL)
1749 		return (NULL);
1750 	cr = zone->zone_kcred;
1751 	crhold(cr);
1752 	zone_rele(zone);
1753 	return (cr);
1754 }
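
/*
 * Usage sketch (illustrative): callers pair the hold returned here with
 * crfree().  The zoneid and the work performed are hypothetical.
 *
 *	cred_t *cr;
 *
 *	if ((cr = zone_get_kcred(zoneid)) == NULL)
 *		return (ENOENT);
 *	(perform privileged work using cr)
 *	crfree(cr);
 */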
1755 
1756 static int
1757 zone_lockedmem_kstat_update(kstat_t *ksp, int rw)
1758 {
1759 	zone_t *zone = ksp->ks_private;
1760 	zone_kstat_t *zk = ksp->ks_data;
1761 
1762 	if (rw == KSTAT_WRITE)
1763 		return (EACCES);
1764 
1765 	zk->zk_usage.value.ui64 = zone->zone_locked_mem;
1766 	zk->zk_value.value.ui64 = zone->zone_locked_mem_ctl;
1767 	return (0);
1768 }
1769 
1770 static int
1771 zone_nprocs_kstat_update(kstat_t *ksp, int rw)
1772 {
1773 	zone_t *zone = ksp->ks_private;
1774 	zone_kstat_t *zk = ksp->ks_data;
1775 
1776 	if (rw == KSTAT_WRITE)
1777 		return (EACCES);
1778 
1779 	zk->zk_usage.value.ui64 = zone->zone_nprocs;
1780 	zk->zk_value.value.ui64 = zone->zone_nprocs_ctl;
1781 	return (0);
1782 }
1783 
1784 static int
1785 zone_swapresv_kstat_update(kstat_t *ksp, int rw)
1786 {
1787 	zone_t *zone = ksp->ks_private;
1788 	zone_kstat_t *zk = ksp->ks_data;
1789 
1790 	if (rw == KSTAT_WRITE)
1791 		return (EACCES);
1792 
1793 	zk->zk_usage.value.ui64 = zone->zone_max_swap;
1794 	zk->zk_value.value.ui64 = zone->zone_max_swap_ctl;
1795 	return (0);
1796 }
1797 
1798 static kstat_t *
1799 zone_kstat_create_common(zone_t *zone, char *name,
1800     int (*updatefunc) (kstat_t *, int))
1801 {
1802 	kstat_t *ksp;
1803 	zone_kstat_t *zk;
1804 
1805 	ksp = rctl_kstat_create_zone(zone, name, KSTAT_TYPE_NAMED,
1806 	    sizeof (zone_kstat_t) / sizeof (kstat_named_t),
1807 	    KSTAT_FLAG_VIRTUAL);
1808 
1809 	if (ksp == NULL)
1810 		return (NULL);
1811 
1812 	zk = ksp->ks_data = kmem_alloc(sizeof (zone_kstat_t), KM_SLEEP);
1813 	ksp->ks_data_size += strlen(zone->zone_name) + 1;
1814 	kstat_named_init(&zk->zk_zonename, "zonename", KSTAT_DATA_STRING);
1815 	kstat_named_setstr(&zk->zk_zonename, zone->zone_name);
1816 	kstat_named_init(&zk->zk_usage, "usage", KSTAT_DATA_UINT64);
1817 	kstat_named_init(&zk->zk_value, "value", KSTAT_DATA_UINT64);
1818 	ksp->ks_update = updatefunc;
1819 	ksp->ks_private = zone;
1820 	kstat_install(ksp);
1821 	return (ksp);
1822 }
1823 
1824 static int
1825 zone_misc_kstat_update(kstat_t *ksp, int rw)
1826 {
1827 	zone_t *zone = ksp->ks_private;
1828 	zone_misc_kstat_t *zmp = ksp->ks_data;
1829 	hrtime_t tmp;
1830 
1831 	if (rw == KSTAT_WRITE)
1832 		return (EACCES);
1833 
1834 	tmp = zone->zone_utime;
1835 	scalehrtime(&tmp);
1836 	zmp->zm_utime.value.ui64 = tmp;
1837 	tmp = zone->zone_stime;
1838 	scalehrtime(&tmp);
1839 	zmp->zm_stime.value.ui64 = tmp;
1840 	tmp = zone->zone_wtime;
1841 	scalehrtime(&tmp);
1842 	zmp->zm_wtime.value.ui64 = tmp;
1843 
1844 	return (0);
1845 }
1846 
1847 static kstat_t *
1848 zone_misc_kstat_create(zone_t *zone)
1849 {
1850 	kstat_t *ksp;
1851 	zone_misc_kstat_t *zmp;
1852 
1853 	if ((ksp = kstat_create_zone("zones", zone->zone_id,
1854 	    zone->zone_name, "zone_misc", KSTAT_TYPE_NAMED,
1855 	    sizeof (zone_misc_kstat_t) / sizeof (kstat_named_t),
1856 	    KSTAT_FLAG_VIRTUAL, zone->zone_id)) == NULL)
1857 		return (NULL);
1858 
1859 	if (zone->zone_id != GLOBAL_ZONEID)
1860 		kstat_zone_add(ksp, GLOBAL_ZONEID);
1861 
1862 	zmp = ksp->ks_data = kmem_zalloc(sizeof (zone_misc_kstat_t), KM_SLEEP);
1863 	ksp->ks_data_size += strlen(zone->zone_name) + 1;
1864 	ksp->ks_lock = &zone->zone_misc_lock;
1865 	zone->zone_misc_stats = zmp;
1866 
1867 	/* The kstat "name" field is not large enough for a full zonename */
1868 	kstat_named_init(&zmp->zm_zonename, "zonename", KSTAT_DATA_STRING);
1869 	kstat_named_setstr(&zmp->zm_zonename, zone->zone_name);
1870 	kstat_named_init(&zmp->zm_utime, "nsec_user", KSTAT_DATA_UINT64);
1871 	kstat_named_init(&zmp->zm_stime, "nsec_sys", KSTAT_DATA_UINT64);
1872 	kstat_named_init(&zmp->zm_wtime, "nsec_waitrq", KSTAT_DATA_UINT64);
1873 
1874 	ksp->ks_update = zone_misc_kstat_update;
1875 	ksp->ks_private = zone;
1876 
1877 	kstat_install(ksp);
1878 	return (ksp);
1879 }
1880 
1881 static void
1882 zone_kstat_create(zone_t *zone)
1883 {
1884 	zone->zone_lockedmem_kstat = zone_kstat_create_common(zone,
1885 	    "lockedmem", zone_lockedmem_kstat_update);
1886 	zone->zone_swapresv_kstat = zone_kstat_create_common(zone,
1887 	    "swapresv", zone_swapresv_kstat_update);
1888 	zone->zone_nprocs_kstat = zone_kstat_create_common(zone,
1889 	    "nprocs", zone_nprocs_kstat_update);
1890 
1891 	if ((zone->zone_misc_ksp = zone_misc_kstat_create(zone)) == NULL) {
1892 		zone->zone_misc_stats = kmem_zalloc(
1893 		    sizeof (zone_misc_kstat_t), KM_SLEEP);
1894 	}
1895 }
1896 
1897 static void
1898 zone_kstat_delete_common(kstat_t **pkstat, size_t datasz)
1899 {
1900 	void *data;
1901 
1902 	if (*pkstat != NULL) {
1903 		data = (*pkstat)->ks_data;
1904 		kstat_delete(*pkstat);
1905 		kmem_free(data, datasz);
1906 		*pkstat = NULL;
1907 	}
1908 }
1909 
1910 static void
1911 zone_kstat_delete(zone_t *zone)
1912 {
1913 	zone_kstat_delete_common(&zone->zone_lockedmem_kstat,
1914 	    sizeof (zone_kstat_t));
1915 	zone_kstat_delete_common(&zone->zone_swapresv_kstat,
1916 	    sizeof (zone_kstat_t));
1917 	zone_kstat_delete_common(&zone->zone_nprocs_kstat,
1918 	    sizeof (zone_kstat_t));
1919 	zone_kstat_delete_common(&zone->zone_misc_ksp,
1920 	    sizeof (zone_misc_kstat_t));
1921 }
1922 
1923 /*
1924  * Called very early on in boot to initialize the ZSD list so that
1925  * zone_key_create() can be called before zone_init().  It also initializes
1926  * portions of zone0 which may be used before zone_init() is called.  The
1927  * variable "global_zone" will be set when zone0 is fully initialized by
1928  * zone_init().
1929  */
1930 void
1931 zone_zsd_init(void)
1932 {
1933 	mutex_init(&zonehash_lock, NULL, MUTEX_DEFAULT, NULL);
1934 	mutex_init(&zsd_key_lock, NULL, MUTEX_DEFAULT, NULL);
1935 	list_create(&zsd_registered_keys, sizeof (struct zsd_entry),
1936 	    offsetof(struct zsd_entry, zsd_linkage));
1937 	list_create(&zone_active, sizeof (zone_t),
1938 	    offsetof(zone_t, zone_linkage));
1939 	list_create(&zone_deathrow, sizeof (zone_t),
1940 	    offsetof(zone_t, zone_linkage));
1941 
1942 	mutex_init(&zone0.zone_lock, NULL, MUTEX_DEFAULT, NULL);
1943 	mutex_init(&zone0.zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
1944 	mutex_init(&zone0.zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
1945 	zone0.zone_shares = 1;
1946 	zone0.zone_nlwps = 0;
1947 	zone0.zone_nlwps_ctl = INT_MAX;
1948 	zone0.zone_nprocs = 0;
1949 	zone0.zone_nprocs_ctl = INT_MAX;
1950 	zone0.zone_locked_mem = 0;
1951 	zone0.zone_locked_mem_ctl = UINT64_MAX;
1952 	ASSERT(zone0.zone_max_swap == 0);
1953 	zone0.zone_max_swap_ctl = UINT64_MAX;
1954 	zone0.zone_max_lofi = 0;
1955 	zone0.zone_max_lofi_ctl = UINT64_MAX;
1956 	zone0.zone_shmmax = 0;
1957 	zone0.zone_ipc.ipcq_shmmni = 0;
1958 	zone0.zone_ipc.ipcq_semmni = 0;
1959 	zone0.zone_ipc.ipcq_msgmni = 0;
1960 	zone0.zone_name = GLOBAL_ZONENAME;
1961 	zone0.zone_nodename = utsname.nodename;
1962 	zone0.zone_domain = srpc_domain;
1963 	zone0.zone_hostid = HW_INVALID_HOSTID;
1964 	zone0.zone_fs_allowed = NULL;
1965 	zone0.zone_ref = 1;
1966 	zone0.zone_id = GLOBAL_ZONEID;
1967 	zone0.zone_status = ZONE_IS_RUNNING;
1968 	zone0.zone_rootpath = "/";
1969 	zone0.zone_rootpathlen = 2;
1970 	zone0.zone_psetid = ZONE_PS_INVAL;
1971 	zone0.zone_ncpus = 0;
1972 	zone0.zone_ncpus_online = 0;
1973 	zone0.zone_proc_initpid = 1;
1974 	zone0.zone_initname = initname;
1975 	zone0.zone_lockedmem_kstat = NULL;
1976 	zone0.zone_swapresv_kstat = NULL;
1977 	zone0.zone_nprocs_kstat = NULL;
1978 
1979 	zone0.zone_stime = 0;
1980 	zone0.zone_utime = 0;
1981 	zone0.zone_wtime = 0;
1982 
1983 	list_create(&zone0.zone_ref_list, sizeof (zone_ref_t),
1984 	    offsetof(zone_ref_t, zref_linkage));
1985 	list_create(&zone0.zone_zsd, sizeof (struct zsd_entry),
1986 	    offsetof(struct zsd_entry, zsd_linkage));
1987 	list_insert_head(&zone_active, &zone0);
1988 
1989 	/*
1990 	 * The root filesystem is not mounted yet, so zone_rootvp cannot be set
1991 	 * to anything meaningful.  It is assigned to be 'rootdir' in
1992 	 * vfs_mountroot().
1993 	 */
1994 	zone0.zone_rootvp = NULL;
1995 	zone0.zone_vfslist = NULL;
1996 	zone0.zone_bootargs = initargs;
1997 	zone0.zone_privset = kmem_alloc(sizeof (priv_set_t), KM_SLEEP);
1998 	/*
1999 	 * The global zone has all privileges
2000 	 */
2001 	priv_fillset(zone0.zone_privset);
2002 	/*
2003 	 * Add p0 to the global zone
2004 	 */
2005 	zone0.zone_zsched = &p0;
2006 	p0.p_zone = &zone0;
2007 }
2008 
2009 /*
2010  * Compute a hash value based on the contents of the label and the DOI.  The
2011  * hash algorithm is somewhat arbitrary, but is based on the observation that
2012  * humans will likely pick labels that differ by amounts that work out to be
2013  * multiples of the number of hash chains, and thus stirring in some primes
2014  * should help.
2015  */
2016 static uint_t
2017 hash_bylabel(void *hdata, mod_hash_key_t key)
2018 {
2019 	const ts_label_t *lab = (ts_label_t *)key;
2020 	const uint32_t *up, *ue;
2021 	uint_t hash;
2022 	int i;
2023 
2024 	_NOTE(ARGUNUSED(hdata));
2025 
2026 	hash = lab->tsl_doi + (lab->tsl_doi << 1);
2027 	/* we depend on alignment of label, but not representation */
2028 	up = (const uint32_t *)&lab->tsl_label;
2029 	ue = up + sizeof (lab->tsl_label) / sizeof (*up);
2030 	i = 1;
2031 	while (up < ue) {
2032 		/* using 2^n + 1, 1 <= n <= 16 as source of many primes */
2033 		hash += *up + (*up << ((i % 16) + 1));
2034 		up++;
2035 		i++;
2036 	}
2037 	return (hash);
2038 }
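
/*
 * For reference, each step above adds *up times (2^n + 1), with n between
 * 1 and 16, into the hash; several values of that form (3, 5, 17, 257,
 * 65537) are prime, which is the source of primes the comment alludes to.
 */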
2039 
2040 /*
2041  * All that mod_hash cares about here is zero (equal) versus non-zero (not
2042  * equal).  This may need to be changed if less than / greater than is ever
2043  * needed.
2044  */
2045 static int
2046 hash_labelkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
2047 {
2048 	ts_label_t *lab1 = (ts_label_t *)key1;
2049 	ts_label_t *lab2 = (ts_label_t *)key2;
2050 
2051 	return (label_equal(lab1, lab2) ? 0 : 1);
2052 }
2053 
2054 /*
2055  * Called by main() to initialize the zones framework.
2056  */
2057 void
2058 zone_init(void)
2059 {
2060 	rctl_dict_entry_t *rde;
2061 	rctl_val_t *dval;
2062 	rctl_set_t *set;
2063 	rctl_alloc_gp_t *gp;
2064 	rctl_entity_p_t e;
2065 	int res;
2066 
2067 	ASSERT(curproc == &p0);
2068 
2069 	/*
2070 	 * Create ID space for zone IDs.  ID 0 is reserved for the
2071 	 * global zone.
2072 	 */
2073 	zoneid_space = id_space_create("zoneid_space", 1, MAX_ZONEID);
2074 
2075 	/*
2076 	 * Initialize generic zone resource controls, if any.
2077 	 */
2078 	rc_zone_cpu_shares = rctl_register("zone.cpu-shares",
2079 	    RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_NEVER |
2080 	    RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER,
2081 	    FSS_MAXSHARES, FSS_MAXSHARES, &zone_cpu_shares_ops);
2082 
2083 	rc_zone_cpu_cap = rctl_register("zone.cpu-cap",
2084 	    RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_ALWAYS |
2085 	    RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER |
2086 	    RCTL_GLOBAL_INFINITE,
2087 	    MAXCAP, MAXCAP, &zone_cpu_cap_ops);
2088 
2089 	rc_zone_nlwps = rctl_register("zone.max-lwps", RCENTITY_ZONE,
2090 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
2091 	    INT_MAX, INT_MAX, &zone_lwps_ops);
2092 
2093 	rc_zone_nprocs = rctl_register("zone.max-processes", RCENTITY_ZONE,
2094 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
2095 	    INT_MAX, INT_MAX, &zone_procs_ops);
2096 
2097 	/*
2098 	 * System V IPC resource controls
2099 	 */
2100 	rc_zone_msgmni = rctl_register("zone.max-msg-ids",
2101 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
2102 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_msgmni_ops);
2103 
2104 	rc_zone_semmni = rctl_register("zone.max-sem-ids",
2105 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
2106 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_semmni_ops);
2107 
2108 	rc_zone_shmmni = rctl_register("zone.max-shm-ids",
2109 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
2110 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_shmmni_ops);
2111 
2112 	rc_zone_shmmax = rctl_register("zone.max-shm-memory",
2113 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
2114 	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX, &zone_shmmax_ops);
2115 
2116 	/*
2117 	 * Create a rctl_val with PRIVILEGED, NOACTION, value = 1.  Then attach
2118 	 * this at the head of the rctl_dict_entry for ``zone.cpu-shares''.
2119 	 */
2120 	dval = kmem_cache_alloc(rctl_val_cache, KM_SLEEP);
2121 	bzero(dval, sizeof (rctl_val_t));
2122 	dval->rcv_value = 1;
2123 	dval->rcv_privilege = RCPRIV_PRIVILEGED;
2124 	dval->rcv_flagaction = RCTL_LOCAL_NOACTION;
2125 	dval->rcv_action_recip_pid = -1;
2126 
2127 	rde = rctl_dict_lookup("zone.cpu-shares");
2128 	(void) rctl_val_list_insert(&rde->rcd_default_value, dval);
2129 
2130 	rc_zone_locked_mem = rctl_register("zone.max-locked-memory",
2131 	    RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES |
2132 	    RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
2133 	    &zone_locked_mem_ops);
2134 
2135 	rc_zone_max_swap = rctl_register("zone.max-swap",
2136 	    RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES |
2137 	    RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
2138 	    &zone_max_swap_ops);
2139 
2140 	rc_zone_max_lofi = rctl_register("zone.max-lofi",
2141 	    RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT |
2142 	    RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
2143 	    &zone_max_lofi_ops);
2144 
2145 	/*
2146 	 * Initialize the ``global zone''.
2147 	 */
2148 	set = rctl_set_create();
2149 	gp = rctl_set_init_prealloc(RCENTITY_ZONE);
2150 	mutex_enter(&p0.p_lock);
2151 	e.rcep_p.zone = &zone0;
2152 	e.rcep_t = RCENTITY_ZONE;
2153 	zone0.zone_rctls = rctl_set_init(RCENTITY_ZONE, &p0, &e, set,
2154 	    gp);
2155 
2156 	zone0.zone_nlwps = p0.p_lwpcnt;
2157 	zone0.zone_nprocs = 1;
2158 	zone0.zone_ntasks = 1;
2159 	mutex_exit(&p0.p_lock);
2160 	zone0.zone_restart_init = B_TRUE;
2161 	zone0.zone_brand = &native_brand;
2162 	rctl_prealloc_destroy(gp);
2163 	/*
2164 	 * pool_default hasn't been initialized yet, so we let pool_init()
2165 	 * take care of making sure the global zone is in the default pool.
2166 	 */
2167 
2168 	/*
2169 	 * Initialize global zone kstats
2170 	 */
2171 	zone_kstat_create(&zone0);
2172 
2173 	/*
2174 	 * Initialize zone label.
2175 	 * mlp are initialized when tnzonecfg is loaded.
2176 	 */
2177 	zone0.zone_slabel = l_admin_low;
2178 	rw_init(&zone0.zone_mlps.mlpl_rwlock, NULL, RW_DEFAULT, NULL);
2179 	label_hold(l_admin_low);
2180 
2181 	/*
2182 	 * Initialise the lock for the database structure used by mntfs.
2183 	 */
2184 	rw_init(&zone0.zone_mntfs_db_lock, NULL, RW_DEFAULT, NULL);
2185 
2186 	mutex_enter(&zonehash_lock);
2187 	zone_uniqid(&zone0);
2188 	ASSERT(zone0.zone_uniqid == GLOBAL_ZONEUNIQID);
2189 
2190 	zonehashbyid = mod_hash_create_idhash("zone_by_id", zone_hash_size,
2191 	    mod_hash_null_valdtor);
2192 	zonehashbyname = mod_hash_create_strhash("zone_by_name",
2193 	    zone_hash_size, mod_hash_null_valdtor);
2194 	/*
2195 	 * maintain zonehashbylabel only for labeled systems
2196 	 */
2197 	if (is_system_labeled())
2198 		zonehashbylabel = mod_hash_create_extended("zone_by_label",
2199 		    zone_hash_size, mod_hash_null_keydtor,
2200 		    mod_hash_null_valdtor, hash_bylabel, NULL,
2201 		    hash_labelkey_cmp, KM_SLEEP);
2202 	zonecount = 1;
2203 
2204 	(void) mod_hash_insert(zonehashbyid, (mod_hash_key_t)GLOBAL_ZONEID,
2205 	    (mod_hash_val_t)&zone0);
2206 	(void) mod_hash_insert(zonehashbyname, (mod_hash_key_t)zone0.zone_name,
2207 	    (mod_hash_val_t)&zone0);
2208 	if (is_system_labeled()) {
2209 		zone0.zone_flags |= ZF_HASHED_LABEL;
2210 		(void) mod_hash_insert(zonehashbylabel,
2211 		    (mod_hash_key_t)zone0.zone_slabel, (mod_hash_val_t)&zone0);
2212 	}
2213 	mutex_exit(&zonehash_lock);
2214 
2215 	/*
2216 	 * We avoid setting zone_kcred until now, since kcred is initialized
2217 	 * sometime after zone_zsd_init() and before zone_init().
2218 	 */
2219 	zone0.zone_kcred = kcred;
2220 	/*
2221 	 * The global zone is fully initialized (except for zone_rootvp which
2222 	 * will be set when the root filesystem is mounted).
2223 	 */
2224 	global_zone = &zone0;
2225 
2226 	/*
2227 	 * Setup an event channel to send zone status change notifications on
2228 	 */
2229 	res = sysevent_evc_bind(ZONE_EVENT_CHANNEL, &zone_event_chan,
2230 	    EVCH_CREAT);
2231 
2232 	if (res)
2233 		panic("sysevent_evc_bind failed during zone setup.\n");
2234 
2235 }
2236 
2237 static void
2238 zone_free(zone_t *zone)
2239 {
2240 	ASSERT(zone != global_zone);
2241 	ASSERT(zone->zone_ntasks == 0);
2242 	ASSERT(zone->zone_nlwps == 0);
2243 	ASSERT(zone->zone_nprocs == 0);
2244 	ASSERT(zone->zone_cred_ref == 0);
2245 	ASSERT(zone->zone_kcred == NULL);
2246 	ASSERT(zone_status_get(zone) == ZONE_IS_DEAD ||
2247 	    zone_status_get(zone) == ZONE_IS_UNINITIALIZED);
2248 	ASSERT(list_is_empty(&zone->zone_ref_list));
2249 
2250 	/*
2251 	 * Remove any zone caps.
2252 	 */
2253 	cpucaps_zone_remove(zone);
2254 
2255 	ASSERT(zone->zone_cpucap == NULL);
2256 
2257 	/* remove from deathrow list */
2258 	if (zone_status_get(zone) == ZONE_IS_DEAD) {
2259 		ASSERT(zone->zone_ref == 0);
2260 		mutex_enter(&zone_deathrow_lock);
2261 		list_remove(&zone_deathrow, zone);
2262 		mutex_exit(&zone_deathrow_lock);
2263 	}
2264 
2265 	list_destroy(&zone->zone_ref_list);
2266 	zone_free_zsd(zone);
2267 	zone_free_datasets(zone);
2268 	list_destroy(&zone->zone_dl_list);
2269 
2270 	if (zone->zone_rootvp != NULL)
2271 		VN_RELE(zone->zone_rootvp);
2272 	if (zone->zone_rootpath)
2273 		kmem_free(zone->zone_rootpath, zone->zone_rootpathlen);
2274 	if (zone->zone_name != NULL)
2275 		kmem_free(zone->zone_name, ZONENAME_MAX);
2276 	if (zone->zone_slabel != NULL)
2277 		label_rele(zone->zone_slabel);
2278 	if (zone->zone_nodename != NULL)
2279 		kmem_free(zone->zone_nodename, _SYS_NMLN);
2280 	if (zone->zone_domain != NULL)
2281 		kmem_free(zone->zone_domain, _SYS_NMLN);
2282 	if (zone->zone_privset != NULL)
2283 		kmem_free(zone->zone_privset, sizeof (priv_set_t));
2284 	if (zone->zone_rctls != NULL)
2285 		rctl_set_free(zone->zone_rctls);
2286 	if (zone->zone_bootargs != NULL)
2287 		strfree(zone->zone_bootargs);
2288 	if (zone->zone_initname != NULL)
2289 		strfree(zone->zone_initname);
2290 	if (zone->zone_fs_allowed != NULL)
2291 		strfree(zone->zone_fs_allowed);
2292 	if (zone->zone_pfexecd != NULL)
2293 		klpd_freelist(&zone->zone_pfexecd);
2294 	id_free(zoneid_space, zone->zone_id);
2295 	mutex_destroy(&zone->zone_lock);
2296 	cv_destroy(&zone->zone_cv);
2297 	rw_destroy(&zone->zone_mlps.mlpl_rwlock);
2298 	rw_destroy(&zone->zone_mntfs_db_lock);
2299 	kmem_free(zone, sizeof (zone_t));
2300 }
2301 
2302 /*
2303  * See block comment at the top of this file for information about zone
2304  * status values.
2305  */
2306 /*
2307  * Convenience function for setting zone status.
2308  */
2309 static void
2310 zone_status_set(zone_t *zone, zone_status_t status)
2311 {
2312 	nvlist_t *nvl = NULL;
2313 
2314 	ASSERT(MUTEX_HELD(&zone_status_lock));
2315 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE &&
2316 	    status >= zone_status_get(zone));
2317 
2318 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) ||
2319 	    nvlist_add_string(nvl, ZONE_CB_NAME, zone->zone_name) ||
2320 	    nvlist_add_string(nvl, ZONE_CB_NEWSTATE,
2321 	    zone_status_table[status]) ||
2322 	    nvlist_add_string(nvl, ZONE_CB_OLDSTATE,
2323 	    zone_status_table[zone->zone_status]) ||
2324 	    nvlist_add_int32(nvl, ZONE_CB_ZONEID, zone->zone_id) ||
2325 	    nvlist_add_uint64(nvl, ZONE_CB_TIMESTAMP, (uint64_t)gethrtime()) ||
2326 	    sysevent_evc_publish(zone_event_chan, ZONE_EVENT_STATUS_CLASS,
2327 	    ZONE_EVENT_STATUS_SUBCLASS, "sun.com", "kernel", nvl, EVCH_SLEEP)) {
2328 #ifdef DEBUG
2329 		(void) printf(
2330 		    "Failed to allocate and send zone state change event.\n");
2331 #endif
2332 	}
2333 	nvlist_free(nvl);
2334 
2335 	zone->zone_status = status;
2336 
2337 	cv_broadcast(&zone->zone_cv);
2338 }
2339 
2340 /*
2341  * Public function to retrieve the zone status.  The zone status may
2342  * change after it is retrieved.
2343  */
2344 zone_status_t
2345 zone_status_get(zone_t *zone)
2346 {
2347 	return (zone->zone_status);
2348 }
2349 
2350 static int
2351 zone_set_bootargs(zone_t *zone, const char *zone_bootargs)
2352 {
2353 	char *buf = kmem_zalloc(BOOTARGS_MAX, KM_SLEEP);
2354 	int err = 0;
2355 
2356 	ASSERT(zone != global_zone);
2357 	if ((err = copyinstr(zone_bootargs, buf, BOOTARGS_MAX, NULL)) != 0)
2358 		goto done;	/* EFAULT or ENAMETOOLONG */
2359 
2360 	if (zone->zone_bootargs != NULL)
2361 		strfree(zone->zone_bootargs);
2362 
2363 	zone->zone_bootargs = strdup(buf);
2364 
2365 done:
2366 	kmem_free(buf, BOOTARGS_MAX);
2367 	return (err);
2368 }
2369 
2370 static int
2371 zone_set_brand(zone_t *zone, const char *brand)
2372 {
2373 	struct brand_attr *attrp;
2374 	brand_t *bp;
2375 
2376 	attrp = kmem_alloc(sizeof (struct brand_attr), KM_SLEEP);
2377 	if (copyin(brand, attrp, sizeof (struct brand_attr)) != 0) {
2378 		kmem_free(attrp, sizeof (struct brand_attr));
2379 		return (EFAULT);
2380 	}
2381 
2382 	bp = brand_register_zone(attrp);
2383 	kmem_free(attrp, sizeof (struct brand_attr));
2384 	if (bp == NULL)
2385 		return (EINVAL);
2386 
2387 	/*
2388 	 * This is the only place where a zone can change its brand.
2389 	 * We already need to hold zone_status_lock to check the zone
2390 	 * status, so we'll just use that lock to serialize zone
2391 	 * branding requests as well.
2392 	 */
2393 	mutex_enter(&zone_status_lock);
2394 
2395 	/* Re-branding is not allowed, and the zone must not have booted yet */
2396 	if ((ZONE_IS_BRANDED(zone)) ||
2397 	    (zone_status_get(zone) >= ZONE_IS_BOOTING)) {
2398 		mutex_exit(&zone_status_lock);
2399 		brand_unregister_zone(bp);
2400 		return (EINVAL);
2401 	}
2402 
2403 	/* set up the brand specific data */
2404 	zone->zone_brand = bp;
2405 	ZBROP(zone)->b_init_brand_data(zone);
2406 
2407 	mutex_exit(&zone_status_lock);
2408 	return (0);
2409 }
2410 
2411 static int
2412 zone_set_fs_allowed(zone_t *zone, const char *zone_fs_allowed)
2413 {
2414 	char *buf = kmem_zalloc(ZONE_FS_ALLOWED_MAX, KM_SLEEP);
2415 	int err = 0;
2416 
2417 	ASSERT(zone != global_zone);
2418 	if ((err = copyinstr(zone_fs_allowed, buf,
2419 	    ZONE_FS_ALLOWED_MAX, NULL)) != 0)
2420 		goto done;
2421 
2422 	if (zone->zone_fs_allowed != NULL)
2423 		strfree(zone->zone_fs_allowed);
2424 
2425 	zone->zone_fs_allowed = strdup(buf);
2426 
2427 done:
2428 	kmem_free(buf, ZONE_FS_ALLOWED_MAX);
2429 	return (err);
2430 }
2431 
2432 static int
2433 zone_set_initname(zone_t *zone, const char *zone_initname)
2434 {
2435 	char initname[INITNAME_SZ];
2436 	size_t len;
2437 	int err = 0;
2438 
2439 	ASSERT(zone != global_zone);
2440 	if ((err = copyinstr(zone_initname, initname, INITNAME_SZ, &len)) != 0)
2441 		return (err);	/* EFAULT or ENAMETOOLONG */
2442 
2443 	if (zone->zone_initname != NULL)
2444 		strfree(zone->zone_initname);
2445 
2446 	zone->zone_initname = kmem_alloc(strlen(initname) + 1, KM_SLEEP);
2447 	(void) strcpy(zone->zone_initname, initname);
2448 	return (0);
2449 }
2450 
2451 static int
2452 zone_set_phys_mcap(zone_t *zone, const uint64_t *zone_mcap)
2453 {
2454 	uint64_t mcap;
2455 	int err = 0;
2456 
2457 	if ((err = copyin(zone_mcap, &mcap, sizeof (uint64_t))) == 0)
2458 		zone->zone_phys_mcap = mcap;
2459 
2460 	return (err);
2461 }
2462 
2463 static int
2464 zone_set_sched_class(zone_t *zone, const char *new_class)
2465 {
2466 	char sched_class[PC_CLNMSZ];
2467 	id_t classid;
2468 	int err;
2469 
2470 	ASSERT(zone != global_zone);
2471 	if ((err = copyinstr(new_class, sched_class, PC_CLNMSZ, NULL)) != 0)
2472 		return (err);	/* EFAULT or ENAMETOOLONG */
2473 
2474 	if (getcid(sched_class, &classid) != 0 || CLASS_KERNEL(classid))
2475 		return (set_errno(EINVAL));
2476 	zone->zone_defaultcid = classid;
2477 	ASSERT(zone->zone_defaultcid > 0 &&
2478 	    zone->zone_defaultcid < loaded_classes);
2479 
2480 	return (0);
2481 }
2482 
2483 /*
2484  * Block indefinitely waiting for (zone_status >= status)
2485  */
2486 void
2487 zone_status_wait(zone_t *zone, zone_status_t status)
2488 {
2489 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2490 
2491 	mutex_enter(&zone_status_lock);
2492 	while (zone->zone_status < status) {
2493 		cv_wait(&zone->zone_cv, &zone_status_lock);
2494 	}
2495 	mutex_exit(&zone_status_lock);
2496 }
2497 
2498 /*
2499  * Private CPR-safe version of zone_status_wait().
2500  */
2501 static void
2502 zone_status_wait_cpr(zone_t *zone, zone_status_t status, char *str)
2503 {
2504 	callb_cpr_t cprinfo;
2505 
2506 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2507 
2508 	CALLB_CPR_INIT(&cprinfo, &zone_status_lock, callb_generic_cpr,
2509 	    str);
2510 	mutex_enter(&zone_status_lock);
2511 	while (zone->zone_status < status) {
2512 		CALLB_CPR_SAFE_BEGIN(&cprinfo);
2513 		cv_wait(&zone->zone_cv, &zone_status_lock);
2514 		CALLB_CPR_SAFE_END(&cprinfo, &zone_status_lock);
2515 	}
2516 	/*
2517 	 * zone_status_lock is implicitly released by the following.
2518 	 */
2519 	CALLB_CPR_EXIT(&cprinfo);
2520 }
2521 
2522 /*
2523  * Block until zone enters requested state or signal is received.  Return (0)
2524  * if signaled, non-zero otherwise.
2525  */
2526 int
2527 zone_status_wait_sig(zone_t *zone, zone_status_t status)
2528 {
2529 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2530 
2531 	mutex_enter(&zone_status_lock);
2532 	while (zone->zone_status < status) {
2533 		if (!cv_wait_sig(&zone->zone_cv, &zone_status_lock)) {
2534 			mutex_exit(&zone_status_lock);
2535 			return (0);
2536 		}
2537 	}
2538 	mutex_exit(&zone_status_lock);
2539 	return (1);
2540 }
2541 
2542 /*
2543  * Block until the zone enters the requested state or the timeout expires,
2544  * whichever happens first.  Return (-1) if operation timed out, time remaining
2545  * otherwise.
2546  */
2547 clock_t
2548 zone_status_timedwait(zone_t *zone, clock_t tim, zone_status_t status)
2549 {
2550 	clock_t timeleft = 0;
2551 
2552 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2553 
2554 	mutex_enter(&zone_status_lock);
2555 	while (zone->zone_status < status && timeleft != -1) {
2556 		timeleft = cv_timedwait(&zone->zone_cv, &zone_status_lock, tim);
2557 	}
2558 	mutex_exit(&zone_status_lock);
2559 	return (timeleft);
2560 }
2561 
2562 /*
2563  * Block until the zone enters the requested state, the current process is
2564  * signaled, or the timeout expires, whichever happens first.  Return (-1) if
2565  * operation timed out, 0 if signaled, time remaining otherwise.
2566  */
2567 clock_t
2568 zone_status_timedwait_sig(zone_t *zone, clock_t tim, zone_status_t status)
2569 {
2570 	clock_t timeleft = tim - ddi_get_lbolt();
2571 
2572 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2573 
2574 	mutex_enter(&zone_status_lock);
2575 	while (zone->zone_status < status) {
2576 		timeleft = cv_timedwait_sig(&zone->zone_cv, &zone_status_lock,
2577 		    tim);
2578 		if (timeleft <= 0)
2579 			break;
2580 	}
2581 	mutex_exit(&zone_status_lock);
2582 	return (timeleft);
2583 }
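
/*
 * Example (hedged sketch): wait up to five seconds for a zone to come up,
 * assuming the caller already holds a reference on "zone".  The timeout
 * argument is an absolute lbolt value, so it is formed from the current
 * lbolt plus a tick delta.
 *
 *	clock_t deadline = ddi_get_lbolt() + SEC_TO_TICK(5);
 *
 *	if (zone_status_timedwait(zone, deadline, ZONE_IS_RUNNING) == -1)
 *		(handle the timeout)
 */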
2584 
2585 /*
2586  * Zones have two reference counts: one for references from credential
2587  * structures (zone_cred_ref), and one (zone_ref) for everything else.
2588  * This is so we can allow a zone to be rebooted while there are still
2589  * outstanding cred references, since certain drivers cache dblks (which
2590  * implicitly results in cached creds).  We wait for zone_ref to drop to
2591  * 0 (actually 1), but not zone_cred_ref.  The zone structure itself is
2592  * later freed when the zone_cred_ref drops to 0, though nothing other
2593  * than the zone id and privilege set should be accessed once the zone
2594  * is "dead".
2595  *
2596  * A debugging flag, zone_wait_for_cred, can be set to a non-zero value
2597  * to force halt/reboot to block waiting for the zone_cred_ref to drop
2598  * to 0.  This can be useful to flush out other sources of cached creds
2599  * that may be less innocuous than the driver case.
2600  *
2601  * Zones also provide a tracked reference counting mechanism in which zone
2602  * references are represented by "crumbs" (zone_ref structures).  Crumbs help
2603  * debuggers determine the sources of leaked zone references.  See
2604  * zone_hold_ref() and zone_rele_ref() below for more information.
2605  */
2606 
2607 int zone_wait_for_cred = 0;
2608 
2609 static void
2610 zone_hold_locked(zone_t *z)
2611 {
2612 	ASSERT(MUTEX_HELD(&z->zone_lock));
2613 	z->zone_ref++;
2614 	ASSERT(z->zone_ref != 0);
2615 }
2616 
2617 /*
2618  * Increment the specified zone's reference count.  The zone's zone_t structure
2619  * will not be freed as long as the zone's reference count is nonzero.
2620  * Decrement the zone's reference count via zone_rele().
2621  *
2622  * NOTE: This function should only be used to hold zones for short periods of
2623  * time.  Use zone_hold_ref() if the zone must be held for a long time.
2624  */
2625 void
2626 zone_hold(zone_t *z)
2627 {
2628 	mutex_enter(&z->zone_lock);
2629 	zone_hold_locked(z);
2630 	mutex_exit(&z->zone_lock);
2631 }
2632 
2633 /*
2634  * If the non-cred ref count drops to 1 and either the cred ref count
2635  * is 0 or we aren't waiting for cred references, the zone is ready to
2636  * be destroyed.
2637  */
2638 #define	ZONE_IS_UNREF(zone)	((zone)->zone_ref == 1 && \
2639 	    (!zone_wait_for_cred || (zone)->zone_cred_ref == 0))
2640 
2641 /*
2642  * Common zone reference release function invoked by zone_rele() and
2643  * zone_rele_ref().  If subsys is ZONE_REF_NUM_SUBSYS, then the specified
2644  * zone's subsystem-specific reference counters are not affected by the
2645  * release.  If ref is not NULL, then the zone_ref_t to which it refers is
2646  * removed from the specified zone's reference list.  ref must be non-NULL iff
2647  * subsys is not ZONE_REF_NUM_SUBSYS.
2648  */
2649 static void
2650 zone_rele_common(zone_t *z, zone_ref_t *ref, zone_ref_subsys_t subsys)
2651 {
2652 	boolean_t wakeup;
2653 
2654 	mutex_enter(&z->zone_lock);
2655 	ASSERT(z->zone_ref != 0);
2656 	z->zone_ref--;
2657 	if (subsys != ZONE_REF_NUM_SUBSYS) {
2658 		ASSERT(z->zone_subsys_ref[subsys] != 0);
2659 		z->zone_subsys_ref[subsys]--;
2660 		list_remove(&z->zone_ref_list, ref);
2661 	}
2662 	if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
2663 		/* no more refs, free the structure */
2664 		mutex_exit(&z->zone_lock);
2665 		zone_free(z);
2666 		return;
2667 	}
2668 	/* signal zone_destroy so the zone can finish halting */
2669 	wakeup = (ZONE_IS_UNREF(z) && zone_status_get(z) >= ZONE_IS_DEAD);
2670 	mutex_exit(&z->zone_lock);
2671 
2672 	if (wakeup) {
2673 		/*
2674 		 * Grabbing zonehash_lock here effectively synchronizes with
2675 		 * zone_destroy() to avoid missed signals.
2676 		 */
2677 		mutex_enter(&zonehash_lock);
2678 		cv_broadcast(&zone_destroy_cv);
2679 		mutex_exit(&zonehash_lock);
2680 	}
2681 }
2682 
2683 /*
2684  * Decrement the specified zone's reference count.  The specified zone will
2685  * cease to exist after this function returns if the reference count drops to
2686  * zero.  This function should be paired with zone_hold().
2687  */
2688 void
2689 zone_rele(zone_t *z)
2690 {
2691 	zone_rele_common(z, NULL, ZONE_REF_NUM_SUBSYS);
2692 }
2693 
2694 /*
2695  * Initialize a zone reference structure.  This function must be invoked for
2696  * a reference structure before the structure is passed to zone_hold_ref().
2697  */
2698 void
2699 zone_init_ref(zone_ref_t *ref)
2700 {
2701 	ref->zref_zone = NULL;
2702 	list_link_init(&ref->zref_linkage);
2703 }
2704 
2705 /*
2706  * Acquire a reference to zone z.  The caller must specify the
2707  * zone_ref_subsys_t constant associated with its subsystem.  The specified
2708  * zone_ref_t structure will represent a reference to the specified zone.  Use
2709  * zone_rele_ref() to release the reference.
2710  *
2711  * The referenced zone_t structure will not be freed as long as the zone_t's
2712  * zone_status field is not ZONE_IS_DEAD and the zone has outstanding
2713  * references.
2714  *
2715  * NOTE: The zone_ref_t structure must be initialized before it is used.
2716  * See zone_init_ref() above.
2717  */
2718 void
2719 zone_hold_ref(zone_t *z, zone_ref_t *ref, zone_ref_subsys_t subsys)
2720 {
2721 	ASSERT(subsys >= 0 && subsys < ZONE_REF_NUM_SUBSYS);
2722 
2723 	/*
2724 	 * Prevent consumers from reusing a reference structure before
2725 	 * releasing it.
2726 	 */
2727 	VERIFY(ref->zref_zone == NULL);
2728 
2729 	ref->zref_zone = z;
2730 	mutex_enter(&z->zone_lock);
2731 	zone_hold_locked(z);
2732 	z->zone_subsys_ref[subsys]++;
2733 	ASSERT(z->zone_subsys_ref[subsys] != 0);
2734 	list_insert_head(&z->zone_ref_list, ref);
2735 	mutex_exit(&z->zone_lock);
2736 }
2737 
2738 /*
2739  * Release the zone reference represented by the specified zone_ref_t.
2740  * The reference is invalid after it's released; however, the zone_ref_t
2741  * structure can be reused without having to invoke zone_init_ref().
2742  * subsys should be the same value that was passed to zone_hold_ref()
2743  * when the reference was acquired.
2744  */
2745 void
2746 zone_rele_ref(zone_ref_t *ref, zone_ref_subsys_t subsys)
2747 {
2748 	zone_rele_common(ref->zref_zone, ref, subsys);
2749 
2750 	/*
2751 	 * Set the zone_ref_t's zref_zone field to NULL to generate panics
2752 	 * when consumers dereference the reference.  This helps us catch
2753 	 * consumers who use released references.  Furthermore, this lets
2754 	 * consumers reuse the zone_ref_t structure without having to
2755 	 * invoke zone_init_ref().
2756 	 */
2757 	ref->zref_zone = NULL;
2758 }
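
/*
 * Usage sketch for tracked references (illustrative; assumes the caller's
 * subsystem is represented by the existing ZONE_REF_NFS constant):
 *
 *	zone_ref_t zref;
 *
 *	zone_init_ref(&zref);
 *	zone_hold_ref(zone, &zref, ZONE_REF_NFS);
 *	(long-lived use of the zone)
 *	zone_rele_ref(&zref, ZONE_REF_NFS);
 */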
2759 
2760 void
2761 zone_cred_hold(zone_t *z)
2762 {
2763 	mutex_enter(&z->zone_lock);
2764 	z->zone_cred_ref++;
2765 	ASSERT(z->zone_cred_ref != 0);
2766 	mutex_exit(&z->zone_lock);
2767 }
2768 
2769 void
2770 zone_cred_rele(zone_t *z)
2771 {
2772 	boolean_t wakeup;
2773 
2774 	mutex_enter(&z->zone_lock);
2775 	ASSERT(z->zone_cred_ref != 0);
2776 	z->zone_cred_ref--;
2777 	if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
2778 		/* no more refs, free the structure */
2779 		mutex_exit(&z->zone_lock);
2780 		zone_free(z);
2781 		return;
2782 	}
2783 	/*
2784 	 * If zone_destroy is waiting for the cred references to drain
2785 	 * out, and they have, signal it.
2786 	 */
2787 	wakeup = (zone_wait_for_cred && ZONE_IS_UNREF(z) &&
2788 	    zone_status_get(z) >= ZONE_IS_DEAD);
2789 	mutex_exit(&z->zone_lock);
2790 
2791 	if (wakeup) {
2792 		/*
2793 		 * Grabbing zonehash_lock here effectively synchronizes with
2794 		 * zone_destroy() to avoid missed signals.
2795 		 */
2796 		mutex_enter(&zonehash_lock);
2797 		cv_broadcast(&zone_destroy_cv);
2798 		mutex_exit(&zonehash_lock);
2799 	}
2800 }
2801 
2802 void
2803 zone_task_hold(zone_t *z)
2804 {
2805 	mutex_enter(&z->zone_lock);
2806 	z->zone_ntasks++;
2807 	ASSERT(z->zone_ntasks != 0);
2808 	mutex_exit(&z->zone_lock);
2809 }
2810 
2811 void
2812 zone_task_rele(zone_t *zone)
2813 {
2814 	uint_t refcnt;
2815 
2816 	mutex_enter(&zone->zone_lock);
2817 	ASSERT(zone->zone_ntasks != 0);
2818 	refcnt = --zone->zone_ntasks;
2819 	if (refcnt > 1) {	/* Common case */
2820 		mutex_exit(&zone->zone_lock);
2821 		return;
2822 	}
2823 	zone_hold_locked(zone);	/* so we can use the zone_t later */
2824 	mutex_exit(&zone->zone_lock);
2825 	if (refcnt == 1) {
2826 		/*
2827 		 * See if the zone is shutting down.
2828 		 */
2829 		mutex_enter(&zone_status_lock);
2830 		if (zone_status_get(zone) != ZONE_IS_SHUTTING_DOWN) {
2831 			goto out;
2832 		}
2833 
2834 		/*
2835 		 * Make sure the ntasks didn't change since we
2836 		 * dropped zone_lock.
2837 		 */
2838 		mutex_enter(&zone->zone_lock);
2839 		if (refcnt != zone->zone_ntasks) {
2840 			mutex_exit(&zone->zone_lock);
2841 			goto out;
2842 		}
2843 		mutex_exit(&zone->zone_lock);
2844 
2845 		/*
2846 		 * No more user processes in the zone.  The zone is empty.
2847 		 */
2848 		zone_status_set(zone, ZONE_IS_EMPTY);
2849 		goto out;
2850 	}
2851 
2852 	ASSERT(refcnt == 0);
2853 	/*
2854 	 * zsched has exited; the zone is dead.
2855 	 */
2856 	zone->zone_zsched = NULL;		/* paranoia */
2857 	mutex_enter(&zone_status_lock);
2858 	zone_status_set(zone, ZONE_IS_DEAD);
2859 out:
2860 	mutex_exit(&zone_status_lock);
2861 	zone_rele(zone);
2862 }
2863 
2864 zoneid_t
2865 getzoneid(void)
2866 {
2867 	return (curproc->p_zone->zone_id);
2868 }
2869 
2870 /*
2871  * Internal versions of zone_find_by_*().  These don't zone_hold() or
2872  * check the validity of a zone's state.
2873  */
2874 static zone_t *
2875 zone_find_all_by_id(zoneid_t zoneid)
2876 {
2877 	mod_hash_val_t hv;
2878 	zone_t *zone = NULL;
2879 
2880 	ASSERT(MUTEX_HELD(&zonehash_lock));
2881 
2882 	if (mod_hash_find(zonehashbyid,
2883 	    (mod_hash_key_t)(uintptr_t)zoneid, &hv) == 0)
2884 		zone = (zone_t *)hv;
2885 	return (zone);
2886 }
2887 
2888 static zone_t *
2889 zone_find_all_by_label(const ts_label_t *label)
2890 {
2891 	mod_hash_val_t hv;
2892 	zone_t *zone = NULL;
2893 
2894 	ASSERT(MUTEX_HELD(&zonehash_lock));
2895 
2896 	/*
2897 	 * zonehashbylabel is not maintained for unlabeled systems
2898 	 */
2899 	if (!is_system_labeled())
2900 		return (NULL);
2901 	if (mod_hash_find(zonehashbylabel, (mod_hash_key_t)label, &hv) == 0)
2902 		zone = (zone_t *)hv;
2903 	return (zone);
2904 }
2905 
2906 static zone_t *
2907 zone_find_all_by_name(char *name)
2908 {
2909 	mod_hash_val_t hv;
2910 	zone_t *zone = NULL;
2911 
2912 	ASSERT(MUTEX_HELD(&zonehash_lock));
2913 
2914 	if (mod_hash_find(zonehashbyname, (mod_hash_key_t)name, &hv) == 0)
2915 		zone = (zone_t *)hv;
2916 	return (zone);
2917 }
2918 
2919 /*
2920  * Public interface for looking up a zone by zoneid.  Only returns the zone if
2921  * it is fully initialized, and has not yet begun the zone_destroy() sequence.
2922  * Caller must call zone_rele() once it is done with the zone.
2923  *
2924  * The zone may begin the zone_destroy() sequence immediately after this
2925  * function returns, but may be safely used until zone_rele() is called.
2926  */
2927 zone_t *
2928 zone_find_by_id(zoneid_t zoneid)
2929 {
2930 	zone_t *zone;
2931 	zone_status_t status;
2932 
2933 	mutex_enter(&zonehash_lock);
2934 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
2935 		mutex_exit(&zonehash_lock);
2936 		return (NULL);
2937 	}
2938 	status = zone_status_get(zone);
2939 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
2940 		/*
2941 		 * For all practical purposes the zone doesn't exist.
2942 		 */
2943 		mutex_exit(&zonehash_lock);
2944 		return (NULL);
2945 	}
2946 	zone_hold(zone);
2947 	mutex_exit(&zonehash_lock);
2948 	return (zone);
2949 }
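
/*
 * Typical lookup pattern (sketch; the work done while holding the zone is
 * hypothetical):
 *
 *	zone_t *zone;
 *
 *	if ((zone = zone_find_by_id(zoneid)) == NULL)
 *		return (EINVAL);
 *	(use zone; the hold keeps the zone_t from being freed)
 *	zone_rele(zone);
 */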
2950 
2951 /*
2952  * Similar to zone_find_by_id, but using zone label as the key.
2953  */
2954 zone_t *
2955 zone_find_by_label(const ts_label_t *label)
2956 {
2957 	zone_t *zone;
2958 	zone_status_t status;
2959 
2960 	mutex_enter(&zonehash_lock);
2961 	if ((zone = zone_find_all_by_label(label)) == NULL) {
2962 		mutex_exit(&zonehash_lock);
2963 		return (NULL);
2964 	}
2965 
2966 	status = zone_status_get(zone);
2967 	if (status > ZONE_IS_DOWN) {
2968 		/*
2969 		 * For all practical purposes the zone doesn't exist.
2970 		 */
2971 		mutex_exit(&zonehash_lock);
2972 		return (NULL);
2973 	}
2974 	zone_hold(zone);
2975 	mutex_exit(&zonehash_lock);
2976 	return (zone);
2977 }
2978 
2979 /*
2980  * Similar to zone_find_by_id, but using zone name as the key.
2981  */
2982 zone_t *
2983 zone_find_by_name(char *name)
2984 {
2985 	zone_t *zone;
2986 	zone_status_t status;
2987 
2988 	mutex_enter(&zonehash_lock);
2989 	if ((zone = zone_find_all_by_name(name)) == NULL) {
2990 		mutex_exit(&zonehash_lock);
2991 		return (NULL);
2992 	}
2993 	status = zone_status_get(zone);
2994 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
2995 		/*
2996 		 * For all practical purposes the zone doesn't exist.
2997 		 */
2998 		mutex_exit(&zonehash_lock);
2999 		return (NULL);
3000 	}
3001 	zone_hold(zone);
3002 	mutex_exit(&zonehash_lock);
3003 	return (zone);
3004 }
3005 
3006 /*
3007  * Similar to zone_find_by_id(), using the path as a key.  For instance,
3008  * if there is a zone "foo" rooted at /foo/root, and the path argument
3009  * is "/foo/root/proc", it will return the held zone_t corresponding to
3010  * zone "foo".
3011  *
3012  * zone_find_by_path() always returns a non-NULL value, since at the
3013  * very least every path will be contained in the global zone.
3014  *
3015  * As with the other zone_find_by_*() functions, the caller is
3016  * responsible for zone_rele()ing the return value of this function.
3017  */
3018 zone_t *
3019 zone_find_by_path(const char *path)
3020 {
3021 	zone_t *zone;
3022 	zone_t *zret = NULL;
3023 	zone_status_t status;
3024 
3025 	if (path == NULL) {
3026 		/*
3027 		 * Call from rootconf().
3028 		 */
3029 		zone_hold(global_zone);
3030 		return (global_zone);
3031 	}
3032 	ASSERT(*path == '/');
3033 	mutex_enter(&zonehash_lock);
3034 	for (zone = list_head(&zone_active); zone != NULL;
3035 	    zone = list_next(&zone_active, zone)) {
3036 		if (ZONE_PATH_VISIBLE(path, zone))
3037 			zret = zone;
3038 	}
3039 	ASSERT(zret != NULL);
3040 	status = zone_status_get(zret);
3041 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
3042 		/*
3043 		 * Zone practically doesn't exist.
3044 		 */
3045 		zret = global_zone;
3046 	}
3047 	zone_hold(zret);
3048 	mutex_exit(&zonehash_lock);
3049 	return (zret);
3050 }
3051 
3052 /*
3053  * Get the number of cpus visible to this zone.  The system-wide global
3054  * 'ncpus' is returned if pools are disabled, the caller is in the
3055  * global zone, or a NULL zone argument is passed in.
3056  */
3057 int
3058 zone_ncpus_get(zone_t *zone)
3059 {
3060 	int myncpus = zone == NULL ? 0 : zone->zone_ncpus;
3061 
3062 	return (myncpus != 0 ? myncpus : ncpus);
3063 }
3064 
3065 /*
3066  * Get the number of online cpus visible to this zone.  The system-wide
3067  * global 'ncpus_online' is returned if pools are disabled, the caller
3068  * is in the global zone, or a NULL zone argument is passed in.
3069  */
3070 int
3071 zone_ncpus_online_get(zone_t *zone)
3072 {
3073 	int myncpus_online = zone == NULL ? 0 : zone->zone_ncpus_online;
3074 
3075 	return (myncpus_online != 0 ? myncpus_online : ncpus_online);
3076 }
3077 
3078 /*
3079  * Return the pool to which the zone is currently bound.
3080  */
3081 pool_t *
3082 zone_pool_get(zone_t *zone)
3083 {
3084 	ASSERT(pool_lock_held());
3085 
3086 	return (zone->zone_pool);
3087 }
3088 
3089 /*
3090  * Set the zone's pool pointer and update the zone's visibility to match
3091  * the resources in the new pool.
3092  */
3093 void
3094 zone_pool_set(zone_t *zone, pool_t *pool)
3095 {
3096 	ASSERT(pool_lock_held());
3097 	ASSERT(MUTEX_HELD(&cpu_lock));
3098 
3099 	zone->zone_pool = pool;
3100 	zone_pset_set(zone, pool->pool_pset->pset_id);
3101 }
3102 
3103 /*
3104  * Return the cached value of the id of the processor set to which the
3105  * zone is currently bound.  The value will be ZONE_PS_INVAL if the pools
3106  * facility is disabled.
3107  */
3108 psetid_t
3109 zone_pset_get(zone_t *zone)
3110 {
3111 	ASSERT(MUTEX_HELD(&cpu_lock));
3112 
3113 	return (zone->zone_psetid);
3114 }
3115 
3116 /*
3117  * Set the cached value of the id of the processor set to which the zone
3118  * is currently bound.  Also update the zone's visibility to match the
3119  * resources in the new processor set.
3120  */
3121 void
3122 zone_pset_set(zone_t *zone, psetid_t newpsetid)
3123 {
3124 	psetid_t oldpsetid;
3125 
3126 	ASSERT(MUTEX_HELD(&cpu_lock));
3127 	oldpsetid = zone_pset_get(zone);
3128 
3129 	if (oldpsetid == newpsetid)
3130 		return;
3131 	/*
3132 	 * Global zone sees all.
3133 	 */
3134 	if (zone != global_zone) {
3135 		zone->zone_psetid = newpsetid;
3136 		if (newpsetid != ZONE_PS_INVAL)
3137 			pool_pset_visibility_add(newpsetid, zone);
3138 		if (oldpsetid != ZONE_PS_INVAL)
3139 			pool_pset_visibility_remove(oldpsetid, zone);
3140 	}
3141 	/*
3142 	 * Disabling pools, so we should start using the global values
3143 	 * for ncpus and ncpus_online.
3144 	 */
3145 	if (newpsetid == ZONE_PS_INVAL) {
3146 		zone->zone_ncpus = 0;
3147 		zone->zone_ncpus_online = 0;
3148 	}
3149 }
3150 
3151 /*
3152  * Walk the list of active zones and issue the provided callback for
3153  * each of them.
3154  *
3155  * Caller must not be holding any locks that may be acquired under
3156  * zonehash_lock.  See comment at the beginning of the file for a list of
3157  * common locks and their interactions with zones.
3158  */
3159 int
3160 zone_walk(int (*cb)(zone_t *, void *), void *data)
3161 {
3162 	zone_t *zone;
3163 	int ret = 0;
3164 	zone_status_t status;
3165 
3166 	mutex_enter(&zonehash_lock);
3167 	for (zone = list_head(&zone_active); zone != NULL;
3168 	    zone = list_next(&zone_active, zone)) {
3169 		/*
3170 		 * Skip zones that shouldn't be externally visible.
3171 		 */
3172 		status = zone_status_get(zone);
3173 		if (status < ZONE_IS_READY || status > ZONE_IS_DOWN)
3174 			continue;
3175 		/*
3176 		 * Bail immediately if any callback invocation returns a
3177 		 * non-zero value.
3178 		 */
3179 		ret = (*cb)(zone, data);
3180 		if (ret != 0)
3181 			break;
3182 	}
3183 	mutex_exit(&zonehash_lock);
3184 	return (ret);
3185 }
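
/*
 * Example callback (hedged sketch): count the visible zones.  A non-zero
 * return from the callback terminates the walk early, and the callback is
 * invoked with zonehash_lock held.
 *
 *	static int
 *	zone_count_cb(zone_t *zone, void *arg)
 *	{
 *		(*(uint_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint_t nzones = 0;
 *	(void) zone_walk(zone_count_cb, &nzones);
 */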
3186 
3187 static int
3188 zone_set_root(zone_t *zone, const char *upath)
3189 {
3190 	vnode_t *vp;
3191 	int trycount;
3192 	int error = 0;
3193 	char *path;
3194 	struct pathname upn, pn;
3195 	size_t pathlen;
3196 
3197 	if ((error = pn_get((char *)upath, UIO_USERSPACE, &upn)) != 0)
3198 		return (error);
3199 
3200 	pn_alloc(&pn);
3201 
3202 	/* prevent infinite loop */
3203 	trycount = 10;
3204 	for (;;) {
3205 		if (--trycount <= 0) {
3206 			error = ESTALE;
3207 			goto out;
3208 		}
3209 
3210 		if ((error = lookuppn(&upn, &pn, FOLLOW, NULLVPP, &vp)) == 0) {
3211 			/*
3212 			 * VOP_ACCESS() may cover 'vp' with a new
3213 			 * filesystem, if 'vp' is an autoFS vnode.
3214 			 * Get the new 'vp' if so.
3215 			 */
3216 			if ((error =
3217 			    VOP_ACCESS(vp, VEXEC, 0, CRED(), NULL)) == 0 &&
3218 			    (!vn_ismntpt(vp) ||
3219 			    (error = traverse(&vp)) == 0)) {
3220 				pathlen = pn.pn_pathlen + 2;
3221 				path = kmem_alloc(pathlen, KM_SLEEP);
3222 				(void) strncpy(path, pn.pn_path,
3223 				    pn.pn_pathlen + 1);
3224 				path[pathlen - 2] = '/';
3225 				path[pathlen - 1] = '\0';
3226 				pn_free(&pn);
3227 				pn_free(&upn);
3228 
3229 				/* Success! */
3230 				break;
3231 			}
3232 			VN_RELE(vp);
3233 		}
3234 		if (error != ESTALE)
3235 			goto out;
3236 	}
3237 
3238 	ASSERT(error == 0);
3239 	zone->zone_rootvp = vp;		/* we hold a reference to vp */
3240 	zone->zone_rootpath = path;
3241 	zone->zone_rootpathlen = pathlen;
3242 	if (pathlen > 5 && strcmp(path + pathlen - 5, "/lu/") == 0)
3243 		zone->zone_flags |= ZF_IS_SCRATCH;
3244 	return (0);
3245 
3246 out:
3247 	pn_free(&pn);
3248 	pn_free(&upn);
3249 	return (error);
3250 }
3251 
3252 #define	isalnum(c)	(((c) >= '0' && (c) <= '9') || \
3253 			((c) >= 'a' && (c) <= 'z') || \
3254 			((c) >= 'A' && (c) <= 'Z'))
3255 
3256 static int
3257 zone_set_name(zone_t *zone, const char *uname)
3258 {
3259 	char *kname = kmem_zalloc(ZONENAME_MAX, KM_SLEEP);
3260 	size_t len;
3261 	int i, err;
3262 
3263 	if ((err = copyinstr(uname, kname, ZONENAME_MAX, &len)) != 0) {
3264 		kmem_free(kname, ZONENAME_MAX);
3265 		return (err);	/* EFAULT or ENAMETOOLONG */
3266 	}
3267 
3268 	/* must be less than ZONENAME_MAX */
3269 	if (len == ZONENAME_MAX && kname[ZONENAME_MAX - 1] != '\0') {
3270 		kmem_free(kname, ZONENAME_MAX);
3271 		return (EINVAL);
3272 	}
3273 
3274 	/*
3275 	 * Name must start with an alphanumeric and must contain only
3276 	 * alphanumerics, '-', '_' and '.'.
3277 	 */
3278 	if (!isalnum(kname[0])) {
3279 		kmem_free(kname, ZONENAME_MAX);
3280 		return (EINVAL);
3281 	}
3282 	for (i = 1; i < len - 1; i++) {
3283 		if (!isalnum(kname[i]) && kname[i] != '-' && kname[i] != '_' &&
3284 		    kname[i] != '.') {
3285 			kmem_free(kname, ZONENAME_MAX);
3286 			return (EINVAL);
3287 		}
3288 	}
3289 
3290 	zone->zone_name = kname;
3291 	return (0);
3292 }
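
/*
 * Under these rules "web01" and "build.2013-05" are accepted, while
 * "-web" (leading non-alphanumeric) and "web zone" (embedded space) fail
 * with EINVAL.
 */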
3293 
3294 /*
3295  * Gets the 32-bit hostid of the specified zone as an unsigned int.  If 'zonep'
3296  * is NULL or it points to a zone with no hostid emulation, then the machine's
3297  * hostid (i.e., the global zone's hostid) is returned.  This function returns
3298  * zero if neither the zone nor the host machine (global zone) has a hostid.  It
3299  * returns HW_INVALID_HOSTID if the function attempts to return the machine's
3300  * hostid and the machine's hostid is invalid.
3301  */
3302 uint32_t
3303 zone_get_hostid(zone_t *zonep)
3304 {
3305 	unsigned long machine_hostid;
3306 
3307 	if (zonep == NULL || zonep->zone_hostid == HW_INVALID_HOSTID) {
3308 		if (ddi_strtoul(hw_serial, NULL, 10, &machine_hostid) != 0)
3309 			return (HW_INVALID_HOSTID);
3310 		return ((uint32_t)machine_hostid);
3311 	}
3312 	return (zonep->zone_hostid);
3313 }
3314 
3315 /*
3316  * Similar to thread_create(), but makes sure the thread is in the appropriate
3317  * zone's zsched process (curproc->p_zone->zone_zsched) before returning.
3318  */
3319 /*ARGSUSED*/
3320 kthread_t *
3321 zthread_create(
3322     caddr_t stk,
3323     size_t stksize,
3324     void (*proc)(),
3325     void *arg,
3326     size_t len,
3327     pri_t pri)
3328 {
3329 	kthread_t *t;
3330 	zone_t *zone = curproc->p_zone;
3331 	proc_t *pp = zone->zone_zsched;
3332 
3333 	zone_hold(zone);	/* Reference to be dropped when thread exits */
3334 
3335 	/*
3336 	 * No-one should be trying to create threads if the zone is shutting
3337 	 * down and there aren't any kernel threads around.  See comment
3338 	 * in zthread_exit().
3339 	 */
3340 	ASSERT(!(zone->zone_kthreads == NULL &&
3341 	    zone_status_get(zone) >= ZONE_IS_EMPTY));
3342 	/*
3343 	 * Create a thread, but don't let it run until we've finished setting
3344 	 * things up.
3345 	 */
3346 	t = thread_create(stk, stksize, proc, arg, len, pp, TS_STOPPED, pri);
3347 	ASSERT(t->t_forw == NULL);
3348 	mutex_enter(&zone_status_lock);
3349 	if (zone->zone_kthreads == NULL) {
3350 		t->t_forw = t->t_back = t;
3351 	} else {
3352 		kthread_t *tx = zone->zone_kthreads;
3353 
3354 		t->t_forw = tx;
3355 		t->t_back = tx->t_back;
3356 		tx->t_back->t_forw = t;
3357 		tx->t_back = t;
3358 	}
3359 	zone->zone_kthreads = t;
3360 	mutex_exit(&zone_status_lock);
3361 
3362 	mutex_enter(&pp->p_lock);
3363 	t->t_proc_flag |= TP_ZTHREAD;
3364 	project_rele(t->t_proj);
3365 	t->t_proj = project_hold(pp->p_task->tk_proj);
3366 
3367 	/*
3368 	 * Setup complete, let it run.
3369 	 */
3370 	thread_lock(t);
3371 	t->t_schedflag |= TS_ALLSTART;
3372 	setrun_locked(t);
3373 	thread_unlock(t);
3374 
3375 	mutex_exit(&pp->p_lock);
3376 
3377 	return (t);
3378 }
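
/*
 * Illustrative call (sketch): my_worker is hypothetical and must exit via
 * zthread_exit() rather than thread_exit().
 *
 *	kthread_t *t;
 *
 *	t = zthread_create(NULL, 0, my_worker, arg, 0, minclsyspri);
 */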
3379 
3380 /*
3381  * Similar to thread_exit().  Must be called by threads created via
3382  * zthread_create().
3383  */
3384 void
3385 zthread_exit(void)
3386 {
3387 	kthread_t *t = curthread;
3388 	proc_t *pp = curproc;
3389 	zone_t *zone = pp->p_zone;
3390 
3391 	mutex_enter(&zone_status_lock);
3392 
3393 	/*
3394 	 * Reparent to p0
3395 	 */
3396 	kpreempt_disable();
3397 	mutex_enter(&pp->p_lock);
3398 	t->t_proc_flag &= ~TP_ZTHREAD;
3399 	t->t_procp = &p0;
3400 	hat_thread_exit(t);
3401 	mutex_exit(&pp->p_lock);
3402 	kpreempt_enable();
3403 
3404 	if (t->t_back == t) {
3405 		ASSERT(t->t_forw == t);
3406 		/*
3407 		 * If the zone is empty, once the thread count
3408 		 * goes to zero no further kernel threads can be
3409 		 * created.  This is because if the creator is a process
3410 		 * in the zone, then it must have exited before the zone
3411 		 * state could be set to ZONE_IS_EMPTY.
3412 		 * Otherwise, if the creator is a kernel thread in the
3413 		 * zone, the thread count is non-zero.
3414 		 *
3415 		 * This really means that non-zone kernel threads should
3416 		 * not create zone kernel threads.
3417 		 */
3418 		zone->zone_kthreads = NULL;
3419 		if (zone_status_get(zone) == ZONE_IS_EMPTY) {
3420 			zone_status_set(zone, ZONE_IS_DOWN);
3421 			/*
3422 			 * Remove any CPU caps on this zone.
3423 			 */
3424 			cpucaps_zone_remove(zone);
3425 		}
3426 	} else {
3427 		t->t_forw->t_back = t->t_back;
3428 		t->t_back->t_forw = t->t_forw;
3429 		if (zone->zone_kthreads == t)
3430 			zone->zone_kthreads = t->t_forw;
3431 	}
3432 	mutex_exit(&zone_status_lock);
3433 	zone_rele(zone);
3434 	thread_exit();
3435 	/* NOTREACHED */
3436 }
3437 
3438 static void
3439 zone_chdir(vnode_t *vp, vnode_t **vpp, proc_t *pp)
3440 {
3441 	vnode_t *oldvp;
3442 
3443 	/* we're going to hold a reference here to the directory */
3444 	VN_HOLD(vp);
3445 
3446 	/* update abs cwd/root path see c2/audit.c */
3447 	if (AU_AUDITING())
3448 		audit_chdirec(vp, vpp);
3449 
3450 	mutex_enter(&pp->p_lock);
3451 	oldvp = *vpp;
3452 	*vpp = vp;
3453 	mutex_exit(&pp->p_lock);
3454 	if (oldvp != NULL)
3455 		VN_RELE(oldvp);
3456 }
3457 
3458 /*
3459  * Convert an rctl value represented by an nvlist_t into an rctl_val_t.
3460  */
3461 static int
3462 nvlist2rctlval(nvlist_t *nvl, rctl_val_t *rv)
3463 {
3464 	nvpair_t *nvp = NULL;
3465 	boolean_t priv_set = B_FALSE;
3466 	boolean_t limit_set = B_FALSE;
3467 	boolean_t action_set = B_FALSE;
3468 
3469 	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
3470 		const char *name;
3471 		uint64_t ui64;
3472 
3473 		name = nvpair_name(nvp);
3474 		if (nvpair_type(nvp) != DATA_TYPE_UINT64)
3475 			return (EINVAL);
3476 		(void) nvpair_value_uint64(nvp, &ui64);
3477 		if (strcmp(name, "privilege") == 0) {
3478 			/*
3479 			 * Currently only privileged values are allowed, but
3480 			 * this may change in the future.
3481 			 */
3482 			if (ui64 != RCPRIV_PRIVILEGED)
3483 				return (EINVAL);
3484 			rv->rcv_privilege = ui64;
3485 			priv_set = B_TRUE;
3486 		} else if (strcmp(name, "limit") == 0) {
3487 			rv->rcv_value = ui64;
3488 			limit_set = B_TRUE;
3489 		} else if (strcmp(name, "action") == 0) {
3490 			if (ui64 != RCTL_LOCAL_NOACTION &&
3491 			    ui64 != RCTL_LOCAL_DENY)
3492 				return (EINVAL);
3493 			rv->rcv_flagaction = ui64;
3494 			action_set = B_TRUE;
3495 		} else {
3496 			return (EINVAL);
3497 		}
3498 	}
3499 
3500 	if (!(priv_set && limit_set && action_set))
3501 		return (EINVAL);
3502 	rv->rcv_action_signal = 0;
3503 	rv->rcv_action_recipient = NULL;
3504 	rv->rcv_action_recip_pid = -1;
3505 	rv->rcv_firing_time = 0;
3506 
3507 	return (0);
3508 }
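
/*
 * The nvlist decoded above must carry exactly the three uint64 pairs
 * checked for.  A sketch of building a matching list (illustrative):
 *
 *	nvlist_t *nvl;
 *
 *	VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_uint64(nvl, "privilege", RCPRIV_PRIVILEGED) == 0);
 *	VERIFY(nvlist_add_uint64(nvl, "limit", 1024) == 0);
 *	VERIFY(nvlist_add_uint64(nvl, "action", RCTL_LOCAL_DENY) == 0);
 */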
3509 
3510 /*
3511  * Non-global zone version of start_init.
3512  */
3513 void
3514 zone_start_init(void)
3515 {
3516 	proc_t *p = ttoproc(curthread);
3517 	zone_t *z = p->p_zone;
3518 
3519 	ASSERT(!INGLOBALZONE(curproc));
3520 
3521 	/*
3522 	 * For all purposes (ZONE_ATTR_INITPID and restart_init),
3523 	 * storing just the pid of init is sufficient.
3524 	 */
3525 	z->zone_proc_initpid = p->p_pid;
3526 
3527 	/*
3528 	 * We maintain zone_boot_err so that we can return the cause of the
3529 	 * failure back to the caller of the zone_boot syscall.
3530 	 */
3531 	p->p_zone->zone_boot_err = start_init_common();
3532 
3533 	/*
3534 	 * We will prevent booting zones from becoming running zones if the
3535 	 * global zone is shutting down.
3536 	 */
3537 	mutex_enter(&zone_status_lock);
3538