xref: /illumos-gate/usr/src/uts/common/os/zone.c (revision 59f2ff5c96304fcfa3d97e66fbe1c521f42ac103)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Zones
31  *
32  *   A zone is a named collection of processes, namespace constraints,
33  *   and other system resources which comprise a secure and manageable
34  *   application containment facility.
35  *
36  *   Zones (represented by the reference counted zone_t) are tracked in
37  *   the kernel in the zonehash.  Elsewhere in the kernel, Zone IDs
38  *   (zoneid_t) are used to track zone association.  Zone IDs are
39  *   dynamically generated when the zone is created; if a persistent
40  *   identifier is needed (core files, accounting logs, audit trail,
41  *   etc.), the zone name should be used.
42  *
43  *
44  *   Global Zone:
45  *
46  *   The global zone (zoneid 0) is automatically associated with all
47  *   system resources that have not been bound to a user-created zone.
48  *   This means that even systems where zones are not in active use
49  *   have a global zone, and all processes, mounts, etc. are
50  *   associated with that zone.  The global zone is generally
51  *   unconstrained in terms of privileges and access, though the usual
52  *   credential and privilege based restrictions apply.
53  *
54  *
55  *   Zone States:
56  *
57  *   The states in which a zone may be in and the transitions are as
58  *   follows:
59  *
60  *   ZONE_IS_UNINITIALIZED: primordial state for a zone. The partially
61  *   initialized zone is added to the list of active zones on the system but
62  *   isn't accessible.
63  *
64  *   ZONE_IS_READY: zsched (the kernel dummy process for a zone) is
65  *   ready.  The zone is made visible after the ZSD constructor callbacks are
66  *   executed.  A zone remains in this state until it transitions into
67  *   the ZONE_IS_BOOTING state as a result of a call to zone_boot().
68  *
69  *   ZONE_IS_BOOTING: in this shortlived-state, zsched attempts to start
70  *   init.  Should that fail, the zone proceeds to the ZONE_IS_SHUTTING_DOWN
71  *   state.
72  *
73  *   ZONE_IS_RUNNING: The zone is open for business: zsched has
74  *   successfully started init.   A zone remains in this state until
75  *   zone_shutdown() is called.
76  *
77  *   ZONE_IS_SHUTTING_DOWN: zone_shutdown() has been called, the system is
78  *   killing all processes running in the zone. The zone remains
79  *   in this state until there are no more user processes running in the zone.
80  *   zone_create(), zone_enter(), and zone_destroy() on this zone will fail.
81  *   Since zone_shutdown() is restartable, it may be called successfully
82  *   multiple times for the same zone_t.  Setting of the zone's state to
83  *   ZONE_IS_SHUTTING_DOWN is synchronized with mounts, so VOP_MOUNT() may check
84  *   the zone's status without worrying about it being a moving target.
85  *
86  *   ZONE_IS_EMPTY: zone_shutdown() has been called, and there
87  *   are no more user processes in the zone.  The zone remains in this
88  *   state until there are no more kernel threads associated with the
89  *   zone.  zone_create(), zone_enter(), and zone_destroy() on this zone will
90  *   fail.
91  *
92  *   ZONE_IS_DOWN: All kernel threads doing work on behalf of the zone
93  *   have exited.  zone_shutdown() returns.  Henceforth it is not possible to
94  *   join the zone or create kernel threads therein.
95  *
96  *   ZONE_IS_DYING: zone_destroy() has been called on the zone; zone
97  *   remains in this state until zsched exits.  Calls to zone_find_by_*()
98  *   return NULL from now on.
99  *
100  *   ZONE_IS_DEAD: zsched has exited (zone_ntasks == 0).  There are no
101  *   processes or threads doing work on behalf of the zone.  The zone is
102  *   removed from the list of active zones.  zone_destroy() returns, and
103  *   the zone can be recreated.
104  *
105  *   ZONE_IS_FREE (internal state): zone_ref goes to 0, ZSD destructor
106  *   callbacks are executed, and all memory associated with the zone is
107  *   freed.
108  *
109  *   Threads can wait for the zone to enter a requested state by using
110  *   zone_status_wait() or zone_status_timedwait() with the desired
111  *   state passed in as an argument.  Zone state transitions are
112  *   uni-directional; it is not possible to move back to an earlier state.
113  *
114  *
115  *   Zone-Specific Data:
116  *
117  *   Subsystems needing to maintain zone-specific data can store that
118  *   data using the ZSD mechanism.  This provides a zone-specific data
119  *   store, similar to thread-specific data (see pthread_getspecific(3C)
120  *   or the TSD code in uts/common/disp/thread.c.  Also, ZSD can be used
121  *   to register callbacks to be invoked when a zone is created, shut
122  *   down, or destroyed.  This can be used to initialize zone-specific
123  *   data for new zones and to clean up when zones go away.
124  *
125  *
126  *   Data Structures:
127  *
128  *   The per-zone structure (zone_t) is reference counted, and freed
129  *   when all references are released.  zone_hold and zone_rele can be
130  *   used to adjust the reference count.  In addition, reference counts
131  *   associated with the cred_t structure are tracked separately using
132  *   zone_cred_hold and zone_cred_rele.
133  *
134  *   Pointers to active zone_t's are stored in two hash tables; one
135  *   for searching by id, the other for searching by name.  Lookups
136  *   can be performed on either basis, using zone_find_by_id and
137  *   zone_find_by_name.  Both return zone_t pointers with the zone
138  *   held, so zone_rele should be called when the pointer is no longer
139  *   needed.  Zones can also be searched by path; zone_find_by_path
140  *   returns the zone with which a path name is associated (global
141  *   zone if the path is not within some other zone's file system
142  *   hierarchy).  This currently requires iterating through each zone,
143  *   so it is slower than an id or name search via a hash table.
144  *
145  *
146  *   Locking:
147  *
148  *   zonehash_lock: This is a top-level global lock used to protect the
149  *       zone hash tables and lists.  Zones cannot be created or destroyed
150  *       while this lock is held.
151  *   zone_status_lock: This is a global lock protecting zone state.
152  *       Zones cannot change state while this lock is held.  It also
153  *       protects the list of kernel threads associated with a zone.
154  *   zone_lock: This is a per-zone lock used to protect several fields of
155  *       the zone_t (see <sys/zone.h> for details).  In addition, holding
156  *       this lock means that the zone cannot go away.
157  *   zone_nlwps_lock: This is a per-zone lock used to protect the fields
158  *	 related to the zone.max-lwps rctl.
159  *   zone_mem_lock: This is a per-zone lock used to protect the fields
160  *	 related to the zone.max-locked-memory and zone.max-swap rctls.
161  *   zsd_key_lock: This is a global lock protecting the key state for ZSD.
162  *   zone_deathrow_lock: This is a global lock protecting the "deathrow"
163  *       list (a list of zones in the ZONE_IS_DEAD state).
164  *
165  *   Ordering requirements:
166  *       pool_lock --> cpu_lock --> zonehash_lock --> zone_status_lock -->
167  *       	zone_lock --> zsd_key_lock --> pidlock --> p_lock
168  *
169  *   When taking zone_mem_lock or zone_nlwps_lock, the lock ordering is:
170  *	zonehash_lock --> a_lock --> pidlock --> p_lock --> zone_mem_lock
171  *	zonehash_lock --> a_lock --> pidlock --> p_lock --> zone_mem_lock
172  *
173  *   Blocking memory allocations are permitted while holding any of the
174  *   zone locks.
175  *
176  *
177  *   System Call Interface:
178  *
179  *   The zone subsystem can be managed and queried from user level with
180  *   the following system calls (all subcodes of the primary "zone"
181  *   system call):
182  *   - zone_create: creates a zone with selected attributes (name,
183  *     root path, privileges, resource controls, ZFS datasets)
184  *   - zone_enter: allows the current process to enter a zone
185  *   - zone_getattr: reports attributes of a zone
186  *   - zone_setattr: set attributes of a zone
187  *   - zone_boot: set 'init' running for the zone
188  *   - zone_list: lists all zones active in the system
189  *   - zone_lookup: looks up zone id based on name
190  *   - zone_shutdown: initiates shutdown process (see states above)
191  *   - zone_destroy: completes shutdown process (see states above)
192  *
193  */
194 
195 #include <sys/priv_impl.h>
196 #include <sys/cred.h>
197 #include <c2/audit.h>
198 #include <sys/debug.h>
199 #include <sys/file.h>
200 #include <sys/kmem.h>
201 #include <sys/kstat.h>
202 #include <sys/mutex.h>
203 #include <sys/note.h>
204 #include <sys/pathname.h>
205 #include <sys/proc.h>
206 #include <sys/project.h>
207 #include <sys/sysevent.h>
208 #include <sys/task.h>
209 #include <sys/systm.h>
210 #include <sys/types.h>
211 #include <sys/utsname.h>
212 #include <sys/vnode.h>
213 #include <sys/vfs.h>
214 #include <sys/systeminfo.h>
215 #include <sys/policy.h>
216 #include <sys/cred_impl.h>
217 #include <sys/contract_impl.h>
218 #include <sys/contract/process_impl.h>
219 #include <sys/class.h>
220 #include <sys/pool.h>
221 #include <sys/pool_pset.h>
222 #include <sys/pset.h>
223 #include <sys/sysmacros.h>
224 #include <sys/callb.h>
225 #include <sys/vmparam.h>
226 #include <sys/corectl.h>
227 #include <sys/ipc_impl.h>
228 
229 #include <sys/door.h>
230 #include <sys/cpuvar.h>
231 
232 #include <sys/uadmin.h>
233 #include <sys/session.h>
234 #include <sys/cmn_err.h>
235 #include <sys/modhash.h>
236 #include <sys/sunddi.h>
237 #include <sys/nvpair.h>
238 #include <sys/rctl.h>
239 #include <sys/fss.h>
240 #include <sys/brand.h>
241 #include <sys/zone.h>
242 #include <net/if.h>
243 #include <sys/cpucaps.h>
244 #include <vm/seg.h>
245 
246 /*
247  * cv used to signal that all references to the zone have been released.  This
248  * needs to be global since there may be multiple waiters, and the first to
249  * wake up will free the zone_t, hence we cannot use zone->zone_cv.
250  */
251 static kcondvar_t zone_destroy_cv;
252 /*
253  * Lock used to serialize access to zone_cv.  This could have been per-zone,
254  * but then we'd need another lock for zone_destroy_cv, and why bother?
255  */
256 static kmutex_t zone_status_lock;
257 
258 /*
259  * ZSD-related global variables.
260  */
261 static kmutex_t zsd_key_lock;	/* protects the following two */
262 /*
263  * The next caller of zone_key_create() will be assigned a key of ++zsd_keyval.
264  */
265 static zone_key_t zsd_keyval = 0;
266 /*
267  * Global list of registered keys.  We use this when a new zone is created.
268  */
269 static list_t zsd_registered_keys;
270 
271 int zone_hash_size = 256;
272 static mod_hash_t *zonehashbyname, *zonehashbyid, *zonehashbylabel;
273 static kmutex_t zonehash_lock;
274 static uint_t zonecount;
275 static id_space_t *zoneid_space;
276 
277 /*
278  * The global zone (aka zone0) is the all-seeing, all-knowing zone in which the
279  * kernel proper runs, and which manages all other zones.
280  *
281  * Although not declared as static, the variable "zone0" should not be used
282  * except for by code that needs to reference the global zone early on in boot,
283  * before it is fully initialized.  All other consumers should use
284  * 'global_zone'.
285  */
286 zone_t zone0;
287 zone_t *global_zone = NULL;	/* Set when the global zone is initialized */
288 
289 /*
290  * List of active zones, protected by zonehash_lock.
291  */
292 static list_t zone_active;
293 
294 /*
295  * List of destroyed zones that still have outstanding cred references.
296  * Used for debugging.  Uses a separate lock to avoid lock ordering
297  * problems in zone_free.
298  */
299 static list_t zone_deathrow;
300 static kmutex_t zone_deathrow_lock;
301 
302 /* number of zones is limited by virtual interface limit in IP */
303 uint_t maxzones = 8192;
304 
305 /* Event channel to sent zone state change notifications */
306 evchan_t *zone_event_chan;
307 
308 /*
309  * This table holds the mapping from kernel zone states to
310  * states visible in the state notification API.
311  * The idea is that we only expose "obvious" states and
312  * do not expose states which are just implementation details.
313  */
314 const char  *zone_status_table[] = {
315 	ZONE_EVENT_UNINITIALIZED,	/* uninitialized */
316 	ZONE_EVENT_READY,		/* ready */
317 	ZONE_EVENT_READY,		/* booting */
318 	ZONE_EVENT_RUNNING,		/* running */
319 	ZONE_EVENT_SHUTTING_DOWN,	/* shutting_down */
320 	ZONE_EVENT_SHUTTING_DOWN,	/* empty */
321 	ZONE_EVENT_SHUTTING_DOWN,	/* down */
322 	ZONE_EVENT_SHUTTING_DOWN,	/* dying */
323 	ZONE_EVENT_UNINITIALIZED,	/* dead */
324 };
325 
326 /*
327  * This isn't static so lint doesn't complain.
328  */
329 rctl_hndl_t rc_zone_cpu_shares;
330 rctl_hndl_t rc_zone_locked_mem;
331 rctl_hndl_t rc_zone_max_swap;
332 rctl_hndl_t rc_zone_cpu_cap;
333 rctl_hndl_t rc_zone_nlwps;
334 rctl_hndl_t rc_zone_shmmax;
335 rctl_hndl_t rc_zone_shmmni;
336 rctl_hndl_t rc_zone_semmni;
337 rctl_hndl_t rc_zone_msgmni;
338 /*
339  * Synchronization primitives used to synchronize between mounts and zone
340  * creation/destruction.
341  */
342 static int mounts_in_progress;
343 static kcondvar_t mount_cv;
344 static kmutex_t mount_lock;
345 
346 const char * const zone_default_initname = "/sbin/init";
347 static char * const zone_prefix = "/zone/";
348 static int zone_shutdown(zoneid_t zoneid);
349 static int zone_add_datalink(zoneid_t, char *);
350 static int zone_remove_datalink(zoneid_t, char *);
351 static int zone_check_datalink(zoneid_t *, char *);
352 static int zone_list_datalink(zoneid_t, int *, char *);
353 
354 /*
355  * Bump this number when you alter the zone syscall interfaces; this is
356  * because we need to have support for previous API versions in libc
357  * to support patching; libc calls into the kernel to determine this number.
358  *
359  * Version 1 of the API is the version originally shipped with Solaris 10
360  * Version 2 alters the zone_create system call in order to support more
361  *     arguments by moving the args into a structure; and to do better
362  *     error reporting when zone_create() fails.
363  * Version 3 alters the zone_create system call in order to support the
364  *     import of ZFS datasets to zones.
365  * Version 4 alters the zone_create system call in order to support
366  *     Trusted Extensions.
367  * Version 5 alters the zone_boot system call, and converts its old
368  *     bootargs parameter to be set by the zone_setattr API instead.
369  * Version 6 adds the flag argument to zone_create.
370  */
371 static const int ZONE_SYSCALL_API_VERSION = 6;
372 
373 /*
374  * Certain filesystems (such as NFS and autofs) need to know which zone
375  * the mount is being placed in.  Because of this, we need to be able to
376  * ensure that a zone isn't in the process of being created such that
377  * nfs_mount() thinks it is in the global zone, while by the time it
378  * gets added the list of mounted zones, it ends up on zoneA's mount
379  * list.
380  *
381  * The following functions: block_mounts()/resume_mounts() and
382  * mount_in_progress()/mount_completed() are used by zones and the VFS
383  * layer (respectively) to synchronize zone creation and new mounts.
384  *
385  * The semantics are like a reader-reader lock such that there may
386  * either be multiple mounts (or zone creations, if that weren't
387  * serialized by zonehash_lock) in progress at the same time, but not
388  * both.
389  *
390  * We use cv's so the user can ctrl-C out of the operation if it's
391  * taking too long.
392  *
393  * The semantics are such that there is unfair bias towards the
394  * "current" operation.  This means that zone creations may starve if
395  * there is a rapid succession of new mounts coming in to the system, or
396  * there is a remote possibility that zones will be created at such a
397  * rate that new mounts will not be able to proceed.
398  */
399 /*
400  * Prevent new mounts from progressing to the point of calling
401  * VFS_MOUNT().  If there are already mounts in this "region", wait for
402  * them to complete.
403  */
404 static int
405 block_mounts(void)
406 {
407 	int retval = 0;
408 
409 	/*
410 	 * Since it may block for a long time, block_mounts() shouldn't be
411 	 * called with zonehash_lock held.
412 	 */
413 	ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
414 	mutex_enter(&mount_lock);
415 	while (mounts_in_progress > 0) {
416 		if (cv_wait_sig(&mount_cv, &mount_lock) == 0)
417 			goto signaled;
418 	}
419 	/*
420 	 * A negative value of mounts_in_progress indicates that mounts
421 	 * have been blocked by (-mounts_in_progress) different callers.
422 	 */
423 	mounts_in_progress--;
424 	retval = 1;
425 signaled:
426 	mutex_exit(&mount_lock);
427 	return (retval);
428 }
429 
430 /*
431  * The VFS layer may progress with new mounts as far as we're concerned.
432  * Allow them to progress if we were the last obstacle.
433  */
434 static void
435 resume_mounts(void)
436 {
437 	mutex_enter(&mount_lock);
438 	if (++mounts_in_progress == 0)
439 		cv_broadcast(&mount_cv);
440 	mutex_exit(&mount_lock);
441 }
442 
443 /*
444  * The VFS layer is busy with a mount; zones should wait until all
445  * mounts are completed to progress.
446  */
447 void
448 mount_in_progress(void)
449 {
450 	mutex_enter(&mount_lock);
451 	while (mounts_in_progress < 0)
452 		cv_wait(&mount_cv, &mount_lock);
453 	mounts_in_progress++;
454 	mutex_exit(&mount_lock);
455 }
456 
457 /*
458  * VFS is done with one mount; wake up any waiting block_mounts()
459  * callers if this is the last mount.
460  */
461 void
462 mount_completed(void)
463 {
464 	mutex_enter(&mount_lock);
465 	if (--mounts_in_progress == 0)
466 		cv_broadcast(&mount_cv);
467 	mutex_exit(&mount_lock);
468 }
469 
470 /*
471  * ZSD routines.
472  *
473  * Zone Specific Data (ZSD) is modeled after Thread Specific Data as
474  * defined by the pthread_key_create() and related interfaces.
475  *
476  * Kernel subsystems may register one or more data items and/or
477  * callbacks to be executed when a zone is created, shutdown, or
478  * destroyed.
479  *
480  * Unlike the thread counterpart, destructor callbacks will be executed
481  * even if the data pointer is NULL and/or there are no constructor
482  * callbacks, so it is the responsibility of such callbacks to check for
483  * NULL data values if necessary.
484  *
485  * The locking strategy and overall picture is as follows:
486  *
487  * When someone calls zone_key_create(), a template ZSD entry is added to the
488  * global list "zsd_registered_keys", protected by zsd_key_lock.  The
489  * constructor callback is called immediately on all existing zones, and a
490  * copy of the ZSD entry added to the per-zone zone_zsd list (protected by
491  * zone_lock).  As this operation requires the list of zones, the list of
492  * registered keys, and the per-zone list of ZSD entries to remain constant
493  * throughout the entire operation, it must grab zonehash_lock, zone_lock for
494  * all existing zones, and zsd_key_lock, in that order.  Similar locking is
495  * needed when zone_key_delete() is called.  It is thus sufficient to hold
496  * zsd_key_lock *or* zone_lock to prevent additions to or removals from the
497  * per-zone zone_zsd list.
498  *
499  * Note that this implementation does not make a copy of the ZSD entry if a
500  * constructor callback is not provided.  A zone_getspecific() on such an
501  * uninitialized ZSD entry will return NULL.
502  *
503  * When new zones are created constructor callbacks for all registered ZSD
504  * entries will be called.
505  *
506  * The framework does not provide any locking around zone_getspecific() and
507  * zone_setspecific() apart from that needed for internal consistency, so
508  * callers interested in atomic "test-and-set" semantics will need to provide
509  * their own locking.
510  */
511 void
512 zone_key_create(zone_key_t *keyp, void *(*create)(zoneid_t),
513     void (*shutdown)(zoneid_t, void *), void (*destroy)(zoneid_t, void *))
514 {
515 	struct zsd_entry *zsdp;
516 	struct zsd_entry *t;
517 	struct zone *zone;
518 
519 	zsdp = kmem_alloc(sizeof (*zsdp), KM_SLEEP);
520 	zsdp->zsd_data = NULL;
521 	zsdp->zsd_create = create;
522 	zsdp->zsd_shutdown = shutdown;
523 	zsdp->zsd_destroy = destroy;
524 
525 	mutex_enter(&zonehash_lock);	/* stop the world */
526 	for (zone = list_head(&zone_active); zone != NULL;
527 	    zone = list_next(&zone_active, zone))
528 		mutex_enter(&zone->zone_lock);	/* lock all zones */
529 
530 	mutex_enter(&zsd_key_lock);
531 	*keyp = zsdp->zsd_key = ++zsd_keyval;
532 	ASSERT(zsd_keyval != 0);
533 	list_insert_tail(&zsd_registered_keys, zsdp);
534 	mutex_exit(&zsd_key_lock);
535 
536 	if (create != NULL) {
537 		for (zone = list_head(&zone_active); zone != NULL;
538 		    zone = list_next(&zone_active, zone)) {
539 			t = kmem_alloc(sizeof (*t), KM_SLEEP);
540 			t->zsd_key = *keyp;
541 			t->zsd_data = (*create)(zone->zone_id);
542 			t->zsd_create = create;
543 			t->zsd_shutdown = shutdown;
544 			t->zsd_destroy = destroy;
545 			list_insert_tail(&zone->zone_zsd, t);
546 		}
547 	}
548 	for (zone = list_head(&zone_active); zone != NULL;
549 	    zone = list_next(&zone_active, zone))
550 		mutex_exit(&zone->zone_lock);
551 	mutex_exit(&zonehash_lock);
552 }
553 
554 /*
555  * Helper function to find the zsd_entry associated with the key in the
556  * given list.
557  */
558 static struct zsd_entry *
559 zsd_find(list_t *l, zone_key_t key)
560 {
561 	struct zsd_entry *zsd;
562 
563 	for (zsd = list_head(l); zsd != NULL; zsd = list_next(l, zsd)) {
564 		if (zsd->zsd_key == key) {
565 			/*
566 			 * Move to head of list to keep list in MRU order.
567 			 */
568 			if (zsd != list_head(l)) {
569 				list_remove(l, zsd);
570 				list_insert_head(l, zsd);
571 			}
572 			return (zsd);
573 		}
574 	}
575 	return (NULL);
576 }
577 
578 /*
579  * Function called when a module is being unloaded, or otherwise wishes
580  * to unregister its ZSD key and callbacks.
581  */
582 int
583 zone_key_delete(zone_key_t key)
584 {
585 	struct zsd_entry *zsdp = NULL;
586 	zone_t *zone;
587 
588 	mutex_enter(&zonehash_lock);	/* Zone create/delete waits for us */
589 	for (zone = list_head(&zone_active); zone != NULL;
590 	    zone = list_next(&zone_active, zone))
591 		mutex_enter(&zone->zone_lock);	/* lock all zones */
592 
593 	mutex_enter(&zsd_key_lock);
594 	zsdp = zsd_find(&zsd_registered_keys, key);
595 	if (zsdp == NULL)
596 		goto notfound;
597 	list_remove(&zsd_registered_keys, zsdp);
598 	mutex_exit(&zsd_key_lock);
599 
600 	for (zone = list_head(&zone_active); zone != NULL;
601 	    zone = list_next(&zone_active, zone)) {
602 		struct zsd_entry *del;
603 		void *data;
604 
605 		if (!(zone->zone_flags & ZF_DESTROYED)) {
606 			del = zsd_find(&zone->zone_zsd, key);
607 			if (del != NULL) {
608 				data = del->zsd_data;
609 				ASSERT(del->zsd_shutdown == zsdp->zsd_shutdown);
610 				ASSERT(del->zsd_destroy == zsdp->zsd_destroy);
611 				list_remove(&zone->zone_zsd, del);
612 				kmem_free(del, sizeof (*del));
613 			} else {
614 				data = NULL;
615 			}
616 			if (zsdp->zsd_shutdown)
617 				zsdp->zsd_shutdown(zone->zone_id, data);
618 			if (zsdp->zsd_destroy)
619 				zsdp->zsd_destroy(zone->zone_id, data);
620 		}
621 		mutex_exit(&zone->zone_lock);
622 	}
623 	mutex_exit(&zonehash_lock);
624 	kmem_free(zsdp, sizeof (*zsdp));
625 	return (0);
626 
627 notfound:
628 	mutex_exit(&zsd_key_lock);
629 	for (zone = list_head(&zone_active); zone != NULL;
630 	    zone = list_next(&zone_active, zone))
631 		mutex_exit(&zone->zone_lock);
632 	mutex_exit(&zonehash_lock);
633 	return (-1);
634 }
635 
636 /*
637  * ZSD counterpart of pthread_setspecific().
638  */
639 int
640 zone_setspecific(zone_key_t key, zone_t *zone, const void *data)
641 {
642 	struct zsd_entry *t;
643 	struct zsd_entry *zsdp = NULL;
644 
645 	mutex_enter(&zone->zone_lock);
646 	t = zsd_find(&zone->zone_zsd, key);
647 	if (t != NULL) {
648 		/*
649 		 * Replace old value with new
650 		 */
651 		t->zsd_data = (void *)data;
652 		mutex_exit(&zone->zone_lock);
653 		return (0);
654 	}
655 	/*
656 	 * If there was no previous value, go through the list of registered
657 	 * keys.
658 	 *
659 	 * We avoid grabbing zsd_key_lock until we are sure we need it; this is
660 	 * necessary for shutdown callbacks to be able to execute without fear
661 	 * of deadlock.
662 	 */
663 	mutex_enter(&zsd_key_lock);
664 	zsdp = zsd_find(&zsd_registered_keys, key);
665 	if (zsdp == NULL) { 	/* Key was not registered */
666 		mutex_exit(&zsd_key_lock);
667 		mutex_exit(&zone->zone_lock);
668 		return (-1);
669 	}
670 
671 	/*
672 	 * Add a zsd_entry to this zone, using the template we just retrieved
673 	 * to initialize the constructor and destructor(s).
674 	 */
675 	t = kmem_alloc(sizeof (*t), KM_SLEEP);
676 	t->zsd_key = key;
677 	t->zsd_data = (void *)data;
678 	t->zsd_create = zsdp->zsd_create;
679 	t->zsd_shutdown = zsdp->zsd_shutdown;
680 	t->zsd_destroy = zsdp->zsd_destroy;
681 	list_insert_tail(&zone->zone_zsd, t);
682 	mutex_exit(&zsd_key_lock);
683 	mutex_exit(&zone->zone_lock);
684 	return (0);
685 }
686 
687 /*
688  * ZSD counterpart of pthread_getspecific().
689  */
690 void *
691 zone_getspecific(zone_key_t key, zone_t *zone)
692 {
693 	struct zsd_entry *t;
694 	void *data;
695 
696 	mutex_enter(&zone->zone_lock);
697 	t = zsd_find(&zone->zone_zsd, key);
698 	data = (t == NULL ? NULL : t->zsd_data);
699 	mutex_exit(&zone->zone_lock);
700 	return (data);
701 }
702 
703 /*
704  * Function used to initialize a zone's list of ZSD callbacks and data
705  * when the zone is being created.  The callbacks are initialized from
706  * the template list (zsd_registered_keys), and the constructor
707  * callback executed (if one exists).
708  *
709  * This is called before the zone is made publicly available, hence no
710  * need to grab zone_lock.
711  *
712  * Although we grab and release zsd_key_lock, new entries cannot be
713  * added to or removed from the zsd_registered_keys list until we
714  * release zonehash_lock, so there isn't a window for a
715  * zone_key_create() to come in after we've dropped zsd_key_lock but
716  * before the zone is added to the zone list, such that the constructor
717  * callbacks aren't executed for the new zone.
718  */
719 static void
720 zone_zsd_configure(zone_t *zone)
721 {
722 	struct zsd_entry *zsdp;
723 	struct zsd_entry *t;
724 	zoneid_t zoneid = zone->zone_id;
725 
726 	ASSERT(MUTEX_HELD(&zonehash_lock));
727 	ASSERT(list_head(&zone->zone_zsd) == NULL);
728 	mutex_enter(&zsd_key_lock);
729 	for (zsdp = list_head(&zsd_registered_keys); zsdp != NULL;
730 	    zsdp = list_next(&zsd_registered_keys, zsdp)) {
731 		if (zsdp->zsd_create != NULL) {
732 			t = kmem_alloc(sizeof (*t), KM_SLEEP);
733 			t->zsd_key = zsdp->zsd_key;
734 			t->zsd_create = zsdp->zsd_create;
735 			t->zsd_data = (*t->zsd_create)(zoneid);
736 			t->zsd_shutdown = zsdp->zsd_shutdown;
737 			t->zsd_destroy = zsdp->zsd_destroy;
738 			list_insert_tail(&zone->zone_zsd, t);
739 		}
740 	}
741 	mutex_exit(&zsd_key_lock);
742 }
743 
744 enum zsd_callback_type { ZSD_CREATE, ZSD_SHUTDOWN, ZSD_DESTROY };
745 
746 /*
747  * Helper function to execute shutdown or destructor callbacks.
748  */
749 static void
750 zone_zsd_callbacks(zone_t *zone, enum zsd_callback_type ct)
751 {
752 	struct zsd_entry *zsdp;
753 	struct zsd_entry *t;
754 	zoneid_t zoneid = zone->zone_id;
755 
756 	ASSERT(ct == ZSD_SHUTDOWN || ct == ZSD_DESTROY);
757 	ASSERT(ct != ZSD_SHUTDOWN || zone_status_get(zone) >= ZONE_IS_EMPTY);
758 	ASSERT(ct != ZSD_DESTROY || zone_status_get(zone) >= ZONE_IS_DOWN);
759 
760 	mutex_enter(&zone->zone_lock);
761 	if (ct == ZSD_DESTROY) {
762 		if (zone->zone_flags & ZF_DESTROYED) {
763 			/*
764 			 * Make sure destructors are only called once.
765 			 */
766 			mutex_exit(&zone->zone_lock);
767 			return;
768 		}
769 		zone->zone_flags |= ZF_DESTROYED;
770 	}
771 	mutex_exit(&zone->zone_lock);
772 
773 	/*
774 	 * Both zsd_key_lock and zone_lock need to be held in order to add or
775 	 * remove a ZSD key, (either globally as part of
776 	 * zone_key_create()/zone_key_delete(), or on a per-zone basis, as is
777 	 * possible through zone_setspecific()), so it's sufficient to hold
778 	 * zsd_key_lock here.
779 	 *
780 	 * This is a good thing, since we don't want to recursively try to grab
781 	 * zone_lock if a callback attempts to do something like a crfree() or
782 	 * zone_rele().
783 	 */
784 	mutex_enter(&zsd_key_lock);
785 	for (zsdp = list_head(&zsd_registered_keys); zsdp != NULL;
786 	    zsdp = list_next(&zsd_registered_keys, zsdp)) {
787 		zone_key_t key = zsdp->zsd_key;
788 
789 		/* Skip if no callbacks registered */
790 		if (ct == ZSD_SHUTDOWN && zsdp->zsd_shutdown == NULL)
791 			continue;
792 		if (ct == ZSD_DESTROY && zsdp->zsd_destroy == NULL)
793 			continue;
794 		/*
795 		 * Call the callback with the zone-specific data if we can find
796 		 * any, otherwise with NULL.
797 		 */
798 		t = zsd_find(&zone->zone_zsd, key);
799 		if (t != NULL) {
800 			if (ct == ZSD_SHUTDOWN) {
801 				t->zsd_shutdown(zoneid, t->zsd_data);
802 			} else {
803 				ASSERT(ct == ZSD_DESTROY);
804 				t->zsd_destroy(zoneid, t->zsd_data);
805 			}
806 		} else {
807 			if (ct == ZSD_SHUTDOWN) {
808 				zsdp->zsd_shutdown(zoneid, NULL);
809 			} else {
810 				ASSERT(ct == ZSD_DESTROY);
811 				zsdp->zsd_destroy(zoneid, NULL);
812 			}
813 		}
814 	}
815 	mutex_exit(&zsd_key_lock);
816 }
817 
818 /*
819  * Called when the zone is going away; free ZSD-related memory, and
820  * destroy the zone_zsd list.
821  */
822 static void
823 zone_free_zsd(zone_t *zone)
824 {
825 	struct zsd_entry *t, *next;
826 
827 	/*
828 	 * Free all the zsd_entry's we had on this zone.
829 	 */
830 	for (t = list_head(&zone->zone_zsd); t != NULL; t = next) {
831 		next = list_next(&zone->zone_zsd, t);
832 		list_remove(&zone->zone_zsd, t);
833 		kmem_free(t, sizeof (*t));
834 	}
835 	list_destroy(&zone->zone_zsd);
836 }
837 
838 /*
839  * Frees memory associated with the zone dataset list.
840  */
841 static void
842 zone_free_datasets(zone_t *zone)
843 {
844 	zone_dataset_t *t, *next;
845 
846 	for (t = list_head(&zone->zone_datasets); t != NULL; t = next) {
847 		next = list_next(&zone->zone_datasets, t);
848 		list_remove(&zone->zone_datasets, t);
849 		kmem_free(t->zd_dataset, strlen(t->zd_dataset) + 1);
850 		kmem_free(t, sizeof (*t));
851 	}
852 	list_destroy(&zone->zone_datasets);
853 }
854 
855 /*
856  * zone.cpu-shares resource control support.
857  */
858 /*ARGSUSED*/
859 static rctl_qty_t
860 zone_cpu_shares_usage(rctl_t *rctl, struct proc *p)
861 {
862 	ASSERT(MUTEX_HELD(&p->p_lock));
863 	return (p->p_zone->zone_shares);
864 }
865 
866 /*ARGSUSED*/
867 static int
868 zone_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
869     rctl_qty_t nv)
870 {
871 	ASSERT(MUTEX_HELD(&p->p_lock));
872 	ASSERT(e->rcep_t == RCENTITY_ZONE);
873 	if (e->rcep_p.zone == NULL)
874 		return (0);
875 
876 	e->rcep_p.zone->zone_shares = nv;
877 	return (0);
878 }
879 
880 static rctl_ops_t zone_cpu_shares_ops = {
881 	rcop_no_action,
882 	zone_cpu_shares_usage,
883 	zone_cpu_shares_set,
884 	rcop_no_test
885 };
886 
887 /*
888  * zone.cpu-cap resource control support.
889  */
890 /*ARGSUSED*/
891 static rctl_qty_t
892 zone_cpu_cap_get(rctl_t *rctl, struct proc *p)
893 {
894 	ASSERT(MUTEX_HELD(&p->p_lock));
895 	return (cpucaps_zone_get(p->p_zone));
896 }
897 
898 /*ARGSUSED*/
899 static int
900 zone_cpu_cap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
901     rctl_qty_t nv)
902 {
903 	zone_t *zone = e->rcep_p.zone;
904 
905 	ASSERT(MUTEX_HELD(&p->p_lock));
906 	ASSERT(e->rcep_t == RCENTITY_ZONE);
907 
908 	if (zone == NULL)
909 		return (0);
910 
911 	/*
912 	 * set cap to the new value.
913 	 */
914 	return (cpucaps_zone_set(zone, nv));
915 }
916 
917 static rctl_ops_t zone_cpu_cap_ops = {
918 	rcop_no_action,
919 	zone_cpu_cap_get,
920 	zone_cpu_cap_set,
921 	rcop_no_test
922 };
923 
924 /*ARGSUSED*/
925 static rctl_qty_t
926 zone_lwps_usage(rctl_t *r, proc_t *p)
927 {
928 	rctl_qty_t nlwps;
929 	zone_t *zone = p->p_zone;
930 
931 	ASSERT(MUTEX_HELD(&p->p_lock));
932 
933 	mutex_enter(&zone->zone_nlwps_lock);
934 	nlwps = zone->zone_nlwps;
935 	mutex_exit(&zone->zone_nlwps_lock);
936 
937 	return (nlwps);
938 }
939 
940 /*ARGSUSED*/
941 static int
942 zone_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
943     rctl_qty_t incr, uint_t flags)
944 {
945 	rctl_qty_t nlwps;
946 
947 	ASSERT(MUTEX_HELD(&p->p_lock));
948 	ASSERT(e->rcep_t == RCENTITY_ZONE);
949 	if (e->rcep_p.zone == NULL)
950 		return (0);
951 	ASSERT(MUTEX_HELD(&(e->rcep_p.zone->zone_nlwps_lock)));
952 	nlwps = e->rcep_p.zone->zone_nlwps;
953 
954 	if (nlwps + incr > rcntl->rcv_value)
955 		return (1);
956 
957 	return (0);
958 }
959 
960 /*ARGSUSED*/
961 static int
962 zone_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv)
963 {
964 	ASSERT(MUTEX_HELD(&p->p_lock));
965 	ASSERT(e->rcep_t == RCENTITY_ZONE);
966 	if (e->rcep_p.zone == NULL)
967 		return (0);
968 	e->rcep_p.zone->zone_nlwps_ctl = nv;
969 	return (0);
970 }
971 
972 static rctl_ops_t zone_lwps_ops = {
973 	rcop_no_action,
974 	zone_lwps_usage,
975 	zone_lwps_set,
976 	zone_lwps_test,
977 };
978 
979 /*ARGSUSED*/
980 static int
981 zone_shmmax_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
982     rctl_qty_t incr, uint_t flags)
983 {
984 	rctl_qty_t v;
985 	ASSERT(MUTEX_HELD(&p->p_lock));
986 	ASSERT(e->rcep_t == RCENTITY_ZONE);
987 	v = e->rcep_p.zone->zone_shmmax + incr;
988 	if (v > rval->rcv_value)
989 		return (1);
990 	return (0);
991 }
992 
993 static rctl_ops_t zone_shmmax_ops = {
994 	rcop_no_action,
995 	rcop_no_usage,
996 	rcop_no_set,
997 	zone_shmmax_test
998 };
999 
1000 /*ARGSUSED*/
1001 static int
1002 zone_shmmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1003     rctl_qty_t incr, uint_t flags)
1004 {
1005 	rctl_qty_t v;
1006 	ASSERT(MUTEX_HELD(&p->p_lock));
1007 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1008 	v = e->rcep_p.zone->zone_ipc.ipcq_shmmni + incr;
1009 	if (v > rval->rcv_value)
1010 		return (1);
1011 	return (0);
1012 }
1013 
1014 static rctl_ops_t zone_shmmni_ops = {
1015 	rcop_no_action,
1016 	rcop_no_usage,
1017 	rcop_no_set,
1018 	zone_shmmni_test
1019 };
1020 
1021 /*ARGSUSED*/
1022 static int
1023 zone_semmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1024     rctl_qty_t incr, uint_t flags)
1025 {
1026 	rctl_qty_t v;
1027 	ASSERT(MUTEX_HELD(&p->p_lock));
1028 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1029 	v = e->rcep_p.zone->zone_ipc.ipcq_semmni + incr;
1030 	if (v > rval->rcv_value)
1031 		return (1);
1032 	return (0);
1033 }
1034 
1035 static rctl_ops_t zone_semmni_ops = {
1036 	rcop_no_action,
1037 	rcop_no_usage,
1038 	rcop_no_set,
1039 	zone_semmni_test
1040 };
1041 
1042 /*ARGSUSED*/
1043 static int
1044 zone_msgmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1045     rctl_qty_t incr, uint_t flags)
1046 {
1047 	rctl_qty_t v;
1048 	ASSERT(MUTEX_HELD(&p->p_lock));
1049 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1050 	v = e->rcep_p.zone->zone_ipc.ipcq_msgmni + incr;
1051 	if (v > rval->rcv_value)
1052 		return (1);
1053 	return (0);
1054 }
1055 
1056 static rctl_ops_t zone_msgmni_ops = {
1057 	rcop_no_action,
1058 	rcop_no_usage,
1059 	rcop_no_set,
1060 	zone_msgmni_test
1061 };
1062 
1063 /*ARGSUSED*/
1064 static rctl_qty_t
1065 zone_locked_mem_usage(rctl_t *rctl, struct proc *p)
1066 {
1067 	rctl_qty_t q;
1068 	ASSERT(MUTEX_HELD(&p->p_lock));
1069 	mutex_enter(&p->p_zone->zone_mem_lock);
1070 	q = p->p_zone->zone_locked_mem;
1071 	mutex_exit(&p->p_zone->zone_mem_lock);
1072 	return (q);
1073 }
1074 
1075 /*ARGSUSED*/
1076 static int
1077 zone_locked_mem_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
1078     rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
1079 {
1080 	rctl_qty_t q;
1081 	zone_t *z;
1082 
1083 	z = e->rcep_p.zone;
1084 	ASSERT(MUTEX_HELD(&p->p_lock));
1085 	ASSERT(MUTEX_HELD(&z->zone_mem_lock));
1086 	q = z->zone_locked_mem;
1087 	if (q + incr > rcntl->rcv_value)
1088 		return (1);
1089 	return (0);
1090 }
1091 
1092 /*ARGSUSED*/
1093 static int
1094 zone_locked_mem_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1095     rctl_qty_t nv)
1096 {
1097 	ASSERT(MUTEX_HELD(&p->p_lock));
1098 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1099 	if (e->rcep_p.zone == NULL)
1100 		return (0);
1101 	e->rcep_p.zone->zone_locked_mem_ctl = nv;
1102 	return (0);
1103 }
1104 
1105 static rctl_ops_t zone_locked_mem_ops = {
1106 	rcop_no_action,
1107 	zone_locked_mem_usage,
1108 	zone_locked_mem_set,
1109 	zone_locked_mem_test
1110 };
1111 
1112 /*ARGSUSED*/
1113 static rctl_qty_t
1114 zone_max_swap_usage(rctl_t *rctl, struct proc *p)
1115 {
1116 	rctl_qty_t q;
1117 	zone_t *z = p->p_zone;
1118 
1119 	ASSERT(MUTEX_HELD(&p->p_lock));
1120 	mutex_enter(&z->zone_mem_lock);
1121 	q = z->zone_max_swap;
1122 	mutex_exit(&z->zone_mem_lock);
1123 	return (q);
1124 }
1125 
1126 /*ARGSUSED*/
1127 static int
1128 zone_max_swap_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
1129     rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
1130 {
1131 	rctl_qty_t q;
1132 	zone_t *z;
1133 
1134 	z = e->rcep_p.zone;
1135 	ASSERT(MUTEX_HELD(&p->p_lock));
1136 	ASSERT(MUTEX_HELD(&z->zone_mem_lock));
1137 	q = z->zone_max_swap;
1138 	if (q + incr > rcntl->rcv_value)
1139 		return (1);
1140 	return (0);
1141 }
1142 
1143 /*ARGSUSED*/
1144 static int
1145 zone_max_swap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1146     rctl_qty_t nv)
1147 {
1148 	ASSERT(MUTEX_HELD(&p->p_lock));
1149 	ASSERT(e->rcep_t == RCENTITY_ZONE);
1150 	if (e->rcep_p.zone == NULL)
1151 		return (0);
1152 	e->rcep_p.zone->zone_max_swap_ctl = nv;
1153 	return (0);
1154 }
1155 
1156 static rctl_ops_t zone_max_swap_ops = {
1157 	rcop_no_action,
1158 	zone_max_swap_usage,
1159 	zone_max_swap_set,
1160 	zone_max_swap_test
1161 };
1162 
1163 /*
1164  * Helper function to brand the zone with a unique ID.
1165  */
1166 static void
1167 zone_uniqid(zone_t *zone)
1168 {
1169 	static uint64_t uniqid = 0;
1170 
1171 	ASSERT(MUTEX_HELD(&zonehash_lock));
1172 	zone->zone_uniqid = uniqid++;
1173 }
1174 
1175 /*
1176  * Returns a held pointer to the "kcred" for the specified zone.
1177  */
1178 struct cred *
1179 zone_get_kcred(zoneid_t zoneid)
1180 {
1181 	zone_t *zone;
1182 	cred_t *cr;
1183 
1184 	if ((zone = zone_find_by_id(zoneid)) == NULL)
1185 		return (NULL);
1186 	cr = zone->zone_kcred;
1187 	crhold(cr);
1188 	zone_rele(zone);
1189 	return (cr);
1190 }
1191 
1192 static int
1193 zone_lockedmem_kstat_update(kstat_t *ksp, int rw)
1194 {
1195 	zone_t *zone = ksp->ks_private;
1196 	zone_kstat_t *zk = ksp->ks_data;
1197 
1198 	if (rw == KSTAT_WRITE)
1199 		return (EACCES);
1200 
1201 	zk->zk_usage.value.ui64 = zone->zone_locked_mem;
1202 	zk->zk_value.value.ui64 = zone->zone_locked_mem_ctl;
1203 	return (0);
1204 }
1205 
1206 static int
1207 zone_swapresv_kstat_update(kstat_t *ksp, int rw)
1208 {
1209 	zone_t *zone = ksp->ks_private;
1210 	zone_kstat_t *zk = ksp->ks_data;
1211 
1212 	if (rw == KSTAT_WRITE)
1213 		return (EACCES);
1214 
1215 	zk->zk_usage.value.ui64 = zone->zone_max_swap;
1216 	zk->zk_value.value.ui64 = zone->zone_max_swap_ctl;
1217 	return (0);
1218 }
1219 
1220 static void
1221 zone_kstat_create(zone_t *zone)
1222 {
1223 	kstat_t *ksp;
1224 	zone_kstat_t *zk;
1225 
1226 	ksp = rctl_kstat_create_zone(zone, "lockedmem", KSTAT_TYPE_NAMED,
1227 	    sizeof (zone_kstat_t) / sizeof (kstat_named_t),
1228 	    KSTAT_FLAG_VIRTUAL);
1229 
1230 	if (ksp == NULL)
1231 		return;
1232 
1233 	zk = ksp->ks_data = kmem_alloc(sizeof (zone_kstat_t), KM_SLEEP);
1234 	ksp->ks_data_size += strlen(zone->zone_name) + 1;
1235 	kstat_named_init(&zk->zk_zonename, "zonename", KSTAT_DATA_STRING);
1236 	kstat_named_setstr(&zk->zk_zonename, zone->zone_name);
1237 	kstat_named_init(&zk->zk_usage, "usage", KSTAT_DATA_UINT64);
1238 	kstat_named_init(&zk->zk_value, "value", KSTAT_DATA_UINT64);
1239 	ksp->ks_update = zone_lockedmem_kstat_update;
1240 	ksp->ks_private = zone;
1241 	kstat_install(ksp);
1242 
1243 	zone->zone_lockedmem_kstat = ksp;
1244 
1245 	ksp = rctl_kstat_create_zone(zone, "swapresv", KSTAT_TYPE_NAMED,
1246 	    sizeof (zone_kstat_t) / sizeof (kstat_named_t),
1247 	    KSTAT_FLAG_VIRTUAL);
1248 
1249 	if (ksp == NULL)
1250 		return;
1251 
1252 	zk = ksp->ks_data = kmem_alloc(sizeof (zone_kstat_t), KM_SLEEP);
1253 	ksp->ks_data_size += strlen(zone->zone_name) + 1;
1254 	kstat_named_init(&zk->zk_zonename, "zonename", KSTAT_DATA_STRING);
1255 	kstat_named_setstr(&zk->zk_zonename, zone->zone_name);
1256 	kstat_named_init(&zk->zk_usage, "usage", KSTAT_DATA_UINT64);
1257 	kstat_named_init(&zk->zk_value, "value", KSTAT_DATA_UINT64);
1258 	ksp->ks_update = zone_swapresv_kstat_update;
1259 	ksp->ks_private = zone;
1260 	kstat_install(ksp);
1261 
1262 	zone->zone_swapresv_kstat = ksp;
1263 }
1264 
1265 static void
1266 zone_kstat_delete(zone_t *zone)
1267 {
1268 	void *data;
1269 
1270 	if (zone->zone_lockedmem_kstat != NULL) {
1271 		data = zone->zone_lockedmem_kstat->ks_data;
1272 		kstat_delete(zone->zone_lockedmem_kstat);
1273 		kmem_free(data, sizeof (zone_kstat_t));
1274 	}
1275 	if (zone->zone_swapresv_kstat != NULL) {
1276 		data = zone->zone_swapresv_kstat->ks_data;
1277 		kstat_delete(zone->zone_swapresv_kstat);
1278 		kmem_free(data, sizeof (zone_kstat_t));
1279 	}
1280 }
1281 
1282 /*
1283  * Called very early on in boot to initialize the ZSD list so that
1284  * zone_key_create() can be called before zone_init().  It also initializes
1285  * portions of zone0 which may be used before zone_init() is called.  The
1286  * variable "global_zone" will be set when zone0 is fully initialized by
1287  * zone_init().
1288  */
1289 void
1290 zone_zsd_init(void)
1291 {
1292 	mutex_init(&zonehash_lock, NULL, MUTEX_DEFAULT, NULL);
1293 	mutex_init(&zsd_key_lock, NULL, MUTEX_DEFAULT, NULL);
1294 	list_create(&zsd_registered_keys, sizeof (struct zsd_entry),
1295 	    offsetof(struct zsd_entry, zsd_linkage));
1296 	list_create(&zone_active, sizeof (zone_t),
1297 	    offsetof(zone_t, zone_linkage));
1298 	list_create(&zone_deathrow, sizeof (zone_t),
1299 	    offsetof(zone_t, zone_linkage));
1300 
1301 	mutex_init(&zone0.zone_lock, NULL, MUTEX_DEFAULT, NULL);
1302 	mutex_init(&zone0.zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
1303 	mutex_init(&zone0.zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
1304 	zone0.zone_shares = 1;
1305 	zone0.zone_nlwps = 0;
1306 	zone0.zone_nlwps_ctl = INT_MAX;
1307 	zone0.zone_locked_mem = 0;
1308 	zone0.zone_locked_mem_ctl = UINT64_MAX;
1309 	ASSERT(zone0.zone_max_swap == 0);
1310 	zone0.zone_max_swap_ctl = UINT64_MAX;
1311 	zone0.zone_shmmax = 0;
1312 	zone0.zone_ipc.ipcq_shmmni = 0;
1313 	zone0.zone_ipc.ipcq_semmni = 0;
1314 	zone0.zone_ipc.ipcq_msgmni = 0;
1315 	zone0.zone_name = GLOBAL_ZONENAME;
1316 	zone0.zone_nodename = utsname.nodename;
1317 	zone0.zone_domain = srpc_domain;
1318 	zone0.zone_ref = 1;
1319 	zone0.zone_id = GLOBAL_ZONEID;
1320 	zone0.zone_status = ZONE_IS_RUNNING;
1321 	zone0.zone_rootpath = "/";
1322 	zone0.zone_rootpathlen = 2;
1323 	zone0.zone_psetid = ZONE_PS_INVAL;
1324 	zone0.zone_ncpus = 0;
1325 	zone0.zone_ncpus_online = 0;
1326 	zone0.zone_proc_initpid = 1;
1327 	zone0.zone_initname = initname;
1328 	zone0.zone_lockedmem_kstat = NULL;
1329 	zone0.zone_swapresv_kstat = NULL;
1330 	list_create(&zone0.zone_zsd, sizeof (struct zsd_entry),
1331 	    offsetof(struct zsd_entry, zsd_linkage));
1332 	list_insert_head(&zone_active, &zone0);
1333 
1334 	/*
1335 	 * The root filesystem is not mounted yet, so zone_rootvp cannot be set
1336 	 * to anything meaningful.  It is assigned to be 'rootdir' in
1337 	 * vfs_mountroot().
1338 	 */
1339 	zone0.zone_rootvp = NULL;
1340 	zone0.zone_vfslist = NULL;
1341 	zone0.zone_bootargs = initargs;
1342 	zone0.zone_privset = kmem_alloc(sizeof (priv_set_t), KM_SLEEP);
1343 	/*
1344 	 * The global zone has all privileges
1345 	 */
1346 	priv_fillset(zone0.zone_privset);
1347 	/*
1348 	 * Add p0 to the global zone
1349 	 */
1350 	zone0.zone_zsched = &p0;
1351 	p0.p_zone = &zone0;
1352 }
1353 
1354 /*
1355  * Compute a hash value based on the contents of the label and the DOI.  The
1356  * hash algorithm is somewhat arbitrary, but is based on the observation that
1357  * humans will likely pick labels that differ by amounts that work out to be
1358  * multiples of the number of hash chains, and thus stirring in some primes
1359  * should help.
1360  */
1361 static uint_t
1362 hash_bylabel(void *hdata, mod_hash_key_t key)
1363 {
1364 	const ts_label_t *lab = (ts_label_t *)key;
1365 	const uint32_t *up, *ue;
1366 	uint_t hash;
1367 	int i;
1368 
1369 	_NOTE(ARGUNUSED(hdata));
1370 
1371 	hash = lab->tsl_doi + (lab->tsl_doi << 1);
1372 	/* we depend on alignment of label, but not representation */
1373 	up = (const uint32_t *)&lab->tsl_label;
1374 	ue = up + sizeof (lab->tsl_label) / sizeof (*up);
1375 	i = 1;
1376 	while (up < ue) {
1377 		/* using 2^n + 1, 1 <= n <= 16 as source of many primes */
1378 		hash += *up + (*up << ((i % 16) + 1));
1379 		up++;
1380 		i++;
1381 	}
1382 	return (hash);
1383 }
1384 
1385 /*
1386  * All that mod_hash cares about here is zero (equal) versus non-zero (not
1387  * equal).  This may need to be changed if less than / greater than is ever
1388  * needed.
1389  */
1390 static int
1391 hash_labelkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
1392 {
1393 	ts_label_t *lab1 = (ts_label_t *)key1;
1394 	ts_label_t *lab2 = (ts_label_t *)key2;
1395 
1396 	return (label_equal(lab1, lab2) ? 0 : 1);
1397 }
1398 
1399 /*
1400  * Called by main() to initialize the zones framework.
1401  */
1402 void
1403 zone_init(void)
1404 {
1405 	rctl_dict_entry_t *rde;
1406 	rctl_val_t *dval;
1407 	rctl_set_t *set;
1408 	rctl_alloc_gp_t *gp;
1409 	rctl_entity_p_t e;
1410 	int res;
1411 
1412 	ASSERT(curproc == &p0);
1413 
1414 	/*
1415 	 * Create ID space for zone IDs.  ID 0 is reserved for the
1416 	 * global zone.
1417 	 */
1418 	zoneid_space = id_space_create("zoneid_space", 1, MAX_ZONEID);
1419 
1420 	/*
1421 	 * Initialize generic zone resource controls, if any.
1422 	 */
1423 	rc_zone_cpu_shares = rctl_register("zone.cpu-shares",
1424 	    RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_NEVER |
1425 	    RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER,
1426 	    FSS_MAXSHARES, FSS_MAXSHARES, &zone_cpu_shares_ops);
1427 
1428 	rc_zone_cpu_cap = rctl_register("zone.cpu-cap",
1429 	    RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_ALWAYS |
1430 	    RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT |RCTL_GLOBAL_SYSLOG_NEVER |
1431 	    RCTL_GLOBAL_INFINITE,
1432 	    MAXCAP, MAXCAP, &zone_cpu_cap_ops);
1433 
1434 	rc_zone_nlwps = rctl_register("zone.max-lwps", RCENTITY_ZONE,
1435 	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
1436 	    INT_MAX, INT_MAX, &zone_lwps_ops);
1437 	/*
1438 	 * System V IPC resource controls
1439 	 */
1440 	rc_zone_msgmni = rctl_register("zone.max-msg-ids",
1441 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1442 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_msgmni_ops);
1443 
1444 	rc_zone_semmni = rctl_register("zone.max-sem-ids",
1445 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1446 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_semmni_ops);
1447 
1448 	rc_zone_shmmni = rctl_register("zone.max-shm-ids",
1449 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1450 	    RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_shmmni_ops);
1451 
1452 	rc_zone_shmmax = rctl_register("zone.max-shm-memory",
1453 	    RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
1454 	    RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX, &zone_shmmax_ops);
1455 
1456 	/*
1457 	 * Create a rctl_val with PRIVILEGED, NOACTION, value = 1.  Then attach
1458 	 * this at the head of the rctl_dict_entry for ``zone.cpu-shares''.
1459 	 */
1460 	dval = kmem_cache_alloc(rctl_val_cache, KM_SLEEP);
1461 	bzero(dval, sizeof (rctl_val_t));
1462 	dval->rcv_value = 1;
1463 	dval->rcv_privilege = RCPRIV_PRIVILEGED;
1464 	dval->rcv_flagaction = RCTL_LOCAL_NOACTION;
1465 	dval->rcv_action_recip_pid = -1;
1466 
1467 	rde = rctl_dict_lookup("zone.cpu-shares");
1468 	(void) rctl_val_list_insert(&rde->rcd_default_value, dval);
1469 
1470 	rc_zone_locked_mem = rctl_register("zone.max-locked-memory",
1471 	    RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES |
1472 	    RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
1473 	    &zone_locked_mem_ops);
1474 
1475 	rc_zone_max_swap = rctl_register("zone.max-swap",
1476 	    RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES |
1477 	    RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
1478 	    &zone_max_swap_ops);
1479 
1480 	/*
1481 	 * Initialize the ``global zone''.
1482 	 */
1483 	set = rctl_set_create();
1484 	gp = rctl_set_init_prealloc(RCENTITY_ZONE);
1485 	mutex_enter(&p0.p_lock);
1486 	e.rcep_p.zone = &zone0;
1487 	e.rcep_t = RCENTITY_ZONE;
1488 	zone0.zone_rctls = rctl_set_init(RCENTITY_ZONE, &p0, &e, set,
1489 	    gp);
1490 
1491 	zone0.zone_nlwps = p0.p_lwpcnt;
1492 	zone0.zone_ntasks = 1;
1493 	mutex_exit(&p0.p_lock);
1494 	zone0.zone_restart_init = B_TRUE;
1495 	zone0.zone_brand = &native_brand;
1496 	rctl_prealloc_destroy(gp);
1497 	/*
1498 	 * pool_default hasn't been initialized yet, so we let pool_init()
1499 	 * take care of making sure the global zone is in the default pool.
1500 	 */
1501 
1502 	/*
1503 	 * Initialize global zone kstats
1504 	 */
1505 	zone_kstat_create(&zone0);
1506 
1507 	/*
1508 	 * Initialize zone label.
1509 	 * mlp are initialized when tnzonecfg is loaded.
1510 	 */
1511 	zone0.zone_slabel = l_admin_low;
1512 	rw_init(&zone0.zone_mlps.mlpl_rwlock, NULL, RW_DEFAULT, NULL);
1513 	label_hold(l_admin_low);
1514 
1515 	mutex_enter(&zonehash_lock);
1516 	zone_uniqid(&zone0);
1517 	ASSERT(zone0.zone_uniqid == GLOBAL_ZONEUNIQID);
1518 
1519 	zonehashbyid = mod_hash_create_idhash("zone_by_id", zone_hash_size,
1520 	    mod_hash_null_valdtor);
1521 	zonehashbyname = mod_hash_create_strhash("zone_by_name",
1522 	    zone_hash_size, mod_hash_null_valdtor);
1523 	/*
1524 	 * maintain zonehashbylabel only for labeled systems
1525 	 */
1526 	if (is_system_labeled())
1527 		zonehashbylabel = mod_hash_create_extended("zone_by_label",
1528 		    zone_hash_size, mod_hash_null_keydtor,
1529 		    mod_hash_null_valdtor, hash_bylabel, NULL,
1530 		    hash_labelkey_cmp, KM_SLEEP);
1531 	zonecount = 1;
1532 
1533 	(void) mod_hash_insert(zonehashbyid, (mod_hash_key_t)GLOBAL_ZONEID,
1534 	    (mod_hash_val_t)&zone0);
1535 	(void) mod_hash_insert(zonehashbyname, (mod_hash_key_t)zone0.zone_name,
1536 	    (mod_hash_val_t)&zone0);
1537 	if (is_system_labeled()) {
1538 		zone0.zone_flags |= ZF_HASHED_LABEL;
1539 		(void) mod_hash_insert(zonehashbylabel,
1540 		    (mod_hash_key_t)zone0.zone_slabel, (mod_hash_val_t)&zone0);
1541 	}
1542 	mutex_exit(&zonehash_lock);
1543 
1544 	/*
1545 	 * We avoid setting zone_kcred until now, since kcred is initialized
1546 	 * sometime after zone_zsd_init() and before zone_init().
1547 	 */
1548 	zone0.zone_kcred = kcred;
1549 	/*
1550 	 * The global zone is fully initialized (except for zone_rootvp which
1551 	 * will be set when the root filesystem is mounted).
1552 	 */
1553 	global_zone = &zone0;
1554 
1555 	/*
1556 	 * Setup an event channel to send zone status change notifications on
1557 	 */
1558 	res = sysevent_evc_bind(ZONE_EVENT_CHANNEL, &zone_event_chan,
1559 	    EVCH_CREAT);
1560 
1561 	if (res)
1562 		panic("Sysevent_evc_bind failed during zone setup.\n");
1563 
1564 }
1565 
1566 static void
1567 zone_free(zone_t *zone)
1568 {
1569 	ASSERT(zone != global_zone);
1570 	ASSERT(zone->zone_ntasks == 0);
1571 	ASSERT(zone->zone_nlwps == 0);
1572 	ASSERT(zone->zone_cred_ref == 0);
1573 	ASSERT(zone->zone_kcred == NULL);
1574 	ASSERT(zone_status_get(zone) == ZONE_IS_DEAD ||
1575 	    zone_status_get(zone) == ZONE_IS_UNINITIALIZED);
1576 
1577 	/*
1578 	 * Remove any zone caps.
1579 	 */
1580 	cpucaps_zone_remove(zone);
1581 
1582 	ASSERT(zone->zone_cpucap == NULL);
1583 
1584 	/* remove from deathrow list */
1585 	if (zone_status_get(zone) == ZONE_IS_DEAD) {
1586 		ASSERT(zone->zone_ref == 0);
1587 		mutex_enter(&zone_deathrow_lock);
1588 		list_remove(&zone_deathrow, zone);
1589 		mutex_exit(&zone_deathrow_lock);
1590 	}
1591 
1592 	zone_free_zsd(zone);
1593 	zone_free_datasets(zone);
1594 
1595 	if (zone->zone_rootvp != NULL)
1596 		VN_RELE(zone->zone_rootvp);
1597 	if (zone->zone_rootpath)
1598 		kmem_free(zone->zone_rootpath, zone->zone_rootpathlen);
1599 	if (zone->zone_name != NULL)
1600 		kmem_free(zone->zone_name, ZONENAME_MAX);
1601 	if (zone->zone_slabel != NULL)
1602 		label_rele(zone->zone_slabel);
1603 	if (zone->zone_nodename != NULL)
1604 		kmem_free(zone->zone_nodename, _SYS_NMLN);
1605 	if (zone->zone_domain != NULL)
1606 		kmem_free(zone->zone_domain, _SYS_NMLN);
1607 	if (zone->zone_privset != NULL)
1608 		kmem_free(zone->zone_privset, sizeof (priv_set_t));
1609 	if (zone->zone_rctls != NULL)
1610 		rctl_set_free(zone->zone_rctls);
1611 	if (zone->zone_bootargs != NULL)
1612 		kmem_free(zone->zone_bootargs, strlen(zone->zone_bootargs) + 1);
1613 	if (zone->zone_initname != NULL)
1614 		kmem_free(zone->zone_initname, strlen(zone->zone_initname) + 1);
1615 	id_free(zoneid_space, zone->zone_id);
1616 	mutex_destroy(&zone->zone_lock);
1617 	cv_destroy(&zone->zone_cv);
1618 	rw_destroy(&zone->zone_mlps.mlpl_rwlock);
1619 	kmem_free(zone, sizeof (zone_t));
1620 }
1621 
1622 /*
1623  * See block comment at the top of this file for information about zone
1624  * status values.
1625  */
1626 /*
1627  * Convenience function for setting zone status.
1628  */
1629 static void
1630 zone_status_set(zone_t *zone, zone_status_t status)
1631 {
1632 
1633 	nvlist_t *nvl = NULL;
1634 	ASSERT(MUTEX_HELD(&zone_status_lock));
1635 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE &&
1636 	    status >= zone_status_get(zone));
1637 
1638 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) ||
1639 	    nvlist_add_string(nvl, ZONE_CB_NAME, zone->zone_name) ||
1640 	    nvlist_add_string(nvl, ZONE_CB_NEWSTATE,
1641 	    zone_status_table[status]) ||
1642 	    nvlist_add_string(nvl, ZONE_CB_OLDSTATE,
1643 	    zone_status_table[zone->zone_status]) ||
1644 	    nvlist_add_int32(nvl, ZONE_CB_ZONEID, zone->zone_id) ||
1645 	    nvlist_add_uint64(nvl, ZONE_CB_TIMESTAMP, (uint64_t)gethrtime()) ||
1646 	    sysevent_evc_publish(zone_event_chan, ZONE_EVENT_STATUS_CLASS,
1647 	    ZONE_EVENT_STATUS_SUBCLASS, "sun.com", "kernel", nvl, EVCH_SLEEP)) {
1648 #ifdef DEBUG
1649 		(void) printf(
1650 		    "Failed to allocate and send zone state change event.\n");
1651 #endif
1652 	}
1653 	nvlist_free(nvl);
1654 
1655 	zone->zone_status = status;
1656 
1657 	cv_broadcast(&zone->zone_cv);
1658 }
1659 
1660 /*
1661  * Public function to retrieve the zone status.  The zone status may
1662  * change after it is retrieved.
1663  */
1664 zone_status_t
1665 zone_status_get(zone_t *zone)
1666 {
1667 	return (zone->zone_status);
1668 }
1669 
1670 static int
1671 zone_set_bootargs(zone_t *zone, const char *zone_bootargs)
1672 {
1673 	char *bootargs = kmem_zalloc(BOOTARGS_MAX, KM_SLEEP);
1674 	int err = 0;
1675 
1676 	ASSERT(zone != global_zone);
1677 	if ((err = copyinstr(zone_bootargs, bootargs, BOOTARGS_MAX, NULL)) != 0)
1678 		goto done;	/* EFAULT or ENAMETOOLONG */
1679 
1680 	if (zone->zone_bootargs != NULL)
1681 		kmem_free(zone->zone_bootargs, strlen(zone->zone_bootargs) + 1);
1682 
1683 	zone->zone_bootargs = kmem_alloc(strlen(bootargs) + 1, KM_SLEEP);
1684 	(void) strcpy(zone->zone_bootargs, bootargs);
1685 
1686 done:
1687 	kmem_free(bootargs, BOOTARGS_MAX);
1688 	return (err);
1689 }
1690 
1691 static int
1692 zone_set_brand(zone_t *zone, const char *brand)
1693 {
1694 	struct brand_attr *attrp;
1695 	brand_t *bp;
1696 
1697 	attrp = kmem_alloc(sizeof (struct brand_attr), KM_SLEEP);
1698 	if (copyin(brand, attrp, sizeof (struct brand_attr)) != 0) {
1699 		kmem_free(attrp, sizeof (struct brand_attr));
1700 		return (EFAULT);
1701 	}
1702 
1703 	bp = brand_register_zone(attrp);
1704 	kmem_free(attrp, sizeof (struct brand_attr));
1705 	if (bp == NULL)
1706 		return (EINVAL);
1707 
1708 	/*
1709 	 * This is the only place where a zone can change it's brand.
1710 	 * We already need to hold zone_status_lock to check the zone
1711 	 * status, so we'll just use that lock to serialize zone
1712 	 * branding requests as well.
1713 	 */
1714 	mutex_enter(&zone_status_lock);
1715 
1716 	/* Re-Branding is not allowed and the zone can't be booted yet */
1717 	if ((ZONE_IS_BRANDED(zone)) ||
1718 	    (zone_status_get(zone) >= ZONE_IS_BOOTING)) {
1719 		mutex_exit(&zone_status_lock);
1720 		brand_unregister_zone(bp);
1721 		return (EINVAL);
1722 	}
1723 
1724 	if (is_system_labeled() &&
1725 	    strncmp(attrp->ba_brandname, NATIVE_BRAND_NAME, MAXNAMELEN) != 0) {
1726 		mutex_exit(&zone_status_lock);
1727 		brand_unregister_zone(bp);
1728 		return (EPERM);
1729 	}
1730 
1731 	zone->zone_brand = bp;
1732 	mutex_exit(&zone_status_lock);
1733 	return (0);
1734 }
1735 
1736 static int
1737 zone_set_initname(zone_t *zone, const char *zone_initname)
1738 {
1739 	char initname[INITNAME_SZ];
1740 	size_t len;
1741 	int err = 0;
1742 
1743 	ASSERT(zone != global_zone);
1744 	if ((err = copyinstr(zone_initname, initname, INITNAME_SZ, &len)) != 0)
1745 		return (err);	/* EFAULT or ENAMETOOLONG */
1746 
1747 	if (zone->zone_initname != NULL)
1748 		kmem_free(zone->zone_initname, strlen(zone->zone_initname) + 1);
1749 
1750 	zone->zone_initname = kmem_alloc(strlen(initname) + 1, KM_SLEEP);
1751 	(void) strcpy(zone->zone_initname, initname);
1752 	return (0);
1753 }
1754 
1755 static int
1756 zone_set_phys_mcap(zone_t *zone, const uint64_t *zone_mcap)
1757 {
1758 	uint64_t mcap;
1759 	int err = 0;
1760 
1761 	if ((err = copyin(zone_mcap, &mcap, sizeof (uint64_t))) == 0)
1762 		zone->zone_phys_mcap = mcap;
1763 
1764 	return (err);
1765 }
1766 
1767 static int
1768 zone_set_sched_class(zone_t *zone, const char *new_class)
1769 {
1770 	char sched_class[PC_CLNMSZ];
1771 	id_t classid;
1772 	int err;
1773 
1774 	ASSERT(zone != global_zone);
1775 	if ((err = copyinstr(new_class, sched_class, PC_CLNMSZ, NULL)) != 0)
1776 		return (err);	/* EFAULT or ENAMETOOLONG */
1777 
1778 	if (getcid(sched_class, &classid) != 0 || classid == syscid)
1779 		return (set_errno(EINVAL));
1780 	zone->zone_defaultcid = classid;
1781 	ASSERT(zone->zone_defaultcid > 0 &&
1782 	    zone->zone_defaultcid < loaded_classes);
1783 
1784 	return (0);
1785 }
1786 
1787 /*
1788  * Block indefinitely waiting for (zone_status >= status)
1789  */
1790 void
1791 zone_status_wait(zone_t *zone, zone_status_t status)
1792 {
1793 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
1794 
1795 	mutex_enter(&zone_status_lock);
1796 	while (zone->zone_status < status) {
1797 		cv_wait(&zone->zone_cv, &zone_status_lock);
1798 	}
1799 	mutex_exit(&zone_status_lock);
1800 }
1801 
1802 /*
1803  * Private CPR-safe version of zone_status_wait().
1804  */
1805 static void
1806 zone_status_wait_cpr(zone_t *zone, zone_status_t status, char *str)
1807 {
1808 	callb_cpr_t cprinfo;
1809 
1810 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
1811 
1812 	CALLB_CPR_INIT(&cprinfo, &zone_status_lock, callb_generic_cpr,
1813 	    str);
1814 	mutex_enter(&zone_status_lock);
1815 	while (zone->zone_status < status) {
1816 		CALLB_CPR_SAFE_BEGIN(&cprinfo);
1817 		cv_wait(&zone->zone_cv, &zone_status_lock);
1818 		CALLB_CPR_SAFE_END(&cprinfo, &zone_status_lock);
1819 	}
1820 	/*
1821 	 * zone_status_lock is implicitly released by the following.
1822 	 */
1823 	CALLB_CPR_EXIT(&cprinfo);
1824 }
1825 
1826 /*
1827  * Block until zone enters requested state or signal is received.  Return (0)
1828  * if signaled, non-zero otherwise.
1829  */
1830 int
1831 zone_status_wait_sig(zone_t *zone, zone_status_t status)
1832 {
1833 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
1834 
1835 	mutex_enter(&zone_status_lock);
1836 	while (zone->zone_status < status) {
1837 		if (!cv_wait_sig(&zone->zone_cv, &zone_status_lock)) {
1838 			mutex_exit(&zone_status_lock);
1839 			return (0);
1840 		}
1841 	}
1842 	mutex_exit(&zone_status_lock);
1843 	return (1);
1844 }
1845 
1846 /*
1847  * Block until the zone enters the requested state or the timeout expires,
1848  * whichever happens first.  Return (-1) if operation timed out, time remaining
1849  * otherwise.
1850  */
1851 clock_t
1852 zone_status_timedwait(zone_t *zone, clock_t tim, zone_status_t status)
1853 {
1854 	clock_t timeleft = 0;
1855 
1856 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
1857 
1858 	mutex_enter(&zone_status_lock);
1859 	while (zone->zone_status < status && timeleft != -1) {
1860 		timeleft = cv_timedwait(&zone->zone_cv, &zone_status_lock, tim);
1861 	}
1862 	mutex_exit(&zone_status_lock);
1863 	return (timeleft);
1864 }
1865 
1866 /*
1867  * Block until the zone enters the requested state, the current process is
1868  * signaled,  or the timeout expires, whichever happens first.  Return (-1) if
1869  * operation timed out, 0 if signaled, time remaining otherwise.
1870  */
1871 clock_t
1872 zone_status_timedwait_sig(zone_t *zone, clock_t tim, zone_status_t status)
1873 {
1874 	clock_t timeleft = tim - lbolt;
1875 
1876 	ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
1877 
1878 	mutex_enter(&zone_status_lock);
1879 	while (zone->zone_status < status) {
1880 		timeleft = cv_timedwait_sig(&zone->zone_cv, &zone_status_lock,
1881 		    tim);
1882 		if (timeleft <= 0)
1883 			break;
1884 	}
1885 	mutex_exit(&zone_status_lock);
1886 	return (timeleft);
1887 }
1888 
1889 /*
1890  * Zones have two reference counts: one for references from credential
1891  * structures (zone_cred_ref), and one (zone_ref) for everything else.
1892  * This is so we can allow a zone to be rebooted while there are still
1893  * outstanding cred references, since certain drivers cache dblks (which
1894  * implicitly results in cached creds).  We wait for zone_ref to drop to
1895  * 0 (actually 1), but not zone_cred_ref.  The zone structure itself is
1896  * later freed when the zone_cred_ref drops to 0, though nothing other
1897  * than the zone id and privilege set should be accessed once the zone
1898  * is "dead".
1899  *
1900  * A debugging flag, zone_wait_for_cred, can be set to a non-zero value
1901  * to force halt/reboot to block waiting for the zone_cred_ref to drop
1902  * to 0.  This can be useful to flush out other sources of cached creds
1903  * that may be less innocuous than the driver case.
1904  */
1905 
1906 int zone_wait_for_cred = 0;
1907 
1908 static void
1909 zone_hold_locked(zone_t *z)
1910 {
1911 	ASSERT(MUTEX_HELD(&z->zone_lock));
1912 	z->zone_ref++;
1913 	ASSERT(z->zone_ref != 0);
1914 }
1915 
1916 void
1917 zone_hold(zone_t *z)
1918 {
1919 	mutex_enter(&z->zone_lock);
1920 	zone_hold_locked(z);
1921 	mutex_exit(&z->zone_lock);
1922 }
1923 
1924 /*
1925  * If the non-cred ref count drops to 1 and either the cred ref count
1926  * is 0 or we aren't waiting for cred references, the zone is ready to
1927  * be destroyed.
1928  */
1929 #define	ZONE_IS_UNREF(zone)	((zone)->zone_ref == 1 && \
1930 	    (!zone_wait_for_cred || (zone)->zone_cred_ref == 0))
1931 
1932 void
1933 zone_rele(zone_t *z)
1934 {
1935 	boolean_t wakeup;
1936 
1937 	mutex_enter(&z->zone_lock);
1938 	ASSERT(z->zone_ref != 0);
1939 	z->zone_ref--;
1940 	if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
1941 		/* no more refs, free the structure */
1942 		mutex_exit(&z->zone_lock);
1943 		zone_free(z);
1944 		return;
1945 	}
1946 	/* signal zone_destroy so the zone can finish halting */
1947 	wakeup = (ZONE_IS_UNREF(z) && zone_status_get(z) >= ZONE_IS_DEAD);
1948 	mutex_exit(&z->zone_lock);
1949 
1950 	if (wakeup) {
1951 		/*
1952 		 * Grabbing zonehash_lock here effectively synchronizes with
1953 		 * zone_destroy() to avoid missed signals.
1954 		 */
1955 		mutex_enter(&zonehash_lock);
1956 		cv_broadcast(&zone_destroy_cv);
1957 		mutex_exit(&zonehash_lock);
1958 	}
1959 }
1960 
1961 void
1962 zone_cred_hold(zone_t *z)
1963 {
1964 	mutex_enter(&z->zone_lock);
1965 	z->zone_cred_ref++;
1966 	ASSERT(z->zone_cred_ref != 0);
1967 	mutex_exit(&z->zone_lock);
1968 }
1969 
1970 void
1971 zone_cred_rele(zone_t *z)
1972 {
1973 	boolean_t wakeup;
1974 
1975 	mutex_enter(&z->zone_lock);
1976 	ASSERT(z->zone_cred_ref != 0);
1977 	z->zone_cred_ref--;
1978 	if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
1979 		/* no more refs, free the structure */
1980 		mutex_exit(&z->zone_lock);
1981 		zone_free(z);
1982 		return;
1983 	}
1984 	/*
1985 	 * If zone_destroy is waiting for the cred references to drain
1986 	 * out, and they have, signal it.
1987 	 */
1988 	wakeup = (zone_wait_for_cred && ZONE_IS_UNREF(z) &&
1989 	    zone_status_get(z) >= ZONE_IS_DEAD);
1990 	mutex_exit(&z->zone_lock);
1991 
1992 	if (wakeup) {
1993 		/*
1994 		 * Grabbing zonehash_lock here effectively synchronizes with
1995 		 * zone_destroy() to avoid missed signals.
1996 		 */
1997 		mutex_enter(&zonehash_lock);
1998 		cv_broadcast(&zone_destroy_cv);
1999 		mutex_exit(&zonehash_lock);
2000 	}
2001 }
2002 
2003 void
2004 zone_task_hold(zone_t *z)
2005 {
2006 	mutex_enter(&z->zone_lock);
2007 	z->zone_ntasks++;
2008 	ASSERT(z->zone_ntasks != 0);
2009 	mutex_exit(&z->zone_lock);
2010 }
2011 
2012 void
2013 zone_task_rele(zone_t *zone)
2014 {
2015 	uint_t refcnt;
2016 
2017 	mutex_enter(&zone->zone_lock);
2018 	ASSERT(zone->zone_ntasks != 0);
2019 	refcnt = --zone->zone_ntasks;
2020 	if (refcnt > 1)	{	/* Common case */
2021 		mutex_exit(&zone->zone_lock);
2022 		return;
2023 	}
2024 	zone_hold_locked(zone);	/* so we can use the zone_t later */
2025 	mutex_exit(&zone->zone_lock);
2026 	if (refcnt == 1) {
2027 		/*
2028 		 * See if the zone is shutting down.
2029 		 */
2030 		mutex_enter(&zone_status_lock);
2031 		if (zone_status_get(zone) != ZONE_IS_SHUTTING_DOWN) {
2032 			goto out;
2033 		}
2034 
2035 		/*
2036 		 * Make sure the ntasks didn't change since we
2037 		 * dropped zone_lock.
2038 		 */
2039 		mutex_enter(&zone->zone_lock);
2040 		if (refcnt != zone->zone_ntasks) {
2041 			mutex_exit(&zone->zone_lock);
2042 			goto out;
2043 		}
2044 		mutex_exit(&zone->zone_lock);
2045 
2046 		/*
2047 		 * No more user processes in the zone.  The zone is empty.
2048 		 */
2049 		zone_status_set(zone, ZONE_IS_EMPTY);
2050 		goto out;
2051 	}
2052 
2053 	ASSERT(refcnt == 0);
2054 	/*
2055 	 * zsched has exited; the zone is dead.
2056 	 */
2057 	zone->zone_zsched = NULL;		/* paranoia */
2058 	mutex_enter(&zone_status_lock);
2059 	zone_status_set(zone, ZONE_IS_DEAD);
2060 out:
2061 	mutex_exit(&zone_status_lock);
2062 	zone_rele(zone);
2063 }
2064 
2065 zoneid_t
2066 getzoneid(void)
2067 {
2068 	return (curproc->p_zone->zone_id);
2069 }
2070 
2071 /*
2072  * Internal versions of zone_find_by_*().  These don't zone_hold() or
2073  * check the validity of a zone's state.
2074  */
2075 static zone_t *
2076 zone_find_all_by_id(zoneid_t zoneid)
2077 {
2078 	mod_hash_val_t hv;
2079 	zone_t *zone = NULL;
2080 
2081 	ASSERT(MUTEX_HELD(&zonehash_lock));
2082 
2083 	if (mod_hash_find(zonehashbyid,
2084 	    (mod_hash_key_t)(uintptr_t)zoneid, &hv) == 0)
2085 		zone = (zone_t *)hv;
2086 	return (zone);
2087 }
2088 
2089 static zone_t *
2090 zone_find_all_by_label(const ts_label_t *label)
2091 {
2092 	mod_hash_val_t hv;
2093 	zone_t *zone = NULL;
2094 
2095 	ASSERT(MUTEX_HELD(&zonehash_lock));
2096 
2097 	/*
2098 	 * zonehashbylabel is not maintained for unlabeled systems
2099 	 */
2100 	if (!is_system_labeled())
2101 		return (NULL);
2102 	if (mod_hash_find(zonehashbylabel, (mod_hash_key_t)label, &hv) == 0)
2103 		zone = (zone_t *)hv;
2104 	return (zone);
2105 }
2106 
2107 static zone_t *
2108 zone_find_all_by_name(char *name)
2109 {
2110 	mod_hash_val_t hv;
2111 	zone_t *zone = NULL;
2112 
2113 	ASSERT(MUTEX_HELD(&zonehash_lock));
2114 
2115 	if (mod_hash_find(zonehashbyname, (mod_hash_key_t)name, &hv) == 0)
2116 		zone = (zone_t *)hv;
2117 	return (zone);
2118 }
2119 
2120 /*
2121  * Public interface for looking up a zone by zoneid.  Only returns the zone if
2122  * it is fully initialized, and has not yet begun the zone_destroy() sequence.
2123  * Caller must call zone_rele() once it is done with the zone.
2124  *
2125  * The zone may begin the zone_destroy() sequence immediately after this
2126  * function returns, but may be safely used until zone_rele() is called.
2127  */
2128 zone_t *
2129 zone_find_by_id(zoneid_t zoneid)
2130 {
2131 	zone_t *zone;
2132 	zone_status_t status;
2133 
2134 	mutex_enter(&zonehash_lock);
2135 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
2136 		mutex_exit(&zonehash_lock);
2137 		return (NULL);
2138 	}
2139 	status = zone_status_get(zone);
2140 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
2141 		/*
2142 		 * For all practical purposes the zone doesn't exist.
2143 		 */
2144 		mutex_exit(&zonehash_lock);
2145 		return (NULL);
2146 	}
2147 	zone_hold(zone);
2148 	mutex_exit(&zonehash_lock);
2149 	return (zone);
2150 }
2151 
2152 /*
2153  * Similar to zone_find_by_id, but using zone label as the key.
2154  */
2155 zone_t *
2156 zone_find_by_label(const ts_label_t *label)
2157 {
2158 	zone_t *zone;
2159 	zone_status_t status;
2160 
2161 	mutex_enter(&zonehash_lock);
2162 	if ((zone = zone_find_all_by_label(label)) == NULL) {
2163 		mutex_exit(&zonehash_lock);
2164 		return (NULL);
2165 	}
2166 
2167 	status = zone_status_get(zone);
2168 	if (status > ZONE_IS_DOWN) {
2169 		/*
2170 		 * For all practical purposes the zone doesn't exist.
2171 		 */
2172 		mutex_exit(&zonehash_lock);
2173 		return (NULL);
2174 	}
2175 	zone_hold(zone);
2176 	mutex_exit(&zonehash_lock);
2177 	return (zone);
2178 }
2179 
2180 /*
2181  * Similar to zone_find_by_id, but using zone name as the key.
2182  */
2183 zone_t *
2184 zone_find_by_name(char *name)
2185 {
2186 	zone_t *zone;
2187 	zone_status_t status;
2188 
2189 	mutex_enter(&zonehash_lock);
2190 	if ((zone = zone_find_all_by_name(name)) == NULL) {
2191 		mutex_exit(&zonehash_lock);
2192 		return (NULL);
2193 	}
2194 	status = zone_status_get(zone);
2195 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
2196 		/*
2197 		 * For all practical purposes the zone doesn't exist.
2198 		 */
2199 		mutex_exit(&zonehash_lock);
2200 		return (NULL);
2201 	}
2202 	zone_hold(zone);
2203 	mutex_exit(&zonehash_lock);
2204 	return (zone);
2205 }
2206 
2207 /*
2208  * Similar to zone_find_by_id(), using the path as a key.  For instance,
2209  * if there is a zone "foo" rooted at /foo/root, and the path argument
2210  * is "/foo/root/proc", it will return the held zone_t corresponding to
2211  * zone "foo".
2212  *
2213  * zone_find_by_path() always returns a non-NULL value, since at the
2214  * very least every path will be contained in the global zone.
2215  *
2216  * As with the other zone_find_by_*() functions, the caller is
2217  * responsible for zone_rele()ing the return value of this function.
2218  */
2219 zone_t *
2220 zone_find_by_path(const char *path)
2221 {
2222 	zone_t *zone;
2223 	zone_t *zret = NULL;
2224 	zone_status_t status;
2225 
2226 	if (path == NULL) {
2227 		/*
2228 		 * Call from rootconf().
2229 		 */
2230 		zone_hold(global_zone);
2231 		return (global_zone);
2232 	}
2233 	ASSERT(*path == '/');
2234 	mutex_enter(&zonehash_lock);
2235 	for (zone = list_head(&zone_active); zone != NULL;
2236 	    zone = list_next(&zone_active, zone)) {
2237 		if (ZONE_PATH_VISIBLE(path, zone))
2238 			zret = zone;
2239 	}
2240 	ASSERT(zret != NULL);
2241 	status = zone_status_get(zret);
2242 	if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
2243 		/*
2244 		 * Zone practically doesn't exist.
2245 		 */
2246 		zret = global_zone;
2247 	}
2248 	zone_hold(zret);
2249 	mutex_exit(&zonehash_lock);
2250 	return (zret);
2251 }
2252 
2253 /*
2254  * Get the number of cpus visible to this zone.  The system-wide global
2255  * 'ncpus' is returned if pools are disabled, the caller is in the
2256  * global zone, or a NULL zone argument is passed in.
2257  */
2258 int
2259 zone_ncpus_get(zone_t *zone)
2260 {
2261 	int myncpus = zone == NULL ? 0 : zone->zone_ncpus;
2262 
2263 	return (myncpus != 0 ? myncpus : ncpus);
2264 }
2265 
2266 /*
2267  * Get the number of online cpus visible to this zone.  The system-wide
2268  * global 'ncpus_online' is returned if pools are disabled, the caller
2269  * is in the global zone, or a NULL zone argument is passed in.
2270  */
2271 int
2272 zone_ncpus_online_get(zone_t *zone)
2273 {
2274 	int myncpus_online = zone == NULL ? 0 : zone->zone_ncpus_online;
2275 
2276 	return (myncpus_online != 0 ? myncpus_online : ncpus_online);
2277 }
2278 
2279 /*
2280  * Return the pool to which the zone is currently bound.
2281  */
2282 pool_t *
2283 zone_pool_get(zone_t *zone)
2284 {
2285 	ASSERT(pool_lock_held());
2286 
2287 	return (zone->zone_pool);
2288 }
2289 
2290 /*
2291  * Set the zone's pool pointer and update the zone's visibility to match
2292  * the resources in the new pool.
2293  */
2294 void
2295 zone_pool_set(zone_t *zone, pool_t *pool)
2296 {
2297 	ASSERT(pool_lock_held());
2298 	ASSERT(MUTEX_HELD(&cpu_lock));
2299 
2300 	zone->zone_pool = pool;
2301 	zone_pset_set(zone, pool->pool_pset->pset_id);
2302 }
2303 
2304 /*
2305  * Return the cached value of the id of the processor set to which the
2306  * zone is currently bound.  The value will be ZONE_PS_INVAL if the pools
2307  * facility is disabled.
2308  */
2309 psetid_t
2310 zone_pset_get(zone_t *zone)
2311 {
2312 	ASSERT(MUTEX_HELD(&cpu_lock));
2313 
2314 	return (zone->zone_psetid);
2315 }
2316 
2317 /*
2318  * Set the cached value of the id of the processor set to which the zone
2319  * is currently bound.  Also update the zone's visibility to match the
2320  * resources in the new processor set.
2321  */
2322 void
2323 zone_pset_set(zone_t *zone, psetid_t newpsetid)
2324 {
2325 	psetid_t oldpsetid;
2326 
2327 	ASSERT(MUTEX_HELD(&cpu_lock));
2328 	oldpsetid = zone_pset_get(zone);
2329 
2330 	if (oldpsetid == newpsetid)
2331 		return;
2332 	/*
2333 	 * Global zone sees all.
2334 	 */
2335 	if (zone != global_zone) {
2336 		zone->zone_psetid = newpsetid;
2337 		if (newpsetid != ZONE_PS_INVAL)
2338 			pool_pset_visibility_add(newpsetid, zone);
2339 		if (oldpsetid != ZONE_PS_INVAL)
2340 			pool_pset_visibility_remove(oldpsetid, zone);
2341 	}
2342 	/*
2343 	 * Disabling pools, so we should start using the global values
2344 	 * for ncpus and ncpus_online.
2345 	 */
2346 	if (newpsetid == ZONE_PS_INVAL) {
2347 		zone->zone_ncpus = 0;
2348 		zone->zone_ncpus_online = 0;
2349 	}
2350 }
2351 
2352 /*
2353  * Walk the list of active zones and issue the provided callback for
2354  * each of them.
2355  *
2356  * Caller must not be holding any locks that may be acquired under
2357  * zonehash_lock.  See comment at the beginning of the file for a list of
2358  * common locks and their interactions with zones.
2359  */
2360 int
2361 zone_walk(int (*cb)(zone_t *, void *), void *data)
2362 {
2363 	zone_t *zone;
2364 	int ret = 0;
2365 	zone_status_t status;
2366 
2367 	mutex_enter(&zonehash_lock);
2368 	for (zone = list_head(&zone_active); zone != NULL;
2369 	    zone = list_next(&zone_active, zone)) {
2370 		/*
2371 		 * Skip zones that shouldn't be externally visible.
2372 		 */
2373 		status = zone_status_get(zone);
2374 		if (status < ZONE_IS_READY || status > ZONE_IS_DOWN)
2375 			continue;
2376 		/*
2377 		 * Bail immediately if any callback invocation returns a
2378 		 * non-zero value.
2379 		 */
2380 		ret = (*cb)(zone, data);
2381 		if (ret != 0)
2382 			break;
2383 	}
2384 	mutex_exit(&zonehash_lock);
2385 	return (ret);
2386 }
2387 
2388 static int
2389 zone_set_root(zone_t *zone, const char *upath)
2390 {
2391 	vnode_t *vp;
2392 	int trycount;
2393 	int error = 0;
2394 	char *path;
2395 	struct pathname upn, pn;
2396 	size_t pathlen;
2397 
2398 	if ((error = pn_get((char *)upath, UIO_USERSPACE, &upn)) != 0)
2399 		return (error);
2400 
2401 	pn_alloc(&pn);
2402 
2403 	/* prevent infinite loop */
2404 	trycount = 10;
2405 	for (;;) {
2406 		if (--trycount <= 0) {
2407 			error = ESTALE;
2408 			goto out;
2409 		}
2410 
2411 		if ((error = lookuppn(&upn, &pn, FOLLOW, NULLVPP, &vp)) == 0) {
2412 			/*
2413 			 * VOP_ACCESS() may cover 'vp' with a new
2414 			 * filesystem, if 'vp' is an autoFS vnode.
2415 			 * Get the new 'vp' if so.
2416 			 */
2417 			if ((error = VOP_ACCESS(vp, VEXEC, 0, CRED())) == 0 &&
2418 			    (vp->v_vfsmountedhere == NULL ||
2419 			    (error = traverse(&vp)) == 0)) {
2420 				pathlen = pn.pn_pathlen + 2;
2421 				path = kmem_alloc(pathlen, KM_SLEEP);
2422 				(void) strncpy(path, pn.pn_path,
2423 				    pn.pn_pathlen + 1);
2424 				path[pathlen - 2] = '/';
2425 				path[pathlen - 1] = '\0';
2426 				pn_free(&pn);
2427 				pn_free(&upn);
2428 
2429 				/* Success! */
2430 				break;
2431 			}
2432 			VN_RELE(vp);
2433 		}
2434 		if (error != ESTALE)
2435 			goto out;
2436 	}
2437 
2438 	ASSERT(error == 0);
2439 	zone->zone_rootvp = vp;		/* we hold a reference to vp */
2440 	zone->zone_rootpath = path;
2441 	zone->zone_rootpathlen = pathlen;
2442 	if (pathlen > 5 && strcmp(path + pathlen - 5, "/lu/") == 0)
2443 		zone->zone_flags |= ZF_IS_SCRATCH;
2444 	return (0);
2445 
2446 out:
2447 	pn_free(&pn);
2448 	pn_free(&upn);
2449 	return (error);
2450 }
2451 
2452 #define	isalnum(c)	(((c) >= '0' && (c) <= '9') || \
2453 			((c) >= 'a' && (c) <= 'z') || \
2454 			((c) >= 'A' && (c) <= 'Z'))
2455 
2456 static int
2457 zone_set_name(zone_t *zone, const char *uname)
2458 {
2459 	char *kname = kmem_zalloc(ZONENAME_MAX, KM_SLEEP);
2460 	size_t len;
2461 	int i, err;
2462 
2463 	if ((err = copyinstr(uname, kname, ZONENAME_MAX, &len)) != 0) {
2464 		kmem_free(kname, ZONENAME_MAX);
2465 		return (err);	/* EFAULT or ENAMETOOLONG */
2466 	}
2467 
2468 	/* must be less than ZONENAME_MAX */
2469 	if (len == ZONENAME_MAX && kname[ZONENAME_MAX - 1] != '\0') {
2470 		kmem_free(kname, ZONENAME_MAX);
2471 		return (EINVAL);
2472 	}
2473 
2474 	/*
2475 	 * Name must start with an alphanumeric and must contain only
2476 	 * alphanumerics, '-', '_' and '.'.
2477 	 */
2478 	if (!isalnum(kname[0])) {
2479 		kmem_free(kname, ZONENAME_MAX);
2480 		return (EINVAL);
2481 	}
2482 	for (i = 1; i < len - 1; i++) {
2483 		if (!isalnum(kname[i]) && kname[i] != '-' && kname[i] != '_' &&
2484 		    kname[i] != '.') {
2485 			kmem_free(kname, ZONENAME_MAX);
2486 			return (EINVAL);
2487 		}
2488 	}
2489 
2490 	zone->zone_name = kname;
2491 	return (0);
2492 }
2493 
2494 /*
2495  * Similar to thread_create(), but makes sure the thread is in the appropriate
2496  * zone's zsched process (curproc->p_zone->zone_zsched) before returning.
2497  */
2498 /*ARGSUSED*/
2499 kthread_t *
2500 zthread_create(
2501     caddr_t stk,
2502     size_t stksize,
2503     void (*proc)(),
2504     void *arg,
2505     size_t len,
2506     pri_t pri)
2507 {
2508 	kthread_t *t;
2509 	zone_t *zone = curproc->p_zone;
2510 	proc_t *pp = zone->zone_zsched;
2511 
2512 	zone_hold(zone);	/* Reference to be dropped when thread exits */
2513 
2514 	/*
2515 	 * No-one should be trying to create threads if the zone is shutting
2516 	 * down and there aren't any kernel threads around.  See comment
2517 	 * in zthread_exit().
2518 	 */
2519 	ASSERT(!(zone->zone_kthreads == NULL &&
2520 	    zone_status_get(zone) >= ZONE_IS_EMPTY));
2521 	/*
2522 	 * Create a thread, but don't let it run until we've finished setting
2523 	 * things up.
2524 	 */
2525 	t = thread_create(stk, stksize, proc, arg, len, pp, TS_STOPPED, pri);
2526 	ASSERT(t->t_forw == NULL);
2527 	mutex_enter(&zone_status_lock);
2528 	if (zone->zone_kthreads == NULL) {
2529 		t->t_forw = t->t_back = t;
2530 	} else {
2531 		kthread_t *tx = zone->zone_kthreads;
2532 
2533 		t->t_forw = tx;
2534 		t->t_back = tx->t_back;
2535 		tx->t_back->t_forw = t;
2536 		tx->t_back = t;
2537 	}
2538 	zone->zone_kthreads = t;
2539 	mutex_exit(&zone_status_lock);
2540 
2541 	mutex_enter(&pp->p_lock);
2542 	t->t_proc_flag |= TP_ZTHREAD;
2543 	project_rele(t->t_proj);
2544 	t->t_proj = project_hold(pp->p_task->tk_proj);
2545 
2546 	/*
2547 	 * Setup complete, let it run.
2548 	 */
2549 	thread_lock(t);
2550 	t->t_schedflag |= TS_ALLSTART;
2551 	setrun_locked(t);
2552 	thread_unlock(t);
2553 
2554 	mutex_exit(&pp->p_lock);
2555 
2556 	return (t);
2557 }
2558 
2559 /*
2560  * Similar to thread_exit().  Must be called by threads created via
2561  * zthread_exit().
2562  */
2563 void
2564 zthread_exit(void)
2565 {
2566 	kthread_t *t = curthread;
2567 	proc_t *pp = curproc;
2568 	zone_t *zone = pp->p_zone;
2569 
2570 	mutex_enter(&zone_status_lock);
2571 
2572 	/*
2573 	 * Reparent to p0
2574 	 */
2575 	kpreempt_disable();
2576 	mutex_enter(&pp->p_lock);
2577 	t->t_proc_flag &= ~TP_ZTHREAD;
2578 	t->t_procp = &p0;
2579 	hat_thread_exit(t);
2580 	mutex_exit(&pp->p_lock);
2581 	kpreempt_enable();
2582 
2583 	if (t->t_back == t) {
2584 		ASSERT(t->t_forw == t);
2585 		/*
2586 		 * If the zone is empty, once the thread count
2587 		 * goes to zero no further kernel threads can be
2588 		 * created.  This is because if the creator is a process
2589 		 * in the zone, then it must have exited before the zone
2590 		 * state could be set to ZONE_IS_EMPTY.
2591 		 * Otherwise, if the creator is a kernel thread in the
2592 		 * zone, the thread count is non-zero.
2593 		 *
2594 		 * This really means that non-zone kernel threads should
2595 		 * not create zone kernel threads.
2596 		 */
2597 		zone->zone_kthreads = NULL;
2598 		if (zone_status_get(zone) == ZONE_IS_EMPTY) {
2599 			zone_status_set(zone, ZONE_IS_DOWN);
2600 			/*
2601 			 * Remove any CPU caps on this zone.
2602 			 */
2603 			cpucaps_zone_remove(zone);
2604 		}
2605 	} else {
2606 		t->t_forw->t_back = t->t_back;
2607 		t->t_back->t_forw = t->t_forw;
2608 		if (zone->zone_kthreads == t)
2609 			zone->zone_kthreads = t->t_forw;
2610 	}
2611 	mutex_exit(&zone_status_lock);
2612 	zone_rele(zone);
2613 	thread_exit();
2614 	/* NOTREACHED */
2615 }
2616 
2617 static void
2618 zone_chdir(vnode_t *vp, vnode_t **vpp, proc_t *pp)
2619 {
2620 	vnode_t *oldvp;
2621 
2622 	/* we're going to hold a reference here to the directory */
2623 	VN_HOLD(vp);
2624 
2625 #ifdef C2_AUDIT
2626 	if (audit_active)	/* update abs cwd/root path see c2audit.c */
2627 		audit_chdirec(vp, vpp);
2628 #endif
2629 
2630 	mutex_enter(&pp->p_lock);
2631 	oldvp = *vpp;
2632 	*vpp = vp;
2633 	mutex_exit(&pp->p_lock);
2634 	if (oldvp != NULL)
2635 		VN_RELE(oldvp);
2636 }
2637 
2638 /*
2639  * Convert an rctl value represented by an nvlist_t into an rctl_val_t.
2640  */
2641 static int
2642 nvlist2rctlval(nvlist_t *nvl, rctl_val_t *rv)
2643 {
2644 	nvpair_t *nvp = NULL;
2645 	boolean_t priv_set = B_FALSE;
2646 	boolean_t limit_set = B_FALSE;
2647 	boolean_t action_set = B_FALSE;
2648 
2649 	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
2650 		const char *name;
2651 		uint64_t ui64;
2652 
2653 		name = nvpair_name(nvp);
2654 		if (nvpair_type(nvp) != DATA_TYPE_UINT64)
2655 			return (EINVAL);
2656 		(void) nvpair_value_uint64(nvp, &ui64);
2657 		if (strcmp(name, "privilege") == 0) {
2658 			/*
2659 			 * Currently only privileged values are allowed, but
2660 			 * this may change in the future.
2661 			 */
2662 			if (ui64 != RCPRIV_PRIVILEGED)
2663 				return (EINVAL);
2664 			rv->rcv_privilege = ui64;
2665 			priv_set = B_TRUE;
2666 		} else if (strcmp(name, "limit") == 0) {
2667 			rv->rcv_value = ui64;
2668 			limit_set = B_TRUE;
2669 		} else if (strcmp(name, "action") == 0) {
2670 			if (ui64 != RCTL_LOCAL_NOACTION &&
2671 			    ui64 != RCTL_LOCAL_DENY)
2672 				return (EINVAL);
2673 			rv->rcv_flagaction = ui64;
2674 			action_set = B_TRUE;
2675 		} else {
2676 			return (EINVAL);
2677 		}
2678 	}
2679 
2680 	if (!(priv_set && limit_set && action_set))
2681 		return (EINVAL);
2682 	rv->rcv_action_signal = 0;
2683 	rv->rcv_action_recipient = NULL;
2684 	rv->rcv_action_recip_pid = -1;
2685 	rv->rcv_firing_time = 0;
2686 
2687 	return (0);
2688 }
2689 
2690 /*
2691  * Non-global zone version of start_init.
2692  */
2693 void
2694 zone_start_init(void)
2695 {
2696 	proc_t *p = ttoproc(curthread);
2697 	zone_t *z = p->p_zone;
2698 
2699 	ASSERT(!INGLOBALZONE(curproc));
2700 
2701 	/*
2702 	 * For all purposes (ZONE_ATTR_INITPID and restart_init),
2703 	 * storing just the pid of init is sufficient.
2704 	 */
2705 	z->zone_proc_initpid = p->p_pid;
2706 
2707 	/*
2708 	 * We maintain zone_boot_err so that we can return the cause of the
2709 	 * failure back to the caller of the zone_boot syscall.
2710 	 */
2711 	p->p_zone->zone_boot_err = start_init_common();
2712 
2713 	mutex_enter(&zone_status_lock);
2714 	if (z->zone_boot_err != 0) {
2715 		/*
2716 		 * Make sure we are still in the booting state-- we could have
2717 		 * raced and already be shutting down, or even further along.
2718 		 */
2719 		if (zone_status_get(z) == ZONE_IS_BOOTING) {
2720 			zone_status_set(z, ZONE_IS_SHUTTING_DOWN);
2721 		}
2722 		mutex_exit(&zone_status_lock);
2723 		/* It's gone bad, dispose of the process */
2724 		if (proc_exit(CLD_EXITED, z->zone_boot_err) != 0) {
2725 			mutex_enter(&p->p_lock);
2726 			ASSERT(p->p_flag & SEXITLWPS);
2727 			lwp_exit();
2728 		}
2729 	} else {
2730 		if (zone_status_get(z) == ZONE_IS_BOOTING)
2731 			zone_status_set(z, ZONE_IS_RUNNING);
2732 		mutex_exit(&zone_status_lock);
2733 		/* cause the process to return to userland. */
2734 		lwp_rtt();
2735 	}
2736 }
2737 
2738 struct zsched_arg {
2739 	zone_t *zone;
2740 	nvlist_t *nvlist;
2741 };
2742 
2743 /*
2744  * Per-zone "sched" workalike.  The similarity to "sched" doesn't have
2745  * anything to do with scheduling, but rather with the fact that
2746  * per-zone kernel threads are parented to zsched, just like regular
2747  * kernel threads are parented to sched (p0).
2748  *
2749  * zsched is also responsible for launching init for the zone.
2750  */
2751 static void
2752 zsched(void *arg)
2753 {
2754 	struct zsched_arg *za = arg;
2755 	proc_t *pp = curproc;
2756 	proc_t *initp = proc_init;
2757 	zone_t *zone = za->zone;
2758 	cred_t *cr, *oldcred;
2759 	rctl_set_t *set;
2760 	rctl_alloc_gp_t *gp;
2761 	contract_t *ct = NULL;
2762 	task_t *tk, *oldtk;
2763 	rctl_entity_p_t e;
2764 	kproject_t *pj;
2765 	boolean_t disable_plat_interposition = B_FALSE;
2766 
2767 	nvlist_t *nvl = za->nvlist;
2768 	nvpair_t *nvp = NULL;
2769 
2770 	bcopy("zsched", PTOU(pp)->u_psargs, sizeof ("zsched"));
2771 	bcopy("zsched", PTOU(pp)->u_comm, sizeof ("zsched"));
2772 	PTOU(pp)->u_argc = 0;
2773 	PTOU(pp)->u_argv = NULL;
2774 	PTOU(pp)->u_envp = NULL;
2775 	closeall(P_FINFO(pp));
2776 
2777 	/*
2778 	 * We are this zone's "zsched" process.  As the zone isn't generally
2779 	 * visible yet we don't need to grab any locks before initializing its
2780 	 * zone_proc pointer.
2781 	 */
2782 	zone_hold(zone);  /* this hold is released by zone_destroy() */
2783 	zone->zone_zsched = pp;
2784 	mutex_enter(&pp->p_lock);
2785 	pp->p_zone = zone;
2786 	mutex_exit(&pp->p_lock);
2787 
2788 	/*
2789 	 * Disassociate process from its 'parent'; parent ourselves to init
2790 	 * (pid 1) and change other values as needed.
2791 	 */
2792 	sess_create();
2793 
2794 	mutex_enter(&pidlock);
2795 	proc_detach(pp);
2796 	pp->p_ppid = 1;
2797 	pp->p_flag |= SZONETOP;
2798 	pp->p_ancpid = 1;
2799 	pp->p_parent = initp;
2800 	pp->p_psibling = NULL;
2801 	if (initp->p_child)
2802 		initp->p_child->p_psibling = pp;
2803 	pp->p_sibling = initp->p_child;
2804 	initp->p_child = pp;
2805 
2806 	/* Decrement what newproc() incremented. */
2807 	upcount_dec(crgetruid(CRED()), GLOBAL_ZONEID);
2808 	/*
2809 	 * Our credentials are about to become kcred-like, so we don't care
2810 	 * about the caller's ruid.
2811 	 */
2812 	upcount_inc(crgetruid(kcred), zone->zone_id);
2813 	mutex_exit(&pidlock);
2814 
2815 	/*
2816 	 * getting out of global zone, so decrement lwp counts
2817 	 */
2818 	pj = pp->p_task->tk_proj;
2819 	mutex_enter(&global_zone->zone_nlwps_lock);
2820 	pj->kpj_nlwps -= pp->p_lwpcnt;
2821 	global_zone->zone_nlwps -= pp->p_lwpcnt;
2822 	mutex_exit(&global_zone->zone_nlwps_lock);
2823 
2824 	/*
2825 	 * Decrement locked memory counts on old zone and project.
2826 	 */
2827 	mutex_enter(&global_zone->zone_mem_lock);
2828 	global_zone->zone_locked_mem -= pp->p_locked_mem;
2829 	pj->kpj_data.kpd_locked_mem -= pp->p_locked_mem;
2830 	mutex_exit(&global_zone->zone_mem_lock);
2831 
2832 	/*
2833 	 * Create and join a new task in project '0' of this zone.
2834 	 *
2835 	 * We don't need to call holdlwps() since we know we're the only lwp in
2836 	 * this process.
2837 	 *
2838 	 * task_join() returns with p_lock held.
2839 	 */
2840 	tk = task_create(0, zone);
2841 	mutex_enter(&cpu_lock);
2842 	oldtk = task_join(tk, 0);
2843 
2844 	pj = pp->p_task->tk_proj;
2845 
2846 	mutex_enter(&zone->zone_mem_lock);
2847 	zone->zone_locked_mem += pp->p_locked_mem;
2848 	pj->kpj_data.kpd_locked_mem += pp->p_locked_mem;
2849 	mutex_exit(&zone->zone_mem_lock);
2850 
2851 	/*
2852 	 * add lwp counts to zsched's zone, and increment project's task count
2853 	 * due to the task created in the above tasksys_settaskid
2854 	 */
2855 
2856 	mutex_enter(&zone->zone_nlwps_lock);
2857 	pj->kpj_nlwps += pp->p_lwpcnt;
2858 	pj->kpj_ntasks += 1;
2859 	zone->zone_nlwps += pp->p_lwpcnt;
2860 	mutex_exit(&zone->zone_nlwps_lock);
2861 
2862 	mutex_exit(&curproc->p_lock);
2863 	mutex_exit(&cpu_lock);
2864 	task_rele(oldtk);
2865 
2866 	/*
2867 	 * The process was created by a process in the global zone, hence the
2868 	 * credentials are wrong.  We might as well have kcred-ish credentials.
2869 	 */
2870 	cr = zone->zone_kcred;
2871 	crhold(cr);
2872 	mutex_enter(&pp->p_crlock);
2873 	oldcred = pp->p_cred;
2874 	pp->p_cred = cr;
2875 	mutex_exit(&pp->p_crlock);
2876 	crfree(oldcred);
2877 
2878 	/*
2879 	 * Hold credentials again (for thread)
2880 	 */
2881 	crhold(cr);
2882 
2883 	/*
2884 	 * p_lwpcnt can't change since this is a kernel process.
2885 	 */
2886 	crset(pp, cr);
2887 
2888 	/*
2889 	 * Chroot
2890 	 */
2891 	zone_chdir(zone->zone_rootvp, &PTOU(pp)->u_cdir, pp);
2892 	zone_chdir(zone->zone_rootvp, &PTOU(pp)->u_rdir, pp);
2893 
2894 	/*
2895 	 * Initialize zone's rctl set.
2896 	 */
2897 	set = rctl_set_create();
2898 	gp = rctl_set_init_prealloc(RCENTITY_ZONE);
2899 	mutex_enter(&pp->p_lock);
2900 	e.rcep_p.zone = zone;
2901 	e.rcep_t = RCENTITY_ZONE;
2902 	zone->zone_rctls = rctl_set_init(RCENTITY_ZONE, pp, &e, set, gp);
2903 	mutex_exit(&pp->p_lock);
2904 	rctl_prealloc_destroy(gp);
2905 
2906 	/*
2907 	 * Apply the rctls passed in to zone_create().  This is basically a list
2908 	 * assignment: all of the old values are removed and the new ones
2909 	 * inserted.  That is, if an empty list is passed in, all values are
2910 	 * removed.
2911 	 */
2912 	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
2913 		rctl_dict_entry_t *rde;
2914 		rctl_hndl_t hndl;
2915 		char *name;
2916 		nvlist_t **nvlarray;
2917 		uint_t i, nelem;
2918 		int error;	/* For ASSERT()s */
2919 
2920 		name = nvpair_name(nvp);
2921 		hndl = rctl_hndl_lookup(name);
2922 		ASSERT(hndl != -1);
2923 		rde = rctl_dict_lookup_hndl(hndl);
2924 		ASSERT(rde != NULL);
2925 
2926 		for (; /* ever */; ) {
2927 			rctl_val_t oval;
2928 
2929 			mutex_enter(&pp->p_lock);
2930 			error = rctl_local_get(hndl, NULL, &oval, pp);
2931 			mutex_exit(&pp->p_lock);
2932 			ASSERT(error == 0);	/* Can't fail for RCTL_FIRST */
2933 			ASSERT(oval.rcv_privilege != RCPRIV_BASIC);
2934 			if (oval.rcv_privilege == RCPRIV_SYSTEM)
2935 				break;
2936 			mutex_enter(&pp->p_lock);
2937 			error = rctl_local_delete(hndl, &oval, pp);
2938 			mutex_exit(&pp->p_lock);
2939 			ASSERT(error == 0);
2940 		}
2941 		error = nvpair_value_nvlist_array(nvp, &nvlarray, &nelem);
2942 		ASSERT(error == 0);
2943 		for (i = 0; i < nelem; i++) {
2944 			rctl_val_t *nvalp;
2945 
2946 			nvalp = kmem_cache_alloc(rctl_val_cache, KM_SLEEP);
2947 			error = nvlist2rctlval(nvlarray[i], nvalp);
2948 			ASSERT(error == 0);
2949 			/*
2950 			 * rctl_local_insert can fail if the value being
2951 			 * inserted is a duplicate; this is OK.
2952 			 */
2953 			mutex_enter(&pp->p_lock);
2954 			if (rctl_local_insert(hndl, nvalp, pp) != 0)
2955 				kmem_cache_free(rctl_val_cache, nvalp);
2956 			mutex_exit(&pp->p_lock);
2957 		}
2958 	}
2959 	/*
2960 	 * Tell the world that we're done setting up.
2961 	 *
2962 	 * At this point we want to set the zone status to ZONE_IS_READY
2963 	 * and atomically set the zone's processor set visibility.  Once
2964 	 * we drop pool_lock() this zone will automatically get updated
2965 	 * to reflect any future changes to the pools configuration.
2966 	 */
2967 	pool_lock();
2968 	mutex_enter(&cpu_lock);
2969 	mutex_enter(&zonehash_lock);
2970 	zone_uniqid(zone);
2971 	zone_zsd_configure(zone);
2972 	if (pool_state == POOL_ENABLED)
2973 		zone_pset_set(zone, pool_default->pool_pset->pset_id);
2974 	mutex_enter(&zone_status_lock);
2975 	ASSERT(zone_status_get(zone) == ZONE_IS_UNINITIALIZED);
2976 	zone_status_set(zone, ZONE_IS_READY);
2977 	mutex_exit(&zone_status_lock);
2978 	mutex_exit(&zonehash_lock);
2979 	mutex_exit(&cpu_lock);
2980 	pool_unlock();
2981 
2982 	/*
2983 	 * Once we see the zone transition to the ZONE_IS_BOOTING state,
2984 	 * we launch init, and set the state to running.
2985 	 */
2986 	zone_status_wait_cpr(zone, ZONE_IS_BOOTING, "zsched");
2987 
2988 	if (zone_status_get(zone) == ZONE_IS_BOOTING) {
2989 		id_t cid;
2990 
2991 		/* enable platform wide brand interposition mechanisms */
2992 		if (ZONE_IS_BRANDED(zone)) {
2993 			disable_plat_interposition = B_TRUE;
2994 			brand_plat_interposition_enable(zone->zone_brand);
2995 		}
2996 
2997 		/*
2998 		 * Ok, this is a little complicated.  We need to grab the
2999 		 * zone's pool's scheduling class ID; note that by now, we
3000 		 * are already bound to a pool if we need to be (zoneadmd
3001 		 * will have done that to us while we're in the READY
3002 		 * state).  *But* the scheduling class for the zone's 'init'
3003 		 * must be explicitly passed to newproc, which doesn't
3004 		 * respect pool bindings.
3005 		 *
3006 		 * We hold the pool_lock across the call to newproc() to
3007 		 * close the obvious race: the pool's scheduling class
3008 		 * could change before we manage to create the LWP with
3009 		 * classid 'cid'.
3010 		 */
3011 		pool_lock();
3012 		if (zone->zone_defaultcid > 0)
3013 			cid = zone->zone_defaultcid;
3014 		else
3015 			cid = pool_get_class(zone->zone_pool);
3016 		if (cid == -1)
3017 			cid = defaultcid;
3018 
3019 		/*
3020 		 * If this fails, zone_boot will ultimately fail.  The
3021 		 * state of the zone will be set to SHUTTING_DOWN-- userland
3022 		 * will have to tear down the zone, and fail, or try again.
3023 		 */
3024 		if ((zone->zone_boot_err = newproc(zone_start_init, NULL, cid,
3025 		    minclsyspri - 1, &ct)) != 0) {
3026 			mutex_enter(&zone_status_lock);
3027 			zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
3028 			mutex_exit(&zone_status_lock);
3029 		}
3030 		pool_unlock();
3031 	}
3032 
3033 	/*
3034 	 * Wait for zone_destroy() to be called.  This is what we spend
3035 	 * most of our life doing.
3036 	 */
3037 	zone_status_wait_cpr(zone, ZONE_IS_DYING, "zsched");
3038 
3039 	/* disable platform wide brand interposition mechanisms */
3040 	if (disable_plat_interposition)
3041 		brand_plat_interposition_disable(zone->zone_brand);
3042 
3043 	if (ct)
3044 		/*
3045 		 * At this point the process contract should be empty.
3046 		 * (Though if it isn't, it's not the end of the world.)
3047 		 */
3048 		VERIFY(contract_abandon(ct, curproc, B_TRUE) == 0);
3049 
3050 	/*
3051 	 * Allow kcred to be freed when all referring processes
3052 	 * (including this one) go away.  We can't just do this in
3053 	 * zone_free because we need to wait for the zone_cred_ref to
3054 	 * drop to 0 before calling zone_free, and the existence of
3055 	 * zone_kcred will prevent that.  Thus, we call crfree here to
3056 	 * balance the crdup in zone_create.  The crhold calls earlier
3057 	 * in zsched will be dropped when the thread and process exit.
3058 	 */
3059 	crfree(zone->zone_kcred);
3060 	zone->zone_kcred = NULL;
3061 
3062 	exit(CLD_EXITED, 0);
3063 }
3064 
3065 /*
3066  * Helper function to determine if there are any submounts of the
3067  * provided path.  Used to make sure the zone doesn't "inherit" any
3068  * mounts from before it is created.
3069  */
3070 static uint_t
3071 zone_mount_count(const char *rootpath)
3072 {
3073 	vfs_t *vfsp;
3074 	uint_t count = 0;
3075 	size_t rootpathlen = strlen(rootpath);
3076 
3077 	/*
3078 	 * Holding zonehash_lock prevents race conditions with
3079 	 * vfs_list_add()/vfs_list_remove() since we serialize with
3080 	 * zone_find_by_path().
3081 	 */
3082 	ASSERT(MUTEX_HELD(&zonehash_lock));
3083 	/*
3084 	 * The rootpath must end with a '/'
3085 	 */
3086 	ASSERT(rootpath[rootpathlen - 1] == '/');
3087 
3088 	/*
3089 	 * This intentionally does not count the rootpath itself if that
3090 	 * happens to be a mount point.
3091 	 */
3092 	vfs_list_read_lock();
3093 	vfsp = rootvfs;
3094 	do {
3095 		if (strncmp(rootpath, refstr_value(vfsp->vfs_mntpt),
3096 		    rootpathlen) == 0)
3097 			count++;
3098 		vfsp = vfsp->vfs_next;
3099 	} while (vfsp != rootvfs);
3100 	vfs_list_unlock();
3101 	return (count);
3102 }
3103 
3104 /*
3105  * Helper function to make sure that a zone created on 'rootpath'
3106  * wouldn't end up containing other zones' rootpaths.
3107  */
3108 static boolean_t
3109 zone_is_nested(const char *rootpath)
3110 {
3111 	zone_t *zone;
3112 	size_t rootpathlen = strlen(rootpath);
3113 	size_t len;
3114 
3115 	ASSERT(MUTEX_HELD(&zonehash_lock));
3116 
3117 	for (zone = list_head(&zone_active); zone != NULL;
3118 	    zone = list_next(&zone_active, zone)) {
3119 		if (zone == global_zone)
3120 			continue;
3121 		len = strlen(zone->zone_rootpath);
3122 		if (strncmp(rootpath, zone->zone_rootpath,
3123 		    MIN(rootpathlen, len)) == 0)
3124 			return (B_TRUE);
3125 	}
3126 	return (B_FALSE);
3127 }
3128 
3129 static int
3130 zone_set_privset(zone_t *zone, const priv_set_t *zone_privs,
3131     size_t zone_privssz)
3132 {
3133 	priv_set_t *privs = kmem_alloc(sizeof (priv_set_t), KM_SLEEP);
3134 
3135 	if (zone_privssz < sizeof (priv_set_t))
3136 		return (set_errno(ENOMEM));
3137 
3138 	if (copyin(zone_privs, privs, sizeof (priv_set_t))) {
3139 		kmem_free(privs, sizeof (priv_set_t));
3140 		return (EFAULT);
3141 	}
3142 
3143 	zone->zone_privset = privs;
3144 	return (0);
3145 }
3146 
3147 /*
3148  * We make creative use of nvlists to pass in rctls from userland.  The list is
3149  * a list of the following structures:
3150  *
3151  * (name = rctl_name, value = nvpair_list_array)
3152  *
3153  * Where each element of the nvpair_list_array is of the form:
3154  *
3155  * [(name = "privilege", value = RCPRIV_PRIVILEGED),
3156  * 	(name = "limit", value = uint64_t),
3157  * 	(name = "action", value = (RCTL_LOCAL_NOACTION || RCTL_LOCAL_DENY))]
3158  */
3159 static int
3160 parse_rctls(caddr_t ubuf, size_t buflen, nvlist_t **nvlp)
3161 {
3162 	nvpair_t *nvp = NULL;
3163 	nvlist_t *nvl = NULL;
3164 	char *kbuf;
3165 	int error;
3166 	rctl_val_t rv;
3167 
3168 	*nvlp = NULL;
3169 
3170 	if (buflen == 0)
3171 		return (0);
3172 
3173 	if ((kbuf = kmem_alloc(buflen, KM_NOSLEEP)) == NULL)
3174 		return (ENOMEM);
3175 	if (copyin(ubuf, kbuf, buflen)) {
3176 		error = EFAULT;
3177 		goto out;
3178 	}
3179 	if (nvlist_unpack(kbuf, buflen, &nvl, KM_SLEEP) != 0) {
3180 		/*
3181 		 * nvl may have been allocated/free'd, but the value set to
3182 		 * non-NULL, so we reset it here.
3183 		 */
3184 		nvl = NULL;
3185 		error = EINVAL;
3186 		goto out;
3187 	}
3188 	while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
3189 		rctl_dict_entry_t *rde;
3190 		rctl_hndl_t hndl;
3191 		nvlist_t **nvlarray;
3192 		uint_t i, nelem;
3193 		char *name;
3194 
3195 		error = EINVAL;
3196 		name = nvpair_name(nvp);
3197 		if (strncmp(nvpair_name(nvp), "zone.", sizeof ("zone.") - 1)
3198 		    != 0 || nvpair_type(nvp) != DATA_TYPE_NVLIST_ARRAY) {
3199 			goto out;
3200 		}
3201 		if ((hndl = rctl_hndl_lookup(name)) == -1) {
3202 			goto out;
3203 		}
3204 		rde = rctl_dict_lookup_hndl(hndl);
3205 		error = nvpair_value_nvlist_array(nvp, &nvlarray, &nelem);
3206 		ASSERT(error == 0);
3207 		for (i = 0; i < nelem; i++) {
3208 			if (error = nvlist2rctlval(nvlarray[i], &rv))
3209 				goto out;
3210 		}
3211 		if (rctl_invalid_value(rde, &rv)) {
3212 			error = EINVAL;
3213 			goto out;
3214 		}
3215 	}
3216 	error = 0;
3217 	*nvlp = nvl;
3218 out:
3219 	kmem_free(kbuf, buflen);
3220 	if (error && nvl != NULL)
3221 		nvlist_free(nvl);
3222 	return (error);
3223 }
3224 
3225 int
3226 zone_create_error(int er_error, int er_ext, int *er_out) {
3227 	if (er_out != NULL) {
3228 		if (copyout(&er_ext, er_out, sizeof (int))) {
3229 			return (set_errno(EFAULT));
3230 		}
3231 	}
3232 	return (set_errno(er_error));
3233 }
3234 
3235 static int
3236 zone_set_label(zone_t *zone, const bslabel_t *lab, uint32_t doi)
3237 {
3238 	ts_label_t *tsl;
3239 	bslabel_t blab;
3240 
3241 	/* Get label from user */
3242 	if (copyin(lab, &blab, sizeof (blab)) != 0)
3243 		return (EFAULT);
3244 	tsl = labelalloc(&blab, doi, KM_NOSLEEP);
3245 	if (tsl == NULL)
3246 		return (ENOMEM);
3247 
3248 	zone->zone_slabel = tsl;
3249 	return (0);
3250 }
3251 
3252 /*
3253  * Parses a comma-separated list of ZFS datasets into a per-zone dictionary.
3254  */
3255 static int
3256 parse_zfs(zone_t *zone, caddr_t ubuf, size_t buflen)
3257 {
3258 	char *kbuf;
3259 	char *dataset, *next;
3260 	zone_dataset_t *zd;
3261 	size_t len;
3262 
3263 	if (ubuf == NULL || buflen == 0)
3264 		return (0);
3265 
3266 	if ((kbuf = kmem_alloc(buflen, KM_NOSLEEP)) == NULL)
3267 		return (ENOMEM);
3268 
3269 	if (copyin(ubuf, kbuf, buflen) != 0) {
3270 		kmem_free(kbuf, buflen);
3271 		return (EFAULT);
3272 	}
3273 
3274 	dataset = next = kbuf;
3275 	for (;;) {
3276 		zd = kmem_alloc(sizeof (zone_dataset_t), KM_SLEEP);
3277 
3278 		next = strchr(dataset, ',');
3279 
3280 		if (next == NULL)
3281 			len = strlen(dataset);
3282 		else
3283 			len = next - dataset;
3284 
3285 		zd->zd_dataset = kmem_alloc(len + 1, KM_SLEEP);
3286 		bcopy(dataset, zd->zd_dataset, len);
3287 		zd->zd_dataset[len] = '\0';
3288 
3289 		list_insert_head(&zone->zone_datasets, zd);
3290 
3291 		if (next == NULL)
3292 			break;
3293 
3294 		dataset = next + 1;
3295 	}
3296 
3297 	kmem_free(kbuf, buflen);
3298 	return (0);
3299 }
3300 
3301 /*
3302  * System call to create/initialize a new zone named 'zone_name', rooted
3303  * at 'zone_root', with a zone-wide privilege limit set of 'zone_privs',
3304  * and initialized with the zone-wide rctls described in 'rctlbuf', and
3305  * with labeling set by 'match', 'doi', and 'label'.
3306  *
3307  * If extended error is non-null, we may use it to return more detailed
3308  * error information.
3309  */
3310 static zoneid_t
3311 zone_create(const char *zone_name, const char *zone_root,
3312     const priv_set_t *zone_privs, size_t zone_privssz,
3313     caddr_t rctlbuf, size_t rctlbufsz,
3314     caddr_t zfsbuf, size_t zfsbufsz, int *extended_error,
3315     int match, uint32_t doi, const bslabel_t *label,
3316     int flags)
3317 {
3318 	struct zsched_arg zarg;
3319 	nvlist_t *rctls = NULL;
3320 	proc_t *pp = curproc;
3321 	zone_t *zone, *ztmp;
3322 	zoneid_t zoneid;
3323 	int error;
3324 	int error2 = 0;
3325 	char *str;
3326 	cred_t *zkcr;
3327 	boolean_t insert_label_hash;
3328 
3329 	if (secpolicy_zone_config(CRED()) != 0)
3330 		return (set_errno(EPERM));
3331 
3332 	/* can't boot zone from within chroot environment */
3333 	if (PTOU(pp)->u_rdir != NULL && PTOU(pp)->u_rdir != rootdir)
3334 		return (zone_create_error(ENOTSUP, ZE_CHROOTED,
3335 		    extended_error));
3336 
3337 	zone = kmem_zalloc(sizeof (zone_t), KM_SLEEP);
3338 	zoneid = zone->zone_id = id_alloc(zoneid_space);
3339 	zone->zone_status = ZONE_IS_UNINITIALIZED;
3340 	zone->zone_pool = pool_default;
3341 	zone->zone_pool_mod = gethrtime();
3342 	zone->zone_psetid = ZONE_PS_INVAL;
3343 	zone->zone_ncpus = 0;
3344 	zone->zone_ncpus_online = 0;
3345 	zone->zone_restart_init = B_TRUE;
3346 	zone->zone_brand = &native_brand;
3347 	zone->zone_initname = NULL;
3348 	mutex_init(&zone->zone_lock, NULL, MUTEX_DEFAULT, NULL);
3349 	mutex_init(&zone->zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
3350 	mutex_init(&zone->zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
3351 	cv_init(&zone->zone_cv, NULL, CV_DEFAULT, NULL);
3352 	list_create(&zone->zone_zsd, sizeof (struct zsd_entry),
3353 	    offsetof(struct zsd_entry, zsd_linkage));
3354 	list_create(&zone->zone_datasets, sizeof (zone_dataset_t),
3355 	    offsetof(zone_dataset_t, zd_linkage));
3356 	rw_init(&zone->zone_mlps.mlpl_rwlock, NULL, RW_DEFAULT, NULL);
3357 
3358 	if (flags & ZCF_NET_EXCL) {
3359 		zone->zone_flags |= ZF_NET_EXCL;
3360 	}
3361 
3362 	if ((error = zone_set_name(zone, zone_name)) != 0) {
3363 		zone_free(zone);
3364 		return (zone_create_error(error, 0, extended_error));
3365 	}
3366 
3367 	if ((error = zone_set_root(zone, zone_root)) != 0) {
3368 		zone_free(zone);
3369 		return (zone_create_error(error, 0, extended_error));
3370 	}
3371 	if ((error = zone_set_privset(zone, zone_privs, zone_privssz)) != 0) {
3372 		zone_free(zone);
3373 		return (zone_create_error(error, 0, extended_error));
3374 	}
3375 
3376 	/* initialize node name to be the same as zone name */
3377 	zone->zone_nodename = kmem_alloc(_SYS_NMLN, KM_SLEEP);
3378 	(void) strncpy(zone->zone_nodename, zone->zone_name, _SYS_NMLN);
3379 	zone->zone_nodename[_SYS_NMLN - 1] = '\0';
3380 
3381 	zone->zone_domain = kmem_alloc(_SYS_NMLN, KM_SLEEP);
3382 	zone->zone_domain[0] = '\0';
3383 	zone->zone_shares = 1;
3384 	zone->zone_shmmax = 0;
3385 	zone->zone_ipc.ipcq_shmmni = 0;
3386 	zone->zone_ipc.ipcq_semmni = 0;
3387 	zone->zone_ipc.ipcq_msgmni = 0;
3388 	zone->zone_bootargs = NULL;
3389 	zone->zone_initname =
3390 	    kmem_alloc(strlen(zone_default_initname) + 1, KM_SLEEP);
3391 	(void) strcpy(zone->zone_initname, zone_default_initname);
3392 	zone->zone_nlwps = 0;
3393 	zone->zone_nlwps_ctl = INT_MAX;
3394 	zone->zone_locked_mem = 0;
3395 	zone->zone_locked_mem_ctl = UINT64_MAX;
3396 	zone->zone_max_swap = 0;
3397 	zone->zone_max_swap_ctl = UINT64_MAX;
3398 	zone0.zone_lockedmem_kstat = NULL;
3399 	zone0.zone_swapresv_kstat = NULL;
3400 
3401 	/*
3402 	 * Zsched initializes the rctls.
3403 	 */
3404 	zone->zone_rctls = NULL;
3405 
3406 	if ((error = parse_rctls(rctlbuf, rctlbufsz, &rctls)) != 0) {
3407 		zone_free(zone);
3408 		return (zone_create_error(error, 0, extended_error));
3409 	}
3410 
3411 	if ((error = parse_zfs(zone, zfsbuf, zfsbufsz)) != 0) {
3412 		zone_free(zone);
3413 		return (set_errno(error));
3414 	}
3415 
3416 	/*
3417 	 * Read in the trusted system parameters:
3418 	 * match flag and sensitivity label.
3419 	 */
3420 	zone->zone_match = match;
3421 	if (is_system_labeled() && !(zone->zone_flags & ZF_IS_SCRATCH)) {
3422 		error = zone_set_label(zone, label, doi);
3423 		if (error != 0) {
3424 			zone_free(zone);
3425 			return (set_errno(error));
3426 		}
3427 		insert_label_hash = B_TRUE;
3428 	} else {
3429 		/* all zones get an admin_low label if system is not labeled */
3430 		zone->zone_slabel = l_admin_low;
3431 		label_hold(l_admin_low);
3432 		insert_label_hash = B_FALSE;
3433 	}
3434 
3435 	/*
3436 	 * Stop all lwps since that's what normally happens as part of fork().
3437 	 * This needs to happen before we grab any locks to avoid deadlock
3438 	 * (another lwp in the process could be waiting for the held lock).
3439 	 */
3440 	if (curthread != pp->p_agenttp && !holdlwps(SHOLDFORK)) {
3441 		zone_free(zone);
3442 		if (rctls)
3443 			nvlist_free(rctls);
3444 		return (zone_create_error(error, 0, extended_error));
3445 	}
3446 
3447 	if (block_mounts() == 0) {
3448 		mutex_enter(&pp->p_lock);
3449 		if (curthread != pp->p_agenttp)
3450 			continuelwps(pp);
3451 		mutex_exit(&pp->p_lock);
3452 		zone_free(zone);
3453 		if (rctls)
3454 			nvlist_free(rctls);
3455 		return (zone_create_error(error, 0, extended_error));
3456 	}
3457 
3458 	/*
3459 	 * Set up credential for kernel access.  After this, any errors
3460 	 * should go through the dance in errout rather than calling
3461 	 * zone_free directly.
3462 	 */
3463 	zone->zone_kcred = crdup(kcred);
3464 	crsetzone(zone->zone_kcred, zone);
3465 	priv_intersect(zone->zone_privset, &CR_PPRIV(zone->zone_kcred));
3466 	priv_intersect(zone->zone_privset, &CR_EPRIV(zone->zone_kcred));
3467 	priv_intersect(zone->zone_privset, &CR_IPRIV(zone->zone_kcred));
3468 	priv_intersect(zone->zone_privset, &CR_LPRIV(zone->zone_kcred));
3469 
3470 	mutex_enter(&zonehash_lock);
3471 	/*
3472 	 * Make sure zone doesn't already exist.
3473 	 *
3474 	 * If the system and zone are labeled,
3475 	 * make sure no other zone exists that has the same label.
3476 	 */
3477 	if ((ztmp = zone_find_all_by_name(zone->zone_name)) != NULL ||
3478 	    (insert_label_hash &&
3479 	    (ztmp = zone_find_all_by_label(zone->zone_slabel)) != NULL)) {
3480 		zone_status_t status;
3481 
3482 		status = zone_status_get(ztmp);
3483 		if (status == ZONE_IS_READY || status == ZONE_IS_RUNNING)
3484 			error = EEXIST;
3485 		else
3486 			error = EBUSY;
3487 		goto errout;
3488 	}
3489 
3490 	/*
3491 	 * Don't allow zone creations which would cause one zone's rootpath to
3492 	 * be accessible from that of another (non-global) zone.
3493 	 */
3494 	if (zone_is_nested(zone->zone_rootpath)) {
3495 		error = EBUSY;
3496 		goto errout;
3497 	}
3498 
3499 	ASSERT(zonecount != 0);		/* check for leaks */
3500 	if (zonecount + 1 > maxzones) {
3501 		error = ENOMEM;
3502 		goto errout;
3503 	}
3504 
3505 	if (zone_mount_count(zone->zone_rootpath) != 0) {
3506 		error = EBUSY;
3507 		error2 = ZE_AREMOUNTS;
3508 		goto errout;
3509 	}
3510 
3511 	/*
3512 	 * Zone is still incomplete, but we need to drop all locks while
3513 	 * zsched() initializes this zone's kernel process.  We
3514 	 * optimistically add the zone to the hashtable and associated
3515 	 * lists so a parallel zone_create() doesn't try to create the
3516 	 * same zone.
3517 	 */
3518 	zonecount++;
3519 	(void) mod_hash_insert(zonehashbyid,
3520 	    (mod_hash_key_t)(uintptr_t)zone->zone_id,
3521 	    (mod_hash_val_t)(uintptr_t)zone);
3522 	str = kmem_alloc(strlen(zone->zone_name) + 1, KM_SLEEP);
3523 	(void) strcpy(str, zone->zone_name);
3524 	(void) mod_hash_insert(zonehashbyname, (mod_hash_key_t)str,
3525 	    (mod_hash_val_t)(uintptr_t)zone);
3526 	if (insert_label_hash) {
3527 		(void) mod_hash_insert(zonehashbylabel,
3528 		    (mod_hash_key_t)zone->zone_slabel, (mod_hash_val_t)zone);
3529 		zone->zone_flags |= ZF_HASHED_LABEL;
3530 	}
3531 
3532 	/*
3533 	 * Insert into active list.  At this point there are no 'hold's
3534 	 * on the zone, but everyone else knows not to use it, so we can
3535 	 * continue to use it.  zsched() will do a zone_hold() if the
3536 	 * newproc() is successful.
3537 	 */
3538 	list_insert_tail(&zone_active, zone);
3539 	mutex_exit(&zonehash_lock);
3540 
3541 	zarg.zone = zone;
3542 	zarg.nvlist = rctls;
3543 	/*
3544 	 * The process, task, and project rctls are probably wrong;
3545 	 * we need an interface to get the default values of all rctls,
3546 	 * and initialize zsched appropriately.  I'm not sure that that
3547 	 * makes much of a difference, though.
3548 	 */
3549 	if (error = newproc(zsched, (void *)&zarg, syscid, minclsyspri, NULL)) {
3550 		/*
3551 		 * We need to undo all globally visible state.
3552 		 */
3553 		mutex_enter(&zonehash_lock);
3554 		list_remove(&zone_active, zone);
3555 		if (zone->zone_flags & ZF_HASHED_LABEL) {
3556 			ASSERT(zone->zone_slabel != NULL);
3557 			(void) mod_hash_destroy(zonehashbylabel,
3558 			    (mod_hash_key_t)zone->zone_slabel);
3559 		}
3560 		(void) mod_hash_destroy(zonehashbyname,
3561 		    (mod_hash_key_t)(uintptr_t)zone->zone_name);
3562 		(void) mod_hash_destroy(zonehashbyid,
3563 		    (mod_hash_key_t)(uintptr_t)zone->zone_id);
3564 		ASSERT(zonecount > 1);
3565 		zonecount--;
3566 		goto errout;
3567 	}
3568 
3569 	/*
3570 	 * Zone creation can't fail from now on.
3571 	 */
3572 
3573 	/*
3574 	 * Create zone kstats
3575 	 */
3576 	zone_kstat_create(zone);
3577 
3578 	/*
3579 	 * Let the other lwps continue.
3580 	 */
3581 	mutex_enter(&pp->p_lock);
3582 	if (curthread != pp->p_agenttp)
3583 		continuelwps(pp);
3584 	mutex_exit(&pp->p_lock);
3585 
3586 	/*
3587 	 * Wait for zsched to finish initializing the zone.
3588 	 */
3589 	zone_status_wait(zone, ZONE_IS_READY);
3590 	/*
3591 	 * The zone is fully visible, so we can let mounts progress.
3592 	 */
3593 	resume_mounts();
3594 	if (rctls)
3595 		nvlist_free(rctls);
3596 
3597 	return (zoneid);
3598 
3599 errout:
3600 	mutex_exit(&zonehash_lock);
3601 	/*
3602 	 * Let the other lwps continue.
3603 	 */
3604 	mutex_enter(&pp->p_lock);
3605 	if (curthread != pp->p_agenttp)
3606 		continuelwps(pp);
3607 	mutex_exit(&pp->p_lock);
3608 
3609 	resume_mounts();
3610 	if (rctls)
3611 		nvlist_free(rctls);
3612 	/*
3613 	 * There is currently one reference to the zone, a cred_ref from
3614 	 * zone_kcred.  To free the zone, we call crfree, which will call
3615 	 * zone_cred_rele, which will call zone_free.
3616 	 */
3617 	ASSERT(zone->zone_cred_ref == 1);	/* for zone_kcred */
3618 	ASSERT(zone->zone_kcred->cr_ref == 1);
3619 	ASSERT(zone->zone_ref == 0);
3620 	zkcr = zone->zone_kcred;
3621 	zone->zone_kcred = NULL;
3622 	crfree(zkcr);				/* triggers call to zone_free */
3623 	return (zone_create_error(error, error2, extended_error));
3624 }
3625 
3626 /*
3627  * Cause the zone to boot.  This is pretty simple, since we let zoneadmd do
3628  * the heavy lifting.  initname is the path to the program to launch
3629  * at the "top" of the zone; if this is NULL, we use the system default,
3630  * which is stored at zone_default_initname.
3631  */
3632 static int
3633 zone_boot(zoneid_t zoneid)
3634 {
3635 	int err;
3636 	zone_t *zone;
3637 
3638 	if (secpolicy_zone_config(CRED()) != 0)
3639 		return (set_errno(EPERM));
3640 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
3641 		return (set_errno(EINVAL));
3642 
3643 	mutex_enter(&zonehash_lock);
3644 	/*
3645 	 * Look for zone under hash lock to prevent races with calls to
3646 	 * zone_shutdown, zone_destroy, etc.
3647 	 */
3648 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
3649 		mutex_exit(&zonehash_lock);
3650 		return (set_errno(EINVAL));
3651 	}
3652 
3653 	mutex_enter(&zone_status_lock);
3654 	if (zone_status_get(zone) != ZONE_IS_READY) {
3655 		mutex_exit(&zone_status_lock);
3656 		mutex_exit(&zonehash_lock);
3657 		return (set_errno(EINVAL));
3658 	}
3659 	zone_status_set(zone, ZONE_IS_BOOTING);
3660 	mutex_exit(&zone_status_lock);
3661 
3662 	zone_hold(zone);	/* so we can use the zone_t later */
3663 	mutex_exit(&zonehash_lock);
3664 
3665 	if (zone_status_wait_sig(zone, ZONE_IS_RUNNING) == 0) {
3666 		zone_rele(zone);
3667 		return (set_errno(EINTR));
3668 	}
3669 
3670 	/*
3671 	 * Boot (starting init) might have failed, in which case the zone
3672 	 * will go to the SHUTTING_DOWN state; an appropriate errno will
3673 	 * be placed in zone->zone_boot_err, and so we return that.
3674 	 */
3675 	err = zone->zone_boot_err;
3676 	zone_rele(zone);
3677 	return (err ? set_errno(err) : 0);
3678 }
3679 
3680 /*
3681  * Kills all user processes in the zone, waiting for them all to exit
3682  * before returning.
3683  */
3684 static int
3685 zone_empty(zone_t *zone)
3686 {
3687 	int waitstatus;
3688 
3689 	/*
3690 	 * We need to drop zonehash_lock before killing all
3691 	 * processes, otherwise we'll deadlock with zone_find_*
3692 	 * which can be called from the exit path.
3693 	 */
3694 	ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
3695 	while ((waitstatus = zone_status_timedwait_sig(zone, lbolt + hz,
3696 	    ZONE_IS_EMPTY)) == -1) {
3697 		killall(zone->zone_id);
3698 	}
3699 	/*
3700 	 * return EINTR if we were signaled
3701 	 */
3702 	if (waitstatus == 0)
3703 		return (EINTR);
3704 	return (0);
3705 }
3706 
3707 /*
3708  * This function implements the policy for zone visibility.
3709  *
3710  * In standard Solaris, a non-global zone can only see itself.
3711  *
3712  * In Trusted Extensions, a labeled zone can lookup any zone whose label
3713  * it dominates. For this test, the label of the global zone is treated as
3714  * admin_high so it is special-cased instead of being checked for dominance.
3715  *
3716  * Returns true if zone attributes are viewable, false otherwise.
3717  */
3718 static boolean_t
3719 zone_list_access(zone_t *zone)
3720 {
3721 
3722 	if (curproc->p_zone == global_zone ||
3723 	    curproc->p_zone == zone) {
3724 		return (B_TRUE);
3725 	} else if (is_system_labeled() && !(zone->zone_flags & ZF_IS_SCRATCH)) {
3726 		bslabel_t *curproc_label;
3727 		bslabel_t *zone_label;
3728 
3729 		curproc_label = label2bslabel(curproc->p_zone->zone_slabel);
3730 		zone_label = label2bslabel(zone->zone_slabel);
3731 
3732 		if (zone->zone_id != GLOBAL_ZONEID &&
3733 		    bldominates(curproc_label, zone_label)) {
3734 			return (B_TRUE);
3735 		} else {
3736 			return (B_FALSE);
3737 		}
3738 	} else {
3739 		return (B_FALSE);
3740 	}
3741 }
3742 
3743 /*
3744  * Systemcall to start the zone's halt sequence.  By the time this
3745  * function successfully returns, all user processes and kernel threads
3746  * executing in it will have exited, ZSD shutdown callbacks executed,
3747  * and the zone status set to ZONE_IS_DOWN.
3748  *
3749  * It is possible that the call will interrupt itself if the caller is the
3750  * parent of any process running in the zone, and doesn't have SIGCHLD blocked.
3751  */
3752 static int
3753 zone_shutdown(zoneid_t zoneid)
3754 {
3755 	int error;
3756 	zone_t *zone;
3757 	zone_status_t status;
3758 
3759 	if (secpolicy_zone_config(CRED()) != 0)
3760 		return (set_errno(EPERM));
3761 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
3762 		return (set_errno(EINVAL));
3763 
3764 	/*
3765 	 * Block mounts so that VFS_MOUNT() can get an accurate view of
3766 	 * the zone's status with regards to ZONE_IS_SHUTTING down.
3767 	 *
3768 	 * e.g. NFS can fail the mount if it determines that the zone
3769 	 * has already begun the shutdown sequence.
3770 	 */
3771 	if (block_mounts() == 0)
3772 		return (set_errno(EINTR));
3773 	mutex_enter(&zonehash_lock);
3774 	/*
3775 	 * Look for zone under hash lock to prevent races with other
3776 	 * calls to zone_shutdown and zone_destroy.
3777 	 */
3778 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
3779 		mutex_exit(&zonehash_lock);
3780 		resume_mounts();
3781 		return (set_errno(EINVAL));
3782 	}
3783 	mutex_enter(&zone_status_lock);
3784 	status = zone_status_get(zone);
3785 	/*
3786 	 * Fail if the zone isn't fully initialized yet.
3787 	 */
3788 	if (status < ZONE_IS_READY) {
3789 		mutex_exit(&zone_status_lock);
3790 		mutex_exit(&zonehash_lock);
3791 		resume_mounts();
3792 		return (set_errno(EINVAL));
3793 	}
3794 	/*
3795 	 * If conditions required for zone_shutdown() to return have been met,
3796 	 * return success.
3797 	 */
3798 	if (status >= ZONE_IS_DOWN) {
3799 		mutex_exit(&zone_status_lock);
3800 		mutex_exit(&zonehash_lock);
3801 		resume_mounts();
3802 		return (0);
3803 	}
3804 	/*
3805 	 * If zone_shutdown() hasn't been called before, go through the motions.
3806 	 * If it has, there's nothing to do but wait for the kernel threads to
3807 	 * drain.
3808 	 */
3809 	if (status < ZONE_IS_EMPTY) {
3810 		uint_t ntasks;
3811 
3812 		mutex_enter(&zone->zone_lock);
3813 		if ((ntasks = zone->zone_ntasks) != 1) {
3814 			/*
3815 			 * There's still stuff running.
3816 			 */
3817 			zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
3818 		}
3819 		mutex_exit(&zone->zone_lock);
3820 		if (ntasks == 1) {
3821 			/*
3822 			 * The only way to create another task is through
3823 			 * zone_enter(), which will block until we drop
3824 			 * zonehash_lock.  The zone is empty.
3825 			 */
3826 			if (zone->zone_kthreads == NULL) {
3827 				/*
3828 				 * Skip ahead to ZONE_IS_DOWN
3829 				 */
3830 				zone_status_set(zone, ZONE_IS_DOWN);
3831 			} else {
3832 				zone_status_set(zone, ZONE_IS_EMPTY);
3833 			}
3834 		}
3835 	}
3836 	zone_hold(zone);	/* so we can use the zone_t later */
3837 	mutex_exit(&zone_status_lock);
3838 	mutex_exit(&zonehash_lock);
3839 	resume_mounts();
3840 
3841 	if (error = zone_empty(zone)) {
3842 		zone_rele(zone);
3843 		return (set_errno(error));
3844 	}
3845 	/*
3846 	 * After the zone status goes to ZONE_IS_DOWN this zone will no
3847 	 * longer be notified of changes to the pools configuration, so
3848 	 * in order to not end up with a stale pool pointer, we point
3849 	 * ourselves at the default pool and remove all resource
3850 	 * visibility.  This is especially important as the zone_t may
3851 	 * languish on the deathrow for a very long time waiting for
3852 	 * cred's to drain out.
3853 	 *
3854 	 * This rebinding of the zone can happen multiple times
3855 	 * (presumably due to interrupted or parallel systemcalls)
3856 	 * without any adverse effects.
3857 	 */
3858 	if (pool_lock_intr() != 0) {
3859 		zone_rele(zone);
3860 		return (set_errno(EINTR));
3861 	}
3862 	if (pool_state == POOL_ENABLED) {
3863 		mutex_enter(&cpu_lock);
3864 		zone_pool_set(zone, pool_default);
3865 		/*
3866 		 * The zone no longer needs to be able to see any cpus.
3867 		 */
3868 		zone_pset_set(zone, ZONE_PS_INVAL);
3869 		mutex_exit(&cpu_lock);
3870 	}
3871 	pool_unlock();
3872 
3873 	/*
3874 	 * ZSD shutdown callbacks can be executed multiple times, hence
3875 	 * it is safe to not be holding any locks across this call.
3876 	 */
3877 	zone_zsd_callbacks(zone, ZSD_SHUTDOWN);
3878 
3879 	mutex_enter(&zone_status_lock);
3880 	if (zone->zone_kthreads == NULL && zone_status_get(zone) < ZONE_IS_DOWN)
3881 		zone_status_set(zone, ZONE_IS_DOWN);
3882 	mutex_exit(&zone_status_lock);
3883 
3884 	/*
3885 	 * Wait for kernel threads to drain.
3886 	 */
3887 	if (!zone_status_wait_sig(zone, ZONE_IS_DOWN)) {
3888 		zone_rele(zone);
3889 		return (set_errno(EINTR));
3890 	}
3891 
3892 	/*
3893 	 * Zone can be become down/destroyable even if the above wait
3894 	 * returns EINTR, so any code added here may never execute.
3895 	 * (i.e. don't add code here)
3896 	 */
3897 
3898 	zone_rele(zone);
3899 	return (0);
3900 }
3901 
3902 /*
3903  * Systemcall entry point to finalize the zone halt process.  The caller
3904  * must have already successfully called zone_shutdown().
3905  *
3906  * Upon successful completion, the zone will have been fully destroyed:
3907  * zsched will have exited, destructor callbacks executed, and the zone
3908  * removed from the list of active zones.
3909  */
3910 static int
3911 zone_destroy(zoneid_t zoneid)
3912 {
3913 	uint64_t uniqid;
3914 	zone_t *zone;
3915 	zone_status_t status;
3916 
3917 	if (secpolicy_zone_config(CRED()) != 0)
3918 		return (set_errno(EPERM));
3919 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
3920 		return (set_errno(EINVAL));
3921 
3922 	mutex_enter(&zonehash_lock);
3923 	/*
3924 	 * Look for zone under hash lock to prevent races with other
3925 	 * calls to zone_destroy.
3926 	 */
3927 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
3928 		mutex_exit(&zonehash_lock);
3929 		return (set_errno(EINVAL));
3930 	}
3931 
3932 	if (zone_mount_count(zone->zone_rootpath) != 0) {
3933 		mutex_exit(&zonehash_lock);
3934 		return (set_errno(EBUSY));
3935 	}
3936 	mutex_enter(&zone_status_lock);
3937 	status = zone_status_get(zone);
3938 	if (status < ZONE_IS_DOWN) {
3939 		mutex_exit(&zone_status_lock);
3940 		mutex_exit(&zonehash_lock);
3941 		return (set_errno(EBUSY));
3942 	} else if (status == ZONE_IS_DOWN) {
3943 		zone_status_set(zone, ZONE_IS_DYING); /* Tell zsched to exit */
3944 	}
3945 	mutex_exit(&zone_status_lock);
3946 	zone_hold(zone);
3947 	mutex_exit(&zonehash_lock);
3948 
3949 	/*
3950 	 * wait for zsched to exit
3951 	 */
3952 	zone_status_wait(zone, ZONE_IS_DEAD);
3953 	zone_zsd_callbacks(zone, ZSD_DESTROY);
3954 	zone->zone_netstack = NULL;
3955 	uniqid = zone->zone_uniqid;
3956 	zone_rele(zone);
3957 	zone = NULL;	/* potentially free'd */
3958 
3959 	mutex_enter(&zonehash_lock);
3960 	for (; /* ever */; ) {
3961 		boolean_t unref;
3962 
3963 		if ((zone = zone_find_all_by_id(zoneid)) == NULL ||
3964 		    zone->zone_uniqid != uniqid) {
3965 			/*
3966 			 * The zone has gone away.  Necessary conditions
3967 			 * are met, so we return success.
3968 			 */
3969 			mutex_exit(&zonehash_lock);
3970 			return (0);
3971 		}
3972 		mutex_enter(&zone->zone_lock);
3973 		unref = ZONE_IS_UNREF(zone);
3974 		mutex_exit(&zone->zone_lock);
3975 		if (unref) {
3976 			/*
3977 			 * There is only one reference to the zone -- that
3978 			 * added when the zone was added to the hashtables --
3979 			 * and things will remain this way until we drop
3980 			 * zonehash_lock... we can go ahead and cleanup the
3981 			 * zone.
3982 			 */
3983 			break;
3984 		}
3985 
3986 		if (cv_wait_sig(&zone_destroy_cv, &zonehash_lock) == 0) {
3987 			/* Signaled */
3988 			mutex_exit(&zonehash_lock);
3989 			return (set_errno(EINTR));
3990 		}
3991 
3992 	}
3993 
3994 	/*
3995 	 * Remove CPU cap for this zone now since we're not going to
3996 	 * fail below this point.
3997 	 */
3998 	cpucaps_zone_remove(zone);
3999 
4000 	/* Get rid of the zone's kstats */
4001 	zone_kstat_delete(zone);
4002 
4003 	/* Say goodbye to brand framework. */
4004 	brand_unregister_zone(zone->zone_brand);
4005 
4006 	/*
4007 	 * It is now safe to let the zone be recreated; remove it from the
4008 	 * lists.  The memory will not be freed until the last cred
4009 	 * reference goes away.
4010 	 */
4011 	ASSERT(zonecount > 1);	/* must be > 1; can't destroy global zone */
4012 	zonecount--;
4013 	/* remove from active list and hash tables */
4014 	list_remove(&zone_active, zone);
4015 	(void) mod_hash_destroy(zonehashbyname,
4016 	    (mod_hash_key_t)zone->zone_name);
4017 	(void) mod_hash_destroy(zonehashbyid,
4018 	    (mod_hash_key_t)(uintptr_t)zone->zone_id);
4019 	if (zone->zone_flags & ZF_HASHED_LABEL)
4020 		(void) mod_hash_destroy(zonehashbylabel,
4021 		    (mod_hash_key_t)zone->zone_slabel);
4022 	mutex_exit(&zonehash_lock);
4023 
4024 	/*
4025 	 * Release the root vnode; we're not using it anymore.  Nor should any
4026 	 * other thread that might access it exist.
4027 	 */
4028 	if (zone->zone_rootvp != NULL) {
4029 		VN_RELE(zone->zone_rootvp);
4030 		zone->zone_rootvp = NULL;
4031 	}
4032 
4033 	/* add to deathrow list */
4034 	mutex_enter(&zone_deathrow_lock);
4035 	list_insert_tail(&zone_deathrow, zone);
4036 	mutex_exit(&zone_deathrow_lock);
4037 
4038 	/*
4039 	 * Drop last reference (which was added by zsched()), this will
4040 	 * free the zone unless there are outstanding cred references.
4041 	 */
4042 	zone_rele(zone);
4043 	return (0);
4044 }
4045 
4046 /*
4047  * Systemcall entry point for zone_getattr(2).
4048  */
4049 static ssize_t
4050 zone_getattr(zoneid_t zoneid, int attr, void *buf, size_t bufsize)
4051 {
4052 	size_t size;
4053 	int error = 0, err;
4054 	zone_t *zone;
4055 	char *zonepath;
4056 	char *outstr;
4057 	zone_status_t zone_status;
4058 	pid_t initpid;
4059 	boolean_t global = (curzone == global_zone);
4060 	boolean_t inzone = (curzone->zone_id == zoneid);
4061 	ushort_t flags;
4062 
4063 	mutex_enter(&zonehash_lock);
4064 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
4065 		mutex_exit(&zonehash_lock);
4066 		return (set_errno(EINVAL));
4067 	}
4068 	zone_status = zone_status_get(zone);
4069 	if (zone_status < ZONE_IS_READY) {
4070 		mutex_exit(&zonehash_lock);
4071 		return (set_errno(EINVAL));
4072 	}
4073 	zone_hold(zone);
4074 	mutex_exit(&zonehash_lock);
4075 
4076 	/*
4077 	 * If not in the global zone, don't show information about other zones,
4078 	 * unless the system is labeled and the local zone's label dominates
4079 	 * the other zone.
4080 	 */
4081 	if (!zone_list_access(zone)) {
4082 		zone_rele(zone);
4083 		return (set_errno(EINVAL));
4084 	}
4085 
4086 	switch (attr) {
4087 	case ZONE_ATTR_ROOT:
4088 		if (global) {
4089 			/*
4090 			 * Copy the path to trim the trailing "/" (except for
4091 			 * the global zone).
4092 			 */
4093 			if (zone != global_zone)
4094 				size = zone->zone_rootpathlen - 1;
4095 			else
4096 				size = zone->zone_rootpathlen;
4097 			zonepath = kmem_alloc(size, KM_SLEEP);
4098 			bcopy(zone->zone_rootpath, zonepath, size);
4099 			zonepath[size - 1] = '\0';
4100 		} else {
4101 			if (inzone || !is_system_labeled()) {
4102 				/*
4103 				 * Caller is not in the global zone.
4104 				 * if the query is on the current zone
4105 				 * or the system is not labeled,
4106 				 * just return faked-up path for current zone.
4107 				 */
4108 				zonepath = "/";
4109 				size = 2;
4110 			} else {
4111 				/*
4112 				 * Return related path for current zone.
4113 				 */
4114 				int prefix_len = strlen(zone_prefix);
4115 				int zname_len = strlen(zone->zone_name);
4116 
4117 				size = prefix_len + zname_len + 1;
4118 				zonepath = kmem_alloc(size, KM_SLEEP);
4119 				bcopy(zone_prefix, zonepath, prefix_len);
4120 				bcopy(zone->zone_name, zonepath +
4121 				    prefix_len, zname_len);
4122 				zonepath[size - 1] = '\0';
4123 			}
4124 		}
4125 		if (bufsize > size)
4126 			bufsize = size;
4127 		if (buf != NULL) {
4128 			err = copyoutstr(zonepath, buf, bufsize, NULL);
4129 			if (err != 0 && err != ENAMETOOLONG)
4130 				error = EFAULT;
4131 		}
4132 		if (global || (is_system_labeled() && !inzone))
4133 			kmem_free(zonepath, size);
4134 		break;
4135 
4136 	case ZONE_ATTR_NAME:
4137 		size = strlen(zone->zone_name) + 1;
4138 		if (bufsize > size)
4139 			bufsize = size;
4140 		if (buf != NULL) {
4141 			err = copyoutstr(zone->zone_name, buf, bufsize, NULL);
4142 			if (err != 0 && err != ENAMETOOLONG)
4143 				error = EFAULT;
4144 		}
4145 		break;
4146 
4147 	case ZONE_ATTR_STATUS:
4148 		/*
4149 		 * Since we're not holding zonehash_lock, the zone status
4150 		 * may be anything; leave it up to userland to sort it out.
4151 		 */
4152 		size = sizeof (zone_status);
4153 		if (bufsize > size)
4154 			bufsize = size;
4155 		zone_status = zone_status_get(zone);
4156 		if (buf != NULL &&
4157 		    copyout(&zone_status, buf, bufsize) != 0)
4158 			error = EFAULT;
4159 		break;
4160 	case ZONE_ATTR_FLAGS:
4161 		size = sizeof (zone->zone_flags);
4162 		if (bufsize > size)
4163 			bufsize = size;
4164 		flags = zone->zone_flags;
4165 		if (buf != NULL &&
4166 		    copyout(&flags, buf, bufsize) != 0)
4167 			error = EFAULT;
4168 		break;
4169 	case ZONE_ATTR_PRIVSET:
4170 		size = sizeof (priv_set_t);
4171 		if (bufsize > size)
4172 			bufsize = size;
4173 		if (buf != NULL &&
4174 		    copyout(zone->zone_privset, buf, bufsize) != 0)
4175 			error = EFAULT;
4176 		break;
4177 	case ZONE_ATTR_UNIQID:
4178 		size = sizeof (zone->zone_uniqid);
4179 		if (bufsize > size)
4180 			bufsize = size;
4181 		if (buf != NULL &&
4182 		    copyout(&zone->zone_uniqid, buf, bufsize) != 0)
4183 			error = EFAULT;
4184 		break;
4185 	case ZONE_ATTR_POOLID:
4186 		{
4187 			pool_t *pool;
4188 			poolid_t poolid;
4189 
4190 			if (pool_lock_intr() != 0) {
4191 				error = EINTR;
4192 				break;
4193 			}
4194 			pool = zone_pool_get(zone);
4195 			poolid = pool->pool_id;
4196 			pool_unlock();
4197 			size = sizeof (poolid);
4198 			if (bufsize > size)
4199 				bufsize = size;
4200 			if (buf != NULL && copyout(&poolid, buf, size) != 0)
4201 				error = EFAULT;
4202 		}
4203 		break;
4204 	case ZONE_ATTR_SLBL:
4205 		size = sizeof (bslabel_t);
4206 		if (bufsize > size)
4207 			bufsize = size;
4208 		if (zone->zone_slabel == NULL)
4209 			error = EINVAL;
4210 		else if (buf != NULL &&
4211 		    copyout(label2bslabel(zone->zone_slabel), buf,
4212 		    bufsize) != 0)
4213 			error = EFAULT;
4214 		break;
4215 	case ZONE_ATTR_INITPID:
4216 		size = sizeof (initpid);
4217 		if (bufsize > size)
4218 			bufsize = size;
4219 		initpid = zone->zone_proc_initpid;
4220 		if (initpid == -1) {
4221 			error = ESRCH;
4222 			break;
4223 		}
4224 		if (buf != NULL &&
4225 		    copyout(&initpid, buf, bufsize) != 0)
4226 			error = EFAULT;
4227 		break;
4228 	case ZONE_ATTR_BRAND:
4229 		size = strlen(zone->zone_brand->b_name) + 1;
4230 
4231 		if (bufsize > size)
4232 			bufsize = size;
4233 		if (buf != NULL) {
4234 			err = copyoutstr(zone->zone_brand->b_name, buf,
4235 			    bufsize, NULL);
4236 			if (err != 0 && err != ENAMETOOLONG)
4237 				error = EFAULT;
4238 		}
4239 		break;
4240 	case ZONE_ATTR_INITNAME:
4241 		size = strlen(zone->zone_initname) + 1;
4242 		if (bufsize > size)
4243 			bufsize = size;
4244 		if (buf != NULL) {
4245 			err = copyoutstr(zone->zone_initname, buf, bufsize,
4246 			    NULL);
4247 			if (err != 0 && err != ENAMETOOLONG)
4248 				error = EFAULT;
4249 		}
4250 		break;
4251 	case ZONE_ATTR_BOOTARGS:
4252 		if (zone->zone_bootargs == NULL)
4253 			outstr = "";
4254 		else
4255 			outstr = zone->zone_bootargs;
4256 		size = strlen(outstr) + 1;
4257 		if (bufsize > size)
4258 			bufsize = size;
4259 		if (buf != NULL) {
4260 			err = copyoutstr(outstr, buf, bufsize, NULL);
4261 			if (err != 0 && err != ENAMETOOLONG)
4262 				error = EFAULT;
4263 		}
4264 		break;
4265 	case ZONE_ATTR_PHYS_MCAP:
4266 		size = sizeof (zone->zone_phys_mcap);
4267 		if (bufsize > size)
4268 			bufsize = size;
4269 		if (buf != NULL &&
4270 		    copyout(&zone->zone_phys_mcap, buf, bufsize) != 0)
4271 			error = EFAULT;
4272 		break;
4273 	case ZONE_ATTR_SCHED_CLASS:
4274 		mutex_enter(&class_lock);
4275 
4276 		if (zone->zone_defaultcid >= loaded_classes)
4277 			outstr = "";
4278 		else
4279 			outstr = sclass[zone->zone_defaultcid].cl_name;
4280 		size = strlen(outstr) + 1;
4281 		if (bufsize > size)
4282 			bufsize = size;
4283 		if (buf != NULL) {
4284 			err = copyoutstr(outstr, buf, bufsize, NULL);
4285 			if (err != 0 && err != ENAMETOOLONG)
4286 				error = EFAULT;
4287 		}
4288 
4289 		mutex_exit(&class_lock);
4290 		break;
4291 	default:
4292 		if ((attr >= ZONE_ATTR_BRAND_ATTRS) && ZONE_IS_BRANDED(zone)) {
4293 			size = bufsize;
4294 			error = ZBROP(zone)->b_getattr(zone, attr, buf, &size);
4295 		} else {
4296 			error = EINVAL;
4297 		}
4298 	}
4299 	zone_rele(zone);
4300 
4301 	if (error)
4302 		return (set_errno(error));
4303 	return ((ssize_t)size);
4304 }
4305 
4306 /*
4307  * Systemcall entry point for zone_setattr(2).
4308  */
4309 /*ARGSUSED*/
4310 static int
4311 zone_setattr(zoneid_t zoneid, int attr, void *buf, size_t bufsize)
4312 {
4313 	zone_t *zone;
4314 	zone_status_t zone_status;
4315 	int err;
4316 
4317 	if (secpolicy_zone_config(CRED()) != 0)
4318 		return (set_errno(EPERM));
4319 
4320 	/*
4321 	 * Only the ZONE_ATTR_PHYS_MCAP attribute can be set on the
4322 	 * global zone.
4323 	 */
4324 	if (zoneid == GLOBAL_ZONEID && attr != ZONE_ATTR_PHYS_MCAP) {
4325 		return (set_errno(EINVAL));
4326 	}
4327 
4328 	mutex_enter(&zonehash_lock);
4329 	if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
4330 		mutex_exit(&zonehash_lock);
4331 		return (set_errno(EINVAL));
4332 	}
4333 	zone_hold(zone);
4334 	mutex_exit(&zonehash_lock);
4335 
4336 	/*
4337 	 * At present most attributes can only be set on non-running,
4338 	 * non-global zones.
4339 	 */
4340 	zone_status = zone_status_get(zone);
4341 	if (attr != ZONE_ATTR_PHYS_MCAP && zone_status > ZONE_IS_READY)
4342 		goto done;
4343 
4344 	switch (attr) {
4345 	case ZONE_ATTR_INITNAME:
4346 		err = zone_set_initname(zone, (const char *)buf);
4347 		break;
4348 	case ZONE_ATTR_BOOTARGS:
4349 		err = zone_set_bootargs(zone, (const char *)buf);
4350 		break;
4351 	case ZONE_ATTR_BRAND:
4352 		err = zone_set_brand(zone, (const char *)buf);
4353 		break;
4354 	case ZONE_ATTR_PHYS_MCAP:
4355 		err = zone_set_phys_mcap(zone, (const uint64_t *)buf);
4356 		break;
4357 	case ZONE_ATTR_SCHED_CLASS:
4358 		err = zone_set_sched_class(zone, (const char *)buf);
4359 		break;
4360 	default:
4361 		if ((attr >= ZONE_ATTR_BRAND_ATTRS) && ZONE_IS_BRANDED(zone))
4362 			err = ZBROP(zone)->b_setattr(zone, attr, buf, bufsize);
4363 		else
4364 			err = EINVAL;
4365 	}
4366 
4367 done:
4368 	zone_rele(zone);
4369 	return (err != 0 ? set_errno(err) : 0);
4370 }
4371 
4372 /*
4373  * Return zero if the process has at least one vnode mapped in to its
4374  * address space which shouldn't be allowed to change zones.
4375  *
4376  * Also return zero if the process has any shared mappings which reserve
4377  * swap.  This is because the counting for zone.max-swap does not allow swap
4378  * revervation to be shared between zones.  zone swap reservation is counted
4379  * on zone->zone_max_swap.
4380  */
4381 static int
4382 as_can_change_zones(void)
4383 {
4384 	proc_t *pp = curproc;
4385 	struct seg *seg;
4386 	struct as *as = pp->p_as;
4387 	vnode_t *vp;
4388 	int allow = 1;
4389 
4390 	ASSERT(pp->p_as != &kas);
4391 	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
4392 	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
4393 
4394 		/*
4395 		 * Cannot enter zone with shared anon memory which
4396 		 * reserves swap.  See comment above.
4397 		 */
4398 		if (seg_can_change_zones(seg) == B_FALSE) {
4399 			allow = 0;
4400 			break;
4401 		}
4402 		/*
4403 		 * if we can't get a backing vnode for this segment then skip
4404 		 * it.
4405 		 */
4406 		vp = NULL;
4407 		if (SEGOP_GETVP(seg, seg->s_base, &vp) != 0 || vp == NULL)
4408 			continue;
4409 		if (!vn_can_change_zones(vp)) { /* bail on first match */
4410 			allow = 0;
4411 			break;
4412 		}
4413 	}
4414 	AS_LOCK_EXIT(as, &as->a_lock);
4415 	return (allow);
4416 }
4417 
4418 /*
4419  * Count swap reserved by curproc's address space
4420  */
4421 static size_t
4422 as_swresv(void)
4423 {
4424 	proc_t *pp = curproc;
4425 	struct seg *seg;
4426 	struct as *as = pp->p_as;
4427 	size_t swap = 0;
4428 
4429 	ASSERT(pp->p_as != &kas);
4430 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
4431 	for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg))
4432 		swap += seg_swresv(seg);
4433 
4434 	return (swap);
4435 }
4436 
4437 /*
4438  * Systemcall entry point for zone_enter().
4439  *
4440  * The current process is injected into said zone.  In the process
4441  * it will change its project membership, privileges, rootdir/cwd,
4442  * zone-wide rctls, and pool association to match those of the zone.
4443  *
4444  * The first zone_enter() called while the zone is in the ZONE_IS_READY
4445  * state will transition it to ZONE_IS_RUNNING.  Processes may only
4446  * enter a zone that is "ready" or "running".
4447  */
4448 static int
4449 zone_enter(zoneid_t zoneid)
4450 {
4451 	zone_t *zone;
4452 	vnode_t *vp;
4453 	proc_t *pp = curproc;
4454 	contract_t *ct;
4455 	cont_process_t *ctp;
4456 	task_t *tk, *oldtk;
4457 	kproject_t *zone_proj0;
4458 	cred_t *cr, *newcr;
4459 	pool_t *oldpool, *newpool;
4460 	sess_t *sp;
4461 	uid_t uid;
4462 	zone_status_t status;
4463 	int err = 0;
4464 	rctl_entity_p_t e;
4465 	size_t swap;
4466 	kthread_id_t t;
4467 
4468 	if (secpolicy_zone_config(CRED()) != 0)
4469 		return (set_errno(EPERM));
4470 	if (zoneid < MIN_USERZONEID || zoneid > MAX_ZONEID)
4471 		return (set_errno(EINVAL));
4472 
4473 	/*
4474 	 * Stop all lwps so we don't need to hold a lock to look at
4475 	 * curproc->p_zone.  This needs to happen before we grab any
4476 	 * locks to avoid deadlock (another lwp in the process could
4477 	 * be waiting for the held lock).
4478 	 */
4479 	if (curthread != pp->p_agenttp && !holdlwps(SHOLDFORK))
4480 		return (set_errno(EINTR));
4481 
4482 	/*
4483 	 * Make sure we're not changing zones with files open or mapped in
4484 	 * to our address space which shouldn't be changing zones.
4485 	 */
4486 	if (!files_can_change_zones()) {
4487 		err = EBADF;
4488 		goto out;
4489 	}
4490 	if (!as_can_change_zones()) {
4491 		err = EFAULT;
4492 		goto out;
4493 	}
4494 
4495 	mutex_enter(&zonehash_lock);
4496 	if (pp->p_zone != global_zone) {
4497 		mutex_exit(&zonehash_lock);
4498 		err = EINVAL;
4499 		goto out;
4500 	}
4501 
4502 	zone = zone_find_all_by_id(zoneid);
4503 	if (zone == NULL) {
4504 		mutex_exit(&zonehash_lock);
4505 		err = EINVAL;
4506 		goto out;
4507 	}
4508 
4509 	/*
4510 	 * To prevent processes in a zone from holding contracts on
4511 	 * extrazonal resources, and to avoid process contract
4512 	 * memberships which span zones, contract holders and processes
4513 	 * which aren't the sole members of their encapsulating process
4514 	 * contracts are not allowed to zone_enter.
4515 	 */
4516 	ctp = pp->p_ct_process;
4517 	ct = &ctp->conp_contract;
4518 	mutex_enter(&ct->ct_lock);
4519 	mutex_enter(&pp->p_lock);
4520 	if ((avl_numnodes(&pp->p_ct_held) != 0) || (ctp->conp_nmembers != 1)) {
4521 		mutex_exit(&pp->p_lock);
4522 		mutex_exit(&ct->ct_lock);
4523 		mutex_exit(&zonehash_lock);
4524 		pool_unlock();
4525 		err = EINVAL;
4526 		goto out;
4527 	}
4528 
4529 	/*
4530 	 * Moreover, we don't allow processes whose encapsulating
4531 	 * process contracts have inherited extrazonal contracts.
4532 	 * While it would be easier to eliminate all process contracts
4533 	 * with inherited contracts, we need to be able to give a
4534 	 * restarted init (or other zone-penetrating process) its
4535 	 * predecessor's contracts.
4536 	 */
4537 	if (ctp->conp_ninherited != 0) {
4538 		contract_t *next;
4539 		for (next = list_head(&ctp->conp_inherited); next;
4540 		    next = list_next(&ctp->conp_inherited, next)) {
4541 			if (contract_getzuniqid(next) != zone->zone_uniqid) {
4542 				mutex_exit(&pp->p_lock);
4543 				mutex_exit(&ct->ct_lock);
4544 				mutex_exit(&zonehash_lock);
4545 				pool_unlock();
4546 				err = EINVAL;
4547 				goto out;
4548 			}
4549 		}
4550 	}
4551 	mutex_exit(&pp->p_lock);
4552 	mutex_exit(&ct->ct_lock);
4553 
4554 	status = zone_status_get(zone);
4555 	if (status < ZONE_IS_READY || status >= ZONE_IS_SHUTTING_DOWN) {
4556 		/*
4557 		 * Can't join
4558 		 */
4559 		mutex_exit(&zonehash_lock);
4560 		err = EINVAL;
4561 		goto out;
4562 	}
4563 
4564 	/*
4565 	 * Make sure new priv set is within the permitted set for caller
4566 	 */
4567 	if (!priv_issubset(zone->zone_privset, &CR_OPPRIV(CRED()))) {
4568 		mutex_exit(&zonehash_lock);
4569 		err = EPERM;
4570 		goto out;
4571 	}
4572 	/*
4573 	 * We want to momentarily drop zonehash_lock while we optimistically
4574 	 * bind curproc to the pool it should be running in.  This is safe
4575 	 * since the zone can't disappear (we have a hold on it).
4576 	 */
4577 	zone_hold(zone);
4578 	mutex_exit(&zonehash_lock);
4579 
4580 	/*
4581 	 * Grab pool_lock to keep the pools configuration from changing
4582 	 * and to stop ourselves from getting rebound to another pool
4583 	 * until we join the zone.
4584 	 */
4585 	if (pool_lock_intr() != 0) {
4586 		zone_rele(zone);
4587 		err = EINTR;
4588 		goto out;
4589 	}
4590 	ASSERT(secpolicy_pool(CRED()) == 0);
4591 	/*
4592 	 * Bind ourselves to the pool currently associated with the zone.
4593 	 */
4594 	oldpool = curproc->p_pool;
4595 	newpool = zone_pool_get(zone);
4596 	if (pool_state == POOL_ENABLED && newpool != oldpool &&
4597 	    (err = pool_do_bind(newpool, P_PID, P_MYID,
4598 	    POOL_BIND_ALL)) != 0) {
4599 		pool_unlock();
4600 		zone_rele(zone);
4601 		goto out;
4602 	}
4603 
4604 	/*
4605 	 * Grab cpu_lock now; we'll need it later when we call
4606 	 * task_join().
4607 	 */
4608 	mutex_enter(&cpu_lock);
4609 	mutex_enter(&zonehash_lock);
4610 	/*
4611 	 * Make sure the zone hasn't moved on since we dropped zonehash_lock.
4612 	 */
4613 	if (zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN) {
4614 		/*
4615 		 * Can't join anymore.
4616 		 */
4617 		mutex_exit(&zonehash_lock);
4618 		mutex_exit(&cpu_lock);
4619 		if (pool_state == POOL_ENABLED &&
4620 		    newpool != oldpool)
4621 			(void) pool_do_bind(oldpool, P_PID, P_MYID,
4622 			    POOL_BIND_ALL);
4623 		pool_unlock();
4624 		zone_rele(zone);
4625 		err = EINVAL;
4626 		goto out;
4627 	}
4628 
4629 	/*
4630 	 * a_lock must be held while transfering locked memory and swap
4631 	 * reservation from the global zone to the non global zone because
4632 	 * asynchronous faults on the processes' address space can lock
4633 	 * memory and reserve swap via MCL_FUTURE and MAP_NORESERVE
4634 	 * segments respectively.
4635 	 */
4636 	AS_LOCK_ENTER(pp->as, &pp->p_as->a_lock, RW_WRITER);
4637 	swap = as_swresv();
4638 	mutex_enter(&pp->p_lock);
4639 	zone_proj0 = zone->zone_zsched->p_task->tk_proj;
4640 	/* verify that we do not exceed and task or lwp limits */
4641 	mutex_enter(&zone->zone_nlwps_lock);
4642 	/* add new lwps to zone and zone's proj0 */
4643 	zone_proj0->kpj_nlwps += pp->p_lwpcnt;
4644 	zone->zone_nlwps += pp->p_lwpcnt;
4645 	/* add 1 task to zone's proj0 */
4646 	zone_proj0->kpj_ntasks += 1;
4647 	mutex_exit(&zone->zone_nlwps_lock);
4648 
4649 	mutex_enter(&zone->zone_mem_lock);
4650 	zone->zone_locked_mem += pp->p_locked_mem;
4651 	zone_proj0->kpj_data.kpd_locked_mem += pp->p_locked_mem;
4652 	zone->zone_max_swap += swap;
4653 	mutex_exit(&zone->zone_mem_lock);
4654 
4655 	mutex_enter(&(zone_proj0->kpj_data.kpd_crypto_lock));
4656 	zone_proj0->kpj_data.kpd_crypto_mem += pp->p_crypto_mem;
4657 	mutex_exit(&(zone_proj0->kpj_data.kpd_crypto_lock));
4658 
4659 	/* remove lwps from proc's old zone and old project */
4660 	mutex_enter(&pp->p_zone->zone_nlwps_lock);
4661 	pp->p_zone->zone_nlwps -= pp->p_lwpcnt;
4662 	pp->p_task->tk_proj->kpj_nlwps -= pp->p_lwpcnt;
4663 	mutex_exit(&pp->p_zone->zone_nlwps_lock);
4664 
4665 	mutex_enter(&pp->p_zone->zone_mem_lock);
4666 	pp->p_zone->zone_locked_mem -= pp->p_locked_mem;
4667 	pp->p_task->tk_proj->kpj_data.kpd_locked_mem -= pp->p_locked_mem;
4668 	pp->p_zone->zone_max_swap -= swap;
4669 	mutex_exit(&pp->p_zone->zone_mem_lock);
4670 
4671 	mutex_enter(&(pp->p_task->tk_proj->kpj_data.kpd_crypto_lock));
4672 	pp->p_task->tk_proj->kpj_data.kpd_crypto_mem -= pp->p_crypto_mem;
4673 	mutex_exit(&(pp->p_task->tk_proj->kpj_data.kpd_crypto_lock));
4674 
4675 	mutex_exit(&pp->p_lock);
4676 	AS_LOCK_EXIT(pp->p_as, &pp->p_as->a_lock);
4677 
4678 	/*
4679 	 * Joining the zone cannot fail from now on.
4680 	 *
4681 	 * This means that a lot of the following code can be commonized and
4682 	 * shared with zsched().
4683 	 */
4684 
4685 	/*
4686 	 * Reset the encapsulating process contract's zone.
4687 	 */
4688 	ASSERT(ct->ct_mzuniqid == GLOBAL_ZONEUNIQID);
4689 	contract_setzuniqid(ct, zone->zone_uniqid);
4690 
4691 	/*
4692 	 * Create a new task and associate the process with the project keyed
4693 	 * by (projid,zoneid).
4694 	 *
4695 	 * We might as well be in project 0; the global zone's projid doesn't
4696 	 * make much sense in a zone anyhow.
4697 	 *
4698 	 * This also increments zone_ntasks, and returns with p_lock held.
4699 	 */
4700 	tk = task_create(0, zone);
4701 	oldtk = task_join(tk, 0);
4702 	mutex_exit(&cpu_lock);
4703 
4704 	pp->p_flag |= SZONETOP;
4705 	pp->p_zone = zone;
4706 
4707 	/*
4708 	 * call RCTLOP_SET functions on this proc
4709 	 */
4710 	e.rcep_p.zone = zone;
4711 	e.rcep_t = RCENTITY_ZONE;
4712 	(void) rctl_set_dup(NULL, NULL, pp, &e, zone->zone_rctls, NULL,
4713 	    RCD_CALLBACK);
4714 	mutex_exit(&pp->p_lock);
4715 
4716 	/*
4717 	 * We don't need to hold any of zsched's locks here; not only do we know
4718 	 * the process and zone aren't going away, we know its session isn't
4719 	 * changing either.
4720 	 *
4721 	 * By joining zsched's session here, we mimic the behavior in the
4722 	 * global zone of init's sid being the pid of sched.  We extend this
4723 	 * to all zlogin-like zone_enter()'ing processes as well.
4724 	 */
4725 	mutex_enter(&pidlock);
4726 	sp = zone->zone_zsched->p_sessp;
4727 	sess_hold(zone->zone_zsched);
4728 	mutex_enter(&pp->p_lock);
4729 	pgexit(pp);
4730 	sess_rele(pp->p_sessp, B_TRUE);
4731 	pp->p_sessp = sp;
4732 	pgjoin(pp, zone->zone_zsched->p_pidp);
4733 
4734 	/*
4735 	 * If any threads are scheduled to be placed on zone wait queue they
4736 	 * should abandon the idea since the wait queue is changing.
4737 	 * We need to be holding pidlock & p_lock to do this.
4738 	 */
4739 	if ((t = pp->p_tlist) != NULL) {
4740 		do {
4741 			thread_lock(t);
4742 			/*
4743 			 * Kick this thread so that he doesn't sit
4744 			 * on a wrong wait queue.
4745 			 */
4746 			if (ISWAITING(t))
4747 				setrun_locked(t);
4748 
4749 			if (t->t_schedflag & TS_ANYWAITQ)
4750 				t->t_schedflag &= ~ TS_ANYWAITQ;
4751 
4752 			thread_unlock(t);
4753 		} while ((t = t->t_forw) != pp->p_tlist);
4754 	}
4755 
4756 	/*
4757 	 * If there is a default scheduling class for the zone and it is not
4758 	 * the class we are currently in, change all of the threads in the
4759 	 * process to the new class.  We need to be holding pidlock & p_lock
4760 	 * when we call parmsset so this is a good place to do it.
4761 	 */
4762 	if (zone->zone_defaultcid > 0 &&
4763 	    zone->zone_defaultcid != curthread->t_cid) {
4764 		pcparms_t pcparms;
4765 
4766 		pcparms.pc_cid = zone->zone_defaultcid;
4767 		pcparms.pc_clparms[0] = 0;
4768 
4769 		/*
4770 		 * If setting the class fails, we still want to enter the zone.
4771 		 */
4772 		if ((t = pp->p_tlist) != NULL) {
4773 			do {
4774 				(void) parmsset(&pcparms, t);
4775 			} while ((t = t->t_forw) != pp->p_tlist);
4776 		}
4777 	}
4778 
4779 	mutex_exit(&pp->p_lock);
4780 	mutex_exit(&pidlock);
4781 
4782 	mutex_exit(&zonehash_lock);
4783 	/*
4784 	 * We're firmly in the zone; let pools progress.
4785 	 */
4786 	pool_unlock();
4787 	task_rele(oldtk);
4788 	/*
4789 	 * We don't need to retain a hold on the zone since we already
4790 	 * incremented zone_ntasks, so the zone isn't going anywhere.
4791 	 */
4792 	zone_rele(zone);
4793 
4794 	/*
4795 	 * Chroot
4796 	 */
4797 	vp = zone->zone_rootvp;
4798 	zone_chdir(vp, &PTOU(pp)->u_cdir, pp);
4799 	zone_chdir(vp, &PTOU(pp)->u_rdir, pp);
4800 
4801 	/*
4802 	 * Change process credentials
4803 	 */
4804 	newcr = cralloc();
4805 	mutex_enter(&pp->p_crlock);
4806 	cr = pp->p_cred;
4807 	crcopy_to(cr, newcr);
4808 	crsetzone(newcr, zone);
4809 	pp->p_cred = newcr;
4810 
4811 	/*
4812 	 * Restrict all process privilege sets to zone limit
4813 	 */
4814 	priv_intersect(zone->zone_privset, &CR_PPRIV(newcr));
4815 	priv_intersect(zone->zone_privset, &CR_EPRIV(newcr));
4816 	priv_intersect(zone->zone_privset, &CR_IPRIV(newcr));
4817 	priv_intersect(zone->zone_privset, &CR_LPRIV(newcr));
4818 	mutex_exit(&pp->p_crlock);
4819 	crset(pp, newcr);
4820 
4821 	/*
4822 	 * Adjust upcount to reflect zone entry.
4823 	 */
4824 	uid = crgetruid(newcr);
4825 	mutex_enter(&pidlock);
4826 	upcount_dec(uid, GLOBAL_ZONEID);
4827 	upcount_inc(uid, zoneid);
4828 	mutex_exit(&pidlock);
4829 
4830 	/*
4831 	 * Set up core file path and content.
4832 	 */
4833 	set_core_defaults();
4834 
4835 out:
4836 	/*
4837 	 * Let the other lwps continue.
4838 	 */
4839 	mutex_enter(&pp->p_lock);
4840 	if (curthread != pp->p_agenttp)
4841 		continuelwps(pp);
4842 	mutex_exit(&pp->p_lock);
4843 
4844 	return (err != 0 ? set_errno(err) : 0);
4845 }
4846 
4847 /*
4848  * Systemcall entry point for zone_list(2).
4849  *
4850  * Processes running in a (non-global) zone only see themselves.
4851  * On labeled systems, they see all zones whose label they dominate.
4852  */
4853 static int
4854 zone_list(zoneid_t *zoneidlist, uint_t *numzones)
4855 {
4856 	zoneid_t *zoneids;
4857 	zone_t *zone, *myzone;
4858 	uint_t user_nzones, real_nzones;
4859 	uint_t domi_nzones;
4860 	int error;
4861 
4862 	if (copyin(numzones, &user_nzones, sizeof (uint_t)) != 0)
4863 		return (set_errno(EFAULT));
4864 
4865 	myzone = curproc->p_zone;
4866 	if (myzone != global_zone) {
4867 		bslabel_t *mybslab;
4868 
4869 		if (!is_system_labeled()) {
4870 			/* just return current zone */
4871 			real_nzones = domi_nzones = 1;
4872 			zoneids = kmem_alloc(sizeof (zoneid_t), KM_SLEEP);
4873 			zoneids[0] = myzone->zone_id;
4874 		} else {
4875 			/* return all zones that are dominated */
4876 			mutex_enter(&zonehash_lock);
4877 			real_nzones = zonecount;
4878 			domi_nzones = 0;
4879 			if (real_nzones > 0) {
4880 				zoneids = kmem_alloc(real_nzones *
4881 				    sizeof (zoneid_t), KM_SLEEP);
4882 				mybslab = label2bslabel(myzone->zone_slabel);
4883 				for (zone = list_head(&zone_active);
4884 				    zone != NULL;
4885 				    zone = list_next(&zone_active, zone)) {
4886 					if (zone->zone_id == GLOBAL_ZONEID)
4887 						continue;
4888 					if (zone != myzone &&
4889 					    (zone->zone_flags & ZF_IS_SCRATCH))
4890 						continue;
4891 					/*
4892 					 * Note that a label always dominates
4893 					 * itself, so myzone is always included
4894 					 * in the list.
4895 					 */
4896 					if (bldominates(mybslab,
4897 					    label2bslabel(zone->zone_slabel))) {
4898 						zoneids[domi_nzones++] =
4899 						    zone->zone_id;
4900 					}
4901 				}
4902 			}
4903 			mutex_exit(&zonehash_lock);
4904 		}
4905 	} else {
4906 		mutex_enter(&zonehash_lock);
4907 		real_nzones = zonecount;
4908 		domi_nzones = 0;
4909 		if (real_nzones > 0) {
4910 			zoneids = kmem_alloc(real_nzones * sizeof (zoneid_t),
4911 			    KM_SLEEP);
4912 			for (zone = list_head(&zone_active); zone != NULL;
4913 			    zone = list_next(&zone_active, zone))
4914 				zoneids[domi_nzones++] = zone->zone_id;
4915 			ASSERT(domi_nzones == real_nzones);
4916 		}
4917 		mutex_exit(&zonehash_lock);
4918 	}
4919 
4920 	/*
4921 	 * If user has allocated space for fewer entries than we found, then
4922 	 * return only up to his limit.  Either way, tell him exactly how many
4923 	 * we found.
4924 	 */
4925 	if (domi_nzones < user_nzones)
4926 		user_nzones = domi_nzones;
4927 	error = 0;
4928 	if (copyout(&domi_nzones, numzones, sizeof (uint_t)) != 0) {
4929 		error = EFAULT;
4930 	} else if (zoneidlist != NULL && user_nzones != 0) {
4931 		if (copyout(zoneids, zoneidlist,
4932 		    user_nzones * sizeof (zoneid_t)) != 0)
4933 			error = EFAULT;
4934 	}
4935 
4936 	if (real_nzones > 0)
4937 		kmem_free(zoneids, real_nzones * sizeof (zoneid_t));
4938 
4939 	if (error != 0)
4940 		return (set_errno(error));
4941 	else
4942 		return (0);
4943 }
4944 
4945 /*
4946  * Systemcall entry point for zone_lookup(2).
4947  *
4948  * Non-global zones are only able to see themselves and (on labeled systems)
4949  * the zones they dominate.
4950  */
4951 static zoneid_t
4952 zone_lookup(const char *zone_name)
4953 {
4954 	char *kname;
4955 	zone_t *zone;
4956 	zoneid_t zoneid;
4957 	int err;
4958 
4959 	if (zone_name == NULL) {
4960 		/* return caller's zone id */
4961 		return (getzoneid());
4962 	}
4963 
4964 	kname = kmem_zalloc(ZONENAME_MAX, KM_SLEEP);
4965 	if ((err = copyinstr(zone_name, kname, ZONENAME_MAX, NULL)) != 0) {
4966 		kmem_free(kname, ZONENAME_MAX);
4967 		return (set_errno(err));
4968 	}
4969 
4970 	mutex_enter(&zonehash_lock);
4971 	zone = zone_find_all_by_name(kname);
4972 	kmem_free(kname, ZONENAME_MAX);
4973 	/*
4974 	 * In a non-global zone, can only lookup global and own name.
4975 	 * In Trusted Extensions zone label dominance rules apply.
4976 	 */
4977 	if (zone == NULL ||
4978 	    zone_status_get(zone) < ZONE_IS_READY ||
4979 	    !zone_list_access(zone)) {
4980 		mutex_exit(&zonehash_lock);
4981 		return (set_errno(EINVAL));
4982 	} else {
4983 		zoneid = zone->zone_id;
4984 		mutex_exit(&zonehash_lock);
4985 		return (zoneid);
4986 	}
4987 }
4988 
4989 static int
4990 zone_version(int *version_arg)
4991 {
4992 	int version = ZONE_SYSCALL_API_VERSION;
4993 
4994 	if (copyout(&version, version_arg, sizeof (int)) != 0)
4995 		return (set_errno(EFAULT));
4996 	return (0);
4997 }
4998 
4999 /* ARGSUSED */
5000 long
5001 zone(int cmd, void *arg1, void *arg2, void *arg3, void *arg4)
5002 {
5003 	zone_def zs;
5004 
5005 	switch (cmd) {
5006 	case ZONE_CREATE:
5007 		if (get_udatamodel() == DATAMODEL_NATIVE) {
5008 			if (copyin(arg1, &zs, sizeof (zone_def))) {
5009 				return (set_errno(EFAULT));
5010 			}
5011 		} else {
5012 #ifdef _SYSCALL32_IMPL
5013 			zone_def32 zs32;
5014 
5015 			if (copyin(arg1, &zs32, sizeof (zone_def32))) {
5016 				return (set_errno(EFAULT));
5017 			}
5018 			zs.zone_name =
5019 			    (const char *)(unsigned long)zs32.zone_name;
5020 			zs.zone_root =
5021 			    (const char *)(unsigned long)zs32.zone_root;
5022 			zs.zone_privs =
5023 			    (const struct priv_set *)
5024 			    (unsigned long)zs32.zone_privs;
5025 			zs.zone_privssz = zs32.zone_privssz;
5026 			zs.rctlbuf = (caddr_t)(unsigned long)zs32.rctlbuf;
5027 			zs.rctlbufsz = zs32.rctlbufsz;
5028 			zs.zfsbuf = (caddr_t)(unsigned long)zs32.zfsbuf;
5029 			zs.zfsbufsz = zs32.zfsbufsz;
5030 			zs.extended_error =
5031 			    (int *)(unsigned long)zs32.extended_error;
5032 			zs.match = zs32.match;
5033 			zs.doi = zs32.doi;
5034 			zs.label = (const bslabel_t *)(uintptr_t)zs32.label;
5035 			zs.flags = zs32.flags;
5036 #else
5037 			panic("get_udatamodel() returned bogus result\n");
5038 #endif
5039 		}
5040 
5041 		return (zone_create(zs.zone_name, zs.zone_root,
5042 		    zs.zone_privs, zs.zone_privssz,
5043 		    (caddr_t)zs.rctlbuf, zs.rctlbufsz,
5044 		    (caddr_t)zs.zfsbuf, zs.zfsbufsz,
5045 		    zs.extended_error, zs.match, zs.doi,
5046 		    zs.label, zs.flags));
5047 	case ZONE_BOOT:
5048 		return (zone_boot((zoneid_t)(uintptr_t)arg1));
5049 	case ZONE_DESTROY:
5050 		return (zone_destroy((zoneid_t)(uintptr_t)arg1));
5051 	case ZONE_GETATTR:
5052 		return (zone_getattr((zoneid_t)(uintptr_t)arg1,
5053 		    (int)(uintptr_t)arg2, arg3, (size_t)arg4));
5054 	case ZONE_SETATTR:
5055 		return (zone_setattr((zoneid_t)(uintptr_t)arg1,
5056 		    (int)(uintptr_t)arg2, arg3, (size_t)arg4));
5057 	case ZONE_ENTER:
5058 		return (zone_enter((zoneid_t)(uintptr_t)arg1));
5059 	case ZONE_LIST:
5060 		return (zone_list((zoneid_t *)arg1, (uint_t *)arg2));
5061 	case ZONE_SHUTDOWN:
5062 		return (zone_shutdown((zoneid_t)(uintptr_t)arg1));
5063 	case ZONE_LOOKUP:
5064 		return (zone_lookup((const char *)arg1));
5065 	case ZONE_VERSION:
5066 		return (zone_version((int *)arg1));
5067 	case ZONE_ADD_DATALINK:
5068 		return (zone_add_datalink((zoneid_t)(uintptr_t)arg1,
5069 		    (char *)arg2));
5070 	case ZONE_DEL_DATALINK:
5071 		return (zone_remove_datalink((zoneid_t)(uintptr_t)arg1,
5072 		    (char *)arg2));
5073 	case ZONE_CHECK_DATALINK:
5074 		return (zone_check_datalink((zoneid_t *)arg1, (char *)arg2));
5075 	case ZONE_LIST_DATALINK:
5076 		return (zone_list_datalink((zoneid_t)(uintptr_t)arg1,
5077 		    (int *)arg2, (char *)arg3));
5078 	default:
5079 		return (set_errno(EINVAL));
5080 	}
5081 }
5082 
5083 struct zarg {
5084 	zone_t *zone;
5085 	zone_cmd_arg_t arg;
5086 };
5087 
5088 static int
5089 zone_lookup_door(const char *zone_name, door_handle_t *doorp)
5090 {
5091 	char *buf;
5092 	size_t buflen;
5093 	int error;
5094 
5095 	buflen = sizeof (ZONE_DOOR_PATH) + strlen(zone_name);
5096 	buf = kmem_alloc(buflen, KM_SLEEP);
5097 	(void) snprintf(buf, buflen, ZONE_DOOR_PATH, zone_name);
5098 	error = door_ki_open(buf, doorp);
5099 	kmem_free(buf, buflen);
5100 	return (error);
5101 }
5102 
5103 static void
5104 zone_release_door(door_handle_t *doorp)
5105 {
5106 	door_ki_rele(*doorp);
5107 	*doorp = NULL;
5108 }
5109 
5110 static void
5111 zone_ki_call_zoneadmd(struct zarg *zargp)
5112 {
5113 	door_handle_t door = NULL;
5114 	door_arg_t darg, save_arg;
5115 	char *zone_name;
5116 	size_t zone_namelen;
5117 	zoneid_t zoneid;
5118 	zone_t *zone;
5119 	zone_cmd_arg_t arg;
5120 	uint64_t uniqid;
5121 	size_t size;
5122 	int error;
5123 	int retry;
5124 
5125 	zone = zargp->zone;
5126 	arg = zargp->arg;
5127 	kmem_free(zargp, sizeof (*zargp));
5128 
5129 	zone_namelen = strlen(zone->zone_name) + 1;
5130 	zone_name = kmem_alloc(zone_namelen, KM_SLEEP);
5131 	bcopy(zone->zone_name, zone_name, zone_namelen);
5132 	zoneid = zone->zone_id;
5133 	uniqid = zone->zone_uniqid;
5134 	/*
5135 	 * zoneadmd may be down, but at least we can empty out the zone.
5136 	 * We can ignore the return value of zone_empty() since we're called
5137 	 * from a kernel thread and know we won't be delivered any signals.
5138 	 */
5139 	ASSERT(curproc == &p0);
5140 	(void) zone_empty(zone);
5141 	ASSERT(zone_status_get(zone) >= ZONE_IS_EMPTY);
5142 	zone_rele(zone);
5143 
5144 	size = sizeof (arg);
5145 	darg.rbuf = (char *)&arg;
5146 	darg.data_ptr = (char *)&arg;
5147 	darg.rsize = size;
5148 	darg.data_size = size;
5149 	darg.desc_ptr = NULL;
5150 	darg.desc_num = 0;
5151 
5152 	save_arg = darg;
5153 	/*
5154 	 * Since we're not holding a reference to the zone, any number of
5155 	 * things can go wrong, including the zone disappearing before we get a
5156 	 * chance to talk to zoneadmd.
5157 	 */
5158 	for (retry = 0; /* forever */; retry++) {
5159 		if (door == NULL &&
5160 		    (error = zone_lookup_door(zone_name, &door)) != 0) {
5161 			goto next;
5162 		}
5163 		ASSERT(door != NULL);
5164 
5165 		if ((error = door_ki_upcall(door, &darg)) == 0) {
5166 			break;
5167 		}
5168 		switch (error) {
5169 		case EINTR:
5170 			/* FALLTHROUGH */
5171 		case EAGAIN:	/* process may be forking */
5172 			/*
5173 			 * Back off for a bit
5174 			 */
5175 			break;
5176 		case EBADF:
5177 			zone_release_door(&door);
5178 			if (zone_lookup_door(zone_name, &door) != 0) {
5179 				/*
5180 				 * zoneadmd may be dead, but it may come back to
5181 				 * life later.
5182 				 */
5183 				break;
5184 			}
5185 			break;
5186 		default:
5187 			cmn_err(CE_WARN,
5188 			    "zone_ki_call_zoneadmd: door_ki_upcall error %d\n",
5189 			    error);
5190 			goto out;
5191 		}
5192 next:
5193 		/*
5194 		 * If this isn't the same zone_t that we originally had in mind,
5195 		 * then this is the same as if two kadmin requests come in at
5196 		 * the same time: the first one wins.  This means we lose, so we
5197 		 * bail.
5198 		 */
5199 		if ((zone = zone_find_by_id(zoneid)) == NULL) {
5200 			/*
5201 			 * Problem is solved.
5202 			 */
5203 			break;
5204 		}
5205 		if (zone->zone_uniqid != uniqid) {
5206 			/*
5207 			 * zoneid recycled
5208 			 */
5209 			zone_rele(zone);
5210 			break;
5211 		}
5212 		/*
5213 		 * We could zone_status_timedwait(), but there doesn't seem to
5214 		 * be much point in doing that (plus, it would mean that
5215 		 * zone_free() isn't called until this thread exits).
5216 		 */
5217 		zone_rele(zone);
5218 		delay(hz);
5219 		darg = save_arg;
5220 	}
5221 out:
5222 	if (door != NULL) {
5223 		zone_release_door(&door);
5224 	}
5225 	kmem_free(zone_name, zone_namelen);
5226 	thread_exit();
5227 }
5228 
5229 /*
5230  * Entry point for uadmin() to tell the zone to go away or reboot.  Analog to
5231  * kadmin().  The caller is a process in the zone.
5232  *
5233  * In order to shutdown the zone, we will hand off control to zoneadmd
5234  * (running in the global zone) via a door.  We do a half-hearted job at
5235  * killing all processes in the zone, create a kernel thread to contact
5236  * zoneadmd, and make note of the "uniqid" of the zone.  The uniqid is
5237  * a form of generation number used to let zoneadmd (as well as
5238  * zone_destroy()) know exactly which zone they're re talking about.
5239  */
5240 int
5241 zone_kadmin(int cmd, int fcn, const char *mdep, cred_t *credp)
5242 {
5243 	struct zarg *zargp;
5244 	zone_cmd_t zcmd;
5245 	zone_t *zone;
5246 
5247 	zone = curproc->p_zone;
5248 	ASSERT(getzoneid() != GLOBAL_ZONEID);
5249 
5250 	switch (cmd) {
5251 	case A_SHUTDOWN:
5252 		switch (fcn) {
5253 		case AD_HALT:
5254 		case AD_POWEROFF:
5255 			zcmd = Z_HALT;
5256 			break;
5257 		case AD_BOOT:
5258 			zcmd = Z_REBOOT;
5259 			break;
5260 		case AD_IBOOT:
5261 		case AD_SBOOT:
5262 		case AD_SIBOOT:
5263 		case AD_NOSYNC:
5264 			return (ENOTSUP);
5265 		default:
5266 			return (EINVAL);
5267 		}
5268 		break;
5269 	case A_REBOOT:
5270 		zcmd = Z_REBOOT;
5271 		break;
5272 	case A_FTRACE:
5273 	case A_REMOUNT:
5274 	case A_FREEZE:
5275 	case A_DUMP:
5276 		return (ENOTSUP);
5277 	default:
5278 		ASSERT(cmd != A_SWAPCTL);	/* handled by uadmin() */
5279 		return (EINVAL);
5280 	}
5281 
5282 	if (secpolicy_zone_admin(credp, B_FALSE))
5283 		return (EPERM);
5284 	mutex_enter(&zone_status_lock);
5285 
5286 	/*
5287 	 * zone_status can't be ZONE_IS_EMPTY or higher since curproc
5288 	 * is in the zone.
5289 	 */
5290 	ASSERT(zone_status_get(zone) < ZONE_IS_EMPTY);
5291 	if (zone_status_get(zone) > ZONE_IS_RUNNING) {
5292 		/*
5293 		 * This zone is already on its way down.
5294 		 */
5295 		mutex_exit(&zone_status_lock);
5296 		return (0);
5297 	}
5298 	/*
5299 	 * Prevent future zone_enter()s
5300 	 */
5301 	zone_status_set(zone, ZONE_IS_SHUTTING_DOWN);
5302 	mutex_exit(&zone_status_lock);
5303 
5304 	/*
5305 	 * Kill everyone now and call zoneadmd later.
5306 	 * zone_ki_call_zoneadmd() will do a more thorough job of this
5307 	 * later.
5308 	 */
5309 	killall(zone->zone_id);
5310 	/*
5311 	 * Now, create the thread to contact zoneadmd and do the rest of the
5312 	 * work.  This thread can't be created in our zone otherwise
5313 	 * zone_destroy() would deadlock.
5314 	 */
5315 	zargp = kmem_zalloc(sizeof (*zargp), KM_SLEEP);
5316 	zargp->arg.cmd = zcmd;
5317 	zargp->arg.uniqid = zone->zone_uniqid;
5318 	zargp->zone = zone;
5319 	(void) strcpy(zargp->arg.locale, "C");
5320 	/* mdep was already copied in for us by uadmin */
5321 	if (mdep != NULL)
5322 		(void) strlcpy(zargp->arg.bootbuf, mdep,
5323 		    sizeof (zargp->arg.bootbuf));
5324 	zone_hold(zone);
5325 
5326 	(void) thread_create(NULL, 0, zone_ki_call_zoneadmd, zargp, 0, &p0,
5327 	    TS_RUN, minclsyspri);
5328 	exit(CLD_EXITED, 0);
5329 
5330 	return (EINVAL);
5331 }
5332 
5333 /*
5334  * Entry point so kadmin(A_SHUTDOWN, ...) can set the global zone's
5335  * status to ZONE_IS_SHUTTING_DOWN.
5336  */
5337 void
5338 zone_shutdown_global(void)
5339 {
5340 	ASSERT(curproc->p_zone == global_zone);
5341 
5342 	mutex_enter(&zone_status_lock);
5343 	ASSERT(zone_status_get(global_zone) == ZONE_IS_RUNNING);
5344 	zone_status_set(global_zone, ZONE_IS_SHUTTING_DOWN);
5345 	mutex_exit(&zone_status_lock);
5346 }
5347 
5348 /*
5349  * Returns true if the named dataset is visible in the current zone.
5350  * The 'write' parameter is set to 1 if the dataset is also writable.
5351  */
5352 int
5353 zone_dataset_visible(const char *dataset, int *write)
5354 {
5355 	zone_dataset_t *zd;
5356 	size_t len;
5357 	zone_t *zone = curproc->p_zone;
5358 
5359 	if (dataset[0] == '\0')
5360 		return (0);
5361 
5362 	/*
5363 	 * Walk the list once, looking for datasets which match exactly, or
5364 	 * specify a dataset underneath an exported dataset.  If found, return
5365 	 * true and note that it is writable.
5366 	 */
5367 	for (zd = list_head(&zone->zone_datasets); zd != NULL;
5368 	    zd = list_next(&zone->zone_datasets, zd)) {
5369 
5370 		len = strlen(zd->zd_dataset);
5371 		if (strlen(dataset) >= len &&
5372 		    bcmp(dataset, zd->zd_dataset, len) == 0 &&
5373 		    (dataset[len] == '\0' || dataset[len] == '/' ||
5374 		    dataset[len] == '@')) {
5375 			if (write)
5376 				*write = 1;
5377 			return (1);
5378 		}
5379 	}
5380 
5381 	/*
5382 	 * Walk the list a second time, searching for datasets which are parents
5383 	 * of exported datasets.  These should be visible, but read-only.
5384 	 *
5385 	 * Note that we also have to support forms such as 'pool/dataset/', with
5386 	 * a trailing slash.
5387 	 */
5388 	for (zd = list_head(&zone->zone_datasets); zd != NULL;
5389 	    zd = list_next(&zone->zone_datasets, zd)) {
5390 
5391 		len = strlen(dataset);
5392 		if (dataset[len - 1] == '/')
5393 			len--;	/* Ignore trailing slash */
5394 		if (len < strlen(zd->zd_dataset) &&
5395 		    bcmp(dataset, zd->zd_dataset, len) == 0 &&
5396 		    zd->zd_dataset[len] == '/') {
5397 			if (write)
5398 				*write = 0;
5399 			return (1);
5400 		}
5401 	}
5402 
5403 	return (0);
5404 }
5405 
5406 /*
5407  * zone_find_by_any_path() -
5408  *
5409  * kernel-private routine similar to zone_find_by_path(), but which
5410  * effectively compares against zone paths rather than zonerootpath
5411  * (i.e., the last component of zonerootpaths, which should be "root/",
5412  * are not compared.)  This is done in order to accurately identify all
5413  * paths, whether zone-visible or not, including those which are parallel
5414  * to /root/, such as /dev/, /home/, etc...
5415  *
5416  * If the specified path does not fall under any zone path then global
5417  * zone is returned.
5418  *
5419  * The treat_abs parameter indicates whether the path should be treated as
5420  * an absolute path although it does not begin with "/".  (This supports
5421  * nfs mount syntax such as host:any/path.)
5422  *
5423  * The caller is responsible for zone_rele of the returned zone.
5424  */
5425 zone_t *
5426 zone_find_by_any_path(const char *path, boolean_t treat_abs)
5427 {
5428 	zone_t *zone;
5429 	int path_offset = 0;
5430 
5431 	if (path == NULL) {
5432 		zone_hold(global_zone);
5433 		return (global_zone);
5434 	}
5435 
5436 	if (*path != '/') {
5437 		ASSERT(treat_abs);
5438 		path_offset = 1;
5439 	}
5440 
5441 	mutex_enter(&zonehash_lock);
5442 	for (zone = list_head(&zone_active); zone != NULL;
5443 	    zone = list_next(&zone_active, zone)) {
5444 		char	*c;
5445 		size_t	pathlen;
5446 		char *rootpath_start;
5447 
5448 		if (zone == global_zone)	/* skip global zone */
5449 			continue;
5450 
5451 		/* scan backwards to find start of last component */
5452 		c = zone->zone_rootpath + zone->zone_rootpathlen - 2;
5453 		do {
5454 			c--;
5455 		} while (*c != '/');
5456 
5457 		pathlen = c - zone->zone_rootpath + 1 - path_offset;
5458 		rootpath_start = (zone->zone_rootpath + path_offset);
5459 		if (strncmp(path, rootpath_start, pathlen) == 0)
5460 			break;
5461 	}
5462 	if (zone == NULL)
5463 		zone = global_zone;
5464 	zone_hold(zone);
5465 	mutex_exit(&zonehash_lock);
5466 	return (zone);
5467 }
5468 
5469 /* List of data link names which are accessible from the zone */
5470 struct dlnamelist {
5471 	char			dlnl_name[LIFNAMSIZ];
5472 	struct dlnamelist	*dlnl_next;
5473 };
5474 
5475 
5476 /*
5477  * Check whether the datalink name (dlname) itself is present.
5478  * Return true if found.
5479  */
5480 static boolean_t
5481 zone_dlname(zone_t *zone, char *dlname)
5482 {
5483 	struct dlnamelist *dlnl;
5484 	boolean_t found = B_FALSE;
5485 
5486 	mutex_enter(&zone->zone_lock);
5487 	for (dlnl = zone->zone_dl_list; dlnl != NULL; dlnl = dlnl->dlnl_next) {
5488 		if (strncmp(dlnl->dlnl_name, dlname, LIFNAMSIZ) == 0) {
5489 			found = B_TRUE;
5490 			break;
5491 		}
5492 	}
5493 	mutex_exit(&zone->zone_lock);
5494 	return (found);
5495 }
5496 
5497 /*
5498  * Add an data link name for the zone. Does not check for duplicates.
5499  */
5500 static int
5501 zone_add_datalink(zoneid_t zoneid, char *dlname)
5502 {
5503 	struct dlnamelist *dlnl;
5504 	zone_t *zone;
5505 	zone_t *thiszone;
5506 	int err;
5507 
5508 	dlnl = kmem_zalloc(sizeof (struct dlnamelist), KM_SLEEP);
5509 	if ((err = copyinstr(dlname, dlnl->dlnl_name, LIFNAMSIZ, NULL)) != 0) {
5510 		kmem_free(dlnl, sizeof (struct dlnamelist));
5511 		return (set_errno(err));
5512 	}
5513 
5514 	thiszone = zone_find_by_id(zoneid);
5515 	if (thiszone == NULL) {
5516 		kmem_free(dlnl, sizeof (struct dlnamelist));
5517 		return (set_errno(ENXIO));
5518 	}
5519 
5520 	/*
5521 	 * Verify that the datalink name isn't already used by a different
5522 	 * zone while allowing duplicate entries for the same zone (e.g. due
5523 	 * to both using IPv4 and IPv6 on an interface)
5524 	 */
5525 	mutex_enter(&zonehash_lock);
5526 	for (zone = list_head(&zone_active); zone != NULL;
5527 	    zone = list_next(&zone_active, zone)) {
5528 		if (zone->zone_id == zoneid)
5529 			continue;
5530 
5531 		if (zone_dlname(zone, dlnl->dlnl_name)) {
5532 			mutex_exit(&zonehash_lock);
5533 			zone_rele(thiszone);
5534 			kmem_free(dlnl, sizeof (struct dlnamelist));
5535 			return (set_errno(EPERM));
5536 		}
5537 	}
5538 	mutex_enter(&thiszone->zone_lock);
5539 	dlnl->dlnl_next = thiszone->zone_dl_list;
5540 	thiszone->zone_dl_list = dlnl;
5541 	mutex_exit(&thiszone->zone_lock);
5542 	mutex_exit(&zonehash_lock);
5543 	zone_rele(thiszone);
5544 	return (0);
5545 }
5546 
5547 static int
5548 zone_remove_datalink(zoneid_t zoneid, char *dlname)
5549 {
5550 	struct dlnamelist *dlnl, *odlnl, **dlnlp;
5551 	zone_t *zone;
5552 	int err;
5553 
5554 	dlnl = kmem_zalloc(sizeof (struct dlnamelist), KM_SLEEP);
5555 	if ((err = copyinstr(dlname, dlnl->dlnl_name, LIFNAMSIZ, NULL)) != 0) {
5556 		kmem_free(dlnl, sizeof (struct dlnamelist));
5557 		return (set_errno(err));
5558 	}
5559 	zone = zone_find_by_id(zoneid);
5560 	if (zone == NULL) {
5561 		kmem_free(dlnl, sizeof (struct dlnamelist));
5562 		return (set_errno(EINVAL));
5563 	}
5564 
5565 	mutex_enter(&zone->zone_lock);
5566 	/* Look for match */
5567 	dlnlp = &zone->zone_dl_list;
5568 	while (*dlnlp != NULL) {
5569 		if (strncmp(dlnl->dlnl_name, (*dlnlp)->dlnl_name,
5570 		    LIFNAMSIZ) == 0)
5571 			goto found;
5572 		dlnlp = &((*dlnlp)->dlnl_next);
5573 	}
5574 	mutex_exit(&zone->zone_lock);
5575 	zone_rele(zone);
5576 	kmem_free(dlnl, sizeof (struct dlnamelist));
5577 	return (set_errno(ENXIO));
5578 
5579 found:
5580 	odlnl = *dlnlp;
5581 	*dlnlp = (*dlnlp)->dlnl_next;
5582 	kmem_free(odlnl, sizeof (struct dlnamelist));
5583 
5584 	mutex_exit(&zone->zone_lock);
5585 	zone_rele(zone);
5586 	kmem_free(dlnl, sizeof (struct dlnamelist));
5587 	return (0);
5588 }
5589 
5590 /*
5591  * Using the zoneidp as ALL_ZONES, we can lookup which zone is using datalink
5592  * name (dlname); otherwise we just check if the specified zoneidp has access
5593  * to the datalink name.
5594  */
5595 static int
5596 zone_check_datalink(zoneid_t *zoneidp, char *dlname)
5597 {
5598 	zoneid_t id;
5599 	char *dln;
5600 	zone_t *zone;
5601 	int err = 0;
5602 	boolean_t allzones = B_FALSE;
5603 
5604 	if (copyin(zoneidp, &id, sizeof (id)) != 0) {
5605 		return (set_errno(EFAULT));
5606 	}
5607 	dln = kmem_zalloc(LIFNAMSIZ, KM_SLEEP);
5608 	if ((err = copyinstr(dlname, dln, LIFNAMSIZ, NULL)) != 0) {
5609 		kmem_free(dln, LIFNAMSIZ);
5610 		return (set_errno(err));
5611 	}
5612 
5613 	if (id == ALL_ZONES)
5614 		allzones = B_TRUE;
5615 
5616 	/*
5617 	 * Check whether datalink name is already used.
5618 	 */
5619 	mutex_enter(&zonehash_lock);
5620 	for (zone = list_head(&zone_active); zone != NULL;
5621 	    zone = list_next(&zone_active, zone)) {
5622 		if (allzones || (id == zone->zone_id)) {
5623 			if (!zone_dlname(zone, dln))
5624 				continue;
5625 			if (allzones)
5626 				err = copyout(&zone->zone_id, zoneidp,
5627 				    sizeof (*zoneidp));
5628 
5629 			mutex_exit(&zonehash_lock);
5630 			kmem_free(dln, LIFNAMSIZ);
5631 			return (err ? set_errno(EFAULT) : 0);
5632 		}
5633 	}
5634 
5635 	/* datalink name is not found in any active zone. */
5636 	mutex_exit(&zonehash_lock);
5637 	kmem_free(dln, LIFNAMSIZ);
5638 	return (set_errno(ENXIO));
5639 }
5640 
5641 /*
5642  * Get the names of the datalinks assigned to a zone.
5643  * Here *nump is the number of datalinks, and the assumption
5644  * is that the caller will gurantee that the the supplied buffer is
5645  * big enough to hold at least #*nump datalink names, that is,
5646  * LIFNAMSIZ X *nump
5647  * On return, *nump will be the "new" number of datalinks, if it
5648  * ever changed.
5649  */
5650 static int
5651 zone_list_datalink(zoneid_t zoneid, int *nump, char *buf)
5652 {
5653 	int num, dlcount;
5654 	zone_t *zone;
5655 	struct dlnamelist *dlnl;
5656 	char *ptr;
5657 
5658 	if (copyin(nump, &dlcount, sizeof (dlcount)) != 0)
5659 		return (set_errno(EFAULT));
5660 
5661 	zone = zone_find_by_id(zoneid);
5662 	if (zone == NULL) {
5663 		return (set_errno(ENXIO));
5664 	}
5665 
5666 	num = 0;
5667 	mutex_enter(&zone->zone_lock);
5668 	ptr = buf;
5669 	for (dlnl = zone->zone_dl_list; dlnl != NULL; dlnl = dlnl->dlnl_next) {
5670 		/*
5671 		 * If the list changed and the new number is bigger
5672 		 * than what the caller supplied, just count, don't
5673 		 * do copyout
5674 		 */
5675 		if (++num > dlcount)
5676 			continue;
5677 		if (copyout(dlnl->dlnl_name, ptr, LIFNAMSIZ) != 0) {
5678 			mutex_exit(&zone->zone_lock);
5679 			zone_rele(zone);
5680 			return (set_errno(EFAULT));
5681 		}
5682 		ptr += LIFNAMSIZ;
5683 	}
5684 	mutex_exit(&zone->zone_lock);
5685 	zone_rele(zone);
5686 
5687 	/* Increased or decreased, caller should be notified. */
5688 	if (num != dlcount) {
5689 		if (copyout(&num, nump, sizeof (num)) != 0) {
5690 			return (set_errno(EFAULT));
5691 		}
5692 	}
5693 	return (0);
5694 }
5695 
5696 /*
5697  * Public interface for looking up a zone by zoneid. It's a customized version
5698  * for netstack_zone_create(), it:
5699  * 1. Doesn't acquire the zonehash_lock, since it is called from
5700  *    zone_key_create() or zone_zsd_configure(), lock already held.
5701  * 2. Doesn't check the status of the zone.
5702  * 3. It will be called even before zone_init is called, in that case the
5703  *    address of zone0 is returned directly, and netstack_zone_create()
5704  *    will only assign a value to zone0.zone_netstack, won't break anything.
5705  */
5706 zone_t *
5707 zone_find_by_id_nolock(zoneid_t zoneid)
5708 {
5709 	ASSERT(MUTEX_HELD(&zonehash_lock));
5710 
5711 	if (zonehashbyid == NULL)
5712 		return (&zone0);
5713 	else
5714 		return (zone_find_all_by_id(zoneid));
5715 }
5716