1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2015, Joyent Inc. All rights reserved.
25 * Copyright (c) 2016 by Delphix. All rights reserved.
26 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
27 */
28
29 /*
30 * Zones
31 *
32 * A zone is a named collection of processes, namespace constraints,
33 * and other system resources which comprise a secure and manageable
34 * application containment facility.
35 *
36 * Zones (represented by the reference counted zone_t) are tracked in
37 * the kernel in the zonehash. Elsewhere in the kernel, Zone IDs
38 * (zoneid_t) are used to track zone association. Zone IDs are
39 * dynamically generated when the zone is created; if a persistent
40 * identifier is needed (core files, accounting logs, audit trail,
41 * etc.), the zone name should be used.
42 *
43 *
44 * Global Zone:
45 *
46 * The global zone (zoneid 0) is automatically associated with all
47 * system resources that have not been bound to a user-created zone.
48 * This means that even systems where zones are not in active use
49 * have a global zone, and all processes, mounts, etc. are
50 * associated with that zone. The global zone is generally
51 * unconstrained in terms of privileges and access, though the usual
52 * credential and privilege based restrictions apply.
53 *
54 *
55 * Zone States:
56 *
57  * The states a zone may be in, and the transitions between them, are as
58  * follows:
59 *
60 * ZONE_IS_UNINITIALIZED: primordial state for a zone. The partially
61 * initialized zone is added to the list of active zones on the system but
62 * isn't accessible.
63 *
64  * ZONE_IS_INITIALIZED: initialization is complete except that the ZSD
65  * constructor callbacks have not yet run. It is not possible to enter
66  * the zone, but attributes can be retrieved.
67 *
68 * ZONE_IS_READY: zsched (the kernel dummy process for a zone) is
69 * ready. The zone is made visible after the ZSD constructor callbacks are
70 * executed. A zone remains in this state until it transitions into
71 * the ZONE_IS_BOOTING state as a result of a call to zone_boot().
72 *
73  * ZONE_IS_BOOTING: in this short-lived state, zsched attempts to start
74 * init. Should that fail, the zone proceeds to the ZONE_IS_SHUTTING_DOWN
75 * state.
76 *
77 * ZONE_IS_RUNNING: The zone is open for business: zsched has
78 * successfully started init. A zone remains in this state until
79 * zone_shutdown() is called.
80 *
81 * ZONE_IS_SHUTTING_DOWN: zone_shutdown() has been called, the system is
82 * killing all processes running in the zone. The zone remains
83 * in this state until there are no more user processes running in the zone.
84 * zone_create(), zone_enter(), and zone_destroy() on this zone will fail.
85 * Since zone_shutdown() is restartable, it may be called successfully
86 * multiple times for the same zone_t. Setting of the zone's state to
87 * ZONE_IS_SHUTTING_DOWN is synchronized with mounts, so VOP_MOUNT() may check
88 * the zone's status without worrying about it being a moving target.
89 *
90 * ZONE_IS_EMPTY: zone_shutdown() has been called, and there
91 * are no more user processes in the zone. The zone remains in this
92 * state until there are no more kernel threads associated with the
93 * zone. zone_create(), zone_enter(), and zone_destroy() on this zone will
94 * fail.
95 *
96 * ZONE_IS_DOWN: All kernel threads doing work on behalf of the zone
97 * have exited. zone_shutdown() returns. Henceforth it is not possible to
98 * join the zone or create kernel threads therein.
99 *
100  * ZONE_IS_DYING: zone_destroy() has been called on the zone; the zone
101 * remains in this state until zsched exits. Calls to zone_find_by_*()
102 * return NULL from now on.
103 *
104 * ZONE_IS_DEAD: zsched has exited (zone_ntasks == 0). There are no
105 * processes or threads doing work on behalf of the zone. The zone is
106 * removed from the list of active zones. zone_destroy() returns, and
107 * the zone can be recreated.
108 *
109 * ZONE_IS_FREE (internal state): zone_ref goes to 0, ZSD destructor
110 * callbacks are executed, and all memory associated with the zone is
111 * freed.
112 *
113 * Threads can wait for the zone to enter a requested state by using
114 * zone_status_wait() or zone_status_timedwait() with the desired
115 * state passed in as an argument. Zone state transitions are
116 * uni-directional; it is not possible to move back to an earlier state.
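 *
 * For illustration only (assuming the caller already holds a reference
 * on the zone, e.g. one returned by zone_find_by_id()), a thread that
 * must not proceed until a zone has finished booting could simply do:
 *
 *	zone_status_wait(zone, ZONE_IS_RUNNING);
 *
 * Because transitions are uni-directional, this returns once the zone
 * has reached ZONE_IS_RUNNING or any later state.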
117 *
118 *
119 * Zone-Specific Data:
120 *
121 * Subsystems needing to maintain zone-specific data can store that
122 * data using the ZSD mechanism. This provides a zone-specific data
123 * store, similar to thread-specific data (see pthread_getspecific(3C)
124  * or the TSD code in uts/common/disp/thread.c). Also, ZSD can be used
125 * to register callbacks to be invoked when a zone is created, shut
126 * down, or destroyed. This can be used to initialize zone-specific
127 * data for new zones and to clean up when zones go away.
128 *
129 *
130 * Data Structures:
131 *
132 * The per-zone structure (zone_t) is reference counted, and freed
133 * when all references are released. zone_hold and zone_rele can be
134 * used to adjust the reference count. In addition, reference counts
135 * associated with the cred_t structure are tracked separately using
136 * zone_cred_hold and zone_cred_rele.
137 *
138 * Pointers to active zone_t's are stored in two hash tables; one
139 * for searching by id, the other for searching by name. Lookups
140 * can be performed on either basis, using zone_find_by_id and
141 * zone_find_by_name. Both return zone_t pointers with the zone
142 * held, so zone_rele should be called when the pointer is no longer
143 * needed. Zones can also be searched by path; zone_find_by_path
144 * returns the zone with which a path name is associated (global
145 * zone if the path is not within some other zone's file system
146 * hierarchy). This currently requires iterating through each zone,
147 * so it is slower than an id or name search via a hash table.
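 *
 * A sketch of the lookup/release discipline described above, for
 * illustration only:
 *
 *	zone_t *zone;
 *
 *	if ((zone = zone_find_by_id(zoneid)) != NULL) {
 *		... use zone; the hold keeps it from being freed ...
 *		zone_rele(zone);
 *	}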
148 *
149 *
150 * Locking:
151 *
152 * zonehash_lock: This is a top-level global lock used to protect the
153 * zone hash tables and lists. Zones cannot be created or destroyed
154 * while this lock is held.
155 * zone_status_lock: This is a global lock protecting zone state.
156 * Zones cannot change state while this lock is held. It also
157 * protects the list of kernel threads associated with a zone.
158 * zone_lock: This is a per-zone lock used to protect several fields of
159 * the zone_t (see <sys/zone.h> for details). In addition, holding
160 * this lock means that the zone cannot go away.
161 * zone_nlwps_lock: This is a per-zone lock used to protect the fields
162 * related to the zone.max-lwps rctl.
163 * zone_mem_lock: This is a per-zone lock used to protect the fields
164 * related to the zone.max-locked-memory and zone.max-swap rctls.
165 * zone_rctl_lock: This is a per-zone lock used to protect other rctls,
166 * currently just max_lofi
167 * zsd_key_lock: This is a global lock protecting the key state for ZSD.
168 * zone_deathrow_lock: This is a global lock protecting the "deathrow"
169 * list (a list of zones in the ZONE_IS_DEAD state).
170 *
171 * Ordering requirements:
172 * pool_lock --> cpu_lock --> zonehash_lock --> zone_status_lock -->
173 * zone_lock --> zsd_key_lock --> pidlock --> p_lock
174 *
175 * When taking zone_mem_lock or zone_nlwps_lock, the lock ordering is:
176 * zonehash_lock --> a_lock --> pidlock --> p_lock --> zone_mem_lock
177 * zonehash_lock --> a_lock --> pidlock --> p_lock --> zone_nlwps_lock
178 *
179 * Blocking memory allocations are permitted while holding any of the
180 * zone locks.
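 *
 * As an illustrative sketch of the ordering above (not a verbatim
 * excerpt of any particular caller), code that wants a stable view of
 * a zone's state while the hash tables are locked would acquire the
 * locks left-to-right and release them in reverse:
 *
 *	mutex_enter(&zonehash_lock);
 *	mutex_enter(&zone_status_lock);
 *	status = zone_status_get(zone);
 *	mutex_exit(&zone_status_lock);
 *	mutex_exit(&zonehash_lock);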
181 *
182 *
183 * System Call Interface:
184 *
185 * The zone subsystem can be managed and queried from user level with
186 * the following system calls (all subcodes of the primary "zone"
187 * system call):
188 * - zone_create: creates a zone with selected attributes (name,
189 * root path, privileges, resource controls, ZFS datasets)
190 * - zone_enter: allows the current process to enter a zone
191 * - zone_getattr: reports attributes of a zone
192 * - zone_setattr: set attributes of a zone
193 * - zone_boot: set 'init' running for the zone
194 * - zone_list: lists all zones active in the system
195 * - zone_lookup: looks up zone id based on name
196 * - zone_shutdown: initiates shutdown process (see states above)
197 * - zone_destroy: completes shutdown process (see states above)
198 *
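 * From user level these subcodes are normally reached through the libc
 * wrappers declared in <zone.h>. As a rough sketch only (the wrapper
 * signatures and ZONE_ATTR_NAME should be checked against <zone.h>
 * rather than taken from this comment), listing the zones on a system
 * and printing each zone's name might look like:
 *
 *	zoneid_t ids[64];
 *	uint_t nids = 64;
 *	char name[ZONENAME_MAX];
 *	uint_t i;
 *
 *	if (zone_list(ids, &nids) == 0) {
 *		for (i = 0; i < nids && i < 64; i++) {
 *			if (zone_getattr(ids[i], ZONE_ATTR_NAME,
 *			    name, sizeof (name)) >= 0)
 *				(void) printf("%d %s\n", (int)ids[i], name);
 *		}
 *	}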
199 */
200
201 #include <sys/priv_impl.h>
202 #include <sys/cred.h>
203 #include <c2/audit.h>
204 #include <sys/debug.h>
205 #include <sys/file.h>
206 #include <sys/kmem.h>
207 #include <sys/kstat.h>
208 #include <sys/mutex.h>
209 #include <sys/note.h>
210 #include <sys/pathname.h>
211 #include <sys/proc.h>
212 #include <sys/project.h>
213 #include <sys/sysevent.h>
214 #include <sys/task.h>
215 #include <sys/systm.h>
216 #include <sys/types.h>
217 #include <sys/utsname.h>
218 #include <sys/vnode.h>
219 #include <sys/vfs.h>
220 #include <sys/systeminfo.h>
221 #include <sys/policy.h>
222 #include <sys/cred_impl.h>
223 #include <sys/contract_impl.h>
224 #include <sys/contract/process_impl.h>
225 #include <sys/class.h>
226 #include <sys/pool.h>
227 #include <sys/pool_pset.h>
228 #include <sys/pset.h>
229 #include <sys/strlog.h>
230 #include <sys/sysmacros.h>
231 #include <sys/callb.h>
232 #include <sys/vmparam.h>
233 #include <sys/corectl.h>
234 #include <sys/ipc_impl.h>
235 #include <sys/klpd.h>
236
237 #include <sys/door.h>
238 #include <sys/cpuvar.h>
239 #include <sys/sdt.h>
240
241 #include <sys/uadmin.h>
242 #include <sys/session.h>
243 #include <sys/cmn_err.h>
244 #include <sys/modhash.h>
245 #include <sys/sunddi.h>
246 #include <sys/nvpair.h>
247 #include <sys/rctl.h>
248 #include <sys/fss.h>
249 #include <sys/brand.h>
250 #include <sys/zone.h>
251 #include <net/if.h>
252 #include <sys/cpucaps.h>
253 #include <vm/seg.h>
254 #include <sys/mac.h>
255
256 /*
257 * This constant specifies the number of seconds that threads waiting for
258 * subsystems to release a zone's general-purpose references will wait before
259 * they log the zone's reference counts. The constant's value shouldn't
260 * be so small that reference counts are unnecessarily reported for zones
261 * whose references are slowly released. On the other hand, it shouldn't be so
262 * large that users reboot their systems out of frustration over hung zones
263 * before the system logs the zones' reference counts.
264 */
265 #define ZONE_DESTROY_TIMEOUT_SECS 60
266
267 /* List of data link IDs which are accessible from the zone */
268 typedef struct zone_dl {
269 datalink_id_t zdl_id;
270 nvlist_t *zdl_net;
271 list_node_t zdl_linkage;
272 } zone_dl_t;
273
274 /*
275 * cv used to signal that all references to the zone have been released. This
276 * needs to be global since there may be multiple waiters, and the first to
277 * wake up will free the zone_t, hence we cannot use zone->zone_cv.
278 */
279 static kcondvar_t zone_destroy_cv;
280 /*
281 * Lock used to serialize access to zone_cv. This could have been per-zone,
282 * but then we'd need another lock for zone_destroy_cv, and why bother?
283 */
284 static kmutex_t zone_status_lock;
285
286 /*
287 * ZSD-related global variables.
288 */
289 static kmutex_t zsd_key_lock; /* protects the following two */
290 /*
291 * The next caller of zone_key_create() will be assigned a key of ++zsd_keyval.
292 */
293 static zone_key_t zsd_keyval = 0;
294 /*
295 * Global list of registered keys. We use this when a new zone is created.
296 */
297 static list_t zsd_registered_keys;
298
299 int zone_hash_size = 256;
300 static mod_hash_t *zonehashbyname, *zonehashbyid, *zonehashbylabel;
301 static kmutex_t zonehash_lock;
302 static uint_t zonecount;
303 static id_space_t *zoneid_space;
304
305 /*
306 * The global zone (aka zone0) is the all-seeing, all-knowing zone in which the
307 * kernel proper runs, and which manages all other zones.
308 *
309 * Although not declared as static, the variable "zone0" should not be used
310  * except by code that needs to reference the global zone early in boot,
311 * before it is fully initialized. All other consumers should use
312 * 'global_zone'.
313 */
314 zone_t zone0;
315 zone_t *global_zone = NULL; /* Set when the global zone is initialized */
316
317 /*
318 * List of active zones, protected by zonehash_lock.
319 */
320 static list_t zone_active;
321
322 /*
323 * List of destroyed zones that still have outstanding cred references.
324 * Used for debugging. Uses a separate lock to avoid lock ordering
325 * problems in zone_free.
326 */
327 static list_t zone_deathrow;
328 static kmutex_t zone_deathrow_lock;
329
330 /* number of zones is limited by virtual interface limit in IP */
331 uint_t maxzones = 8192;
332
333 /* Event channel to send zone state change notifications */
334 evchan_t *zone_event_chan;
335
336 /*
337 * This table holds the mapping from kernel zone states to
338 * states visible in the state notification API.
339 * The idea is that we only expose "obvious" states and
340 * do not expose states which are just implementation details.
341 */
342 const char *zone_status_table[] = {
343 ZONE_EVENT_UNINITIALIZED, /* uninitialized */
344 ZONE_EVENT_INITIALIZED, /* initialized */
345 ZONE_EVENT_READY, /* ready */
346 ZONE_EVENT_READY, /* booting */
347 ZONE_EVENT_RUNNING, /* running */
348 ZONE_EVENT_SHUTTING_DOWN, /* shutting_down */
349 ZONE_EVENT_SHUTTING_DOWN, /* empty */
350 ZONE_EVENT_SHUTTING_DOWN, /* down */
351 ZONE_EVENT_SHUTTING_DOWN, /* dying */
352 ZONE_EVENT_UNINITIALIZED, /* dead */
353 };
354
355 /*
356 * This array contains the names of the subsystems listed in zone_ref_subsys_t
357 * (see sys/zone.h).
358 */
359 static char *zone_ref_subsys_names[] = {
360 "NFS", /* ZONE_REF_NFS */
361 "NFSv4", /* ZONE_REF_NFSV4 */
362 "SMBFS", /* ZONE_REF_SMBFS */
363 "MNTFS", /* ZONE_REF_MNTFS */
364 "LOFI", /* ZONE_REF_LOFI */
365 "VFS", /* ZONE_REF_VFS */
366 "IPC" /* ZONE_REF_IPC */
367 };
368
369 /*
370 * This isn't static so lint doesn't complain.
371 */
372 rctl_hndl_t rc_zone_cpu_shares;
373 rctl_hndl_t rc_zone_locked_mem;
374 rctl_hndl_t rc_zone_max_swap;
375 rctl_hndl_t rc_zone_max_lofi;
376 rctl_hndl_t rc_zone_cpu_cap;
377 rctl_hndl_t rc_zone_nlwps;
378 rctl_hndl_t rc_zone_nprocs;
379 rctl_hndl_t rc_zone_shmmax;
380 rctl_hndl_t rc_zone_shmmni;
381 rctl_hndl_t rc_zone_semmni;
382 rctl_hndl_t rc_zone_msgmni;
383
384 const char * const zone_default_initname = "/sbin/init";
385 static char * const zone_prefix = "/zone/";
386 static int zone_shutdown(zoneid_t zoneid);
387 static int zone_add_datalink(zoneid_t, datalink_id_t);
388 static int zone_remove_datalink(zoneid_t, datalink_id_t);
389 static int zone_list_datalink(zoneid_t, int *, datalink_id_t *);
390 static int zone_set_network(zoneid_t, zone_net_data_t *);
391 static int zone_get_network(zoneid_t, zone_net_data_t *);
392
393 typedef boolean_t zsd_applyfn_t(kmutex_t *, boolean_t, zone_t *, zone_key_t);
394
395 static void zsd_apply_all_zones(zsd_applyfn_t *, zone_key_t);
396 static void zsd_apply_all_keys(zsd_applyfn_t *, zone_t *);
397 static boolean_t zsd_apply_create(kmutex_t *, boolean_t, zone_t *, zone_key_t);
398 static boolean_t zsd_apply_shutdown(kmutex_t *, boolean_t, zone_t *,
399 zone_key_t);
400 static boolean_t zsd_apply_destroy(kmutex_t *, boolean_t, zone_t *, zone_key_t);
401 static boolean_t zsd_wait_for_creator(zone_t *, struct zsd_entry *,
402 kmutex_t *);
403 static boolean_t zsd_wait_for_inprogress(zone_t *, struct zsd_entry *,
404 kmutex_t *);
405
406 /*
407  * Bump this number when you alter the zone syscall interfaces; we need
408  * to keep support for previous API versions in libc to support patching,
409  * and libc calls into the kernel to determine this number.
410 *
411  * Version 1 of the API is the version originally shipped with Solaris 10.
412  * Version 2 alters the zone_create system call in order to support more
413  * arguments by moving the args into a structure, and to do better
414 * error reporting when zone_create() fails.
415 * Version 3 alters the zone_create system call in order to support the
416 * import of ZFS datasets to zones.
417 * Version 4 alters the zone_create system call in order to support
418 * Trusted Extensions.
419 * Version 5 alters the zone_boot system call, and converts its old
420 * bootargs parameter to be set by the zone_setattr API instead.
421 * Version 6 adds the flag argument to zone_create.
422 */
423 static const int ZONE_SYSCALL_API_VERSION = 6;
424
425 /*
426 * Certain filesystems (such as NFS and autofs) need to know which zone
427 * the mount is being placed in. Because of this, we need to be able to
428 * ensure that a zone isn't in the process of being created/destroyed such
429 * that nfs_mount() thinks it is in the global/NGZ zone, while by the time
430  * it gets added to the list of mounted zones, it ends up on the wrong zone's
431 * mount list. Since a zone can't reside on an NFS file system, we don't
432 * have to worry about the zonepath itself.
433 *
434 * The following functions: block_mounts()/resume_mounts() and
435 * mount_in_progress()/mount_completed() are used by zones and the VFS
436 * layer (respectively) to synchronize zone state transitions and new
437  * mounts within a zone. This synchronization is on a per-zone basis, so
438 * activity for one zone will not interfere with activity for another zone.
439 *
440  * The semantics are like a reader-reader lock: there may be multiple
441  * mounts (or multiple zone state transitions, were they not already
442  * serialized by zonehash_lock) in progress at the same time, but never
443  * both at once.
444 *
445 * We use cv's so the user can ctrl-C out of the operation if it's
446 * taking too long.
447 *
448 * The semantics are such that there is unfair bias towards the
449 * "current" operation. This means that zone halt may starve if
450 * there is a rapid succession of new mounts coming in to the zone.
451 */
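/*
 * As an illustrative sketch of the protocol above (not a verbatim
 * excerpt of the VFS or zone code), the two sides pair up roughly as
 * follows.  The VFS layer brackets each mount:
 *
 *	mount_in_progress(zone);
 *	error = VFS_MOUNT(...);
 *	mount_completed(zone);
 *
 * while a zone state transition brackets itself with:
 *
 *	if (block_mounts(zone)) {
 *		... change the zone's state ...
 *		resume_mounts(zone);
 *	}
 */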
452 /*
453 * Prevent new mounts from progressing to the point of calling
454 * VFS_MOUNT(). If there are already mounts in this "region", wait for
455 * them to complete.
456 */
457 static int
458 block_mounts(zone_t *zp)
459 {
460 int retval = 0;
461
462 /*
463 * Since it may block for a long time, block_mounts() shouldn't be
464 * called with zonehash_lock held.
465 */
466 ASSERT(MUTEX_NOT_HELD(&zonehash_lock));
467 mutex_enter(&zp->zone_mount_lock);
468 while (zp->zone_mounts_in_progress > 0) {
469 if (cv_wait_sig(&zp->zone_mount_cv, &zp->zone_mount_lock) == 0)
470 goto signaled;
471 }
472 /*
473 * A negative value of mounts_in_progress indicates that mounts
474 * have been blocked by (-mounts_in_progress) different callers
475 * (remotely possible if two threads enter zone_shutdown at the same
476 * time).
477 */
478 zp->zone_mounts_in_progress--;
479 retval = 1;
480 signaled:
481 mutex_exit(&zp->zone_mount_lock);
482 return (retval);
483 }
484
485 /*
486 * The VFS layer may progress with new mounts as far as we're concerned.
487 * Allow them to progress if we were the last obstacle.
488 */
489 static void
490 resume_mounts(zone_t *zp)
491 {
492 mutex_enter(&zp->zone_mount_lock);
493 if (++zp->zone_mounts_in_progress == 0)
494 cv_broadcast(&zp->zone_mount_cv);
495 mutex_exit(&zp->zone_mount_lock);
496 }
497
498 /*
499  * The VFS layer is about to perform a mount in this zone; wait until
500  * mounts are not blocked (see block_mounts()), then record it as in progress.
501 */
502 void
503 mount_in_progress(zone_t *zp)
504 {
505 mutex_enter(&zp->zone_mount_lock);
506 while (zp->zone_mounts_in_progress < 0)
507 cv_wait(&zp->zone_mount_cv, &zp->zone_mount_lock);
508 zp->zone_mounts_in_progress++;
509 mutex_exit(&zp->zone_mount_lock);
510 }
511
512 /*
513 * VFS is done with one mount; wake up any waiting block_mounts()
514 * callers if this is the last mount.
515 */
516 void
517 mount_completed(zone_t *zp)
518 {
519 mutex_enter(&zp->zone_mount_lock);
520 if (--zp->zone_mounts_in_progress == 0)
521 cv_broadcast(&zp->zone_mount_cv);
522 mutex_exit(&zp->zone_mount_lock);
523 }
524
525 /*
526 * ZSD routines.
527 *
528 * Zone Specific Data (ZSD) is modeled after Thread Specific Data as
529 * defined by the pthread_key_create() and related interfaces.
530 *
531 * Kernel subsystems may register one or more data items and/or
532 * callbacks to be executed when a zone is created, shutdown, or
533 * destroyed.
534 *
535 * Unlike the thread counterpart, destructor callbacks will be executed
536 * even if the data pointer is NULL and/or there are no constructor
537 * callbacks, so it is the responsibility of such callbacks to check for
538 * NULL data values if necessary.
539 *
540 * The locking strategy and overall picture is as follows:
541 *
542 * When someone calls zone_key_create(), a template ZSD entry is added to the
543 * global list "zsd_registered_keys", protected by zsd_key_lock. While
544 * holding that lock all the existing zones are marked as
545 * ZSD_CREATE_NEEDED and a copy of the ZSD entry added to the per-zone
546 * zone_zsd list (protected by zone_lock). The global list is updated first
547  * (under zsd_key_lock) to make sure that newly created zones use the
548 * most recent list of keys. Then under zonehash_lock we walk the zones
549 * and mark them. Similar locking is used in zone_key_delete().
550 *
551 * The actual create, shutdown, and destroy callbacks are done without
552 * holding any lock. And zsd_flags are used to ensure that the operations
553 * completed so that when zone_key_create (and zone_create) is done, as well as
554 * zone_key_delete (and zone_destroy) is done, all the necessary callbacks
555 * are completed.
556 *
557  * When new zones are created, constructor callbacks for all registered ZSD
558 * entries will be called. That also uses the above two phases of marking
559 * what needs to be done, and then running the callbacks without holding
560 * any locks.
561 *
562 * The framework does not provide any locking around zone_getspecific() and
563 * zone_setspecific() apart from that needed for internal consistency, so
564 * callers interested in atomic "test-and-set" semantics will need to provide
565 * their own locking.
566 */
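/*
 * A minimal, illustrative consumer of this interface (the "foo" names
 * below are hypothetical, not an existing subsystem) would look roughly
 * like:
 *
 *	static zone_key_t foo_zone_key;
 *
 *	static void *
 *	foo_zone_create(zoneid_t zoneid)
 *	{
 *		return (kmem_zalloc(sizeof (foo_zone_data_t), KM_SLEEP));
 *	}
 *
 *	static void
 *	foo_zone_destroy(zoneid_t zoneid, void *data)
 *	{
 *		kmem_free(data, sizeof (foo_zone_data_t));
 *	}
 *
 *	void
 *	foo_init(void)
 *	{
 *		zone_key_create(&foo_zone_key, foo_zone_create, NULL,
 *		    foo_zone_destroy);
 *	}
 *
 * Per-zone state is then retrieved with zone_getspecific(foo_zone_key,
 * zone) and updated with zone_setspecific().
 */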
567
568 /*
569 * Helper function to find the zsd_entry associated with the key in the
570 * given list.
571 */
572 static struct zsd_entry *
573 zsd_find(list_t *l, zone_key_t key)
574 {
575 struct zsd_entry *zsd;
576
577 for (zsd = list_head(l); zsd != NULL; zsd = list_next(l, zsd)) {
578 if (zsd->zsd_key == key) {
579 return (zsd);
580 }
581 }
582 return (NULL);
583 }
584
585 /*
586 * Helper function to find the zsd_entry associated with the key in the
587 * given list. Move it to the front of the list.
588 */
589 static struct zsd_entry *
590 zsd_find_mru(list_t *l, zone_key_t key)
591 {
592 struct zsd_entry *zsd;
593
594 for (zsd = list_head(l); zsd != NULL; zsd = list_next(l, zsd)) {
595 if (zsd->zsd_key == key) {
596 /*
597 * Move to head of list to keep list in MRU order.
598 */
599 if (zsd != list_head(l)) {
600 list_remove(l, zsd);
601 list_insert_head(l, zsd);
602 }
603 return (zsd);
604 }
605 }
606 return (NULL);
607 }
608
609 void
610 zone_key_create(zone_key_t *keyp, void *(*create)(zoneid_t),
611 void (*shutdown)(zoneid_t, void *), void (*destroy)(zoneid_t, void *))
612 {
613 struct zsd_entry *zsdp;
614 struct zsd_entry *t;
615 struct zone *zone;
616 zone_key_t key;
617
618 zsdp = kmem_zalloc(sizeof (*zsdp), KM_SLEEP);
619 zsdp->zsd_data = NULL;
620 zsdp->zsd_create = create;
621 zsdp->zsd_shutdown = shutdown;
622 zsdp->zsd_destroy = destroy;
623
624 /*
625 * Insert in global list of callbacks. Makes future zone creations
626 * see it.
627 */
628 mutex_enter(&zsd_key_lock);
629 key = zsdp->zsd_key = ++zsd_keyval;
630 ASSERT(zsd_keyval != 0);
631 list_insert_tail(&zsd_registered_keys, zsdp);
632 mutex_exit(&zsd_key_lock);
633
634 /*
635 * Insert for all existing zones and mark them as needing
636 * a create callback.
637 */
638 mutex_enter(&zonehash_lock); /* stop the world */
639 for (zone = list_head(&zone_active); zone != NULL;
640 zone = list_next(&zone_active, zone)) {
641 zone_status_t status;
642
643 mutex_enter(&zone->zone_lock);
644
645 /* Skip zones that are on the way down or not yet up */
646 status = zone_status_get(zone);
647 if (status >= ZONE_IS_DOWN ||
648 status == ZONE_IS_UNINITIALIZED) {
649 mutex_exit(&zone->zone_lock);
650 continue;
651 }
652
653 t = zsd_find_mru(&zone->zone_zsd, key);
654 if (t != NULL) {
655 /*
656 * zone_zsd_configure() already inserted it after
657 * we dropped zsd_key_lock above.
658 */
659 mutex_exit(&zone->zone_lock);
660 continue;
661 }
662 t = kmem_zalloc(sizeof (*t), KM_SLEEP);
663 t->zsd_key = key;
664 t->zsd_create = create;
665 t->zsd_shutdown = shutdown;
666 t->zsd_destroy = destroy;
667 if (create != NULL) {
668 t->zsd_flags = ZSD_CREATE_NEEDED;
669 DTRACE_PROBE2(zsd__create__needed,
670 zone_t *, zone, zone_key_t, key);
671 }
672 list_insert_tail(&zone->zone_zsd, t);
673 mutex_exit(&zone->zone_lock);
674 }
675 mutex_exit(&zonehash_lock);
676
677 if (create != NULL) {
678 /* Now call the create callback for this key */
679 zsd_apply_all_zones(zsd_apply_create, key);
680 }
681 /*
682 * It is safe for consumers to use the key now; make it
683 * globally visible. Specifically, zone_getspecific() will
684 * always successfully return the zone specific data associated
685 * with the key.
686 */
687 *keyp = key;
688
689 }
690
691 /*
692 * Function called when a module is being unloaded, or otherwise wishes
693 * to unregister its ZSD key and callbacks.
694 *
695 * Remove from the global list and determine the functions that need to
696 * be called under a global lock. Then call the functions without
697 * holding any locks. Finally free up the zone_zsd entries. (The apply
698 * functions need to access the zone_zsd entries to find zsd_data etc.)
699 */
700 int
701 zone_key_delete(zone_key_t key)
702 {
703 struct zsd_entry *zsdp = NULL;
704 zone_t *zone;
705
706 mutex_enter(&zsd_key_lock);
707 zsdp = zsd_find_mru(&zsd_registered_keys, key);
708 if (zsdp == NULL) {
709 mutex_exit(&zsd_key_lock);
710 return (-1);
711 }
712 list_remove(&zsd_registered_keys, zsdp);
713 mutex_exit(&zsd_key_lock);
714
715 mutex_enter(&zonehash_lock);
716 for (zone = list_head(&zone_active); zone != NULL;
717 zone = list_next(&zone_active, zone)) {
718 struct zsd_entry *del;
719
720 mutex_enter(&zone->zone_lock);
721 del = zsd_find_mru(&zone->zone_zsd, key);
722 if (del == NULL) {
723 /*
724 * Somebody else got here first, e.g. the zone going
725 * away.
726 */
727 mutex_exit(&zone->zone_lock);
728 continue;
729 }
730 ASSERT(del->zsd_shutdown == zsdp->zsd_shutdown);
731 ASSERT(del->zsd_destroy == zsdp->zsd_destroy);
732 if (del->zsd_shutdown != NULL &&
733 (del->zsd_flags & ZSD_SHUTDOWN_ALL) == 0) {
734 del->zsd_flags |= ZSD_SHUTDOWN_NEEDED;
735 DTRACE_PROBE2(zsd__shutdown__needed,
736 zone_t *, zone, zone_key_t, key);
737 }
738 if (del->zsd_destroy != NULL &&
739 (del->zsd_flags & ZSD_DESTROY_ALL) == 0) {
740 del->zsd_flags |= ZSD_DESTROY_NEEDED;
741 DTRACE_PROBE2(zsd__destroy__needed,
742 zone_t *, zone, zone_key_t, key);
743 }
744 mutex_exit(&zone->zone_lock);
745 }
746 mutex_exit(&zonehash_lock);
747 kmem_free(zsdp, sizeof (*zsdp));
748
749 /* Now call the shutdown and destroy callback for this key */
750 zsd_apply_all_zones(zsd_apply_shutdown, key);
751 zsd_apply_all_zones(zsd_apply_destroy, key);
752
753 /* Now we can free up the zsdp structures in each zone */
754 mutex_enter(&zonehash_lock);
755 for (zone = list_head(&zone_active); zone != NULL;
756 zone = list_next(&zone_active, zone)) {
757 struct zsd_entry *del;
758
759 mutex_enter(&zone->zone_lock);
760 del = zsd_find(&zone->zone_zsd, key);
761 if (del != NULL) {
762 list_remove(&zone->zone_zsd, del);
763 ASSERT(!(del->zsd_flags & ZSD_ALL_INPROGRESS));
764 kmem_free(del, sizeof (*del));
765 }
766 mutex_exit(&zone->zone_lock);
767 }
768 mutex_exit(&zonehash_lock);
769
770 return (0);
771 }
772
773 /*
774 * ZSD counterpart of pthread_setspecific().
775 *
776 * Since all zsd callbacks, including those with no create function,
777 * have an entry in zone_zsd, if the key is registered it is part of
778 * the zone_zsd list.
779 * Return an error if the key wasn't registered.
780 */
781 int
782 zone_setspecific(zone_key_t key, zone_t *zone, const void *data)
783 {
784 struct zsd_entry *t;
785
786 mutex_enter(&zone->zone_lock);
787 t = zsd_find_mru(&zone->zone_zsd, key);
788 if (t != NULL) {
789 /*
790 * Replace old value with new
791 */
792 t->zsd_data = (void *)data;
793 mutex_exit(&zone->zone_lock);
794 return (0);
795 }
796 mutex_exit(&zone->zone_lock);
797 return (-1);
798 }
799
800 /*
801 * ZSD counterpart of pthread_getspecific().
802 */
803 void *
804 zone_getspecific(zone_key_t key, zone_t *zone)
805 {
806 struct zsd_entry *t;
807 void *data;
808
809 mutex_enter(&zone->zone_lock);
810 t = zsd_find_mru(&zone->zone_zsd, key);
811 data = (t == NULL ? NULL : t->zsd_data);
812 mutex_exit(&zone->zone_lock);
813 return (data);
814 }
815
816 /*
817 * Function used to initialize a zone's list of ZSD callbacks and data
818 * when the zone is being created. The callbacks are initialized from
819 * the template list (zsd_registered_keys). The constructor callback is
820 * executed later (once the zone exists and with locks dropped).
821 */
822 static void
823 zone_zsd_configure(zone_t *zone)
824 {
825 struct zsd_entry *zsdp;
826 struct zsd_entry *t;
827
828 ASSERT(MUTEX_HELD(&zonehash_lock));
829 ASSERT(list_head(&zone->zone_zsd) == NULL);
830 mutex_enter(&zone->zone_lock);
831 mutex_enter(&zsd_key_lock);
832 for (zsdp = list_head(&zsd_registered_keys); zsdp != NULL;
833 zsdp = list_next(&zsd_registered_keys, zsdp)) {
834 /*
835 * Since this zone is still ZONE_IS_UNINITIALIZED, zone_key_create()
836 * should not have added anything to it.
837 */
838 ASSERT(zsd_find(&zone->zone_zsd, zsdp->zsd_key) == NULL);
839
840 t = kmem_zalloc(sizeof (*t), KM_SLEEP);
841 t->zsd_key = zsdp->zsd_key;
842 t->zsd_create = zsdp->zsd_create;
843 t->zsd_shutdown = zsdp->zsd_shutdown;
844 t->zsd_destroy = zsdp->zsd_destroy;
845 if (zsdp->zsd_create != NULL) {
846 t->zsd_flags = ZSD_CREATE_NEEDED;
847 DTRACE_PROBE2(zsd__create__needed,
848 zone_t *, zone, zone_key_t, zsdp->zsd_key);
849 }
850 list_insert_tail(&zone->zone_zsd, t);
851 }
852 mutex_exit(&zsd_key_lock);
853 mutex_exit(&zone->zone_lock);
854 }
855
856 enum zsd_callback_type { ZSD_CREATE, ZSD_SHUTDOWN, ZSD_DESTROY };
857
858 /*
859 * Helper function to execute shutdown or destructor callbacks.
860 */
861 static void
862 zone_zsd_callbacks(zone_t *zone, enum zsd_callback_type ct)
863 {
864 struct zsd_entry *t;
865
866 ASSERT(ct == ZSD_SHUTDOWN || ct == ZSD_DESTROY);
867 ASSERT(ct != ZSD_SHUTDOWN || zone_status_get(zone) >= ZONE_IS_EMPTY);
868 ASSERT(ct != ZSD_DESTROY || zone_status_get(zone) >= ZONE_IS_DOWN);
869
870 /*
871 * Run the callback solely based on what is registered for the zone
872 * in zone_zsd. The global list can change independently of this
873 * as keys are registered and unregistered and we don't register new
874 * callbacks for a zone that is in the process of going away.
875 */
876 mutex_enter(&zone->zone_lock);
877 for (t = list_head(&zone->zone_zsd); t != NULL;
878 t = list_next(&zone->zone_zsd, t)) {
879 zone_key_t key = t->zsd_key;
880
881 /* Skip if no callbacks registered */
882
883 if (ct == ZSD_SHUTDOWN) {
884 if (t->zsd_shutdown != NULL &&
885 (t->zsd_flags & ZSD_SHUTDOWN_ALL) == 0) {
886 t->zsd_flags |= ZSD_SHUTDOWN_NEEDED;
887 DTRACE_PROBE2(zsd__shutdown__needed,
888 zone_t *, zone, zone_key_t, key);
889 }
890 } else {
891 if (t->zsd_destroy != NULL &&
892 (t->zsd_flags & ZSD_DESTROY_ALL) == 0) {
893 t->zsd_flags |= ZSD_DESTROY_NEEDED;
894 DTRACE_PROBE2(zsd__destroy__needed,
895 zone_t *, zone, zone_key_t, key);
896 }
897 }
898 }
899 mutex_exit(&zone->zone_lock);
900
901 /* Now call the shutdown and destroy callback for this key */
902 zsd_apply_all_keys(zsd_apply_shutdown, zone);
903 zsd_apply_all_keys(zsd_apply_destroy, zone);
904
905 }
906
907 /*
908 * Called when the zone is going away; free ZSD-related memory, and
909 * destroy the zone_zsd list.
910 */
911 static void
912 zone_free_zsd(zone_t *zone)
913 {
914 struct zsd_entry *t, *next;
915
916 /*
917 * Free all the zsd_entry's we had on this zone.
918 */
919 mutex_enter(&zone->zone_lock);
920 for (t = list_head(&zone->zone_zsd); t != NULL; t = next) {
921 next = list_next(&zone->zone_zsd, t);
922 list_remove(&zone->zone_zsd, t);
923 ASSERT(!(t->zsd_flags & ZSD_ALL_INPROGRESS));
924 kmem_free(t, sizeof (*t));
925 }
926 list_destroy(&zone->zone_zsd);
927 mutex_exit(&zone->zone_lock);
928
929 }
930
931 /*
932 * Apply a function to all zones for particular key value.
933 *
934 * The applyfn has to drop zonehash_lock if it does some work, and
935 * then reacquire it before it returns.
936 * When the lock is dropped we don't follow list_next even
937 * if it is possible to do so without any hazards. This is
938 * because we want the design to allow for the list of zones
939 * to change in any arbitrary way during the time the
940 * lock was dropped.
941 *
942 * It is safe to restart the loop at list_head since the applyfn
943 * changes the zsd_flags as it does work, so a subsequent
944 * pass through will have no effect in applyfn, hence the loop will terminate
945 * in at worst O(N^2).
946 */
947 static void
948 zsd_apply_all_zones(zsd_applyfn_t *applyfn, zone_key_t key)
949 {
950 zone_t *zone;
951
952 mutex_enter(&zonehash_lock);
953 zone = list_head(&zone_active);
954 while (zone != NULL) {
955 if ((applyfn)(&zonehash_lock, B_FALSE, zone, key)) {
956 /* Lock dropped - restart at head */
957 zone = list_head(&zone_active);
958 } else {
959 zone = list_next(&zone_active, zone);
960 }
961 }
962 mutex_exit(&zonehash_lock);
963 }
964
965 /*
966 * Apply a function to all keys for a particular zone.
967 *
968 * The applyfn has to drop the zone's zone_lock if it does some work, and
969 * then reacquire it before it returns.
970 * When the lock is dropped we don't follow list_next even
971 * if it is possible to do so without any hazards. This is
972 * because we want the design to allow for the list of zsd callbacks
973 * to change in any arbitrary way during the time the
974 * lock was dropped.
975 *
976 * It is safe to restart the loop at list_head since the applyfn
977 * changes the zsd_flags as it does work, so a subsequent
978 * pass through will have no effect in applyfn, hence the loop will terminate
979 * in at worst O(N^2).
980 */
981 static void
982 zsd_apply_all_keys(zsd_applyfn_t *applyfn, zone_t *zone)
983 {
984 struct zsd_entry *t;
985
986 mutex_enter(&zone->zone_lock);
987 t = list_head(&zone->zone_zsd);
988 while (t != NULL) {
989 if ((applyfn)(NULL, B_TRUE, zone, t->zsd_key)) {
990 /* Lock dropped - restart at head */
991 t = list_head(&zone->zone_zsd);
992 } else {
993 t = list_next(&zone->zone_zsd, t);
994 }
995 }
996 mutex_exit(&zone->zone_lock);
997 }
998
999 /*
1000 * Call the create function for the zone and key if CREATE_NEEDED
1001 * is set.
1002 * If some other thread gets here first and sets CREATE_INPROGRESS, then
1003 * we wait for that thread to complete so that we can ensure that
1004 * all the callbacks are done when we've looped over all zones/keys.
1005 *
1006 * When we call the create function, we drop the global lock held by the
1007 * caller, and return true to tell the caller it needs to re-evaluate the
1008 * state.
1009 * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
1010 * remains held on exit.
1011 */
1012 static boolean_t
1013 zsd_apply_create(kmutex_t *lockp, boolean_t zone_lock_held,
1014 zone_t *zone, zone_key_t key)
1015 {
1016 void *result;
1017 struct zsd_entry *t;
1018 boolean_t dropped;
1019
1020 if (lockp != NULL) {
1021 ASSERT(MUTEX_HELD(lockp));
1022 }
1023 if (zone_lock_held) {
1024 ASSERT(MUTEX_HELD(&zone->zone_lock));
1025 } else {
1026 mutex_enter(&zone->zone_lock);
1027 }
1028
1029 t = zsd_find(&zone->zone_zsd, key);
1030 if (t == NULL) {
1031 /*
1032 * Somebody else got here first, e.g. the zone going
1033 * away.
1034 */
1035 if (!zone_lock_held)
1036 mutex_exit(&zone->zone_lock);
1037 return (B_FALSE);
1038 }
1039 dropped = B_FALSE;
1040 if (zsd_wait_for_inprogress(zone, t, lockp))
1041 dropped = B_TRUE;
1042
1043 if (t->zsd_flags & ZSD_CREATE_NEEDED) {
1044 t->zsd_flags &= ~ZSD_CREATE_NEEDED;
1045 t->zsd_flags |= ZSD_CREATE_INPROGRESS;
1046 DTRACE_PROBE2(zsd__create__inprogress,
1047 zone_t *, zone, zone_key_t, key);
1048 mutex_exit(&zone->zone_lock);
1049 if (lockp != NULL)
1050 mutex_exit(lockp);
1051
1052 dropped = B_TRUE;
1053 ASSERT(t->zsd_create != NULL);
1054 DTRACE_PROBE2(zsd__create__start,
1055 zone_t *, zone, zone_key_t, key);
1056
1057 result = (*t->zsd_create)(zone->zone_id);
1058
1059 DTRACE_PROBE2(zsd__create__end,
1060 zone_t *, zone, void *, result);
1061
1062 ASSERT(result != NULL);
1063 if (lockp != NULL)
1064 mutex_enter(lockp);
1065 mutex_enter(&zone->zone_lock);
1066 t->zsd_data = result;
1067 t->zsd_flags &= ~ZSD_CREATE_INPROGRESS;
1068 t->zsd_flags |= ZSD_CREATE_COMPLETED;
1069 cv_broadcast(&t->zsd_cv);
1070 DTRACE_PROBE2(zsd__create__completed,
1071 zone_t *, zone, zone_key_t, key);
1072 }
1073 if (!zone_lock_held)
1074 mutex_exit(&zone->zone_lock);
1075 return (dropped);
1076 }
1077
1078 /*
1079 * Call the shutdown function for the zone and key if SHUTDOWN_NEEDED
1080 * is set.
1081 * If some other thread gets here first and sets *_INPROGRESS, then
1082 * we wait for that thread to complete so that we can ensure that
1083 * all the callbacks are done when we've looped over all zones/keys.
1084 *
1085 * When we call the shutdown function, we drop the global lock held by the
1086 * caller, and return true to tell the caller it needs to re-evaluate the
1087 * state.
1088 * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
1089 * remains held on exit.
1090 */
1091 static boolean_t
1092 zsd_apply_shutdown(kmutex_t *lockp, boolean_t zone_lock_held,
1093 zone_t *zone, zone_key_t key)
1094 {
1095 struct zsd_entry *t;
1096 void *data;
1097 boolean_t dropped;
1098
1099 if (lockp != NULL) {
1100 ASSERT(MUTEX_HELD(lockp));
1101 }
1102 if (zone_lock_held) {
1103 ASSERT(MUTEX_HELD(&zone->zone_lock));
1104 } else {
1105 mutex_enter(&zone->zone_lock);
1106 }
1107
1108 t = zsd_find(&zone->zone_zsd, key);
1109 if (t == NULL) {
1110 /*
1111 * Somebody else got here first, e.g. the zone going
1112 * away.
1113 */
1114 if (!zone_lock_held)
1115 mutex_exit(&zone->zone_lock);
1116 return (B_FALSE);
1117 }
1118 dropped = B_FALSE;
1119 if (zsd_wait_for_creator(zone, t, lockp))
1120 dropped = B_TRUE;
1121
1122 if (zsd_wait_for_inprogress(zone, t, lockp))
1123 dropped = B_TRUE;
1124
1125 if (t->zsd_flags & ZSD_SHUTDOWN_NEEDED) {
1126 t->zsd_flags &= ~ZSD_SHUTDOWN_NEEDED;
1127 t->zsd_flags |= ZSD_SHUTDOWN_INPROGRESS;
1128 DTRACE_PROBE2(zsd__shutdown__inprogress,
1129 zone_t *, zone, zone_key_t, key);
1130 mutex_exit(&zone->zone_lock);
1131 if (lockp != NULL)
1132 mutex_exit(lockp);
1133 dropped = B_TRUE;
1134
1135 ASSERT(t->zsd_shutdown != NULL);
1136 data = t->zsd_data;
1137
1138 DTRACE_PROBE2(zsd__shutdown__start,
1139 zone_t *, zone, zone_key_t, key);
1140
1141 (t->zsd_shutdown)(zone->zone_id, data);
1142 DTRACE_PROBE2(zsd__shutdown__end,
1143 zone_t *, zone, zone_key_t, key);
1144
1145 if (lockp != NULL)
1146 mutex_enter(lockp);
1147 mutex_enter(&zone->zone_lock);
1148 t->zsd_flags &= ~ZSD_SHUTDOWN_INPROGRESS;
1149 t->zsd_flags |= ZSD_SHUTDOWN_COMPLETED;
1150 cv_broadcast(&t->zsd_cv);
1151 DTRACE_PROBE2(zsd__shutdown__completed,
1152 zone_t *, zone, zone_key_t, key);
1153 }
1154 if (!zone_lock_held)
1155 mutex_exit(&zone->zone_lock);
1156 return (dropped);
1157 }
1158
1159 /*
1160 * Call the destroy function for the zone and key if DESTROY_NEEDED
1161 * is set.
1162 * If some other thread gets here first and sets *_INPROGRESS, then
1163 * we wait for that thread to complete so that we can ensure that
1164 * all the callbacks are done when we've looped over all zones/keys.
1165 *
1166 * When we call the destroy function, we drop the global lock held by the
1167 * caller, and return true to tell the caller it needs to re-evaluate the
1168 * state.
1169 * If the caller holds zone_lock then zone_lock_held is set, and zone_lock
1170 * remains held on exit.
1171 */
1172 static boolean_t
1173 zsd_apply_destroy(kmutex_t *lockp, boolean_t zone_lock_held,
1174 zone_t *zone, zone_key_t key)
1175 {
1176 struct zsd_entry *t;
1177 void *data;
1178 boolean_t dropped;
1179
1180 if (lockp != NULL) {
1181 ASSERT(MUTEX_HELD(lockp));
1182 }
1183 if (zone_lock_held) {
1184 ASSERT(MUTEX_HELD(&zone->zone_lock));
1185 } else {
1186 mutex_enter(&zone->zone_lock);
1187 }
1188
1189 t = zsd_find(&zone->zone_zsd, key);
1190 if (t == NULL) {
1191 /*
1192 * Somebody else got here first, e.g. the zone going
1193 * away.
1194 */
1195 if (!zone_lock_held)
1196 mutex_exit(&zone->zone_lock);
1197 return (B_FALSE);
1198 }
1199 dropped = B_FALSE;
1200 if (zsd_wait_for_creator(zone, t, lockp))
1201 dropped = B_TRUE;
1202
1203 if (zsd_wait_for_inprogress(zone, t, lockp))
1204 dropped = B_TRUE;
1205
1206 if (t->zsd_flags & ZSD_DESTROY_NEEDED) {
1207 t->zsd_flags &= ~ZSD_DESTROY_NEEDED;
1208 t->zsd_flags |= ZSD_DESTROY_INPROGRESS;
1209 DTRACE_PROBE2(zsd__destroy__inprogress,
1210 zone_t *, zone, zone_key_t, key);
1211 mutex_exit(&zone->zone_lock);
1212 if (lockp != NULL)
1213 mutex_exit(lockp);
1214 dropped = B_TRUE;
1215
1216 ASSERT(t->zsd_destroy != NULL);
1217 data = t->zsd_data;
1218 DTRACE_PROBE2(zsd__destroy__start,
1219 zone_t *, zone, zone_key_t, key);
1220
1221 (t->zsd_destroy)(zone->zone_id, data);
1222 DTRACE_PROBE2(zsd__destroy__end,
1223 zone_t *, zone, zone_key_t, key);
1224
1225 if (lockp != NULL)
1226 mutex_enter(lockp);
1227 mutex_enter(&zone->zone_lock);
1228 t->zsd_data = NULL;
1229 t->zsd_flags &= ~ZSD_DESTROY_INPROGRESS;
1230 t->zsd_flags |= ZSD_DESTROY_COMPLETED;
1231 cv_broadcast(&t->zsd_cv);
1232 DTRACE_PROBE2(zsd__destroy__completed,
1233 zone_t *, zone, zone_key_t, key);
1234 }
1235 if (!zone_lock_held)
1236 mutex_exit(&zone->zone_lock);
1237 return (dropped);
1238 }
1239
1240 /*
1241 * Wait for any CREATE_NEEDED flag to be cleared.
1242 * Returns true if lockp was temporarily dropped while waiting.
1243 */
1244 static boolean_t
1245 zsd_wait_for_creator(zone_t *zone, struct zsd_entry *t, kmutex_t *lockp)
1246 {
1247 boolean_t dropped = B_FALSE;
1248
1249 while (t->zsd_flags & ZSD_CREATE_NEEDED) {
1250 DTRACE_PROBE2(zsd__wait__for__creator,
1251 zone_t *, zone, struct zsd_entry *, t);
1252 if (lockp != NULL) {
1253 dropped = B_TRUE;
1254 mutex_exit(lockp);
1255 }
1256 cv_wait(&t->zsd_cv, &zone->zone_lock);
1257 if (lockp != NULL) {
1258 /* First drop zone_lock to preserve order */
1259 mutex_exit(&zone->zone_lock);
1260 mutex_enter(lockp);
1261 mutex_enter(&zone->zone_lock);
1262 }
1263 }
1264 return (dropped);
1265 }
1266
1267 /*
1268 * Wait for any INPROGRESS flag to be cleared.
1269 * Returns true if lockp was temporarily dropped while waiting.
1270 */
1271 static boolean_t
1272 zsd_wait_for_inprogress(zone_t *zone, struct zsd_entry *t, kmutex_t *lockp)
1273 {
1274 boolean_t dropped = B_FALSE;
1275
1276 while (t->zsd_flags & ZSD_ALL_INPROGRESS) {
1277 DTRACE_PROBE2(zsd__wait__for__inprogress,
1278 zone_t *, zone, struct zsd_entry *, t);
1279 if (lockp != NULL) {
1280 dropped = B_TRUE;
1281 mutex_exit(lockp);
1282 }
1283 cv_wait(&t->zsd_cv, &zone->zone_lock);
1284 if (lockp != NULL) {
1285 /* First drop zone_lock to preserve order */
1286 mutex_exit(&zone->zone_lock);
1287 mutex_enter(lockp);
1288 mutex_enter(&zone->zone_lock);
1289 }
1290 }
1291 return (dropped);
1292 }
1293
1294 /*
1295 * Frees memory associated with the zone dataset list.
1296 */
1297 static void
1298 zone_free_datasets(zone_t *zone)
1299 {
1300 zone_dataset_t *t, *next;
1301
1302 for (t = list_head(&zone->zone_datasets); t != NULL; t = next) {
1303 next = list_next(&zone->zone_datasets, t);
1304 list_remove(&zone->zone_datasets, t);
1305 kmem_free(t->zd_dataset, strlen(t->zd_dataset) + 1);
1306 kmem_free(t, sizeof (*t));
1307 }
1308 list_destroy(&zone->zone_datasets);
1309 }
1310
1311 /*
1312 * zone.cpu-shares resource control support.
1313 */
1314 /*ARGSUSED*/
1315 static rctl_qty_t
1316 zone_cpu_shares_usage(rctl_t *rctl, struct proc *p)
1317 {
1318 ASSERT(MUTEX_HELD(&p->p_lock));
1319 return (p->p_zone->zone_shares);
1320 }
1321
1322 /*ARGSUSED*/
1323 static int
1324 zone_cpu_shares_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1325 rctl_qty_t nv)
1326 {
1327 ASSERT(MUTEX_HELD(&p->p_lock));
1328 ASSERT(e->rcep_t == RCENTITY_ZONE);
1329 if (e->rcep_p.zone == NULL)
1330 return (0);
1331
1332 e->rcep_p.zone->zone_shares = nv;
1333 return (0);
1334 }
1335
1336 static rctl_ops_t zone_cpu_shares_ops = {
1337 rcop_no_action,
1338 zone_cpu_shares_usage,
1339 zone_cpu_shares_set,
1340 rcop_no_test
1341 };
1342
1343 /*
1344 * zone.cpu-cap resource control support.
1345 */
1346 /*ARGSUSED*/
1347 static rctl_qty_t
1348 zone_cpu_cap_get(rctl_t *rctl, struct proc *p)
1349 {
1350 ASSERT(MUTEX_HELD(&p->p_lock));
1351 return (cpucaps_zone_get(p->p_zone));
1352 }
1353
1354 /*ARGSUSED*/
1355 static int
1356 zone_cpu_cap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1357 rctl_qty_t nv)
1358 {
1359 zone_t *zone = e->rcep_p.zone;
1360
1361 ASSERT(MUTEX_HELD(&p->p_lock));
1362 ASSERT(e->rcep_t == RCENTITY_ZONE);
1363
1364 if (zone == NULL)
1365 return (0);
1366
1367 /*
1368 * set cap to the new value.
1369 */
1370 return (cpucaps_zone_set(zone, nv));
1371 }
1372
1373 static rctl_ops_t zone_cpu_cap_ops = {
1374 rcop_no_action,
1375 zone_cpu_cap_get,
1376 zone_cpu_cap_set,
1377 rcop_no_test
1378 };
1379
1380 /*ARGSUSED*/
1381 static rctl_qty_t
1382 zone_lwps_usage(rctl_t *r, proc_t *p)
1383 {
1384 rctl_qty_t nlwps;
1385 zone_t *zone = p->p_zone;
1386
1387 ASSERT(MUTEX_HELD(&p->p_lock));
1388
1389 mutex_enter(&zone->zone_nlwps_lock);
1390 nlwps = zone->zone_nlwps;
1391 mutex_exit(&zone->zone_nlwps_lock);
1392
1393 return (nlwps);
1394 }
1395
1396 /*ARGSUSED*/
1397 static int
1398 zone_lwps_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
1399 rctl_qty_t incr, uint_t flags)
1400 {
1401 rctl_qty_t nlwps;
1402
1403 ASSERT(MUTEX_HELD(&p->p_lock));
1404 ASSERT(e->rcep_t == RCENTITY_ZONE);
1405 if (e->rcep_p.zone == NULL)
1406 return (0);
1407 ASSERT(MUTEX_HELD(&(e->rcep_p.zone->zone_nlwps_lock)));
1408 nlwps = e->rcep_p.zone->zone_nlwps;
1409
1410 if (nlwps + incr > rcntl->rcv_value)
1411 return (1);
1412
1413 return (0);
1414 }
1415
1416 /*ARGSUSED*/
1417 static int
1418 zone_lwps_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv)
1419 {
1420 ASSERT(MUTEX_HELD(&p->p_lock));
1421 ASSERT(e->rcep_t == RCENTITY_ZONE);
1422 if (e->rcep_p.zone == NULL)
1423 return (0);
1424 e->rcep_p.zone->zone_nlwps_ctl = nv;
1425 return (0);
1426 }
1427
1428 static rctl_ops_t zone_lwps_ops = {
1429 rcop_no_action,
1430 zone_lwps_usage,
1431 zone_lwps_set,
1432 zone_lwps_test,
1433 };
1434
1435 /*ARGSUSED*/
1436 static rctl_qty_t
1437 zone_procs_usage(rctl_t *r, proc_t *p)
1438 {
1439 rctl_qty_t nprocs;
1440 zone_t *zone = p->p_zone;
1441
1442 ASSERT(MUTEX_HELD(&p->p_lock));
1443
1444 mutex_enter(&zone->zone_nlwps_lock);
1445 nprocs = zone->zone_nprocs;
1446 mutex_exit(&zone->zone_nlwps_lock);
1447
1448 return (nprocs);
1449 }
1450
1451 /*ARGSUSED*/
1452 static int
1453 zone_procs_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rcntl,
1454 rctl_qty_t incr, uint_t flags)
1455 {
1456 rctl_qty_t nprocs;
1457
1458 ASSERT(MUTEX_HELD(&p->p_lock));
1459 ASSERT(e->rcep_t == RCENTITY_ZONE);
1460 if (e->rcep_p.zone == NULL)
1461 return (0);
1462 ASSERT(MUTEX_HELD(&(e->rcep_p.zone->zone_nlwps_lock)));
1463 nprocs = e->rcep_p.zone->zone_nprocs;
1464
1465 if (nprocs + incr > rcntl->rcv_value)
1466 return (1);
1467
1468 return (0);
1469 }
1470
1471 /*ARGSUSED*/
1472 static int
1473 zone_procs_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e, rctl_qty_t nv)
1474 {
1475 ASSERT(MUTEX_HELD(&p->p_lock));
1476 ASSERT(e->rcep_t == RCENTITY_ZONE);
1477 if (e->rcep_p.zone == NULL)
1478 return (0);
1479 e->rcep_p.zone->zone_nprocs_ctl = nv;
1480 return (0);
1481 }
1482
1483 static rctl_ops_t zone_procs_ops = {
1484 rcop_no_action,
1485 zone_procs_usage,
1486 zone_procs_set,
1487 zone_procs_test,
1488 };
1489
1490 /*ARGSUSED*/
1491 static rctl_qty_t
1492 zone_shmmax_usage(rctl_t *rctl, struct proc *p)
1493 {
1494 ASSERT(MUTEX_HELD(&p->p_lock));
1495 return (p->p_zone->zone_shmmax);
1496 }
1497
1498 /*ARGSUSED*/
1499 static int
1500 zone_shmmax_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1501 rctl_qty_t incr, uint_t flags)
1502 {
1503 rctl_qty_t v;
1504 ASSERT(MUTEX_HELD(&p->p_lock));
1505 ASSERT(e->rcep_t == RCENTITY_ZONE);
1506 v = e->rcep_p.zone->zone_shmmax + incr;
1507 if (v > rval->rcv_value)
1508 return (1);
1509 return (0);
1510 }
1511
1512 static rctl_ops_t zone_shmmax_ops = {
1513 rcop_no_action,
1514 zone_shmmax_usage,
1515 rcop_no_set,
1516 zone_shmmax_test
1517 };
1518
1519 /*ARGSUSED*/
1520 static rctl_qty_t
1521 zone_shmmni_usage(rctl_t *rctl, struct proc *p)
1522 {
1523 ASSERT(MUTEX_HELD(&p->p_lock));
1524 return (p->p_zone->zone_ipc.ipcq_shmmni);
1525 }
1526
1527 /*ARGSUSED*/
1528 static int
1529 zone_shmmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1530 rctl_qty_t incr, uint_t flags)
1531 {
1532 rctl_qty_t v;
1533 ASSERT(MUTEX_HELD(&p->p_lock));
1534 ASSERT(e->rcep_t == RCENTITY_ZONE);
1535 v = e->rcep_p.zone->zone_ipc.ipcq_shmmni + incr;
1536 if (v > rval->rcv_value)
1537 return (1);
1538 return (0);
1539 }
1540
1541 static rctl_ops_t zone_shmmni_ops = {
1542 rcop_no_action,
1543 zone_shmmni_usage,
1544 rcop_no_set,
1545 zone_shmmni_test
1546 };
1547
1548 /*ARGSUSED*/
1549 static rctl_qty_t
1550 zone_semmni_usage(rctl_t *rctl, struct proc *p)
1551 {
1552 ASSERT(MUTEX_HELD(&p->p_lock));
1553 return (p->p_zone->zone_ipc.ipcq_semmni);
1554 }
1555
1556 /*ARGSUSED*/
1557 static int
1558 zone_semmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1559 rctl_qty_t incr, uint_t flags)
1560 {
1561 rctl_qty_t v;
1562 ASSERT(MUTEX_HELD(&p->p_lock));
1563 ASSERT(e->rcep_t == RCENTITY_ZONE);
1564 v = e->rcep_p.zone->zone_ipc.ipcq_semmni + incr;
1565 if (v > rval->rcv_value)
1566 return (1);
1567 return (0);
1568 }
1569
1570 static rctl_ops_t zone_semmni_ops = {
1571 rcop_no_action,
1572 zone_semmni_usage,
1573 rcop_no_set,
1574 zone_semmni_test
1575 };
1576
1577 /*ARGSUSED*/
1578 static rctl_qty_t
1579 zone_msgmni_usage(rctl_t *rctl, struct proc *p)
1580 {
1581 ASSERT(MUTEX_HELD(&p->p_lock));
1582 return (p->p_zone->zone_ipc.ipcq_msgmni);
1583 }
1584
1585 /*ARGSUSED*/
1586 static int
1587 zone_msgmni_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e, rctl_val_t *rval,
1588 rctl_qty_t incr, uint_t flags)
1589 {
1590 rctl_qty_t v;
1591 ASSERT(MUTEX_HELD(&p->p_lock));
1592 ASSERT(e->rcep_t == RCENTITY_ZONE);
1593 v = e->rcep_p.zone->zone_ipc.ipcq_msgmni + incr;
1594 if (v > rval->rcv_value)
1595 return (1);
1596 return (0);
1597 }
1598
1599 static rctl_ops_t zone_msgmni_ops = {
1600 rcop_no_action,
1601 zone_msgmni_usage,
1602 rcop_no_set,
1603 zone_msgmni_test
1604 };
1605
1606 /*ARGSUSED*/
1607 static rctl_qty_t
1608 zone_locked_mem_usage(rctl_t *rctl, struct proc *p)
1609 {
1610 rctl_qty_t q;
1611 ASSERT(MUTEX_HELD(&p->p_lock));
1612 mutex_enter(&p->p_zone->zone_mem_lock);
1613 q = p->p_zone->zone_locked_mem;
1614 mutex_exit(&p->p_zone->zone_mem_lock);
1615 return (q);
1616 }
1617
1618 /*ARGSUSED*/
1619 static int
1620 zone_locked_mem_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
1621 rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
1622 {
1623 rctl_qty_t q;
1624 zone_t *z;
1625
1626 z = e->rcep_p.zone;
1627 ASSERT(MUTEX_HELD(&p->p_lock));
1628 ASSERT(MUTEX_HELD(&z->zone_mem_lock));
1629 q = z->zone_locked_mem;
1630 if (q + incr > rcntl->rcv_value)
1631 return (1);
1632 return (0);
1633 }
1634
1635 /*ARGSUSED*/
1636 static int
1637 zone_locked_mem_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1638 rctl_qty_t nv)
1639 {
1640 ASSERT(MUTEX_HELD(&p->p_lock));
1641 ASSERT(e->rcep_t == RCENTITY_ZONE);
1642 if (e->rcep_p.zone == NULL)
1643 return (0);
1644 e->rcep_p.zone->zone_locked_mem_ctl = nv;
1645 return (0);
1646 }
1647
1648 static rctl_ops_t zone_locked_mem_ops = {
1649 rcop_no_action,
1650 zone_locked_mem_usage,
1651 zone_locked_mem_set,
1652 zone_locked_mem_test
1653 };
1654
1655 /*ARGSUSED*/
1656 static rctl_qty_t
1657 zone_max_swap_usage(rctl_t *rctl, struct proc *p)
1658 {
1659 rctl_qty_t q;
1660 zone_t *z = p->p_zone;
1661
1662 ASSERT(MUTEX_HELD(&p->p_lock));
1663 mutex_enter(&z->zone_mem_lock);
1664 q = z->zone_max_swap;
1665 mutex_exit(&z->zone_mem_lock);
1666 return (q);
1667 }
1668
1669 /*ARGSUSED*/
1670 static int
1671 zone_max_swap_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
1672 rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
1673 {
1674 rctl_qty_t q;
1675 zone_t *z;
1676
1677 z = e->rcep_p.zone;
1678 ASSERT(MUTEX_HELD(&p->p_lock));
1679 ASSERT(MUTEX_HELD(&z->zone_mem_lock));
1680 q = z->zone_max_swap;
1681 if (q + incr > rcntl->rcv_value)
1682 return (1);
1683 return (0);
1684 }
1685
1686 /*ARGSUSED*/
1687 static int
1688 zone_max_swap_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1689 rctl_qty_t nv)
1690 {
1691 ASSERT(MUTEX_HELD(&p->p_lock));
1692 ASSERT(e->rcep_t == RCENTITY_ZONE);
1693 if (e->rcep_p.zone == NULL)
1694 return (0);
1695 e->rcep_p.zone->zone_max_swap_ctl = nv;
1696 return (0);
1697 }
1698
1699 static rctl_ops_t zone_max_swap_ops = {
1700 rcop_no_action,
1701 zone_max_swap_usage,
1702 zone_max_swap_set,
1703 zone_max_swap_test
1704 };
1705
1706 /*ARGSUSED*/
1707 static rctl_qty_t
1708 zone_max_lofi_usage(rctl_t *rctl, struct proc *p)
1709 {
1710 rctl_qty_t q;
1711 zone_t *z = p->p_zone;
1712
1713 ASSERT(MUTEX_HELD(&p->p_lock));
1714 mutex_enter(&z->zone_rctl_lock);
1715 q = z->zone_max_lofi;
1716 mutex_exit(&z->zone_rctl_lock);
1717 return (q);
1718 }
1719
1720 /*ARGSUSED*/
1721 static int
1722 zone_max_lofi_test(rctl_t *r, proc_t *p, rctl_entity_p_t *e,
1723 rctl_val_t *rcntl, rctl_qty_t incr, uint_t flags)
1724 {
1725 rctl_qty_t q;
1726 zone_t *z;
1727
1728 z = e->rcep_p.zone;
1729 ASSERT(MUTEX_HELD(&p->p_lock));
1730 ASSERT(MUTEX_HELD(&z->zone_rctl_lock));
1731 q = z->zone_max_lofi;
1732 if (q + incr > rcntl->rcv_value)
1733 return (1);
1734 return (0);
1735 }
1736
1737 /*ARGSUSED*/
1738 static int
1739 zone_max_lofi_set(rctl_t *rctl, struct proc *p, rctl_entity_p_t *e,
1740 rctl_qty_t nv)
1741 {
1742 ASSERT(MUTEX_HELD(&p->p_lock));
1743 ASSERT(e->rcep_t == RCENTITY_ZONE);
1744 if (e->rcep_p.zone == NULL)
1745 return (0);
1746 e->rcep_p.zone->zone_max_lofi_ctl = nv;
1747 return (0);
1748 }
1749
1750 static rctl_ops_t zone_max_lofi_ops = {
1751 rcop_no_action,
1752 zone_max_lofi_usage,
1753 zone_max_lofi_set,
1754 zone_max_lofi_test
1755 };
1756
1757 /*
1758  * Helper function to assign the zone a unique ID.
1759 */
1760 static void
1761 zone_uniqid(zone_t *zone)
1762 {
1763 static uint64_t uniqid = 0;
1764
1765 ASSERT(MUTEX_HELD(&zonehash_lock));
1766 zone->zone_uniqid = uniqid++;
1767 }
1768
1769 /*
1770 * Returns a held pointer to the "kcred" for the specified zone.
1771 */
1772 struct cred *
1773 zone_get_kcred(zoneid_t zoneid)
1774 {
1775 zone_t *zone;
1776 cred_t *cr;
1777
1778 if ((zone = zone_find_by_id(zoneid)) == NULL)
1779 return (NULL);
1780 cr = zone->zone_kcred;
1781 crhold(cr);
1782 zone_rele(zone);
1783 return (cr);
1784 }
1785
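/*
 * kstat update callback for the "lockedmem" zone kstat: report the zone's
 * current locked-memory usage and its rctl limit. This and the nprocs and
 * swapresv callbacks below are read-only; writes return EACCES.
 */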
1786 static int
1787 zone_lockedmem_kstat_update(kstat_t *ksp, int rw)
1788 {
1789 zone_t *zone = ksp->ks_private;
1790 zone_kstat_t *zk = ksp->ks_data;
1791
1792 if (rw == KSTAT_WRITE)
1793 return (EACCES);
1794
1795 zk->zk_usage.value.ui64 = zone->zone_locked_mem;
1796 zk->zk_value.value.ui64 = zone->zone_locked_mem_ctl;
1797 return (0);
1798 }
1799
1800 static int
1801 zone_nprocs_kstat_update(kstat_t *ksp, int rw)
1802 {
1803 zone_t *zone = ksp->ks_private;
1804 zone_kstat_t *zk = ksp->ks_data;
1805
1806 if (rw == KSTAT_WRITE)
1807 return (EACCES);
1808
1809 zk->zk_usage.value.ui64 = zone->zone_nprocs;
1810 zk->zk_value.value.ui64 = zone->zone_nprocs_ctl;
1811 return (0);
1812 }
1813
1814 static int
1815 zone_swapresv_kstat_update(kstat_t *ksp, int rw)
1816 {
1817 zone_t *zone = ksp->ks_private;
1818 zone_kstat_t *zk = ksp->ks_data;
1819
1820 if (rw == KSTAT_WRITE)
1821 return (EACCES);
1822
1823 zk->zk_usage.value.ui64 = zone->zone_max_swap;
1824 zk->zk_value.value.ui64 = zone->zone_max_swap_ctl;
1825 return (0);
1826 }
1827
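/*
 * Create a named kstat exporting "zonename", "usage" and "value" for one of
 * the zone rctls above; updatefunc fills in the usage/value pair on read.
 */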
1828 static kstat_t *
1829 zone_kstat_create_common(zone_t *zone, char *name,
1830 int (*updatefunc) (kstat_t *, int))
1831 {
1832 kstat_t *ksp;
1833 zone_kstat_t *zk;
1834
1835 ksp = rctl_kstat_create_zone(zone, name, KSTAT_TYPE_NAMED,
1836 sizeof (zone_kstat_t) / sizeof (kstat_named_t),
1837 KSTAT_FLAG_VIRTUAL);
1838
1839 if (ksp == NULL)
1840 return (NULL);
1841
1842 zk = ksp->ks_data = kmem_alloc(sizeof (zone_kstat_t), KM_SLEEP);
1843 ksp->ks_data_size += strlen(zone->zone_name) + 1;
1844 kstat_named_init(&zk->zk_zonename, "zonename", KSTAT_DATA_STRING);
1845 kstat_named_setstr(&zk->zk_zonename, zone->zone_name);
1846 kstat_named_init(&zk->zk_usage, "usage", KSTAT_DATA_UINT64);
1847 kstat_named_init(&zk->zk_value, "value", KSTAT_DATA_UINT64);
1848 ksp->ks_update = updatefunc;
1849 ksp->ks_private = zone;
1850 kstat_install(ksp);
1851 return (ksp);
1852 }
1853
1854
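/*
 * kstat update callback for the zone's memory-cap kstat: report the zone's
 * page-in and anonymous-allocation-failure counters. Read-only.
 */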
1855 static int
1856 zone_mcap_kstat_update(kstat_t *ksp, int rw)
1857 {
1858 zone_t *zone = ksp->ks_private;
1859 zone_mcap_kstat_t *zmp = ksp->ks_data;
1860
1861 if (rw == KSTAT_WRITE)
1862 return (EACCES);
1863
1864 zmp->zm_pgpgin.value.ui64 = zone->zone_pgpgin;
1865 zmp->zm_anonpgin.value.ui64 = zone->zone_anonpgin;
1866 zmp->zm_execpgin.value.ui64 = zone->zone_execpgin;
1867 zmp->zm_fspgin.value.ui64 = zone->zone_fspgin;
1868 zmp->zm_anon_alloc_fail.value.ui64 = zone->zone_anon_alloc_fail;
1869
1870 return (0);
1871 }
1872
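/*
 * Create the per-zone "memory_cap" kstat. For non-global zones the kstat is
 * also made visible to the global zone.
 */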
1873 static kstat_t *
1874 zone_mcap_kstat_create(zone_t *zone)
1875 {
1876 kstat_t *ksp;
1877 zone_mcap_kstat_t *zmp;
1878
1879 if ((ksp = kstat_create_zone("memory_cap", zone->zone_id,
1880 zone->zone_name, "zone_memory_cap", KSTAT_TYPE_NAMED,
1881 sizeof (zone_mcap_kstat_t) / sizeof (kstat_named_t),
1882 KSTAT_FLAG_VIRTUAL, zone->zone_id)) == NULL)
1883 return (NULL);
1884
1885 if (zone->zone_id != GLOBAL_ZONEID)
1886 kstat_zone_add(ksp, GLOBAL_ZONEID);
1887
1888 zmp = ksp->ks_data = kmem_zalloc(sizeof (zone_mcap_kstat_t), KM_SLEEP);
1889 ksp->ks_data_size += strlen(zone->zone_name) + 1;
1890 ksp->ks_lock = &zone->zone_mcap_lock;
1891 zone->zone_mcap_stats = zmp;
1892
1893 /* The kstat "name" field is not large enough for a full zonename */
1894 kstat_named_init(&zmp->zm_zonename, "zonename", KSTAT_DATA_STRING);
1895 kstat_named_setstr(&zmp->zm_zonename, zone->zone_name);
1896 kstat_named_init(&zmp->zm_pgpgin, "pgpgin", KSTAT_DATA_UINT64);
1897 kstat_named_init(&zmp->zm_anonpgin, "anonpgin", KSTAT_DATA_UINT64);
1898 kstat_named_init(&zmp->zm_execpgin, "execpgin", KSTAT_DATA_UINT64);
1899 kstat_named_init(&zmp->zm_fspgin, "fspgin", KSTAT_DATA_UINT64);
1900 kstat_named_init(&zmp->zm_anon_alloc_fail, "anon_alloc_fail",
1901 KSTAT_DATA_UINT64);
1902
1903 ksp->ks_update = zone_mcap_kstat_update;
1904 ksp->ks_private = zone;
1905
1906 kstat_install(ksp);
1907 return (ksp);
1908 }
1909
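/*
 * kstat update callback for the zone's miscellaneous kstat: scaled user,
 * system and wait-run-queue CPU times, load averages, fork-failure counters,
 * init's pid and the zone's boot time. Read-only.
 */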
1910 static int
1911 zone_misc_kstat_update(kstat_t *ksp, int rw)
1912 {
1913 zone_t *zone = ksp->ks_private;
1914 zone_misc_kstat_t *zmp = ksp->ks_data;
1915 hrtime_t hrtime;
1916 uint64_t tmp;
1917
1918 if (rw == KSTAT_WRITE)
1919 return (EACCES);
1920
1921 tmp = cpu_uarray_sum(zone->zone_ustate, ZONE_USTATE_STIME);
1922 hrtime = UINT64_OVERFLOW_TO_INT64(tmp);
1923 scalehrtime(&hrtime);
1924 zmp->zm_stime.value.ui64 = hrtime;
1925
1926 tmp = cpu_uarray_sum(zone->zone_ustate, ZONE_USTATE_UTIME);
1927 hrtime = UINT64_OVERFLOW_TO_INT64(tmp);
1928 scalehrtime(&hrtime);
1929 zmp->zm_utime.value.ui64 = hrtime;
1930
1931 tmp = cpu_uarray_sum(zone->zone_ustate, ZONE_USTATE_WTIME);
1932 hrtime = UINT64_OVERFLOW_TO_INT64(tmp);
1933 scalehrtime(&hrtime);
1934 zmp->zm_wtime.value.ui64 = hrtime;
1935
1936 zmp->zm_avenrun1.value.ui32 = zone->zone_avenrun[0];
1937 zmp->zm_avenrun5.value.ui32 = zone->zone_avenrun[1];
1938 zmp->zm_avenrun15.value.ui32 = zone->zone_avenrun[2];
1939
1940 zmp->zm_ffcap.value.ui32 = zone->zone_ffcap;
1941 zmp->zm_ffnoproc.value.ui32 = zone->zone_ffnoproc;
1942 zmp->zm_ffnomem.value.ui32 = zone->zone_ffnomem;
1943 zmp->zm_ffmisc.value.ui32 = zone->zone_ffmisc;
1944
1945 zmp->zm_nested_intp.value.ui32 = zone->zone_nested_intp;
1946
1947 zmp->zm_init_pid.value.ui32 = zone->zone_proc_initpid;
1948 zmp->zm_boot_time.value.ui64 = (uint64_t)zone->zone_boot_time;
1949
1950 return (0);
1951 }
1952
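/*
 * Create the per-zone "zone_misc" kstat; as with the memory-cap kstat, it is
 * also visible from the global zone for non-global zones.
 */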
1953 static kstat_t *
1954 zone_misc_kstat_create(zone_t *zone)
1955 {
1956 kstat_t *ksp;
1957 zone_misc_kstat_t *zmp;
1958
1959 if ((ksp = kstat_create_zone("zones", zone->zone_id,
1960 zone->zone_name, "zone_misc", KSTAT_TYPE_NAMED,
1961 sizeof (zone_misc_kstat_t) / sizeof (kstat_named_t),
1962 KSTAT_FLAG_VIRTUAL, zone->zone_id)) == NULL)
1963 return (NULL);
1964
1965 if (zone->zone_id != GLOBAL_ZONEID)
1966 kstat_zone_add(ksp, GLOBAL_ZONEID);
1967
1968 zmp = ksp->ks_data = kmem_zalloc(sizeof (zone_misc_kstat_t), KM_SLEEP);
1969 ksp->ks_data_size += strlen(zone->zone_name) + 1;
1970 ksp->ks_lock = &zone->zone_misc_lock;
1971 zone->zone_misc_stats = zmp;
1972
1973 /* The kstat "name" field is not large enough for a full zonename */
1974 kstat_named_init(&zmp->zm_zonename, "zonename", KSTAT_DATA_STRING);
1975 kstat_named_setstr(&zmp->zm_zonename, zone->zone_name);
1976 kstat_named_init(&zmp->zm_utime, "nsec_user", KSTAT_DATA_UINT64);
1977 kstat_named_init(&zmp->zm_stime, "nsec_sys", KSTAT_DATA_UINT64);
1978 kstat_named_init(&zmp->zm_wtime, "nsec_waitrq", KSTAT_DATA_UINT64);
1979 kstat_named_init(&zmp->zm_avenrun1, "avenrun_1min", KSTAT_DATA_UINT32);
1980 kstat_named_init(&zmp->zm_avenrun5, "avenrun_5min", KSTAT_DATA_UINT32);
1981 kstat_named_init(&zmp->zm_avenrun15, "avenrun_15min",
1982 KSTAT_DATA_UINT32);
1983 kstat_named_init(&zmp->zm_ffcap, "forkfail_cap", KSTAT_DATA_UINT32);
1984 kstat_named_init(&zmp->zm_ffnoproc, "forkfail_noproc",
1985 KSTAT_DATA_UINT32);
1986 kstat_named_init(&zmp->zm_ffnomem, "forkfail_nomem", KSTAT_DATA_UINT32);
1987 kstat_named_init(&zmp->zm_ffmisc, "forkfail_misc", KSTAT_DATA_UINT32);
1988 kstat_named_init(&zmp->zm_nested_intp, "nested_interp",
1989 KSTAT_DATA_UINT32);
1990 kstat_named_init(&zmp->zm_init_pid, "init_pid", KSTAT_DATA_UINT32);
1991 kstat_named_init(&zmp->zm_boot_time, "boot_time", KSTAT_DATA_UINT64);
1992
1993 ksp->ks_update = zone_misc_kstat_update;
1994 ksp->ks_private = zone;
1995
1996 kstat_install(ksp);
1997 return (ksp);
1998 }
1999
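/*
 * Create all of the zone's kstats. If the mcap or misc kstats cannot be
 * created, the corresponding stats structures are still allocated so the
 * counters they back can be updated unconditionally.
 */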
2000 static void
2001 zone_kstat_create(zone_t *zone)
2002 {
2003 zone->zone_lockedmem_kstat = zone_kstat_create_common(zone,
2004 "lockedmem", zone_lockedmem_kstat_update);
2005 zone->zone_swapresv_kstat = zone_kstat_create_common(zone,
2006 "swapresv", zone_swapresv_kstat_update);
2007 zone->zone_nprocs_kstat = zone_kstat_create_common(zone,
2008 "nprocs", zone_nprocs_kstat_update);
2009
2010 if ((zone->zone_mcap_ksp = zone_mcap_kstat_create(zone)) == NULL) {
2011 zone->zone_mcap_stats = kmem_zalloc(
2012 sizeof (zone_mcap_kstat_t), KM_SLEEP);
2013 }
2014
2015 if ((zone->zone_misc_ksp = zone_misc_kstat_create(zone)) == NULL) {
2016 zone->zone_misc_stats = kmem_zalloc(
2017 sizeof (zone_misc_kstat_t), KM_SLEEP);
2018 }
2019 }
2020
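/*
 * Tear down a kstat created above, freeing the named-data buffer that was
 * allocated separately because the kstat is virtual.
 */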
2021 static void
2022 zone_kstat_delete_common(kstat_t **pkstat, size_t datasz)
2023 {
2024 void *data;
2025
2026 if (*pkstat != NULL) {
2027 data = (*pkstat)->ks_data;
2028 kstat_delete(*pkstat);
2029 kmem_free(data, datasz);
2030 *pkstat = NULL;
2031 }
2032 }
2033
2034 static void
2035 zone_kstat_delete(zone_t *zone)
2036 {
2037 zone_kstat_delete_common(&zone->zone_lockedmem_kstat,
2038 sizeof (zone_kstat_t));
2039 zone_kstat_delete_common(&zone->zone_swapresv_kstat,
2040 sizeof (zone_kstat_t));
2041 zone_kstat_delete_common(&zone->zone_nprocs_kstat,
2042 sizeof (zone_kstat_t));
2043 zone_kstat_delete_common(&zone->zone_mcap_ksp,
2044 sizeof (zone_mcap_kstat_t));
2045 zone_kstat_delete_common(&zone->zone_misc_ksp,
2046 sizeof (zone_misc_kstat_t));
2047 }
2048
2049 /*
2050 * Called very early on in boot to initialize the ZSD list so that
2051 * zone_key_create() can be called before zone_init(). It also initializes
2052 * portions of zone0 which may be used before zone_init() is called. The
2053 * variable "global_zone" will be set when zone0 is fully initialized by
2054 * zone_init().
2055 */
2056 void
2057 zone_zsd_init(void)
2058 {
2059 mutex_init(&zonehash_lock, NULL, MUTEX_DEFAULT, NULL);
2060 mutex_init(&zsd_key_lock, NULL, MUTEX_DEFAULT, NULL);
2061 list_create(&zsd_registered_keys, sizeof (struct zsd_entry),
2062 offsetof(struct zsd_entry, zsd_linkage));
2063 list_create(&zone_active, sizeof (zone_t),
2064 offsetof(zone_t, zone_linkage));
2065 list_create(&zone_deathrow, sizeof (zone_t),
2066 offsetof(zone_t, zone_linkage));
2067
2068 mutex_init(&zone0.zone_lock, NULL, MUTEX_DEFAULT, NULL);
2069 mutex_init(&zone0.zone_nlwps_lock, NULL, MUTEX_DEFAULT, NULL);
2070 mutex_init(&zone0.zone_mem_lock, NULL, MUTEX_DEFAULT, NULL);
2071 zone0.zone_shares = 1;
2072 zone0.zone_nlwps = 0;
2073 zone0.zone_nlwps_ctl = INT_MAX;
2074 zone0.zone_nprocs = 0;
2075 zone0.zone_nprocs_ctl = INT_MAX;
2076 zone0.zone_locked_mem = 0;
2077 zone0.zone_locked_mem_ctl = UINT64_MAX;
2078 ASSERT(zone0.zone_max_swap == 0);
2079 zone0.zone_max_swap_ctl = UINT64_MAX;
2080 zone0.zone_max_lofi = 0;
2081 zone0.zone_max_lofi_ctl = UINT64_MAX;
2082 zone0.zone_shmmax = 0;
2083 zone0.zone_ipc.ipcq_shmmni = 0;
2084 zone0.zone_ipc.ipcq_semmni = 0;
2085 zone0.zone_ipc.ipcq_msgmni = 0;
2086 zone0.zone_name = GLOBAL_ZONENAME;
2087 zone0.zone_nodename = utsname.nodename;
2088 zone0.zone_domain = srpc_domain;
2089 zone0.zone_hostid = HW_INVALID_HOSTID;
2090 zone0.zone_fs_allowed = NULL;
2091 psecflags_default(&zone0.zone_secflags);
2092 zone0.zone_ref = 1;
2093 zone0.zone_id = GLOBAL_ZONEID;
2094 zone0.zone_status = ZONE_IS_RUNNING;
2095 zone0.zone_rootpath = "/";
2096 zone0.zone_rootpathlen = 2;
2097 zone0.zone_psetid = ZONE_PS_INVAL;
2098 zone0.zone_ncpus = 0;
2099 zone0.zone_ncpus_online = 0;
2100 zone0.zone_proc_initpid = 1;
2101 zone0.zone_initname = initname;
2102 zone0.zone_lockedmem_kstat = NULL;
2103 zone0.zone_swapresv_kstat = NULL;
2104 zone0.zone_nprocs_kstat = NULL;
2105
2106 list_create(&zone0.zone_ref_list, sizeof (zone_ref_t),
2107 offsetof(zone_ref_t, zref_linkage));
2108 list_create(&zone0.zone_zsd, sizeof (struct zsd_entry),
2109 offsetof(struct zsd_entry, zsd_linkage));
2110 list_insert_head(&zone_active, &zone0);
2111
2112 /*
2113 * The root filesystem is not mounted yet, so zone_rootvp cannot be set
2114 * to anything meaningful. It is assigned to be 'rootdir' in
2115 * vfs_mountroot().
2116 */
2117 zone0.zone_rootvp = NULL;
2118 zone0.zone_vfslist = NULL;
2119 zone0.zone_bootargs = initargs;
2120 zone0.zone_privset = kmem_alloc(sizeof (priv_set_t), KM_SLEEP);
2121 /*
2122 * The global zone has all privileges
2123 */
2124 priv_fillset(zone0.zone_privset);
2125 /*
2126 * Add p0 to the global zone
2127 */
2128 zone0.zone_zsched = &p0;
2129 p0.p_zone = &zone0;
2130 }
2131
2132 /*
2133 * Compute a hash value based on the contents of the label and the DOI. The
2134 * hash algorithm is somewhat arbitrary, but is based on the observation that
2135 * humans will likely pick labels that differ by amounts that work out to be
2136 * multiples of the number of hash chains, and thus stirring in some primes
2137 * should help.
2138 */
2139 static uint_t
2140 hash_bylabel(void *hdata, mod_hash_key_t key)
2141 {
2142 const ts_label_t *lab = (ts_label_t *)key;
2143 const uint32_t *up, *ue;
2144 uint_t hash;
2145 int i;
2146
2147 _NOTE(ARGUNUSED(hdata));
2148
2149 hash = lab->tsl_doi + (lab->tsl_doi << 1);
2150 /* we depend on alignment of label, but not representation */
2151 up = (const uint32_t *)&lab->tsl_label;
2152 ue = up + sizeof (lab->tsl_label) / sizeof (*up);
2153 i = 1;
2154 while (up < ue) {
2155 /* using 2^n + 1, 1 <= n <= 16 as source of many primes */
2156 hash += *up + (*up << ((i % 16) + 1));
2157 up++;
2158 i++;
2159 }
2160 return (hash);
2161 }
2162
2163 /*
2164 * All that mod_hash cares about here is zero (equal) versus non-zero (not
2165 * equal). This may need to be changed if less than / greater than is ever
2166 * needed.
2167 */
2168 static int
2169 hash_labelkey_cmp(mod_hash_key_t key1, mod_hash_key_t key2)
2170 {
2171 ts_label_t *lab1 = (ts_label_t *)key1;
2172 ts_label_t *lab2 = (ts_label_t *)key2;
2173
2174 return (label_equal(lab1, lab2) ? 0 : 1);
2175 }
2176
2177 /*
2178 * Called by main() to initialize the zones framework.
2179 */
2180 void
2181 zone_init(void)
2182 {
2183 rctl_dict_entry_t *rde;
2184 rctl_val_t *dval;
2185 rctl_set_t *set;
2186 rctl_alloc_gp_t *gp;
2187 rctl_entity_p_t e;
2188 int res;
2189
2190 ASSERT(curproc == &p0);
2191
2192 /*
2193 * Create ID space for zone IDs. ID 0 is reserved for the
2194 * global zone.
2195 */
2196 zoneid_space = id_space_create("zoneid_space", 1, MAX_ZONEID);
2197
2198 /*
2199 * Initialize generic zone resource controls, if any.
2200 */
2201 rc_zone_cpu_shares = rctl_register("zone.cpu-shares",
2202 RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_NEVER |
2203 RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER,
2204 FSS_MAXSHARES, FSS_MAXSHARES, &zone_cpu_shares_ops);
2205
2206 rc_zone_cpu_cap = rctl_register("zone.cpu-cap",
2207 RCENTITY_ZONE, RCTL_GLOBAL_SIGNAL_NEVER | RCTL_GLOBAL_DENY_ALWAYS |
2208 	    RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT | RCTL_GLOBAL_SYSLOG_NEVER |
2209 RCTL_GLOBAL_INFINITE,
2210 MAXCAP, MAXCAP, &zone_cpu_cap_ops);
2211
2212 rc_zone_nlwps = rctl_register("zone.max-lwps", RCENTITY_ZONE,
2213 RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
2214 INT_MAX, INT_MAX, &zone_lwps_ops);
2215
2216 rc_zone_nprocs = rctl_register("zone.max-processes", RCENTITY_ZONE,
2217 RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT,
2218 INT_MAX, INT_MAX, &zone_procs_ops);
2219
2220 /*
2221 * System V IPC resource controls
2222 */
2223 rc_zone_msgmni = rctl_register("zone.max-msg-ids",
2224 RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
2225 RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_msgmni_ops);
2226
2227 rc_zone_semmni = rctl_register("zone.max-sem-ids",
2228 RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
2229 RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_semmni_ops);
2230
2231 rc_zone_shmmni = rctl_register("zone.max-shm-ids",
2232 RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
2233 RCTL_GLOBAL_COUNT, IPC_IDS_MAX, IPC_IDS_MAX, &zone_shmmni_ops);
2234
2235 rc_zone_shmmax = rctl_register("zone.max-shm-memory",
2236 RCENTITY_ZONE, RCTL_GLOBAL_DENY_ALWAYS | RCTL_GLOBAL_NOBASIC |
2237 RCTL_GLOBAL_BYTES, UINT64_MAX, UINT64_MAX, &zone_shmmax_ops);
2238
2239 /*
2240 * Create a rctl_val with PRIVILEGED, NOACTION, value = 1. Then attach
2241 * this at the head of the rctl_dict_entry for ``zone.cpu-shares''.
2242 */
2243 dval = kmem_cache_alloc(rctl_val_cache, KM_SLEEP);
2244 bzero(dval, sizeof (rctl_val_t));
2245 dval->rcv_value = 1;
2246 dval->rcv_privilege = RCPRIV_PRIVILEGED;
2247 dval->rcv_flagaction = RCTL_LOCAL_NOACTION;
2248 dval->rcv_action_recip_pid = -1;
2249
2250 rde = rctl_dict_lookup("zone.cpu-shares");
2251 (void) rctl_val_list_insert(&rde->rcd_default_value, dval);
2252
2253 rc_zone_locked_mem = rctl_register("zone.max-locked-memory",
2254 RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES |
2255 RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
2256 &zone_locked_mem_ops);
2257
2258 rc_zone_max_swap = rctl_register("zone.max-swap",
2259 RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_BYTES |
2260 RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
2261 &zone_max_swap_ops);
2262
2263 rc_zone_max_lofi = rctl_register("zone.max-lofi",
2264 RCENTITY_ZONE, RCTL_GLOBAL_NOBASIC | RCTL_GLOBAL_COUNT |
2265 RCTL_GLOBAL_DENY_ALWAYS, UINT64_MAX, UINT64_MAX,
2266 &zone_max_lofi_ops);
2267
2268 /*
2269 * Initialize the ``global zone''.
2270 */
2271 set = rctl_set_create();
2272 gp = rctl_set_init_prealloc(RCENTITY_ZONE);
2273 mutex_enter(&p0.p_lock);
2274 e.rcep_p.zone = &zone0;
2275 e.rcep_t = RCENTITY_ZONE;
2276 zone0.zone_rctls = rctl_set_init(RCENTITY_ZONE, &p0, &e, set,
2277 gp);
2278
2279 zone0.zone_nlwps = p0.p_lwpcnt;
2280 zone0.zone_nprocs = 1;
2281 zone0.zone_ntasks = 1;
2282 mutex_exit(&p0.p_lock);
2283 zone0.zone_restart_init = B_TRUE;
2284 zone0.zone_reboot_on_init_exit = B_FALSE;
2285 zone0.zone_restart_init_0 = B_FALSE;
2286 zone0.zone_brand = &native_brand;
2287 rctl_prealloc_destroy(gp);
2288 /*
2289 * pool_default hasn't been initialized yet, so we let pool_init()
2290 * take care of making sure the global zone is in the default pool.
2291 */
2292
2293 /*
2294 * Initialize global zone kstats
2295 */
2296 zone_kstat_create(&zone0);
2297
2298 /*
2299 * Initialize zone label.
2300 * mlp are initialized when tnzonecfg is loaded.
2301 */
2302 zone0.zone_slabel = l_admin_low;
2303 rw_init(&zone0.zone_mlps.mlpl_rwlock, NULL, RW_DEFAULT, NULL);
2304 label_hold(l_admin_low);
2305
2306 /*
2307 * Initialise the lock for the database structure used by mntfs.
2308 */
2309 rw_init(&zone0.zone_mntfs_db_lock, NULL, RW_DEFAULT, NULL);
2310
2311 zone0.zone_ustate = cpu_uarray_zalloc(ZONE_USTATE_MAX, KM_SLEEP);
2312
2313 mutex_enter(&zonehash_lock);
2314 zone_uniqid(&zone0);
2315 ASSERT(zone0.zone_uniqid == GLOBAL_ZONEUNIQID);
2316
2317 zonehashbyid = mod_hash_create_idhash("zone_by_id", zone_hash_size,
2318 mod_hash_null_valdtor);
2319 zonehashbyname = mod_hash_create_strhash("zone_by_name",
2320 zone_hash_size, mod_hash_null_valdtor);
2321 /*
2322 * maintain zonehashbylabel only for labeled systems
2323 */
2324 if (is_system_labeled())
2325 zonehashbylabel = mod_hash_create_extended("zone_by_label",
2326 zone_hash_size, mod_hash_null_keydtor,
2327 mod_hash_null_valdtor, hash_bylabel, NULL,
2328 hash_labelkey_cmp, KM_SLEEP);
2329 zonecount = 1;
2330
2331 (void) mod_hash_insert(zonehashbyid, (mod_hash_key_t)GLOBAL_ZONEID,
2332 (mod_hash_val_t)&zone0);
2333 (void) mod_hash_insert(zonehashbyname, (mod_hash_key_t)zone0.zone_name,
2334 (mod_hash_val_t)&zone0);
2335 if (is_system_labeled()) {
2336 zone0.zone_flags |= ZF_HASHED_LABEL;
2337 (void) mod_hash_insert(zonehashbylabel,
2338 (mod_hash_key_t)zone0.zone_slabel, (mod_hash_val_t)&zone0);
2339 }
2340 mutex_exit(&zonehash_lock);
2341
2342 /*
2343 * We avoid setting zone_kcred until now, since kcred is initialized
2344 * sometime after zone_zsd_init() and before zone_init().
2345 */
2346 zone0.zone_kcred = kcred;
2347 /*
2348 * The global zone is fully initialized (except for zone_rootvp which
2349 * will be set when the root filesystem is mounted).
2350 */
2351 global_zone = &zone0;
2352
2353 /*
2354 * Setup an event channel to send zone status change notifications on
2355 */
2356 res = sysevent_evc_bind(ZONE_EVENT_CHANNEL, &zone_event_chan,
2357 EVCH_CREAT);
2358
2359 if (res)
2360 panic("Sysevent_evc_bind failed during zone setup.\n");
2361
2362 }
2363
2364 static void
2365 zone_free(zone_t *zone)
2366 {
2367 ASSERT(zone != global_zone);
2368 ASSERT(zone->zone_ntasks == 0);
2369 ASSERT(zone->zone_nlwps == 0);
2370 ASSERT(zone->zone_nprocs == 0);
2371 ASSERT(zone->zone_cred_ref == 0);
2372 ASSERT(zone->zone_kcred == NULL);
2373 ASSERT(zone_status_get(zone) == ZONE_IS_DEAD ||
2374 zone_status_get(zone) == ZONE_IS_UNINITIALIZED);
2375 ASSERT(list_is_empty(&zone->zone_ref_list));
2376
2377 /*
2378 * Remove any zone caps.
2379 */
2380 cpucaps_zone_remove(zone);
2381
2382 ASSERT(zone->zone_cpucap == NULL);
2383
2384 /* remove from deathrow list */
2385 if (zone_status_get(zone) == ZONE_IS_DEAD) {
2386 ASSERT(zone->zone_ref == 0);
2387 mutex_enter(&zone_deathrow_lock);
2388 list_remove(&zone_deathrow, zone);
2389 mutex_exit(&zone_deathrow_lock);
2390 }
2391
2392 list_destroy(&zone->zone_ref_list);
2393 zone_free_zsd(zone);
2394 zone_free_datasets(zone);
2395 list_destroy(&zone->zone_dl_list);
2396
2397 cpu_uarray_free(zone->zone_ustate);
2398
2399 if (zone->zone_rootvp != NULL)
2400 VN_RELE(zone->zone_rootvp);
2401 if (zone->zone_rootpath)
2402 kmem_free(zone->zone_rootpath, zone->zone_rootpathlen);
2403 if (zone->zone_name != NULL)
2404 kmem_free(zone->zone_name, ZONENAME_MAX);
2405 if (zone->zone_slabel != NULL)
2406 label_rele(zone->zone_slabel);
2407 if (zone->zone_nodename != NULL)
2408 kmem_free(zone->zone_nodename, _SYS_NMLN);
2409 if (zone->zone_domain != NULL)
2410 kmem_free(zone->zone_domain, _SYS_NMLN);
2411 if (zone->zone_privset != NULL)
2412 kmem_free(zone->zone_privset, sizeof (priv_set_t));
2413 if (zone->zone_rctls != NULL)
2414 rctl_set_free(zone->zone_rctls);
2415 if (zone->zone_bootargs != NULL)
2416 strfree(zone->zone_bootargs);
2417 if (zone->zone_initname != NULL)
2418 strfree(zone->zone_initname);
2419 if (zone->zone_fs_allowed != NULL)
2420 strfree(zone->zone_fs_allowed);
2421 if (zone->zone_pfexecd != NULL)
2422 klpd_freelist(&zone->zone_pfexecd);
2423 id_free(zoneid_space, zone->zone_id);
2424 mutex_destroy(&zone->zone_lock);
2425 cv_destroy(&zone->zone_cv);
2426 rw_destroy(&zone->zone_mlps.mlpl_rwlock);
2427 rw_destroy(&zone->zone_mntfs_db_lock);
2428 kmem_free(zone, sizeof (zone_t));
2429 }
2430
2431 /*
2432 * See block comment at the top of this file for information about zone
2433 * status values.
2434 */
2435 /*
2436 * Convenience function for setting zone status.
2437 */
2438 static void
2439 zone_status_set(zone_t *zone, zone_status_t status)
2440 {
2441
2442 nvlist_t *nvl = NULL;
2443 ASSERT(MUTEX_HELD(&zone_status_lock));
2444 ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE &&
2445 status >= zone_status_get(zone));
2446
2447 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) ||
2448 nvlist_add_string(nvl, ZONE_CB_NAME, zone->zone_name) ||
2449 nvlist_add_string(nvl, ZONE_CB_NEWSTATE,
2450 zone_status_table[status]) ||
2451 nvlist_add_string(nvl, ZONE_CB_OLDSTATE,
2452 zone_status_table[zone->zone_status]) ||
2453 nvlist_add_int32(nvl, ZONE_CB_ZONEID, zone->zone_id) ||
2454 nvlist_add_uint64(nvl, ZONE_CB_TIMESTAMP, (uint64_t)gethrtime()) ||
2455 sysevent_evc_publish(zone_event_chan, ZONE_EVENT_STATUS_CLASS,
2456 ZONE_EVENT_STATUS_SUBCLASS, "sun.com", "kernel", nvl, EVCH_SLEEP)) {
2457 #ifdef DEBUG
2458 (void) printf(
2459 "Failed to allocate and send zone state change event.\n");
2460 #endif
2461 }
2462 nvlist_free(nvl);
2463
2464 zone->zone_status = status;
2465
2466 cv_broadcast(&zone->zone_cv);
2467 }
2468
2469 /*
2470 * Public function to retrieve the zone status. The zone status may
2471 * change after it is retrieved.
2472 */
2473 zone_status_t
2474 zone_status_get(zone_t *zone)
2475 {
2476 return (zone->zone_status);
2477 }
2478
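/*
 * The zone_set_*() functions below back the settable zone attributes (boot
 * arguments, brand, security flags, allowed filesystems, init path, physical
 * memory cap and default scheduling class). Each copies its value in from
 * userland and validates it before updating the zone.
 */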
2479 static int
2480 zone_set_bootargs(zone_t *zone, const char *zone_bootargs)
2481 {
2482 char *buf = kmem_zalloc(BOOTARGS_MAX, KM_SLEEP);
2483 int err = 0;
2484
2485 ASSERT(zone != global_zone);
2486 if ((err = copyinstr(zone_bootargs, buf, BOOTARGS_MAX, NULL)) != 0)
2487 goto done; /* EFAULT or ENAMETOOLONG */
2488
2489 if (zone->zone_bootargs != NULL)
2490 strfree(zone->zone_bootargs);
2491
2492 zone->zone_bootargs = strdup(buf);
2493
2494 done:
2495 kmem_free(buf, BOOTARGS_MAX);
2496 return (err);
2497 }
2498
2499 static int
2500 zone_set_brand(zone_t *zone, const char *brand)
2501 {
2502 struct brand_attr *attrp;
2503 brand_t *bp;
2504
2505 attrp = kmem_alloc(sizeof (struct brand_attr), KM_SLEEP);
2506 if (copyin(brand, attrp, sizeof (struct brand_attr)) != 0) {
2507 kmem_free(attrp, sizeof (struct brand_attr));
2508 return (EFAULT);
2509 }
2510
2511 bp = brand_register_zone(attrp);
2512 kmem_free(attrp, sizeof (struct brand_attr));
2513 if (bp == NULL)
2514 return (EINVAL);
2515
2516 /*
2517 	 * This is the only place where a zone can change its brand.
2518 * We already need to hold zone_status_lock to check the zone
2519 * status, so we'll just use that lock to serialize zone
2520 * branding requests as well.
2521 */
2522 mutex_enter(&zone_status_lock);
2523
2524 	/* Re-branding is not allowed and the zone can't be booted yet */
2525 if ((ZONE_IS_BRANDED(zone)) ||
2526 (zone_status_get(zone) >= ZONE_IS_BOOTING)) {
2527 mutex_exit(&zone_status_lock);
2528 brand_unregister_zone(bp);
2529 return (EINVAL);
2530 }
2531
2532 /* set up the brand specific data */
2533 zone->zone_brand = bp;
2534 ZBROP(zone)->b_init_brand_data(zone);
2535
2536 mutex_exit(&zone_status_lock);
2537 return (0);
2538 }
2539
2540 static int
2541 zone_set_secflags(zone_t *zone, const psecflags_t *zone_secflags)
2542 {
2543 int err = 0;
2544 psecflags_t psf;
2545
2546 ASSERT(zone != global_zone);
2547
2548 if ((err = copyin(zone_secflags, &psf, sizeof (psf))) != 0)
2549 return (err);
2550
2551 if (zone_status_get(zone) > ZONE_IS_READY)
2552 return (EINVAL);
2553
2554 if (!psecflags_validate(&psf))
2555 return (EINVAL);
2556
2557 (void) memcpy(&zone->zone_secflags, &psf, sizeof (psf));
2558
2559 /* Set security flags on the zone's zsched */
2560 (void) memcpy(&zone->zone_zsched->p_secflags, &zone->zone_secflags,
2561 sizeof (zone->zone_zsched->p_secflags));
2562
2563 return (0);
2564 }
2565
2566 static int
2567 zone_set_fs_allowed(zone_t *zone, const char *zone_fs_allowed)
2568 {
2569 char *buf = kmem_zalloc(ZONE_FS_ALLOWED_MAX, KM_SLEEP);
2570 int err = 0;
2571
2572 ASSERT(zone != global_zone);
2573 if ((err = copyinstr(zone_fs_allowed, buf,
2574 ZONE_FS_ALLOWED_MAX, NULL)) != 0)
2575 goto done;
2576
2577 if (zone->zone_fs_allowed != NULL)
2578 strfree(zone->zone_fs_allowed);
2579
2580 zone->zone_fs_allowed = strdup(buf);
2581
2582 done:
2583 kmem_free(buf, ZONE_FS_ALLOWED_MAX);
2584 return (err);
2585 }
2586
2587 static int
2588 zone_set_initname(zone_t *zone, const char *zone_initname)
2589 {
2590 char initname[INITNAME_SZ];
2591 size_t len;
2592 int err = 0;
2593
2594 ASSERT(zone != global_zone);
2595 if ((err = copyinstr(zone_initname, initname, INITNAME_SZ, &len)) != 0)
2596 return (err); /* EFAULT or ENAMETOOLONG */
2597
2598 if (zone->zone_initname != NULL)
2599 strfree(zone->zone_initname);
2600
2601 zone->zone_initname = kmem_alloc(strlen(initname) + 1, KM_SLEEP);
2602 (void) strcpy(zone->zone_initname, initname);
2603 return (0);
2604 }
2605
2606 static int
2607 zone_set_phys_mcap(zone_t *zone, const uint64_t *zone_mcap)
2608 {
2609 uint64_t mcap;
2610 int err = 0;
2611
2612 if ((err = copyin(zone_mcap, &mcap, sizeof (uint64_t))) == 0)
2613 zone->zone_phys_mcap = mcap;
2614
2615 return (err);
2616 }
2617
2618 static int
2619 zone_set_sched_class(zone_t *zone, const char *new_class)
2620 {
2621 char sched_class[PC_CLNMSZ];
2622 id_t classid;
2623 int err;
2624
2625 ASSERT(zone != global_zone);
2626 if ((err = copyinstr(new_class, sched_class, PC_CLNMSZ, NULL)) != 0)
2627 return (err); /* EFAULT or ENAMETOOLONG */
2628
2629 if (getcid(sched_class, &classid) != 0 || CLASS_KERNEL(classid))
2630 return (set_errno(EINVAL));
2631 zone->zone_defaultcid = classid;
2632 ASSERT(zone->zone_defaultcid > 0 &&
2633 zone->zone_defaultcid < loaded_classes);
2634
2635 return (0);
2636 }
2637
2638 /*
2639 * Block indefinitely waiting for (zone_status >= status)
2640 */
2641 void
2642 zone_status_wait(zone_t *zone, zone_status_t status)
2643 {
2644 ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2645
2646 mutex_enter(&zone_status_lock);
2647 while (zone->zone_status < status) {
2648 cv_wait(&zone->zone_cv, &zone_status_lock);
2649 }
2650 mutex_exit(&zone_status_lock);
2651 }
2652
2653 /*
2654 * Private CPR-safe version of zone_status_wait().
2655 */
2656 static void
2657 zone_status_wait_cpr(zone_t *zone, zone_status_t status, char *str)
2658 {
2659 callb_cpr_t cprinfo;
2660
2661 ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2662
2663 CALLB_CPR_INIT(&cprinfo, &zone_status_lock, callb_generic_cpr,
2664 str);
2665 mutex_enter(&zone_status_lock);
2666 while (zone->zone_status < status) {
2667 CALLB_CPR_SAFE_BEGIN(&cprinfo);
2668 cv_wait(&zone->zone_cv, &zone_status_lock);
2669 CALLB_CPR_SAFE_END(&cprinfo, &zone_status_lock);
2670 }
2671 /*
2672 * zone_status_lock is implicitly released by the following.
2673 */
2674 CALLB_CPR_EXIT(&cprinfo);
2675 }
2676
2677 /*
2678 * Block until zone enters requested state or signal is received. Return (0)
2679 * if signaled, non-zero otherwise.
2680 */
2681 int
2682 zone_status_wait_sig(zone_t *zone, zone_status_t status)
2683 {
2684 ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2685
2686 mutex_enter(&zone_status_lock);
2687 while (zone->zone_status < status) {
2688 if (!cv_wait_sig(&zone->zone_cv, &zone_status_lock)) {
2689 mutex_exit(&zone_status_lock);
2690 return (0);
2691 }
2692 }
2693 mutex_exit(&zone_status_lock);
2694 return (1);
2695 }
2696
2697 /*
2698 * Block until the zone enters the requested state or the timeout expires,
2699 * whichever happens first. Return (-1) if operation timed out, time remaining
2700 * otherwise.
2701 */
2702 clock_t
2703 zone_status_timedwait(zone_t *zone, clock_t tim, zone_status_t status)
2704 {
2705 clock_t timeleft = 0;
2706
2707 ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2708
2709 mutex_enter(&zone_status_lock);
2710 while (zone->zone_status < status && timeleft != -1) {
2711 timeleft = cv_timedwait(&zone->zone_cv, &zone_status_lock, tim);
2712 }
2713 mutex_exit(&zone_status_lock);
2714 return (timeleft);
2715 }
2716
2717 /*
2718 * Block until the zone enters the requested state, the current process is
2719 * signaled, or the timeout expires, whichever happens first. Return (-1) if
2720 * operation timed out, 0 if signaled, time remaining otherwise.
2721 */
2722 clock_t
2723 zone_status_timedwait_sig(zone_t *zone, clock_t tim, zone_status_t status)
2724 {
2725 clock_t timeleft = tim - ddi_get_lbolt();
2726
2727 ASSERT(status > ZONE_MIN_STATE && status <= ZONE_MAX_STATE);
2728
2729 mutex_enter(&zone_status_lock);
2730 while (zone->zone_status < status) {
2731 timeleft = cv_timedwait_sig(&zone->zone_cv, &zone_status_lock,
2732 tim);
2733 if (timeleft <= 0)
2734 break;
2735 }
2736 mutex_exit(&zone_status_lock);
2737 return (timeleft);
2738 }
2739
2740 /*
2741 * Zones have two reference counts: one for references from credential
2742 * structures (zone_cred_ref), and one (zone_ref) for everything else.
2743 * This is so we can allow a zone to be rebooted while there are still
2744 * outstanding cred references, since certain drivers cache dblks (which
2745 * implicitly results in cached creds). We wait for zone_ref to drop to
2746 * 0 (actually 1), but not zone_cred_ref. The zone structure itself is
2747 * later freed when the zone_cred_ref drops to 0, though nothing other
2748 * than the zone id and privilege set should be accessed once the zone
2749 * is "dead".
2750 *
2751 * A debugging flag, zone_wait_for_cred, can be set to a non-zero value
2752 * to force halt/reboot to block waiting for the zone_cred_ref to drop
2753 * to 0. This can be useful to flush out other sources of cached creds
2754 * that may be less innocuous than the driver case.
2755 *
2756 * Zones also provide a tracked reference counting mechanism in which zone
2757 * references are represented by "crumbs" (zone_ref structures). Crumbs help
2758 * debuggers determine the sources of leaked zone references. See
2759 * zone_hold_ref() and zone_rele_ref() below for more information.
2760 */
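/*
 * Illustrative use of the tracked ("crumb") reference API; a sketch only,
 * not code reached from this file. ZONE_REF_NFS stands in for whichever
 * zone_ref_subsys_t constant the calling subsystem owns:
 *
 *	zone_ref_t ref;
 *
 *	zone_init_ref(&ref);
 *	zone_hold_ref(zone, &ref, ZONE_REF_NFS);
 *	...				(long-lived use of the zone)
 *	zone_rele_ref(&ref, ZONE_REF_NFS);
 */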
2761
2762 int zone_wait_for_cred = 0;
2763
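/*
 * Bump the zone's generic reference count; the caller must already hold
 * zone_lock.
 */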
2764 static void
2765 zone_hold_locked(zone_t *z)
2766 {
2767 ASSERT(MUTEX_HELD(&z->zone_lock));
2768 z->zone_ref++;
2769 ASSERT(z->zone_ref != 0);
2770 }
2771
2772 /*
2773 * Increment the specified zone's reference count. The zone's zone_t structure
2774 * will not be freed as long as the zone's reference count is nonzero.
2775 * Decrement the zone's reference count via zone_rele().
2776 *
2777 * NOTE: This function should only be used to hold zones for short periods of
2778 * time. Use zone_hold_ref() if the zone must be held for a long time.
2779 */
2780 void
2781 zone_hold(zone_t *z)
2782 {
2783 mutex_enter(&z->zone_lock);
2784 zone_hold_locked(z);
2785 mutex_exit(&z->zone_lock);
2786 }
2787
2788 /*
2789 * If the non-cred ref count drops to 1 and either the cred ref count
2790 * is 0 or we aren't waiting for cred references, the zone is ready to
2791 * be destroyed.
2792 */
2793 #define ZONE_IS_UNREF(zone) ((zone)->zone_ref == 1 && \
2794 (!zone_wait_for_cred || (zone)->zone_cred_ref == 0))
2795
2796 /*
2797 * Common zone reference release function invoked by zone_rele() and
2798 * zone_rele_ref(). If subsys is ZONE_REF_NUM_SUBSYS, then the specified
2799 * zone's subsystem-specific reference counters are not affected by the
2800 * release. If ref is not NULL, then the zone_ref_t to which it refers is
2801 * removed from the specified zone's reference list. ref must be non-NULL iff
2802 * subsys is not ZONE_REF_NUM_SUBSYS.
2803 */
2804 static void
2805 zone_rele_common(zone_t *z, zone_ref_t *ref, zone_ref_subsys_t subsys)
2806 {
2807 boolean_t wakeup;
2808
2809 mutex_enter(&z->zone_lock);
2810 ASSERT(z->zone_ref != 0);
2811 z->zone_ref--;
2812 if (subsys != ZONE_REF_NUM_SUBSYS) {
2813 ASSERT(z->zone_subsys_ref[subsys] != 0);
2814 z->zone_subsys_ref[subsys]--;
2815 list_remove(&z->zone_ref_list, ref);
2816 }
2817 if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
2818 /* no more refs, free the structure */
2819 mutex_exit(&z->zone_lock);
2820 zone_free(z);
2821 return;
2822 }
2823 /* signal zone_destroy so the zone can finish halting */
2824 wakeup = (ZONE_IS_UNREF(z) && zone_status_get(z) >= ZONE_IS_DEAD);
2825 mutex_exit(&z->zone_lock);
2826
2827 if (wakeup) {
2828 /*
2829 * Grabbing zonehash_lock here effectively synchronizes with
2830 * zone_destroy() to avoid missed signals.
2831 */
2832 mutex_enter(&zonehash_lock);
2833 cv_broadcast(&zone_destroy_cv);
2834 mutex_exit(&zonehash_lock);
2835 }
2836 }
2837
2838 /*
2839 * Decrement the specified zone's reference count. The specified zone will
2840 * cease to exist after this function returns if the reference count drops to
2841 * zero. This function should be paired with zone_hold().
2842 */
2843 void
2844 zone_rele(zone_t *z)
2845 {
2846 zone_rele_common(z, NULL, ZONE_REF_NUM_SUBSYS);
2847 }
2848
2849 /*
2850 * Initialize a zone reference structure. This function must be invoked for
2851 * a reference structure before the structure is passed to zone_hold_ref().
2852 */
2853 void
2854 zone_init_ref(zone_ref_t *ref)
2855 {
2856 ref->zref_zone = NULL;
2857 list_link_init(&ref->zref_linkage);
2858 }
2859
2860 /*
2861 * Acquire a reference to zone z. The caller must specify the
2862 * zone_ref_subsys_t constant associated with its subsystem. The specified
2863 * zone_ref_t structure will represent a reference to the specified zone. Use
2864 * zone_rele_ref() to release the reference.
2865 *
2866 * The referenced zone_t structure will not be freed as long as the zone_t's
2867 * zone_status field is not ZONE_IS_DEAD and the zone has outstanding
2868 * references.
2869 *
2870 * NOTE: The zone_ref_t structure must be initialized before it is used.
2871 * See zone_init_ref() above.
2872 */
2873 void
2874 zone_hold_ref(zone_t *z, zone_ref_t *ref, zone_ref_subsys_t subsys)
2875 {
2876 ASSERT(subsys >= 0 && subsys < ZONE_REF_NUM_SUBSYS);
2877
2878 /*
2879 * Prevent consumers from reusing a reference structure before
2880 * releasing it.
2881 */
2882 VERIFY(ref->zref_zone == NULL);
2883
2884 ref->zref_zone = z;
2885 mutex_enter(&z->zone_lock);
2886 zone_hold_locked(z);
2887 z->zone_subsys_ref[subsys]++;
2888 ASSERT(z->zone_subsys_ref[subsys] != 0);
2889 list_insert_head(&z->zone_ref_list, ref);
2890 mutex_exit(&z->zone_lock);
2891 }
2892
2893 /*
2894 * Release the zone reference represented by the specified zone_ref_t.
2895 * The reference is invalid after it's released; however, the zone_ref_t
2896 * structure can be reused without having to invoke zone_init_ref().
2897 * subsys should be the same value that was passed to zone_hold_ref()
2898 * when the reference was acquired.
2899 */
2900 void
2901 zone_rele_ref(zone_ref_t *ref, zone_ref_subsys_t subsys)
2902 {
2903 zone_rele_common(ref->zref_zone, ref, subsys);
2904
2905 /*
2906 * Set the zone_ref_t's zref_zone field to NULL to generate panics
2907 * when consumers dereference the reference. This helps us catch
2908 * consumers who use released references. Furthermore, this lets
2909 * consumers reuse the zone_ref_t structure without having to
2910 * invoke zone_init_ref().
2911 */
2912 ref->zref_zone = NULL;
2913 }
2914
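/*
 * Hold and release functions for the credential reference count described in
 * the block comment above. Dropping the last credential reference (with no
 * other references outstanding) frees the zone.
 */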
2915 void
2916 zone_cred_hold(zone_t *z)
2917 {
2918 mutex_enter(&z->zone_lock);
2919 z->zone_cred_ref++;
2920 ASSERT(z->zone_cred_ref != 0);
2921 mutex_exit(&z->zone_lock);
2922 }
2923
2924 void
2925 zone_cred_rele(zone_t *z)
2926 {
2927 boolean_t wakeup;
2928
2929 mutex_enter(&z->zone_lock);
2930 ASSERT(z->zone_cred_ref != 0);
2931 z->zone_cred_ref--;
2932 if (z->zone_ref == 0 && z->zone_cred_ref == 0) {
2933 /* no more refs, free the structure */
2934 mutex_exit(&z->zone_lock);
2935 zone_free(z);
2936 return;
2937 }
2938 /*
2939 * If zone_destroy is waiting for the cred references to drain
2940 * out, and they have, signal it.
2941 */
2942 wakeup = (zone_wait_for_cred && ZONE_IS_UNREF(z) &&
2943 zone_status_get(z) >= ZONE_IS_DEAD);
2944 mutex_exit(&z->zone_lock);
2945
2946 if (wakeup) {
2947 /*
2948 * Grabbing zonehash_lock here effectively synchronizes with
2949 * zone_destroy() to avoid missed signals.
2950 */
2951 mutex_enter(&zonehash_lock);
2952 cv_broadcast(&zone_destroy_cv);
2953 mutex_exit(&zonehash_lock);
2954 }
2955 }
2956
2957 void
2958 zone_task_hold(zone_t *z)
2959 {
2960 mutex_enter(&z->zone_lock);
2961 z->zone_ntasks++;
2962 ASSERT(z->zone_ntasks != 0);
2963 mutex_exit(&z->zone_lock);
2964 }
2965
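/*
 * Drop a task reference. When the task count falls to 1 (only zsched's task
 * remains) in a shutting-down zone, the zone is marked ZONE_IS_EMPTY; when it
 * reaches 0, zsched itself has exited and the zone is marked ZONE_IS_DEAD.
 */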
2966 void
2967 zone_task_rele(zone_t *zone)
2968 {
2969 uint_t refcnt;
2970
2971 mutex_enter(&zone->zone_lock);
2972 ASSERT(zone->zone_ntasks != 0);
2973 refcnt = --zone->zone_ntasks;
2974 if (refcnt > 1) { /* Common case */
2975 mutex_exit(&zone->zone_lock);
2976 return;
2977 }
2978 zone_hold_locked(zone); /* so we can use the zone_t later */
2979 mutex_exit(&zone->zone_lock);
2980 if (refcnt == 1) {
2981 /*
2982 * See if the zone is shutting down.
2983 */
2984 mutex_enter(&zone_status_lock);
2985 if (zone_status_get(zone) != ZONE_IS_SHUTTING_DOWN) {
2986 goto out;
2987 }
2988
2989 /*
2990 * Make sure the ntasks didn't change since we
2991 * dropped zone_lock.
2992 */
2993 mutex_enter(&zone->zone_lock);
2994 if (refcnt != zone->zone_ntasks) {
2995 mutex_exit(&zone->zone_lock);
2996 goto out;
2997 }
2998 mutex_exit(&zone->zone_lock);
2999
3000 /*
3001 * No more user processes in the zone. The zone is empty.
3002 */
3003 zone_status_set(zone, ZONE_IS_EMPTY);
3004 goto out;
3005 }
3006
3007 ASSERT(refcnt == 0);
3008 /*
3009 * zsched has exited; the zone is dead.
3010 */
3011 zone->zone_zsched = NULL; /* paranoia */
3012 mutex_enter(&zone_status_lock);
3013 zone_status_set(zone, ZONE_IS_DEAD);
3014 out:
3015 mutex_exit(&zone_status_lock);
3016 zone_rele(zone);
3017 }
3018
3019 zoneid_t
3020 getzoneid(void)
3021 {
3022 return (curproc->p_zone->zone_id);
3023 }
3024
3025 /*
3026 * Internal versions of zone_find_by_*(). These don't zone_hold() or
3027 * check the validity of a zone's state.
3028 */
3029 static zone_t *
3030 zone_find_all_by_id(zoneid_t zoneid)
3031 {
3032 mod_hash_val_t hv;
3033 zone_t *zone = NULL;
3034
3035 ASSERT(MUTEX_HELD(&zonehash_lock));
3036
3037 if (mod_hash_find(zonehashbyid,
3038 (mod_hash_key_t)(uintptr_t)zoneid, &hv) == 0)
3039 zone = (zone_t *)hv;
3040 return (zone);
3041 }
3042
3043 static zone_t *
3044 zone_find_all_by_label(const ts_label_t *label)
3045 {
3046 mod_hash_val_t hv;
3047 zone_t *zone = NULL;
3048
3049 ASSERT(MUTEX_HELD(&zonehash_lock));
3050
3051 /*
3052 * zonehashbylabel is not maintained for unlabeled systems
3053 */
3054 if (!is_system_labeled())
3055 return (NULL);
3056 if (mod_hash_find(zonehashbylabel, (mod_hash_key_t)label, &hv) == 0)
3057 zone = (zone_t *)hv;
3058 return (zone);
3059 }
3060
3061 static zone_t *
3062 zone_find_all_by_name(char *name)
3063 {
3064 mod_hash_val_t hv;
3065 zone_t *zone = NULL;
3066
3067 ASSERT(MUTEX_HELD(&zonehash_lock));
3068
3069 if (mod_hash_find(zonehashbyname, (mod_hash_key_t)name, &hv) == 0)
3070 zone = (zone_t *)hv;
3071 return (zone);
3072 }
3073
3074 /*
3075 * Public interface for looking up a zone by zoneid. Only returns the zone if
3076 * it is fully initialized, and has not yet begun the zone_destroy() sequence.
3077 * Caller must call zone_rele() once it is done with the zone.
3078 *
3079 * The zone may begin the zone_destroy() sequence immediately after this
3080 * function returns, but may be safely used until zone_rele() is called.
3081 */
3082 zone_t *
3083 zone_find_by_id(zoneid_t zoneid)
3084 {
3085 zone_t *zone;
3086 zone_status_t status;
3087
3088 mutex_enter(&zonehash_lock);
3089 if ((zone = zone_find_all_by_id(zoneid)) == NULL) {
3090 mutex_exit(&zonehash_lock);
3091 return (NULL);
3092 }
3093 status = zone_status_get(zone);
3094 if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
3095 /*
3096 * For all practical purposes the zone doesn't exist.
3097 */
3098 mutex_exit(&zonehash_lock);
3099 return (NULL);
3100 }
3101 zone_hold(zone);
3102 mutex_exit(&zonehash_lock);
3103 return (zone);
3104 }
3105
3106 /*
3107 * Similar to zone_find_by_id, but using zone label as the key.
3108 */
3109 zone_t *
3110 zone_find_by_label(const ts_label_t *label)
3111 {
3112 zone_t *zone;
3113 zone_status_t status;
3114
3115 mutex_enter(&zonehash_lock);
3116 if ((zone = zone_find_all_by_label(label)) == NULL) {
3117 mutex_exit(&zonehash_lock);
3118 return (NULL);
3119 }
3120
3121 status = zone_status_get(zone);
3122 if (status > ZONE_IS_DOWN) {
3123 /*
3124 * For all practical purposes the zone doesn't exist.
3125 */
3126 mutex_exit(&zonehash_lock);
3127 return (NULL);
3128 }
3129 zone_hold(zone);
3130 mutex_exit(&zonehash_lock);
3131 return (zone);
3132 }
3133
3134 /*
3135 * Similar to zone_find_by_id, but using zone name as the key.
3136 */
3137 zone_t *
3138 zone_find_by_name(char *name)
3139 {
3140 zone_t *zone;
3141 zone_status_t status;
3142
3143 mutex_enter(&zonehash_lock);
3144 if ((zone = zone_find_all_by_name(name)) == NULL) {
3145 mutex_exit(&zonehash_lock);
3146 return (NULL);
3147 }
3148 status = zone_status_get(zone);
3149 if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
3150 /*
3151 * For all practical purposes the zone doesn't exist.
3152 */
3153 mutex_exit(&zonehash_lock);
3154 return (NULL);
3155 }
3156 zone_hold(zone);
3157 mutex_exit(&zonehash_lock);
3158 return (zone);
3159 }
3160
3161 /*
3162 * Similar to zone_find_by_id(), using the path as a key. For instance,
3163 * if there is a zone "foo" rooted at /foo/root, and the path argument
3164 * is "/foo/root/proc", it will return the held zone_t corresponding to
3165 * zone "foo".
3166 *
3167 * zone_find_by_path() always returns a non-NULL value, since at the
3168 * very least every path will be contained in the global zone.
3169 *
3170 * As with the other zone_find_by_*() functions, the caller is
3171 * responsible for zone_rele()ing the return value of this function.
3172 */
3173 zone_t *
3174 zone_find_by_path(const char *path)
3175 {
3176 zone_t *zone;
3177 zone_t *zret = NULL;
3178 zone_status_t status;
3179
3180 if (path == NULL) {
3181 /*
3182 * Call from rootconf().
3183 */
3184 zone_hold(global_zone);
3185 return (global_zone);
3186 }
3187 ASSERT(*path == '/');
3188 mutex_enter(&zonehash_lock);
3189 for (zone = list_head(&zone_active); zone != NULL;
3190 zone = list_next(&zone_active, zone)) {
3191 if (ZONE_PATH_VISIBLE(path, zone))
3192 zret = zone;
3193 }
3194 ASSERT(zret != NULL);
3195 status = zone_status_get(zret);
3196 if (status < ZONE_IS_READY || status > ZONE_IS_DOWN) {
3197 /*
3198 * Zone practically doesn't exist.
3199 */
3200 zret = global_zone;
3201 }
3202 zone_hold(zret);
3203 mutex_exit(&zonehash_lock);
3204 return (zret);
3205 }
3206
3207 /*
3208 * Public interface for updating per-zone load averages. Called once per
3209 * second.
3210 *
3211 * Based on loadavg_update(), genloadavg() and calcloadavg() from clock.c.
3212 */
3213 void
3214 zone_loadavg_update(void)
3215 {
3216 zone_t *zp;
3217 zone_status_t status;
3218 struct loadavg_s *lavg;
3219 hrtime_t zone_total;
3220 uint64_t tmp;
3221 int i;
3222 hrtime_t hr_avg;
3223 int nrun;
3224 static int64_t f[3] = { 135, 27, 9 };
3225 int64_t q, r;
3226
3227 mutex_enter(&zonehash_lock);
3228 for (zp = list_head(&zone_active); zp != NULL;
3229 zp = list_next(&zone_active, zp)) {
3230 mutex_enter(&zp->zone_lock);
3231
3232 /* Skip zones that are on the way down or not yet up */
3233 status = zone_status_get(zp);
3234 if (status < ZONE_IS_READY || status >= ZONE_IS_DOWN) {
3235 /* For all practical purposes the zone doesn't exist. */
3236 mutex_exit(&zp->zone_lock);
3237 continue;
3238 }
3239
3240 /*
3241 * Update the 10 second moving average data in zone_loadavg.
3242 */
3243 lavg = &zp->zone_loadavg;
3244
3245 tmp = cpu_uarray_sum_all(zp->zone_ustate);
3246 zone_total = UINT64_OVERFLOW_TO_INT64(tmp);
3247
3248 scalehrtime(&zone_total);
3249
3250 /* The zone_total should always be increasing. */
3251 lavg->lg_loads[lavg->lg_cur] = (zone_total > lavg->lg_total) ?
3252 zone_total - lavg->lg_total : 0;
3253 lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
3254 /* lg_total holds the prev. 1 sec. total */
3255 lavg->lg_total = zone_total;
3256
3257 /*
3258 * To simplify the calculation, we don't calculate the load avg.
3259 * until the zone has been up for at least 10 seconds and our
3260 * moving average is thus full.
3261 */
3262 if ((lavg->lg_len + 1) < S_LOADAVG_SZ) {
3263 lavg->lg_len++;
3264 mutex_exit(&zp->zone_lock);
3265 continue;
3266 }
3267
3268 /* Now calculate the 1min, 5min, 15 min load avg. */
3269 hr_avg = 0;
3270 for (i = 0; i < S_LOADAVG_SZ; i++)
3271 hr_avg += lavg->lg_loads[i];
3272 hr_avg = hr_avg / S_LOADAVG_SZ;
3273 nrun = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);
3274
3275 /* Compute load avg. See comment in calcloadavg() */
3276 for (i = 0; i < 3; i++) {
3277 q = (zp->zone_hp_avenrun[i] >> 16) << 7;
3278 r = (zp->zone_hp_avenrun[i] & 0xffff) << 7;
3279 zp->zone_hp_avenrun[i] +=
3280 ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
3281
3282 /* avenrun[] can only hold 31 bits of load avg. */
3283 if (zp->zone_hp_avenrun[i] <
3284 ((uint64_t)1<<(31+16-FSHIFT)))
3285 zp->zone_avenrun[i] = (int32_t)
3286 (zp->zone_hp_avenrun[i] >> (16 - FSHIFT));
3287 else
3288 zp->zone_avenrun[i] = 0x7fffffff;
3289 }
3290
3291 mutex_exit(&zp->zone_lock);
3292 }
3293 mutex_exit(&zonehash_lock);
3294 }
3295
3296 /*
3297 * Get the number of cpus visible to this zone. The system-wide global
3298 * 'ncpus' is returned if pools are disabled, the caller is in the
3299 * global zone, or a NULL zone argument is passed in.
3300 */
3301 int
3302 zone_ncpus_get(zone_t *zone)
3303 {
3304 int myncpus = zone == NULL ? 0 : zone->zone_ncpus;
3305
3306 return (myncpus != 0 ? myncpus : ncpus);
3307 }
3308
3309 /*
3310 * Get the number of online cpus visible to this zone. The system-wide
3311 * global 'ncpus_online' is returned if pools are disabled, the caller
3312 * is in the global zone, or a NULL zone argument is passed in.
3313 */
3314 int
3315 zone_ncpus_online_get(zone_t *zone)
3316 {
3317 int myncpus_online = zone == NULL ? 0 : zone->zone_ncpus_online;
3318
3319 return (myncpus_online != 0 ? myncpus_online : ncpus_online);
3320 }
3321
3322 /*
3323 * Return the pool to which the zone is currently bound.
3324 */
3325 pool_t *
3326 zone_pool_get(zone_t *zone)
3327 {
3328 ASSERT(pool_lock_held());
3329
3330 return (zone->zone_pool);
3331 }
3332
3333 /*
3334 * Set the zone's pool pointer and update the zone's visibility to match
3335 * the resources in the new pool.
3336 */
3337 void
3338 zone_pool_set(zone_t *zone, pool_t *pool)
3339 {
3340 ASSERT(pool_lock_held());
3341 ASSERT(MUTEX_HELD(&cpu_lock));
3342
3343 zone->zone_pool = pool;
3344 zone_pset_set(zone, pool->pool_pset->pset_id);
3345 }
3346
3347 /*
3348 * Return the cached value of the id of the processor set to which the
3349 * zone is currently bound. The value will be ZONE_PS_INVAL if the pools
3350 * facility is disabled.
3351 */
3352 psetid_t
3353 zone_pset_get(zone_t *zone)
3354 {
3355 ASSERT(MUTEX_HELD(&