#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#

unset LD_LIBRARY_PATH
PATH=/usr/bin:/usr/sbin
export PATH

. /usr/lib/brand/shared/common.ksh

# Use the ipkg-brand ZFS property for denoting the zone root's active dataset.
PROP_ACTIVE="org.opensolaris.libbe:active"
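# The active dataset can be inspected or toggled directly with zfs(1M); for
# example (illustrative dataset name):
#	zfs get org.opensolaris.libbe:active rpool/zones/myzone/ROOT/zbe-0
#	zfs set org.opensolaris.libbe:active=on rpool/zones/myzone/ROOT/zbe-0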

w_sanity_detail=$(gettext "       WARNING: Skipping image sanity checks.")
f_sanity_detail=$(gettext  "Missing %s at %s")
f_sanity_sparse=$(gettext  "Is this a sparse zone image?  The image must be whole-root.")
f_sanity_vers=$(gettext  "The image release version must be 10 (got %s); the zone is not usable on this system.")
f_not_s10_image=$(gettext  "%s doesn't look like a Solaris 10 image.")
f_sanity_nopatch=$(gettext "Unable to determine the image's patch level.")
f_sanity_downrev=$(gettext "The image patch level is downrev for running in a solaris10 branded zone.\n(patchlist %s)")
f_need_newer_emul=$(gettext "The image requires a newer version of the solaris10 brand emulation.")
f_zfs_create=$(gettext "Unable to create the zone's ZFS dataset.")
f_no_ds=$(gettext "No zonepath dataset; the zonepath must be a ZFS dataset.")
f_multiple_ds=$(gettext "Multiple active datasets.")
f_no_active_ds=$(gettext "No active dataset; the zone's ZFS root dataset must be configured as\n\ta zone boot environment.")
f_zfs_unmount=$(gettext "Unable to unmount the zone's root ZFS dataset (%s).\nIs there a global zone process inside the zone root?\nThe current zone boot environment will remain mounted.\n")
f_zfs_mount=$(gettext "Unable to mount the zone's ZFS dataset.")
incompat_options=$(gettext "mutually exclusive options.\n%s")

sanity_ok=$(gettext     "  Sanity Check: Passed.  Looks like a Solaris 10 image.")
sanity_fail=$(gettext   "  Sanity Check: FAILED (see log for details).")

e_badboot=$(gettext "Zone boot failed")
e_nosingleuser=$(gettext "ERROR: zone did not finish booting to single-user.")
e_unconfig=$(gettext "sys-unconfig failed")
v_unconfig=$(gettext "Performing zone sys-unconfig")

sanity_check()
{
	typeset dir="$1"
	res=0

	#
	# Check for some required directories and make sure this isn't a
	# sparse zone image.
	#
	checks="etc etc/svc var var/svc"
	for x in $checks; do
		if [[ ! -e $dir/$x ]]; then
			log "$f_sanity_detail" "$x" "$dir"
			res=1
		fi
	done
	# Files from SUNWcsr and SUNWcsu that are in sparse inherit-pkg-dirs.
	checks="lib/svc sbin/zonename usr/bin/chmod"
	for x in $checks; do
		if [[ ! -e $dir/$x ]]; then
			log "$f_sanity_detail" "$x" "$dir"
			log "$f_sanity_sparse"
			res=1
		fi
	done

	if (( $res != 0 )); then
		log "$sanity_fail"
		fatal "$install_fail" "$ZONENAME"
	fi

	if [[ "$SANITY_SKIP" == 1 ]]; then
		log "$w_sanity_detail"
		return
	fi

	#
	# Check the image release to be sure it's S10.
	#
	image_vers="unknown"
	if [[ -f $dir/var/sadm/system/admin/INST_RELEASE ]]; then
		image_vers=$(nawk -F= '{if ($1 == "VERSION") print $2}' \
		    $dir/var/sadm/system/admin/INST_RELEASE)
	fi

	if [[ "$image_vers" != "10" ]]; then
		log "$f_sanity_vers" "$image_vers"
		res=1
	fi

	#
	# Make sure we have the minimal KU patch we support.  These are the
	# KUs for S10u8.
	#
	if [[ $(uname -p) == "i386" ]]; then
		req_patch="141445-09"
	else
		req_patch="141444-09"
	fi

	for i in $dir/var/sadm/pkg/SUNWcakr*
	do
		if [[ ! -d $i || ! -f $i/pkginfo ]]; then
			log "$f_sanity_nopatch"
			res=1
		fi
	done

	#
	# Check the core kernel pkg for the required KU patch.
	#
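	# Each pkginfo file carries a PATCHLIST= line of space-separated patch
	# IDs (illustrative form: PATCHLIST=141444-09 ...); the loop below
	# extracts that list and scans it for $req_patch.
	#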
	found=0
	for i in $dir/var/sadm/pkg/SUNWcakr*/pkginfo
	do
		patches=$(nawk -F= '{if ($1 == "PATCHLIST") print $2}' $i)
		for patch in $patches
		do
			if [[ $patch == $req_patch ]]; then
				found=1
				break
			fi
		done

		if (( $found == 1 )); then
			break
		fi
	done

	if (( $found != 1 )); then
		log "$f_sanity_downrev" "$patches"
		res=1
	fi

	#
	# Check the S10 image for a required version of the emulation.
	#
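	# Both version files are expected to hold a single numeric version
	# (comment lines starting with "#" are ignored), so the two values can
	# be compared arithmetically below.
	#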
	VERS_FILE=/usr/lib/brand/solaris10/version
	s10vers_needs=0
	if [[ -f $dir/$VERS_FILE ]]; then
		s10vers_needs=$(/usr/bin/egrep -v "^#" $dir/$VERS_FILE)
	fi

	# Now get the current emulation version.
	emul_vers=$(/usr/bin/egrep -v "^#" $VERS_FILE)

	# Verify that the emulation can run this version of S10.
	if (( $s10vers_needs > $emul_vers )); then
		log "$f_need_newer_emul"
		res=1
	fi

	if (( $res != 0 )); then
		log "$sanity_fail"
		fatal "$install_fail" "$ZONENAME"
	fi

	vlog "$sanity_ok"
}

# Find the active dataset under the zonepath dataset to mount on zonepath/root.
# $1 ZONEPATH_DS
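#
# For example (illustrative dataset names), "zfs list -H -r -t filesystem
# -o name,$PROP_ACTIVE rpool/zones/myzone/ROOT" might print:
#	rpool/zones/myzone/ROOT			-
#	rpool/zones/myzone/ROOT/zbe-0		on
# in which case ACTIVE_DS is set to rpool/zones/myzone/ROOT/zbe-0.  The nawk
# script exits non-zero if more than one child of ROOT is marked active.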
get_active_ds() {
	ACTIVE_DS=`/usr/sbin/zfs list -H -r -t filesystem \
	    -o name,$PROP_ACTIVE $1/ROOT | \
	    /usr/bin/nawk ' {
		if ($1 ~ /ROOT\/[^\/]+$/ && $2 == "on") {
			print $1
			if (found == 1)
				exit 1
			found = 1
		}
	    }'`

	if [ $? -ne 0 ]; then
		fail_fatal "$f_multiple_ds"
	fi

	if [ -z "$ACTIVE_DS" ]; then
		fail_fatal "$f_no_active_ds"
	fi
}

#
# Make sure the active dataset is mounted for the zone.  There are several
# cases to consider:
# 1) First boot of the zone, nothing is mounted.
# 2) Zone is halting, active dataset remains the same.
# 3) Zone is halting, there is a new active dataset to mount.
#
mount_active_ds() {
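	# The pipeline below is used only for its exit status (egrep -s keeps
	# it quiet); it tells us whether anything is currently mounted on
	# $zonepath/root.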
	mount -p | cut -d' ' -f3 | egrep -s "^$zonepath/root$"
	if (( $? == 0 )); then
		# Umount current dataset on the root (it might be an old BE).
		/usr/sbin/umount $zonepath/root
		if (( $? != 0 )); then
			# The umount failed; leave the old BE mounted.  If
			# there are zone processes (i.e. zsched) in the fs,
			# we're umounting because we failed validation during
			# boot, so stay quiet; otherwise, warn that a global
			# zone process is preventing the umount.
			nproc=`pgrep -z $zonename | wc -l`
			if (( $nproc == 0 )); then
				printf "$f_zfs_unmount" "$zonepath/root"
			fi
			return
		fi
	fi

	# Mount active dataset on the root.
	get_zonepath_ds $zonepath
	get_active_ds $ZONEPATH_DS

	/usr/sbin/mount -F zfs $ACTIVE_DS $zonepath/root || \
	    fail_fatal "$f_zfs_mount"
}

#
# Set up ZFS dataset hierarchy for the zone root dataset.
#
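# With an illustrative zonepath dataset of rpool/zones/myzone, the code below
# ends up with this layout (tolerating pieces that already exist from attach):
#	rpool/zones/myzone/ROOT		mountpoint=legacy, zoned=on
#	rpool/zones/myzone/ROOT/zbe-0	$PROP_ACTIVE=on, canmount=noauto
# and mounts ROOT/zbe-0 on $ZONEROOT.
#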
create_active_ds() {
	# Find the zone's current dataset.  This should have been created by
	# zoneadm (or the attach hook).
	get_zonepath_ds $zonepath

	#
	# We need to tolerate errors while creating the datasets and making the
	# mountpoint, since these could already exist from an attach scenario.
	#

	/usr/sbin/zfs list -H -o name $ZONEPATH_DS/ROOT >/dev/null 2>&1
	if (( $? != 0 )); then
		/usr/sbin/zfs create -o mountpoint=legacy -o zoned=on \
		    $ZONEPATH_DS/ROOT
		if (( $? != 0 )); then
			fail_fatal "$f_zfs_create"
		fi
	else
		/usr/sbin/zfs set mountpoint=legacy $ZONEPATH_DS/ROOT \
		    >/dev/null 2>&1
		/usr/sbin/zfs set zoned=on $ZONEPATH_DS/ROOT \
		    >/dev/null 2>&1
	fi

	BENAME=zbe-0
	/usr/sbin/zfs list -H -o name $ZONEPATH_DS/ROOT/$BENAME >/dev/null 2>&1
	if (( $? != 0 )); then
		/usr/sbin/zfs create -o $PROP_ACTIVE=on -o canmount=noauto \
		    $ZONEPATH_DS/ROOT/$BENAME >/dev/null 2>&1
		if (( $? != 0 )); then
			fail_fatal "$f_zfs_create"
		fi
	else
		/usr/sbin/zfs set $PROP_ACTIVE=on $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
		/usr/sbin/zfs set canmount=noauto $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
		/usr/sbin/zfs inherit mountpoint $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
		/usr/sbin/zfs inherit zoned $ZONEPATH_DS/ROOT/$BENAME \
		    >/dev/null 2>&1
	fi

	if [ ! -d $ZONEROOT ]; then
		/usr/bin/mkdir -m 0755 -p $ZONEROOT || \
		    fail_fatal "$f_mkdir" "$ZONEROOT"
	fi
	/usr/bin/chmod 700 $ZONEPATH || fail_fatal "$f_chmod" "$ZONEPATH"

	/usr/sbin/mount -F zfs $ZONEPATH_DS/ROOT/$BENAME $ZONEROOT || \
	    fail_fatal "$f_zfs_mount"
}

#
# Before booting the zone we may need to create a few mount points, in case
# they don't already exist.
#
# Whenever we reach into the zone while running in the global zone we
# need to validate that none of the interim directories are symlinks
# that could cause us to inadvertently modify the global zone.
#
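# (The "! -h" tests below are those symlink guards: nothing is created under
# /etc or /etc/svc in the zone root if either is a symlink.)
#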
mk_zone_dirs() {
	vlog "$v_mkdirs"
	if [[ ! -f $ZONEROOT/tmp && ! -d $ZONEROOT/tmp ]]; then
		mkdir -m 1777 -p $ZONEROOT/tmp || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/var/run && ! -d $ZONEROOT/var/run ]]; then
		mkdir -m 1755 -p $ZONEROOT/var/run || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/var/tmp && ! -d $ZONEROOT/var/tmp ]]; then
		mkdir -m 1777 -p $ZONEROOT/var/tmp || exit $EXIT_CODE
	fi
	if [[ ! -h $ZONEROOT/etc && ! -f $ZONEROOT/etc/mnttab ]]; then
		/usr/bin/touch $ZONEROOT/etc/mnttab || exit $EXIT_CODE
		/usr/bin/chmod 444 $ZONEROOT/etc/mnttab || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/proc && ! -d $ZONEROOT/proc ]]; then
		mkdir -m 755 -p $ZONEROOT/proc || exit $EXIT_CODE
	fi
	if [[ ! -f $ZONEROOT/dev && ! -d $ZONEROOT/dev ]]; then
		mkdir -m 755 -p $ZONEROOT/dev || exit $EXIT_CODE
	fi
	if [[ ! -h $ZONEROOT/etc && ! -h $ZONEROOT/etc/svc && \
	    ! -d $ZONEROOT/etc/svc ]]; then
		mkdir -m 755 -p $ZONEROOT/etc/svc/volatile || exit $EXIT_CODE
	fi
}

#
# We're sys-unconfig-ing the zone.  This will normally halt the zone; however,
# there are problems with sys-unconfig and it can hang when the zone is booted
# to milestone=none.  Sys-unconfig also sometimes hangs halting the zone.
# Thus, we take some care to work around these sys-unconfig limitations.
#
# On entry we expect the zone to be booted.  We use sys-unconfig -R to make it
# think it's working on an alternate root and let the caller halt the zone.
#
sysunconfig_zone() {
	/usr/sbin/zlogin -S $ZONENAME /usr/sbin/sys-unconfig -R /./ \
	    >/dev/null 2>&1
	if (( $? != 0 )); then
		error "$e_unconfig"
		return 1
	fi

	return 0
}