#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#

#
# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/include/math.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib
. $STF_SUITE/tests/functional/rsend/rsend.cfg

#
# Set up test model which includes various datasets
#
#		@final
#		@snapB
#		@init
#		|
#   ______ pclone
#  |      /
#  |@psnap
#  ||				@final
#  ||@final	@final		@snapC
#  ||@snapC	@snapC		@snapB
#  ||@snapA	@snapB		@snapA
#  ||@init	@init		@init
#  |||		|		|
# $pool -------- $FS ------- fs1 ------- fs2
#    \		  \\_____	  \	   |
#     vol	   vol	 \____	   \	  @fsnap
#      |	    |	      \	    \	    \
#    @init	 @vsnap	       | ------------ fclone
#    @snapA	 @init		\	|	|
#    @final	 @snapB		 \	|      @init
#		 @snapC		vclone	       @snapA
#		 @final		   |	       @final
#				 @init
#				 @snapC
#				 @final
#
# $1 pool name
#
function setup_test_model
{
	typeset pool=$1

	log_must zfs create -p $pool/$FS/fs1/fs2

	log_must zfs snapshot $pool@psnap
	log_must zfs clone $pool@psnap $pool/pclone

	# Volumes and their clone only exist in the global zone.
	if is_global_zone ; then
		log_must zfs create -V 16M $pool/vol
		log_must zfs create -V 16M $pool/$FS/vol
		log_must zfs snapshot $pool/$FS/vol@vsnap
		log_must zfs clone $pool/$FS/vol@vsnap $pool/$FS/vclone
	fi

	log_must snapshot_tree $pool/$FS/fs1/fs2@fsnap
	log_must zfs clone $pool/$FS/fs1/fs2@fsnap $pool/$FS/fs1/fclone
	log_must zfs snapshot -r $pool@init

	log_must snapshot_tree $pool@snapA
	log_must snapshot_tree $pool@snapC
	log_must snapshot_tree $pool/pclone@snapB
	log_must snapshot_tree $pool/$FS@snapB
	log_must snapshot_tree $pool/$FS@snapC
	log_must snapshot_tree $pool/$FS/fs1@snapA
	log_must snapshot_tree $pool/$FS/fs1@snapB
	log_must snapshot_tree $pool/$FS/fs1@snapC
	log_must snapshot_tree $pool/$FS/fs1/fclone@snapA

	if is_global_zone ; then
		log_must zfs snapshot $pool/vol@snapA
		log_must zfs snapshot $pool/$FS/vol@snapB
		log_must zfs snapshot $pool/$FS/vol@snapC
		log_must zfs snapshot $pool/$FS/vclone@snapC
	fi

	log_must zfs snapshot -r $pool@final

	return 0
}

#
# Cleanup the BACKDIR and given pool content and all the sub datasets
#
# $1 pool name
#
function cleanup_pool
{
	typeset pool=$1
	log_must rm -rf $BACKDIR/*

	if is_global_zone ; then
		log_must zfs destroy -Rf $pool
	else
		# In a local zone only the datasets below the zone root
		# dataset may be destroyed, not the root itself.
		typeset list=$(zfs list -H -r -t all -o name $pool)
		for ds in $list ; do
			if [[ $ds != $pool ]] ; then
				if datasetexists $ds ; then
					log_must zfs destroy -Rf $ds
				fi
			fi
		done
	fi

	typeset mntpnt=$(get_prop mountpoint $pool)
	if ! ismounted $pool ; then
		# Make sure mountpoint directory is empty before remounting.
		if [[ -d $mntpnt ]]; then
			log_must rm -rf $mntpnt/*
		fi

		log_must zfs mount $pool
	fi
	if [[ -d $mntpnt ]]; then
		rm -rf $mntpnt/*
	fi

	return 0
}

#
# Cleanup the pools used by the send/receive tests.
#
function cleanup_pools
{
	cleanup_pool $POOL2
	destroy_pool $POOL3
}

#
# Detect if the given two filesystems have same sub-datasets
#
# $1 source filesystem
# $2 destination filesystem
#
function cmp_ds_subs
{
	typeset src_fs=$1
	typeset dst_fs=$2

	zfs list -r -H -t all -o name $src_fs > $BACKDIR/src1
	zfs list -r -H -t all -o name $dst_fs > $BACKDIR/dst1

	# The single quotes defer expansion of $src_fs/$dst_fs until eval
	# re-parses the command, so the variable is expanded inside the
	# sed expression rather than by the initial parse.
	eval sed -e 's:^$src_fs:PREFIX:g' < $BACKDIR/src1 > $BACKDIR/src
	eval sed -e 's:^$dst_fs:PREFIX:g' < $BACKDIR/dst1 > $BACKDIR/dst

	diff $BACKDIR/src $BACKDIR/dst
	typeset -i ret=$?

	rm -f $BACKDIR/src $BACKDIR/dst $BACKDIR/src1 $BACKDIR/dst1

	return $ret
}

#
# Compare all the directories and files in two filesystems
#
# $1 source filesystem
# $2 destination filesystem
#
function cmp_ds_cont
{
	typeset src_fs=$1
	typeset dst_fs=$2

	typeset srcdir dstdir
	srcdir=$(get_prop mountpoint $src_fs)
	dstdir=$(get_prop mountpoint $dst_fs)

	diff -r $srcdir $dstdir > /dev/null 2>&1

	return $?
}

#
# Compare the given two dataset properties
#
# $1 dataset 1
# $2 dataset 2
#
function cmp_ds_prop
{
	typeset dtst1=$1
	typeset dtst2=$2

	for item in "type" "origin" "volblocksize" "aclinherit" "aclmode" \
	    "atime" "canmount" "checksum" "compression" "copies" "devices" \
	    "dnodesize" "exec" "quota" "readonly" "recordsize" "reservation" \
	    "setuid" "sharenfs" "snapdir" "version" "volsize" "xattr" "zoned" \
	    "mountpoint"; do
		zfs get -H -o property,value,source $item $dtst1 >> \
		    $BACKDIR/dtst1
		zfs get -H -o property,value,source $item $dtst2 >> \
		    $BACKDIR/dtst2
	done

	#
	# Write the sed output to separate files: redirecting sed's output
	# back onto its own input file truncates the file before sed reads
	# it, which would make both files empty and the diff below pass
	# vacuously.
	#
	eval sed -e 's:$dtst1:PREFIX:g' < $BACKDIR/dtst1 > $BACKDIR/dtst1.fix
	eval sed -e 's:$dtst2:PREFIX:g' < $BACKDIR/dtst2 > $BACKDIR/dtst2.fix

	diff $BACKDIR/dtst1.fix $BACKDIR/dtst2.fix
	typeset -i ret=$?

	rm -f $BACKDIR/dtst1 $BACKDIR/dtst2 $BACKDIR/dtst1.fix $BACKDIR/dtst2.fix

	return $ret
}

#
# Randomly create directories and files
#
# $1 directory
#
function random_tree
{
	typeset dir=$1

	if [[ -d $dir ]]; then
		rm -rf $dir
	fi
	mkdir -p $dir
	typeset -i ret=$?

	typeset -i nl nd nf
	((nl = RANDOM % 6 + 1))
	((nd = RANDOM % 3 ))
	((nf = RANDOM % 5 ))
	mktree -b $dir -l $nl -d $nd -f $nf
	((ret |= $?))

	return $ret
}

#
# Put data in filesystem and take snapshot
#
# $1 snapshot name
#
function snapshot_tree
{
	typeset snap=$1
	typeset ds=${snap%%@*}
	typeset type=$(get_prop "type" $ds)

	typeset -i ret=0
	if [[ $type == "filesystem" ]]; then
		typeset mntpnt=$(get_prop mountpoint $ds)
		((ret |= $?))

		if ((ret == 0)) ; then
			# ${snap##$ds} is the "@snapname" suffix; a random
			# tree is created in a directory of that name.
			eval random_tree $mntpnt/${snap##$ds}
			((ret |= $?))
		fi
	fi

	if ((ret == 0)) ; then
		zfs snapshot $snap
		((ret |= $?))
	fi

	return $ret
}

#
# Destroy the given snapshot and the directory snapshot_tree created for it
#
# $1 snapshot
#
function destroy_tree
{
	typeset -i ret=0
	typeset snap
	for snap in "$@" ; do
		zfs destroy $snap
		ret=$?

		typeset ds=${snap%%@*}
		typeset type=$(get_prop "type" $ds)
		if [[ $type == "filesystem" ]]; then
			typeset mntpnt=$(get_prop mountpoint $ds)
			((ret |= $?))

			#
			# Only remove the "@snapname" directory created by
			# snapshot_tree when the mountpoint lookup (and the
			# destroy before it) succeeded.
			#
			if ((ret == 0)); then
				rm -rf $mntpnt/${snap##$ds}
				((ret |= $?))
			fi
		fi

		if ((ret != 0)); then
			return $ret
		fi
	done

	return 0
}

#
# Get all the sub-datasets of given dataset with specific suffix
#
# $1 Given dataset
# $2 Suffix
#
function getds_with_suffix
{
	typeset ds=$1
	typeset suffix=$2

	typeset list=$(zfs list -r -H -t all -o name $ds | grep "$suffix$")

	echo $list
}

#
# Output inherited properties which are editable for file system
#
function fs_inherit_prop
{
	typeset fs_prop
	if is_global_zone ; then
		fs_prop=$(zfs inherit 2>&1 | \
		    awk '$2=="YES" && $3=="YES" {print $1}')
		if ! is_te_enabled ; then
			fs_prop=$(echo $fs_prop | grep -v "mlslabel")
		fi
	else
		# These properties are not writable in a local zone.
		fs_prop=$(zfs inherit 2>&1 | \
		    awk '$2=="YES" && $3=="YES" {print $1}' |
		    egrep -v "devices|mlslabel|sharenfs|sharesmb|zoned")
	fi

	echo $fs_prop
}

#
# Output inherited properties for volume
#
function vol_inherit_prop
{
	echo "checksum readonly"
}

#
# Get the destination dataset to compare
#
# $1 source dataset
# $2 destination dataset prefix
#
function get_dst_ds
{
	typeset srcfs=$1
	typeset dstfs=$2

	#
	# If the srcfs is not a pool, append the source's sub-path to the
	# destination.
	#
	if ! zpool list $srcfs > /dev/null 2>&1 ; then
		eval dstfs="$dstfs/${srcfs#*/}"
	fi

	echo $dstfs
}

#
# Make test files
#
# $1 Number of files to create
# $2 Maximum file size
# $3 File ID offset
# $4 File system to create the files on
#
function mk_files
{
	nfiles=$1
	maxsize=$2
	file_id_offset=$3
	fs=$4

	for ((i=0; i<$nfiles; i=i+1)); do
		# "+ 1" keeps the block size at least 1; dd fails on bs=0,
		# which would abort the test spuriously.
		dd if=/dev/urandom \
		    of=/$fs/file-$maxsize-$((i+$file_id_offset)) \
		    bs=$(($RANDOM * $RANDOM % $maxsize + 1)) \
		    count=1 >/dev/null 2>&1 || log_fail \
		    "Failed to create /$fs/file-$maxsize-$((i+$file_id_offset))"
	done
	echo Created $nfiles files of random sizes up to $maxsize bytes
}

#
# Remove test files
#
# $1 Number of files to remove
# $2 Maximum file size
# $3 File ID offset
# $4 File system to remove the files from
#
function rm_files
{
	nfiles=$1
	maxsize=$2
	file_id_offset=$3
	fs=$4

	for ((i=0; i<$nfiles; i=i+1)); do
		rm -f /$fs/file-$maxsize-$((i+$file_id_offset))
	done
	echo Removed $nfiles files of random sizes up to $maxsize bytes
}

#
# Simulate a random set of operations which could be reasonably expected
# to occur on an average filesystem.
#
# $1 Number of files to modify
# $2 Maximum file size
# $3 File system to modify the file on
# $4 Enabled xattrs (optional)
#
function churn_files
{
	nfiles=$1
	maxsize=$2
	fs=$3
	xattrs=${4:-1}

	#
	# Remove roughly half of the files in order to make it more
	# likely that a dnode will be reallocated.
	#
	for ((i=0; i<$nfiles; i=i+1)); do
		file_name="/$fs/file-$i"

		if [[ -e $file_name ]]; then
			if [ $((RANDOM % 2)) -eq 0 ]; then
				rm $file_name || \
				    log_fail "Failed to remove $file_name"
			fi
		fi
	done

	#
	# Remount the filesystem to simulate normal usage.  This resets
	# the last allocated object id allowing for new objects to be
	# reallocated in the locations of previously freed objects.
	#
	log_must zfs unmount $fs
	log_must zfs mount $fs

	# NOTE(review): {0..$nfiles} is inclusive of $nfiles (ksh93 expands
	# the variable), so one more file than mk_files creates is touched.
	for i in {0..$nfiles}; do
		file_name="/$fs/file-$i"
		file_size=$((($RANDOM * $RANDOM % ($maxsize - 1)) + 1))

		#
		# When the file exists modify it in one of five ways to
		# simulate normal usage:
		# - (20%) Remove and set and extended attribute on the file
		# - (20%) Overwrite the existing file
		# - (20%) Truncate the existing file to a random length
		# - (20%) Truncate the existing file to zero length
		# - (20%) Remove the file
		#
		# Otherwise create the missing file.  20% of the created
		# files will be small and use embedded block pointers, the
		# remainder with have random sizes up to the maximum size.
		# Three extended attributes are attached to all of the files.
		#
		if [[ -e $file_name ]]; then
			value=$((RANDOM % 5))
			if [ $value -eq 0 -a $xattrs -ne 0 ]; then
				attrname="testattr$((RANDOM % 3))"
				attrlen="$(((RANDOM % 1000) + 1))"
				attrvalue="$(random_string VALID_NAME_CHAR \
				    $attrlen)"
				attr -qr $attrname $file_name || \
				    log_fail "Failed to remove $attrname"
				attr -qs $attrname \
				    -V "$attrvalue" $file_name || \
				    log_fail "Failed to set $attrname"
			elif [ $value -eq 1 ]; then
				dd if=/dev/urandom of=$file_name \
				    bs=$file_size count=1 >/dev/null 2>&1 || \
				    log_fail "Failed to overwrite $file_name"
			elif [ $value -eq 2 ]; then
				truncate -s $file_size $file_name || \
				    log_fail "Failed to truncate $file_name"
			elif [ $value -eq 3 ]; then
				truncate -s 0 $file_name || \
				    log_fail "Failed to truncate $file_name"
			else
				rm $file_name || \
				    log_fail "Failed to remove $file_name"
			fi
		else
			# 20% of new files are small enough to use
			# embedded block pointers.
			if [ $((RANDOM % 5)) -eq 0 ]; then
				file_size=$((($RANDOM % 64) + 1))
			fi

			dd if=/dev/urandom of=$file_name \
			    bs=$file_size count=1 >/dev/null 2>&1 || \
			    log_fail "Failed to create $file_name"

			if [ $xattrs -ne 0 ]; then
				for j in {0..2}; do
					attrname="testattr$j"
					attrlen="$(((RANDOM % 1000) + 1))"
					attrvalue="$(random_string \
					    VALID_NAME_CHAR $attrlen)"
					attr -qs $attrname \
					    -V "$attrvalue" $file_name || \
					    log_fail "Failed to set $attrname"
				done
			fi
		fi
	done

	return 0
}

#
# Mess up file contents
#
# $1 The file path
#
function mess_file
{
	file=$1

	filesize=$(stat -c '%s' $file)
	offset=$(($RANDOM * $RANDOM % $filesize))
	if (($RANDOM % 7 <= 1)); then
		#
		# We corrupt 2 bytes to minimize the chance that we
		# write the same value that's already there.
		#
		log_must eval "dd if=/dev/random of=$file conv=notrunc " \
		    "bs=1 count=2 oseek=$offset >/dev/null 2>&1"
	else
		log_must truncate -s $offset $file
	fi
}

#
# Diff the send/receive filesystems
#
# $1 The sent filesystem
# $2 The received filesystem
#
function file_check
{
	sendfs=$1
	recvfs=$2

	if [[ -d /$recvfs/.zfs/snapshot/a && -d \
	    /$sendfs/.zfs/snapshot/a ]]; then
		diff -r /$recvfs/.zfs/snapshot/a /$sendfs/.zfs/snapshot/a
		[[ $? -eq 0 ]] || log_fail "Differences found in snap a"
	fi

	if [[ -d /$recvfs/.zfs/snapshot/b && -d \
	    /$sendfs/.zfs/snapshot/b ]]; then
		diff -r /$recvfs/.zfs/snapshot/b /$sendfs/.zfs/snapshot/b
		[[ $? -eq 0 ]] || log_fail "Differences found in snap b"
	fi
}

#
# Resume test helper
#
# Repeatedly corrupt the partially-received stream, verify the receive
# fails, then regenerate the remainder of the stream from the receive
# resume token and finally complete the receive.
#
# NOTE(review): the original text of this function was corrupted
# (unbalanced quotes, a dropped span merging it with the following setup
# function); the resume-token steps below are reconstructed from the
# upstream ZFS test suite — verify against repository history.
#
# $1 The ZFS send command
# $2 The filesystem where the streams are sent
# $3 The receive filesystem
#
function resume_test
{
	sendcmd=$1
	streamfs=$2
	recvfs=$3

	stream_num=1
	log_must eval "$sendcmd >/$streamfs/$stream_num"

	for ((i=0; i<2; i=i+1)); do
		mess_file /$streamfs/$stream_num
		log_mustnot zfs recv -suv $recvfs </$streamfs/$stream_num
		stream_num=$((stream_num+1))

		# Pick up where the failed receive left off.
		token=$(zfs get -Hp -o value receive_resume_token $recvfs)
		log_must eval "zfs send -v -t $token >/$streamfs/$stream_num"
		[[ -f /$streamfs/$stream_num ]] || \
		    log_fail "NO FILE /$streamfs/$stream_num"
	done
	log_must zfs recv -suv $recvfs </$streamfs/$stream_num
}

#
# Setup filesystems for the resumable send/receive tests
#
# NOTE(review): the original text of this function was lost in the same
# corruption that hit resume_test; only its tail (the initial.zsend /
# incremental.zsend sends, the closing "fi" and the streamfs create) was
# visible.  The body below is reconstructed from the upstream ZFS test
# suite — verify against repository history.
#
# $1 The "send" filesystem
# $2 The "recv" filesystem
# $3 The filesystem where the send streams are stored
#
function test_fs_setup
{
	typeset sendfs=$1
	typeset recvfs=$2
	typeset streamfs=$3
	typeset sendpool=${sendfs%%/*}
	typeset recvpool=${recvfs%%/*}

	datasetexists $sendfs && log_must zfs destroy -r $sendfs
	datasetexists $recvfs && log_must zfs destroy -r $recvfs
	datasetexists $streamfs && log_must zfs destroy -r $streamfs

	if datasetexists $sendfs || log_must zfs create $sendfs; then
		mk_files 1000 256 0 $sendfs
		mk_files 100 131072 0 $sendfs
		mk_files 10 1048576 0 $sendfs
		log_must zfs snapshot $sendfs@a

		rm_files 200 256 0 $sendfs
		mk_files 400 256 1000 $sendfs
		log_must zfs snapshot $sendfs@b

		log_must eval "zfs send -v $sendfs@a >/$sendpool/initial.zsend"
		log_must eval "zfs send -v -i @a $sendfs@b " \
		    ">/$sendpool/incremental.zsend"
	fi

	log_must zfs create -o compress=lz4 $streamfs
}

#
# Check to see if the specified features are set in a send stream.
# The values for these features are found in uts/common/fs/zfs/sys/zfs_ioctl.h
#
# $1 The stream file
# $2-$n The flags expected in the stream
#
function stream_has_features
{
	typeset file=$1
	shift

	[[ -f $file ]] || log_fail "Couldn't find file: $file"
	typeset flags=$(cat $file | zstreamdump | awk '/features =/ {print $3}')
	typeset -A feature
	feature[dedup]="1"
	feature[dedupprops]="2"
	feature[sa_spill]="4"
	feature[embed_data]="10000"
	feature[lz4]="20000"
	feature[mooch_byteswap]="40000"
	feature[large_blocks]="80000"
	feature[resuming]="100000"
	feature[redacted]="200000"
	feature[compressed]="400000"

	typeset flag known derived=0
	for flag in "$@"; do
		known=${feature[$flag]}
		[[ -z $known ]] && log_fail "Unknown feature: $flag"

		# Use mdb to AND the hex feature mask with the flag's bit.
		derived=$(echo "$flags & ${feature[$flag]} = X" | mdb | sed 's/ //g')
		[[ $derived = $known ]] || return 1
	done

	return 0
}

#
# Parse zstreamdump -v output.  The output varies for each kind of record:
# BEGIN records are simply output as "BEGIN"
# END records are output as "END"
# OBJECT records become "OBJECT <object>"
# FREEOBJECTS records become "FREEOBJECTS <firstobj> <numobjs>"
# FREE records become "FREE <object> <offset>"
# WRITE records become:
# "WRITE <object> <logical size> <compressed size> <data size> <offset>"
#
function parse_dump
{
	sed '/^WRITE/{N;s/\n/ /;}' | grep "^[A-Z]" | awk '{
		if ($1 == "BEGIN" || $1 == "END") print $1
		if ($1 == "OBJECT") print $1" "$4
		if ($1 == "FREEOBJECTS") print $1" "$4" "$7
		if ($1 == "FREE") print $1" "$7" "$10
		if ($1 == "WRITE") print $1" "$15" "$21" "$24" "$27" "$30}'
}

#
# Given a send stream, verify that the size of the stream matches what's
# expected based on the source or target dataset.  If the stream is an
# incremental stream, subtract the size of the source snapshot before
# comparing.  This function does not currently handle incremental streams
# that remove data.
#
# $1 The zstreamdump output file
# $2 The dataset to compare against
#    This can be a source of a send or recv target (fs, not snapshot)
# $3 The percentage below which verification is deemed a failure
# $4 The source snapshot of an incremental send
#
function verify_stream_size
{
	typeset stream=$1
	typeset ds=$2
	typeset percent=${3:-90}
	typeset inc_src=$4

	[[ -f $stream ]] || log_fail "No such file: $stream"
	datasetexists $ds || log_fail "No such dataset: $ds"

	typeset stream_size=$(cat $stream | zstreamdump | sed -n \
	    's/	Total write size = \(.*\) (0x.*)/\1/p')

	typeset inc_size=0
	if [[ -n $inc_src ]]; then
		inc_size=$(get_prop lrefer $inc_src)
		if stream_has_features $stream compressed; then
			inc_size=$(get_prop refer $inc_src)
		fi
	fi

	# Compressed streams are compared against the compressed on-disk
	# size (refer); uncompressed streams against the logical size.
	if stream_has_features $stream compressed; then
		ds_size=$(get_prop refer $ds)
	else
		ds_size=$(get_prop lrefer $ds)
	fi
	ds_size=$((ds_size - inc_size))

	within_percent $stream_size $ds_size $percent || log_fail \
	    "$stream_size $ds_size differed by too much"
}

# Cleanup function for tests involving resumable send
function resume_cleanup
{
	typeset sendfs=$1
	typeset streamfs=$2
	typeset sendpool=${sendfs%%/*}

	datasetexists $sendfs && log_must zfs destroy -r $sendfs
	datasetexists $streamfs && log_must zfs destroy -r $streamfs
	cleanup_pool $POOL2
	rm -f /$sendpool/initial.zsend /$sendpool/incremental.zsend
}

# Randomly set the property to one of the enumerated values.
function rand_set_prop
{
	typeset dtst=$1
	typeset prop=$2
	shift 2
	typeset value=$(random_get $@)

	log_must eval "zfs set $prop='$value' $dtst"
}

# Generate a recursive checksum of a filesystems contents.  Only file
# data is included in the checksum (no meta data, or xattrs).
function recursive_cksum
{
	find $1 -type f -exec sha256sum {} \; | \
	    sort -k 2 | awk '{ print $1 }' | sha256sum
}