#!/bin/ksh

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source.  A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
# Copyright (c) 2018 Datto Inc.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/rsend/rsend.kshlib

#
# Description:
# Verify incremental receive properly handles objects with changed
# dnode slot count.
#
# Strategy:
# 1. Populate a dataset with 1k byte dnodes and snapshot
# 2. Remove objects, set dnodesize=legacy, and remount dataset so new objects
#    get recycled numbers and formerly "interior" dnode slots get assigned
#    to new objects
# 3. Remove objects, set dnodesize=2k, and remount dataset so new objects
#    overlap with recently recycled slots and formerly "normal" dnode slots
#    get assigned to new objects
# 4. Create an empty file and add xattrs to it to exercise reclaiming a
#    dnode that requires more than 1 slot for its bonus buffer (ZoL #7433)
# 5. Generate initial and incremental streams
# 6. Verify initial and incremental streams can be received
#

verify_runnable "both"

log_assert "Verify incremental receive handles objects with changed dnode size"

function cleanup
{
	rm -f $BACKDIR/fs-dn-legacy
	rm -f $BACKDIR/fs-dn-1k
	rm -f $BACKDIR/fs-dn-2k
	rm -f $BACKDIR/fs-attr

	if datasetexists $POOL/fs ; then
		log_must zfs destroy -rR $POOL/fs
	fi

	if datasetexists $POOL/newfs ; then
		log_must zfs destroy -rR $POOL/newfs
	fi
}

log_onexit cleanup

# 1. Populate a dataset with 1k byte dnodes and snapshot
log_must zfs create -o dnodesize=1k $POOL/fs
log_must mk_files 200 262144 0 $POOL/fs
log_must zfs snapshot $POOL/fs@a

# 2. Remove objects, set dnodesize=legacy, and remount dataset so new objects
#    get recycled numbers and formerly "interior" dnode slots get assigned
#    to new objects
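#    (With dnodesize=1k each dnode spans two 512-byte slots; the second slot
#    of each pair is an "interior" slot that only becomes allocatable again
#    after the dataset reverts to legacy 512-byte dnodes.)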
log_must rm /$POOL/fs/*

log_must zfs unmount $POOL/fs
log_must zfs set dnodesize=legacy $POOL/fs
log_must zfs mount $POOL/fs

log_must mk_files 200 262144 0 $POOL/fs
log_must zfs snapshot $POOL/fs@b

# 3. Remove objects, set dnodesize=2k, and remount dataset so new objects
#    overlap with recently recycled slots and formerly "normal" dnode slots
#    get assigned to new objects
log_must rm /$POOL/fs/*

log_must zfs unmount $POOL/fs
log_must zfs set dnodesize=2k $POOL/fs
log_must zfs mount $POOL/fs

log_must touch /$POOL/fs/attrs
log_must mk_files 200 262144 0 $POOL/fs
log_must zfs snapshot $POOL/fs@c

# 4. Create an empty file and add xattrs to it to exercise reclaiming a
#    dnode that requires more than 1 slot for its bonus buffer (ZoL #7433)
log_must zfs set compression=on $POOL/fs
log_must zfs set xattr=sa $POOL/fs
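# With xattr=sa the xattr value below is stored as a system attribute in the
# dnode's bonus buffer rather than in a separate xattr directory, so a
# 512-byte value pushes the bonus buffer past what fits in a single 512-byte
# dnode slot, which is the case ZoL #7433 addresses.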
log_must eval "python -c 'print(\"a\" * 512)' | attr -s bigval /$POOL/fs/attrs"
log_must zfs snapshot $POOL/fs@d

# 5. Generate initial and incremental streams
log_must eval "zfs send $POOL/fs@a > $BACKDIR/fs-dn-1k"
log_must eval "zfs send -i $POOL/fs@a $POOL/fs@b > $BACKDIR/fs-dn-legacy"
log_must eval "zfs send -i $POOL/fs@b $POOL/fs@c > $BACKDIR/fs-dn-2k"
log_must eval "zfs send -i $POOL/fs@c $POOL/fs@d > $BACKDIR/fs-attr"
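# Each stream file could also be sanity-checked before receiving, e.g. with
# zstreamdump (optional; not part of the original test flow):
#	log_must eval "zstreamdump < $BACKDIR/fs-dn-legacy > /dev/null"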

# 6. Verify initial and incremental streams can be received
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-1k"
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-legacy"
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-2k"
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-attr"
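# As a further sanity check, the received contents could be compared against
# the source dataset, assuming the cmp_ds_cont helper from rsend.kshlib is
# available in this version of the suite (optional; not part of the original
# test):
#	log_must cmp_ds_cont $POOL/fs $POOL/newfs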

log_pass "Verify incremental receive handles objects with changed dnode size"