Creating a ZFS filesystem is very simple:
zfs create ....
We then set different property values according to our requirements, which lets us place limits on the filesystem.
Examples of such properties are quota, reservation, deduplication and compression.
In this post we are going to discuss:
1) ZFS creation and a few properties.
2) ZFS Clones and Snapshots.
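Before the walkthrough, a quick hedged sketch of the general property syntax. The dataset names here are only illustrative, and deduplication is available only on ZFS/pool versions that support it:
zfs set <property>=<value> <pool>/<filesystem> ---- set a property
zfs get <property> <pool>/<filesystem> ---- read it back
zfs set compression=on mypl/myfs1 ---- e.g. enable compression (illustrative dataset name)
zfs set dedup=on mypl/myfs1 ---- deduplication, where the ZFS version supports it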
ZFS creation with the default mountpoint. The default mountpoint is the same as the filesystem name.
root-mysrv1~# zpool list
NAME SIZE ALLOC FREE CAP HEALTH ALTROOT
rpool 9.94G 6.12G 3.81G 61% ONLINE -
root-mysrv1~#
root-mysrv1~# zpool create mypl c5d1 c5d2 c5d3
root-mysrv1~#
root-mysrv1~# zpool list
NAME SIZE ALLOC FREE CAP HEALTH ALTROOT
mypl 285M 77K 285M 0% ONLINE -
rpool 9.94G 6.12G 3.81G 61% ONLINE -
root-mysrv1~#
My pool " mypl " created.
root-mysrv1~#
root-mysrv1~# zfs create mypl/myfs1
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list mypl
NAME USED AVAIL REFER MOUNTPOINT
mypl 130K 253M 31K /mypl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# df -kh
Filesystem size used avail capacity Mounted on
rpool/ROOT/s10s_u11wos_24a
9.8G 6.1G 3.7G 63% /
/devices 0K 0K 0K 0% /devices
ctfs 0K 0K 0K 0% /system/contract
proc 0K 0K 0K 0% /proc
mnttab 0K 0K 0K 0% /etc/mnttab
swap 4.7G 504K 4.7G 1% /etc/svc/volatile
objfs 0K 0K 0K 0% /system/object
sharefs 0K 0K 0K 0% /etc/dfs/sharetab
/platform/SUNW,Sun-Fire-T200/lib/libc_psr/libc_psr_hwcap1.so.1
9.8G 6.1G 3.7G 63% /platform/sun4v/lib/libc_psr.so.1
/platform/SUNW,Sun-Fire-T200/lib/sparcv9/libc_psr/libc_psr_hwcap1.so.1
9.8G 6.1G 3.7G 63% /platform/sun4v/lib/sparcv9/libc_psr.so.1
fd 0K 0K 0K 0% /dev/fd
swap 4.7G 32K 4.7G 1% /tmp
swap 4.7G 48K 4.7G 1% /var/run
rpool/export 9.8G 32K 3.7G 1% /export
rpool/export/home 9.8G 31K 3.7G 1% /export/home
rpool 9.8G 106K 3.7G 1% /rpool
mypl 253M 31K 253M 1% /mypl
mypl/myfs1 253M 31K 253M 1% /mypl/myfs1
root-mysrv1~#
root-mysrv1~# zpool list
NAME SIZE ALLOC FREE CAP HEALTH ALTROOT
mypl 285M 222K 285M 0% ONLINE -
rpool 9.94G 6.13G 3.81G 61% ONLINE -
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list
NAME USED AVAIL REFER MOUNTPOINT
mypl 130K 253M 32K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
rpool 6.12G 3.66G 106K /rpool
rpool/ROOT 6.12G 3.66G 31K legacy
rpool/ROOT/s10s_u11wos_24a 6.12G 3.66G 6.12G /
rpool/export 63K 3.66G 32K /export
rpool/export/home 31K 3.66G 31K /export/home
root-mysrv1~#
root-mysrv1~#
ZFS creation with a specific mountpoint for our filesystem.
root-mysrv1~#
root-mysrv1~# zfs create -o mountpoint=/mypl/mysap mypl/sapmnt
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl 253M 32K 253M 1% /mypl
mypl/myfs1 253M 31K 253M 1% /mypl/myfs1
mypl/sapmnt 253M 31K 253M 1% /mypl/mysap
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 176K 253M 32K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 253M 31K /mypl/mysap
root-mysrv1~#
***********************************************************************
Let us discuss some properties. First of all, we should have a clear idea of the difference between QUOTA and RESERVATION.
A quota sets a limit on the amount of space a filesystem can use.
A reservation is an allocation of space from the pool that is guaranteed to be available to a dataset.
We can set a property either at the time of FS creation or later.
Now let us create a new FS with a quota set at creation time...
root-mysrv1~#
root-mysrv1~# zfs create -o mountpoint=/mypl/myspl -o quota=1m mypl/spl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 224K 253M 33K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 253M 31K /mypl/mysap
mypl/spl 31K 993K 31K /mypl/myspl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl 253M 33K 253M 1% /mypl
mypl/myfs1 253M 31K 253M 1% /mypl/myfs1
mypl/sapmnt 253M 31K 253M 1% /mypl/mysap
mypl/spl 1.0M 31K 993K 4% /mypl/myspl
root-mysrv1~#
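A quota limits space; a reservation does the opposite and guarantees it. The walkthrough here does not demonstrate reservations, so here is a hedged sketch on the already-created mypl/sapmnt (the size is arbitrary):
zfs set reservation=20m mypl/sapmnt ---- guarantees 20 MB of pool space to this dataset
zfs set reservation=none mypl/sapmnt ---- removes the reservation again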
To view a single property value we use zfs get <property> <fsname>; to view all property values at once we can run zfs get all <fsname>.
root-mysrv1~# zfs get quota mypl/spl
NAME PROPERTY VALUE SOURCE
mypl/spl quota 1M local
root-mysrv1~#
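As mentioned above, every property of a dataset can also be listed in one go; a small sketch (output omitted here):
zfs get all mypl/spl ---- shows quota, reservation, compression, mountpoint and all other properties with their source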
root-mysrv1~#
root-mysrv1~# zfs set quota=10m mypl/spl ---- we can change the value later if necessary.
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs get quota mypl/spl
NAME PROPERTY VALUE SOURCE
mypl/spl quota 10M local
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl 253M 34K 253M 1% /mypl
mypl/myfs1 253M 31K 253M 1% /mypl/myfs1
mypl/sapmnt 253M 31K 253M 1% /mypl/mysap
mypl/spl 10M 31K 10.0M 1% /mypl/myspl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 226K 253M 34K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 253M 31K /mypl/mysap
mypl/spl 31K 9.97M 31K /mypl/myspl
root-mysrv1~#
To set a quota on an existing filesystem:
root-mysrv1~# zfs set quota=50m mypl/sapmnt
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 226K 253M 34K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 31K 9.97M 31K /mypl/myspl
root-mysrv1~#
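Should the quota need to be removed again later, it can be cleared; a hedged aside, not shown in the output above:
zfs set quota=none mypl/sapmnt ---- removes the 50M limit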
root-mysrv1~# zpool status mypl
pool: mypl
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
mypl ONLINE 0 0 0
c5d1 ONLINE 0 0 0
c5d2 ONLINE 0 0 0
c5d3 ONLINE 0 0 0
errors: No known data errors
root-mysrv1~#
***********************************************************************
ZFS snapshots: A snapshot is very helpful for reverting a filesystem when something goes wrong.
It is always advisable to take a snapshot of an existing filesystem before performing an activity or applying modifications to it.
The main point about snapshots is that a snapshot occupies no extra space at the moment it is taken; it is simply a read-only, point-in-time image of the entire filesystem, and it starts to consume space only as the live filesystem changes afterwards.
That makes it easy to revert when an activity goes wrong.
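Existing snapshots can be listed separately from filesystems; a small sketch (at this point no snapshots exist yet):
zfs list -t snapshot ---- show snapshots only
zfs list -r mypl ---- show the pool's datasets recursively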
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl 253M 34K 253M 1% /mypl
mypl/myfs1 253M 31K 253M 1% /mypl/myfs1
mypl/sapmnt 50M 31K 50M 1% /mypl/mysap
mypl/spl 10M 31K 10.0M 1% /mypl/myspl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 228K 253M 34K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 31K 9.97M 31K /mypl/myspl
root-mysrv1~#
root-mysrv1~#
Here I am creating a snapshot of my FS in its initial (empty) state.
root-mysrv1~# zfs snapshot mypl/spl@may2014
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list | grep mypl
mypl 229K 253M 34K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 31K 9.97M 31K /mypl/myspl
mypl/spl@may2014 0 - 31K -
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# cd /
root-mysrv1~#
root-mysrv1~# du -sh *
0K bin
295K boot
500M dev
176K devices
49M etc
3K export
0K home
61M kernel
36M lib
1K mnt
6K mypl
0K net
138M opt
1K pl1
320M platform
824M proc
78K rpool
1.6M sbin
8.6M system
6K test
80K tmp
^C
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# cd sbin
root-mysrv1~# ls
autopush ifconfig metainit rc2 su umountall
bootadm ifparse metarecover rc3 su.static uname
bpgetfile in.mpathd metastat rc5 sulogin zfs
dhcpagent init mount rc6 swapadd zonename
dhcpinfo jsh mountall rcS sync zpool
dladm luactivate netstrategy route tnctl
fdisk lucurr pfsh routeadm tzreload
fiocompress metadb rc0 sh uadmin
hostconfig metadevadm rc1 soconfig umount
root-mysrv1~#
To apply some changes, let us copy some data to our filesystem.
root-mysrv1~# cp -rp /sbin /mypl/myspl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# cd /mypl/myspl
root-mysrv1~#
root-mysrv1~# ls -rlth
total 5
drwxr-xr-x 2 root sys 52 Apr 5 08:32 sbin
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl 253M 34K 251M 1% /mypl
mypl/myfs1 253M 31K 251M 1% /mypl/myfs1
mypl/sapmnt 50M 31K 50M 1% /mypl/mysap
mypl/spl 10M 1.8M 8.2M 18% /mypl/myspl
root-mysrv1~#
root-mysrv1~#
Take another snapshot after copying the files...
root-mysrv1~#
root-mysrv1~# zfs snapshot mypl/spl@may20142nd
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 2.39M 251M 34K /mypl
mypl/myfs1 31K 251M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 1.96M 8.04M 1.94M /mypl/myspl
mypl/spl@may2014 19K - 31K -
mypl/spl@may20142nd 0 - 1.94M -
root-mysrv1~#
As we can see, the first snapshot now occupies some space (it holds the blocks that changed after it was taken), while the second snapshot is still 0K because nothing has changed since it was created.
Now let us roll back to the earlier state...
root-mysrv1~#
root-mysrv1~# zfs rollback mypl/spl@may2014
cannot rollback to 'mypl/spl@may2014': more recent snapshots exist
use '-r' to force deletion of the following snapshots:
mypl/spl@may20142nd
root-mysrv1~#
root-mysrv1~# zfs rollback -r mypl/spl@may2014
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 268K 253M 34K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 32K 9.97M 31K /mypl/myspl
mypl/spl@may2014 1K - 31K -
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl 253M 34K 253M 1% /mypl
mypl/myfs1 253M 31K 253M 1% /mypl/myfs1
mypl/sapmnt 50M 31K 50M 1% /mypl/mysap
mypl/spl 10M 31K 10.0M 1% /mypl/myspl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# cd /mypl/myspl
root-mysrv1~#
root-mysrv1~# ls
root-mysrv1~# ------ The filesystem is empty again, exactly as it was when the snapshot was taken, so the rollback succeeded.
To destroy a snapshot:
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 268K 253M 34K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 32K 9.97M 31K /mypl/myspl
mypl/spl@may2014 1K - 31K -
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs destroy mypl/spl@may2014
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 456K 253M 34K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 31K 9.97M 31K /mypl/myspl
root-mysrv1~#
ZFS cloning: A clone is an exact, writable copy of an existing filesystem. A clone can be created only from a snapshot, so whenever we need a clone of a filesystem we first take a snapshot of it and then create the clone from that snapshot.
root-mysrv1~#
root-mysrv1~# zpool list
NAME SIZE ALLOC FREE CAP HEALTH ALTROOT
mypl 285M 344K 285M 0% ONLINE -
rpool 9.94G 6.13G 3.81G 61% ONLINE -
root-mysrv1~#
root-mysrv1~# zpool list |grep mypl
mypl 285M 344K 285M 0% ONLINE -
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 247K 253M 34K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 31K 9.97M 31K /mypl/myspl
root-mysrv1~#
root-mysrv1~# zfs clone mypl/spl mypl/spl_very ---- a clone can be created only from a snapshot, so cloning the filesystem directly fails.
cannot open 'mypl/spl': operation not applicable to datasets of this type
root-mysrv1~#
root-mysrv1~# zfs snapshot mypl/spl@12345 ---- creating snapshot.
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 252K 253M 34K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 31K 9.97M 31K /mypl/myspl
mypl/spl@12345 0 - 31K -
root-mysrv1~#
root-mysrv1~# zfs clone mypl/spl@12345 mypl/spl_very ---- creating clone
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 274K 253M 34K /mypl
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 31K 9.97M 31K /mypl/myspl
mypl/spl@12345 0 - 31K -
mypl/spl_very 1K 253M 31K /mypl/spl_very
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs clone -o mountpoint=/splclone mypl/spl@12345 mypl/cln1
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl 312K 253M 36K /mypl
mypl/cln1 1K 253M 31K /splclone
mypl/myfs1 31K 253M 31K /mypl/myfs1
mypl/sapmnt 31K 50.0M 31K /mypl/mysap
mypl/spl 31K 9.97M 31K /mypl/myspl
mypl/spl@12345 0 - 31K -
mypl/spl_very 1K 253M 31K /mypl/spl_very
root-mysrv1~#
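One thing the walkthrough above does not show: a clone stays dependent on the snapshot it was created from, so that snapshot cannot be destroyed while the clones exist. A hedged sketch of how this is usually handled:
zfs destroy mypl/spl@12345 ---- would fail while mypl/spl_very and mypl/cln1 exist
zfs destroy mypl/cln1 ---- destroy the clones first, then the snapshot can be removed
zfs promote mypl/spl_very ---- or promote a clone so it no longer depends on the original filesystem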
#################################################################################