Wednesday, 30 April 2014

Setting Bootmode to FACTORY-DEFAULT in T5120 !!!

If we want to reset our T5120 server to its original state, i.e. we do not want any of the LDoms currently present on the server, we need to perform a factory reset.

On the T5120, to get the control domain back with all of its resources, we need to boot with the factory-default configuration.
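As a side note, the same result can usually be reached from the control domain itself by selecting the stored factory-default SP configuration and then powercycling the host. A minimal sketch, assuming a recent LDoms Manager (the subcommand is set-spconfig on newer versions, set-config on older ones); no output shown:

root-myt5server:~# ldm list-spconfig                     ---- list stored SP configurations; factory-default is always present
root-myt5server:~# ldm set-spconfig factory-default      ---- select it for the next powercycle (older versions: ldm set-config)

In this post, however, we do it through the ILOM bootmode, as shown below.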

Output of the T5120 control domain before the reboot:

root-myt5server:~# ldm list
NAME             STATE      FLAGS   CONS    VCPU  MEMORY   UTIL  UPTIME
primary          active     -n-cv-  SP      8     12160M   0.3%  3d 2h 43m
test1            active     -n----  5000    8     8G       0.0%  4h 48m
test2            active     -n----  5001    8     8G       0.0%  5h 50m


Now log in to the T5120's ILOM:

Waiting for daemons to initialize...
Daemons ready
Oracle(R) Integrated Lights Out Manager
Version 3.0.12.4.y r77080
Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.

->
->
->
-> help
The help command is used to view information about commands and targets

Usage: help [-o|-output terse|verbose] [<command>|legal|targets|<target>|<target> <property>]
Special characters used in the help command are
[] encloses optional keywords or options
<> encloses a description of the keyword
 (If <> is not present, an actual keyword is indicated)
| indicates a choice of keywords or options

help <target> displays description of this target and its properties
help <target> <property> displays description of this property of this target
help targets displays a list of targets
help legal displays the product legal notice

Commands are:
cd
create
delete
dump
exit
help
load
reset
set
show
start
stop
version

->
-> show /SYS
 /SYS
 Targets:
 SERVICE
 LOCATE
 ACT
 PS_FAULT
 TEMP_FAULT
 FAN_FAULT
 MB
 HDD0
 HDD1
 HDD2
 HDD3
 PDB
 SASBP
 DVD
 TTYA
 USBBD
 FANBD0
 FANBD1
 PS0
 PS1
 VPS

 Properties:
 type = Host System
 ipmi_name = /SYS
 keyswitch_state = Normal
 product_name = SPARC-Enterprise-T5120
 product_part_number = 602-3580-08
 product_serial_number = BEL07482GU
 product_manufacturer = Oracle Corporation
 fault_state = OK
 

 power_state = On                        ---------- My T5 is in powered on state.
 Commands:
 cd
 reset
 set
 show
 start
 stop

->

Now power off the server so that we can change the bootmode and boot with it.

-> stop /SYS
Are you sure you want to stop /SYS (y/n)? y
Stopping /SYS

-> show
 /
 Targets:
 HOST
 SYS
 SP

 Properties:
 Commands:
 cd
 show

->
->
->

-> show /SYS                              ------- check the status ..
 /SYS
 Targets:
 SERVICE
 LOCATE
 ACT
 PS_FAULT
 TEMP_FAULT
 FAN_FAULT
 MB
 HDD0
 HDD1
 HDD2
 HDD3
 PDB
 SASBP
 DVD
 TTYA
 USBBD
 FANBD0
 FANBD1
 PS0
 PS1
 VPS

 Properties:
 type = Host System
 ipmi_name = /SYS
 keyswitch_state = Normal
 product_name = SPARC-Enterprise-T5120
 product_part_number = 602-3580-08
 product_serial_number = BEL07482GU
 product_manufacturer = Oracle Corporation
 fault_state = OK
 
power_state = On                        ---------- still in powered-on state; the shutdown takes some time..

 Commands:
 cd
 reset
 set
 show
 start
 stop

 
-> show /HOST                            ---------- show /HOST also shows status..
 /HOST
 Targets:
 bootmode
 console
 diag
 domain
 tpm

 Properties:
 autorestart = reset
 autorunonerror = false
 bootfailrecovery = poweroff
 bootrestart = none
 boottimeout = 0
 hypervisor_version = Hypervisor 1.10.7.d 2012/12/11 22:17
 macaddress = 00:14:4f:98:4a:18
 maxbootfail = 3
 obp_version = OpenBoot 4.33.6.b 2012/12/11 20:50
 post_version = POST 4.33.6 2012/03/14 08:28
 send_break_action = (Cannot show property)
 status = Solaris running 

 sysfw_version = Sun System Firmware 7.4.5 2012/12/11 23:47
 Commands:
 cd
 set
 show

->
->


Now check the status again,

-> show /SYS

 /SYS
 Targets:
 SERVICE
 LOCATE
 ACT
 PS_FAULT
 TEMP_FAULT
 FAN_FAULT
 MB
 HDD0
 HDD1
 HDD2
 HDD3
 PDB
 SASBP
 DVD
 TTYA
 USBBD
 FANBD0
 FANBD1
 PS0
 PS1
 VPS

 Properties:
 type = Host System
 ipmi_name = /SYS
 keyswitch_state = Normal
 product_name = SPARC-Enterprise-T5120
 product_part_number = 602-3580-08
 product_serial_number = BEL07482GU
 product_manufacturer = Oracle Corporation
 fault_state = OK
 
power_state = Off                        ---------- Now my T5 is in powered-off state.
 Commands:
 cd
 reset
 set
 show
 start
 stop

->
->
-> show /HOST

 /HOST
 Targets:
 bootmode
 console
 diag
 domain
 tpm

 Properties:
 autorestart = reset
 autorunonerror = false
 bootfailrecovery = poweroff
 bootrestart = none
 boottimeout = 0
 hypervisor_version = Hypervisor 1.10.7.d 2012/12/11 22:17
 macaddress = 00:14:4f:98:4a:18
 maxbootfail = 3
 obp_version = OpenBoot 4.33.6.b 2012/12/11 20:50
 post_version = POST 4.33.6 2012/03/14 08:28
 send_break_action = (Cannot show property)
 status = Powered off 

 sysfw_version = Sun System Firmware 7.4.5 2012/12/11 23:47
 Commands:
 cd
 set
 show

->

Set the bootmode to factory-default as follows :

-> set /HOST/bootmode config=factory-default
Set 'config' to 'factory-default'
->
-> show /HOST/bootmode

 /HOST/bootmode
 Targets:

 Properties:
 config = factory-default
 expires = (none)
 script = (none)
 state = normal

 Commands:
 cd
 set
 show

->
->


Now start the server so that it boots with the new bootmode.

->
-> start /SYS
Are you sure you want to start /SYS (y/n)? y
Starting /SYS
 
-> show /HOST
 /HOST
 Targets:
 bootmode
 console
 diag
 domain
 tpm

 Properties:
 autorestart = reset
 autorunonerror = false
 bootfailrecovery = poweroff
 bootrestart = none
 boottimeout = 0
 hypervisor_version = Hypervisor 1.10.7.d 2012/12/11 22:17
 macaddress = 00:14:4f:98:4a:18
 maxbootfail = 3
 obp_version = OpenBoot 4.33.6.b 2012/12/11 20:50
 post_version = POST 4.33.6 2012/03/14 08:28
 send_break_action = (Cannot show property)
 status = Powered on 

 sysfw_version = Sun System Firmware 7.4.5 2012/12/11 23:47
 Commands:
 cd
 set
 show

-> start /HOST/console
Are you sure you want to start /HOST/console (y/n)? y
Serial console started. To stop, type #.
2014-05-04 15:25:53.588 0:1:0>End : FPU Move Registers
2014-05-04 15:25:53.802 0:2:0>End : FPU Move Registers
2014-05-04 15:25:53.902 0:3:0>End : FPU Move Registers
2014-05-04 15:25:53.974 0:4:0>End : FPU Move Registers
2014-05-04 15:25:54.053 0:5:0>End : FPU Move Registers
2014-05-04 15:25:54.129 0:6:0>End : FPU Move Registers
2014-05-04 15:25:54.204 0:7:0>End : FPU Move Registers
2014-05-04 15:25:54.401 0:1:0>Begin: FSR Read/Write


 ... OUTPUT TRUNCATED ...

2014-05-04 15:27:15.012 0:0:0>End : Network Tests
2014-05-04 15:27:15.224 0:0:0>Extended Memory Tests.....
2014-05-04 15:27:15.281 0:0:0>Begin: Print Mem Config
2014-05-04 15:27:15.289 0:0:0>Caches : Icache is ON, Dcache is ON.
2014-05-04 15:27:15.294 0:0:0> Total Memory = 00000000.00000000 -> 00000008.00000000
2014-05-04 15:27:15.302 0:0:0>End : Print Mem Config
2014-05-04 15:27:15.371 0:0:0>Begin: Block Mem Test
2014-05-04 15:27:15.526 0:0:0>Block Mem Test 00000000.10000000->00000000.10800000
2014-05-04 15:27:16.413 0:0:0>........
2014-05-04 15:27:25.543 0:0:0>Testing Gaps..
2014-05-04 15:27:25.598 0:0:0>........
2014-05-04 15:27:52.569 0:0:0>End : Block Mem Test
2014-05-04 15:27:52.657 0:0:0>INFO:
2014-05-04 15:27:52.715 0:0:0> POST Passed all devices.
2014-05-04 15:27:52.771 0:0:0>POST: Return to VBSC.
2014-05-04 15:27:52.826 0:0:0>Master set ACK for vbsc runpost command and spin...
/

SPARC Enterprise T5120, No Keyboard
Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
OpenBoot 4.33.6.b, 32640 MB memory available, Serial #77067241.
Ethernet address 0:14:4f:98:4a:18, Host ID: 84984a18.


Boot device: disk File and args:
SunOS Release 5.11 Version 11.1 64-bit
Copyright (c) 1983, 2012, Oracle and/or its affiliates. All rights reserved.
sorry, variable 'ssfcp_enable_auto_configuration' is not defined in the 'fcp' module
WARNING: Illegal stack size. Using 32768
Hostname: myt5server

myt5server console login: root
Password:
May 4 15:35:27 myt5server login: ROOT LOGIN /dev/console
Last login: Sun May 4 15:01:08 from 10.36.5.179
Oracle Corporation SunOS 5.11 11.1 May 2013
You have new mail.
root-myt5server:~#
root-myt5server:~#


Check the status with ldm list,

root-myt5server:~# ldm list
NAME             STATE      FLAGS   CONS    VCPU  MEMORY   UTIL  UPTIME
primary          active     -n-c--  SP      64    32640M   0.2%  7m
test1            inactive   ------          8     8G
test2            inactive   ------          8     8G
root-myt5server:~#
root-myt5server:~#
root-myt5server:~# ldm list-services
root-myt5server:~#
root-myt5server:~#

Now our control domain has all of its resources: 32 GB memory and 8 CPU cores (i.e. 64 VCPUs).

We can also observe that no services are configured on the control domain, such as vds (virtual disk service), vsw (virtual switch) and vcc (virtual console concentrator).

So now we can destroy the existing inactive LDoms and start creating new ones, as sketched below.
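A hedged sketch of that cleanup and of re-creating the usual default services (the service names, port range and net-dev value are only illustrations, not taken from this box):

root-myt5server:~# ldm destroy test1                                       ---- inactive domains can be destroyed directly
root-myt5server:~# ldm destroy test2
root-myt5server:~# ldm add-vds primary-vds0 primary                        ---- virtual disk service
root-myt5server:~# ldm add-vsw net-dev=nxge0 primary-vsw0 primary          ---- virtual switch (net-dev is an assumption)
root-myt5server:~# ldm add-vcc port-range=5000-5100 primary-vcc0 primary   ---- virtual console concentrator
root-myt5server:~# ldm add-spconfig initial                                ---- save the new configuration to the SP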

##################################################################################

 


Monday, 28 April 2014

Configuring a Packaging server !!!

Package administration is made easy with the help of SRUs (Support Repository Updates).
Either we can directly access the SRU through https://pkg.oracle.com/solaris/support/

OR

we can copy the ISO file and configure any server as a packaging server, through which we can access the repo from any client server.
 
To use this repo, we create a publisher on our packaging server and share it across the network.

Let us see how to configure packaging server.

We need the SRU ISO: copy it to the packaging server and sync its contents to a local directory, then create a publisher based on the sync location...

root-mysrv1:/#
root-mysrv1:/# cd /opt
root-mysrv1:/#
root-mysrv1:/opt# ls -lrth
total 9720689
drwxr-xr-x   3 root     root          11 Nov 17  2010 145880-02
drwxr-xr-x   4 root     root           5 Jun  1  2011 OVM_Server_SPARC-2_1
-rw-r--r--   1 root     root        2.7G Nov  7  2012 sol-11-1111-sru13-04-incr-repo.iso       ------ this is my sru iso file...

-rw-rw-r--   1 root     root         90K Nov 21  2012 readme.html
-rw-rw-r--   1 root     root         73K Nov 21  2012 readme.txt
-rw-r--r--   1 root     root        2.0G May 12  2013 p15879286_1100_SOLARIS64.zip
root-mysrv1:/opt#
root-mysrv1:/opt# cd /
root-mysrv1:/#
root-mysrv1:/# mount -F hsfs /opt/sol-11-1111-sru13-04-incr-repo.iso /mnt                 

root-mysrv1:/#
root-mysrv1:/#
root-mysrv1:/# cd /mnt
root-mysrv1:/mnt#
root-mysrv1:/mnt# ls -lrth
total 20
drwxr-xr-x   3 root     root        2.0K Nov  7  2012 repo
-rwxr-xr-x   1 root     root        1.3K Nov  7  2012 NOTICES
-rw-r--r--   1 root     root        3.2K Nov  7  2012 COPYRIGHT
-rw-r--r--   1 root     root        3.0K Nov  7  2012 README
root-mysrv1:/mnt#
root-mysrv1:/mnt#
root-mysrv1:/mnt# cd /
root-mysrv1:/#
root-mysrv1:/# pkg publisher
PUBLISHER                             TYPE     STATUS   URI
root-mysrv1:/#

root-mysrv1:/# rsync -aP /mnt/repo /export/repoSolaris11
sending incremental file list
created directory /export/repoSolaris11
repo/
repo/pkg5.repository
         300 100%    0.00kB/s    0:00:00 (xfer#1, to-check=1235/1237)
repo/publisher/
repo/publisher/solaris/
repo/publisher/solaris/catalog/
repo/publisher/solaris/catalog/catalog.attrs
         732 100%  714.84kB/s    0:00:00 (xfer#2, to-check=1226/1237)
repo/publisher/solaris/catalog/catalog.base.C
       61715 100%  119.82kB/s    0:00:00 (xfer#3, to-check=1225/1237)
repo/publisher/solaris/catalog/catalog.dependency.C
      682512 100%    1.18MB/s    0:00:00 (xfer#4, to-check=1224/1237)
repo/publisher/solaris/catalog/catalog.summary.C
      287490 100%  490.83kB/s    0:00:00 (xfer#5, to-check=1223/1237)
repo/publisher/solaris/catalog/update.20121107T00Z.C
     1079095 100%    1.53MB/s    0:00:00 (xfer#6, to-check=1222/1237)
repo/publisher/solaris/file/
repo/publisher/solaris/file/00/
repo/publisher/solaris/file/00/00001904e440b8c2a7733ce4bb65e9f8edde72b6
        5841 100%    7.61kB/s    0:00:00 (xfer#7, to-check=1615/1887)
repo/publisher/solaris/file/00/00001d488d4978cfd1776ce8da5b284b3349ab49
       10419 100%   13.55kB/s    0:00:00 (xfer#8, to-check=1614/1887)
repo/publisher/solaris/file/00/0000700262b4d3c65f457dbcae41beafe04956ed
       13850 100%   17.94kB/s    0:00:00 (xfer#9, to-check=1613/1887)
repo/publisher/solaris/file/00/00017c0b1e5058dd73df952c6a7e9fdb43b9f851
        4959 100%    6.41kB/s    0:00:00 (xfer#10, to-check=1612/1887)

....... OUTPUT TRUNCATED .......
root-mysrv1:/#
root-mysrv1:/#
root-mysrv1:/# svccfg -s application/pkg/server setprop pkg/inst_root=/export/repoSolaris11

root-mysrv1:/#

If we want, we can set the repository to read-only (or read/write) as appropriate...

root-mysrv1:/# svccfg -s application/pkg/server setprop pkg/readonly=true
root-mysrv1:/#
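The port the depot listens on can also be changed through the same SMF service; a minimal sketch (the port value 10000 is only an illustration, it is not part of this setup):

root-mysrv1:/# svccfg -s application/pkg/server setprop pkg/port=10000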

Refresh and enable the service so that it picks up the new configuration:

root-mysrv1:/#
root-mysrv1:/# svcadm refresh application/pkg/server
root-mysrv1:/#
root-mysrv1:/# svcadm enable application/pkg/server

root-mysrv1:/#

Set the publisher on our packaging server:

root-mysrv1:/# pkgrepo -s /export/repoSolaris11 refresh
root-mysrv1:/#
root-mysrv1:/# pkg set-publisher -G '*' -g file:///export/repoSolaris11 solaris
root-mysrv1:/#
root-mysrv1:/# pkg publisher

PUBLISHER                             TYPE     STATUS   URI
solaris                               origin   online   file:///export/repoSolaris11

This is enough if we only need access from this server itself...

Now, to enable this publisher on a remote server, we need to create the publisher on the required clients as below. Clients access the repository via HTTP.

As the publisher was created with the /export/repoSolaris11 directory, which is the exact location of the repo data, the IP of the packaging server is sufficient as the URL.

root-clntsrv1:/#
root-clntsrv1:/# pkg set-publisher -G '*' -g http://10.20.10.25 solaris     ---- here my packaging server's IP is 10.20.10.25
root-clntsrv1:/#
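To confirm the client can actually reach the new publisher, a quick hedged check (no output shown) is to list the publisher and refresh its catalog:

root-clntsrv1:/# pkg publisher
root-clntsrv1:/# pkg refresh --full solaris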

To unset any publisher,

root-clntsrv1:/# pkg unset-publisher <publishername>

##################################################################################

Image Packaging System (IPS) !!!

Let us discuss package management in Solaris 11.

Unlike Solaris 10, there is no concept of patch administration (kernel jumbo patches) in Solaris 11.

In Oracle Solaris 11, we use the Image Packaging System (IPS), which is a network-based package management system.


Through IPS we can perform installation, upgrades and removal of software packages.

For this package administration we need repository data, which can be downloaded from Oracle. This repo is called a Support Repository Update (SRU); SRUs are released monthly by Oracle.

To install a package or apply an SRU, a server contacts a package publisher. The publisher points at a sync of the repo data; it can be the URL of a repo or a mounted SRU ISO image.

Example of a publisher : (url of Oracle IPS repo)

# pkg publisher
PUBLISHER                             TYPE     STATUS   URI
solaris                               origin   online   https://pkg.oracle.com/solaris/support/

For this we need to download the key and certificate from the home page of Oracle's IPS package repository. This publisher lets us update packages directly from the given URL over the internet.

Copy the downloaded key and certificate to the location /var/pkg/ssl/

-k --- to specify key
-c --- to specify certificate

pkg set-publisher \
    -k /var/pkg/ssl/Oracle_Solaris_11_Support.key.pem \
    -c /var/pkg/ssl/Oracle_Solaris_11_Support.certificate.pem \
    -g https://pkg.oracle.com/solaris/support/ \
    -G http://pkg.oracle.com/solaris/release/ solaris


The other method is to download the SRU ISO file.

This is used to update without an internet connection. Usually the downloaded SRU ISO file is kept on a dedicated server, and the package publisher is created using that server's location.

Or we can copy the ISO file locally and create a publisher directly from it.

root-mysrv1:/#
root-mysrv1:/# cd /opt
root-mysrv1:/#
root-mysrv1:/opt# ls -lrth
total 9720689
drwxr-xr-x   3 root     root          11 Nov 17  2010 145880-02
drwxr-xr-x   4 root     root           5 Jun  1  2011 OVM_Server_SPARC-2_1
-rw-r--r--   1 root     root        2.7G Nov  7  2012 sol-11-1111-sru13-04-incr-repo.iso          ------ this is my sru iso file...

-rw-rw-r--   1 root     root         90K Nov 21  2012 readme.html
-rw-rw-r--   1 root     root         73K Nov 21  2012 readme.txt
-rw-r--r--   1 root     root        2.0G May 12  2013 p15879286_1100_SOLARIS64.zip
root-mysrv1:/opt#
root-mysrv1:/opt# cd /
root-mysrv1:/#


Mount SRU image file

root-mysrv1:/# mount -F hsfs /opt/sol-11-1111-sru13-04-incr-repo.iso /mnt
root-mysrv1:/#
root-mysrv1:/#
root-mysrv1:/# cd /mnt
root-mysrv1:/mnt#
root-mysrv1:/mnt# ls -lrth
total 20
drwxr-xr-x   3 root     root        2.0K Nov  7  2012 repo
-rwxr-xr-x   1 root     root        1.3K Nov  7  2012 NOTICES
-rw-r--r--   1 root     root        3.2K Nov  7  2012 COPYRIGHT
-rw-r--r--   1 root     root        3.0K Nov  7  2012 README
root-mysrv1:/mnt#
root-mysrv1:/mnt#
root-mysrv1:/mnt# cd /
root-mysrv1:/#
root-mysrv1:/# pkg publisher
PUBLISHER                             TYPE     STATUS   URI
root-mysrv1:/#


As of now there is no publisher on my server. Let us set one.....

root-mysrv1:/#
root-mysrv1:/# pkg set-publisher -G '*' -g file:///mnt/repo/ solaris
root-mysrv1:/#
root-mysrv1:/#
root-mysrv1:/# pkg publisher
PUBLISHER                             TYPE     STATUS   URI
solaris                               origin   online  
file:///mnt/repo/
root-mysrv1:/#
root-mysrv1:/#


Now we can use this publisher to update our packages (apply the SRU).

root-mysrv1:/# pkg update
Creating Plan \
.....
..... output truncated
.....

root-mysrv1:/#
root-mysrv1:/#
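On a production box it is often safer to preview the update first; a minimal sketch using pkg's standard dry-run/verbose flags (output not shown here, and the boot-environment check is just a suggestion):

root-mysrv1:/# pkg update -nv        ---- -n: dry run only, -v: verbose plan
root-mysrv1:/# beadm list            ---- after a real update, check the new boot environment that pkg creates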
root-mysrv1:/# pkg info entire         

          Name: entire
       Summary: entire incorporation including Support Repository Update (Oracle Solaris 11 11/11 SRU 13.4).
   Description: This package constrains system package versions to the same
                build.  WARNING: Proper system update and correct package
                selection depend on the presence of this incorporation.
                Removing this package will result in an unsupported system.  For
                more information see
                https://support.oracle.com/CSP/main/article?cmd=show&type=NOT&doctype=REFERENCE&id=1372094.1.
      Category: Meta Packages/Incorporations
         State: Installed
     Publisher: solaris
       Version: 0.5.11 (Oracle Solaris 11 SRU 13.4)
 Build Release: 5.11
        Branch: 0.175.0.13.0.4.0
Packaging Date: November  6, 2012 07:46:23 PM
          Size: 5.45 kB
          FMRI: pkg://solaris/entire@0.5.11,5.11-0.175.0.13.0.4.0:20121106T194623Z
root-mysrv1:/#
root-mysrv1:/#

 
Hence package management is made easy in Solaris 11 with the help of the Image Packaging System (IPS).

##################################################################################

Sunday, 20 April 2014

Creation of ZFS FS, Cloning and Snapshots !!!

Creating a ZFS filesystem is very simple:
zfs create ....

We can set different property values according to our requirements, which help to place limits on our filesystems.

Examples of such properties are quota, reservation, deduplication and compression.

In this post we are going to discuss, 

1) ZFS creation and few properties.
2) ZFS Clones and Snapshots.

ZFS creation with the default mountpoint. The default mountpoint is the same as the dataset name (prefixed with /).

root-mysrv1~# zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rpool  9.94G  6.12G  3.81G  61%  ONLINE  -
root-mysrv1~#
root-mysrv1~# zpool create mypl c5d1 c5d2 c5d3
root-mysrv1~#
root-mysrv1~# zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
mypl    285M    77K   285M   0%  ONLINE  -
rpool  9.94G  6.12G  3.81G  61%  ONLINE  -
root-mysrv1~#

My pool "mypl" has been created.

root-mysrv1~#
root-mysrv1~# zfs create mypl/myfs1
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list mypl
NAME  USED  AVAIL  REFER  MOUNTPOINT
mypl  130K   253M    31K  /mypl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# df -kh
Filesystem             size   used  avail capacity  Mounted on
rpool/ROOT/s10s_u11wos_24a
                       9.8G   6.1G   3.7G    63%    /
/devices                 0K     0K     0K     0%    /devices
ctfs                     0K     0K     0K     0%    /system/contract
proc                     0K     0K     0K     0%    /proc
mnttab                   0K     0K     0K     0%    /etc/mnttab
swap                   4.7G   504K   4.7G     1%    /etc/svc/volatile
objfs                    0K     0K     0K     0%    /system/object
sharefs                  0K     0K     0K     0%    /etc/dfs/sharetab
/platform/SUNW,Sun-Fire-T200/lib/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/libc_psr.so.1
/platform/SUNW,Sun-Fire-T200/lib/sparcv9/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/sparcv9/libc_psr.so.1
fd                       0K     0K     0K     0%    /dev/fd
swap                   4.7G    32K   4.7G     1%    /tmp
swap                   4.7G    48K   4.7G     1%    /var/run
rpool/export           9.8G    32K   3.7G     1%    /export
rpool/export/home      9.8G    31K   3.7G     1%    /export/home
rpool                  9.8G   106K   3.7G     1%    /rpool
mypl                   253M    31K   253M     1%    /mypl
mypl/myfs1             253M    31K   253M     1%    /mypl/myfs1
root-mysrv1~#
root-mysrv1~# zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
mypl    285M   222K   285M   0%  ONLINE  -
rpool  9.94G  6.13G  3.81G  61%  ONLINE  -
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list
NAME                         USED  AVAIL  REFER  MOUNTPOINT
mypl                         130K   253M    32K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
rpool                       6.12G  3.66G   106K  /rpool
rpool/ROOT                  6.12G  3.66G    31K  legacy
rpool/ROOT/s10s_u11wos_24a  6.12G  3.66G  6.12G  /
rpool/export                  63K  3.66G    32K  /export
rpool/export/home             31K  3.66G    31K  /export/home
root-mysrv1~#
root-mysrv1~#

ZFS creation with a specific mountpoint for our filesystem.

root-mysrv1~#
root-mysrv1~# zfs create -o mountpoint=/mypl/mysap mypl/sapmnt
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl                   253M    32K   253M     1%    /mypl
mypl/myfs1             253M    31K   253M     1%    /mypl/myfs1
mypl/sapmnt            253M    31K   253M     1%    /mypl/mysap
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         176K   253M    32K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K   253M    31K  /mypl/mysap
root-mysrv1~#

***********************************************************************
Let us discuss some properties.
First of all, we should have a clear idea about the difference between QUOTA and RESERVATION.

A quota sets a limit on the amount of space a filesystem can use.
A reservation is an allocation of space from the pool that is guaranteed to be available to a dataset.
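A reservation is set just like a quota; a minimal sketch (the 20m value is only an illustration, it is not part of the walkthrough below):

root-mysrv1~# zfs set reservation=20m mypl/sapmnt        ---- guarantee 20 MB of pool space to this dataset
root-mysrv1~# zfs get reservation mypl/sapmnt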

We can either set a property at the time of FS creation or set it later....

Now let us create a new FS with some quota.....

root-mysrv1~#
root-mysrv1~# zfs create -o mountpoint=/mypl/myspl -o quota=1m mypl/spl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         224K   253M    33K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K   253M    31K  /mypl/mysap
mypl/spl                      31K   993K    31K  /mypl/myspl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl                   253M    33K   253M     1%    /mypl
mypl/myfs1             253M    31K   253M     1%    /mypl/myfs1
mypl/sapmnt            253M    31K   253M     1%    /mypl/mysap
mypl/spl               1.0M    31K   993K     4%    /mypl/myspl
root-mysrv1~#

To view a single property value we use zfs get <property> <fsname>; for all property values we can use zfs get all <fsname>.

root-mysrv1~# zfs get quota mypl/spl     
NAME      PROPERTY  VALUE  SOURCE
mypl/spl  quota     1M     local
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs set quota=10m mypl/spl          ---- now we can change the value, if necessary..
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs get quota mypl/spl
NAME      PROPERTY  VALUE  SOURCE
mypl/spl  quota     10M    local
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl                   253M    34K   253M     1%    /mypl
mypl/myfs1             253M    31K   253M     1%    /mypl/myfs1
mypl/sapmnt            253M    31K   253M     1%    /mypl/mysap
mypl/spl                10M    31K  10.0M     1%    /mypl/myspl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         226K   253M    34K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K   253M    31K  /mypl/mysap
mypl/spl                      31K  9.97M    31K  /mypl/myspl
root-mysrv1~#

To set a quota on an existing filesystem...

root-mysrv1~# zfs set quota=50m mypl/sapmnt
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         226K   253M    34K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                      31K  9.97M    31K  /mypl/myspl
root-mysrv1~#
root-mysrv1~# zpool status mypl
  pool: mypl
 state: ONLINE
 scan: none requested
config:
        NAME        STATE     READ WRITE CKSUM
        mypl        ONLINE       0     0     0
          c5d1      ONLINE       0     0     0
          c5d2      ONLINE       0     0     0
          c5d3      ONLINE       0     0     0
errors: No known data errors
root-mysrv1~#
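Compression (and, on releases that support it, deduplication), mentioned at the start of this post, is enabled the same way as the properties above; a hedged sketch, not run in the original session:

root-mysrv1~# zfs set compression=on mypl/myfs1          ---- newly written data will be compressed
root-mysrv1~# zfs get compression,compressratio mypl/myfs1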

***********************************************************************

ZFS snapshots: these are very helpful for reverting a bad situation.

It is always advisable to take a snapshot of an existing filesystem before performing an activity or applying modifications to it.

The main thing about snapshots is that a snapshot initially occupies almost no space; it simply records the state of the entire filesystem and only grows as the original data changes.

It lets us revert when something goes wrong because of an activity.

root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl                   253M    34K   253M     1%    /mypl
mypl/myfs1             253M    31K   253M     1%    /mypl/myfs1
mypl/sapmnt             50M    31K    50M     1%    /mypl/mysap
mypl/spl                10M    31K  10.0M     1%    /mypl/myspl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         228K   253M    34K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                      31K  9.97M    31K  /mypl/myspl
root-mysrv1~#
root-mysrv1~#

Here I am creating a snapshot of my FS in its initial state.

root-mysrv1~# zfs snapshot mypl/spl@may2014
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list | grep mypl
mypl                         229K   253M    34K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                      31K  9.97M    31K  /mypl/myspl
mypl/spl@may2014                0      -    31K  -
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# cd /
root-mysrv1~#
root-mysrv1~# du -sh *
   0K   bin
 295K   boot
 500M   dev
 176K   devices
  49M   etc
   3K   export
   0K   home
  61M   kernel
  36M   lib
   1K   mnt
   6K   mypl
   0K   net
 138M   opt
   1K   pl1
 320M   platform
 824M   proc
  78K   rpool
 1.6M   sbin
 8.6M   system
   6K   test
  80K   tmp
^C
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# cd sbin
root-mysrv1~# ls
autopush     ifconfig     metainit     rc2          su           umountall
bootadm      ifparse      metarecover  rc3          su.static    uname
bpgetfile    in.mpathd    metastat     rc5          sulogin      zfs
dhcpagent    init         mount        rc6          swapadd      zonename
dhcpinfo     jsh          mountall     rcS          sync         zpool
dladm        luactivate   netstrategy  route        tnctl
fdisk        lucurr       pfsh         routeadm     tzreload
fiocompress  metadb       rc0          sh           uadmin
hostconfig   metadevadm   rc1          soconfig     umount
root-mysrv1~#

To apply some changes, let us copy some data to our filesystem.

root-mysrv1~# cp -rp /sbin /mypl/myspl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# cd /mypl/myspl
root-mysrv1~#
root-mysrv1~# ls -rlth
total 5
drwxr-xr-x   2 root     sys           52 Apr  5 08:32 sbin
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl                   253M    34K   251M     1%    /mypl
mypl/myfs1             253M    31K   251M     1%    /mypl/myfs1
mypl/sapmnt             50M    31K    50M     1%    /mypl/mysap
mypl/spl                10M   1.8M   8.2M    18%    /mypl/myspl
root-mysrv1~#
root-mysrv1~#

Again take a snapshot after copying some files...

root-mysrv1~#
root-mysrv1~# zfs snapshot mypl/spl@may20142nd
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                        2.39M   251M    34K  /mypl
mypl/myfs1                    31K   251M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                    1.96M  8.04M  1.94M  /mypl/myspl
mypl/spl@may2014              19K      -    31K  -
mypl/spl@may20142nd             0      -  1.94M  -
root-mysrv1~#

As we can see, the first snapshot now occupies some space (the filesystem has changed since it was taken), while the second snapshot is 0K.
Now let us roll back to the earlier state...

root-mysrv1~#
root-mysrv1~# zfs rollback mypl/spl@may2014
cannot rollback to 'mypl/spl@may2014': more recent snapshots exist
use '-r' to force deletion of the following snapshots:
mypl/spl@may20142nd
root-mysrv1~#
root-mysrv1~# zfs rollback -r mypl/spl@may2014
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         268K   253M    34K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                      32K  9.97M    31K  /mypl/myspl
mypl/spl@may2014               1K      -    31K  -
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# df -kh | grep mypl
mypl                   253M    34K   253M     1%    /mypl
mypl/myfs1             253M    31K   253M     1%    /mypl/myfs1
mypl/sapmnt             50M    31K    50M     1%    /mypl/mysap
mypl/spl                10M    31K  10.0M     1%    /mypl/myspl
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# cd /mypl/myspl
root-mysrv1~#
root-mysrv1~# ls
root-mysrv1~#        ------ previously there was no data in my filesystem, so the FS has been rolled back to that state.

To destroy a snapshot,

root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         268K   253M    34K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                      32K  9.97M    31K  /mypl/myspl
mypl/spl@may2014               1K      -    31K  -
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs destroy mypl/spl@may2014
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         456K   253M    34K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                      31K  9.97M    31K  /mypl/myspl
root-mysrv1~#

ZFS cloning: a clone is an exact, writable copy of an existing filesystem. We can clone an FS only from one of its snapshots, so whenever we need a clone of an FS we first take a snapshot and then create the clone from it.

root-mysrv1~#
root-mysrv1~# zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
mypl    285M   344K   285M   0%  ONLINE  -
rpool  9.94G  6.13G  3.81G  61%  ONLINE  -
root-mysrv1~#
root-mysrv1~# zpool list |grep mypl
mypl    285M   344K   285M   0%  ONLINE  -
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         247K   253M    34K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                      31K  9.97M    31K  /mypl/myspl
root-mysrv1~#
root-mysrv1~# zfs clone mypl/spl mypl/spl_very               ---- can clone a FS from its snapshot only.
cannot open 'mypl/spl': operation not applicable to datasets of this type
root-mysrv1~#
root-mysrv1~# zfs snapshot mypl/spl@12345                    ---- creating snapshot.
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         252K   253M    34K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                      31K  9.97M    31K  /mypl/myspl
mypl/spl@12345                  0      -    31K  -
root-mysrv1~#
root-mysrv1~# zfs clone mypl/spl@12345 mypl/spl_very    ---- creating clone 
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         274K   253M    34K  /mypl
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                      31K  9.97M    31K  /mypl/myspl
mypl/spl@12345                  0      -    31K  -
mypl/spl_very                  1K   253M    31K  /mypl/spl_very
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs clone -o mountpoint=/splclone mypl/spl@12345 mypl/cln1
root-mysrv1~#
root-mysrv1~#
root-mysrv1~# zfs list |grep mypl
mypl                         312K   253M    36K  /mypl
mypl/cln1                      1K   253M    31K  /splclone
mypl/myfs1                    31K   253M    31K  /mypl/myfs1
mypl/sapmnt                   31K  50.0M    31K  /mypl/mysap
mypl/spl                      31K  9.97M    31K  /mypl/myspl
mypl/spl@12345                  0      -    31K  -
mypl/spl_very                  1K   253M    31K  /mypl/spl_very
root-mysrv1~#
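One more point worth knowing: a clone stays dependent on the snapshot it was created from, so that snapshot cannot be destroyed while the clone exists. If the clone is meant to replace the original, it can be promoted; a minimal sketch (not run in the session above):

root-mysrv1~# zfs promote mypl/spl_very        ---- the snapshot dependency moves to the clone, which becomes a standalone FS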

#################################################################################

Sunday, 13 April 2014

RAIDZ1, RAIDZ2, RAIDZ3 techniques in ZFS !!!

In the previous post we discussed the types of RAID; now let us create raidz1, raidz2 and raidz3 pools.

Creation of raidz1 pool :

root@mysrv1 # zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rpool  9.94G  6.13G  3.81G  61%  ONLINE  -

root@mysrv1 #
root@mysrv1 # zpool create raidpl1 raidz1 c5d1 c5d2 c5d3
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
 scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     ONLINE       0     0     0
          raidz1-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    ONLINE       0     0     0
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 # zpool list
NAME      SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
raidpl1   286M   174K   286M   0%  ONLINE  -

rpool    9.94G  6.12G  3.81G  61%  ONLINE  -
root@mysrv1 #
root@mysrv1 # df -kh
Filesystem             size   used  avail capacity  Mounted on
rpool/ROOT/s10s_u11wos_24a
                       9.8G   6.1G   3.7G    63%    /
/devices                 0K     0K     0K     0%    /devices
ctfs                     0K     0K     0K     0%    /system/contract
proc                     0K     0K     0K     0%    /proc
mnttab                   0K     0K     0K     0%    /etc/mnttab
swap                   5.0G   504K   5.0G     1%    /etc/svc/volatile
objfs                    0K     0K     0K     0%    /system/object
sharefs                  0K     0K     0K     0%    /etc/dfs/sharetab
/platform/SUNW,Sun-Fire-T200/lib/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/libc_psr.so.1
/platform/SUNW,Sun-Fire-T200/lib/sparcv9/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/sparcv9/libc_psr.so.1
fd                       0K     0K     0K     0%    /dev/fd
swap                   5.0G    32K   5.0G     1%    /tmp
swap                   5.0G    48K   5.0G     1%    /var/run
rpool/export           9.8G    32K   3.7G     1%    /export
rpool/export/home      9.8G    31K   3.7G     1%    /export/home
rpool                  9.8G   106K   3.7G     1%    /rpool
raidpl1                158M    34K   158M     1%    /raidpl1

root@mysrv1 #
root@mysrv1 #

Create a filesystem so we can better observe how parity keeps the data consistent even when a disk fails.

 root@mysrv1 #
root@mysrv1 # zfs create raidpl1/oracle
root@mysrv1 #
root@mysrv1 # df -kh
Filesystem             size   used  avail capacity  Mounted on
rpool/ROOT/s10s_u11wos_24a
                       9.8G   6.1G   3.7G    63%    /
/devices                 0K     0K     0K     0%    /devices
ctfs                     0K     0K     0K     0%    /system/contract
proc                     0K     0K     0K     0%    /proc
mnttab                   0K     0K     0K     0%    /etc/mnttab
swap                   5.0G   504K   5.0G     1%    /etc/svc/volatile
objfs                    0K     0K     0K     0%    /system/object
sharefs                  0K     0K     0K     0%    /etc/dfs/sharetab
/platform/SUNW,Sun-Fire-T200/lib/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/libc_psr.so.1
/platform/SUNW,Sun-Fire-T200/lib/sparcv9/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/sparcv9/libc_psr.so.1
fd                       0K     0K     0K     0%    /dev/fd
swap                   5.0G    32K   5.0G     1%    /tmp
swap                   5.0G    48K   5.0G     1%    /var/run
rpool/export           9.8G    32K   3.7G     1%    /export
rpool/export/home      9.8G    31K   3.7G     1%    /export/home
rpool                  9.8G   106K   3.7G     1%    /rpool
raidpl1                158M    34K   158M     1%    /raidpl1
raidpl1/oracle         158M    34K   158M     1%    /raidpl1/oracle

root@mysrv1 #
root@mysrv1 #
Copied some data from /var
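The exact copy command was not captured in the transcript; something along these lines would have populated the filesystem (the source path is only an assumption):

root@mysrv1 # cp -rp /var/sadm/* /raidpl1/oracle/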

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # df -kh /raidpl1/oracle
Filesystem             size   used  avail capacity  Mounted on
raidpl1/oracle         158M    54M   104M    35%    /raidpl1/oracle

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # cd /raidpl1/oracle
root@mysrv1 #
root@mysrv1 # ls -lrth |more
total 62
drwxr-xr-x   2 root     sys            3 Apr  5 08:48 install_data
drwxr-xr-x   2 root     root           3 Apr  5 08:49 patch
dr-xr-xr-x   4 root     bin           10 Apr  5 09:05 install
-r--r--r--   1 root     root        1.1K Apr 14 09:02 README
drwxr-xr-x 657 root     root         657 Apr 14 09:03 pkg

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # cd /dev/dsk/
root@mysrv1 #
root@mysrv1 #

Now let us try corrupting a disk in the raidz1 pool. (In this setup the "disks" c5d1-c5d5 are actually 100 MB files under /dev/dsk created with mkfile, so removing the file simulates a failed disk.)

root@mysrv1 # ls -lrth c5d2
-rw------T   1 root     root        100M Apr 14 09:03 c5d2

root@mysrv1 #
root@mysrv1 # rm c5d2
root@mysrv1 #
root@mysrv1 # ls -lrth c5*
-rw------T   1 root     root        100M Apr 14 08:59 c5d4
-rw------T   1 root     root        100M Apr 14 08:59 c5d5
-rw------T   1 root     root        100M Apr 14 09:03 c5d1
-rw------T   1 root     root        100M Apr 14 09:03 c5d3

root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
 scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     ONLINE       0     0     0
          raidz1-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    ONLINE       0     0     0
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 # df -kh
Filesystem             size   used  avail capacity  Mounted on
rpool/ROOT/s10s_u11wos_24a
                       9.8G   6.1G   3.7G    63%    /
/devices                 0K     0K     0K     0%    /devices
ctfs                     0K     0K     0K     0%    /system/contract
proc                     0K     0K     0K     0%    /proc
mnttab                   0K     0K     0K     0%    /etc/mnttab
swap                   4.8G   504K   4.8G     1%    /etc/svc/volatile
objfs                    0K     0K     0K     0%    /system/object
sharefs                  0K     0K     0K     0%    /etc/dfs/sharetab
/platform/SUNW,Sun-Fire-T200/lib/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/libc_psr.so.1
/platform/SUNW,Sun-Fire-T200/lib/sparcv9/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/sparcv9/libc_psr.so.1
fd                       0K     0K     0K     0%    /dev/fd
swap                   4.8G    32K   4.8G     1%    /tmp
swap                   4.8G    48K   4.8G     1%    /var/run
rpool/export           9.8G    32K   3.7G     1%    /export
rpool/export/home      9.8G    31K   3.7G     1%    /export/home
rpool                  9.8G   106K   3.7G     1%    /rpool
raidpl1                158M    36K   100M     1%    /raidpl1
raidpl1/oracle         158M    58M   100M    37%    /raidpl1/oracle

root@mysrv1 #
root@mysrv1 # zfs get all raidpl1
NAME     PROPERTY              VALUE                  SOURCE
raidpl1  type                  filesystem             -
raidpl1  creation              Mon Apr 14  8:56 2014  -
raidpl1  used                  58.6M                  -
raidpl1  available             99.9M                  -
raidpl1  referenced            36.0K                  -
raidpl1  compressratio         1.00x                  -
raidpl1  mounted               yes                    -
raidpl1  quota                 none                   default
raidpl1  reservation           none                   default
raidpl1  recordsize            128K                   default
raidpl1  mountpoint            /raidpl1               default
raidpl1  sharenfs              off                    default
raidpl1  checksum              on                     default
raidpl1  compression           off                    default
raidpl1  atime                 on                     default
raidpl1  devices               on                     default
raidpl1  exec                  on                     default
raidpl1  setuid                on                     default
raidpl1  readonly              off                    default
raidpl1  zoned                 off                    default
raidpl1  snapdir               hidden                 default
raidpl1  aclmode               discard                default
raidpl1  aclinherit            restricted             default
raidpl1  canmount              on                     default
raidpl1  shareiscsi            off                    default
raidpl1  xattr                 on                     default
raidpl1  copies                1                      default
raidpl1  version               5                      -
raidpl1  utf8only              off                    -
raidpl1  normalization         none                   -
raidpl1  casesensitivity       mixed                  -
raidpl1  vscan                 off                    default
raidpl1  nbmand                off                    default
raidpl1  sharesmb              off                    default
raidpl1  refquota              none                   default
raidpl1  refreservation        none                   default
raidpl1  primarycache          all                    default
raidpl1  secondarycache        all                    default
raidpl1  usedbysnapshots       0                      -
raidpl1  usedbydataset         36.0K                  -
raidpl1  usedbychildren        58.5M                  -
raidpl1  usedbyrefreservation  0                      -
raidpl1  logbias               latency                default
raidpl1  sync                  standard               default
raidpl1  rekeydate             -                      default
raidpl1  rstchown              on                     default

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool offline raidpl1 c5d2        ------ by offlining the disk, the pool moves to the DEGRADED state.
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: DEGRADED
status: One or more devices has been taken offline by the administrator.
        Sufficient replicas exist for the pool to continue functioning in a
        degraded state.
action: Online the device using 'zpool online' or replace the device with
        'zpool replace'.
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:37:02 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     DEGRADED     0     0     0
          raidz1-0  DEGRADED     0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    OFFLINE      0     0     0
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool online raidpl1 c5d2            ------ since we corrupted the disk by removing c5d2, it cannot be brought back online.
SUNW-MSG-ID: ZFS-8000-D3, TYPE: Fault, VER: 1, SEVERITY: Major
EVENT-TIME: 20
PLATFORM: SUNW,Sun-Fire-T200, CSN: -, HOSTNAME: mysrv1
SOURCE: zfs-diagnosis, REV: 1.0
EVENT-ID: 91be2938-68a2-e7f4-db91-bd350ae8b461
DESC: A ZFS device failed.
AUTO-RESPONSE: No automated response will occur.
IMPACT: Fault tolerance of the pool may be compromised.
REC-ACTION: Run 'zpool status -x' for more information. Please refer to the associated reference document at
http://sun.com/msg/ZFS-8000-D3 for the latest service procedures and policies regarding this diagnosis.
warning: device 'c5d2' onlined, but remains in faulted state
use 'zpool replace' to replace devices that are no longer present

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool status raidpl1                 ------ still degraded; the disk status now shows UNAVAIL.
 pool: raidpl1
 state: DEGRADED
status: One or more devices could not be opened.  Sufficient replicas exist for
        the pool to continue functioning in a degraded state.
action: Attach the missing device and online it using 'zpool online'.
   see:
http://www.sun.com/msg/ZFS-8000-2Q
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:37:02 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     DEGRADED     0     0     0
          raidz1-0  DEGRADED     0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  cannot open
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool status -x                          ------ To view DEGRADED pools. 
 pool: raidpl1
 state: DEGRADED
status: One or more devices could not be opened.  Sufficient replicas exist for
        the pool to continue functioning in a degraded state.
action: Attach the missing device and online it using 'zpool online'.
   see:
http://www.sun.com/msg/ZFS-8000-2Q
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:37:02 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     DEGRADED     0     0     0
          raidz1-0  DEGRADED     0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  cannot open
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # mkfile 100m /dev/dsk/c5d2     --- create the device file once again..
root@mysrv1 #
root@mysrv1 # zpool online raidpl1 c5d2
warning: device 'c5d2' onlined, but remains in faulted state
use 'zpool replace' to replace devices that are no longer present

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool status -x                    
  pool: raidpl1
 state: DEGRADED
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see:
http://www.sun.com/msg/ZFS-8000-4J
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:37:02 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     DEGRADED     0     0     0
          raidz1-0  DEGRADED     0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  corrupted data  ---- this time the data is reported as corrupted
            c5d3    ONLINE       0     0     0
errors: No known data errors
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # df -kh
Filesystem             size   used  avail capacity  Mounted on
rpool/ROOT/s10s_u11wos_24a
                       9.8G   6.1G   3.7G    63%    /
/devices                 0K     0K     0K     0%    /devices
ctfs                     0K     0K     0K     0%    /system/contract
proc                     0K     0K     0K     0%    /proc
mnttab                   0K     0K     0K     0%    /etc/mnttab
swap                   4.8G   504K   4.8G     1%    /etc/svc/volatile
objfs                    0K     0K     0K     0%    /system/object
sharefs                  0K     0K     0K     0%    /etc/dfs/sharetab
/platform/SUNW,Sun-Fire-T200/lib/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/libc_psr.so.1
/platform/SUNW,Sun-Fire-T200/lib/sparcv9/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/sparcv9/libc_psr.so.1
fd                       0K     0K     0K     0%    /dev/fd
swap                   4.8G    32K   4.8G     1%    /tmp
swap                   4.8G    48K   4.8G     1%    /var/run
rpool/export           9.8G    32K   3.7G     1%    /export
rpool/export/home      9.8G    31K   3.7G     1%    /export/home
rpool                  9.8G   106K   3.7G     1%    /rpool
raidpl1                158M    36K   100M     1%    /raidpl1
raidpl1/oracle         158M    58M   100M    37%    /raidpl1/oracle

root@mysrv1 #
root@mysrv1 # cd /raidpl1/oracle
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # ls -lrth
total 62
drwxr-xr-x   2 root     sys            3 Apr  5 08:48 install_data
drwxr-xr-x   2 root     root           3 Apr  5 08:49 patch
dr-xr-xr-x   4 root     bin           10 Apr  5 09:05 install
-r--r--r--   1 root     root        1.1K Apr 14 09:02 README
drwxr-xr-x 657 root     root         657 Apr 14 09:03 pkg

root@mysrv1 #
root@mysrv1 # cd install_data/
root@mysrv1 #
root@mysrv1 # ls
install_log

root@mysrv1 #
root@mysrv1 # ls -lrth
total 241
-rw-r--r--   1 root     root        120K Apr  5 08:49 install_log

root@mysrv1 #
root@mysrv1 # pwd
/raidpl1/oracle/install_data               -------- I can still access my data, since this is raidz1 (single parity tolerates one failed disk).

root@mysrv1 #
root@mysrv1 # zpool status
  pool: raidpl1
 state: DEGRADED
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see:
http://www.sun.com/msg/ZFS-8000-4J
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:37:02 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     DEGRADED     0     0     0
          raidz1-0  DEGRADED     0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  corrupted data
            c5d3    ONLINE       0     0     0

errors: No known data errors
  pool: rpool
 state: ONLINE
 scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        rpool       ONLINE       0     0     0
          c0d0s0    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #

Now try offlining one more disk; this is not possible. Observe the error:

root@mysrv1 # zpool offline raidpl1 c5d3
cannot offline c5d3: no valid replicas

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool scrub raidpl1
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see:
http://www.sun.com/msg/ZFS-8000-4J
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:52:13 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     ONLINE       0     0     0
          raidz1-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  corrupted data
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 # zpool add raidpl1 spare c5d5          ------ now, to recover my pool, I need to replace the failed disk; first add a spare.
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see:
http://www.sun.com/msg/ZFS-8000-4J
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:52:13 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     ONLINE       0     0     0
          raidz1-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  corrupted data
            c5d3    ONLINE       0     0     0
        spares
          c5d5      AVAIL

errors: No known data errors
root@mysrv1 #

root@mysrv1 #
root@mysrv1 # zpool replace raidpl1 c5d2 c5d5        ------ replacing the disk with spare disk.
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see:
http://www.sun.com/msg/ZFS-8000-4J
 scan: resilvered 28.0M in 0h0m with 0 errors on Mon Apr 14 09:53:06 2014
config:

        NAME         STATE     READ WRITE CKSUM
        raidpl1      ONLINE       0     0     0
          raidz1-0   ONLINE       0     0     0
            c5d1     ONLINE       0     0     0
            spare-1  ONLINE       0     0     0
              c5d2   UNAVAIL      0     0     0  corrupted data
              c5d5   ONLINE       0     0     0
            c5d3     ONLINE       0     0     0
        spares
          c5d5       INUSE     currently in use

errors: No known data errors
root@mysrv1 #
root@mysrv1 #

After resilvering (re-attaching) completes, detach the faulted disk and your pool will be back to a normal state...

root@mysrv1 # zpool detach raidpl1 c5d2
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
 scan: resilvered 28.0M in 0h0m with 0 errors on Mon Apr 14 09:53:06 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     ONLINE       0     0     0
          raidz1-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d5    ONLINE       0     0     0
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #

Creation of raidz2 and raidz3 pools is similar to raidz1; the difference is in how many disk failures each can tolerate...

root@mysrv1 # zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rpool  9.94G  6.03G  3.91G  60%  ONLINE  -
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool create rdpl raidz2 c5d1 c5d2 c5d3 c5d4
root@mysrv1 #
root@mysrv1 # zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rdpl    382M   231K   382M   0%  ONLINE  -
rpool  9.94G  6.12G  3.82G  61%  ONLINE  -
root@mysrv1 #
root@mysrv1 # zpool status rdpl
  pool: rdpl
 state: ONLINE
 scan: none requested
config:
        NAME        STATE     READ WRITE CKSUM
        rdpl        ONLINE       0     0     0
          raidz2-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    ONLINE       0     0     0
            c5d3    ONLINE       0     0     0
            c5d4    ONLINE       0     0     0
errors: No known data errors
root@mysrv1 #

So raidz2 tolerates a maximum of 2 disk failures.
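A quick hedged way to convince ourselves (this was not run in the original session): with double parity we should be able to take two members offline and the pool stays available, only DEGRADED:

root@mysrv1 # zpool offline rdpl c5d1
root@mysrv1 # zpool offline rdpl c5d2        ---- with double parity this second offline should still be allowed
root@mysrv1 # zpool status rdpl              ---- pool reports DEGRADED but data remains accessible
root@mysrv1 # zpool online rdpl c5d1 c5d2    ---- bring the disks back when done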

= = = = = = = = = = = = = = = =

Similarly, raidz3 tolerates a maximum of 3 disk failures. Creation of raidz3 is as follows:

root@mysrv1 #
root@mysrv1 # zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rpool  9.94G  6.12G  3.81G  61%  ONLINE  -

root@mysrv1 #
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool create rpl3 raidz3 c5d1 c5d2 c5d3 c5d4 c5d5
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rpl3    476M   304K   476M   0%  ONLINE  -
rpool  9.94G  6.16G  3.78G  61%  ONLINE  -
root@mysrv1 #
root@mysrv1 # zpool status rpl3
  pool: rpl3
 state: ONLINE
 scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        rpl3        ONLINE       0     0     0
          raidz3-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    ONLINE       0     0     0
            c5d3    ONLINE       0     0     0
            c5d4    ONLINE       0     0     0
            c5d5    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #

root@mysrv1 #

##################################################################################