Sunday, 13 April 2014

RAIDZ1, RAIDZ2, RAIDZ3 techniques in ZFS !!!

In the previous post we discussed the types of RAID; now let us create raidz1, raidz2, and raidz3 pools.
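Note: in this demo the "disks" c5d1 through c5d5 are not real LUNs but 100 MB files created under /dev/dsk purely for testing (later you will see ls reporting them as regular files, and mkfile being used to re-create one of them). Assuming that setup, the backing files would have been prepared with something like:

root@mysrv1 # cd /dev/dsk
root@mysrv1 # mkfile 100m c5d1 c5d2 c5d3 c5d4 c5d5     ------ test-only backing files; never do this with a production pool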

Creation of a raidz1 pool:

root@mysrv1 # zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rpool  9.94G  6.13G  3.81G  61%  ONLINE  -

root@mysrv1 #
root@mysrv1 # zpool create raidpl1 raidz1 c5d1 c5d2 c5d3
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
 scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     ONLINE       0     0     0
          raidz1-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    ONLINE       0     0     0
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 # zpool list
NAME      SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
raidpl1   286M   174K   286M   0%  ONLINE  -
rpool    9.94G  6.12G  3.81G  61%  ONLINE  -
root@mysrv1 #
root@mysrv1 # df -kh
Filesystem             size   used  avail capacity  Mounted on
rpool/ROOT/s10s_u11wos_24a
                       9.8G   6.1G   3.7G    63%    /
/devices                 0K     0K     0K     0%    /devices
ctfs                     0K     0K     0K     0%    /system/contract
proc                     0K     0K     0K     0%    /proc
mnttab                   0K     0K     0K     0%    /etc/mnttab
swap                   5.0G   504K   5.0G     1%    /etc/svc/volatile
objfs                    0K     0K     0K     0%    /system/object
sharefs                  0K     0K     0K     0%    /etc/dfs/sharetab
/platform/SUNW,Sun-Fire-T200/lib/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/libc_psr.so.1
/platform/SUNW,Sun-Fire-T200/lib/sparcv9/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/sparcv9/libc_psr.so.1
fd                       0K     0K     0K     0%    /dev/fd
swap                   5.0G    32K   5.0G     1%    /tmp
swap                   5.0G    48K   5.0G     1%    /var/run
rpool/export           9.8G    32K   3.7G     1%    /export
rpool/export/home      9.8G    31K   3.7G     1%    /export/home
rpool                  9.8G   106K   3.7G     1%    /rpool
raidpl1                158M    34K   158M     1%    /raidpl1

root@mysrv1 #
root@mysrv1 #
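Note that zpool list reports the raw pool size (286M, roughly the total of all three backing files, parity included), while df shows the usable space left after parity and ZFS overhead (~158M). To double-check the usable figure at the dataset level, a command like the following (not part of the original run) can be used:

root@mysrv1 # zfs list raidpl1          ------ the AVAIL column should match the ~158M usable space reported by df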

Create a filesystem on the pool so we can verify that parity keeps the data consistent and accessible even if a disk fails.

 root@mysrv1 #
root@mysrv1 # zfs create raidpl1/oracle
root@mysrv1 #
root@mysrv1 # df -kh
Filesystem             size   used  avail capacity  Mounted on
rpool/ROOT/s10s_u11wos_24a
                       9.8G   6.1G   3.7G    63%    /
/devices                 0K     0K     0K     0%    /devices
ctfs                     0K     0K     0K     0%    /system/contract
proc                     0K     0K     0K     0%    /proc
mnttab                   0K     0K     0K     0%    /etc/mnttab
swap                   5.0G   504K   5.0G     1%    /etc/svc/volatile
objfs                    0K     0K     0K     0%    /system/object
sharefs                  0K     0K     0K     0%    /etc/dfs/sharetab
/platform/SUNW,Sun-Fire-T200/lib/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/libc_psr.so.1
/platform/SUNW,Sun-Fire-T200/lib/sparcv9/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/sparcv9/libc_psr.so.1
fd                       0K     0K     0K     0%    /dev/fd
swap                   5.0G    32K   5.0G     1%    /tmp
swap                   5.0G    48K   5.0G     1%    /var/run
rpool/export           9.8G    32K   3.7G     1%    /export
rpool/export/home      9.8G    31K   3.7G     1%    /export/home
rpool                  9.8G   106K   3.7G     1%    /rpool
raidpl1                158M    34K   158M     1%    /raidpl1
raidpl1/oracle         158M    34K   158M     1%    /raidpl1/oracle

root@mysrv1 #
root@mysrv1 #
Copy some data from /var into the new filesystem.
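The exact source path is not captured above; judging from the listing a little further down (install, install_data, patch, pkg, README) it looks like part of the Solaris /var/sadm tree, so something along these lines (hypothetical path) would produce a similar layout:

root@mysrv1 # cp -pr /var/sadm/* /raidpl1/oracle/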

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # df -kh /raidpl1/oracle
Filesystem             size   used  avail capacity  Mounted on
raidpl1/oracle         158M    54M   104M    35%    /raidpl1/oracle

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # cd /raidpl1/oracle
root@mysrv1 #
root@mysrv1 # ls -lrth |more
total 62
drwxr-xr-x   2 root     sys            3 Apr  5 08:48 install_data
drwxr-xr-x   2 root     root           3 Apr  5 08:49 patch
dr-xr-xr-x   4 root     bin           10 Apr  5 09:05 install
-r--r--r--   1 root     root        1.1K Apr 14 09:02 README
drwxr-xr-x 657 root     root         657 Apr 14 09:03 pkg

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # cd /dev/dsk/
root@mysrv1 #
root@mysrv1 #

Now let us corrupt a disk in the raidz1 pool. Since the pool sits on file-backed devices, simply removing the backing file does the job:

root@mysrv1 # ls -lrth c5d2
-rw------T   1 root     root        100M Apr 14 09:03 c5d2

root@mysrv1 #
root@mysrv1 # rm c5d2
root@mysrv1 #
root@mysrv1 # ls -lrth c5*
-rw------T   1 root     root        100M Apr 14 08:59 c5d4
-rw------T   1 root     root        100M Apr 14 08:59 c5d5
-rw------T   1 root     root        100M Apr 14 09:03 c5d1
-rw------T   1 root     root        100M Apr 14 09:03 c5d3

root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
 scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     ONLINE       0     0     0
          raidz1-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    ONLINE       0     0     0
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 # df -kh
Filesystem             size   used  avail capacity  Mounted on
rpool/ROOT/s10s_u11wos_24a
                       9.8G   6.1G   3.7G    63%    /
/devices                 0K     0K     0K     0%    /devices
ctfs                     0K     0K     0K     0%    /system/contract
proc                     0K     0K     0K     0%    /proc
mnttab                   0K     0K     0K     0%    /etc/mnttab
swap                   4.8G   504K   4.8G     1%    /etc/svc/volatile
objfs                    0K     0K     0K     0%    /system/object
sharefs                  0K     0K     0K     0%    /etc/dfs/sharetab
/platform/SUNW,Sun-Fire-T200/lib/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/libc_psr.so.1
/platform/SUNW,Sun-Fire-T200/lib/sparcv9/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/sparcv9/libc_psr.so.1
fd                       0K     0K     0K     0%    /dev/fd
swap                   4.8G    32K   4.8G     1%    /tmp
swap                   4.8G    48K   4.8G     1%    /var/run
rpool/export           9.8G    32K   3.7G     1%    /export
rpool/export/home      9.8G    31K   3.7G     1%    /export/home
rpool                  9.8G   106K   3.7G     1%    /rpool
raidpl1                158M    36K   100M     1%    /raidpl1
raidpl1/oracle         158M    58M   100M    37%    /raidpl1/oracle

root@mysrv1 #
root@mysrv1 # zfs get all raidpl1
NAME     PROPERTY              VALUE                  SOURCE
raidpl1  type                  filesystem             -
raidpl1  creation              Mon Apr 14  8:56 2014  -
raidpl1  used                  58.6M                  -
raidpl1  available             99.9M                  -
raidpl1  referenced            36.0K                  -
raidpl1  compressratio         1.00x                  -
raidpl1  mounted               yes                    -
raidpl1  quota                 none                   default
raidpl1  reservation           none                   default
raidpl1  recordsize            128K                   default
raidpl1  mountpoint            /raidpl1               default
raidpl1  sharenfs              off                    default
raidpl1  checksum              on                     default
raidpl1  compression           off                    default
raidpl1  atime                 on                     default
raidpl1  devices               on                     default
raidpl1  exec                  on                     default
raidpl1  setuid                on                     default
raidpl1  readonly              off                    default
raidpl1  zoned                 off                    default
raidpl1  snapdir               hidden                 default
raidpl1  aclmode               discard                default
raidpl1  aclinherit            restricted             default
raidpl1  canmount              on                     default
raidpl1  shareiscsi            off                    default
raidpl1  xattr                 on                     default
raidpl1  copies                1                      default
raidpl1  version               5                      -
raidpl1  utf8only              off                    -
raidpl1  normalization         none                   -
raidpl1  casesensitivity       mixed                  -
raidpl1  vscan                 off                    default
raidpl1  nbmand                off                    default
raidpl1  sharesmb              off                    default
raidpl1  refquota              none                   default
raidpl1  refreservation        none                   default
raidpl1  primarycache          all                    default
raidpl1  secondarycache        all                    default
raidpl1  usedbysnapshots       0                      -
raidpl1  usedbydataset         36.0K                  -
raidpl1  usedbychildren        58.5M                  -
raidpl1  usedbyrefreservation  0                      -
raidpl1  logbias               latency                default
raidpl1  sync                  standard               default
raidpl1  rekeydate             -                      default
raidpl1  rstchown              on                     default

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool offline raidpl1 c5d2        ------ Offlining the disk moves the pool to the DEGRADED state.
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: DEGRADED
status: One or more devices has been taken offline by the administrator.
        Sufficient replicas exist for the pool to continue functioning in a
        degraded state.
action: Online the device using 'zpool online' or replace the device with
        'zpool replace'.
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:37:02 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     DEGRADED     0     0     0
          raidz1-0  DEGRADED     0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    OFFLINE      0     0     0
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool online raidpl1 c5d2            ------ Since we destroyed c5d2's backing file, the disk cannot be brought back online.
SUNW-MSG-ID: ZFS-8000-D3, TYPE: Fault, VER: 1, SEVERITY: Major
EVENT-TIME: 20
PLATFORM: SUNW,Sun-Fire-T200, CSN: -, HOSTNAME: mysrv1
SOURCE: zfs-diagnosis, REV: 1.0
EVENT-ID: 91be2938-68a2-e7f4-db91-bd350ae8b461
DESC: A ZFS device failed.
AUTO-RESPONSE: No automated response will occur.
IMPACT: Fault tolerance of the pool may be compromised.
REC-ACTION: Run 'zpool status -x' for more information. Please refer to the associated reference document at
http://sun.com/msg/ZFS-8000-D3 for the latest service procedures and policies regarding this diagnosis.
warning: device 'c5d2' onlined, but remains in faulted state
use 'zpool replace' to replace devices that are no longer present
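The SUNW-MSG block above comes from the Solaris fault manager, which has logged the failure as a ZFS-8000-D3 fault. Outside of zpool status, the diagnosis can also be inspected on the fault-manager side with something like the following (not shown in this run):

root@mysrv1 # fmadm faulty          ------ lists outstanding faults, including the ZFS fault reported above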

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool status raidpl1                 ------ Still degraded; the failed disk now shows as UNAVAIL (cannot open).
 pool: raidpl1
 state: DEGRADED
status: One or more devices could not be opened.  Sufficient replicas exist for
        the pool to continue functioning in a degraded state.
action: Attach the missing device and online it using 'zpool online'.
   see: http://www.sun.com/msg/ZFS-8000-2Q
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:37:02 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     DEGRADED     0     0     0
          raidz1-0  DEGRADED     0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  cannot open
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool status -x                          ------ Shows only pools that have problems (here, our DEGRADED pool).
 pool: raidpl1
 state: DEGRADED
status: One or more devices could not be opened.  Sufficient replicas exist for
        the pool to continue functioning in a degraded state.
action: Attach the missing device and online it using 'zpool online'.
   see: http://www.sun.com/msg/ZFS-8000-2Q
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:37:02 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     DEGRADED     0     0     0
          raidz1-0  DEGRADED     0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  cannot open
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # mkfile 100m /dev/dsk/c5d2     ------ Re-create the backing file once again.
root@mysrv1 #
root@mysrv1 # zpool online raidpl1 c5d2
warning: device 'c5d2' onlined, but remains in faulted state
use 'zpool replace' to replace devices that are no longer present

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool status -x                    
  pool: raidpl1
 state: DEGRADED
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see: http://www.sun.com/msg/ZFS-8000-4J
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:37:02 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     DEGRADED     0     0     0
          raidz1-0  DEGRADED     0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  corrupted data  ---- This time the device opens, but ZFS sees its data/label as corrupted.
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # df -kh
Filesystem             size   used  avail capacity  Mounted on
rpool/ROOT/s10s_u11wos_24a
                       9.8G   6.1G   3.7G    63%    /
/devices                 0K     0K     0K     0%    /devices
ctfs                     0K     0K     0K     0%    /system/contract
proc                     0K     0K     0K     0%    /proc
mnttab                   0K     0K     0K     0%    /etc/mnttab
swap                   4.8G   504K   4.8G     1%    /etc/svc/volatile
objfs                    0K     0K     0K     0%    /system/object
sharefs                  0K     0K     0K     0%    /etc/dfs/sharetab
/platform/SUNW,Sun-Fire-T200/lib/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/libc_psr.so.1
/platform/SUNW,Sun-Fire-T200/lib/sparcv9/libc_psr/libc_psr_hwcap1.so.1
                       9.8G   6.1G   3.7G    63%    /platform/sun4v/lib/sparcv9/libc_psr.so.1
fd                       0K     0K     0K     0%    /dev/fd
swap                   4.8G    32K   4.8G     1%    /tmp
swap                   4.8G    48K   4.8G     1%    /var/run
rpool/export           9.8G    32K   3.7G     1%    /export
rpool/export/home      9.8G    31K   3.7G     1%    /export/home
rpool                  9.8G   106K   3.7G     1%    /rpool
raidpl1                158M    36K   100M     1%    /raidpl1
raidpl1/oracle         158M    58M   100M    37%    /raidpl1/oracle

root@mysrv1 #
root@mysrv1 # cd /raidpl1/oracle
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # ls -lrth
total 62
drwxr-xr-x   2 root     sys            3 Apr  5 08:48 install_data
drwxr-xr-x   2 root     root           3 Apr  5 08:49 patch
dr-xr-xr-x   4 root     bin           10 Apr  5 09:05 install
-r--r--r--   1 root     root        1.1K Apr 14 09:02 README
drwxr-xr-x 657 root     root         657 Apr 14 09:03 pkg

root@mysrv1 #
root@mysrv1 # cd install_data/
root@mysrv1 #
root@mysrv1 # ls
install_log

root@mysrv1 #
root@mysrv1 # ls -lrth
total 241
-rw-r--r--   1 root     root        120K Apr  5 08:49 install_log

root@mysrv1 #
root@mysrv1 # pwd
/raidpl1/oracle/install_data               -------- The data is still fully accessible, since raidz1 tolerates a single disk failure.

root@mysrv1 #
root@mysrv1 # zpool status
  pool: raidpl1
 state: DEGRADED
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see: http://www.sun.com/msg/ZFS-8000-4J
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:37:02 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     DEGRADED     0     0     0
          raidz1-0  DEGRADED     0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  corrupted data
            c5d3    ONLINE       0     0     0

errors: No known data errors
  pool: rpool
 state: ONLINE
 scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        rpool       ONLINE       0     0     0
          c0d0s0    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #

Now try offlining one more disk. With c5d2 already unavailable this is not possible; observe the error:

root@mysrv1 # zpool offline raidpl1 c5d3
cannot offline c5d3: no valid replicas

root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool scrub raidpl1
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see: http://www.sun.com/msg/ZFS-8000-4J
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:52:13 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     ONLINE       0     0     0
          raidz1-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  corrupted data
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
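As an aside, because the backing file for c5d2 was re-created with mkfile above, an in-place replacement of the device with itself should also work here (the earlier warning even points at 'zpool replace'):

root@mysrv1 # zpool replace raidpl1 c5d2          ------ alternative: resilver onto the re-created c5d2 in place

Below, the hot-spare route is used instead, which is closer to what you would do with a genuinely failed physical disk.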
root@mysrv1 # zpool add raidpl1 spare c5d5          ------ To recover the pool, add a hot spare that will take over from the faulted disk.
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see: http://www.sun.com/msg/ZFS-8000-4J
 scan: scrub repaired 0 in 0h0m with 0 errors on Mon Apr 14 09:52:13 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     ONLINE       0     0     0
          raidz1-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    UNAVAIL      0     0     0  corrupted data
            c5d3    ONLINE       0     0     0
        spares
          c5d5      AVAIL

errors: No known data errors
root@mysrv1 #

root@mysrv1 #
root@mysrv1 # zpool replace raidpl1 c5d2 c5d5        ------ Replace the faulted disk with the hot spare.
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
status: One or more devices could not be used because the label is missing or
        invalid.  Sufficient replicas exist for the pool to continue
        functioning in a degraded state.
action: Replace the device using 'zpool replace'.
   see: http://www.sun.com/msg/ZFS-8000-4J
 scan: resilvered 28.0M in 0h0m with 0 errors on Mon Apr 14 09:53:06 2014
config:

        NAME         STATE     READ WRITE CKSUM
        raidpl1      ONLINE       0     0     0
          raidz1-0   ONLINE       0     0     0
            c5d1     ONLINE       0     0     0
            spare-1  ONLINE       0     0     0
              c5d2   UNAVAIL      0     0     0  corrupted data
              c5d5   ONLINE       0     0     0
            c5d3     ONLINE       0     0     0
        spares
          c5d5       INUSE     currently in use

errors: No known data errors
root@mysrv1 #
root@mysrv1 #

After resilvering completes, detach the faulted disk and the pool will be back to a normal state...

root@mysrv1 # zpool detach raidpl1 c5d2
root@mysrv1 #
root@mysrv1 # zpool status raidpl1
  pool: raidpl1
 state: ONLINE
 scan: resilvered 28.0M in 0h0m with 0 errors on Mon Apr 14 09:53:06 2014
config:

        NAME        STATE     READ WRITE CKSUM
        raidpl1     ONLINE       0     0     0
          raidz1-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d5    ONLINE       0     0     0
            c5d3    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #
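Before the same backing files are reused for the next test, the raidz1 pool is evidently destroyed first (the zpool list output below shows only rpool). That step is not captured above, but it would simply be:

root@mysrv1 # zpool destroy raidpl1          ------ removes the test pool and frees its devices for reuse

(The same presumably happens again between the raidz2 and raidz3 tests.)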

Creating raidz2 and raidz3 pools is similar to raidz1; what varies is how many disk failures each level can tolerate...

root@mysrv1 # zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rpool  9.94G  6.03G  3.91G  60%  ONLINE  -
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool create rdpl raidz2 c5d1 c5d2 c5d3 c5d4
root@mysrv1 #
root@mysrv1 # zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rdpl    382M   231K   382M   0%  ONLINE  -
rpool  9.94G  6.12G  3.82G  61%  ONLINE  -
root@mysrv1 #
root@mysrv1 # zpool status rdpl
  pool: rdpl
 state: ONLINE
 scan: none requested
config:
        NAME        STATE     READ WRITE CKSUM
        rdpl        ONLINE       0     0     0
          raidz2-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    ONLINE       0     0     0
            c5d3    ONLINE       0     0     0
            c5d4    ONLINE       0     0     0
errors: No known data errors
root@mysrv1 #

So raidz2 tolerates a maximum of two disk failures.
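To convince yourself of that, you could repeat the offline experiment on rdpl (not shown in this run): ZFS should allow two devices of the raidz2 vdev to be taken offline, but refuse a third with the same 'no valid replicas' error we saw for raidz1.

root@mysrv1 # zpool offline rdpl c5d1
root@mysrv1 # zpool offline rdpl c5d2          ------ pool becomes DEGRADED, but the data is still readable
root@mysrv1 # zpool offline rdpl c5d3
cannot offline c5d3: no valid replicas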

= = = = = = = = = = = = = = = =

Similarly, raidz3 tolerates a maximum of three disk failures. Creation of a raidz3 pool is as follows:

root@mysrv1 #
root@mysrv1 # zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rpool  9.94G  6.12G  3.81G  61%  ONLINE  -

root@mysrv1 #
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool create rpl3 raidz3 c5d1 c5d2 c5d3 c5d4 c5d5
root@mysrv1 #
root@mysrv1 #
root@mysrv1 # zpool list
NAME    SIZE  ALLOC   FREE  CAP  HEALTH  ALTROOT
rpl3    476M   304K   476M   0%  ONLINE  -
rpool  9.94G  6.16G  3.78G  61%  ONLINE  -
root@mysrv1 #
root@mysrv1 # zpool status rpl3
  pool: rpl3
 state: ONLINE
 scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        rpl3        ONLINE       0     0     0
          raidz3-0  ONLINE       0     0     0
            c5d1    ONLINE       0     0     0
            c5d2    ONLINE       0     0     0
            c5d3    ONLINE       0     0     0
            c5d4    ONLINE       0     0     0
            c5d5    ONLINE       0     0     0

errors: No known data errors
root@mysrv1 #

root@mysrv1 #

##################################################################################



 
