Total Pageviews

Showing posts with label SOLARIS 11. Show all posts
Showing posts with label SOLARIS 11. Show all posts

Thursday, 20 October 2016

Solaris 10 Branded ZONE in Solaris 11 Host !!!

In this post, I am going to show how to configure and install a branded zone. In my case I am taking my Solaris 11 host as global zone and installing Solaris 10 in it.

We need a branded zone when the base machine (the global zone) runs one OS version and we need a zone running a different OS version within it.

A branded zone must be installed with its own separate OS, unlike a whole root zone (where all packages are copied from the global zone) or a sparse root zone (where the global zone's packages are shared).

root@Solaris11:~# zoneadm list -vc
  ID NAME             STATUS      PATH                         BRAND      IP
   0 global           running     /                            solaris    shared
root@Solaris11:~#
root@Solaris11:~#            As you see my Solaris 11 host is brand new with no non global zones...


root@Solaris11:~#
root@Solaris11:~# mkdir -p /export/home/zone1     ----- home directory for our Zone...
root@Solaris11:~#

root@Solaris11:~# dladm show-phys
LINK              MEDIA                STATE      SPEED  DUPLEX    DEVICE
net1              Ethernet             unknown    0      unknown   e1000g1
net2              Ethernet             unknown    0      unknown   e1000g2
net0              Ethernet             up         1000   full      e1000g0
root@Solaris11:~#

root@Solaris11:~# zonecfg -z zone1
Use 'create' to begin configuring a new zone.
zonecfg:zone1> create -b                                     to state it is a branded zone....
zonecfg:zone1>
zonecfg:zone1>
zonecfg:zone1> set brand=solaris10
zonecfg:zone1> 
zonecfg:zone1>
zonecfg:zone1> set zonepath=/export/home/zone1
zonecfg:zone1>
zonecfg:zone1> set autoboot=true
zonecfg:zone1> add net
zonecfg:zone1:net> set address=10.0.0.175/24
zonecfg:zone1:net> set physical=net0
zonecfg:zone1:net> info
net 0:
        address: 10.0.0.175/24
        allowed-address not specified
        configure-allowed-address: true
        physical: net0
        defrouter not specified
zonecfg:zone1:net>
zonecfg:zone1:net>
zonecfg:zone1:net> end
zonecfg:zone1>
zonecfg:zone1> info
zonename: zone1
zonepath: /export/home/zone1
brand: solaris10
autoboot: true
autoshutdown: shutdown
bootargs:
pool:
limitpriv:
scheduling-class:
ip-type: exclusive
hostid:
fs-allowed:
net 0:
        address: 10.0.0.175/24
        allowed-address not specified
        configure-allowed-address: true
        physical: net0
        defrouter not specified
zonecfg:zone1>
zonecfg:zone1>
zonecfg:zone1> set ip-type=shared
zonecfg:zone1> verify
zonecfg:zone1> commit
zonecfg:zone1> exit
root@Solaris11:~#
root@Solaris11:~#
root@Solaris11:~# zoneadm list -vc
  ID NAME             STATUS      PATH                         BRAND      IP
   0 global           running     /                            solaris    shared
   - zone1            configured  /export/home/zone1           solaris10  shared
root@Solaris11:~#

Let me login to my Solaris 10 host to create Flash Archive...

root@Solaris11:~#
root@Solaris11:~# ssh 10.0.0.55
The authenticity of host '10.0.0.55 (10.0.0.55)' can't be established.
RSA key fingerprint is 83:07:bd:a2:f2:02:46:df:67:3d:01:af:ed:d8:9a:cf.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.0.0.55' (RSA) to the list of known hosts.
Password:
Last login: Tue Oct  4 22:02:59 2016 from 10.0.0.43
Oracle Corporation      SunOS 5.10      Generic Patch   January 2005
#
# bash
bash-3.2# uname -a
SunOS Solaris10 5.10 Generic_147148-26 i86pc i386 i86pc

bash-3.2# df -kh /
Filesystem             size   used  avail capacity  Mounted on
/dev/dsk/c0d0s0        8.1G   3.9G   4.1G    49%    /
bash-3.2#
bash-3.2# 

bash-3.2# flarcreate -n arch1 -c archive1.flar               --- cmd to create flash archive....
Full Flash
Checking integrity...
Integrity OK.
Running precreation scripts...
Precreation scripts done.
Determining the size of the archive...

7896142 blocks
The archive will be approximately 2.13GB.
Creating the archive...
7896144 blocks
Archive creation complete.
Running postcreation scripts...
Postcreation scripts done.

Running pre-exit scripts...
Pre-exit scripts done.
bash-3.2#

bash-3.2# pwd
/
bash-3.2#
bash-3.2# ls
TT_DB          boot           etc            kernel         mnt            platform       system         var
archive1.flar  dev            export         lib            net            proc           tmp            vol
bin            devices        home           lost+found     opt            sbin           usr
bash-3.2#
bash-3.2#
bash-3.2# ls -lrth archive1.flar
-rw-r--r--   1 root     root        2.1G Oct  4 22:31 archive1.flar
bash-3.2#

bash-3.2# df -kh /
Filesystem             size   used  avail capacity  Mounted on
/dev/dsk/c0d0s0        8.1G   6.0G   2.0G    76%    /
bash-3.2#
bash-3.2# exit
# Connection to 10.0.0.55 closed.
root@Solaris11:~#
root@Solaris11:~#
root@Solaris11:~# scp 10.0.0.55:/archive1.flar /export/home/
Password:
archive1.flar        100% |***********************************************************************|  2188 MB    06:35
root@Solaris11:~#
root@Solaris11:~#
root@Solaris11:~#
root@Solaris11:~# zoneadm list -vc
  ID NAME             STATUS      PATH                         BRAND      IP
   0 global           running     /                            solaris    shared
   - zone1            configured  /export/home/zone1           solaris10  shared
root@Solaris11:~#

Let us install OS in our new Zone....

root@Solaris11:~#
root@Solaris11:~# zoneadm -z zone1 install -a /export/home/archive1.flar -p
The following ZFS file system(s) have been created:
    rpool/export/home/zone1
Progress being logged to /var/log/zones/zoneadm.20161004T232607Z.zone1.install
    Installing: This may take several minutes...
Postprocessing: This may take a while...
   Postprocess: Updating the image to run within a zone
   Postprocess: Migrating data
        from: rpool/export/home/zone1/rpool/ROOT/zbe-0
          to: rpool/export/home/zone1/rpool/export
   Postprocess: A backup copy of /export is stored at /export.backup.20161004T233804Z.
It can be deleted after verifying it was migrated correctly.

        Result: Installation completed successfully.
Log saved in non-global zone as /export/home/zone1/root/var/log/zones/zoneadm.20161004T232607Z.zone1.install
root@Solaris11:~#
root@Solaris11:~#
root@Solaris11:~# zoneadm list -vc
  ID NAME             STATUS      PATH                         BRAND      IP
   0 global           running     /                            solaris    shared
   - zone1            installed   /export/home/zone1           solaris10  shared
root@Solaris11:~#

Next step is to bring our ZONE to "ready" state....

root@Solaris11:~#
root@Solaris11:~# zoneadm -z zone1 ready
root@Solaris11:~#
root@Solaris11:~# zoneadm list -vc
  ID NAME             STATUS      PATH                         BRAND      IP
   0 global           running     /                            solaris    shared
   2 zone1            ready       /export/home/zone1           solaris10  shared
root@Solaris11:~#

Now boot the zone...

root@Solaris11:~# zoneadm -z zone1 boot
root@Solaris11:~# zoneadm list -vc
  ID NAME             STATUS      PATH                         BRAND      IP
   0 global           running     /                            solaris    shared
   2 zone1            running     /export/home/zone1           solaris10  shared
root@Solaris11:~#

That's it — we are done with the configuration and installation part, during which we came across the different states of a Zone...

root@Solaris11:~# zlogin -C zone1
[Connected to zone 'zone1' console]

Solaris10 console login: root
Password:
Last login: Tue Oct  4 22:10:05 from 10.0.0.75
Oct  4 19:51:17 Solaris10 login: ROOT LOGIN /dev/console
Oracle Corporation      SunOS 5.10      Generic Patch   January 2005
#
#
# bash
bash-3.2#
bash-3.2# ifconfig -a
lo0:1: flags=2001000849<UP,LOOPBACK,RUNNING,MULTICAST,IPv4,VIRTUAL> mtu 8232 index 1
        inet 127.0.0.1 netmask ff000000
net0:1: flags=100001000843<UP,BROADCAST,RUNNING,MULTICAST,IPv4,PHYSRUNNING> mtu 1500 index 2
        inet 10.0.0.175 netmask ff000000 broadcast 10.255.255.255
lo0:1: flags=2002000849<UP,LOOPBACK,RUNNING,MULTICAST,IPv6,VIRTUAL> mtu 8252 index 1
        inet6 ::1/128
bash-3.2#
bash-3.2#
bash-3.2# who                                     -C is for Console login....
root       console      Oct  4 19:51
bash-3.2#
bash-3.2#
bash-3.2# df -kh
Filesystem             Size   Used  Available Capacity  Mounted on
rpool/ROOT/zbe-0       6.0G   3.8G       2.1G    65%    /
rpool/ROOT/zbe-0/var   6.0G    79M       2.1G     4%    /var
/.SUNWnative/lib       4.9G   2.8G       2.1G    57%    /.SUNWnative/lib
/.SUNWnative/platform
                       4.9G   2.8G       2.1G    57%    /.SUNWnative/platform
/.SUNWnative/sbin      4.9G   2.8G       2.1G    57%    /.SUNWnative/sbin
/.SUNWnative/usr       4.9G   2.8G       2.1G    57%    /.SUNWnative/usr
/dev                     0K     0K         0K     0%    /dev
proc                     0K     0K         0K     0%    /proc
ctfs                     0K     0K         0K     0%    /system/contract
mnttab                   0K     0K         0K     0%    /etc/mnttab
objfs                    0K     0K         0K     0%    /system/object
swap                   1.2G   344K       1.2G     1%    /etc/svc/volatile
/usr/lib/libc/libc_hwcap1.so.1
                       5.9G   3.8G       2.1G    65%    /lib/libc.so.1
fd                       0K     0K         0K     0%    /dev/fd
swap                   1.2G    32K       1.2G     1%    /tmp
/etc/svc/volatile/     1.2G   344K       1.2G     1%    /var/run
rpool/export           6.0G    32K       2.1G     1%    /export
rpool/export/home      6.0G    32K       2.1G     1%    /export/home
rpool                  6.0G    31K       2.1G     1%    /rpool
bash-3.2#
bash-3.2# ~.
[Connection to zone 'zone1' console closed]
root@Solaris11:~#
root@Solaris11:~#
root@Solaris11:~# zoneadm list -vc
  ID NAME             STATUS      PATH                         BRAND      IP
   0 global           running     /                            solaris    shared
   2 zone1            running     /export/home/zone1           solaris10  shared
root@Solaris11:~#
root@Solaris11:~#
root@Solaris11:~# ping 10.0.0.175
10.0.0.175 is alive
root@Solaris11:~#

Easy and simple steps to follow while creating a branded zone, just need a flash archive of required OS version...

#####################################################################################

Monday, 2 February 2015

Renaming Guest LDOM !!!

If we want to rename our guest ldom, we need to do following things :

We can't change an LDOM's name straight away with a single command: once an LDOM is bound and started, all of its resources are linked to the LDOM's name.

So to rename an LDOM, we first need to stop it, unbind it, and then remove it.
Then, using its constraints file, we can add the domain back with its new name.

Following are the steps :

1. Take a fresh backup of constraints for your ldom. (It should be a xml file)

ldm ls-constraints -x testldm >> /tmp/testldm.xml (where testldm is old LDom)

2. Replace the old LDOM name with its new name in the file.

vi /tmp/testldm.xml (Replace the old Ldom with new Ldom)

3. Stop,unbind and remove the domain.

ldm stop testldm

ldm unbind testldm

ldm remove-domain testldm

4. Remove the disks assigned to this particular ldom.

ldm list-bindings | grep -i testldm| more (Will list all vdisk names)

ldm remove-vdsdev testldm_disk_1EF3@primary-vds0

ldm remove-vdsdev testldm-1323@primary-vds0

ldm remove-vdsdev testldm-1264@primary-vds0

5. Now there is no information left related to the old LDOM, so proceed by adding the LDOM with its new name using our backup of the constraints file.

ldm add-domain -i /tmp/testldm.xml

ldm bind myprod (where myprod is the new LDom)

ldm start myprod

root@RRLT5DVQA:~# ldm ls | grep myprod
myprod          active     -t----  5003    16    32G      1.2%  1.6%  6s

Thus we can rename a ldom by following above steps.

################################################################################

Hostname Change in Solaris 10 & 11 !!!

In Solaris 10, we all know how to change hostname of server without reboot.
It is so simple, just to edit entry of hostname in few files.

/etc/nodename 
/etc/hostname.*interface 
/etc/inet/hosts or /etc/hosts
/etc/inet/ipnodes 

Then we need to take care of the crash dump location. Usually the default crash dump location is /var/crash/"hostname", so rename the directory in this location:

# cd /var/crash
# mv oldname newname

Coming to Solaris 11, modifications to the system configuration can be achieved through a single command, "svccfg". Centralizing such management simplifies configuration and administration.

Few example like,

# svccfg -s svc:/system/environment:init --- we can modify environmental values like Timezone.

then >setprop TZ=Asia/Calcutta.

Similarly to change hostname, we have svccfg -s svc:/system/identity:node rather than editing individual files like /etc/nodename for hostname and /etc/default/init for Timezone.

To change hostname in Solaris 11 follow these steps :

svccfg -s system/identity:node setprop config/nodename="MyProdServer"

svccfg -s system/identity:node setprop config/loopback="MyProdServer"

svccfg -s system/identity:node refresh

svcadm restart system/identity:node

As shown above, in the identity:node service we need to change two properties, "nodename" and "loopback". After setting the properties, simply restart the service so the modifications take effect.

Take a new session and observe the change in hostname.

################################################################################

Thursday, 8 May 2014

SNAPSHOT from ILOM by CLI and GUI !!!

Snapshot is a utility provided by ILOM to collect SP(Service Processor) data in High end servers like T-series and M-series.

To run the snapshot, we have two methods :
1) ILOM CLI (Command Line Interface).
2) WEBGUI (Graphical User Interface).

Let me start with ILOM CLI, In CLI we use ftp or tftp protocol.
For this we need to login to ILOM.

Oracle(R) Integrated Lights Out Manager
Version 3.2.1.7.d r86718
Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
Warning: password is set to factory default.
Hostname: SDELB0631LLP05-SP

->
-> cd /SP/diag/snapshot               ------ Go to snapshot location.
/SP/diag/snapshot
->
-> show
 /SP/diag/snapshot
    Targets:
    Properties:
        dataset = normal
        dump_uri = (Cannot show property)
        encrypt_output = false
        result = (none)
    Commands:
        cd
        set
        show
->

Here we can observe dump_uri, which specifies where the snapshot dump should be generated.
So we need to set this dump location.

NOTE: Since the snapshot is a large file, check the available space at the desired location before setting dump_uri.

-> cd /HOST
/HOST
-> 
-> start /HOST/console
Are you sure you want to start /HOST/console (y/n)? y
Serial console started.  To stop, type #.
LLPT52 console login: root
Password:
May  8 05:08:53 LLPT52 login: ROOT LOGIN /dev/console
Last login: Wed May  7 18:09:46 from 10.32.232.18
Oracle Corporation      SunOS 5.11      11.1    November 2013
You have new mail.
root@LLPT52:~#
root@LLPT52:~#
root@LLPT52:~# df -kh
Filesystem             Size   Used  Available Capacity  Mounted on
rpool/ROOT/solaris     196G   4.2G       109G     4%    /
/devices                 0K     0K         0K     0%    /devices
/dev                     0K     0K         0K     0%    /dev
ctfs                     0K     0K         0K     0%    /system/contract
proc                     0K     0K         0K     0%    /proc
mnttab                   0K     0K         0K     0%    /etc/mnttab
swap                    98G   2.6M        98G     1%    /system/volatile
objfs                    0K     0K         0K     0%    /system/object
sharefs                  0K     0K         0K     0%    /etc/dfs/sharetab
fd                       0K     0K         0K     0%    /dev/fd
rpool/ROOT/solaris/var
                       196G   290M       109G     1%    /var
swap                    98G    32K        98G     1%    /tmp
rpool/VARSHARE         196G   102K       109G     1%    /var/share
rpool/export           196G    32K       109G     1%    /export
rpool/export/home      196G    31K       109G     1%    /export/home
rpool                  196G    73K       109G     1%    /rpool
root@LLPT52:~#
root@LLPT52:~#
root@LLPT52:~#
root@LLPT52:~# cd /tmp
root@LLPT52:/tmp# ls
dbus-nWFJlDIOUn          dbus-RsYSLGKvTY          gdm-auth-cookies-y7aO6d
dbus-q4KAq7ZbG1          disk-details.txt         hsperfdata_root
root@LLPT52:~#
root@LLPT52:~#
Serial console stopped.
->
->
->
-> cd /SP/diag/snapshot
/SP/diag/snapshot
->
-> show
 /SP/diag/snapshot
    Targets:
    Properties:
        dataset = normal
        dump_uri = (Cannot show property)
        encrypt_output = false
        result = (none)
    Commands:
        cd
        set
        show
->

Now set the dump_uri as shown below:
Here we need to mention the protocol and the host IP address... (not the ILOM IP)

->
-> set dump_uri=ftp://root@10.32.232.16/tmp/
Enter remote user password: *******                    ----- we need to enter our ftp user's passwd
Set 'dump_uri' to 'ftp://root@10.32.232.16/tmp/'

-> show
 /SP/diag/snapshot
    Targets:
    Properties:
        dataset = normal
        dump_uri = (Cannot show property)
        encrypt_output = false
        result = Running                                 ----- we can observe that the status of result is running.
    Commands:
        cd
        set
        show
->
->                  It takes long time to complete, around 1hr minimum.
->
-> show
 /SP/diag/snapshot
    Targets:
    Properties:
        dataset = normal
        dump_uri = (Cannot show property)
        encrypt_output = false
        result = Running
    Commands:
        cd
        set
        show
->

Check the status in periodic time intervals, once it gets completed we will get the status of result as shown below. (SNAPSHOT COMPLETE DONE)

->
-> show
 /SP/diag/snapshot
    Targets:
    Properties:
        dataset = normal
        dump_uri = (Cannot show property)
        encrypt_output = false
        result = Collecting data into
ftp://root@ip_address>/tmp/***.zip
Snapshot Complete.
Done.
->

Now go and collect the snapshot zip file from the provided directory (/tmp).

******************

Next is GUI method :

In the GUI method, we need to log in to the ILOM IP from the browser URL:
https://ipaddress/

Soon after entering credentials we can login to ILOM and can see a page as shown below :
NOTE : In screens observe the highlighted red box.



Then click on ILOM Administration :


Now click on the maintenance:


After clicking on maintenance, a page will be loaded then select the snapshot :


Then we can observe the option " Run " , click on it.


Now we need to select option " save " the snapshot in our local PC :


Lastly we need to select our desired location to where our snapshot should be downloaded:


Once download gets completed successfully, we can check our given location " My Documents " for the snapshot file.

#################################################################################

Tuesday, 6 May 2014

Configuring Control/Service Domain !!!

Soon after creating a control domain, we need to make it a service domain so that it can provide virtual services (such as disk drives and switches) from its resources to guest LDOMs.

root-myt5server:~#
root-myt5server:~# ldm list
NAME             STATE      FLAGS   CONS    VCPU  MEMORY   UTIL  UPTIME
primary          active     -n-c--  SP      64    32640M   0.4%  21m
root-myt5server:~#


Reduce control domain's resources as per its requirement....

root-myt5server:~#
root-myt5server:~#
ldm set-vcpu 16 primary
Crypto unit 7 was removed from domain primary
Crypto unit 6 was removed from domain primary
Crypto unit 5 was removed from domain primary
Crypto unit 4 was removed from domain primary
Crypto unit 3 was removed from domain primary
Crypto unit 2 was removed from domain primary
root-myt5server:~#
root-myt5server:~#
root-myt5server:~# ldm list
NAME             STATE      FLAGS   CONS    VCPU  MEMORY   UTIL  UPTIME
primary          active     -n-c--  SP      16    32640M   0.4%  23m
root-myt5server:~#
root-myt5server:~#

root-myt5server:~#
root-myt5server:~# ldm remove-mem 20g primary
root-myt5server:~#
root-myt5server:~#
root-myt5server:~# ldm list
NAME             STATE      FLAGS   CONS    VCPU  MEMORY   UTIL  UPTIME
primary          active     -n-c--  SP      16    12160M   0.3%  24m
root-myt5server:~#
root-myt5server:~#
root-myt5server:~#
root-myt5server:~# ldm list-services
root-myt5server:~#
root-myt5server:~#


Currently there are no virtual services under my control domain.

Let us add virtual services starting with virtual switch (vsw).

root-myt5server:~#
root-myt5server:~#
dladm show-link
LINK                CLASS     MTU    STATE    OVER
net1                phys      1500   unknown  --
net2                phys      1500   unknown  --
net0                phys      1500   up       --                     we need a netdev for a vsw.
net3                phys      1500   unknown  --
root-myt5server:~#
root-myt5server:~#
root-myt5server:~#

root@testserver:~# ldm add-vsw
Virtual switch service and LDom are missing

Usage:
        ldm add-vsw [-q] [default-vlan-id=<vid>] [pvid=<pvid>]
                [vid=<vid1,vid2,...>] [mac-addr=<num>] [net-dev=<device>]
                [linkprop=phys-state] [mode=<mode>] [mtu=<mtu>] [id=<switchid>]
                [inter-vnet-link=<on|off>] <vswitch_name> <ldom>
root-myt5server:~#

root-myt5server:~# ldm add-vsw net-dev=net0 primary-vsw0 primary
root-myt5server:~#
root-myt5server:~#
root-myt5server:~# ldm list-services
VSW
    NAME             LDOM             MAC               NET-DEV   ID   DEVICE     LINKPROP   DEFAULT-VLAN-ID PVID VID                  MTU   MODE   INTER-VNET-LINK
    primary-vsw0     primary          00:14:4f:fa:1a:49 net0      0   
switch@0              1               1                         1500         on
root-myt5server:~#
root-myt5server:~#


Next we need a vcc to provide virtual console service to all our guest ldoms,

root-myt5server:~# ldm add-vcc
Usage:
        ldm add-vcc port-range=<x>-<y> <vcc_name> <ldom>

root-myt5server:~#
root-myt5server:~#

root-myt5server:~# ldm add-vcc port-range=5000-5100 primary-vcc primary
root-myt5server:~#
root-myt5server:~#
root-myt5server:~#
root-myt5server:~# ldm list-services
VCC
    NAME             LDOM             PORT-RANGE
    primary-vcc      primary          5000-5100

VSW
    NAME             LDOM             MAC               NET-DEV   ID   DEVICE     LINKPROP   DEFAULT-VLAN-ID PVID VID                  MTU   MODE   INTER-VNET-LINK
    primary-vsw0     primary          00:14:4f:fa:1a:49 net3      0   
switch@0              1               1                         1500         on

root-myt5server:~#

Virtual disk service which provides disk drives to guest ldoms.

root-myt5server:~#
root-myt5server:~# ldm add-vds primary-vds0 primary
root-myt5server:~#
root-myt5server:~#
root-myt5server:~# ldm list-services
VCC
    NAME             LDOM             PORT-RANGE
    primary-vcc      primary          5000-5100
VSW
    NAME             LDOM             MAC               NET-DEV   ID   DEVICE     LINKPROP   DEFAULT-VLAN-ID PVID VID                  MTU   MODE   INTER-VNET-LINK
    primary-vsw0     primary          00:14:4f:fa:1a:49 net3      0   
switch@0              1               1                         1500         on
VDS
    NAME             LDOM             VOLUME         OPTIONS          MPGROUP        DEVICE
   
primary-vds0     primary

root-myt5server:~#
root-myt5server:~#

Let us add some disks to our vds (virtual disk server).

root-myt5server:~#
root-myt5server:~# zpool list
NAME   SIZE  ALLOC   FREE  CAP  DEDUP  HEALTH  ALTROOT
rpool  136G  55.6G  80.4G  40%  1.00x  ONLINE  -
root-myt5server:~#
root-myt5server:~#
root-myt5server:~# zpool status rpool
  pool: rpool
 state: ONLINE
  scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        rpool       ONLINE       0     0     0
         
c3t0d0s0  ONLINE       0     0     0

errors: No known data errors
root-myt5server:~#

root-myt5server:~# format
Searching for disks...done


AVAILABLE DISK SELECTIONS:
       0. c3t0d0 <SUN146G cyl 14087 alt 2 hd 24 sec 848>
         
/pci@0/pci@0/pci@2/scsi@0/sd@0,0

       1. c3t1d0 <SEAGATE-ST914602SSUN146G-0603 cyl 14087 alt 2 hd 24 sec 848>
         
/pci@0/pci@0/pci@2/scsi@0/sd@1,0
Specify
 disk (enter its number): 1
selecting c3t1d0
[disk formatted]

partition> p
Current partition table (unnamed):
Total disk cylinders available: 14087 + 2 (reserved cylinders)

Part      Tag    Flag     Cylinders         Size            Blocks
  0 unassigned    wm       0 -  1030       10.01GB    (1031/0/0)   20982912
  1 unassigned    wu    1031 -  2061       10.01GB    (1031/0/0)   20982912
  2     backup    wu       0 - 14086      136.71GB    (14087/0/0) 286698624
 
3 unassigned    wm    2062 -  4122       20.00GB    (2061/0/0)   41945472
  4 unassigned    wm    4123 -  6183       20.00GB    (2061/0/0)   41945472
  5 unassigned    wm    6184 -  8244       20.00GB    (2061/0/0)   41945472
  6 unassigned    wm    8245 - 10305       20.00GB    (2061/0/0)   41945472
  7 unassigned    wm   10306 - 12366       20.00GB    (2061/0/0)   41945472

partition> q
root-myt5server:~#
root-myt5server:~#
root-myt5server:~# ldm add-vdsdev /dev/dsk/c3t1d0s3 mydsk1@primary-vds0
root-myt5server:~#
root-myt5server:~#
root-myt5server:~# ldm list-services
VCC
    NAME             LDOM             PORT-RANGE
    primary-vcc      primary          5000-5100

VSW
    NAME             LDOM             MAC               NET-DEV   ID   DEVICE     LINKPROP   DEFAULT-VLAN-ID PVID VID                  MTU   MODE   INTER-VNET-LINK
    primary-vsw0     primary          00:14:4f:fa:1a:49 net3      0   
switch@0              1               1                         1500         on

VDS
    NAME             LDOM             VOLUME         OPTIONS          MPGROUP        DEVICE
    primary-vds0     primary          mydsk1                                         /dev/dsk/c3t1d0s3

root-myt5server:~#
root-myt5server:~#
root-myt5server:~# ldm add-vdsdev /dev/dsk/c3t1d0s4
mydsk2@primary-vds0

root-myt5server:~#
root-myt5server:~# ldm add-vdsdev /dev/dsk/c3t1d0s5
mydsk3@primary-vds0

root-myt5server:~#
root-myt5server:~# ldm add-vdsdev /dev/dsk/c3t1d0s6
mydsk4@primary-vds0

root-myt5server:~#
root-myt5server:~# ldm add-vdsdev /dev/dsk/c3t1d0s7
mydsk5@primary-vds0

root-myt5server:~#
root-myt5server:~# ldm list-services
VCC
    NAME             LDOM             PORT-RANGE
    primary-vcc      primary          5000-5100

VSW
    NAME             LDOM             MAC               NET-DEV   ID   DEVICE     LINKPROP   DEFAULT-VLAN-ID PVID VID                  MTU   MODE   INTER-VNET-LINK
    primary-vsw0     primary          00:14:4f:fa:1a:49 net3      0   
switch@0              1               1                         1500         on

VDS
    NAME             LDOM             VOLUME         OPTIONS          MPGROUP        DEVICE
    primary-vds0     primary          mydsk1                      /dev/dsk/c3t1d0s3
                                      mydsk2                                         /dev/dsk/c3t1d0s4
                                      mydsk3                                         /dev/dsk/c3t1d0s5
                                      mydsk4                                         /dev/dsk/c3t1d0s6
                                      mydsk5                                         /dev/dsk/c3t1d0s7

root-myt5server:~#
root-myt5server:~#
root-myt5server:~#
root-myt5server:~#
ldm list-bindings
NAME             STATE      FLAGS   CONS    VCPU  MEMORY   UTIL  UPTIME
primary          active     -n-cv-  SP      16    12160M   0.3%  28m

UUID
    e77f7a54-fb08-c28d-8cdd-eba22d2258fd

MAC
    00:14:4f:98:4a:18

HOSTID
    0x84984a18

CONTROL
    failure-policy=ignore
    extended-mapin-space=off
    cpu-arch=native
    rc-add-policy=
    shutdown-group=0

DEPENDENCY
    master=

CORE
    CID    CPUSET
    0      (0, 1, 2, 3, 4, 5, 6, 7)
    1      (8, 9, 10, 11, 12, 13, 14, 15)

VCPU
    VID    PID    CID    UTIL STRAND
    0      0      0      0.6%   100%
    1      1      0      0.4%   100%
    2      2      0      0.4%   100%
    3      3      0      0.3%   100%
    4      4      0      0.3%   100%
    5      5      0      0.2%   100%
    6      6      0      0.3%   100%
    7      7      0      0.2%   100%
    8      8      1      0.4%   100%
    9      9      1      0.4%   100%
    10     10     1      0.4%   100%
    11     11     1      0.4%   100%
    12     12     1      0.3%   100%
    13     13     1      0.1%   100%
    14     14     1      0.2%   100%
    15     15     1      0.3%   100%

MAU
    ID     CPUSET
    0      (0, 1, 2, 3, 4, 5, 6, 7)
    1      (8, 9, 10, 11, 12, 13, 14, 15)

MEMORY
    RA               PA               SIZE
    0x8000000        0x8000000        128M
    0x510000000      0x510000000      12032M

CONSTRAINT
    threading=max-throughput

VARIABLES
    pm_boot_policy=disabled=1;ttfc=0;ttmr=0;

IO
    DEVICE                           PSEUDONYM        OPTIONS
   
pci@0                            pci
   
niu@80                           niu
   
pci@0/pci@0/pci@8/pci@0/pci@9    MB/RISER0/PCIE0
   
pci@0/pci@0/pci@8/pci@0/pci@1    MB/RISER1/PCIE1
   
pci@0/pci@0/pci@9                MB/RISER2/PCIE2
   
pci@0/pci@0/pci@1/pci@0/pci@2    MB/NET0
   
pci@0/pci@0/pci@1/pci@0/pci@3    MB/NET2
   
pci@0/pci@0/pci@2                MB/SASHBA
VCC
    NAME             PORT-RANGE
    primary-vcc      5000-5100

VSW
    NAME             MAC               NET-DEV   ID   DEVICE     LINKPROP   DEFAULT-VLAN-ID PVID VID                  MTU   MODE   INTER-VNET-LINK
    primary-vsw0     00:14:4f:fa:1a:49 net3      0   
switch@0              1               1                         1500         on
VDS
    NAME             VOLUME         OPTIONS          MPGROUP        DEVICE
  
  primary-vds0     mydsk1                                         /dev/dsk/c3t1d0s3
                     mydsk2                                         /dev/dsk/c3t1d0s4
                     mydsk3                                         /dev/dsk/c3t1d0s5
                     mydsk4                                         /dev/dsk/c3t1d0s6
                     mydsk5                                         /dev/dsk/c3t1d0s7

VCONS
    NAME             SERVICE                     PORT   LOGGING
                     SP
root-myt5server:~#
root-myt5server:~#


Now our control domain is ready enough to provide its services to its guest ldoms.....

#################################################################################