Wednesday, June 26, 2013

DNS SERVER FOR SCAN NAME IN 11GR2 RAC CONFIG

An important part of an 11gR2 RAC cluster is the newly introduced SCAN (Single Client Access Name), which needs to resolve through DNS, hence the need to configure a DNS server. In my case I configured DNS on node 1 (ovmd1).


- yum install -y *bind* caching-nameserver

cd /var/named/chroot/etc/


- cp named.caching-nameserver.conf named.conf

[root@ovmd1 etc]# vi named.conf

//
// named.caching-nameserver.conf
//
// Provided by Red Hat caching-nameserver package to configure the
// ISC BIND named(8) DNS server as a caching only nameserver
// (as a localhost DNS resolver only).
//
// See /usr/share/doc/bind*/sample/ for example named configuration files.
//
// DO NOT EDIT THIS FILE - use system-config-bind or an editor
// to create named.conf - edits to this file will be lost on
// caching-nameserver package upgrade.
//
options {
        listen-on port 53 { 192.168.0.103; };
        listen-on-v6 port 53 { ::1; };
        directory       "/var/named";
        dump-file       "/var/named/data/cache_dump.db";
        statistics-file "/var/named/data/named_stats.txt";
        memstatistics-file "/var/named/data/named_mem_stats.txt";

        // Those options should be used carefully because they disable port
        // randomization
        // query-source    port 53;
        // query-source-v6 port 53;

        allow-query     { any; };
        allow-query-cache { any; };
};
logging {
        channel default_debug {
                file "data/named.run";
                severity dynamic;
        };
};
view localhost_resolver {
        match-clients      { any; };
        match-destinations { 192.168.0.103; };
        recursion yes;
        include "/etc/named.rfc1912.zones";
};
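
At this point it is worth sanity-checking the file. named-checkconf ships with BIND; the -t flag points it at the chroot:

named-checkconf -t /var/named/chroot /etc/named.conf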




[root@ovmd1 etc]# cat named.rfc1912.zones
// named.rfc1912.zones:
//
// Provided by Red Hat caching-nameserver package
//
// ISC BIND named zone configuration for zones recommended by
// RFC 1912 section 4.1 : localhost TLDs and address zones
//
// See /usr/share/doc/bind*/sample/ for example named configuration files.
//
zone "." IN {
            type hint;
            file "named.ca";
};

zone "example.com" IN {
            type master;
            file "forward.zone";
            allow-update { none; };
};

zone "localhost" IN {
            type master;
            file "localhost.zone";
            allow-update { none; };
};

zone "0.168.192.in-addr.arpa" IN {
            type master;
            file "reverse.zone";
            allow-update { none; };
};

zone "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa" IN {
        type master;
            file "named.ip6.local";
            allow-update { none; };
};

zone "255.in-addr.arpa" IN {
            type master;
            file "named.broadcast";
            allow-update { none; };
};

zone "0.in-addr.arpa" IN {
            type master;
            file "named.zero";
            allow-update { none; };
};




chgrp named named.conf

cd /var/named/chroot/var/named/

cp localdomain.zone forward.zone

[root@ovmd1 named]# cat forward.zone
$TTL    86400
@       IN SOA  ovmd1.example.com. root.example.com. (
                    42      ; serial (d. adams)
                    3H      ; refresh
                    15M     ; retry
                    1W      ; expiry
                    1D )        ; minimum
            IN NS       ovmd1.example.com.
ovmd1       IN A        192.168.0.103
       


; Oracle RAC Nodes
ovmd1               IN A        192.168.0.103
ovmd2               IN A        192.168.0.104
ovmd1-priv          IN A        192.168.56.103
ovmd2-priv          IN A        192.168.56.104
ovmd1-vip           IN A        192.168.0.251
ovmd2-vip           IN A        192.168.0.252
ovmapp              IN A        192.168.0.105

; Single Client Access Name (SCAN) virtual IP
rac-cluster-scan    IN A        192.168.0.150
rac-cluster-scan    IN A        192.168.0.151
rac-cluster-scan    IN A        192.168.0.152


[root@ovmd1 named]# cat reverse.zone
$TTL   86400
@       IN      SOA     ovmd1.example.com. root.ovmd1.example.com.  (
                                      1997022700 ; Serial
                                      28800      ; Refresh
                                      14400      ; Retry
                                      3600000    ; Expire
                                      86400 )    ; Minimum
        IN      NS      ovmd1.example.com.
103       IN      PTR     ovmd1.example.com.


; Oracle RAC Nodes
103                     IN PTR      ovmd1.example.com.
104                     IN PTR      ovmd2.example.com.
105                     IN PTR      ovmapp.example.com.
251                     IN PTR      ovmd1-vip.example.com.
252                     IN PTR      ovmd2-vip.example.com.


; Single Client Access Name (SCAN) virtual IP
150                     IN PTR      rac-cluster-scan.example.com.
151                     IN PTR      rac-cluster-scan.example.com.
152                     IN PTR      rac-cluster-scan.example.com.

chgrp named forward.zone

chgrp named reverse.zone
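
The zone files themselves can be validated with named-checkzone, also part of BIND:

named-checkzone example.com /var/named/chroot/var/named/forward.zone
named-checkzone 0.168.192.in-addr.arpa /var/named/chroot/var/named/reverse.zone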

- Point every node at this nameserver by putting the following in /etc/resolv.conf:

[root@ovmd1 named]# cat /etc/resolv.conf
search example.com
nameserver 192.168.0.103

service named restart

Stopping named:                                            [  OK  ]
Starting named:                                            [  OK  ]

[root@ovmd1 named]# dig ovmd1

; <<>> DiG 9.3.6-P1-RedHat-9.3.6-20.P1.el5_8.6 <<>> ovmd1
;; global options:  printcmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: 27297
;; flags: qr rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 1, ADDITIONAL: 0

;; QUESTION SECTION:
;ovmd1.                                              IN        A

;; AUTHORITY SECTION:
.                                   10800            IN        SOA     a.root-servers.net. nstld.verisign-grs.com. 2013062301 1800 900 604800 86400

;; Query time: 5150 msec
;; SERVER: 192.168.0.103#53(192.168.0.103)
;; WHEN: Sun Jun 23 22:23:24 2013
;; MSG SIZE  rcvd: 98
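
The NXDOMAIN above is expected: dig does not apply the search domain from /etc/resolv.conf, so the bare name ovmd1 went to the root servers. Query the FQDN (or use dig +search) to test the new zone:

dig ovmd1.example.com @192.168.0.103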

chkconfig named on

chkconfig named --list
named           0:off   1:off   2:on    3:on    4:on    5:on    6:off

[root@ovmd1 named]# nslookup rac-cluster-scan
Server:                        192.168.0.103
Address:         192.168.0.103#53

Name: rac-cluster-scan.example.com
Address: 192.168.0.150
Name: rac-cluster-scan.example.com
Address: 192.168.0.151
Name: rac-cluster-scan.example.com
Address: 192.168.0.152

- To prevent the entries in /etc/resolv.conf from being overwritten (for example by the DHCP client), make the file immutable:

chattr +i /etc/resolv.conf
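
You can confirm the immutable flag with lsattr, and remove it again whenever the file needs editing:

lsattr /etc/resolv.conf     # the 'i' flag should be shown
chattr -i /etc/resolv.conf  # undo before editing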

You should now be able to create your two-node cluster.

Tuesday, June 25, 2013

R12 BASE INSTALLATION ON LINUX 5R9 (OVMD1 - DATABASE) (OVMAPP - APPS)



1. Run yum install oracle-validated

2. From here, download:

openmotif21-2.1.30-11.EL5.i386
xorg-x11-libs-compat-6.8.2-1.EL.33.0.1.i386
binutils-2.17.50.0.6-9.0.1.x86_64

then do

rpm -e --nodeps binutils
rpm -Uvh xorg-x11-libs-compat-6.8.2-1.EL.33.0.1.i386.rpm openmotif21-2.1.30-11.EL5.i386.rpm binutils-2.17.50.0.6-9.0.1.el5.x86_64.rpm

echo exclude='binutils openmotif21 xorg-x11-libs-compat' >> /etc/yum.conf
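
You can then confirm the replacement packages are in place and pinned:

rpm -q binutils openmotif21 xorg-x11-libs-compat
grep exclude /etc/yum.conf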


3. Download patch 6078836 and install it as follows:

mv /usr/lib/libdb.so.2 /usr/lib/libdb.so.2.6078836
cp /media/sf_allshare/downloads/6078836/libdb.so.2 /usr/lib
# alternative if you don't have the patched library:
# ln -s /usr/lib/libgdbm.so.2.0.0 /usr/lib/libdb.so.2
chmod 644 /usr/lib/libdb.so.2
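
A quick check that the library in place is the one you expect:

ls -l /usr/lib/libdb.so.2*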

4. then do

yum install oracle-validated gcc.x86_64 gcc-c++.x86_64 glibc.x86_64 glibc.i686 glibc-common.x86_64 glibc-devel.i386 glibc-devel.i686 glibc-devel.x86_64 libgcc.i386 libgcc.x86_64 libstdc++-devel.i386 libstdc++-devel.x86_64 libstdc++.i386 libstdc++.x86_64 make.x86_64 gdbm.i386 gdbm.x86_64 libXp.i386 libXp.x86_64 libaio.i386 libaio.x86_64 libgomp.x86_64 sysstat.x86_64 util-linux.x86_64 compat-libstdc++-296.i386 compat-libstdc++-33.i386 compat-libstdc++-33.x86_64 elfutils-libelf-devel.x86_64 elfutils-libelf-devel-static.x86_64 libaio-devel.x86_64 unixODBC.i386 unixODBC-devel.i386 unixODBC.x86_64 unixODBC-devel.x86_64

5. then

unlink /usr/lib/libXtst.so.6
ln -s /usr/X11R6/lib/libXtst.so.6.1 /usr/lib/libXtst.so.6


cd /media/sf_allshare/downloads/startCD/Disk1/rapidwiz

./rapidwiz
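
rapidwiz is a GUI installer, so it needs an X display. A minimal sketch, assuming you run it from the console or over SSH X forwarding:

export DISPLAY=:0.0    # or connect with ssh -X and skip this
cd /media/sf_allshare/downloads/startCD/Disk1/rapidwiz
./rapidwiz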

Fill in DB details

Fill in APPS details

Tailing log...

Successful DB installation...
Copy conf_DEV.txt from $ORACLE_HOME/appsutil to a location on the APPS server(s).
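
For example (the /tmp staging path is just an assumption, use whatever suits):

scp $ORACLE_HOME/appsutil/conf_DEV.txt ovmapp:/tmp/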

Call rapidwiz on the Apps server.

Enter the location of conf_DEV.txt.

Successful installation of APPS....

Sunday, June 23, 2013

ISCSI SERVER ACCESSED FROM CLUSTER NODES


OVMD1 is the first node of my two-node RAC cluster. I changed eth1 and added eth2 and eth3 so they match the contents of /etc/hosts below.



[root@ovmd1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth1
DEVICE=eth1
ONBOOT=yes
BOOTPROTO=static
IPADDR=192.168.0.103
NETMASK=255.255.255.0
[root@ovmd1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth2
# Intel Corporation 82540EM Gigabit Ethernet Controller
DEVICE=eth2
BOOTPROTO=static
ONBOOT=yes
IPADDR=1.99.1.1
NETMASK=255.255.0.0
[root@ovmd1 ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth3
DEVICE=eth3
ONBOOT=yes
BOOTPROTO=static
IPADDR=192.168.56.103
NETMASK=255.255.255.0



[root@ovmd1 ~]# cat /etc/hosts
# Do not remove the following line, or various programs
# that require network functionality will fail.
127.0.0.1 localhost.localdomain localhost
192.168.0.103  ovmd1.example.com    ovmd1



192.168.0.105   ovmapp.example.com ovmapp
192.168.0.110   ovmclone.example.com ovmclone

# iSCSI SAN Server
1.99.1.254 san.mgmt.example.com san
##SCAN IP
# This node SAN address
1.99.1.1 ovmd1-san.example.com ovmd1-san
# This node private interconnect address
192.168.56.103 ovmd1-priv.example.com ovmd1-priv
# This node VIP address
192.168.0.251 ovmd1-vip.example.com ovmd1-vip
# Repeat for each node in cluster
192.168.0.104  ovmd2.example.com    ovmd2
1.99.1.2 ovmd2-san.example.com ovmd2-san
192.168.56.104 ovmd2-priv.example.com ovmd2-priv
192.168.0.252 ovmd2-vip.example.com ovmd2-vip
1.99.1.3 ovmclone-san.example.com ovmclone-san

On my virtual machines I installed the VirtualBox Guest Additions, which enable shared folders backed by a USB external drive; this is where I stored my files. It shows up on OVMD1 as the /media/sf_allshare mount point. I actually have two such drives on all the virtual servers. There are limitations to using shared folders, though, so I don't advise installing onto one or even including one as a backup location; they are best used as a staging area for software.


[root@ovmd1 ~]#
[root@ovmd1 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/mapper/VolGroup00-LogVol00
                      287G   50G  223G  19% /
/dev/sda1              99M   43M   52M  45% /boot
tmpfs                1004M  276M  728M  28% /dev/shm
allshare              1.9T   92G  1.8T   5% /media/sf_allshare
allshareplus          1.9T  149G  1.7T   8% /media/sf_allshareplus
[root@ovmd1 ~]#

[root@ovmd1 ~]# cd /media/sf_allshare
[root@ovmd1 sf_allshare]# ls -ltr
total 1272540
-rwxrwx--- 1 root vboxsf    904704 Dec  1  2006 msdia80.dll
drwxrwx--- 1 root vboxsf         0 Mar 11 10:02 $RECYCLE.BIN
drwxrwx--- 1 root vboxsf      4096 Mar 12 14:31 RAC
drwxrwx--- 1 root vboxsf         0 Mar 13 06:40 System Volume Information
drwxrwx--- 1 root vboxsf         0 Mar 13 17:13 win8
drwxrwx--- 1 root vboxsf         0 Mar 13 17:34 winxppro
-rwxrwx--- 1 root vboxsf   1862232 Mar 14 14:17 appsdba_docs.zip
drwxrwx--- 1 root vboxsf         0 Mar 18 09:23 10.5 w DBA Module
-rwxrwx--- 1 root vboxsf 684581290 Mar 18 09:43 win32_11gR2_client.zip
-rwxrwx--- 1 root vboxsf 615698264 Mar 18 17:57 win64_11gR2_client.zip
drwxrwx--- 1 root vboxsf         0 Mar 21 15:24 office
drwxrwx--- 1 root vboxsf         0 Apr  6 07:32 R12bacups
drwxrwx--- 1 root vboxsf      4096 Apr 12 19:59 dba_scripts
drwxrwx--- 1 root vboxsf         0 Apr 24 16:52 host_backup
drwxrwx--- 1 root vboxsf         0 Apr 24 17:20 gparted
drwxrwx--- 1 root vboxsf         0 May 11 11:45 r12_apps_backup
drwxrwx--- 1 root vboxsf         0 May 21 20:10 VIS_RMAN_bkup
drwxrwx--- 1 root vboxsf         0 May 29 13:15 Firefox
drwxrwx--- 1 root vboxsf     12288 May 30 11:19 patches
-rwxrwx--- 1 root vboxsf        76 Jun  4 20:52 nohup.out
drwxrwx--- 1 root vboxsf         0 Jun  5 06:00 rman_backup_scripts
-rwxrwx--- 1 root vboxsf       394 Jun 10 03:15 tnsnames.ora
drwxrwx--- 1 root vboxsf      4096 Jun 14 06:07 swingbench
drwxrwx--- 1 root vboxsf      8192 Jun 20 08:29 downloads
[root@ovmd1 sf_allshare]# 

  • Install the package dependencies for Oracle (answer 'y' when prompted):

yum install iscsi-initiator-utils \
gcc.x86_64 \
gcc-c++.x86_64 \
xorg-x11-utils.x86_64 \
xorg-x11-xinit.x86_64 \
unixODBC-devel.x86_64 \
unixODBC.i386 \
unixODBC-devel.i386 \
sysstat.x86_64 \
libstdc++.so.5 \
compat-libstdc++-33-3.2.3-61.x86_64 \
compat-db-4.2.52-5.1.x86_64 \
libaio-devel.x86_64 \
libaio-devel.i386 \
libXp.x86_64 \
libXp-devel.x86_64 \
libXpm-devel.x86_64 \
libXp.i386 \
gdb.x86_64 \
elfutils-libelf-devel.x86_64 \
compat-gcc-34.x86_64 \
compat-gcc-34-c++.x86_64 \
libgnome.x86_64 \
libc-client.x86_64 \
libc-client-devel.x86_64 \
glibc.x86_64 \
libXtst-devel.x86_64 \
libXtst.i386 \
glibc-devel.i386 \
glibc-devel.x86_64 \
pdksh.x86_64

  • Download the oracle-validated RPM to the VM and install it, or just run yum install oracle-validated:

rpm -Uhv oracle-validated-1.0.0-18.el5.x86_64.rpm
This package creates the oracle user and groups and sets kernel parameters, which is a lot quicker than doing it all manually. In theory this should set all the kernel parameters necessary for Grid and Oracle; however, while running the installer I found three parameters it complained about. To set them for the next boot, edit /etc/sysctl.conf as root and set the following values:


fs.file-max = 6815744
net.ipv4.ip_local_port_range = 9000 65500
net.core.wmem_max = 1048576
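
These take effect at the next boot; to apply them immediately:

/sbin/sysctl -p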

  • Start the iSCSI client on ovmd1.


service iscsi start

  Make sure the iSCSI server VM is up and running first.



  • Run a discovery for the iSCSI target on OVMD1.


iscsiadm -m discovery -t sendtargets -p san.mgmt.example.com 
You should get:
1.99.1.254:3260,1 iqn.mgmt.volumes-san
You can verify the disks have been discovered by issuing:
fdisk -l
and:
ls -l /dev/sd*
(the sdb through sdm are the iSCSI disks. If you don't see them, reboot the VM and try the discovery again)
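
If the disks still don't show up after discovery, you can log the initiator in to the target explicitly rather than rebooting, using the target name reported by the discovery above:

iscsiadm -m node -T iqn.mgmt.volumes-san -p 1.99.1.254 --login
iscsiadm -m session    # verify the session is established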
  • Set iscsi to start on boot

chkconfig --level 345 iscsi on
  • Edit the /etc/rc.d/rc.local file to chown the iSCSI disks to oracle on boot. Add the following line to the end of the file:

chown oracle:oinstall /dev/sd[b-x] 
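
Note that this chown is tied to device names, which can change between boots. A udev rule is a more robust alternative; a sketch, assuming the iSCSI LUNs stay at sdb through sdx (the rule file name is arbitrary):

# /etc/udev/rules.d/99-oracle-iscsi.rules
KERNEL=="sd[b-x]", OWNER="oracle", GROUP="oinstall", MODE="0660"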
  • Create directories for the Oracle software (the media might already be staged there):

mkdir /u01 /u02
chown oracle:oinstall /u01 /u02 
Create the oracle user (the oracle-validated package should already have done this) and configure the disks for ASM.
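
A sketch of the oracleasm steps, assuming the LUNs are partitioned and /dev/sdb1 onwards are the partitions to mark (adjust device names to your layout):

/usr/sbin/oracleasm configure -i              # answer oracle / oinstall, load on boot
/usr/sbin/oracleasm init                      # load the kernel module now
/usr/sbin/oracleasm createdisk VOL1 /dev/sdb1
# repeat createdisk for each LUN (VOL2, VOL3, ...), then listdisks shows them: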


[root@ovmd1 named]# /usr/sbin/oracleasm listdisks
VOL1
VOL10
VOL11
VOL12
VOL13
VOL14
VOL15
VOL16
VOL17
VOL18
VOL19
VOL2
VOL20
VOL21
VOL22
VOL23
VOL3
VOL4
VOL5
VOL6
VOL7
VOL8
VOL9


Copy virtual server ovmd1, name the copy ovmd2, and adjust eth1, eth2 and eth3 per /etc/hosts. Also change the hostname to ovmd2 in /etc/sysconfig/network.
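
Because the clone's NICs come up with new MAC addresses, you may also need to clear stale HWADDR lines from the ifcfg files on ovmd2 (a hedged example):

sed -i '/^HWADDR/d' /etc/sysconfig/network-scripts/ifcfg-eth[123]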
