Lately I have applied PSUs to 12cR1 QA and PROD Grid Infrastructure environments. I ran into a number of problems and journaled them, then created a Korn shell script that checks for the causes of those various problems. I have also developed the following procedure for applying a PSU, here to a 2-node 12cR1 RAC environment.

 

Let's apply the July 2018 PSU to the node1/node2 12cR1 RAC cluster.

 

    Save GRID_HOME on node1:

First, clean up GRID_HOME (to save space and get a smaller backup):

root@node1: /u01/GI/12.1.0/.patch_storage # df -h .

Filesystem               Size  Used Avail Use% Mounted on

/dev/mapper/vg10-lv1001   89G   57G   28G  68% /app

root@node1: /u01/GI/12.1.0/.patch_storage # rm -rf 25363740_Mar_7_2017_23_34_43 25078431_Jan_25_2017_05_30_05 25363750_Feb_2_2017_23_57_22

root@node1: /u01/GI/12.1.0/.patch_storage # df -h .

Filesystem               Size  Used Avail Use% Mounted on

/dev/mapper/vg10-lv1001   89G   53G   32G  64% /app

root@node1: /u01/GI/12.1.0/.patch_storage # nice --8 tar cfz - /u01/GI/12.1.0  > /u01/GI/GRIDHOMEavril2018.tgz    => started at 10H01

root@node1: /home/giowner # ls -rtlh /u01/GI/*HOMEavril2018*gz

-rw-r--r-- 1 root root 6.2G Jan 29 10:22 /u01/GI/GRIDHOMEavril2018.tgz
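A quick sanity check on that backup is not part of my original notes, but it costs little: listing the archive without extracting it catches an unreadable tarball now rather than during a restore.

tar tzf /u01/GI/GRIDHOMEavril2018.tgz > /dev/null && echo "GRID_HOME backup OK" || echo "GRID_HOME backup UNREADABLE"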

     

        Carry out pre-patch checks (with my in-house shell script):

root@node1: /tmp # ksh /home/giowner/vérifspréoupostPSU.ksh

Description de la log (en 1 seul mot) ?        => the script asks for a one-word description of its log file

avantPSUJUILL2018

 

Patch level status of Cluster nodes :  818769343                       node2,node1

OPatch Version: 12.2.0.1.8
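I am not publishing vérifspréoupostPSU.ksh itself, but here is a minimal sketch of the kind of checks it performs (patch level, OPatch version, free space, CRS health). This is an illustration only, not the actual script; the paths match this environment, the log naming is arbitrary:

#!/bin/ksh
# Minimal sketch of a pre/post-PSU check script (illustrative only)
GRID_HOME=/u01/GI/12.1.0
print -n "Description de la log (en 1 seul mot) ? " ; read TAG
LOG=/tmp/$(date +%Y%m%d_%H%M)_${TAG}.log
{
  print "== Cluster software patch level =="
  $GRID_HOME/bin/crsctl query crs softwarepatch
  print "== OPatch version =="
  $GRID_HOME/OPatch/opatch version
  print "== Free space in /tmp and the Grid home =="
  df -h /tmp $GRID_HOME
  print "== CRS stack health =="
  $GRID_HOME/bin/crsctl check crs
} | tee $LOG
print "Checks written to $LOG"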

 

        Save ORACLE_HOME on node1:

root@node1: /u01/GI/12.1.0/.patch_storage # nice --7 tar cfz - /u01/OH/oracle/product/12.1.0 > /u01/GI/ORACLEHOMEavril2018.tgz &

[1] 33164

 

           Save GRID_HOME on node2:

First, do some housekeeping to save space.         

root@node2: /u01/GI # df -h .

Filesystem               Size  Used Avail Use% Mounted on

/dev/mapper/vg10-lv1001   89G   55G   30G  65% /app

root@node2: /u01/GI # du -h --max-depth=1 /u01/GI/12.1.0/|sort -h        => a handy way to list directories sorted by size

8.0K    /u01/GI/12.1.0/eons

8.0K    /u01/GI/12.1.0/gnsd

8.0K    /u01/GI/12.1.0/QOpatch

8.0K    /u01/GI/12.1.0/utl

12K     /u01/GI/12.1.0/ctss

12K     /u01/GI/12.1.0/dc_ocm

12K     /u01/GI/12.1.0/gipc

12K     /u01/GI/12.1.0/hs

12K     /u01/GI/12.1.0/mdns

12K     /u01/GI/12.1.0/ohasd

12K     /u01/GI/12.1.0/ologgerd

12K     /u01/GI/12.1.0/osysmond

12K     /u01/GI/12.1.0/scheduler

16K     /u01/GI/12.1.0/diagnostics

16K     /u01/GI/12.1.0/slax

20K     /u01/GI/12.1.0/olap

24K     /u01/GI/12.1.0/addnode

32K     /u01/GI/12.1.0/.opatchauto_storage

36K     /u01/GI/12.1.0/auth

40K     /u01/GI/12.1.0/wlm

68K     /u01/GI/12.1.0/relnotes

116K    /u01/GI/12.1.0/clone

228K    /u01/GI/12.1.0/css

256K    /u01/GI/12.1.0/gpnp

312K    /u01/GI/12.1.0/racg

384K    /u01/GI/12.1.0/precomp

428K    /u01/GI/12.1.0/sqlplus

528K    /u01/GI/12.1.0/dbs

536K    /u01/GI/12.1.0/xag

540K    /u01/GI/12.1.0/xdk

556K    /u01/GI/12.1.0/wwg

732K    /u01/GI/12.1.0/ucp

1.5M    /u01/GI/12.1.0/instantclient

1.6M    /u01/GI/12.1.0/plsql

1.7M    /u01/GI/12.1.0/owm

2.0M    /u01/GI/12.1.0/deinstall

2.6M    /u01/GI/12.1.0/opmn

2.7M    /u01/GI/12.1.0/has

6.4M    /u01/GI/12.1.0/evm

6.4M    /u01/GI/12.1.0/network

11M     /u01/GI/12.1.0/install

12M     /u01/GI/12.1.0/ldap

14M     /u01/GI/12.1.0/demo

15M     /u01/GI/12.1.0/cdata

20M     /u01/GI/12.1.0/sqlpatch

22M     /u01/GI/12.1.0/srvm

29M     /u01/GI/12.1.0/log

32M     /u01/GI/12.1.0/ord

32M     /u01/GI/12.1.0/oui

38M     /u01/GI/12.1.0/jdbc

48M     /u01/GI/12.1.0/dmu

48M     /u01/GI/12.1.0/nls

53M     /u01/GI/12.1.0/oracore

74M     /u01/GI/12.1.0/jlib

74M     /u01/GI/12.1.0/perl

76M     /u01/GI/12.1.0/suptools

77M     /u01/GI/12.1.0/cv

78M     /u01/GI/12.1.0/rest

99M     /u01/GI/12.1.0/crs

104M    /u01/GI/12.1.0/md

132M    /u01/GI/12.1.0/cfgtoollogs

147M    /u01/GI/12.1.0/oc4j

160M    /u01/GI/12.1.0/jdk

201M    /u01/GI/12.1.0/OPatch

208M    /u01/GI/12.1.0/crf

262M    /u01/GI/12.1.0/rdbms

263M    /u01/GI/12.1.0/assistants

336M    /u01/GI/12.1.0/javavm

910M    /u01/GI/12.1.0/tfa

985M    /u01/GI/12.1.0/lib

1.5G    /u01/GI/12.1.0/inventory

2.1G    /u01/GI/12.1.0/bin

3.1G    /u01/GI/12.1.0/usm

12G     /u01/GI/12.1.0/.patch_storage

23G     /u01/GI/12.1.0/

root@node2: /u01/GI # cd /u01/GI/12.1.0/.patch_storage

root@node2: /u01/GI/12.1.0/.patch_storage # ls -lrth

total 128K

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 21436941_Aug_13_2015_04_00_40

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 23854735_Sep_29_2016_23_50_00

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 24006101_Oct_1_2016_12_33_50

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 23054246_Jul_5_2016_07_07_59

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 22291127_Apr_6_2016_03_46_21

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 21948354_Dec_20_2015_23_39_33

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 21359755_Oct_21_2015_02_52_58

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 20831110_Jul_11_2015_00_45_40

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 20299023_Mar_16_2015_22_21_54

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 19769480_Dec_15_2014_06_54_52

drwxr-xr-x  4 giowner oracle 4.0K Nov  2  2016 24007012_Aug_30_2016_00_17_17

drwxr-xr-x  4 giowner oracle 4.0K Sep 10  2017 25363740_Mar_7_2017_23_34_43

drwxr-xr-x  4 giowner oracle 4.0K Sep 10  2017 25363750_Feb_2_2017_23_57_22

drwxr-xr-x  4 giowner oracle 4.0K Oct 15 14:12 26983807_Nov_8_2017_07_59_12

drwxr-xr-x  4 giowner oracle 4.0K Oct 15 14:14 27338013_Feb_14_2018_10_26_39

drwxr-xr-x  4 giowner oracle 4.0K Oct 15 14:15 27338020_Mar_27_2018_11_48_03

drwxr-xr-x 28 giowner oracle 4.0K Oct 15 14:15 NApply

-rw-r--r--  1 giowner oracle  12K Oct 15 14:15 record_inventory.txt

-rw-r--r--  1 giowner oracle  15K Oct 15 14:15 interim_inventory.txt

-rw-r--r--  1 giowner oracle   93 Oct 15 14:16 LatestOPatchSession.properties

drwxr-xr-x  4 giowner oracle 4.0K Oct 15 14:16 26925311_Dec_6_2017_01_18_05

drwxr-xr-x  4 giowner oracle 4.0K Oct 15 14:16 24732082_Dec_21_2016_07_15_01

drwxr-xr-x  4 giowner oracle 4.0K Oct 15 14:16 26609783_Aug_10_2017_05_36_42

drwxr-xr-x  4 giowner oracle 4.0K Oct 15 14:16 25755742_Jun_29_2017_09_56_57

drwxr-xr-x  4 giowner oracle 4.0K Oct 15 14:16 25171037_Mar_7_2017_12_37_23

drwxr-xr-x  4 giowner oracle 4.0K Oct 15 14:16 26713565_Sep_29_2017_06_57_50

drwxr-xr-x  4 giowner oracle 4.0K Oct 15 14:16 27338041_Mar_16_2018_02_05_00

root@node2: /u01/GI/12.1.0/.patch_storage # rm -rf 21436941_Aug_13_2015_04_00_40 23854735_Sep_29_2016_23_50_00 24006101_Oct_1_2016_12_33_50 23054246_Jul_5_2016_07_07_59 22291127_Apr_6_2016_03_46_21 21948354_Dec_20_2015_23_39_33 21359755_Oct_21_2015_02_52_58 20831110_Jul_11_2015_00_45_40 20299023_Mar_16_2015_22_21_54 19769480_Dec_15_2014_06_54_52 24007012_Aug_30_2016_00_17_17 25363740_Mar_7_2017_23_34_43 25363750_Feb_2_2017_23_57_22

nice --8 tar cfz - /u01/GI/12.1.0 > /u01/GI/GRIDHOMEavril2018.tgz    => started at 10H21

root@node2: /u01/GI/12.1.0/.patch_storage # ls -lh /u01/GI/GRIDHOMEavril2018.tgz

-rw-r--r-- 1 root root 5.3G Jan 29 10:38 /u01/GI/GRIDHOMEavril2018.tgz

         

            Upgrade OPatch on all nodes:

        Upgrade OPatch for all GRID_HOMEs, as root:

scp giowner@otherserver:/u01/OH/oracle/product/12.1.0/DernierOPatch--p6880880_122010_Linux-x86-64.zip /u01/GI/12.1.0/

cd /u01/GI/12.1.0/ ; chown giowner:oracle DernierOPatch--p6880880_122010_Linux-x86-64.zip ; cd $GRID_HOME; ./OPatch/opatch version ; du -sh ./OPatch

cp -R  ./OPatch/ ./OPatch12.2.0.1.8 && du -sh ./OPatch12.2.0.1.8    => save the old OPatch, just in case

rm -Rf ./OPatch/ && unzip DernierOPatch--p6880880_122010_Linux-x86-64.zip &&  ./OPatch/opatch version ; chown -R giowner:oracle ./OPatch  ;  ls -dlah OPatch ; mv DernierOPatch--p6880880_122010_Linux-x86-64.zip /u01/OH/oracle/product/12.1.0/

        Upgrade OPatch for all ORACLE_HOMEs, as oracle:

cd $ORACLE_HOME;./OPatch/opatch version ; du -sh ./OPatch

cp -R  ./OPatch/ ./OPatch12.2.0.1.8  && du -sh ./OPatch12.2.0.1.8

rm -Rf ./OPatch/ && unzip DernierOPatch--p6880880_122010_Linux-x86-64.zip &&  ./OPatch/opatch version ; ls -dlah OPatch
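A check I like to add afterwards (not in my original notes): confirm that every home on both nodes now reports the new OPatch version. This assumes ssh equivalence between the nodes for the user running it:

for h in /u01/GI/12.1.0 /u01/OH/oracle/product/12.1.0 ; do
  for n in node1 node2 ; do
    echo -n "$n  $h : " ; ssh $n "$h/OPatch/opatch version | head -1"
  done
done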

     

           Save ORACLE_HOME on node2

nice --8 tar cfz - /u01/OH/oracle/product/12.1.0 > /u01/GI/ORACLEHOMEavril2018.tgz    => started at 10H49

     

           Bring the PSU onto node1

        as root:

root@node1: /u01/GI/12.1.0 # mkdir -p /app/distrib/patch/ ; chmod g+w /app/distrib/patch/ ; chown oracle:oracle /app/distrib/patch/ ; mount -o vers=3,nolock 10.20.30.40:/patches/LinuxGI /mnt

        as giowner

cd /mnt/oracle/Linux64/12.1.0.2/ ; unzip PSUJUL2018pourRAC--p27967747_121020_Linux-x86-64.zip -d /app/distrib/patch/

     

            Preparations:

Make sure /tmp has at least 1GB of free space.
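A programmatic way to check it (GNU df, 1GB threshold as above):

df -PBG /tmp | awk 'NR==2 { if ($4+0 < 1) print "WARNING: less than 1GB free in /tmp" ; else print "/tmp free space OK: " $4 " available" }'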

    Log some data about the cluster before applying the patch:

As giowner:

/u01/GI/12.1.0/bin/kfod op=patchlvl >> ~/$(date +%Y%m%d_)lsinvBEFORE ; $GRID_HOME/OPatch/opatch lsinventory -detail -oh $GRID_HOME >> ~/$(date +%Y%m%d_)lsinvBEFORE

     

    Check patch compatibility, as root on node1:

export PATH=$PATH:/u01/GI/12.1.0/OPatch

/u01/GI/12.1.0/OPatch/opatchauto apply /app/distrib/patch/27967747 -analyze -logLevel FINER        => started at 10H56    OPatchAuto successful

     

    Stop any ACFS filesystems, as root:

node1@giowner:+ASM1:~ # /sbin/acfsutil registry|grep "Mount Point"|awk -F ":" '{print "/bin/umount"$2}'

/bin/umount /app/oacfsmp

/bin/umount /data/oacfsmp

root@node1: /u01/GI/12.1.0 # mount | grep acfs
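Note that the acfsutil line above only prints the umount commands; they still have to be executed as root on this node. One way to do it (a sketch; review the generated file before running it):

/sbin/acfsutil registry | grep "Mount Point" | awk -F ":" '{print "/bin/umount"$2}' > /tmp/umount_acfs.sh

cat /tmp/umount_acfs.sh        => review the generated commands first

sh /tmp/umount_acfs.sh

mount | grep acfs              => should now return nothing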

 

    Move any cluster resource that runs on this node only (as the entire Grid Infrastructure stack on this node is about to be brought down):

root@node1: /u01/GI/12.1.0 # crsctl eval relocate resource dollarU -s node1 -n node2 -f        => the EVAL command just says what will happen if you run this relocate command

root@node1: /u01/GI/12.1.0 # crsctl relocate resource dollarU -s node1 -n node2 -f        => the actual relocate command, which moves the dollarU resource from node1 to node2
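To double-check where the resource ended up (dollarU being an application resource specific to this cluster), crsctl can report its state directly:

crsctl stat res dollarU | grep -E "TARGET|STATE"        => STATE should now read "ONLINE on node2"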

 

       Apply the patch on node1, as root

export PATH=$PATH:/u01/GI/12.1.0/OPatch

/u01/GI/12.1.0/OPatch/opatchauto apply /app/distrib/patch/27967747 -logLevel FINE    => started at 11H04    Session log file is /u01/GI/12.1.0/cfgtoollogs/opatchauto/opatchauto2019-01-29_11-04-40AM.log    The id for this session is 6S82

Bringing down CRS service on home /u01/GI/12.1.0

...

It should end with "OPatchauto session completed at Tue Jan 29 11:34:33 2019 Time taken to complete the session 12 minutes, 5 seconds"

 

        Post-patch checks on node1, as giowner

node1@giowner:+ASM1:~ # $ORACLE_HOME/OPatch/opatch lsinventory|grep "Patch description:"

Patch description:  "ACFS PATCH SET UPDATE 12.1.0.2.180717 (27762277)"

Patch description:  "OCW PATCH SET UPDATE 12.1.0.2.180717 (27762253)"

Patch description:  "Database Patch Set Update : 12.1.0.2.180717 (27547329)"

Patch description:  "WLM Patch Set Update: 12.1.0.2.180116 (26983807)"

     

        Post-patch checks on node1, as oracle

$ORACLE_HOME/OPatch/opatch lsinventory|grep "Patch description:"

node1@oracle:MYRACDB1:/u01/OH/oracle/product/12.1.0/bin # $ORACLE_HOME/OPatch/opatch lsinventory|grep "Patch description:"

Patch description:  "OCW PATCH SET UPDATE 12.1.0.2.180717 (27762253)"

Patch description:  "Database Patch Set Update : 12.1.0.2.180717 (27547329)"

     

Log some data about the cluster after applying the patch, as giowner:

/u01/GI/12.1.0/bin/kfod op=patchlvl >> ~/$(date +%Y%m%d_)lsinvAFTER ; $GRID_HOME/OPatch/opatch lsinventory -detail -oh $GRID_HOME >> ~/$(date +%Y%m%d_)lsinvAFTER ; crsctl query crs activeversion -f >> ~/$(date +%Y%m%d_)lsinvAFTER

=> Oracle Clusterware active version on the cluster is [12.1.0.2.0]. The cluster upgrade state is [ROLLING PATCH]. The cluster active patch level is [818769343].
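Since the BEFORE and AFTER snapshots share the same date prefix, a plain diff shows exactly what the PSU changed in the inventory (assuming both files were written on the same day):

diff ~/$(date +%Y%m%d_)lsinvBEFORE ~/$(date +%Y%m%d_)lsinvAFTER | more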

 

If everything is OK so far, and there has been no downtime, let's continue with the next node:

     

    Move any cluster resource that runs on the second node only (as the entire Grid Infrastructure stack on node2 is about to be brought down):

root@node2: /u01/GI/12.1.0 # crsctl eval relocate resource dollarU -s node2 -n node1 -f

root@node2: /u01/GI/12.1.0 # crsctl relocate resource dollarU -s node2 -n node1 -f

 

    Carry out pre-patch checks (with my in-house shell script):

root@node2: /home/giowner # ksh vérifspréoupostPSU.ksh

Description de la log (en 1 seul mot) ?

avantPSUJUILL2018

 

Log any reported problem and fix it.

     

            Bring the PSU onto node2

        as root:

root@node2: /u01/GI/12.1.0 # mkdir -p /app/distrib/patch/ ; chmod g+w /app/distrib/patch/ ; chown oracle:oracle /app/distrib/patch/ ; mount -o vers=3,nolock 10.20.30.40:/patches/LinuxGI /mnt

       as giowner:

cd /mnt/oracle/Linux64/12.1.0.2/ ; unzip PSUJUL2018pourRAC--p27967747_121020_Linux-x86-64.zip -d /app/distrib/patch/

     

            Preparations on node 2:

Make sure /tmp has at least 1GB of free space.

    Log some data about the cluster before applying the patch:

as giowner: /u01/GI/12.1.0/bin/kfod op=patchlvl >> ~/$(date +%Y%m%d_)lsinvBEFORE; $GRID_HOME/OPatch/opatch lsinventory -detail -oh $GRID_HOME >> ~/$(date +%Y%m%d_)lsinvBEFORE

     

    Check patch compatibility (dry run), as root on node2:

export PATH=$PATH:/u01/GI/12.1.0/OPatch

/u01/GI/12.1.0/OPatch/opatchauto apply /app/distrib/patch/27967747 -analyze -logLevel FINER        => started at 10H56    OPatchAuto successful

     

    Stop any ACFS filesystems, as root:

node2@giowner:+ASM2:~ # /sbin/acfsutil registry|grep "Mount Point"|awk -F ":" '{print "/bin/umount"$2}'

/bin/umount /app/oacfsmp

/bin/umount /data/oacfsmp

root@node2: /u01/GI/12.1.0 # mount | grep acfs

 

 

                Apply the patch to the 2nd node, as root

export PATH=$PATH:/u01/GI/12.1.0/OPatch

/u01/GI/12.1.0/OPatch/opatchauto apply /app/distrib/patch/27967747 -logLevel FINE        => started at 15H37        Session log file is /u01/GI/12.1.0/cfgtoollogs/opatchauto/opatchauto2019-01-29_03-38-31PM.log    The id for this session is YRQF    OPatchauto session completed at Tue Jan 29 15:51:44 2019    Time taken to complete the session 13 minutes, 38 seconds

     

        Post-patch checks on node2, as giowner

node2@giowner:+ASM2:~ # $ORACLE_HOME/OPatch/opatch lsinventory|grep "Patch description:"

Patch description:  "ACFS PATCH SET UPDATE 12.1.0.2.180717 (27762277)"

Patch description:  "OCW PATCH SET UPDATE 12.1.0.2.180717 (27762253)"

Patch description:  "Database Patch Set Update : 12.1.0.2.180717 (27547329)"

Patch description:  "WLM Patch Set Update: 12.1.0.2.180116 (26983807)"

 

       Post-patch checks on node2, as oracle

node2@oracle:MYRACDB2:/u01/OH/oracle/product/12.1.0/bin # $ORACLE_HOME/OPatch/opatch lsinventory|grep "Patch description:"

Patch description:  "OCW PATCH SET UPDATE 12.1.0.2.180717 (27762253)"

Patch description:  "Database Patch Set Update : 12.1.0.2.180717 (27547329)"

29-01 16:08 MYRACDB2 SYS AS SYSDBA> select PATCH_ID, DESCRIPTION, ACTION, STATUS from DBA_REGISTRY_SQLPATCH;

  PATCH_ID DESCRIPTION                                               ACTION          STATUS

---------- --------------------------------------------------------- --------------- ---------------

  25171037 DATABASE PATCH SET UPDATE 12.1.0.2.170418                 APPLY           SUCCESS

  27338041 DATABASE PATCH SET UPDATE 12.1.0.2.180417                 APPLY           SUCCESS

  27547329 DATABASE PATCH SET UPDATE 12.1.0.2.180717                 APPLY           SUCCESS 
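When there are several databases on the cluster, the same registry check can be scripted from the shell instead of being typed in SQL*Plus each time; a small sketch, run as oracle with ORACLE_HOME and ORACLE_SID already set:

$ORACLE_HOME/bin/sqlplus -s / as sysdba <<EOF
set lines 130 pages 100
col DESCRIPTION format a60
select PATCH_ID, DESCRIPTION, ACTION, STATUS from DBA_REGISTRY_SQLPATCH order by ACTION_TIME;
EOF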

 

Log some data about the cluster after applying the patch, as giowner:

/u01/GI/12.1.0/bin/kfod op=patchlvl >> ~/$(date +%Y%m%d_)lsinvAFTER ; $GRID_HOME/OPatch/opatch lsinventory -detail -oh $GRID_HOME >> ~/$(date +%Y%m%d_)lsinvAFTER ; crsctl query crs activeversion -f >> ~/$(date +%Y%m%d_)lsinvAFTER

 

    When everything is done, unmount the NFS share that held the patch, on both nodes:

root@node2: /home/giowner # umount /mnt

root@node1: /tmp # umount /mnt
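If either umount complains that the target is busy, fuser shows which processes are still holding the mount (generic Linux, nothing PSU-specific):

fuser -vm /mnt        => lists any process still using the NFS mount; stop it, then retry the umount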

 

     So I patched both nodes of my cluster with no downtime. I have a backup of both ORACLE_HOMEs and GRID_HOMEs, a backup of the previous version of OPatch, and a record of the situation before and after the patch (in the $(date +%Y%m%d_)lsinvBEFORE and $(date +%Y%m%d_)lsinvAFTER text files). Please tell me what you think in the comments below.