diff --git a/roles/oraasm-manage-diskgroups/defaults/main.yml b/roles/oraasm-manage-diskgroups/defaults/main.yml index 24408ff02..d38107315 100644 --- a/roles/oraasm-manage-diskgroups/defaults/main.yml +++ b/roles/oraasm-manage-diskgroups/defaults/main.yml @@ -12,8 +12,17 @@ oracle_env: ORACLE_HOME: "{{ oracle_home_gi }}" LD_LIBRARY_PATH: "{{ oracle_home_gi }}/lib" +oracle_asm_disk_string: ORCL:* + +oracle_asm_disk_prefix: " + {%- if device_persistence |lower == 'asmlib' -%} + {%- if oracle_asm_disk_string.endswith('*') %}{{ oracle_asm_disk_string[:-1] }} + {%- else -%}{{ oracle_asm_disk_string }} + {%- endif -%} + {%- elif device_persistence |lower == 'udev' -%}{{ oracle_asm_disk_string }} + {%- else -%} + {%- endif -%}" -oracle_asm_disk_prefix: "{% if device_persistence |lower == 'asmlib' %}ORCL:{% elif device_persistence |lower == 'udev' %}/dev/oracle/{% else %}{% endif %}" asmdevice_list: "{% if device_persistence |lower == 'asmlib' %} {%- for disk in item.disk -%}{{ oracle_asm_disk_prefix }}{{ disk.asmlabel | upper }} {%- if not loop.last -%},{%- endif -%} diff --git a/roles/orahost/defaults/main.yml b/roles/orahost/defaults/main.yml index 54f5b3cf1..22c07d689 100644 --- a/roles/orahost/defaults/main.yml +++ b/roles/orahost/defaults/main.yml @@ -20,7 +20,7 @@ oracle_users: # Passwd :Oracle123 - username: oracle uid: 54321 primgroup: oinstall - othergroups: "dba,asmadmin,asmdba,backupdba,dgdba,kmdba,oper" + othergroups: "dba,asmadmin,asmdba,asmoper,backupdba,dgdba,kmdba,oper" passwd: "$6$0xHoAXXF$K75HKb64Hcb/CEcr3YEj2LGERi/U2moJgsCK.ztGxLsKoaXc4UBiNZPL0hlxB5ng6GL.gyipfQOOXplzcdgvD0" grid_users: diff --git a/roles/oraswgi-install/defaults/main.yml b/roles/oraswgi-install/defaults/main.yml index 6a37b7e83..836e728a4 100644 --- a/roles/oraswgi-install/defaults/main.yml +++ b/roles/oraswgi-install/defaults/main.yml @@ -1,6 +1,5 @@ --- -# master_node: true cluster_master: "{{play_hosts[0]}}" role_separation: false hostgroup: "{{ group_names[0] }}" @@ -19,6 +18,11 @@ asmoper_group: asmoper asmdba_group: asmdba # osdba asmadmin_group: asmadmin # osasm +# do not ignore failed runcluvfy.sh +gi_ignoreprereq: false + +giignoreprereqparam: "{% if gi_ignoreprereq | bool %}-ignorePrereq{% endif %}" + www_download_bin: curl # curl (shell module) or get_url module oracle_sw_source_www: http://www/orasw # address to all software if using the get_url module when putting software on the host(s) oracle_sw_source_local: /tmp # Path to all software if using the copy module when putting software on the host(s) @@ -83,9 +87,11 @@ oracle_scan_port: 1521 # Listener port oracle_ic_net: 3.3.3.{{ ansible_all_ipv4_addresses[0].split(".")[-1] }} oracle_asm_init_dg: crs -oracle_asm_disk_string: "{% if device_persistence == 'asmlib' %}{% elif device_persistence == 'udev' %}/dev/oracle/{% else %}{% endif %}" device_persistence: asmlib -cvuqdisk_rpm: "{% if oracle_install_version_gi > '12.1.0.2' %}cvuqdisk-1.0.10-1.rpm{% else %}cvuqdisk-1.0.9-1.rpm{% endif %} " +oracle_asm_disk_string: ORCL:* +cvuqdisk_rpm: "{%- if oracle_install_version_gi > '12.1.0.2' -%}cvuqdisk-1.0.10-1.rpm + {%- else -%}cvuqdisk-1.0.9-1.rpm + {%- endif -%}" oracle_cluster_name: "{{ hostgroup }}" # Name of the cluster when setting up Clustered Grid Infrastructure oracle_hostname: "{{ ansible_fqdn }}" # Full (FQDN) name of the host @@ -107,8 +113,9 @@ oracle_gi_gns_subdomain: a.b.c oracle_gi_gns_vip: gnsvip.a.b.c -opatcharchive: "{{ oracle_stage_install }}/{{ oracle_install_version_gi }}/{% for opatchfile in oracle_opatch_patch if 
opatchfile['version']==oracle_install_version_gi %}{{ opatchfile['filename'] }}{% endfor %}" -# noqa yaml +opatcharchive: "{{ oracle_stage_install }}/{{ oracle_install_version_gi }}/ + {%- for opatchfile in oracle_opatch_patch if opatchfile['version']==oracle_install_version_gi -%}{{ opatchfile['filename'] }} + {%- endfor -%}" oracle_gi_image: "{%- if oracle_sw_copy %}{{ oracle_stage }} {%- else %}{{ oracle_stage_remote }} diff --git a/roles/oraswgi-install/tasks/19.3.0.0.yml b/roles/oraswgi-install/tasks/19.3.0.0.yml index 2f28da2ad..4420697a9 100644 --- a/roles/oraswgi-install/tasks/19.3.0.0.yml +++ b/roles/oraswgi-install/tasks/19.3.0.0.yml @@ -1,20 +1,25 @@ --- +- block: + - debug: msg="install-home-gi | Start Extract files to ORACLE_HOME (gi)" # noqa unnamed-task + run_once: "{{ configure_cluster }}" + + - name: install-home-gi | Extract files to ORACLE_HOME (gi) + unarchive: src={{ oracle_gi_image }} dest={{ oracle_home_gi }} copy=no + with_items: "{{ oracle_sw_image_gi }}" + loop_control: + label: "{{ oracle_gi_image | default ('') }}" + args: + creates: "{{ oracle_home_gi }}/root.sh" + become: true + become_user: "{{ grid_install_user }}" + run_once: "{{ configure_cluster }}" + when: + - oracle_install_version_gi == item.version -- name: install-home-gi | Extract files to ORACLE_HOME (gi) - unarchive: src={{ oracle_gi_image }} dest={{ oracle_home_gi }} copy=no - with_items: "{{ oracle_sw_image_gi }}" - loop_control: - label: "{{ oracle_gi_image | default ('') }}" - args: - creates: "{{ oracle_home_gi }}/root.sh" - become: true - become_user: "{{ grid_install_user }}" - run_once: "{{ configure_cluster }}" tags: - oragridswunpack when: - oracle_home_gi not in checkgiinstall.stdout - - oracle_install_version_gi == item.version # Check for an existing GRID_HOME before reinstallation of OPatch - name: install-home-gi | Check for file GridSetup.sh @@ -29,28 +34,46 @@ msg: "Cannot find {{ oracle_home_gi }}/gridSetup.sh }}" run_once: "{{ configure_cluster }}" -# unarchive didn't worked in some environments. => using unzip directly -# Overwrite existing files from OPatch due to Note 2321749.1 -# - name: install-home-gi | Distribute latest opatch -# shell: unzip -o -d {{oracle_home_gi}} {{opatcharchive}} -# become: true -# become_user: "{{ grid_install_user }}" -# when: oracle_sw_patches is defined and oracle_home_gi not in checkgiinstall.stdout - -- name: install-home-gi | Install cvuqdisk rpm - yum: name="{{ oracle_home_gi }}/cv/rpm/{{ cvuqdisk_rpm }}" state=present - # noqa ignore-errors +- name: check for existing cvuqdisk_rpm + stat: + path: "{{ oracle_home_gi }}/cv/rpm/{{ cvuqdisk_rpm }}" + register: statcvuqdisk + run_once: true when: configure_cluster tags: cvuqdisk - ignore_errors: true -- name: install-home-gi | Recreate ORACLE_HOME (gi) on other nodes - file: name={{ oracle_home_gi }} state=absent - when: configure_cluster and inventory_hostname != cluster_master and oracle_home_gi not in checkgiinstall.stdout +- block: # when: configure_cluster + + # synchronize didn't work on all environments... 
+ # => fetch to ansible controller + # => copy to nodes + - name: install-home-gi | copy cvuqdisk to ansible controller + fetch: + src: "{{ oracle_home_gi }}/cv/rpm/{{ cvuqdisk_rpm }}" + dest: /tmp + mode: 0644 + run_once: true + when: + - statcvuqdisk.stat.exists + tags: cvuqdisk + + - name: install-home-gi | copy cvuqdisk to cluster nodes + copy: + src: "/tmp/{{ cluster_master }}/{{ oracle_home_gi }}/cv/rpm/{{ cvuqdisk_rpm }}" + dest: "{{ oracle_rsp_stage }}/{{ cvuqdisk_rpm }}" + mode: 0644 + tags: cvuqdisk + + - name: install-home-gi | Install cvuqdisk rpm + package: + name: "{{ oracle_rsp_stage }}/{{ cvuqdisk_rpm }}" + state: present + tags: cvuqdisk -- name: install-home-gi | Recreate ORACLE_HOME (gi) on other nodes - file: name={{ oracle_home_gi }} mode=775 owner={{ oracle_user }} group={{ oracle_group }} state=directory - when: configure_cluster and inventory_hostname != cluster_master and oracle_home_gi not in checkgiinstall.stdout + when: + - configure_cluster + - hostvars[cluster_master]['statcvuqdisk']['stat']['exists'] + tags: cvuqdisk - name: install-home-gi | Setup response file for install (GI) template: @@ -62,99 +85,106 @@ backup=yes with_items: "{{ asm_diskgroups }}" run_once: "{{ configure_cluster }}" + loop_control: + label: "{{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }}" tags: - responsefilegi - when: oracle_home_gi not in checkgiinstall.stdout and item.diskgroup == oracle_asm_init_dg - -# returncode of gridSetup.sh is always <> 0 => forced exit 0 -# - name: install-home-gi | Apply Release Update on ORACLE_HOME -# shell: "{{oracle_home_gi}}/gridSetup.sh -silent -applyPSU {{ oracle_stage_remote }}/{{ oracle_install_version_gi }}/{{item.patchid}} -waitforcompletion ; exit 0" -# become: true -# become_user: "{{ grid_install_user }}" -# with_items: -# - "{{ oracle_sw_patches }}" -# register: command_result -# failed_when: "'Successfully applied the patch.' 
not in command_result.stdout_lines"
-#   when: oracle_sw_patches is defined and oracle_install_version_gi == item.version and oracle_home_gi not in checkgiinstall.stdout
-#
-# - debug: msg="Ignore the failure [FATAL] [INS-40426] in output from gridSetup.sh -applyPSU"
-#   when: command_result is defined and oracle_sw_patches is defined
-#
-# - debug: msg={{item.stdout_lines}}
-#   with_items:
-#     - "{{command_result.results}}"
-#   when: oracle_sw_patches is defined and oracle_home_gi not in checkgiinstall.stdout
-
-- name: install-home-gi | Install Grid Infrastructure
-  shell: "{{ oracle_home_gi }}/gridSetup.sh -responseFile {{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }} -waitforcompletion -ignorePrereq -silent"
-  # noqa command-instead-of-shell
-  become: true
-  become_user: "{{ grid_install_user }}"
-  run_once: "{{ configure_cluster }}"
-  tags:
-    - oragridinstall
-  when: oracle_home_gi not in checkgiinstall.stdout # and oracle_sw_unpack
-  register: giinstall
-  failed_when: giinstall.rc not in [0,6]
+  when:
+    - item.diskgroup == oracle_asm_init_dg
+
+- block: # when: oracle_home_gi not in checkgiinstall.stdout
+
+    - include_tasks: runcluvfy.yml
+      when:
+        - force_runcluvfy | default(false) or oracle_home_gi not in checkgiinstall.stdout
+      tags: always
+
+    - debug: msg="install-home-gi | Start Install Grid Infrastructure" # noqa unnamed-task
+      run_once: true
+
+    - name: install-home-gi | Install Grid Infrastructure
+      # giignoreprereqparam renders to -ignorePrereq or to an empty string; in
+      # the free-form command an empty word is simply dropped, whereas an argv
+      # list entry would pass a blank argument to gridSetup.sh.
+      command: "{{ oracle_home_gi }}/gridSetup.sh -responseFile {{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }} -waitforcompletion -silent {{ giignoreprereqparam }}"
+      become: true
+      become_user: "{{ grid_install_user }}"
+      run_once: "{{ configure_cluster }}"
+      tags:
+        - oragridinstall
+      register: giinstall
+      failed_when: giinstall.rc not in [0,6]
+
+    - debug: var=giinstall.stdout_lines # noqa unnamed-task
+      run_once: "{{ configure_cluster }}"
+      when: giinstall.stdout_lines is defined

-- debug: var=giinstall.stdout_lines
-  # noqa unnamed-task
-  run_once: "{{ configure_cluster }}"
-  when: oracle_home_gi not in checkgiinstall.stdout and giinstall.changed
+  when: oracle_home_gi not in checkgiinstall.stdout

-- include_role:
-    # noqa unnamed-task
+- name: install-home-gi | include role oraswgi-manage-patches
+  include_role:
     name: oraswgi-manage-patches
   when: patch_before_rootsh and apply_patches_gi

-- name: install-home-gi | Run oraInstroot script after installation
-  shell: "{{ oracle_inventory_loc }}/orainstRoot.sh"
-  # noqa command-instead-of-shell
-  become: true
-  tags:
-    - runroot
-  when: oracle_home_gi not in checkgiinstall.stdout
-
-- name: install-home-gi | Run root script after installation (Master Node)
-  shell: "{{ oracle_home_gi }}/root.sh"
-  # noqa command-instead-of-shell
-  become: true
-  run_once: "{{ configure_cluster }}"
-  tags:
-    - runroot
-  when: oracle_home_gi not in checkgiinstall.stdout
-  register: rootmaster
-
-- debug: var=rootmaster.stdout_lines
-  # noqa unnamed-task
-  run_once: "{{ configure_cluster }}"
-  when: oracle_home_gi not in checkgiinstall.stdout
+- block: # when: oracle_home_gi not in checkgiinstall.stdout
+    - name: install-home-gi | Run oraInstroot script after installation
+      command: "{{ oracle_inventory_loc }}/orainstRoot.sh"
+      become: true
+      tags:
+        - runroot
+
+    - debug: msg="install-home-gi | Start Run root script after installation (Master Node)" # noqa unnamed-task
+      run_once: true
+
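+    # root.sh must finish on the master node before the remaining nodes may
+    # run it, so the master node runs it on its own here; the other nodes
+    # follow in the staggered task further down.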
}}/root.sh" + become: true + run_once: "{{ configure_cluster }}" + register: rootmaster + tags: + - runroot + + - debug: var=rootmaster.stdout_lines # noqa unnamed-task + run_once: "{{ configure_cluster }}" + when: rootmaster.stdout_lines is defined + + - debug: msg="install-home-gi | Start Run root script after installation (Other Nodes)" # noqa unnamed-task + run_once: true + + # do not start root.sh on all nodes in parallel + # => sleep + - name: install-home-gi | Run root script after installation (Other Nodes) + script: | + /usr/bin/sleep {{ item.0 * 60 }} + {{ oracle_home_gi }}/root.sh + become: true + with_indexed_items: "{{ groups[hostgroup] }}" + tags: + - runroot + when: + - configure_cluster + - inventory_hostname != cluster_master + - inventory_hostname == item.1 + register: rootother + + - debug: var=rootother.stdout_lines # noqa unnamed-task + when: rootother.stdout_lines is defined + + - name: install-home-gi | Execute ConfigTools + command: "{{ oracle_home_gi }}/gridSetup.sh -executeConfigTools -responseFile {{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }} -silent" + become: true + become_user: "{{ grid_install_user }}" + run_once: "{{ configure_cluster }}" + tags: + - runconfigtool + when: + - run_configtoolallcommand + ignore_errors: true + register: configtool -- name: install-home-gi | Run root script after installation (Other Nodes) - shell: "sleep {{ item.0 * 60 }}; {{ oracle_home_gi }}/root.sh" - # noqa command-instead-of-shell - become: true - with_indexed_items: "{{ groups[hostgroup] }}" - tags: - - runroot - # when: not master_node and oracle_home_gi not in checkgiinstall.stdout and inventory_hostname == item.1 - when: configure_cluster and inventory_hostname != cluster_master and inventory_hostname == item.1 and oracle_home_gi not in checkgiinstall.stdout - register: rootother - -- debug: var=rootother.stdout_lines - # noqa unnamed-task ignore-errors - # when: not master_node and oracle_home_gi not in checkgiinstall.stdout - when: configure_cluster and inventory_hostname != cluster_master and oracle_home_gi not in checkgiinstall.stdout - ignore_errors: true - -- name: install-home-gi | Execute ConfigTools - shell: "{{ oracle_home_gi }}/gridSetup.sh -executeConfigTools -responseFile {{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }} -silent" - # noqa command-instead-of-shell - become: true - become_user: "{{ grid_install_user }}" - run_once: "{{ configure_cluster }}" - tags: - - runconfigtool - when: run_configtoolallcommand and oracle_home_gi not in checkgiinstall.stdout - ignore_errors: true - register: configtool + when: + - oracle_home_gi not in checkgiinstall.stdout diff --git a/roles/oraswgi-install/tasks/21.3.0.0.yml b/roles/oraswgi-install/tasks/21.3.0.0.yml index 6509734a3..4420697a9 100644 --- a/roles/oraswgi-install/tasks/21.3.0.0.yml +++ b/roles/oraswgi-install/tasks/21.3.0.0.yml @@ -1,20 +1,25 @@ --- +- block: + - debug: msg="install-home-gi | Start Extract files to ORACLE_HOME (gi)" # noqa unnamed-task + run_once: "{{ configure_cluster }}" + + - name: install-home-gi | Extract files to ORACLE_HOME (gi) + unarchive: src={{ oracle_gi_image }} dest={{ oracle_home_gi }} copy=no + with_items: "{{ oracle_sw_image_gi }}" + loop_control: + label: "{{ oracle_gi_image | default ('') }}" + args: + creates: "{{ oracle_home_gi }}/root.sh" + become: true + become_user: "{{ grid_install_user }}" + run_once: "{{ configure_cluster }}" + when: + - oracle_install_version_gi == item.version -- name: install-home-gi | Extract files to ORACLE_HOME (gi) - 
unarchive: src={{ oracle_gi_image }} dest={{ oracle_home_gi }} copy=no - with_items: "{{ oracle_sw_image_gi }}" - loop_control: - label: "{{ oracle_gi_image | default ('') }}" - args: - creates: "{{ oracle_home_gi }}/root.sh" - become: true - become_user: "{{ grid_install_user }}" - run_once: "{{ configure_cluster }}" tags: - oragridswunpack when: - oracle_home_gi not in checkgiinstall.stdout - - oracle_install_version_gi == item.version # Check for an existing GRID_HOME before reinstallation of OPatch - name: install-home-gi | Check for file GridSetup.sh @@ -29,20 +34,46 @@ msg: "Cannot find {{ oracle_home_gi }}/gridSetup.sh }}" run_once: "{{ configure_cluster }}" -- name: install-home-gi | Install cvuqdisk rpm - yum: name="{{ oracle_home_gi }}/cv/rpm/{{ cvuqdisk_rpm }}" state=present - # noqa ignore-errors +- name: check for existing cvuqdisk_rpm + stat: + path: "{{ oracle_home_gi }}/cv/rpm/{{ cvuqdisk_rpm }}" + register: statcvuqdisk + run_once: true when: configure_cluster tags: cvuqdisk - ignore_errors: true -- name: install-home-gi | Recreate ORACLE_HOME (gi) on other nodes - file: name={{ oracle_home_gi }} state=absent - when: configure_cluster and inventory_hostname != cluster_master and oracle_home_gi not in checkgiinstall.stdout +- block: # when: configure_cluster + + # synchronize didn't work on all environments... + # => fetch to ansible controller + # => copy to nodes + - name: install-home-gi | copy cvuqdisk to ansible controller + fetch: + src: "{{ oracle_home_gi }}/cv/rpm/{{ cvuqdisk_rpm }}" + dest: /tmp + mode: 0644 + run_once: true + when: + - statcvuqdisk.stat.exists + tags: cvuqdisk + + - name: install-home-gi | copy cvuqdisk to cluster nodes + copy: + src: "/tmp/{{ cluster_master }}/{{ oracle_home_gi }}/cv/rpm/{{ cvuqdisk_rpm }}" + dest: "{{ oracle_rsp_stage }}/{{ cvuqdisk_rpm }}" + mode: 0644 + tags: cvuqdisk + + - name: install-home-gi | Install cvuqdisk rpm + package: + name: "{{ oracle_rsp_stage }}/{{ cvuqdisk_rpm }}" + state: present + tags: cvuqdisk -- name: install-home-gi | Recreate ORACLE_HOME (gi) on other nodes - file: name={{ oracle_home_gi }} mode=775 owner={{ oracle_user }} group={{ oracle_group }} state=directory - when: configure_cluster and inventory_hostname != cluster_master and oracle_home_gi not in checkgiinstall.stdout + when: + - configure_cluster + - hostvars[cluster_master]['statcvuqdisk']['stat']['exists'] + tags: cvuqdisk - name: install-home-gi | Setup response file for install (GI) template: @@ -54,79 +85,106 @@ backup=yes with_items: "{{ asm_diskgroups }}" run_once: "{{ configure_cluster }}" + loop_control: + label: "{{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }}" tags: - responsefilegi - when: oracle_home_gi not in checkgiinstall.stdout and item.diskgroup == oracle_asm_init_dg - -- name: install-home-gi | Install Grid Infrastructure - shell: "{{ oracle_home_gi }}/gridSetup.sh -responseFile {{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }} -waitforcompletion -ignorePrereq -silent" - # noqa command-instead-of-shell - become: true - become_user: "{{ grid_install_user }}" - run_once: "{{ configure_cluster }}" - tags: - - oragridinstall - when: oracle_home_gi not in checkgiinstall.stdout # and oracle_sw_unpack - register: giinstall - failed_when: giinstall.rc not in [0,6] + when: + - item.diskgroup == oracle_asm_init_dg + +- block: # when: oracle_home_gi not in checkgiinstall.stdout + + - include_tasks: runcluvfy.yml + when: + - force_runcluvfy | default(false) or oracle_home_gi not in checkgiinstall.stdout + tags: always + + - 
debug: msg="install-home-gi | Start Install Grid Infrastructure" # noqa unnamed-task + run_once: true + + - name: install-home-gi | Install Grid Infrastructure + command: + argv: + - "{{ oracle_home_gi }}/gridSetup.sh" + - -responseFile + - "{{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }}" + - -waitforcompletion + - -silent + - "{{ giignoreprereqparam | default(omit) }}" + become: true + become_user: "{{ grid_install_user }}" + run_once: "{{ configure_cluster }}" + tags: + - oragridinstall + register: giinstall + failed_when: giinstall.rc not in [0,6] + + - debug: var=giinstall.stdout_lines # noqa unnamed-task + run_once: "{{ configure_cluster }}" + when: giinstall.stdout_lines is defined -- debug: var=giinstall.stdout_lines - # noqa unnamed-task - run_once: "{{ configure_cluster }}" - when: oracle_home_gi not in checkgiinstall.stdout and giinstall.changed + when: oracle_home_gi not in checkgiinstall.stdout -- include_role: - # noqa unnamed-task +- name: install-home-gi | include role oraswgi-manage-patches + include_role: name: oraswgi-manage-patches when: patch_before_rootsh and apply_patches_gi -- name: install-home-gi | Run oraInstroot script after installation - shell: "{{ oracle_inventory_loc }}/orainstRoot.sh" - # noqa command-instead-of-shell - become: true - tags: - - runroot - when: oracle_home_gi not in checkgiinstall.stdout +- block: # when: oracle_home_gi not in checkgiinstall.stdout + - name: install-home-gi | Run oraInstroot script after installation + command: "{{ oracle_inventory_loc }}/orainstRoot.sh" + become: true + tags: + - runroot + + - debug: msg="install-home-gi | Start Run root script after installation (Master Node)" # noqa unnamed-task + run_once: true + + - name: install-home-gi | Run root script after installation (Master Node) + command: "{{ oracle_home_gi }}/root.sh" + become: true + run_once: "{{ configure_cluster }}" + register: rootmaster + tags: + - runroot + + - debug: var=rootmaster.stdout_lines # noqa unnamed-task + run_once: "{{ configure_cluster }}" + when: rootmaster.stdout_lines is defined + + - debug: msg="install-home-gi | Start Run root script after installation (Other Nodes)" # noqa unnamed-task + run_once: true + + # do not start root.sh on all nodes in parallel + # => sleep + - name: install-home-gi | Run root script after installation (Other Nodes) + script: | + /usr/bin/sleep {{ item.0 * 60 }} + {{ oracle_home_gi }}/root.sh + become: true + with_indexed_items: "{{ groups[hostgroup] }}" + tags: + - runroot + when: + - configure_cluster + - inventory_hostname != cluster_master + - inventory_hostname == item.1 + register: rootother + + - debug: var=rootother.stdout_lines # noqa unnamed-task + when: rootother.stdout_lines is defined + + - name: install-home-gi | Execute ConfigTools + command: "{{ oracle_home_gi }}/gridSetup.sh -executeConfigTools -responseFile {{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }} -silent" + become: true + become_user: "{{ grid_install_user }}" + run_once: "{{ configure_cluster }}" + tags: + - runconfigtool + when: + - run_configtoolallcommand + ignore_errors: true + register: configtool -- name: install-home-gi | Run root script after installation (Master Node) - shell: "{{ oracle_home_gi }}/root.sh" - # noqa command-instead-of-shell - become: true - run_once: "{{ configure_cluster }}" - tags: - - runroot - when: oracle_home_gi not in checkgiinstall.stdout - register: rootmaster - -- debug: var=rootmaster.stdout_lines - # noqa unnamed-task - run_once: "{{ configure_cluster }}" - when: oracle_home_gi not 
+    - name: install-home-gi | Execute ConfigTools
+      command: "{{ oracle_home_gi }}/gridSetup.sh -executeConfigTools -responseFile {{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }} -silent"
+      become: true
+      become_user: "{{ grid_install_user }}"
+      run_once: "{{ configure_cluster }}"
+      tags:
+        - runconfigtool
+      when:
+        - run_configtoolallcommand
+      ignore_errors: true
+      register: configtool

-- name: install-home-gi | Run root script after installation (Master Node)
-  shell: "{{ oracle_home_gi }}/root.sh"
-  # noqa command-instead-of-shell
-  become: true
-  run_once: "{{ configure_cluster }}"
-  tags:
-    - runroot
-  when: oracle_home_gi not in checkgiinstall.stdout
-  register: rootmaster
-
-- debug: var=rootmaster.stdout_lines
-  # noqa unnamed-task
-  run_once: "{{ configure_cluster }}"
-  when: oracle_home_gi not in checkgiinstall.stdout
-
-- name: install-home-gi | Run root script after installation (Other Nodes)
-  shell: "sleep {{ item.0 * 60 }}; {{ oracle_home_gi }}/root.sh"
-  # noqa command-instead-of-shell
-  become: true
-  with_indexed_items: "{{ groups[hostgroup] }}"
-  tags:
-    - runroot
-  # when: not master_node and oracle_home_gi not in checkgiinstall.stdout
-  when: configure_cluster and inventory_hostname != cluster_master and inventory_hostname == item.1 and oracle_home_gi not in checkgiinstall.stdout
-  register: rootother
-
-- debug: var=rootother.stdout_lines
-  # noqa unnamed-task ignore-errors
-  when: configure_cluster and inventory_hostname != cluster_master and oracle_home_gi not in checkgiinstall.stdout
-  ignore_errors: true
-
-- name: install-home-gi | Execute ConfigTools
-  shell: "{{ oracle_home_gi }}/gridSetup.sh -executeConfigTools -responseFile {{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }} -silent"
-  # noqa command-instead-of-shell
-  become: true
-  become_user: "{{ grid_install_user }}"
-  run_once: "{{ configure_cluster }}"
-  tags:
-    - runconfigtool
-  when: run_configtoolallcommand and oracle_home_gi not in checkgiinstall.stdout
-  ignore_errors: true
-  register: configtool
+  when:
+    - oracle_home_gi not in checkgiinstall.stdout
diff --git a/roles/oraswgi-install/tasks/main.yml b/roles/oraswgi-install/tasks/main.yml
index 9d0dc7c23..837ffd339 100644
--- a/roles/oraswgi-install/tasks/main.yml
+++ b/roles/oraswgi-install/tasks/main.yml
@@ -5,8 +5,7 @@
   shell: cat "{{ oracle_inventory_loc }}/ContentsXML/inventory.xml" |grep -w {{ oracle_home_gi }} |awk '{print $3}' | cut -f2 -d'"'
   # noqa command-instead-of-shell risky-shell-pipe
   tags:
-    - checkifgiinstall
-    - responsefilegi
+    - always
   changed_when: false
   register: checkgiinstall

@@ -17,7 +16,9 @@
 - name: install-home-gi | set fact for patch_before_rootsh
   set_fact:
     patch_before_rootsh: false
-  when: olrloc.stat.exists and patch_before_rootsh
+  when:
+    - olrloc.stat.exists
+    - patch_before_rootsh

 - name: install-home-gi | Mount nfs share with installation media
   mount: src="{{ nfs_server_sw }}:{{ nfs_server_sw_path }}" name={{ oracle_stage_remote }} fstype=nfs state=mounted
@@ -67,7 +68,10 @@
   become_user: "{{ grid_install_user }}"
   tags:
     - oragridsw
-  when: oracle_home_gi not in checkgiinstall.stdout and is_sw_source_local and oracle_install_version_gi == item.version and oracle_sw_copy
+  when:
+    - oracle_home_gi not in checkgiinstall.stdout
+    - is_sw_source_local
+    - oracle_install_version_gi == item.version
+    - oracle_sw_copy

 - name: include_tasks "{{ oracle_install_version_gi }}.yml"
   include_tasks: "{{ oracle_install_version_gi }}.yml"
@@ -115,7 +119,9 @@
 - name: include_role oraswgi-manage-patches
   include_role:
     name: oraswgi-manage-patches
-  when: not patch_before_rootsh and apply_patches_gi
+  when:
+    - not patch_before_rootsh
+    - apply_patches_gi

 - name: install-home-gi | Check opatch lsinventory (GI)
   shell: "{{ oracle_home_gi }}/OPatch/opatch lspatches"
diff --git a/roles/oraswgi-install/tasks/runcluvfy.yml b/roles/oraswgi-install/tasks/runcluvfy.yml
new file mode 100644
index 000000000..c6a2973ae
--- /dev/null
+++ b/roles/oraswgi-install/tasks/runcluvfy.yml
@@ -0,0 +1,81 @@
+---
+- block:
+
+    - debug: msg="install-home-gi | Start Execute runcluvfy.sh for Grid-Infrastructure" # noqa unnamed-task
+      run_once: true
+      when:
+        - configure_cluster
+
+    - name: install-home-gi | Execute runcluvfy.sh for Grid-Infrastructure
+      command:
+        argv:
+          - "{{ oracle_home_gi }}/runcluvfy.sh"
+          - stage
+          - 
-pre + - crsinst + - -responseFile + - "{{ oracle_rsp_stage }}/{{ oracle_grid_responsefile }}" + register: cmdruncluvfygi + failed_when: cmdruncluvfygi.rc == -1 + changed_when: cmdruncluvfygi.rc == 0 + become: true + become_user: "{{ grid_install_user }}" + run_once: true + when: + - configure_cluster + + - debug: var=cmdruncluvfygi.stdout_lines # noqa unnamed-task + when: cmdruncluvfygi.stdout_lines is defined + + - name: install-home-gi | runcluvfy.sh failed + fail: + msg: "runcluvfy.sh failed - aborting Playbook rc={{ cmdruncluvfygi.rc }}" + when: + - cmdruncluvfygi.rc is defined + - cmdruncluvfygi.rc != 0 + - not gi_ignoreprereq | bool + + - name: install-home-gi | Execute runcluvfy.sh for Oracle Restart + command: + argv: + - "{{ oracle_home_gi }}/runcluvfy.sh" + - comp + - sys + - -p + - crs + - -orainv + - oinstall + register: cmdruncluvfy + failed_when: cmdruncluvfy.rc == -1 + changed_when: cmdruncluvfy.rc == 0 + become: true + become_user: "{{ grid_install_user }}" + when: + - not configure_cluster + + - debug: var=cmdruncluvfy.stdout_lines # noqa unnamed-task + when: cmdruncluvfy.stdout_lines is defined + + - name: install-home-gi | runcluvfy.sh failed + fail: + msg: "runcluvfy.sh failed - aborting Playbook rc={{ cmdruncluvfy.rc }}" + when: + - cmdruncluvfy.rc is defined + - cmdruncluvfy.rc != 0 + - not gi_ignoreprereq | bool + + # Oracle GI/Restart: Configuration will fail when hostname points to localhost ip! + - name: Check for hostname with localhost ip in hosts + lineinfile: + name: /etc/hosts + regexp: "127.0.[0-1].1 .*{{ ansible_hostname }}.*" + state: absent + check_mode: true + failed_when: etchosts.changed + register: etchosts + + # need to run it once on cluster + # => unzip was executed on 1st node only. + # => hopefully the setup is the same on all nodes... + run_once: "{{ configure_cluster }}" + tags: runcluvfy diff --git a/roles/oraswgi-install/templates/grid-install.rsp.19.3.0.0.j2 b/roles/oraswgi-install/templates/grid-install.rsp.19.3.0.0.j2 index b6cfa9d2f..f232f1c71 100644 --- a/roles/oraswgi-install/templates/grid-install.rsp.19.3.0.0.j2 +++ b/roles/oraswgi-install/templates/grid-install.rsp.19.3.0.0.j2 @@ -452,7 +452,13 @@ oracle.install.asm.diskGroup.disksWithFailureGroupNames= # oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 # #------------------------------------------------------------------------------- -oracle.install.asm.diskGroup.disks={% if device_persistence=='udev' %}{% for disk in item.disk -%} {{ oracle_asm_disk_string }}{{ disk.asmlabel }}{%- if not loop.last -%} , {%- endif -%} {%- endfor %}{% else %}{% for disk in item.disk -%} ORCL:{{ disk.asmlabel|upper }}{%- if not loop.last -%} , {%- endif -%} {%- endfor %} {% endif %} +oracle.install.asm.diskGroup.disks={%- for disk in item.disk %} + {%- if device_persistence=='udev' -%}{{ disk.device }} + {%- else -%} + {%- if oracle_asm_disk_string.endswith('*') %}{{ oracle_asm_disk_string[:-1] }} + {%- else -%}{{ oracle_asm_disk_string }} + {%- endif -%}{{ disk.asmlabel | upper }} + {%- endif -%}{%- if not loop.last %} , {% endif %}{% endfor %} #------------------------------------------------------------------------------- # List of failure groups to be marked as QUORUM. 
@@ -471,7 +477,7 @@ oracle.install.asm.diskGroup.quorumFailureGroupNames= # oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK* # #------------------------------------------------------------------------------- -oracle.install.asm.diskGroup.diskDiscoveryString={% if device_persistence == 'asmlib' %}ORCL:*{% else %}{{ oracle_asm_disk_string }}*{% endif %} +oracle.install.asm.diskGroup.diskDiscoveryString={{ oracle_asm_disk_string }} #------------------------------------------------------------------------------- # Password for ASMSNMP account diff --git a/roles/oraswgi-install/templates/grid-install.rsp.21.3.0.0.j2 b/roles/oraswgi-install/templates/grid-install.rsp.21.3.0.0.j2 index bdc7d8579..4b1373d0e 100644 --- a/roles/oraswgi-install/templates/grid-install.rsp.21.3.0.0.j2 +++ b/roles/oraswgi-install/templates/grid-install.rsp.21.3.0.0.j2 @@ -436,7 +436,13 @@ oracle.install.asm.diskGroup.disksWithFailureGroupNames= # oracle.install.asm.diskGroup.disks=\\.\ORCLDISKDATA0,\\.\ORCLDISKDATA1 # #------------------------------------------------------------------------------- -oracle.install.asm.diskGroup.disks={% if device_persistence=='udev' %}{% for disk in item.disk -%} {{ oracle_asm_disk_string }}{{ disk.asmlabel }}{%- if not loop.last -%} , {%- endif -%} {%- endfor %}{% else %}{% for disk in item.disk -%} ORCL:{{ disk.asmlabel|upper }}{%- if not loop.last -%} , {%- endif -%} {%- endfor %} {% endif %} +oracle.install.asm.diskGroup.disks={%- for disk in item.disk %} + {%- if device_persistence=='udev' -%}{{ disk.device }} + {%- else -%} + {%- if oracle_asm_disk_string.endswith('*') %}{{ oracle_asm_disk_string[:-1] }} + {%- else -%}{{ oracle_asm_disk_string }} + {%- endif -%}{{ disk.asmlabel | upper }} + {%- endif -%}{%- if not loop.last %} , {% endif %}{% endfor %} #------------------------------------------------------------------------------- # List of failure groups to be marked as QUORUM. @@ -455,7 +461,7 @@ oracle.install.asm.diskGroup.quorumFailureGroupNames= # oracle.install.asm.diskGroup.diskDiscoveryString=\\.\ORCLDISK* # #------------------------------------------------------------------------------- -oracle.install.asm.diskGroup.diskDiscoveryString={% if device_persistence == 'asmlib' %}ORCL:*{% else %}{{ oracle_asm_disk_string }}*{% endif %} +oracle.install.asm.diskGroup.diskDiscoveryString={{ oracle_asm_disk_string }} #------------------------------------------------------------------------------- # Password for ASMSNMP account