From 9c87fb8c87f4d1b0d1086cc7b67f31ae6f5e3aa5 Mon Sep 17 00:00:00 2001
From: Neil Hanlon <neil@rockylinux.org>
Date: Mon, 10 Jan 2022 13:20:05 -0500
Subject: [PATCH] add and update ansible playbooks for infra

- [openstack_user_config]: remove NFS in favor of (properly installed)
  iSCSI
- [openstack_user_config]: remove extraneous commented-out config in
  favor of the shorter version
- [storage] install targetcli and enable target.service
- [ansible] only run 'infra'-tagged tasks on the first infra host -
  never on an AIO
- [ansible] change role playbooks to use the ``host`` extra var to
  select where they run, mitigating accidents (see the example
  invocations below)
- [ansible] add aio steps to infra playbook
- [ansible] add storage host playbook to configure volumes and iscsi
- [ansible] aio: configure volume groups
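
Example invocations (a sketch; the inventory path and extra-var values
below are assumptions - adjust for the target environment):

    # target a specific infra host explicitly
    ansible-playbook -i inventory role-infra-host.yml -e host=infra1

    # bootstrap an all-in-one (AIO) lab node instead
    ansible-playbook -i inventory role-infra-host.yml -e host=aio1 -e aio_install=true

    # configure the storage host (installs targetcli, creates the cinder-volumes VG)
    ansible-playbook -i inventory role-storage-host.yml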
---
 .../playbooks/files/openstack_user_config.yml | 107 +++---------------
 ansible/playbooks/handlers/main.yml           |   6 +
 ansible/playbooks/init-nodes.yml              |   2 +-
 ansible/playbooks/role-infra-host.yml         |  40 ++++++-
 ansible/playbooks/role-storage-host.yml       |  49 ++++++++
 5 files changed, 105 insertions(+), 99 deletions(-)
 create mode 100644 ansible/playbooks/role-storage-host.yml

diff --git a/ansible/playbooks/files/openstack_user_config.yml b/ansible/playbooks/files/openstack_user_config.yml
index 583c4e8..fb8180e 100644
--- a/ansible/playbooks/files/openstack_user_config.yml
+++ b/ansible/playbooks/files/openstack_user_config.yml
@@ -71,19 +71,19 @@ x-storage-hosts: &x-storage-hosts
     container_vars:
       cinder_backends:
         limit_container_types: cinder_volume
-        nfs_volume:
-          volume_backend_name: NFS_VOLUME1
-          volume_driver: cinder.volume.drivers.nfs.NfsDriver
-          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
-          nfs_shares_config: /etc/cinder/nfs_shares
-          shares:
-            - ip: "172.29.228.7"
-              share: "/vol/cinder"
-              #lvm:
-              #    volume_group: cinder-volumes
-              #    volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-              #    volume_backend_name: LVM_iSCSI
-              #    iscsi_ip_address: "172.29.228.7"
+          #nfs_volume:
+          #  volume_backend_name: NFS_VOLUME1
+          #  volume_driver: cinder.volume.drivers.nfs.NfsDriver
+          #  nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
+          #  nfs_shares_config: /etc/cinder/nfs_shares
+          #  shares:
+          #    - ip: "172.29.228.7"
+          #      share: "/vol/cinder"
+        lvm:
+          volume_group: cinder-volumes
+          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+          volume_backend_name: LVM_iSCSI
+          iscsi_ip_address: "172.29.228.7"
 
 ##
 ## Infrastructure
@@ -117,84 +117,3 @@ compute_hosts:
   <<: *x-compute-hosts
 storage_hosts:
   <<: *x-storage-hosts
-
-###
-### Infrastructure
-###
-
-  ## galera, memcache, rabbitmq, utility
-  #shared-infra_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ## repository (apt cache, python packages, etc)
-  #repo-infra_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ## load balancer
-  #haproxy_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ####
-  #### OpenStack
-  ####
-  #
-  ## keystone
-  #identity_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ## cinder api services
-  #storage-infra_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ## glance
-  #image_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ## placement
-  #placement-infra_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ## nova api, conductor, etc services
-  #compute-infra_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ## heat
-  #orchestration_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ## horizon
-  #dashboard_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ## neutron server, agents (L3, etc)
-  #network_hosts:
-  #  infra1:
-  #    ip: 172.29.220.5
-  #
-  ## nova hypervisors
-  #compute_hosts:
-  #  compute1:
-  #    ip: 172.29.220.6
-  #
-  ## cinder storage host (LVM-backed)
-  #storage_hosts:
-  #  storage1:
-  #    ip: 172.29.220.7
-  #    container_vars:
-  #      cinder_backends:
-  #        limit_container_types: cinder_volume
-  #        lvm:
-  #          volume_group: cinder-volumes
-  #          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-  #          volume_backend_name: LVM_iSCSI
-  #          iscsi_ip_address: "172.29.228.7"
diff --git a/ansible/playbooks/handlers/main.yml b/ansible/playbooks/handlers/main.yml
index 3860a81..7a02ee8 100644
--- a/ansible/playbooks/handlers/main.yml
+++ b/ansible/playbooks/handlers/main.yml
@@ -3,3 +3,9 @@
   service:
     name: sshd
     state: restarted
+- name: enable_targetd
+  shell: "systemctl enable --now target"
+- name: restart_targetd
+  service:
+    name: target
+    state: restarted
diff --git a/ansible/playbooks/init-nodes.yml b/ansible/playbooks/init-nodes.yml
index 7875b81..2d658dd 100644
--- a/ansible/playbooks/init-nodes.yml
+++ b/ansible/playbooks/init-nodes.yml
@@ -65,7 +65,7 @@
           when: sshkey_register.ssh_public_key != ""
           register: sshkey_fetch
 
-      when: tag.find("infra") != -1
+      when: tag.find("infra") != -1 and name == "infra1"
       tags:
         - infra
         - sshkey
diff --git a/ansible/playbooks/role-infra-host.yml b/ansible/playbooks/role-infra-host.yml
index b4f4722..cb5f65d 100644
--- a/ansible/playbooks/role-infra-host.yml
+++ b/ansible/playbooks/role-infra-host.yml
@@ -1,7 +1,7 @@
 ---
 
 - name: Bootstrap
-  hosts: infra1
+  hosts: "{{ host }}"
   become: true
 
   handlers:
@@ -43,6 +43,7 @@
         - bootstrap
 
     - name: Deploy and setup configuration
+      when: not (aio_install | default(false) | bool) # the bootstrap-aio script handles these steps instead
       block:
         - name: Copy template to etc
           ansible.builtin.copy:
@@ -50,16 +51,15 @@
             src: /opt/openstack-ansible/etc/openstack_deploy/
             dest: /etc/openstack_deploy/
             directory_mode: yes
+            force: no
 
-        - name: Copy our openstack configs
+        - name: Copy distributed openstack configs
           ansible.builtin.copy:
             src: "files/{{ item }}.yml"
             dest: /etc/openstack_deploy/
             mode: '0644'
           with_items:
             - openstack_user_config
-            - user_lxc
-            - user_galera
 
         - name: Create secrets
           become: true
@@ -69,6 +69,38 @@
           args:
             creates: /etc/openstack_deploy/user_secrets.yml.tar
 
+    - name: "[AIO] Deploy and setup configuration / bootstrap"
+      when: aio_install | default(false) | bool
+      block:
+        - name: Debug - Running AIO bootstrap
+          debug:
+            var: aio_install
+
+        - name: Run bootstrap ansible with included args
+          ansible.builtin.shell: scripts/bootstrap-aio.sh
+          become: true
+          args:
+            chdir: /opt/openstack-ansible/
+            creates: /etc/openstack_deploy/
+          tags:
+            - bootstrap
+            - aio
+
+        - name: Create volume group for cinder
+          lvg:
+            pv_options: --metadatasize=2048
+            pvs: "{{ cinder_pv_device | default('/dev/vdb') }}"
+            vg: cinder-volumes
+
+    - name: Copy common openstack configs
+      ansible.builtin.copy:
+        src: "files/{{ item }}.yml"
+        dest: /etc/openstack_deploy/
+        mode: '0644'
+      with_items:
+        - user_lxc
+        - user_galera
+
 
     - name: Check playbooks
       tags: syntax
diff --git a/ansible/playbooks/role-storage-host.yml b/ansible/playbooks/role-storage-host.yml
new file mode 100644
index 0000000..2f5d049
--- /dev/null
+++ b/ansible/playbooks/role-storage-host.yml
@@ -0,0 +1,49 @@
+---
+# Set up the storage host: install targetcli and ensure the cinder-volumes volume group exists.
+
+- name: Storage Host Configuration
+  hosts: storage1
+  become: true
+
+  handlers:
+    - import_tasks: handlers/main.yml
+
+  pre_tasks:
+    - name: Check if ansible cannot be run here
+      stat:
+        path: /etc/no-ansible
+      register: no_ansible
+
+    - name: Verify if we can run ansible
+      assert:
+        that:
+          - "not no_ansible.stat.exists"
+        success_msg: "We are able to run on this node"
+        fail_msg: "/etc/no-ansible exists - skipping run on this node"
+
+  tasks:
+    - name: Loading Variables from OS Common
+      import_tasks: tasks/common_vars.yml
+
+    - name: Install required packages
+      become: true
+      dnf:
+        name: targetcli
+      notify: enable_targetd
+
+    - name: Create volume group for cinder
+      lvg:
+        pv_options: --metadatasize=2048
+        pvs: "{{ cinder_pv_device | default('/dev/vdb') }}"
+        vg: cinder-volumes
+
+
+  post_tasks:
+    - name: Touch run file noting that ansible has run here
+      file:
+        path: /var/log/ansible.run
+        state: touch
+        mode: '0644'
+        owner: root
+        group: root
+...