diff --git a/ansible/apt.yml b/ansible/apt.yml index 633454563..1dabb4038 100644 --- a/ansible/apt.yml +++ b/ansible/apt.yml @@ -1,6 +1,6 @@ --- - name: Ensure APT is configured - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ apt_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/container-engine.yml b/ansible/container-engine.yml index 988bed210..7e6b30a23 100644 --- a/ansible/container-engine.yml +++ b/ansible/container-engine.yml @@ -19,7 +19,7 @@ docker_http_proxy: "{{ kolla_http_proxy }}" docker_https_proxy: "{{ kolla_https_proxy }}" docker_no_proxy: "{{ kolla_no_proxy | select | join(',') }}" - when: container_engine == "docker" + when: container_engine_enabled | default(true) | bool and container_engine == "docker" - name: Ensure podman is configured hosts: container-engine @@ -34,4 +34,4 @@ tasks: - include_role: name: openstack.kolla.podman - when: container_engine == "podman" + when: container_engine_enabled | default(true) | bool and container_engine == "podman" diff --git a/ansible/control-host-configure.yml b/ansible/control-host-configure.yml new file mode 100644 index 000000000..996da2c68 --- /dev/null +++ b/ansible/control-host-configure.yml @@ -0,0 +1,23 @@ +--- +- import_playbook: "ssh-known-host.yml" +- import_playbook: "kayobe-ansible-user.yml" +- import_playbook: "logging.yml" +- import_playbook: "proxy.yml" +- import_playbook: "apt.yml" +- import_playbook: "dnf.yml" +- import_playbook: "pip.yml" +- import_playbook: "kayobe-target-venv.yml" +- import_playbook: "wipe-disks.yml" +- import_playbook: "users.yml" +- import_playbook: "dev-tools.yml" +- import_playbook: "selinux.yml" +- import_playbook: "network.yml" +- import_playbook: "firewall.yml" +- import_playbook: "tuned.yml" +- import_playbook: "sysctl.yml" +- import_playbook: "time.yml" +- import_playbook: "mdadm.yml" +- import_playbook: "luks.yml" +- 
import_playbook: "lvm.yml" +- import_playbook: "swap.yml" +- import_playbook: "container-engine.yml" diff --git a/ansible/dev-tools.yml b/ansible/dev-tools.yml index 5f6f3ed32..228d1643c 100644 --- a/ansible/dev-tools.yml +++ b/ansible/dev-tools.yml @@ -1,6 +1,6 @@ --- - name: Ensure development tools are installed - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ dev_tools_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/dnf.yml b/ansible/dnf.yml index 73999b911..3aa99c486 100644 --- a/ansible/dnf.yml +++ b/ansible/dnf.yml @@ -1,6 +1,6 @@ --- - name: Ensure DNF repos are configured - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ dnf_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/firewall.yml b/ansible/firewall.yml index 8455d05f2..066b2d82c 100644 --- a/ansible/firewall.yml +++ b/ansible/firewall.yml @@ -1,6 +1,6 @@ --- - name: Ensure firewall is configured - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ firewall_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/host-command-run.yml b/ansible/host-command-run.yml index ba5497db6..258994aa7 100644 --- a/ansible/host-command-run.yml +++ b/ansible/host-command-run.yml @@ -1,6 +1,6 @@ --- - name: Run a command - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control gather_facts: False max_fail_percentage: >- {{ host_command_run_max_fail_percentage | diff --git a/ansible/host-package-update.yml b/ansible/host-package-update.yml index 94b014ba1..77be7c722 100644 --- a/ansible/host-package-update.yml +++ b/ansible/host-package-update.yml @@ -1,6 +1,6 @@ --- - name: 
Update host packages - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ host_package_update_max_fail_percentage | default(kayobe_max_fail_percentage) | diff --git a/ansible/inventory/group_vars/all/ansible-control b/ansible/inventory/group_vars/all/ansible-control new file mode 100644 index 000000000..0f9f555c0 --- /dev/null +++ b/ansible/inventory/group_vars/all/ansible-control @@ -0,0 +1,139 @@ +--- +############################################################################### +# Ansible control host configuration. + +# User with which to access the Ansible control host via SSH during bootstrap, +# in order to setup the Kayobe user account. Default is {{ os_distribution }}. +ansible_control_bootstrap_user: "{{ os_distribution }}" + +############################################################################### +# Ansible control host network interface configuration. + +# List of networks to which Ansible control host are attached. +ansible_control_network_interfaces: > + {{ (ansible_control_default_network_interfaces + + ansible_control_extra_network_interfaces) | select | unique | list }} + +# List of default networks to which Ansible control host are attached. +ansible_control_default_network_interfaces: > + {{ [admin_oc_net_name] | select | unique | list }} + +# List of extra networks to which Ansible control host are attached. +ansible_control_extra_network_interfaces: [] + +############################################################################### +# Ansible control host software RAID configuration. + +# List of software RAID arrays. See mrlesmithjr.mdadm role for format. +ansible_control_mdadm_arrays: [] + +############################################################################### +# Ansible control host encryption configuration. + +# List of block devices to encrypt. See stackhpc.luks role for format. 
+ansible_control_luks_devices: [] + +############################################################################### +# Ansible control host LVM configuration. + +# List of Ansible control host volume groups. See mrlesmithjr.manage_lvm role +# for format. +ansible_control_lvm_groups: "{{ ansible_control_lvm_groups_default + ansible_control_lvm_groups_extra }}" + +# Default list of Ansible control host volume groups. See +# mrlesmithjr.manage_lvm role for format. +ansible_control_lvm_groups_default: "{{ [ansible_control_lvm_group_data] if ansible_control_lvm_group_data_enabled | bool else [] }}" + +# Additional list of Ansible control host volume groups. See +# mrlesmithjr.manage_lvm role for format. +ansible_control_lvm_groups_extra: [] + +# Whether a 'data' LVM volume group should exist on the Ansible control host. +# By default this contains a 'docker-volumes' logical volume for Docker volume +# storage. Default is false. +ansible_control_lvm_group_data_enabled: false + +# Ansible control host LVM volume group for data. See mrlesmithjr.manage_lvm +# role for format. +ansible_control_lvm_group_data: + vgname: data + disks: "{{ ansible_control_lvm_group_data_disks }}" + create: True + lvnames: "{{ ansible_control_lvm_group_data_lvs }}" + +# List of disks for use by Ansible control host LVM data volume group. Default +# to an invalid value to require configuration. +ansible_control_lvm_group_data_disks: + - changeme + +# List of LVM logical volumes for the data volume group. +ansible_control_lvm_group_data_lvs: + - "{{ ansible_control_lvm_group_data_lv_docker_volumes }}" + +# Docker volumes LVM backing volume. +ansible_control_lvm_group_data_lv_docker_volumes: + lvname: docker-volumes + size: "{{ ansible_control_lvm_group_data_lv_docker_volumes_size }}" + create: True + filesystem: "{{ ansible_control_lvm_group_data_lv_docker_volumes_fs }}" + mount: True + mntp: /var/lib/docker/volumes + +# Size of docker volumes LVM backing volume. 
+ansible_control_lvm_group_data_lv_docker_volumes_size: 75%VG + +# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. +ansible_control_lvm_group_data_lv_docker_volumes_fs: ext4 + +############################################################################### +# Ansible control host sysctl configuration. + +# Dict of sysctl parameters to set. +ansible_control_sysctl_parameters: {} + +############################################################################### +# Ansible control host tuned configuration. + +# Builtin tuned profile to use. Format is same as that used by giovtorres.tuned +# role. Default is throughput-performance. +ansible_control_tuned_active_builtin_profile: "throughput-performance" + +############################################################################### +# Ansible control host user configuration. + +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. +ansible_control_users: "{{ users_default }}" + +############################################################################### +# Ansible control host firewalld configuration. + +# Whether to install and enable firewalld. +ansible_control_firewalld_enabled: false + +# A list of zones to create. Each item is a dict containing a 'zone' item. +ansible_control_firewalld_zones: [] + +# A firewalld zone to set as the default. Default is unset, in which case the +# default zone will not be changed. +ansible_control_firewalld_default_zone: + +# A list of firewall rules to apply. Each item is a dict containing arguments +# to pass to the firewalld module. Arguments are omitted if not provided, with +# the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +ansible_control_firewalld_rules: [] + +############################################################################### +# Ansible control host swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. 
+ansible_control_swap: [] + +############################################################################### +# Ansible control host container engine configuration. + +# Whether a container engine should be configured. Default is false. +ansible_control_container_engine_enabled: false diff --git a/ansible/inventory/group_vars/ansible-control/ansible-host b/ansible/inventory/group_vars/ansible-control/ansible-host new file mode 100644 index 000000000..3d291ad41 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/ansible-host @@ -0,0 +1,3 @@ +--- +# Host/IP with which to access the Ansible control host via SSH. +ansible_host: "{{ admin_oc_net_name | net_ip }}" diff --git a/ansible/inventory/group_vars/ansible-control/ansible-user b/ansible/inventory/group_vars/ansible-control/ansible-user new file mode 100644 index 000000000..d9a48787e --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/ansible-user @@ -0,0 +1,7 @@ +--- +# User with which to access the Ansible control host via SSH. +ansible_user: "{{ kayobe_ansible_user }}" + +# User with which to access the Ansible control host before the +# kayobe_ansible_user account has been created. +bootstrap_user: "{{ ansible_control_bootstrap_user }}" diff --git a/ansible/inventory/group_vars/ansible-control/container-engine b/ansible/inventory/group_vars/ansible-control/container-engine new file mode 100644 index 000000000..e92c6388d --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/container-engine @@ -0,0 +1,5 @@ +--- +############################################################################### +# Ansible control host container engine configuration. 
+ +container_engine_enabled: "{{ ansible_control_container_engine_enabled }}" diff --git a/ansible/inventory/group_vars/ansible-control/firewall b/ansible/inventory/group_vars/ansible-control/firewall new file mode 100644 index 000000000..24bbf8ec6 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/firewall @@ -0,0 +1,21 @@ +--- +############################################################################### +# Ansible control host firewalld configuration. + +# Whether to install and enable firewalld. +firewalld_enabled: "{{ ansible_control_firewalld_enabled }}" + +# A list of zones to create. Each item is a dict containing a 'zone' item. +firewalld_zones: "{{ ansible_control_firewalld_zones }}" + +# A firewalld zone to set as the default. Default is unset, in which case the +# default zone will not be changed. +firewalld_default_zone: "{{ ansible_control_firewalld_default_zone }}" + +# A list of firewall rules to apply. Each item is a dict containing arguments +# to pass to the firewalld module. Arguments are omitted if not provided, with +# the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +firewalld_rules: "{{ ansible_control_firewalld_rules }}" diff --git a/ansible/inventory/group_vars/ansible-control/luks b/ansible/inventory/group_vars/ansible-control/luks new file mode 100644 index 000000000..842e10c64 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/luks @@ -0,0 +1,6 @@ +--- +############################################################################### +# Ansible control host encryption configuration. + +# List of block devices to encrypt. See stackhpc.luks role for format. 
+luks_devices: "{{ ansible_control_luks_devices }}" diff --git a/ansible/inventory/group_vars/ansible-control/lvm b/ansible/inventory/group_vars/ansible-control/lvm new file mode 100644 index 000000000..ad913dfde --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/lvm @@ -0,0 +1,6 @@ +--- +############################################################################### +# Ansible control host LVM configuration. + +# List of LVM volume groups. +lvm_groups: "{{ ansible_control_lvm_groups }}" diff --git a/ansible/inventory/group_vars/ansible-control/mdadm b/ansible/inventory/group_vars/ansible-control/mdadm new file mode 100644 index 000000000..d5a5cccea --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/mdadm @@ -0,0 +1,6 @@ +--- +############################################################################### +# Ansible control host software RAID configuration. + +# List of software RAID arrays. See mrlesmithjr.mdadm role for format. +mdadm_arrays: "{{ ansible_control_mdadm_arrays }}" diff --git a/ansible/inventory/group_vars/ansible-control/network b/ansible/inventory/group_vars/ansible-control/network new file mode 100644 index 000000000..a14971c67 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/network @@ -0,0 +1,6 @@ +--- +############################################################################### +# Network interface attachments. + +# List of networks to which these nodes are attached. +network_interfaces: "{{ ansible_control_network_interfaces | unique | list }}" diff --git a/ansible/inventory/group_vars/ansible-control/swap b/ansible/inventory/group_vars/ansible-control/swap new file mode 100644 index 000000000..c2d990bd8 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/swap @@ -0,0 +1,6 @@ +--- +############################################################################### +# Ansible control host swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. 
+swap: "{{ ansible_control_swap }}" diff --git a/ansible/inventory/group_vars/ansible-control/sysctl b/ansible/inventory/group_vars/ansible-control/sysctl new file mode 100644 index 000000000..dba23c496 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/sysctl @@ -0,0 +1,3 @@ +--- +# Dict of sysctl parameters to set. +sysctl_parameters: "{{ ansible_control_sysctl_parameters }}" diff --git a/ansible/inventory/group_vars/ansible-control/tuned b/ansible/inventory/group_vars/ansible-control/tuned new file mode 100644 index 000000000..a442126f4 --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/tuned @@ -0,0 +1,7 @@ +--- +############################################################################### +# Ansible control host tuned configuration. + +# Builtin tuned profile to use. Format is same as that used by giovtorres.tuned +# role. +tuned_active_builtin_profile: "{{ ansible_control_tuned_active_builtin_profile }}" diff --git a/ansible/inventory/group_vars/ansible-control/users b/ansible/inventory/group_vars/ansible-control/users new file mode 100644 index 000000000..5414b4cac --- /dev/null +++ b/ansible/inventory/group_vars/ansible-control/users @@ -0,0 +1,4 @@ +--- +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. 
+users: "{{ ansible_control_users }}" diff --git a/ansible/ip-allocation.yml b/ansible/ip-allocation.yml index b82d934a8..46c49b2a5 100644 --- a/ansible/ip-allocation.yml +++ b/ansible/ip-allocation.yml @@ -1,6 +1,6 @@ --- - name: Ensure IP addresses are allocated - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ ip_allocation_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/kayobe-ansible-user.yml b/ansible/kayobe-ansible-user.yml index c54aa703f..c0d826453 100644 --- a/ansible/kayobe-ansible-user.yml +++ b/ansible/kayobe-ansible-user.yml @@ -7,7 +7,7 @@ # bootstrap process if the account is inaccessible. - name: Determine whether user bootstrapping is required - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control gather_facts: false max_fail_percentage: >- {{ kayobe_ansible_user_max_fail_percentage | @@ -36,8 +36,8 @@ attempting bootstrap when: ssh_result.unreachable | default(false) -- name: Ensure python is installed and the Kayobe Ansible user account exists - hosts: kayobe_user_bootstrap_required_True +- name: Ensure Python is installed + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control gather_facts: no max_fail_percentage: >- {{ kayobe_ansible_user_max_fail_percentage | @@ -46,14 +46,14 @@ default(100) }} vars: ansible_user: "{{ bootstrap_user }}" - # We can't assume that a virtualenv exists at this point, so use the system - # python interpreter. 
- ansible_python_interpreter: /usr/bin/python3 + apt_options: + - "-y" + - "{% if apt_proxy_http %}-o Acquire::http::proxy='{{ apt_proxy_http }}'{% endif %}" + - "{% if apt_proxy_https %}-o Acquire::https::proxy='{{ apt_proxy_https }}'{% endif %}" dnf_options: - "-y" - "{% if 'proxy' in dnf_config %}--setopt=proxy={{ dnf_config['proxy'] }}{% endif %}" tags: - - kayobe-ansible-user - ensure-python tasks: - name: Check if python is installed @@ -62,11 +62,26 @@ failed_when: false register: check_python - # TODO(priteau): Support apt proxy - - name: Ensure python is installed - raw: "test -e /usr/bin/apt && (sudo apt -y update && sudo apt install -y python3-minimal) || (sudo dnf {{ dnf_options | select | join(' ') }} install python3)" + - name: Ensure Python is installed + raw: "(test -e /usr/bin/apt && sudo apt {{ apt_options | select | join(' ') }} update && sudo apt install {{ apt_options | select | join(' ') }} python3-minimal) || (test -e /usr/bin/dnf && sudo dnf {{ dnf_options | select | join(' ') }} install python3)" when: check_python.rc != 0 +- name: Ensure the Kayobe Ansible user account exists + hosts: kayobe_user_bootstrap_required_True + gather_facts: no + max_fail_percentage: >- + {{ kayobe_ansible_user_max_fail_percentage | + default(host_configure_max_fail_percentage) | + default(kayobe_max_fail_percentage) | + default(100) }} + vars: + ansible_user: "{{ bootstrap_user }}" + # We can't assume that a virtualenv exists at this point, so use the system + # python interpreter. 
+ ansible_python_interpreter: /usr/bin/python3 + tags: + - kayobe-ansible-user + tasks: - import_role: name: singleplatform-eng.users vars: @@ -88,7 +103,7 @@ become: True - name: Verify that the Kayobe Ansible user account is accessible - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control gather_facts: false max_fail_percentage: >- {{ kayobe_ansible_user_max_fail_percentage | diff --git a/ansible/kayobe-target-venv.yml b/ansible/kayobe-target-venv.yml index 11d7a8938..53005c86e 100644 --- a/ansible/kayobe-target-venv.yml +++ b/ansible/kayobe-target-venv.yml @@ -3,7 +3,7 @@ # when running kayobe. - name: Ensure a virtualenv exists for kayobe - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control gather_facts: False max_fail_percentage: >- {{ kayobe_target_venv_max_fail_percentage | @@ -26,9 +26,9 @@ filter: "{{ kayobe_ansible_setup_filter }}" gather_subset: "{{ kayobe_ansible_setup_gather_subset }}" when: - - ansible_facts is undefined or not ansible_facts + - ansible_facts is undefined or ansible_facts is falsy - kayobe_virtualenv is defined - register: gather_facts + register: gather_facts_result # Before any facts are gathered, ansible doesn't know about # python virtualenv. # Use default python3 to be safe for this task. 
@@ -104,7 +104,7 @@ gather_subset: "{{ kayobe_ansible_setup_gather_subset }}" when: - kayobe_virtualenv is defined - - gather_facts is not skipped + - gather_facts_result is not skipped - lookup('config', 'DEFAULT_GATHERING') != 'implicit' - block: diff --git a/ansible/logging.yml b/ansible/logging.yml index 801ee3a72..9008149f1 100644 --- a/ansible/logging.yml +++ b/ansible/logging.yml @@ -1,6 +1,6 @@ --- - name: Ensure Logging configuration is applied - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ logging_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/luks.yml b/ansible/luks.yml index 57e4796f1..c47f19d70 100644 --- a/ansible/luks.yml +++ b/ansible/luks.yml @@ -1,6 +1,6 @@ --- - name: Ensure encryption configuration is applied - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ luks_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/lvm.yml b/ansible/lvm.yml index 3d46edadd..39ce81f51 100644 --- a/ansible/lvm.yml +++ b/ansible/lvm.yml @@ -1,6 +1,6 @@ --- - name: Ensure LVM configuration is applied - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ lvm_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/mdadm.yml b/ansible/mdadm.yml index 4d69b92ef..e78f56ac3 100644 --- a/ansible/mdadm.yml +++ b/ansible/mdadm.yml @@ -1,6 +1,6 @@ --- - name: Ensure software RAID configuration is applied - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ mdadm_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/network-connectivity.yml 
b/ansible/network-connectivity.yml index 3e0238d8c..2b7bc2278 100644 --- a/ansible/network-connectivity.yml +++ b/ansible/network-connectivity.yml @@ -1,6 +1,6 @@ --- - name: Check network connectivity between hosts - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ network_connectivity_max_fail_percentage | default(kayobe_max_fail_percentage) | diff --git a/ansible/network.yml b/ansible/network.yml index c5b99ad0c..e584b7fd8 100644 --- a/ansible/network.yml +++ b/ansible/network.yml @@ -1,6 +1,6 @@ --- - name: Ensure networking is configured - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ network_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/pip.yml b/ansible/pip.yml index 3bea4a70d..98e0473fa 100644 --- a/ansible/pip.yml +++ b/ansible/pip.yml @@ -1,6 +1,6 @@ --- - name: Configure local PyPi mirror - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ pip_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/proxy.yml b/ansible/proxy.yml index e96e5674b..ffde65f3e 100644 --- a/ansible/proxy.yml +++ b/ansible/proxy.yml @@ -1,6 +1,6 @@ --- - name: Configure HTTP(S) proxy settings - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ proxy_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/selinux.yml b/ansible/selinux.yml index aa00d4ce4..489967c03 100644 --- a/ansible/selinux.yml +++ b/ansible/selinux.yml @@ -1,6 +1,6 @@ --- - name: Configure SELinux state and reboot if required - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control 
max_fail_percentage: >- {{ selinux_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/swap.yml b/ansible/swap.yml index 82ccbba85..c3c9d3d96 100644 --- a/ansible/swap.yml +++ b/ansible/swap.yml @@ -1,6 +1,6 @@ --- - name: Configure swap - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control become: true max_fail_percentage: >- {{ swap_max_fail_percentage | diff --git a/ansible/sysctl.yml b/ansible/sysctl.yml index cf2a2793e..7565014c7 100644 --- a/ansible/sysctl.yml +++ b/ansible/sysctl.yml @@ -1,6 +1,6 @@ --- - name: Ensure sysctl parameters are configured - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ sysctl_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/time.yml b/ansible/time.yml index 2c02e5bbe..8a5d1ecf0 100644 --- a/ansible/time.yml +++ b/ansible/time.yml @@ -1,6 +1,6 @@ --- - name: Ensure timezone is configured - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ time_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/tuned.yml b/ansible/tuned.yml index 2bf4b2f55..87d8268f6 100644 --- a/ansible/tuned.yml +++ b/ansible/tuned.yml @@ -1,6 +1,6 @@ --- - name: Configure tuned profile - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ tuned_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/users.yml b/ansible/users.yml index 6afc1fd53..67260616c 100644 --- a/ansible/users.yml +++ b/ansible/users.yml @@ -1,6 +1,6 @@ --- - name: Ensure users exist - hosts: seed:seed-hypervisor:overcloud:infra-vms + hosts: seed:seed-hypervisor:overcloud:infra-vms:ansible-control 
max_fail_percentage: >- {{ users_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/ansible/wipe-disks.yml b/ansible/wipe-disks.yml index 79fe2edf1..37caeb641 100644 --- a/ansible/wipe-disks.yml +++ b/ansible/wipe-disks.yml @@ -8,7 +8,7 @@ # also closed and removed from crypttab. - name: Ensure that all unmounted block devices are wiped - hosts: seed-hypervisor:seed:overcloud:infra-vms + hosts: seed-hypervisor:seed:overcloud:infra-vms:ansible-control max_fail_percentage: >- {{ wipe_disks_max_fail_percentage | default(host_configure_max_fail_percentage) | diff --git a/dev/ansible-control-host-configure.sh b/dev/ansible-control-host-configure.sh new file mode 100755 index 000000000..af65250a4 --- /dev/null +++ b/dev/ansible-control-host-configure.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -eu +set -o pipefail + +# Simple script to configure a development environment as an Ansible control host. + +PARENT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +source "${PARENT}/functions" + + +function main { + config_init + ansible_control_host_configure +} + +main diff --git a/dev/functions b/dev/functions index 20e571d26..64c335724 100644 --- a/dev/functions +++ b/dev/functions @@ -320,6 +320,16 @@ function control_host_upgrade { echo "Upgraded control host after $i attempts" } +function ansible_control_host_configure { + # Deploy an Ansible control host. + environment_setup + + control_host_bootstrap + + echo "Configuring the Ansible control host" + run_kayobe control host configure +} + function seed_hypervisor_deploy { # Deploy a seed hypervisor. 
environment_setup diff --git a/doc/source/administration/ansible-control.rst b/doc/source/administration/ansible-control.rst new file mode 100644 index 000000000..21c3f0dbc --- /dev/null +++ b/doc/source/administration/ansible-control.rst @@ -0,0 +1,59 @@ +=================================== +Ansible Control Host Administration +=================================== + +Updating Packages +================= + +It is possible to update packages on the Ansible control host. + +Package Repositories +-------------------- + +If using custom DNF package repositories on CentOS or Rocky, it may be +necessary to update these prior to running a package update. To do this, update +the configuration in ``${KAYOBE_CONFIG_PATH}/dnf.yml`` and run the following +command:: + + (kayobe) $ kayobe control host configure --tags dnf + +Package Update +-------------- + +To update one or more packages:: + + (kayobe) $ kayobe control host package update --packages <package1>,<package2> + +To update all eligible packages, use ``*``, escaping if necessary:: + + (kayobe) $ kayobe control host package update --packages "*" + +To only install updates that have been marked security related:: + + (kayobe) $ kayobe control host package update --packages "*" --security + +Note that these commands do not affect packages installed in containers, only +those installed on the host. + +Kernel Updates +-------------- + +If the kernel has been updated, you will probably want to reboot the host +to boot into the new kernel. This can be done using a command such as the +following:: + + (kayobe) $ kayobe control host command run --command "shutdown -r" --become + +Running Commands +================ + +It is possible to run a command on the host:: + + (kayobe) $ kayobe control host command run --command "<command>" + +For example:: + + (kayobe) $ kayobe control host command run --command "service docker restart" + +To execute the command with root privileges, add the ``--become`` argument.
+Adding the ``--verbose`` argument allows the output of the command to be seen. diff --git a/doc/source/administration/index.rst b/doc/source/administration/index.rst index 79cee41ce..dbdef7672 100644 --- a/doc/source/administration/index.rst +++ b/doc/source/administration/index.rst @@ -9,6 +9,7 @@ administrative tasks. :maxdepth: 2 general + ansible-control seed infra-vms overcloud diff --git a/doc/source/configuration/reference/hosts.rst b/doc/source/configuration/reference/hosts.rst index 694878339..b0ed019a3 100644 --- a/doc/source/configuration/reference/hosts.rst +++ b/doc/source/configuration/reference/hosts.rst @@ -7,6 +7,7 @@ Host Configuration This section covers configuration of hosts. It does not cover configuration or deployment of containers. Hosts that are configured by Kayobe include: +* Ansible control host (``kayobe control host configure``) * Seed hypervisor (``kayobe seed hypervisor host configure``) * Seed (``kayobe seed host configure``) * Infra VMs (``kayobe infra vm host configure``) @@ -26,6 +27,7 @@ Some host configuration options are set via global variables, and others have a variable for each type of host. The latter variables are included in the following files under ``${KAYOBE_CONFIG_PATH}``: +* ``ansible-control.yml`` * ``seed-hypervisor.yml`` * ``seed.yml`` * ``compute.yml`` @@ -83,6 +85,7 @@ user. In cloud images, there is often a user named after the OS distro, e.g. variable, except for CentOS which uses ``cloud-user``, but may be set via the following variables: +* ``ansible_control_bootstrap_user`` * ``seed_hypervisor_bootstrap_user`` * ``seed_bootstrap_user`` * ``infra_vm_bootstrap_user`` @@ -181,6 +184,7 @@ that used by the ``users`` variable of the `singleplatform-eng.users `__ role. The following variables can be used to set the users for specific types of hosts: +* ``ansible_control_users`` * ``seed_hypervisor_users`` * ``seed_users`` * ``infra_vm_users`` @@ -585,6 +589,7 @@ Ubuntu systems. 
The following variables can be used to set whether to enable firewalld: +* ``ansible_control_firewalld_enabled`` * ``seed_hypervisor_firewalld_enabled`` * ``seed_firewalld_enabled`` * ``infra_vm_firewalld_enabled`` @@ -596,6 +601,7 @@ The following variables can be used to set whether to enable firewalld: When firewalld is enabled, the following variables can be used to configure a list of zones to create. Each item is a dict containing a ``zone`` item: +* ``ansible_control_firewalld_zones`` * ``seed_hypervisor_firewalld_zones`` * ``seed_firewalld_zones`` * ``infra_vm_firewalld_zones`` @@ -607,6 +613,7 @@ list of zones to create. Each item is a dict containing a ``zone`` item: The following variables can be used to set a default zone. The default is unset, in which case the default zone will not be changed: +* ``ansible_control_firewalld_default_zone`` * ``seed_hypervisor_firewalld_default_zone`` * ``seed_firewalld_default_zone`` * ``infra_vm_firewalld_default_zone`` @@ -621,6 +628,7 @@ are omitted if not provided, with the following exceptions: ``offline`` (default ``true``), ``permanent`` (default ``true``), ``state`` (default ``enabled``): +* ``ansible_control_firewalld_rules`` * ``seed_hypervisor_firewalld_rules`` * ``seed_firewalld_rules`` * ``infra_vm_firewalld_rules`` @@ -693,6 +701,7 @@ Tuned Built-in ``tuned`` profiles can be applied to hosts. 
The following variables can be used to set a ``tuned`` profile to specific types of hosts: +* ``ansible_control_tuned_active_builtin_profile`` * ``seed_hypervisor_tuned_active_builtin_profile`` * ``seed_tuned_active_builtin_profile`` * ``compute_tuned_active_builtin_profile`` @@ -704,6 +713,7 @@ can be used to set a ``tuned`` profile to specific types of hosts: By default, Kayobe applies a ``tuned`` profile matching the role of each host in the system: +* Ansible control host: ``throughput-performance`` * seed hypervisor: ``virtual-host`` * seed: ``virtual-guest`` * infrastructure VM: ``virtual-guest`` @@ -729,6 +739,7 @@ Arbitrary ``sysctl`` configuration can be applied to hosts. The variable format is a dict/map, mapping parameter names to their required values. The following variables can be used to set ``sysctl`` configuration specific types of hosts: +* ``ansible_control_sysctl_parameters`` * ``seed_hypervisor_sysctl_parameters`` * ``seed_sysctl_parameters`` * ``infra_vm_sysctl_parameters`` @@ -828,6 +839,8 @@ Kayobe will configure `Chrony `__ on all hosts in seed seed-hypervisor overcloud + infra-vms + ansible-control This provides a flexible way to opt in or out of having kayobe manage the NTP service. @@ -870,6 +883,7 @@ arrays they want to manage with Kayobe. Software RAID arrays may be configured via the ``mdadm_arrays`` variable. For convenience, this is mapped to the following variables: +* ``ansible_control_mdadm_arrays`` * ``seed_hypervisor_mdadm_arrays`` * ``seed_mdadm_arrays`` * ``infra_vm_mdadm_arrays`` @@ -906,6 +920,7 @@ Encryption Encrypted block devices may be configured via the ``luks_devices`` variable. For convenience, this is mapped to the following variables: +* ``ansible_control_luks_devices`` * ``seed_hypervisor_luks_devices`` * ``seed_luks_devices`` * ``infra_vm_luks_devices`` @@ -943,6 +958,7 @@ Logical Volume Manager (LVM) physical volumes, volume groups, and logical volumes may be configured via the ``lvm_groups`` variable. 
For convenience, this is mapped to the following variables: +* ``ansible_control_lvm_groups`` * ``seed_hypervisor_lvm_groups`` * ``seed_lvm_groups`` * ``infra_vm_lvm_groups`` @@ -980,6 +996,7 @@ can optionally be created. The logical volume is created in volume group called This configuration is enabled by the following variables, which default to ``false``: +* ``ansible_control_lvm_group_data_enabled`` * ``compute_lvm_group_data_enabled`` * ``controller_lvm_group_data_enabled`` * ``seed_lvm_group_data_enabled`` @@ -989,6 +1006,7 @@ This configuration is enabled by the following variables, which default to To use this configuration, a list of disks must be configured via the following variables: +* ``ansible_control_lvm_group_data_disks`` * ``seed_lvm_group_data_disks`` * ``infra_vm_lvm_group_data_disks`` * ``compute_lvm_group_data_disks`` @@ -1008,6 +1026,7 @@ For example, to configure two of the seed's disks for use by LVM: The Docker volumes LVM volume is assigned a size given by the following variables, with a default value of 75% (of the volume group's capacity): +* ``ansible_control_lvm_group_data_lv_docker_volumes_size`` * ``seed_lvm_group_data_lv_docker_volumes_size`` * ``infra_vm_lvm_group_data_lv_docker_volumes_size`` * ``compute_lvm_group_data_lv_docker_volumes_size`` @@ -1038,6 +1057,7 @@ Custom LVM To define additional logical logical volumes in the default ``data`` volume group, modify one of the following variables: +* ``ansible_control_lvm_group_data_lvs`` * ``seed_lvm_group_data_lvs`` * ``infra_vm_lvm_group_data_lvs`` * ``compute_lvm_group_data_lvs`` @@ -1063,6 +1083,7 @@ include the LVM volume for Docker volume data: It is possible to define additional LVM volume groups via the following variables: +* ``ansible_control_lvm_groups_extra`` * ``seed_lvm_groups_extra`` * ``infra_vm_lvm_groups_extra`` * ``compute_lvm_groups_extra`` @@ -1134,6 +1155,25 @@ example, to use podman: container_engine: podman +The container engine is deployed on hosts in the 
``container-engine`` group. By +default this includes the following groups: + +.. code-block:: ini + + [container-engine:children] + # Hosts in this group will have Docker/Podman installed. + seed + controllers + network + monitoring + storage + compute + ansible-control + +Note that deployment of a container engine is disabled by default on the +Ansible control host. This can be changed by setting +``ansible_control_container_engine_enabled`` to ``true``. + Podman ------ @@ -1360,6 +1400,7 @@ Swap Swap files and devices may be configured via the ``swap`` variable. For convenience, this is mapped to the following variables: +* ``ansible_control_swap`` * ``seed_swap`` * ``seed_hypervisor_swap`` * ``infra_vm_swap`` diff --git a/doc/source/configuration/reference/network.rst b/doc/source/configuration/reference/network.rst index d104f9897..9e6789006 100644 --- a/doc/source/configuration/reference/network.rst +++ b/doc/source/configuration/reference/network.rst @@ -880,6 +880,19 @@ Kayobe's playbook group variables define some sensible defaults for this variable for hosts in the top level standard groups. These defaults are set using the network roles typically required by the group. +Ansible Control Host +-------------------- + +By default, the Ansible control host is attached to the following network: + +* overcloud admin network + +This list may be extended by setting +``ansible_control_extra_network_interfaces`` to a list of names of additional +networks to attach. Alternatively, the list may be completely overridden by +setting ``ansible_control_network_interfaces``. These variables are found in +``${KAYOBE_CONFIG_PATH}/ansible-control.yml``. 
+ Seed ---- diff --git a/doc/source/deployment.rst b/doc/source/deployment.rst index 5f653d5af..c39d0fe3c 100644 --- a/doc/source/deployment.rst +++ b/doc/source/deployment.rst @@ -29,6 +29,35 @@ To bootstrap the Ansible control host:: (kayobe) $ kayobe control host bootstrap +Since the Gazpacho 20.0.0 release it is possible to manage the Ansible control +host's configuration in the same way as other hosts. If using this feature, the +Ansible control host should be added to the Kayobe inventory in the +``ansible-control`` group. Typically this host will be ``localhost``, although +it is also possible to manage an Ansible control host remotely. For example: + +.. code-block:: ini + :caption: ``${KAYOBE_CONFIG_PATH}/inventory/groups`` + + [ansible-control] + localhost + +To configure the Ansible control host OS:: + + (kayobe) $ kayobe control host configure + +.. note:: + + If the Ansible control host uses disks that have been in use in a previous + installation, it may be necessary to wipe partition and LVM data from those + disks. To wipe all disks that are not mounted during host configuration:: + + (kayobe) $ kayobe control host configure --wipe-disks + +.. seealso:: + + Information on configuration of hosts is available :ref:`here + `. + .. _physical-network: Physical Network diff --git a/doc/source/usage.rst b/doc/source/usage.rst index 70581eb9e..499a1b5ee 100644 --- a/doc/source/usage.rst +++ b/doc/source/usage.rst @@ -54,7 +54,7 @@ password for a given service. This can be done with ``ansible-vault view`` however if an absolute path is not provided it will cause the command to fail. Therefore, to make reading the contents of this file easier for administrators -it is possible to use ``kayobe overcloud passwords view`` which will +it is possible to use ``kayobe overcloud service passwords view`` which will temporarily decrypt and display the contents of ``kolla/passwords.yml`` for the active kayobe environment. 
diff --git a/etc/kayobe/ansible-control.yml b/etc/kayobe/ansible-control.yml new file mode 100644 index 000000000..cd6b563d5 --- /dev/null +++ b/etc/kayobe/ansible-control.yml @@ -0,0 +1,128 @@ +--- +############################################################################### +# Ansible control host configuration. + +# User with which to access the Ansible control host via SSH during bootstrap, +# in order to setup the Kayobe user account. Default is {{ os_distribution }}. +#ansible_control_bootstrap_user: + +############################################################################### +# Ansible control host network interface configuration. + +# List of networks to which Ansible control host are attached. +#ansible_control_network_interfaces: + +# List of default networks to which Ansible control host are attached. +#ansible_control_default_network_interfaces: + +# List of extra networks to which Ansible control host are attached. +#ansible_control_extra_network_interfaces: + +############################################################################### +# Ansible control host software RAID configuration. + +# List of software RAID arrays. See mrlesmithjr.mdadm role for format. +#ansible_control_mdadm_arrays: + +############################################################################### +# Ansible control host encryption configuration. + +# List of block devices to encrypt. See stackhpc.luks role for format. +#ansible_control_luks_devices: + +############################################################################### +# Ansible control host LVM configuration. + +# List of Ansible control host volume groups. See mrlesmithjr.manage_lvm role +# for format. +#ansible_control_lvm_groups: + +# Default list of Ansible control host volume groups. See +# mrlesmithjr.manage_lvm role for format. +#ansible_control_lvm_groups_default: + +# Additional list of Ansible control host volume groups. See +# mrlesmithjr.manage_lvm role for format. 
+#ansible_control_lvm_groups_extra: + +# Whether a 'data' LVM volume group should exist on the Ansible control host. +# By default this contains a 'docker-volumes' logical volume for Docker volume +# storage. Default is false. +#ansible_control_lvm_group_data_enabled: + +# Ansible control host LVM volume group for data. See mrlesmithjr.manage_lvm +# role for format. +#ansible_control_lvm_group_data: + +# List of disks for use by Ansible control host LVM data volume group. Default +# to an invalid value to require configuration. +#ansible_control_lvm_group_data_disks: + +# List of LVM logical volumes for the data volume group. +#ansible_control_lvm_group_data_lvs: + +# Docker volumes LVM backing volume. +#ansible_control_lvm_group_data_lv_docker_volumes: + +# Size of docker volumes LVM backing volume. +#ansible_control_lvm_group_data_lv_docker_volumes_size: + +# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking. +#ansible_control_lvm_group_data_lv_docker_volumes_fs: + +############################################################################### +# Ansible control host sysctl configuration. + +# Dict of sysctl parameters to set. +#ansible_control_sysctl_parameters: + +############################################################################### +# Ansible control host tuned configuration. + +# Builtin tuned profile to use. Format is same as that used by giovtorres.tuned +# role. Default is throughput-performance. +#ansible_control_tuned_active_builtin_profile: + +############################################################################### +# Ansible control host user configuration. + +# List of users to create. This should be in a format accepted by the +# singleplatform-eng.users role. +#ansible_control_users: + +############################################################################### +# Ansible control host firewalld configuration. + +# Whether to install and enable firewalld. 
+#ansible_control_firewalld_enabled: + +# A list of zones to create. Each item is a dict containing a 'zone' item. +#ansible_control_firewalld_zones: + +# A firewalld zone to set as the default. Default is unset, in which case the +# default zone will not be changed. +#ansible_control_firewalld_default_zone: + +# A list of firewall rules to apply. Each item is a dict containing arguments +# to pass to the firewalld module. Arguments are omitted if not provided, with +# the following exceptions: +# - offline: true +# - permanent: true +# - state: enabled +#ansible_control_firewalld_rules: + +############################################################################### +# Ansible control host swap configuration. + +# List of swap devices. Each item is a dict containing a 'device' item. +#ansible_control_swap: + +############################################################################### +# Ansible control host container engine configuration. + +# Whether a container engine should be configured. Default is false. +#ansible_control_container_engine_enabled: + +############################################################################### +# Dummy variable to allow Ansible to accept this file. +workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/inventory/group_vars/ansible-control/ansible-python-interpreter b/etc/kayobe/inventory/group_vars/ansible-control/ansible-python-interpreter new file mode 100644 index 000000000..54abbf23c --- /dev/null +++ b/etc/kayobe/inventory/group_vars/ansible-control/ansible-python-interpreter @@ -0,0 +1,3 @@ +--- +# Use a virtual environment for remote operations. 
+ansible_python_interpreter: "{{ virtualenv_path }}/kayobe/bin/python" diff --git a/etc/kayobe/inventory/group_vars/ansible-control/network-interfaces b/etc/kayobe/inventory/group_vars/ansible-control/network-interfaces new file mode 100644 index 000000000..6880128da --- /dev/null +++ b/etc/kayobe/inventory/group_vars/ansible-control/network-interfaces @@ -0,0 +1,20 @@ +--- +############################################################################### +# Network interface definitions for the ansible-control group. + +# NOTE: The content of this section is very deployment-specific, since it +# depends on the names and types of networks in the deployment. It should +# define the group-specific attributes of networks. The following example shows +# a basic configuration for a network called "example": +# +# example_interface: eth0 +# +# Global network attributes such as subnet CIDRs are typically configured in +# etc/kayobe/networks.yml. +# +# Further information on the available network attributes is provided in the +# network configuration reference in the Kayobe documentation. + +############################################################################### +# Dummy variable to allow Ansible to accept this file. +workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/inventory/groups b/etc/kayobe/inventory/groups index 0f57def4a..fee8c48e7 100644 --- a/etc/kayobe/inventory/groups +++ b/etc/kayobe/inventory/groups @@ -1,6 +1,12 @@ # Kayobe groups inventory file. This file should generally not be modified. # If declares the top-level groups and sub-groups. +############################################################################### +# Ansible control host groups. + +[ansible-control] +# Empty group to provide declaration of ansible-control group. + ############################################################################### # Seed groups. 
@@ -73,6 +79,7 @@ network monitoring storage compute +ansible-control [docker-registry:children] # Hosts in this group will have a Docker Registry deployed. This group should @@ -86,6 +93,7 @@ seed seed-hypervisor overcloud infra-vms +ansible-control ############################################################################### # Baremetal compute node groups. diff --git a/kayobe/cli/commands.py b/kayobe/cli/commands.py index 2c9cb97a0..4dbcb4cdc 100644 --- a/kayobe/cli/commands.py +++ b/kayobe/cli/commands.py @@ -310,6 +310,106 @@ def take_action(self, parsed_args): self.run_kayobe_playbooks(parsed_args, playbooks) +class ControlHostConfigure(KayobeAnsibleMixin, VaultMixin, Command): + """Configure the Ansible control host OS and services. + + * Allocate IP addresses for all configured networks. + * Add the host to SSH known hosts. + * Configure a user account for use by kayobe for SSH access. + * Configure proxy settings. + * Configure package repos. + * Configure a PyPI mirror. + * Optionally, create a virtualenv for remote target hosts. + * Optionally, wipe unmounted disk partitions (--wipe-disks). + * Configure user accounts, group associations, and authorised SSH keys. + * Configure SELinux. + * Configure the host's network interfaces. + * Configure a firewall. + * Configure tuned profile. + * Set sysctl parameters. + * Configure timezone and ntp. + * Optionally, configure software RAID arrays. + * Optionally, configure encryption. + * Configure LVM volumes. + * Configure swap. + * Optionally, configure a container engine. + """ + + def get_parser(self, prog_name): + parser = super(ControlHostConfigure, self).get_parser(prog_name) + group = parser.add_argument_group("Host Configuration") + group.add_argument("--wipe-disks", action='store_true', + help="wipe partition and LVM data from all disks " + "that are not mounted. 
Warning: this can " + "result in the loss of data") + return parser + + def take_action(self, parsed_args): + self.app.LOG.debug("Configuring Ansible control host OS") + + # Allocate IP addresses. + playbooks = _build_playbook_list("ip-allocation") + self.run_kayobe_playbooks(parsed_args, playbooks, + limit="ansible-control") + + # Kayobe playbooks. + kwargs = {} + if parsed_args.wipe_disks: + kwargs["extra_vars"] = {"wipe_disks": True} + playbooks = _build_playbook_list("control-host-configure") + self.run_kayobe_playbooks(parsed_args, playbooks, + limit="ansible-control", **kwargs) + + +class ControlHostCommandRun(KayobeAnsibleMixin, VaultMixin, Command): + """Run command on the Ansible control host.""" + + def get_parser(self, prog_name): + parser = super(ControlHostCommandRun, self).get_parser(prog_name) + group = parser.add_argument_group("Host Command Run") + group.add_argument("--command", required=True, + help="Command to run (required).") + group.add_argument("--show-output", action='store_true', + help="Show command output") + return parser + + def take_action(self, parsed_args): + self.app.LOG.debug("Run command on Ansible control host") + extra_vars = { + "host_command_to_run": utils.escape_jinja(parsed_args.command), + "show_output": parsed_args.show_output} + playbooks = _build_playbook_list("host-command-run") + self.run_kayobe_playbooks(parsed_args, playbooks, + limit="ansible-control", + extra_vars=extra_vars) + + +class ControlHostPackageUpdate(KayobeAnsibleMixin, VaultMixin, Command): + """Update packages on the Ansible control host.""" + + def get_parser(self, prog_name): + parser = super(ControlHostPackageUpdate, self).get_parser(prog_name) + group = parser.add_argument_group("Host Package Updates") + group.add_argument("--packages", required=True, + help="List of packages to update. 
Use '*' to " + "update all packages.") + group.add_argument("--security", action='store_true', + help="Only install updates that have been marked " + "security related.") + return parser + + def take_action(self, parsed_args): + self.app.LOG.debug("Updating Ansible control host packages") + extra_vars = { + "host_package_update_packages": parsed_args.packages, + "host_package_update_security": parsed_args.security, + } + playbooks = _build_playbook_list("host-package-update") + self.run_kayobe_playbooks(parsed_args, playbooks, + limit="ansible-control", + extra_vars=extra_vars) + + class ControlHostUpgrade(KayobeAnsibleMixin, VaultMixin, Command): """Upgrade the Kayobe control environment. diff --git a/kayobe/tests/unit/cli/test_commands.py b/kayobe/tests/unit/cli/test_commands.py index f50a6e1e7..a72d76ecd 100644 --- a/kayobe/tests/unit/cli/test_commands.py +++ b/kayobe/tests/unit/cli/test_commands.py @@ -117,6 +117,113 @@ def test_control_host_bootstrap_with_passwords( ] self.assertListEqual(expected_calls, mock_kolla_run.call_args_list) + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_control_host_configure(self, mock_run): + command = commands.ControlHostConfigure(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args([]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [utils.get_data_files_path("ansible", "ip-allocation.yml")], + limit="ansible-control", + ), + mock.call( + mock.ANY, + [ + utils.get_data_files_path( + "ansible", "control-host-configure.yml"), + ], + limit="ansible-control", + ), + ] + self.assertListEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_control_host_configure_wipe_disks(self, mock_run): + command = commands.ControlHostConfigure(TestApp(), []) + parser = command.get_parser("test") + parsed_args = 
parser.parse_args(["--wipe-disks"]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [utils.get_data_files_path("ansible", "ip-allocation.yml")], + limit="ansible-control", + ), + mock.call( + mock.ANY, + [ + utils.get_data_files_path( + "ansible", "control-host-configure.yml"), + ], + limit="ansible-control", + extra_vars={"wipe_disks": True}, + ), + ] + self.assertListEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_control_host_command_run(self, mock_run): + command = commands.ControlHostCommandRun(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args(["--command", "ls -a", + "--show-output"]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [ + utils.get_data_files_path("ansible", + "host-command-run.yml"), + ], + limit="ansible-control", + extra_vars={ + "host_command_to_run": utils.escape_jinja("ls -a"), + "show_output": True} + ), + ] + self.assertListEqual(expected_calls, mock_run.call_args_list) + + @mock.patch.object(commands.KayobeAnsibleMixin, + "run_kayobe_playbooks") + def test_control_host_package_update_all(self, mock_run): + command = commands.ControlHostPackageUpdate(TestApp(), []) + parser = command.get_parser("test") + parsed_args = parser.parse_args(["--packages", "*"]) + + result = command.run(parsed_args) + self.assertEqual(0, result) + + expected_calls = [ + mock.call( + mock.ANY, + [ + utils.get_data_files_path( + "ansible", "host-package-update.yml"), + ], + limit="ansible-control", + extra_vars={ + "host_package_update_packages": "*", + "host_package_update_security": False, + }, + ), + ] + self.assertListEqual(expected_calls, mock_run.call_args_list) + @mock.patch.object(ansible, "install_galaxy_roles", autospec=True) @mock.patch.object(ansible, "install_galaxy_collections", 
autospec=True) @mock.patch.object(ansible, "prune_galaxy_roles", autospec=True) diff --git a/releasenotes/notes/add-passwords-view-command-2f55d83dca037e3d.yaml b/releasenotes/notes/add-passwords-view-command-2f55d83dca037e3d.yaml index b908fb105..2739d0ba8 100644 --- a/releasenotes/notes/add-passwords-view-command-2f55d83dca037e3d.yaml +++ b/releasenotes/notes/add-passwords-view-command-2f55d83dca037e3d.yaml @@ -2,4 +2,4 @@ features: - | Add support for easily viewing the content of ``kolla/passwords.yml`` with - the new command ``kayobe overcloud passwords view``. + the new command ``kayobe overcloud service passwords view``. diff --git a/releasenotes/notes/bootstrap-apt-proxy-bb121cf577eaeba4.yaml b/releasenotes/notes/bootstrap-apt-proxy-bb121cf577eaeba4.yaml new file mode 100644 index 000000000..5ed203141 --- /dev/null +++ b/releasenotes/notes/bootstrap-apt-proxy-bb121cf577eaeba4.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support for bootstrapping Python on Ubuntu through a proxy. diff --git a/releasenotes/notes/control-host-configure-ca4bb8c4de59c370.yaml b/releasenotes/notes/control-host-configure-ca4bb8c4de59c370.yaml new file mode 100644 index 000000000..681a4ae08 --- /dev/null +++ b/releasenotes/notes/control-host-configure-ca4bb8c4de59c370.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Adds support for managing the Ansible control host configuration. This is + provided by the new ``kayobe control host configure`` command, and uses the + existing host configuration features in Kayobe. + + Also provided is a ``kayobe control host command run`` command for running + commands on the Ansible control host, and a ``kayobe control host package + update`` command for updating its OS packages. 
diff --git a/requirements.yml b/requirements.yml index 861b53393..e0aa67977 100644 --- a/requirements.yml +++ b/requirements.yml @@ -4,7 +4,7 @@ collections: type: git version: master - name: community.docker - version: 3.11.0 + version: 5.0.5 - name: community.network version: 5.1.0 - name: dellemc.os6 @@ -22,37 +22,36 @@ collections: - name: stackhpc.network version: 1.0.0 - name: stackhpc.openstack - version: 0.6.0 + version: 0.9.0 roles: - src: ahuffman.resolv - version: 1.3.1 + version: 1.3.2 - src: giovtorres.tuned - version: 1.2.0 + version: 2.0.2 - src: git+https://github.com/stackhpc/ansible-role-configdrive.git name: jriguera.configdrive version: fb199247333e72e38a9d414cf7b6144daa645477 - src: MichaelRigart.interfaces - version: v1.15.6 + version: v1.16.1 - src: mrlesmithjr.chrony version: v0.1.6 - src: mrlesmithjr.manage_lvm - version: v0.2.8 + version: v0.2.13 - src: mrlesmithjr.mdadm - version: v0.1.1 + version: v0.1.9 - src: singleplatform-eng.users - version: v1.2.5 + version: v1.2.6 - src: stackhpc.drac version: 1.1.6 - src: stackhpc.drac-facts - version: 1.0.0 - - src: git+https://github.com/stackhpc/ansible-role-libvirt-host.git - name: stackhpc.libvirt-host - version: 9a947f74abdcd2e0d4e3371162f8299aef259271 + version: v1.0.1 + - src: stackhpc.libvirt-host + version: v1.15.0 - src: stackhpc.libvirt-vm version: v1.16.3 - src: stackhpc.luks - version: 0.4.2 + version: 0.4.4 - src: stackhpc.os-ironic-state version: v1.3.1 - src: stackhpc.timezone diff --git a/setup.cfg b/setup.cfg index 7e493174f..81bc91a1d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -47,6 +47,9 @@ kayobe.cli= baremetal_compute_serial_console_enable = kayobe.cli.commands:BaremetalComputeSerialConsoleEnable baremetal_compute_serial_console_disable = kayobe.cli.commands:BaremetalComputeSerialConsoleDisable control_host_bootstrap = kayobe.cli.commands:ControlHostBootstrap + control_host_command_run = kayobe.cli.commands:ControlHostCommandRun + control_host_configure = 
kayobe.cli.commands:ControlHostConfigure + control_host_package_update = kayobe.cli.commands:ControlHostPackageUpdate control_host_upgrade = kayobe.cli.commands:ControlHostUpgrade configuration_dump = kayobe.cli.commands:ConfigurationDump environment_create = kayobe.cli.commands:EnvironmentCreate @@ -127,6 +130,12 @@ kayobe.cli.baremetal_compute_serial_console_disable = hooks = kayobe.cli.commands:HookDispatcher kayobe.cli.control_host_bootstrap = hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.control_host_command_run = + hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.control_host_configure = + hooks = kayobe.cli.commands:HookDispatcher +kayobe.cli.control_host_package_update = + hooks = kayobe.cli.commands:HookDispatcher kayobe.cli.control_host_upgrade = hooks = kayobe.cli.commands:HookDispatcher kayobe.cli.configuration_dump = diff --git a/tox.ini b/tox.ini index de8c0fa6b..ed74876cd 100644 --- a/tox.ini +++ b/tox.ini @@ -92,7 +92,7 @@ commands = setenv = {[testenv:linters]setenv} deps = {[testenv:linters]deps} commands = - ansible-lint -p --exclude etc --exclude kayobe/plugins --exclude playbooks --exclude releasenotes --exclude roles --exclude zuul.d --exclude ansible/idrac-bootstrap.yml + ansible-lint -p --exclude etc --exclude kayobe/plugins --exclude playbooks --exclude releasenotes --exclude roles --exclude zuul.d --exclude ansible/idrac-bootstrap.yml --exclude .ansible --exclude ansible/roles/*.* --exclude ansible/collections [testenv:ansible-syntax] commands =