diff --git a/ansible/playbook/ocp/ocp_openstack_install.yml b/ansible/playbook/ocp/ocp_openstack_install.yml
index 161cec9a..3c9363e8 100644
--- a/ansible/playbook/ocp/ocp_openstack_install.yml
+++ b/ansible/playbook/ocp/ocp_openstack_install.yml
@@ -1,18 +1,21 @@
 ---
+- name: "Build OpenStack authentication for v3password"
+  import_playbook: "../openstack/openstack_auth_passstore_v3password.yml"
+
 - name: "Install OCP"
   hosts: localhost
   gather_facts: true
 
-  pre_tasks:
-    - name: "Set openstack_auth facts"
-      set_fact:
-        openstack_auth:
-          openstack_project_name: "{{ query('passwordstore', 'openstack/host/project_name')[0] }}"
-          openstack_console_user: "{{ query('passwordstore', 'openstack/host/console_user')[0] }}"
-          openstack_console_password: "{{ query('passwordstore', 'openstack/host/console_pw')[0] }}"
-          openstack_user_domain: "{{ query('passwordstore', 'openstack/host/console_domain')[0] }}"
-          openstack_project_domain: "{{ query('passwordstore', 'openstack/host/os_domain')[0] }}"
-          openstack_os_auth_url: "{{ query('passwordstore', 'openstack/host/os_auth_url')[0] }}"
+  # pre_tasks:
+  #   - name: "Set openstack_auth facts"
+  #     set_fact:
+  #       openstack_auth:
+  #         openstack_project_name: "{{ query('passwordstore', 'openstack/host/project_name')[0] }}"
+  #         openstack_console_user: "{{ query('passwordstore', 'openstack/host/console_user')[0] }}"
+  #         openstack_console_password: "{{ query('passwordstore', 'openstack/host/console_pw')[0] }}"
+  #         openstack_user_domain: "{{ query('passwordstore', 'openstack/host/console_domain')[0] }}"
+  #         openstack_project_domain: "{{ query('passwordstore', 'openstack/host/os_domain')[0] }}"
+  #         openstack_os_auth_url: "{{ query('passwordstore', 'openstack/host/os_auth_url')[0] }}"
 
   tasks:
     - name: "Deploy OCP"
diff --git a/ansible/playbook/openstack/README.adoc b/ansible/playbook/openstack/README.adoc
index 80d4b311..044bc4b8 100644
--- a/ansible/playbook/openstack/README.adoc
+++ b/ansible/playbook/openstack/README.adoc
@@ -101,12 +101,12 @@ This name will be used both as hostname as well as Ansible Inventory name.
 
 [source,bash]
 ----
-$ VM_NAME=vm20230627-t01
+VM_NAME=vm20230627-t01
 ----
 
 [source,bash]
 ----
-$ ansible-playbook ansible/playbook/openstack/openstack_vm_create_passwordstore.yml -e '{"openstack": {"vm": {"network": "provider_net_shared","image": "Fedora-Cloud-Base-35", "flavor": "m1.medium"}}}' -e vm_name=${VM_NAME}
+ansible-playbook ansible/playbook/openstack/openstack_vm_create_passwordstore.yml -e '{"openstack": {"vm": {"network": "provider_net_shared","image": "Fedora-Cloud-Base-35", "flavor": "m1.medium"}}}' -e vm_name=${VM_NAME}
 ----
 
 Although some failures might occur, they can be ignored and shouldn't affect the process. This playbook should finish with no failed tasks.
diff --git a/ansible/playbook/openstack/check.yml b/ansible/playbook/openstack/check.yml
new file mode 100644
index 00000000..74262421
--- /dev/null
+++ b/ansible/playbook/openstack/check.yml
@@ -0,0 +1,12 @@
+---
+- name: "Check OpenStack"
+  set_fact:
+    pass_provider: "openstack"
+  when: "pass_provider is not defined"
+
+- name: "Set OpenStack default variables"
+  set_fact:
+    use_generic_ssh_key: True
+    generic_ssh_key_name: generic-key
+  when: "use_generic_ssh_key is not defined or (use_generic_ssh_key | bool)"
+...
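The new `check.yml` is a bare task file rather than a play, so it is presumably meant to be pulled in from an existing play with `include_tasks`. A minimal sketch of how it might be consumed (the wrapper play below is an assumption, not part of this diff):

[source,yaml]
----
---
# Hypothetical wrapper play (not in this diff): applies the shared
# OpenStack defaults/checks defined in check.yml on the controller.
- name: "Validate OpenStack defaults"
  hosts: localhost
  gather_facts: false
  tasks:
    - name: "Run the OpenStack check tasks"
      ansible.builtin.include_tasks: check.yml
----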
diff --git a/ansible/playbook/openstack/openstack_auth_passstore_v3applicationcredential.yml b/ansible/playbook/openstack/openstack_auth_passstore_v3applicationcredential.yml
new file mode 100644
index 00000000..39af40c9
--- /dev/null
+++ b/ansible/playbook/openstack/openstack_auth_passstore_v3applicationcredential.yml
@@ -0,0 +1,15 @@
+---
+- name: "OpenStack authentication with passwordstore and v3applicationcredential"
+  hosts: localhost
+
+  tasks:
+
+    - name: "Set facts"
+      ansible.builtin.set_fact:
+        rhos_authentication_type: v3applicationcredential
+        rhos_authentication:
+          auth_url: "{{ query('passwordstore', 'openstack/host/os_auth_url')[0] }}"
+          application_credential_id: "{{ query('passwordstore', 'openstack/host/app_cred_id')[0] }}"
+          application_credential_secret: "{{ query('passwordstore', 'openstack/host/app_cred_secret')[0] }}"
+
+...
diff --git a/ansible/playbook/openstack/openstack_auth_passstore_v3password.yml b/ansible/playbook/openstack/openstack_auth_passstore_v3password.yml
index 59dc0b91..896902a2 100644
--- a/ansible/playbook/openstack/openstack_auth_passstore_v3password.yml
+++ b/ansible/playbook/openstack/openstack_auth_passstore_v3password.yml
@@ -1,5 +1,5 @@
 ---
-- name: "OpenStack Authentication v3password"
+- name: "OpenStack authentication with passwordstore and v3password"
   hosts: localhost
   gather_facts: false
 
diff --git a/ansible/playbook/openstack/openstack_list_objects_v3applicationcredential.yml b/ansible/playbook/openstack/openstack_list_objects_v3applicationcredential.yml
new file mode 100644
index 00000000..e7248441
--- /dev/null
+++ b/ansible/playbook/openstack/openstack_list_objects_v3applicationcredential.yml
@@ -0,0 +1,55 @@
+---
+- name: "Instantiate RHOS authentication"
+  ansible.builtin.import_playbook: "openstack_auth_passstore_v3applicationcredential.yml"
+  vars:
+    vm_user: "snowdrop"
+    pass_provider: "openstack"
+
+- name: "OpenStack Authentication"
+  hosts: localhost
+
+  tasks:
+
+    - name: "Get auth_token"
+      openstack.cloud.auth:
+        auth_type: "{{ rhos_authentication_type }}"
+        auth: "{{ rhos_authentication }}"
+      register: auth_result
+
+    - name: "Print Openstack Authentication result"
+      ansible.builtin.debug:
+        msg: "auth_result: {{ auth_result }}"
+        verbosity: 0
+
+
+    - name: List Fedora images
+      openstack.cloud.image_info:
+        auth_type: "{{ rhos_authentication_type }}"
+        auth: "{{ rhos_authentication }}"
+        properties:
+          os_distro: "fedora"
+      register: image_info_result
+
+    - name: "Print Openstack output"
+      ansible.builtin.debug:
+        var: image_info_result
+
+    # https://docs.openstack.org/ocata/cli-reference/glance-property-keys.html
+    - name: List Fedora images
+      openstack.cloud.image_info:
+        # token, v2token, v3token, admin_token
+        auth_type: token
+        auth:
+          auth_url: "https://rhos-d.infra.prod.upshift.rdu2.redhat.com:13000"
+          # token: "{{ auth_result.ansible_facts['auth_token'] }}"
+          token: "{{ auth_result.auth_token }}"
+          interface: "internal"
+        properties:
+          os_distro: "fedora"
+      register: image_info_result
+
+    - name: "Print Openstack output"
+      ansible.builtin.debug:
+        var: image_info_result
+...
+# ansible-playbook ansible/playbook/openstack/openstack_list_objects_v3applicationcredential.yml
diff --git a/ansible/playbook/openstack/openstack_list_objects.yml b/ansible/playbook/openstack/openstack_list_objects_v3password.yml
similarity index 73%
rename from ansible/playbook/openstack/openstack_list_objects.yml
rename to ansible/playbook/openstack/openstack_list_objects_v3password.yml
index a5d736ac..4af6d2a6 100644
--- a/ansible/playbook/openstack/openstack_list_objects.yml
+++ b/ansible/playbook/openstack/openstack_list_objects_v3password.yml
@@ -1,20 +1,14 @@
-# ansible-playbook ansible/playbook/openstack/openstack_auth.yml
 ---
+- name: "Instantiate RHOS authentication"
+  ansible.builtin.import_playbook: "openstack_auth_passstore_v3password.yml"
+  vars:
+    vm_user: "snowdrop"
+    pass_provider: "openstack"
+
 - name: "OpenStack Authentication"
   hosts: localhost
   gather_facts: false
 
-  pre_tasks:
-    - name: "Set openstack_auth facts"
-      ansible.builtin.set_fact:
-        openstack_auth:
-          openstack_project_name: "{{ query('passwordstore', 'openstack/host/project_name')[0] }}"
-          openstack_console_user: "{{ query('passwordstore', 'openstack/host/console_user')[0] }}"
-          openstack_console_password: "{{ query('passwordstore', 'openstack/host/console_pw')[0] }}"
-          openstack_user_domain: "{{ query('passwordstore', 'openstack/host/console_domain')[0] }}"
-          openstack_project_domain: "{{ query('passwordstore', 'openstack/host/os_domain')[0] }}"
-          openstack_os_auth_url: "{{ query('passwordstore', 'openstack/host/os_auth_url')[0] }}"
-
   tasks:
     - name: "Print Openstack output"
diff --git a/ansible/playbook/openstack/openstack_vm_create_passwordstore.yml b/ansible/playbook/openstack/openstack_vm_create_passwordstore.yml
index a3ae0b91..283077fa 100644
--- a/ansible/playbook/openstack/openstack_vm_create_passwordstore.yml
+++ b/ansible/playbook/openstack/openstack_vm_create_passwordstore.yml
@@ -10,37 +10,34 @@
 # . k8s_version: Kubernetes version [117 ... 121], empty for no k8s installation
 
 - name: "Validate passwordstore"
-  import_playbook: "../passstore/passstore_controller_check.yml"
+  ansible.builtin.import_playbook: "../passstore/passstore_controller_check.yml"
 
 # tag::initialize_passwordstore_inventory[]
 - name: "Initialize passwordstore inventory"
-  import_playbook: "../passstore/passstore_controller_inventory.yml"
+  ansible.builtin.import_playbook: "../passstore/passstore_controller_inventory.yml"
   vars:
     vm_user: "snowdrop"
     pass_provider: "openstack"
 # end::initialize_passwordstore_inventory[]
 
+# tag::instanciate_rhos_auth[]
+- name: "Instantiate RHOS authentication"
+  ansible.builtin.import_playbook: "openstack_auth_passstore_v3password.yml"
+  vars:
+    vm_user: "snowdrop"
+    pass_provider: "openstack"
+# end::instanciate_rhos_auth[]
+
 - name: "Openstack VM create"
   hosts: localhost
   gather_facts: True
-
-  pre_tasks:
-    - name: "Set openstack_auth facts"
-      ansible.builtin.set_fact:
-        openstack_auth:
-          openstack_project_name: "{{ query('passwordstore', 'openstack/host/project_name')[0] }}"
-          openstack_console_user: "{{ query('passwordstore', 'openstack/host/console_user')[0] }}"
-          openstack_console_password: "{{ query('passwordstore', 'openstack/host/console_pw')[0] }}"
-          openstack_user_domain: "{{ query('passwordstore', 'openstack/host/console_domain')[0] }}"
-          openstack_project_domain: "{{ query('passwordstore', 'openstack/host/os_domain')[0] }}"
-          openstack_os_auth_url: "{{ query('passwordstore', 'openstack/host/os_auth_url')[0] }}"
 
   tasks:
     # outputs:
     # . openstack_vm_ipv4
    # . openstack_output
     - name: "Execute create inventory, if tagged as so"
-      include_role:
+      ansible.builtin.include_role:
         name: "snowdrop.cloud_infra.openstack_vm"
         apply:
           tags:
@@ -53,55 +50,6 @@
         set_fact:
           openstack_vm_ipv4: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_host create=True userpass=' + openstack_vm_ipv4 )[0] }}"
 
-- name: "Refresh inventory"
-  hosts: localhost
-  gather_facts: True
-
-  tasks:
-    - name: Refresh the inventory so the newly added host is available
-      meta: refresh_inventory
-
-    - name: "Add host to known hosts {{ hostvars[vm_name]['ansible_ssh_host'] }}"
-      ansible.builtin.known_hosts:
-        name: "{{ hostvars[vm_name]['ansible_ssh_host'] }}"
-        key: "{{ lookup('pipe', 'ssh-keyscan {{ hostvars[vm_name].ansible_ssh_host }}') }}"
-        hash_host: true
-
-- name: "Wait for the VM to boot and we can ssh"
-  hosts: "{{ vm_name }}"
-  gather_facts: no
-
-  tasks:
-    - name: "Show 'Wait for connection to host' output"
-      debug:
-        msg:
-          - "ip : {{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_host')[0] }}"
-          - "port : {{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_port')[0] }}"
-
-    - name: "Wait for connection to host"
-      ansible.builtin.wait_for:
-        host: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_host')[0] }}"
-        port: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_port')[0] }}"
-        timeout: 120
-      register: wait_for_connection_reg
-
-  post_tasks:
-    - name: "DON'T FORGET TO SECURE YOUR SERVER"
-      debug:
-        msg: "Trying to start start server securization automatically For manual execution: $ ansible-playbook ansible/playbook/sec_host.yml -e vm_name={{ vm_name }} -e provider=openstack"
-
-- name: "Openstack VM init"
-  hosts: "{{ vm_name }}"
-  gather_facts: yes
-
-  roles:
-    - role: "openstack/init_vm"
-
-- name: "Secure new server"
-  import_playbook: "../sec_host.yml"
-  vars:
-    provider: "openstack"
-    hosts: "{{ vm_name }}"
-    vm_name: "{{ vm_name }}"
-  tags: [always]
+- name: "Execute post create actions"
+  ansible.builtin.import_playbook: "openstack_vm_create_post_passwordstore.yml"
 ...
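The imported `openstack_vm_create_post_passwordstore.yml` is not shown in this diff; judging by the plays removed above, it presumably carries the same post-create steps (inventory refresh, SSH wait, securization). A rough sketch under that assumption, with the removed content condensed:

[source,yaml]
----
---
# Hypothetical reconstruction (not part of this diff) of the post-create
# playbook, mirroring the plays removed from openstack_vm_create_passwordstore.yml.
- name: "Refresh inventory"
  hosts: localhost
  tasks:
    - name: "Refresh the inventory so the newly added host is available"
      ansible.builtin.meta: refresh_inventory

- name: "Wait for the VM to boot and accept SSH"
  hosts: "{{ vm_name }}"
  gather_facts: no
  tasks:
    - name: "Wait for connection to host"
      ansible.builtin.wait_for:
        host: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_host')[0] }}"
        port: "{{ query('passwordstore', 'openstack/' + vm_name + '/ansible_ssh_port')[0] }}"
        timeout: 120

- name: "Secure new server"
  ansible.builtin.import_playbook: "../sec_host.yml"
  vars:
    provider: "openstack"
    vm_name: "{{ vm_name }}"
----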
diff --git a/ansible/roles/k8s_cluster/tasks/install_cni.yml b/ansible/roles/k8s_cluster/tasks/install_cni.yml
index 90c9ef04..e78845d4 100644
--- a/ansible/roles/k8s_cluster/tasks/install_cni.yml
+++ b/ansible/roles/k8s_cluster/tasks/install_cni.yml
@@ -13,6 +13,14 @@
   set_fact:
     install_calico: "(kc_calico_ds is undefined or (kc_calico_ds.rc != 0 and 'NotFound' not in kc_calico_ds.stderr))"
 
+- name: "Pull Calico images"
+  shell: |
+    docker pull docker.io/calico/cni:{{ calico_version }}
+    docker pull docker.io/calico/typha:{{ calico_version }}
+    docker pull docker.io/calico/pod2daemon-flexvol:{{ calico_version }}
+  register: kc_tigera_calico
+  failed_when: kc_tigera_calico.rc > 0 and 'already exists' not in kc_tigera_calico.stderr
+
 - name: "Print Decide calico output"
   debug:
     msg: "install_calico {{ install_calico }}"
diff --git a/kubernetes/README.md b/kubernetes/README.md
deleted file mode 100644
index 8b3f04cc..00000000
--- a/kubernetes/README.md
+++ /dev/null
@@ -1,213 +0,0 @@
-# Table of Contents
-
-- [Table of Contents](#table-of-contents)
-- [Introduction](#introduction)
-  - [Scope](#scope)
-- [Requirements](#requirements)
-  - [Ansible Inventory](#ansible-inventory)
-  - [Host provisioning](#host-provisioning)
-  - [Host-Group Association](#host-group-association)
-- [Installation](#installation)
-- [Troubleshooting](#troubleshooting)
-  - [Expired k8s certificate](#expired-k8s-certificate)
-    - [Problem](#problem)
-    - [Cause](#cause)
-    - [Solution {#k8s-cert-sol}](#solution-k8s-cert-sol)
-  - [Cannot log in using kubelet](#cannot-log-in-using-kubelet)
-    - [Problem](#problem-1)
-    - [Cause](#cause-1)
-    - [Solution](#solution)
-
-# Introduction
-
-This document describes the requirements and the process to install a k8s cluster on a host. The installation is done using Ansible.
-
-## Scope
-
-Describes the steps to install k8s on a host.
-
-# Requirements
-
-First of all, follow the instructions in the [Ansible Installation Guide section](../ansible/playbook/README.md#installation-guide).
-
-## Ansible Inventory
-
-Installing k8s requires several variables to be provided. To standardize the installation, several Ansible groups have been created for the different installation types.
-
-To populate these variables, groups with the corresponding group variables have been created in the [`hosts.yml`](../ansible/inventory/hosts.yml) inventory file.
-
-The following table shows the existing groups for k8s.
-
-| Group Type | Group Name | Description |
-| --- | --- | --- |
-| Components | masters | Kubernetes control plane. Includes information such as firewall ports and services to be opened as well as internal subnet information. |
-| Components | nodes | Kubernetes node. Similar to masters but for k8s nodes. |
-| Versions | k8s_116 | Version 1.16 specific information |
-| Versions | k8s_115 | Version 1.15 specific information |
-
-Installing Kubernetes requires a host to be assigned to 2 groups, identified in the previous table by *Group Type*: a k8s component and a k8s version.
-
-## Host provisioning
-
-Provisioning a host is done using the appropriate Ansible Playbooks.
-
-First create the Ansible Inventory records as indicated in the [Create a host](../ansible/playbook/README.md#create-a-host) section of the ansible playbook documentation.
-
-In this example we create the inventory for a new VM to be provisioned on the hetzner provider.
-
-```bash
-$ ansible-playbook ansible/playbook/passstore_controller_inventory.yml -e vm_name=my-host -e pass_provider=hetzner -e k8s_type=masters -e k8s_version=115 --tags create
-```
-
-In the pass database we can now see the following structure.
-
-```
-├── hetzner
-│   ├── my-host
-│   │   ├── ansible_ssh_port
-│   │   ├── groups
-│   │   │   ├── k8s_115
-│   │   │   └── masters
-│   │   ├── id_rsa
-│   │   ├── id_rsa.pub
-│   │   ├── os_password
-│   │   ├── os_user
-│   │   └── ssh_port
-```
-
-This host has already been added to the `masters` and `k8s_115` groups as part of the script.
-
-To remove the host from the inventory...
-
-```bash
-$ ansible-playbook ansible/playbook/passstore_controller_inventory_remove.yml -e vm_name=my-host -e pass_provider=hetzner
-```
-
-## Host-Group Association
-
-Once the host is in the inventory, it can be associated with groups.
-
-For instance, to install the k8s control plane for version 1.15 on a newly created host (`my-host` in this example) we have to add that host to the `masters` and `k8s_115` groups.
-To perform this operation use the `passstore_manage_host_groups.yml` playbook, as shown in the following example.
-
-Add a host to the `masters` group and to the `k8s_115` group.
-
-```bash
-$ ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=add -e group_name=masters -e vm_name=my-host
-$ ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=add -e group_name=k8s_115 -e vm_name=my-host
-```
-
-To remove a host from the `k8s_115` group...
-
-```bash
-$ ansible-playbook ansible/playbook/passstore_manage_host_groups.yml -e operation=remove -e group_name=k8s_115 -e vm_name=my-host
-```
-
-More information on how hosts are assigned to groups, including adding and removing hosts from groups, is available [here](../ansible/playbook/README.md#groups).
-
-# Installation
-
-Once the host is defined in the inventory and also provisioned, execute the k8s creation playbook.
-
-```bash
-$ ansible-playbook kubernetes/ansible/k8s.yml --limit
-```
-
-The `limit` option tells Ansible to execute the playbook only on the hosts specified in the statement.
-The Kubernetes version can be changed using the parameter `-e k8s_version=1.21.4`.
-
-**WARNING**: Be sure that a host group entry exists for the version you want to install within the `inventory/hosts` file:
-
-```yaml
-  k8s_121:
-    vars:
-      k8s_version: 1.21.4
-      k8s_dashboard_version: v2.3.1
-```
-
-Example for installing a k8s server from scratch using the hetzner provider, where we will create a VM.
-
-```bash
-$ VM_NAME=xXx \
-  ; ansible-playbook hetzner/ansible/hetzner-delete-server.yml -e vm_name=${VM_NAME} -e hetzner_context_name=snowdrop \
-  ; ansible-playbook ansible/playbook/passstore_controller_inventory_remove.yml -e vm_name=${VM_NAME} -e pass_provider=hetzner \
-  && ansible-playbook hetzner/ansible/hetzner-create-ssh-key.yml -e vm_name=${VM_NAME} \
-  && ansible-playbook ansible/playbook/passstore_controller_inventory.yml -e vm_name=${VM_NAME} -e pass_provider=hetzner -e k8s_type=masters -e k8s_version=115 -e operation=create \
-  && ansible-playbook hetzner/ansible/hetzner-create-server.yml -e vm_name=${VM_NAME} -e salt_text=$(gpg --gen-random --armor 1 20) -e hetzner_context_name=snowdrop \
-  && ansible-playbook ansible/playbook/sec_host.yml -e vm_name=${VM_NAME} -e provider=hetzner \
-  && ansible-playbook kubernetes/ansible/k8s.yml --limit ${VM_NAME}
-```
-
-> NOTE: Both kubernetes playbooks (`k8s` and `k8s-misc`) can have their host overridden using the `override_host` variable, e.g., `-e override_host=localhost` to launch them on the controller itself.
-
-To delete the kubernetes cluster (kubeadm, kubelet, ...), execute this command:
-
-```bash
-ansible-playbook kubernetes/ansible/k8s.yml --limit ${VM_NAME} -e remove=true
-```
-
-# Troubleshooting
-
-## Expired k8s certificate
-
-### Problem
-
-- kubelet service shows connection errors.
-- The docker container running the k8s API server cannot be started.
-
-### Cause
-
-```bash
-$ docker logs xxxxxxxxxxxx
-...
-W0121 11:09:31.447982 1 clientconn.go:1251] grpc: addrConn.createTransport failed to connect to {127.0.0.1:2379 0 }. Err :connection error: desc = "transport: authentication handshake failed: x509: certificate has expired or is not yet valid". Reconnecting...
-```
-
-Check the validity of the kubernetes certificates using the following command. If they have expired, apply the fix described in the [Solution](#solution-k8s-cert-sol) section.
-
-```bash
-$ openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text | grep ' Not '
-```
-
-### Solution {#k8s-cert-sol}
-
-The solution applied was [this answer from a Stack Overflow thread](https://stackoverflow.com/questions/56320930/renew-kubernetes-pki-after-expired/56334732#56334732), applied to our k8s 1.14 cluster.
-
-Other references:
-* https://www.ibm.com/support/knowledgecenter/SSCKRH_1.1.0/platform/t_certificate_renewal.html
-
-```bash
-$ cd /etc/kubernetes/pki/
-$ mv {apiserver.crt,apiserver-etcd-client.key,apiserver-kubelet-client.crt,front-proxy-ca.crt,front-proxy-client.crt,front-proxy-client.key,front-proxy-ca.key,apiserver-kubelet-client.key,apiserver.key,apiserver-etcd-client.crt} ~/
-$ kubeadm init phase certs all --apiserver-advertise-address
-$ cd /etc/kubernetes/
-$ mv {admin.conf,controller-manager.conf,kubelet.conf,scheduler.conf} ~/
-$ kubeadm init phase kubeconfig all
-$ reboot
-```
-
-And then update the user's kube config.
-
-```bash
-$ cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
-```
-
-## Cannot log in using kubelet
-
-### Problem
-
-```bash
-$ kubectl get pods
-error: You must be logged in to the server (Unauthorized)
-```
-
-This might happen, for instance, after renewing the certificates.
-
-### Cause
-
-The `~/.kube/config` does not contain the client-certificate-data and client-key-data updated after renewing the certificate.
-
-### Solution
-
-```bash
-$ cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
-```
-
-
diff --git a/openstack/README.adoc b/openstack/README.adoc
index de415081..39e3f1ce 100644
--- a/openstack/README.adoc
+++ b/openstack/README.adoc
@@ -55,7 +55,6 @@ Different OS images are available on Openstack.
 
 === Flavors
 
-
 .OpenStack Flavor information
 [%header,cols="2m,1,1,1,1,1"]
 |===