Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Test deployment locally with tmt #562

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .ansible-lint
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ mock_modules:
# Ansible 2.9.27 in F35 still contains the k8s module so we can ignore the error until F36,
# where we can switch to kubernetes.core.k8s as ansible-5.x in F36 contains it.
- k8s
- kubernetes.core.k8s
# Ignore until F36, where these are in community.crypto collection (part of ansible-5.x rpm).
- openssh_keypair
- openssl_certificate
Expand Down
1 change: 1 addition & 0 deletions .fmf/version
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
1
30 changes: 30 additions & 0 deletions .github/workflows/tf-tests.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
name: Schedule test on Testing Farm
on:
  issue_comment:
    types:
      - created

# Cancel an in-flight run when a newer `[test]` comment arrives.
# The group is scoped per workflow and per PR so that comments on unrelated
# PRs do not cancel each other's runs (a single fixed group name would).
concurrency:
  group: ${{ github.workflow }}-${{ github.event.issue.number }}
  cancel-in-progress: true

jobs:
  tests:
    runs-on: ubuntu-latest
    # Only react to comments on pull requests (not plain issues) that contain
    # "[test]" and were written by a repo OWNER or MEMBER.
    if: |
      github.event.issue.pull_request
      && contains(github.event.comment.body, '[test]')
      && contains(fromJson('["OWNER", "MEMBER"]'), github.event.comment.author_association)
    steps:
      - name: Schedule test on Testing Farm
        uses: sclorg/testing-farm-as-github-action@v2
        with:
          api_key: ${{ secrets.TF_API_KEY }}
          git_url: "https://github.com/majamassarini/deployment"
          git_ref: "tf-openshift-tests"
          tmt_plan_regex: "deployment"
          pull_request_status_name: "Deployment"
          create_issue_comment: true
          timeout: 3600
          secrets: CRC_PULL_SECRET=${{ secrets.CRC_PULL_SECRET }}
51 changes: 51 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,23 @@ AP := ansible-playbook -vv -c local -i localhost, -e ansible_python_interpreter=
# "By default, Ansible runs as if --tags all had been specified."
# https://docs.ansible.com/ansible/latest/user_guide/playbooks_tags.html#special-tags
TAGS ?= all
# SSH connection details for the Vagrant VM that hosts the OpenShift Local
# cluster. NOTE(review): recursive `=` assignment re-runs `vagrant ssh-config`
# on every expansion; presumably intentional so the VM is only queried when a
# target actually uses the value (it may not exist at parse time) — confirm.
VAGRANT_SSH_PORT = "$(shell cd containers && vagrant ssh-config | awk '/Port/{print $$2}')"
VAGRANT_SSH_USER = "$(shell cd containers && vagrant ssh-config | awk '/User/{print $$2}')"
VAGRANT_SSH_GUEST = "$(shell cd containers && vagrant ssh-config | awk '/HostName/{print $$2}')"
VAGRANT_SSH_IDENTITY_FILE = "$(shell cd containers && vagrant ssh-config | awk '/IdentityFile/{print $$2}')"
# All ssh-config options except the leading "Host" line (NR>1), rendered as
# `-o Key=Value` flags for plain ssh (used by the oc-cluster-ssh target).
VAGRANT_SSH_CONFIG = $(shell cd containers && vagrant ssh-config | awk 'NR>1 {print " -o "$$1"="$$2}')
#VAGRANT_SHARED_DIR = "/vagrant"
# Path inside the VM where this repository is available; /vagrant is the
# default Vagrant synced folder — TODO confirm which applies in the tmt flow.
VAGRANT_SHARED_DIR = "/home/tmt/deployment"

# CentOS Stream 8 Vagrant box used by `make oc-cluster-create` (downloaded
# explicitly because the box_url referenced upstream gives 404).
CENTOS_VAGRANT_BOX = CentOS-Stream-Vagrant-8-latest.x86_64.vagrant-libvirt.box
CENTOS_VAGRANT_URL = https://cloud.centos.org/centos/8-stream/x86_64/images/$(CENTOS_VAGRANT_BOX)

ifneq "$(shell whoami)" "root"
ASK_PASS ?= --ask-become-pass
endif

# Only for Packit team members with access to Bitwarden vault
# if not working prepend OPENSSL_CONF=/dev/null to script invocation
download-secrets:
./scripts/download_secrets.sh

Expand Down Expand Up @@ -50,3 +61,43 @@ check:
move-stable:
[[ -d move_stable_repositories ]] || scripts/move_stable.py init
scripts/move_stable.py move-all

# Create the Vagrant VM and bring up the OpenShift Local cluster inside it,
# downloading the CentOS Stream box image first if it is not present.
oc-cluster-create:
	# The upstream box URL gives 404 ("vagrant pointer is broken"), so the box
	# image is fetched here and referenced via a file:// URL in the Vagrantfile.
	# POSIX `test -f` instead of bash-only `[[ -f ]]`: recipes run under /bin/sh.
	test -f $(CENTOS_VAGRANT_BOX) || wget $(CENTOS_VAGRANT_URL)
	cd containers && vagrant up

# Destroy the Vagrant VM (and with it the OpenShift Local cluster).
# Re-create it from scratch with `make oc-cluster-create`.
oc-cluster-destroy:
	cd containers && vagrant destroy

# Boot the (already created) VM and start the OpenShift cluster inside it
# by running the oc-cluster-run playbook from the shared directory.
oc-cluster-up:
	cd containers && vagrant up
	cd containers && vagrant ssh -c "cd $(VAGRANT_SHARED_DIR) && $(AP) playbooks/oc-cluster-run.yml"

# Halt the VM without destroying it; bring it back with `make oc-cluster-up`.
oc-cluster-down:
	cd containers && vagrant halt

# Open an interactive SSH session into the VM (making sure it is up first).
# Connection options come from `vagrant ssh-config` via VAGRANT_SSH_CONFIG.
oc-cluster-ssh: oc-cluster-up
	ssh $(VAGRANT_SSH_CONFIG) localhost

# Deploy packit-service (dev) to the local OpenShift cluster and test it.
test-deploy:
	# to be run inside the VM where the oc cluster is running! Call `make tmt-vagrant-test` instead from outside the vagrant machine.
	DEPLOYMENT=dev $(AP) playbooks/generate-local-secrets.yml
	DEPLOYMENT=dev $(AP) -e '{"src_dir": $(VAGRANT_SHARED_DIR)}' playbooks/test_deploy_setup.yml
	cd $(VAGRANT_SHARED_DIR); DEPLOYMENT=dev $(AP) -e '{"container_engine": "podman", "registry": "default-route-openshift-image-registry.apps-crc.testing", "registry_user": "kubeadmin", "src_dir": $(VAGRANT_SHARED_DIR)}' playbooks/test_deploy.yml

# Run the deployment tmt plan against the already-running Vagrant VM.
# Guest/port/key are read from `vagrant ssh-config` (see variables at the top).
tmt-vagrant-test:
	tmt run --all provision --how connect --user vagrant --guest $(VAGRANT_SSH_GUEST) --port $(VAGRANT_SSH_PORT) --key $(VAGRANT_SSH_IDENTITY_FILE)

# Schedule the "deployment" tmt plan remotely on Testing Farm.
tf-deploy:
	testing-farm request --compose Fedora-Rawhide --git-url https://github.com/majamassarini/deployment --git-ref tf-openshift-tests --plan deployment

# tmt run --id packit-service-deployment --until execute
# tmt run --id packit-service-deployment prepare --force
# tmt run --id packit-service-deployment login --step prepare:start
# tmt run --id packit-service-deployment execute --force
# tmt run --id packit-service-deployment login --step execute:start
# tmt run --id packit-service-deployment finish
# tmt run --id packit-service-deployment clean

# virsh list --all
94 changes: 94 additions & 0 deletions containers/Vagrantfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

VAGRANTFILE_API_VERSION = "2"
# export VAGRANT_EXPERIMENTAL="1"
# export VAGRANT_EXPERIMENTAL="disks"

# Defines the "packit-oc-cluster" libvirt VM that hosts an OpenShift Local
# cluster for testing the deployment; provisioning is done with Ansible.
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.vm.box = "centos/stream8"
  #config.vm.box_url = "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-Vagrant-8-latest.x86_64.vagrant-libvirt.box" gives 404
  # NOTE(review): Ruby does not expand "$VagrantProjectHome" inside this
  # string — verify this file:// URL resolves (the box image is downloaded
  # next to the project by the `oc-cluster-create` Makefile target).
  config.vm.box_url = "file:///$VagrantProjectHome/../CentOS-Stream-Vagrant-8-latest.x86_64.vagrant-libvirt.box"


  # Forward traffic on the host to the development server on the guest
  config.vm.network "forwarded_port", guest: 5000, host: 5000
  # Forward traffic on the host to Redis on the guest
  config.vm.network "forwarded_port", guest: 6379, host: 6379
  # Forward traffic on the host to the SSE server on the guest
  config.vm.network "forwarded_port", guest: 8080, host: 8080


  # Keep /etc/hosts on host and guest in sync, only if the plugin is installed.
  if Vagrant.has_plugin?("vagrant-hostmanager")
    config.hostmanager.enabled = true
    config.hostmanager.manage_host = true
  end

  # Vagrant can share the source directory using rsync, NFS, or SSHFS (with the vagrant-sshfs
  # plugin). By default it rsyncs the current working directory to /vagrant.
  #
  # If you would prefer to use NFS to share the directory uncomment this and configure NFS
  # config.vm.synced_folder ".", "/vagrant", type: "nfs", nfs_version: 4, nfs_udp: false
  # Share the repository root (the parent of containers/) as /vagrant.
  config.vm.synced_folder "..", "/vagrant"
  # config.vm.synced_folder ".", "/vagrant", disabled: true
  # config.vm.synced_folder ".", "/srv/pagure",
  #   ssh_opts_append: "-o IdentitiesOnly=yes",
  #   type: "sshfs"

  # To cache update packages (which is helpful if frequently doing `vagrant destroy && vagrant up`)
  # you can create a local directory and share it to the guest's DNF cache. The directory needs to
  # exist, so create it before you uncomment the line below.
  #Dir.mkdir('.dnf-cache') unless File.exists?('.dnf-cache')
  #config.vm.synced_folder ".dnf-cache", "/var/cache/dnf",
  #  type: "sshfs",
  #  sshfs_opts_append: "-o nonempty"

  # Comment this line if you would like to disable the automatic update during provisioning
  config.vm.provision "shell", inline: "sudo dnf -y --disablerepo '*' --enablerepo=extras swap centos-linux-repos centos-stream-repos"

  # !!!!!!! resize disk image !!!!!!!!!
  # Grow partition 1 and its XFS filesystem so the guest can use the full
  # 100G virtual disk configured below (machine_virtual_size).
  config.vm.provision "shell", inline: "sudo dnf install -y cloud-utils-growpart"
  config.vm.provision "shell", inline: "sudo growpart /dev/vda 1"
  config.vm.provision "shell", inline: "sudo xfs_growfs /dev/vda1"

  # bootstrap and run with ansible
  config.vm.provision "ansible" do |ansible|
    # ansible.verbose = "-vvv"
    ansible.verbose = true
    ansible.playbook = "../playbooks/oc-cluster-setup.yml"
  end
  # Start the OpenShift Local cluster; requires the pull secret from secrets/.
  config.vm.provision "ansible" do |ansible|
    # ansible.verbose = "-vvv"
    ansible.verbose = true
    ansible.playbook = "../playbooks/oc-cluster-run.yml"
    ansible.extra_vars = "../secrets/openshift-local-pull-secret.yml"
  end
  # Prepare the guest for running the deployment tests from /vagrant.
  config.vm.provision "ansible" do |ansible|
    # ansible.verbose = "-vvv"
    ansible.verbose = true
    ansible.playbook = "../playbooks/oc-cluster-tests-setup.yml"
    ansible.extra_vars = {"src_dir": "/vagrant"}
  end

  # Create the box
  config.vm.define "packit-oc-cluster" do |oc|
    oc.vm.host_name = "packit-oc-cluster.example.com"

    oc.vm.provider :libvirt do |domain|
      # Season to taste
      domain.cpus = 6
      domain.graphics_type = "spice"
      domain.memory = 14336
      domain.video_type = "qxl"
      domain.machine_virtual_size = 100

      # Uncomment the following line if you would like to enable libvirt's unsafe cache
      # mode. It is called unsafe for a reason, as it causes the virtual host to ignore all
      # fsync() calls from the guest. Only do this if you are comfortable with the possibility of
      # your development guest becoming corrupted (in which case you should only need to do a
      # vagrant destroy and vagrant up to get a new one).
      #
      # domain.volume_cache = "unsafe"
    end
  end
end
35 changes: 35 additions & 0 deletions docs/deployment/testing-changes.md
Original file line number Diff line number Diff line change
Expand Up @@ -68,3 +68,38 @@ This repository provides helpful playbook to do this with one command:

Zuul provides a public key for every project. The ansible playbook downloads Zuul repository and pass the project tenant and name as parameters to encryption script. This script then encrypts files with public key of the project.
For more information please refer to [official docs](https://ansible.softwarefactory-project.io/docs/user/zuul_user.html#create-a-secret-to-be-used-in-jobs).

### Test Deployment locally with OpenShift Local

To use OpenShift Local you need a _pull secret_; download it here: https://console.redhat.com/openshift/create/local. Save it in a file called `secrets/openshift-local-pull-secret.yml` following this format:

```
---
pull_secret: <<< DOWNLOADED PULL SECRET CONTENT >>>
```

Populate the `secrets` dir with all the other secrets.
You _should use_ your own secrets but if you have access to `stg` secrets
you can also do:

```
DEPLOYMENT=stg make download-secrets
```

Now you can create and start the OpenShift Local cluster (it can take as long as an hour) in a Vagrant virtual machine with:

```
make oc-cluster-create
```

And once it is up and running you can test the `packit-service` deployment with the command:

```
make tmt-vagrant-test
```

This command will ssh into the virtual machine and run the tests there (`make test-deploy`);
you can run the tests as many times as you want, as long as the virtual machine is up and running and the `crc` cluster is started (`make oc-cluster-up` after every `make oc-cluster-down`).
You can skip the `tmt` environment and run the test directly inside the VM using `make oc-cluster-ssh` and `cd /vagrant && make test-deploy`.

You can destroy the `libvirt` machine with `make oc-cluster-destroy` and re-create it again with `make oc-cluster-create`.
2 changes: 1 addition & 1 deletion openshift/redis.yml.j2
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ spec:
spec:
containers:
- name: redis
image: quay.io/sclorg/redis-7-c9s
image: quay.io/sclorg/redis-7-c9s:c9s
ports:
- containerPort: 6379
volumeMounts:
Expand Down
63 changes: 63 additions & 0 deletions plans/deployment.fmf
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
tier: 1

execute:
how: tmt

environment:
DEPLOYMENT: dev
#ANSIBLE_DEBUG: 1

report:
how: display

discover:
how: fmf
test:
- deployment

/local:
summary: Run packit-service deployment test on a local machine managed by tmt
discover:
how: fmf
test:
- deployment
provision:
how: virtual
image: centos-stream
disk: 100
memory: 14336
# cpu.cores: 6 # I can not customize cpu cores in virtual testcloud prepare plugin
# but OpenShift Cluster will not start without 4 cores at least
# change DEFAULT_CPU_COUNT in tmt/steps/provision/testcloud.py to 6
prepare:
- how: ansible
playbook: playbooks/oc-cluster-user.yml
extra-args: '-vvv'
- how: ansible
playbook: playbooks/oc-cluster-setup.yml
extra-args: '-vvv'
- how: ansible
playbook: playbooks/oc-cluster-run.yml
extra-args: '-vvv --extra-vars @./secrets/openshift-local-pull-secret.yml'
- how: ansible
playbook: playbooks/oc-cluster-tests-setup.yml
extra-args: '-vvv'
#- how: ansible
# playbook: playbooks/generate-local-secrets.yml
# extra-args: '-vvv --extra-vars generate_secrets_project_dir="./"'
#- how: ansible
# playbook: playbooks/test_deploy_setup.yml
# extra-args: '-vvv'

#/vagrant:
# summary: Run packit-service deployment test on a vagrant machine already up and running
#
# description:
# guest, port and key details are taken from the tmt-vagrant-test Makefile target.
# This provision is meant to be used just locally with the Vagrant machine already up
# and running. To start the Vagrant machine use the oc-cluster-up Makefile target.
# To access the Vagrant machine use the oc-cluster-ssh Makefile target.
#
# provision:
# how: connect
# user: vagrant
14 changes: 8 additions & 6 deletions playbooks/deploy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,8 @@
memory: "128Mi"
cpu: "50m"
appcode: PCKT-002
registry: 172.30.1.1:5000
registry_user: developer
tasks:
- name: Include tasks/project-dir.yml
ansible.builtin.include_tasks: tasks/project-dir.yml
Expand Down Expand Up @@ -124,25 +126,25 @@
tls_verify_false: "{{ '--tls-verify=false' if 'podman' in container_engine else '' }}"
changed_when: false
- name: Login to local cluster
ansible.builtin.shell: "{{ container_engine }} login -u developer -p $(oc whoami -t) 172.30.1.1:5000 {{ tls_verify_false }}"
ansible.builtin.shell: "{{ container_engine }} login -u {{ registry_user }} -p $(oc whoami -t) {{ registry }} {{ tls_verify_false }}"
changed_when: false
- name: Inspect service image
ansible.builtin.command: "{{ container_engine }} inspect {{ image }}"
changed_when: false
- name: Tag the image with :dev
ansible.builtin.command: "{{ container_engine }} tag {{ image }} 172.30.1.1:5000/myproject/packit-service:dev"
ansible.builtin.command: "{{ container_engine }} tag {{ image }} {{ registry }}/myproject/packit-service:dev"
changed_when: true
- name: Push the image
ansible.builtin.command: "{{ container_engine }} push 172.30.1.1:5000/myproject/packit-service:dev {{ tls_verify_false }}"
ansible.builtin.command: "{{ container_engine }} push {{ registry }}/myproject/packit-service:dev {{ tls_verify_false }}"
changed_when: true
- name: Inspect worker image
ansible.builtin.command: "{{ container_engine }} inspect {{ image_worker }}"
changed_when: false
- name: Tag the image with :dev
ansible.builtin.command: "{{ container_engine }} tag {{ image_worker }} 172.30.1.1:5000/myproject/packit-worker:dev"
ansible.builtin.command: "{{ container_engine }} tag {{ image_worker }} {{ registry }}/myproject/packit-worker:dev"
changed_when: true
- name: Push the image
ansible.builtin.command: "{{ container_engine }} push 172.30.1.1:5000/myproject/packit-worker:dev {{ tls_verify_false }}"
ansible.builtin.command: "{{ container_engine }} push {{ registry }}/myproject/packit-worker:dev {{ tls_verify_false }}"
changed_when: true

- name: Deploy secrets
Expand Down Expand Up @@ -439,7 +441,7 @@

- name: Wait for deploymentconfig rollouts to complete
# timeout 10min to not wait indefinitely in case of a problem
ansible.builtin.command: timeout 10m oc rollout status -w deploy/{{ item }}
ansible.builtin.command: timeout 15m oc rollout status -w deploy/{{ item }}
register: oc_rollout_status
changed_when: false
failed_when: '"successfully rolled out" not in oc_rollout_status.stdout'
Expand Down
Loading
Loading