From 4df0fdb1953c2b3e9e9ae34b9f30d7fbe5784b57 Mon Sep 17 00:00:00 2001 From: Bodo Schulz Date: Wed, 15 Nov 2023 20:15:21 +0100 Subject: [PATCH] transfer all single roles into collection --- .config/ansible-lint.yml | 5 + .config/pycodestyle.cfg | 7 + .github/ISSUE_TEMPLATE/bug_report.md | 30 + .github/ISSUE_TEMPLATE/feature_request.md | 17 + .github/workflows/clean-workflows.yml | 31 + .github/workflows/linter.yml | 66 ++ .github/workflows/main.yml | 147 ++++ .github/workflows/push-to-ansible-galaxy.yml | 29 + CONTRIBUTING.md | 31 + LICENSE | 201 ++++++ Makefile | 36 + galaxy.yml | 43 ++ hooks/converge | 3 + hooks/destroy | 3 + hooks/doc | 31 + hooks/install | 17 + hooks/lint | 3 + hooks/molecule.rc | 11 + hooks/run_tox.sh | 47 ++ hooks/test | 3 + hooks/uninstall | 19 + hooks/verify | 3 + meta/runtime.yml | 3 + plugins/filter/container.py | 658 ++++++++++++++++++ plugins/filter/docker.py | 55 ++ plugins/modules/container_directories.py | 166 +++++ plugins/modules/container_environments.py | 318 +++++++++ plugins/modules/container_mounts.py | 344 +++++++++ plugins/modules/docker_client_configs.py | 476 +++++++++++++ plugins/modules/docker_common_config.py | 657 +++++++++++++++++ plugins/modules/docker_plugins.py | 486 +++++++++++++ plugins/modules/docker_version.py | 136 ++++ roles/container/.ansible-lint | 7 + roles/container/.editorconfig | 23 + roles/container/.flake8 | 19 + .../.github/workflows/clean-workflows.yml | 31 + .../.github/workflows/configured.yml | 58 ++ roles/container/.github/workflows/galaxy.yml | 30 + roles/container/.github/workflows/linter.yml | 56 ++ roles/container/.github/workflows/main.yml | 106 +++ .../.github/workflows/many-property-files.yml | 59 ++ .../multiple-container-with-filter.yml | 58 ++ .../.github/workflows/multiple-container.yml | 58 ++ .../.github/workflows/update-container.yml | 58 ++ .../.github/workflows/update-properties.yml | 58 ++ roles/container/.gitignore | 6 + roles/container/.yamllint | 36 + roles/container/LICENSE | 201 ++++++ roles/container/Makefile | 22 + roles/container/README.md | 454 ++++++++++++ roles/container/collections.yml | 10 + roles/container/defaults/main.yml | 52 ++ roles/container/files/list_all_container.sh | 3 + roles/container/files/list_all_images.sh | 3 + roles/container/files/parse_container_fact.sh | 36 + roles/container/files/prune.sh | 26 + .../files/remove_stopped_container.sh | 9 + .../container/files/remove_untagged_images.sh | 9 + roles/container/handlers/main.yml | 53 ++ roles/container/hooks/converge | 3 + roles/container/hooks/destroy | 3 + roles/container/hooks/lint | 3 + roles/container/hooks/molecule.rc | 74 ++ roles/container/hooks/test | 3 + roles/container/hooks/tox.sh | 52 ++ roles/container/hooks/verify | 3 + roles/container/meta/main.yml | 22 + .../molecule/configured/converge.yml | 12 + .../configured/group_vars/all/vars.yml | 91 +++ .../molecule/configured/molecule.yml | 67 ++ .../container/molecule/configured/prepare.yml | 53 ++ .../molecule/configured/tests/test_default.py | 190 +++++ roles/container/molecule/default/converge.yml | 12 + .../molecule/default/group_vars/all/vars.yml | 3 + roles/container/molecule/default/molecule.yml | 67 ++ roles/container/molecule/default/prepare.yml | 53 ++ .../molecule/default/tests/test_default.py | 94 +++ .../molecule/many-properties/converge.yml | 14 + .../many-properties/group_vars/all/vars.yml | 62 ++ .../molecule/many-properties/molecule.yml | 61 ++ .../molecule/many-properties/prepare.yml | 53 ++ .../molecule/many-properties/requirements.yml | 7 + 
.../many-properties/tests/test_default.py | 129 ++++ .../converge.yml | 12 + .../group_vars/all/vars.yml | 157 +++++ .../molecule.yml | 64 ++ .../prepare.yml | 53 ++ .../tests/test_default.py | 108 +++ .../molecule/multiple-container/converge.yml | 12 + .../group_vars/all/vars.yml | 375 ++++++++++ .../molecule/multiple-container/molecule.yml | 62 ++ .../molecule/multiple-container/prepare.yml | 53 ++ .../multiple-container/tests/test_default.py | 106 +++ .../molecule/update-container/converge.yml | 20 + .../update-container/group_vars/all/vars.yml | 20 + .../molecule/update-container/molecule.yml | 55 ++ .../molecule/update-container/prepare.yml | 92 +++ .../molecule/update-properties/converge.yml | 11 + .../update-properties/group_vars/all/vars.yml | 33 + .../molecule/update-properties/molecule.yml | 61 ++ .../molecule/update-properties/prepare.yml | 72 ++ .../update-properties/tests/test_default.py | 127 ++++ roles/container/tasks/custom-tasks/main.yml | 65 ++ .../tasks/custom-tasks/post-tasks.yml | 22 + .../tasks/custom-tasks/pre-tasks.yml | 22 + roles/container/tasks/download.yml | 91 +++ roles/container/tasks/get_information.yml | 27 + .../tasks/launch/launch_container.yml | 113 +++ .../tasks/launch/launch_for_older_ansible.yml | 82 +++ roles/container/tasks/launch/main.yml | 23 + roles/container/tasks/login.yml | 22 + roles/container/tasks/main.yml | 83 +++ roles/container/tasks/network.yml | 19 + roles/container/tasks/prepare.yml | 164 +++++ roles/container/templates/container.env.j2 | 7 + .../templates/coremedia_importer.sh.j2 | 108 +++ roles/container/templates/import-users.sh.j2 | 67 ++ .../templates/management-tools.rc.j2 | 57 ++ roles/container/templates/prometheus.yml.j2 | 87 +++ roles/container/templates/requirements.txt.j2 | 4 + .../templates/resetcaefeeder.properties.j2 | 11 + roles/container/test-requirements.txt | 12 + roles/container/tox.ini | 35 + roles/container/vars/alpine.yml | 8 + roles/container/vars/archlinux.yml | 7 + roles/container/vars/debian-10.yml | 13 + roles/container/vars/debian.yml | 10 + roles/container/vars/main.yml | 39 ++ roles/docker/.ansible-lint | 5 + roles/docker/.editorconfig | 23 + roles/docker/.flake8 | 19 + .../docker/.github/linters/.markdown-lint.yml | 36 + roles/docker/.github/linters/.yaml-lint.yml | 24 + .../.github/workflows/clean-workflows.yml | 31 + .../.github/workflows/dockerd-with-plugin.yml | 106 +++ .../.github/workflows/dockerd-with-tls.yml | 106 +++ roles/docker/.github/workflows/galaxy.yml | 30 + roles/docker/.github/workflows/linter.yml | 56 ++ roles/docker/.github/workflows/main.yml | 159 +++++ roles/docker/.gitignore | 6 + roles/docker/.pycodestyle | 5 + roles/docker/.yamllint | 40 ++ roles/docker/LICENSE | 21 + roles/docker/Makefile | 22 + roles/docker/README.md | 399 +++++++++++ roles/docker/collections.yml | 6 + roles/docker/defaults/main.yml | 132 ++++ roles/docker/handlers/main.yml | 76 ++ .../docker_config_to_ansible_yaml.sh | 16 + roles/docker/hooks/converge | 3 + roles/docker/hooks/destroy | 3 + roles/docker/hooks/lint | 3 + roles/docker/hooks/molecule.rc | 74 ++ roles/docker/hooks/test | 3 + roles/docker/hooks/tox.sh | 52 ++ roles/docker/hooks/verify | 3 + roles/docker/meta/main.yml | 45 ++ roles/docker/molecule/default/converge.yml | 12 + .../molecule/default/group_vars/all/vars.yml | 17 + roles/docker/molecule/default/molecule.yml | 61 ++ roles/docker/molecule/default/prepare.yml | 39 ++ .../default/tests/test_custom_config.py | 153 ++++ .../molecule/default/tests/test_default.py | 128 ++++ 
.../dockerd-with-client-config/converge.yml | 10 + .../group_vars/all/snakeoil.yml | 13 + .../group_vars/all/vars.yml | 52 ++ .../dockerd-with-client-config/molecule.yml | 61 ++ .../dockerd-with-client-config/prepare.yml | 48 ++ .../requirements.yml | 7 + .../tests/test_custom_config.py | 156 +++++ .../tests/test_default.py | 128 ++++ .../molecule/dockerd-with-plugin/converge.yml | 12 + .../group_vars/all/loki.yml | 9 + .../group_vars/all/vars.yml | 73 ++ .../molecule/dockerd-with-plugin/molecule.yml | 75 ++ .../molecule/dockerd-with-plugin/prepare.yml | 49 ++ .../dockerd-with-plugin/requirements.yml | 7 + .../tests/test_custom_config.py | 156 +++++ .../dockerd-with-plugin/tests/test_default.py | 128 ++++ .../molecule/dockerd-with-tls/converge.yml | 10 + .../group_vars/all/snakeoil.yml | 13 + .../dockerd-with-tls/group_vars/all/vars.yml | 65 ++ .../molecule/dockerd-with-tls/molecule.yml | 61 ++ .../molecule/dockerd-with-tls/prepare.yml | 48 ++ .../dockerd-with-tls/requirements.yml | 7 + .../tests/test_custom_config.py | 156 +++++ .../dockerd-with-tls/tests/test_default.py | 128 ++++ .../molecule/update-config/converge.yml | 48 ++ .../update-config/group_vars/all/vars.yml | 25 + .../molecule/update-config/molecule.yml | 61 ++ .../docker/molecule/update-config/prepare.yml | 39 ++ .../update-config/tests/test_custom_config.py | 144 ++++ .../update-config/tests/test_default.py | 128 ++++ roles/docker/tasks/client.obsolete | 30 + roles/docker/tasks/compose.yml | 35 + roles/docker/tasks/configure.yml | 119 ++++ roles/docker/tasks/install.yml | 114 +++ roles/docker/tasks/main.yml | 47 ++ roles/docker/tasks/plugins.yml | 38 + roles/docker/tasks/prepare.yml | 86 +++ roles/docker/tasks/repositories.yml | 47 ++ roles/docker/tasks/service.yml | 12 + roles/docker/tasks/users.yml | 18 + roles/docker/templates/apt/docker-ce.list.j2 | 12 + roles/docker/templates/docker.j2 | 44 ++ .../docker/templates/openrc/conf.d/docker.j2 | 29 + .../docker/templates/openrc/init.d/docker.j2 | 1 + .../templates/systemd/overwrite.conf.j2 | 9 + roles/docker/templates/systemd/proxy.conf.j2 | 4 + roles/docker/test-requirements.txt | 12 + roles/docker/tests/inventory | 1 + roles/docker/tests/test.yml | 5 + roles/docker/tox.ini | 35 + roles/docker/vars/archlinux-openrc.yaml | 24 + roles/docker/vars/archlinux.yaml | 21 + roles/docker/vars/artixlinux.yaml | 24 + roles/docker/vars/debian.yaml | 16 + roles/docker/vars/default.yaml | 7 + roles/docker/vars/main.yaml | 96 +++ roles/docker/vars/redhat.yaml | 13 + roles/registry/.ansible-lint | 5 + roles/registry/.editorconfig | 23 + roles/registry/.flake8 | 19 + .../.github/workflows/clean-workflows.yml | 31 + .../registry/.github/workflows/configured.yml | 60 ++ roles/registry/.github/workflows/galaxy.yml | 30 + roles/registry/.github/workflows/linter.yml | 56 ++ roles/registry/.github/workflows/main.yml | 111 +++ roles/registry/.gitignore | 6 + roles/registry/.yamllint | 40 ++ roles/registry/CONTRIBUTING.md | 31 + roles/registry/LICENSE | 201 ++++++ roles/registry/Makefile | 22 + roles/registry/README.md | 211 ++++++ roles/registry/collections.yml | 5 + roles/registry/defaults/main.yml | 27 + roles/registry/handlers/main.yml | 26 + roles/registry/hooks/converge | 3 + roles/registry/hooks/destroy | 3 + roles/registry/hooks/lint | 3 + roles/registry/hooks/molecule.rc | 74 ++ roles/registry/hooks/test | 3 + roles/registry/hooks/tox.sh | 52 ++ roles/registry/hooks/verify | 3 + roles/registry/meta/main.yml | 27 + .../registry/molecule/configured/converge.yml | 9 + 
.../configured/group_vars/all/redis.yml | 5 + .../configured/group_vars/all/vars.yml | 72 ++ .../registry/molecule/configured/molecule.yml | 59 ++ .../registry/molecule/configured/prepare.yml | 47 ++ .../molecule/configured/requirements.yml | 6 + .../molecule/configured/tests/test_default.py | 186 +++++ roles/registry/molecule/default/converge.yml | 9 + .../molecule/default/group_vars/all/vars.yml | 3 + roles/registry/molecule/default/molecule.yml | 57 ++ roles/registry/molecule/default/prepare.yml | 40 ++ .../molecule/default/tests/test_default.py | 186 +++++ roles/registry/molecule/latest/converge.yml | 9 + .../molecule/latest/group_vars/all/vars.yml | 5 + roles/registry/molecule/latest/molecule.yml | 57 ++ roles/registry/molecule/latest/prepare.yml | 40 ++ .../molecule/latest/tests/test_default.py | 186 +++++ roles/registry/tasks/configure.yml | 26 + roles/registry/tasks/download.yml | 52 ++ roles/registry/tasks/install.yml | 99 +++ roles/registry/tasks/main.yml | 20 + roles/registry/tasks/prepare.yml | 163 +++++ roles/registry/tasks/service.yml | 10 + .../templates/init/openrc/conf.d/registry.j2 | 9 + .../templates/init/openrc/init.d/registry.j2 | 41 ++ .../init/systemd/registry.service.j2 | 25 + roles/registry/templates/registry.j2 | 25 + .../registry/templates/registry/config.yml.j2 | 225 ++++++ roles/registry/test-requirements.txt | 12 + roles/registry/tox.ini | 35 + roles/registry/vars/archlinux-openrc.yml | 6 + roles/registry/vars/archlinux.yml | 6 + roles/registry/vars/artixlinux.yml | 6 + roles/registry/vars/debian.yml | 3 + roles/registry/vars/main.yml | 295 ++++++++ roles/registry_ui/.ansible-lint | 5 + roles/registry_ui/.editorconfig | 23 + roles/registry_ui/.flake8 | 19 + .../.github/workflows/clean-workflows.yml | 31 + .../.github/workflows/configured.yml | 57 ++ .../registry_ui/.github/workflows/galaxy.yml | 30 + .../registry_ui/.github/workflows/linter.yml | 56 ++ roles/registry_ui/.github/workflows/main.yml | 104 +++ roles/registry_ui/.gitignore | 6 + roles/registry_ui/.yamllint | 40 ++ roles/registry_ui/CONTRIBUTING.md | 31 + roles/registry_ui/LICENSE | 201 ++++++ roles/registry_ui/Makefile | 22 + roles/registry_ui/README.md | 206 ++++++ roles/registry_ui/collections.yml | 5 + roles/registry_ui/defaults/main.yml | 35 + roles/registry_ui/handlers/main.yml | 26 + roles/registry_ui/hooks/converge | 3 + roles/registry_ui/hooks/destroy | 3 + roles/registry_ui/hooks/lint | 3 + roles/registry_ui/hooks/molecule.rc | 74 ++ roles/registry_ui/hooks/test | 3 + roles/registry_ui/hooks/tox.sh | 52 ++ roles/registry_ui/hooks/verify | 3 + roles/registry_ui/meta/main.yml | 27 + .../molecule/configured/converge.yml | 11 + .../configured/group_vars/all/htpasswd.yml | 21 + .../configured/group_vars/all/nginx.yml | 136 ++++ .../configured/group_vars/all/redis.yml | 5 + .../configured/group_vars/all/registry.yml | 72 ++ .../configured/group_vars/all/snakeoil.yml | 16 + .../configured/group_vars/all/vars.yml | 29 + .../molecule/configured/molecule.yml | 61 ++ .../molecule/configured/prepare.yml | 51 ++ .../molecule/configured/requirements.yml | 15 + .../molecule/configured/tests/test_default.py | 188 +++++ .../registry_ui/molecule/default/converge.yml | 11 + .../molecule/default/group_vars/all/vars.yml | 14 + .../registry_ui/molecule/default/molecule.yml | 61 ++ .../registry_ui/molecule/default/prepare.yml | 48 ++ .../molecule/default/requirements.yml | 6 + .../molecule/default/tests/test_default.py | 188 +++++ .../registry_ui/molecule/latest/converge.yml | 11 + 
.../molecule/latest/group_vars/all/vars.yml | 16 + .../registry_ui/molecule/latest/molecule.yml | 61 ++ roles/registry_ui/molecule/latest/prepare.yml | 48 ++ .../molecule/latest/requirements.yml | 6 + .../molecule/latest/tests/test_default.py | 188 +++++ roles/registry_ui/tasks/configure.yml | 26 + roles/registry_ui/tasks/download.yml | 53 ++ roles/registry_ui/tasks/install.yml | 108 +++ roles/registry_ui/tasks/main.yml | 20 + roles/registry_ui/tasks/prepare.yml | 116 +++ roles/registry_ui/tasks/service.yml | 10 + .../init/openrc/conf.d/registry-ui.j2 | 16 + .../init/openrc/init.d/registry-ui.j2 | 42 ++ .../init/systemd/registry-ui.service.j2 | 27 + roles/registry_ui/templates/registry-ui.j2 | 27 + .../templates/registry-ui/config.yml.j2 | 159 +++++ roles/registry_ui/test-requirements.txt | 12 + roles/registry_ui/tox.ini | 35 + roles/registry_ui/vars/archlinux-openrc.yml | 6 + roles/registry_ui/vars/archlinux.yml | 6 + roles/registry_ui/vars/artixlinux.yml | 6 + roles/registry_ui/vars/debian.yml | 3 + roles/registry_ui/vars/main.yml | 81 +++ test-requirements.txt | 13 + 347 files changed, 20375 insertions(+) create mode 100644 .config/ansible-lint.yml create mode 100644 .config/pycodestyle.cfg create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/workflows/clean-workflows.yml create mode 100644 .github/workflows/linter.yml create mode 100644 .github/workflows/main.yml create mode 100644 .github/workflows/push-to-ansible-galaxy.yml create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 galaxy.yml create mode 100755 hooks/converge create mode 100755 hooks/destroy create mode 100755 hooks/doc create mode 100755 hooks/install create mode 100755 hooks/lint create mode 100755 hooks/molecule.rc create mode 100755 hooks/run_tox.sh create mode 100755 hooks/test create mode 100755 hooks/uninstall create mode 100755 hooks/verify create mode 100644 meta/runtime.yml create mode 100644 plugins/filter/container.py create mode 100644 plugins/filter/docker.py create mode 100644 plugins/modules/container_directories.py create mode 100644 plugins/modules/container_environments.py create mode 100644 plugins/modules/container_mounts.py create mode 100644 plugins/modules/docker_client_configs.py create mode 100644 plugins/modules/docker_common_config.py create mode 100644 plugins/modules/docker_plugins.py create mode 100644 plugins/modules/docker_version.py create mode 100644 roles/container/.ansible-lint create mode 100644 roles/container/.editorconfig create mode 100644 roles/container/.flake8 create mode 100644 roles/container/.github/workflows/clean-workflows.yml create mode 100644 roles/container/.github/workflows/configured.yml create mode 100644 roles/container/.github/workflows/galaxy.yml create mode 100644 roles/container/.github/workflows/linter.yml create mode 100644 roles/container/.github/workflows/main.yml create mode 100644 roles/container/.github/workflows/many-property-files.yml create mode 100644 roles/container/.github/workflows/multiple-container-with-filter.yml create mode 100644 roles/container/.github/workflows/multiple-container.yml create mode 100644 roles/container/.github/workflows/update-container.yml create mode 100644 roles/container/.github/workflows/update-properties.yml create mode 100644 roles/container/.gitignore create mode 100644 roles/container/.yamllint create mode 100644 roles/container/LICENSE create mode 100644 
roles/container/Makefile create mode 100644 roles/container/README.md create mode 100644 roles/container/collections.yml create mode 100644 roles/container/defaults/main.yml create mode 100644 roles/container/files/list_all_container.sh create mode 100644 roles/container/files/list_all_images.sh create mode 100644 roles/container/files/parse_container_fact.sh create mode 100644 roles/container/files/prune.sh create mode 100644 roles/container/files/remove_stopped_container.sh create mode 100644 roles/container/files/remove_untagged_images.sh create mode 100644 roles/container/handlers/main.yml create mode 100755 roles/container/hooks/converge create mode 100755 roles/container/hooks/destroy create mode 100755 roles/container/hooks/lint create mode 100644 roles/container/hooks/molecule.rc create mode 100755 roles/container/hooks/test create mode 100755 roles/container/hooks/tox.sh create mode 100755 roles/container/hooks/verify create mode 100644 roles/container/meta/main.yml create mode 100644 roles/container/molecule/configured/converge.yml create mode 100644 roles/container/molecule/configured/group_vars/all/vars.yml create mode 100644 roles/container/molecule/configured/molecule.yml create mode 100644 roles/container/molecule/configured/prepare.yml create mode 100644 roles/container/molecule/configured/tests/test_default.py create mode 100644 roles/container/molecule/default/converge.yml create mode 100644 roles/container/molecule/default/group_vars/all/vars.yml create mode 100644 roles/container/molecule/default/molecule.yml create mode 100644 roles/container/molecule/default/prepare.yml create mode 100644 roles/container/molecule/default/tests/test_default.py create mode 100644 roles/container/molecule/many-properties/converge.yml create mode 100644 roles/container/molecule/many-properties/group_vars/all/vars.yml create mode 100644 roles/container/molecule/many-properties/molecule.yml create mode 100644 roles/container/molecule/many-properties/prepare.yml create mode 100644 roles/container/molecule/many-properties/requirements.yml create mode 100644 roles/container/molecule/many-properties/tests/test_default.py create mode 100644 roles/container/molecule/multiple-container-with-filter/converge.yml create mode 100644 roles/container/molecule/multiple-container-with-filter/group_vars/all/vars.yml create mode 100644 roles/container/molecule/multiple-container-with-filter/molecule.yml create mode 100644 roles/container/molecule/multiple-container-with-filter/prepare.yml create mode 100644 roles/container/molecule/multiple-container-with-filter/tests/test_default.py create mode 100644 roles/container/molecule/multiple-container/converge.yml create mode 100644 roles/container/molecule/multiple-container/group_vars/all/vars.yml create mode 100644 roles/container/molecule/multiple-container/molecule.yml create mode 100644 roles/container/molecule/multiple-container/prepare.yml create mode 100644 roles/container/molecule/multiple-container/tests/test_default.py create mode 100644 roles/container/molecule/update-container/converge.yml create mode 100644 roles/container/molecule/update-container/group_vars/all/vars.yml create mode 100644 roles/container/molecule/update-container/molecule.yml create mode 100644 roles/container/molecule/update-container/prepare.yml create mode 100644 roles/container/molecule/update-properties/converge.yml create mode 100644 roles/container/molecule/update-properties/group_vars/all/vars.yml create mode 100644 roles/container/molecule/update-properties/molecule.yml 
create mode 100644 roles/container/molecule/update-properties/prepare.yml create mode 100644 roles/container/molecule/update-properties/tests/test_default.py create mode 100644 roles/container/tasks/custom-tasks/main.yml create mode 100644 roles/container/tasks/custom-tasks/post-tasks.yml create mode 100644 roles/container/tasks/custom-tasks/pre-tasks.yml create mode 100644 roles/container/tasks/download.yml create mode 100644 roles/container/tasks/get_information.yml create mode 100644 roles/container/tasks/launch/launch_container.yml create mode 100644 roles/container/tasks/launch/launch_for_older_ansible.yml create mode 100644 roles/container/tasks/launch/main.yml create mode 100644 roles/container/tasks/login.yml create mode 100644 roles/container/tasks/main.yml create mode 100644 roles/container/tasks/network.yml create mode 100644 roles/container/tasks/prepare.yml create mode 100644 roles/container/templates/container.env.j2 create mode 100644 roles/container/templates/coremedia_importer.sh.j2 create mode 100644 roles/container/templates/import-users.sh.j2 create mode 100644 roles/container/templates/management-tools.rc.j2 create mode 100644 roles/container/templates/prometheus.yml.j2 create mode 100644 roles/container/templates/requirements.txt.j2 create mode 100644 roles/container/templates/resetcaefeeder.properties.j2 create mode 100644 roles/container/test-requirements.txt create mode 100644 roles/container/tox.ini create mode 100644 roles/container/vars/alpine.yml create mode 100644 roles/container/vars/archlinux.yml create mode 100644 roles/container/vars/debian-10.yml create mode 100644 roles/container/vars/debian.yml create mode 100644 roles/container/vars/main.yml create mode 100644 roles/docker/.ansible-lint create mode 100644 roles/docker/.editorconfig create mode 100644 roles/docker/.flake8 create mode 100644 roles/docker/.github/linters/.markdown-lint.yml create mode 100644 roles/docker/.github/linters/.yaml-lint.yml create mode 100644 roles/docker/.github/workflows/clean-workflows.yml create mode 100644 roles/docker/.github/workflows/dockerd-with-plugin.yml create mode 100644 roles/docker/.github/workflows/dockerd-with-tls.yml create mode 100644 roles/docker/.github/workflows/galaxy.yml create mode 100644 roles/docker/.github/workflows/linter.yml create mode 100644 roles/docker/.github/workflows/main.yml create mode 100644 roles/docker/.gitignore create mode 100644 roles/docker/.pycodestyle create mode 100644 roles/docker/.yamllint create mode 100644 roles/docker/LICENSE create mode 100644 roles/docker/Makefile create mode 100644 roles/docker/README.md create mode 100644 roles/docker/collections.yml create mode 100644 roles/docker/defaults/main.yml create mode 100644 roles/docker/handlers/main.yml create mode 100644 roles/docker/helper_scripts/docker_config_to_ansible_yaml.sh create mode 100755 roles/docker/hooks/converge create mode 100755 roles/docker/hooks/destroy create mode 100755 roles/docker/hooks/lint create mode 100644 roles/docker/hooks/molecule.rc create mode 100755 roles/docker/hooks/test create mode 100755 roles/docker/hooks/tox.sh create mode 100755 roles/docker/hooks/verify create mode 100644 roles/docker/meta/main.yml create mode 100644 roles/docker/molecule/default/converge.yml create mode 100644 roles/docker/molecule/default/group_vars/all/vars.yml create mode 100644 roles/docker/molecule/default/molecule.yml create mode 100644 roles/docker/molecule/default/prepare.yml create mode 100644 roles/docker/molecule/default/tests/test_custom_config.py create 
mode 100644 roles/docker/molecule/default/tests/test_default.py create mode 100644 roles/docker/molecule/dockerd-with-client-config/converge.yml create mode 100644 roles/docker/molecule/dockerd-with-client-config/group_vars/all/snakeoil.yml create mode 100644 roles/docker/molecule/dockerd-with-client-config/group_vars/all/vars.yml create mode 100644 roles/docker/molecule/dockerd-with-client-config/molecule.yml create mode 100644 roles/docker/molecule/dockerd-with-client-config/prepare.yml create mode 100644 roles/docker/molecule/dockerd-with-client-config/requirements.yml create mode 100644 roles/docker/molecule/dockerd-with-client-config/tests/test_custom_config.py create mode 100644 roles/docker/molecule/dockerd-with-client-config/tests/test_default.py create mode 100644 roles/docker/molecule/dockerd-with-plugin/converge.yml create mode 100644 roles/docker/molecule/dockerd-with-plugin/group_vars/all/loki.yml create mode 100644 roles/docker/molecule/dockerd-with-plugin/group_vars/all/vars.yml create mode 100644 roles/docker/molecule/dockerd-with-plugin/molecule.yml create mode 100644 roles/docker/molecule/dockerd-with-plugin/prepare.yml create mode 100644 roles/docker/molecule/dockerd-with-plugin/requirements.yml create mode 100644 roles/docker/molecule/dockerd-with-plugin/tests/test_custom_config.py create mode 100644 roles/docker/molecule/dockerd-with-plugin/tests/test_default.py create mode 100644 roles/docker/molecule/dockerd-with-tls/converge.yml create mode 100644 roles/docker/molecule/dockerd-with-tls/group_vars/all/snakeoil.yml create mode 100644 roles/docker/molecule/dockerd-with-tls/group_vars/all/vars.yml create mode 100644 roles/docker/molecule/dockerd-with-tls/molecule.yml create mode 100644 roles/docker/molecule/dockerd-with-tls/prepare.yml create mode 100644 roles/docker/molecule/dockerd-with-tls/requirements.yml create mode 100644 roles/docker/molecule/dockerd-with-tls/tests/test_custom_config.py create mode 100644 roles/docker/molecule/dockerd-with-tls/tests/test_default.py create mode 100644 roles/docker/molecule/update-config/converge.yml create mode 100644 roles/docker/molecule/update-config/group_vars/all/vars.yml create mode 100644 roles/docker/molecule/update-config/molecule.yml create mode 100644 roles/docker/molecule/update-config/prepare.yml create mode 100644 roles/docker/molecule/update-config/tests/test_custom_config.py create mode 100644 roles/docker/molecule/update-config/tests/test_default.py create mode 100644 roles/docker/tasks/client.obsolete create mode 100644 roles/docker/tasks/compose.yml create mode 100644 roles/docker/tasks/configure.yml create mode 100644 roles/docker/tasks/install.yml create mode 100644 roles/docker/tasks/main.yml create mode 100644 roles/docker/tasks/plugins.yml create mode 100644 roles/docker/tasks/prepare.yml create mode 100644 roles/docker/tasks/repositories.yml create mode 100644 roles/docker/tasks/service.yml create mode 100644 roles/docker/tasks/users.yml create mode 100644 roles/docker/templates/apt/docker-ce.list.j2 create mode 100644 roles/docker/templates/docker.j2 create mode 100644 roles/docker/templates/openrc/conf.d/docker.j2 create mode 100644 roles/docker/templates/openrc/init.d/docker.j2 create mode 100644 roles/docker/templates/systemd/overwrite.conf.j2 create mode 100644 roles/docker/templates/systemd/proxy.conf.j2 create mode 100644 roles/docker/test-requirements.txt create mode 100644 roles/docker/tests/inventory create mode 100644 roles/docker/tests/test.yml create mode 100644 roles/docker/tox.ini create 
mode 100644 roles/docker/vars/archlinux-openrc.yaml create mode 100644 roles/docker/vars/archlinux.yaml create mode 100644 roles/docker/vars/artixlinux.yaml create mode 100644 roles/docker/vars/debian.yaml create mode 100644 roles/docker/vars/default.yaml create mode 100644 roles/docker/vars/main.yaml create mode 100644 roles/docker/vars/redhat.yaml create mode 100644 roles/registry/.ansible-lint create mode 100644 roles/registry/.editorconfig create mode 100644 roles/registry/.flake8 create mode 100644 roles/registry/.github/workflows/clean-workflows.yml create mode 100644 roles/registry/.github/workflows/configured.yml create mode 100644 roles/registry/.github/workflows/galaxy.yml create mode 100644 roles/registry/.github/workflows/linter.yml create mode 100644 roles/registry/.github/workflows/main.yml create mode 100644 roles/registry/.gitignore create mode 100644 roles/registry/.yamllint create mode 100644 roles/registry/CONTRIBUTING.md create mode 100644 roles/registry/LICENSE create mode 100644 roles/registry/Makefile create mode 100644 roles/registry/README.md create mode 100644 roles/registry/collections.yml create mode 100644 roles/registry/defaults/main.yml create mode 100644 roles/registry/handlers/main.yml create mode 100755 roles/registry/hooks/converge create mode 100755 roles/registry/hooks/destroy create mode 100755 roles/registry/hooks/lint create mode 100644 roles/registry/hooks/molecule.rc create mode 100755 roles/registry/hooks/test create mode 100755 roles/registry/hooks/tox.sh create mode 100755 roles/registry/hooks/verify create mode 100644 roles/registry/meta/main.yml create mode 100644 roles/registry/molecule/configured/converge.yml create mode 100644 roles/registry/molecule/configured/group_vars/all/redis.yml create mode 100644 roles/registry/molecule/configured/group_vars/all/vars.yml create mode 100644 roles/registry/molecule/configured/molecule.yml create mode 100644 roles/registry/molecule/configured/prepare.yml create mode 100644 roles/registry/molecule/configured/requirements.yml create mode 100644 roles/registry/molecule/configured/tests/test_default.py create mode 100644 roles/registry/molecule/default/converge.yml create mode 100644 roles/registry/molecule/default/group_vars/all/vars.yml create mode 100644 roles/registry/molecule/default/molecule.yml create mode 100644 roles/registry/molecule/default/prepare.yml create mode 100644 roles/registry/molecule/default/tests/test_default.py create mode 100644 roles/registry/molecule/latest/converge.yml create mode 100644 roles/registry/molecule/latest/group_vars/all/vars.yml create mode 100644 roles/registry/molecule/latest/molecule.yml create mode 100644 roles/registry/molecule/latest/prepare.yml create mode 100644 roles/registry/molecule/latest/tests/test_default.py create mode 100644 roles/registry/tasks/configure.yml create mode 100644 roles/registry/tasks/download.yml create mode 100644 roles/registry/tasks/install.yml create mode 100644 roles/registry/tasks/main.yml create mode 100644 roles/registry/tasks/prepare.yml create mode 100644 roles/registry/tasks/service.yml create mode 100644 roles/registry/templates/init/openrc/conf.d/registry.j2 create mode 100644 roles/registry/templates/init/openrc/init.d/registry.j2 create mode 100644 roles/registry/templates/init/systemd/registry.service.j2 create mode 100644 roles/registry/templates/registry.j2 create mode 100644 roles/registry/templates/registry/config.yml.j2 create mode 100644 roles/registry/test-requirements.txt create mode 100644 
roles/registry/tox.ini create mode 100644 roles/registry/vars/archlinux-openrc.yml create mode 100644 roles/registry/vars/archlinux.yml create mode 100644 roles/registry/vars/artixlinux.yml create mode 100644 roles/registry/vars/debian.yml create mode 100644 roles/registry/vars/main.yml create mode 100644 roles/registry_ui/.ansible-lint create mode 100644 roles/registry_ui/.editorconfig create mode 100644 roles/registry_ui/.flake8 create mode 100644 roles/registry_ui/.github/workflows/clean-workflows.yml create mode 100644 roles/registry_ui/.github/workflows/configured.yml create mode 100644 roles/registry_ui/.github/workflows/galaxy.yml create mode 100644 roles/registry_ui/.github/workflows/linter.yml create mode 100644 roles/registry_ui/.github/workflows/main.yml create mode 100644 roles/registry_ui/.gitignore create mode 100644 roles/registry_ui/.yamllint create mode 100644 roles/registry_ui/CONTRIBUTING.md create mode 100644 roles/registry_ui/LICENSE create mode 100644 roles/registry_ui/Makefile create mode 100644 roles/registry_ui/README.md create mode 100644 roles/registry_ui/collections.yml create mode 100644 roles/registry_ui/defaults/main.yml create mode 100644 roles/registry_ui/handlers/main.yml create mode 100755 roles/registry_ui/hooks/converge create mode 100755 roles/registry_ui/hooks/destroy create mode 100755 roles/registry_ui/hooks/lint create mode 100644 roles/registry_ui/hooks/molecule.rc create mode 100755 roles/registry_ui/hooks/test create mode 100755 roles/registry_ui/hooks/tox.sh create mode 100755 roles/registry_ui/hooks/verify create mode 100644 roles/registry_ui/meta/main.yml create mode 100644 roles/registry_ui/molecule/configured/converge.yml create mode 100644 roles/registry_ui/molecule/configured/group_vars/all/htpasswd.yml create mode 100644 roles/registry_ui/molecule/configured/group_vars/all/nginx.yml create mode 100644 roles/registry_ui/molecule/configured/group_vars/all/redis.yml create mode 100644 roles/registry_ui/molecule/configured/group_vars/all/registry.yml create mode 100644 roles/registry_ui/molecule/configured/group_vars/all/snakeoil.yml create mode 100644 roles/registry_ui/molecule/configured/group_vars/all/vars.yml create mode 100644 roles/registry_ui/molecule/configured/molecule.yml create mode 100644 roles/registry_ui/molecule/configured/prepare.yml create mode 100644 roles/registry_ui/molecule/configured/requirements.yml create mode 100644 roles/registry_ui/molecule/configured/tests/test_default.py create mode 100644 roles/registry_ui/molecule/default/converge.yml create mode 100644 roles/registry_ui/molecule/default/group_vars/all/vars.yml create mode 100644 roles/registry_ui/molecule/default/molecule.yml create mode 100644 roles/registry_ui/molecule/default/prepare.yml create mode 100644 roles/registry_ui/molecule/default/requirements.yml create mode 100644 roles/registry_ui/molecule/default/tests/test_default.py create mode 100644 roles/registry_ui/molecule/latest/converge.yml create mode 100644 roles/registry_ui/molecule/latest/group_vars/all/vars.yml create mode 100644 roles/registry_ui/molecule/latest/molecule.yml create mode 100644 roles/registry_ui/molecule/latest/prepare.yml create mode 100644 roles/registry_ui/molecule/latest/requirements.yml create mode 100644 roles/registry_ui/molecule/latest/tests/test_default.py create mode 100644 roles/registry_ui/tasks/configure.yml create mode 100644 roles/registry_ui/tasks/download.yml create mode 100644 roles/registry_ui/tasks/install.yml create mode 100644 
roles/registry_ui/tasks/main.yml create mode 100644 roles/registry_ui/tasks/prepare.yml create mode 100644 roles/registry_ui/tasks/service.yml create mode 100644 roles/registry_ui/templates/init/openrc/conf.d/registry-ui.j2 create mode 100644 roles/registry_ui/templates/init/openrc/init.d/registry-ui.j2 create mode 100644 roles/registry_ui/templates/init/systemd/registry-ui.service.j2 create mode 100644 roles/registry_ui/templates/registry-ui.j2 create mode 100644 roles/registry_ui/templates/registry-ui/config.yml.j2 create mode 100644 roles/registry_ui/test-requirements.txt create mode 100644 roles/registry_ui/tox.ini create mode 100644 roles/registry_ui/vars/archlinux-openrc.yml create mode 100644 roles/registry_ui/vars/archlinux.yml create mode 100644 roles/registry_ui/vars/artixlinux.yml create mode 100644 roles/registry_ui/vars/debian.yml create mode 100644 roles/registry_ui/vars/main.yml create mode 100644 test-requirements.txt diff --git a/.config/ansible-lint.yml b/.config/ansible-lint.yml new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/.config/ansible-lint.yml @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/.config/pycodestyle.cfg b/.config/pycodestyle.cfg new file mode 100644 index 0000000..c370dd2 --- /dev/null +++ b/.config/pycodestyle.cfg @@ -0,0 +1,7 @@ +[pycodestyle] + +ignore = E402, E123 + +# It's fine to have line-length of 99 +max-line-length = 99 + diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..3cee0fd --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,30 @@ +--- +name: Bug report +about: Create a report to help us improve +--- + +##### SUMMARY + + +##### STEPS TO REPRODUCE + +```yaml + +``` + + + +##### EXPECTED RESULTS + +```paste below + +``` + +##### ACTUAL RESULTS + +```paste below + +``` + +##### ENVIRONMENT + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..8208392 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,17 @@ +--- +name: Feature request +about: Suggest an idea for this project +--- + +##### SUMMARY + + +##### ADDITIONAL INFORMATION + + + +```yaml + +``` + + diff --git a/.github/workflows/clean-workflows.yml b/.github/workflows/clean-workflows.yml new file mode 100644 index 0000000..4c2b6a8 --- /dev/null +++ b/.github/workflows/clean-workflows.yml @@ -0,0 +1,31 @@ +--- + +name: delete workflow runs + +on: + schedule: + - cron: "10 5 * * 0" + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + +jobs: + delete-workflow-runs: + runs-on: ubuntu-latest + name: delete old workflow runs + steps: + - name: Delete workflow runs + uses: MajorScruffy/delete-old-workflow-runs@v0.3.0 + with: + repository: bodsch/ansible-collection-docker + older-than-seconds: 2592000 # remove all workflow runs older than 30 day + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml new file mode 100644 index 0000000..00ae685 --- /dev/null +++ b/.github/workflows/linter.yml @@ -0,0 +1,66 @@ +--- + +name: lint plugins + +on: + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + push: + branches: + - 'feature/**' + - 'fix/**' + - '!doc/**' + 
paths: + - 'plugins/**' + - '!roles/**' + - '!.github/workflows/test_role*.yml' + - '.config/pycodestyle.cfg' + pull_request: + branches: + - 'feature/**' + - 'fix/**' + - '!doc/**' + paths: + - 'plugins/**' + - '!roles/**' + - '!.github/workflows/test_role*.yml' + - '.config/pycodestyle.cfg' + +jobs: + pep8: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python_version: + - "3.10.11" + - "3.11.3" + + steps: + - name: Check out the codebase. + uses: actions/checkout@v3 + with: + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: Install test dependencies. + run: | + python3 -m pip install --upgrade pip + python3 -m pip install pycodestyle + + - name: Lint code. + run: | + pycodestyle plugins/ --config=.config/pycodestyle.cfg --statistics --count diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000..c4ca8c1 --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,147 @@ +--- +name: CI + +on: + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + push: + branches: + - 'main' + - 'feature/**' + - 'fix/**' + - '!doc/**' + pull_request: + branches: + - 'main' + - 'feature/**' + - 'fix/**' + - '!doc/**' + +env: + COLLECTION_NAMESPACE: bodsch + COLLECTION_NAME: docker + +jobs: + arch: + name: "${{ matrix.collection_role }} - ${{ matrix.image }}, ansible: ${{ matrix.ansible-version }}, python: ${{ matrix.python_version }}" + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + image: + - archlinux:latest + ansible-version: + - '6.7' + - '7.5' + - '8.5' + python_version: + - "3.10.11" + - "3.11.3" + scenario: + - default + collection_role: + - docker + - container + - registry + - registry_ui + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: Install collection + run: | + make \ + install + + - name: test with tox + run: | + make \ + test \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" \ + -e COLLECTION_ROLE="${{ matrix.collection_role }}" \ + -e COLLECTION_SCENARIO="${{ matrix.scenario }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} + + deb: + name: "${{ matrix.collection_role }} - ${{ matrix.image }}, ansible: ${{ matrix.ansible-version }}, python: ${{ matrix.python_version }}" + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + image: + - debian:11 + - debian:12 + - ubuntu:20.04 + - ubuntu:22.04 + ansible-version: + - '6.7' + - '7.5' + - '8.5' + python_version: + - "3.10.11" + - "3.11.3" + scenario: + - default + collection_role: + - docker + - container + - registry + - registry_ui + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: Install collection + run: | + make \ + install + + - name: test with tox + run: | + make \ + test \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" \ + -e COLLECTION_ROLE="${{ matrix.collection_role }}" \ + -e COLLECTION_SCENARIO="${{ matrix.scenario }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/.github/workflows/push-to-ansible-galaxy.yml b/.github/workflows/push-to-ansible-galaxy.yml new file mode 100644 index 0000000..8a4f135 --- /dev/null +++ b/.github/workflows/push-to-ansible-galaxy.yml @@ -0,0 +1,29 @@ +--- + +name: push to ansible galaxy + +on: + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + +jobs: + galaxy: + name: galaxy + runs-on: ubuntu-20.04 + steps: + - name: Check out the codebase + uses: actions/checkout@v3 + + - name: Deploy Ansible Galaxy Collection + uses: artis3n/ansible_galaxy_collection@v2.8.3 + with: + api_key: '${{ secrets.GALAXY_API_KEY }}' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on GitHub, new technologies +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on GitHub. +- Clone the fork on your local machine. Your remote repo on GitHub is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago, be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests, run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on GitHub, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes, just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged, you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. 
diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..96bd9db --- /dev/null +++ b/Makefile @@ -0,0 +1,36 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +export COLLECTION_NAMESPACE ?= bodsch +export COLLECTION_NAME ?= docker +export COLLECTION_ROLE ?= +export COLLECTION_SCENARIO ?= default + +.PHONY: install uninstall doc converge test destroy verify lint + +default: converge + +install: + @hooks/install + +uninstall: + @hooks/uninstall + +doc: + @hooks/doc + +converge: + @hooks/converge + +test: + @hooks/test + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +lint: + @hooks/lint diff --git a/galaxy.yml b/galaxy.yml new file mode 100644 index 0000000..e87caaf --- /dev/null +++ b/galaxy.yml @@ -0,0 +1,43 @@ +--- + +namespace: bodsch +name: docker + +version: 0.9.0 + +readme: README.md + +authors: + - Bodo Schulz (@bodsch) + +description: A collection of Ansible roles for the Docker universe. + +license: + - Apache-2.0 + +license_file: '' + +tags: + - system + - docker + - container + +dependencies: + bodsch.core: ">=1.1.4" + bodsch.scm: "*" + +repository: https://github.com/bodsch/ansible-collection-docker +documentation: https://github.com/bodsch/ansible-collection-docker/README.md +homepage: https://github.com/bodsch/ansible-collection-docker +issues: https://github.com/bodsch/ansible-collection-docker/issues + +build_ignore: + - "galaxy.yml" + - "*.pyc" + - "*.retry" + - ".ansible-lint" + - ".editorconfig" + - ".flake8" + - ".gitignore" + - ".yamllint" + - ".github" diff --git a/hooks/converge b/hooks/converge new file mode 100755 index 0000000..9d1d953 --- /dev/null +++ b/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/run_tox.sh "converge" diff --git a/hooks/destroy b/hooks/destroy new file mode 100755 index 0000000..204648f --- /dev/null +++ b/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/run_tox.sh "destroy" diff --git a/hooks/doc b/hooks/doc new file mode 100755 index 0000000..c600aae --- /dev/null +++ b/hooks/doc @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +set -x + +if [ -z "${COLLECTION_DIR}" ] +then + echo "missing collection directory" + echo "run 'make install' first" + + exit 1 +fi + +if [ -d ${COLLECTION_DIR} ] +then + # ansible-doc --list --list_files -t module ${COLLECTION_NAMESPACE}.${COLLECTION_NAME} + + ansible_modules=$( + ansible-doc --list ${COLLECTION_NAMESPACE}.${COLLECTION_NAME} --json | jq -r 'keys[]' + ) + + for i in ${ansible_modules} + do + # echo " - ${i}" + PAGER='cat' ansible-doc --type module ${i} + echo "" + done +fi + +exit 0 diff --git a/hooks/install b/hooks/install new file mode 100755 index 0000000..b583114 --- /dev/null +++ b/hooks/install @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +. 
hooks/molecule.rc + +mkdir \ + --parents \ + "${COLLECTION_DIR}" + +rsync \ + --archive \ + --verbose \ + --recursive \ + --exclude hooks \ + --exclude __pycache__ \ + --delete \ + ../ansible-collection-${COLLECTION_NAME}/* \ + "${COLLECTION_DIR}/" diff --git a/hooks/lint b/hooks/lint new file mode 100755 index 0000000..41392a8 --- /dev/null +++ b/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/run_tox.sh "lint" diff --git a/hooks/molecule.rc b/hooks/molecule.rc new file mode 100755 index 0000000..32a9e42 --- /dev/null +++ b/hooks/molecule.rc @@ -0,0 +1,11 @@ + +COLLECTION_DIR="${HOME}/.ansible/collections/ansible_collections/${COLLECTION_NAMESPACE}/${COLLECTION_NAME}" + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" diff --git a/hooks/run_tox.sh b/hooks/run_tox.sh new file mode 100755 index 0000000..ceb98c1 --- /dev/null +++ b/hooks/run_tox.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +# set -x +set -e + +TOX_TEST="${1}" + +if [[ ! -z "${COLLECTION_ROLE// }" ]] +then + if [ -d "roles/${COLLECTION_ROLE}" ] + then + echo "- ${COLLECTION_ROLE} - ${COLLECTION_SCENARIO}" + echo "" + + pushd "roles/${COLLECTION_ROLE}" + + tox "${TOX_OPTS}" -- molecule ${TOX_TEST} --scenario-name ${COLLECTION_SCENARIO} + + echo "" + popd + else + echo "collection role ${COLLECTION_ROLE} not found" + fi +else + for role in $(find roles -maxdepth 1 -mindepth 1 -type d -printf "%f\n") + do + echo "- ${role} - ${COLLECTION_SCENARIO}" + echo "" + + pushd roles/${role} + + if [ -f "./tox.ini" ] + then + for test in $(find molecule -maxdepth 1 -mindepth 1 -type d -printf "%f\n") + do + export TOX_SCENARIO=${test} + + tox "${TOX_OPTS}" -- molecule ${TOX_TEST} ${TOX_ARGS} + done + fi + + echo "" + popd + done +fi diff --git a/hooks/test b/hooks/test new file mode 100755 index 0000000..419b434 --- /dev/null +++ b/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/run_tox.sh "test" diff --git a/hooks/uninstall b/hooks/uninstall new file mode 100755 index 0000000..c6ec45b --- /dev/null +++ b/hooks/uninstall @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +. 
hooks/molecule.rc + +if [ -z "${COLLECTION_DIR}" ] +then + echo "missing collection directory" + exit 1 +fi + +if [ -d ${COLLECTION_DIR} ] +then + rm \ + --recursive \ + --force \ + "${COLLECTION_DIR}" +fi + +exit 0 diff --git a/hooks/verify b/hooks/verify new file mode 100755 index 0000000..fe9d048 --- /dev/null +++ b/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/run_tox.sh "verify" diff --git a/meta/runtime.yml b/meta/runtime.yml new file mode 100644 index 0000000..2e0d81e --- /dev/null +++ b/meta/runtime.yml @@ -0,0 +1,3 @@ +--- + +requires_ansible: '>=2.9.0' diff --git a/plugins/filter/container.py b/plugins/filter/container.py new file mode 100644 index 0000000..9c13375 --- /dev/null +++ b/plugins/filter/container.py @@ -0,0 +1,658 @@ +# python 3 headers, required if submitting to Ansible + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +from ansible.utils.display import Display + +import json +from ruamel.yaml import YAML +import itertools + +display = Display() + + +class FilterModule(): + """ + ansible filter + """ + + def filters(self): + + return { + # 'container_hashes': self.filter_hashes, + # 'compare_dict': self.filter_compare_dict, + 'container_filter': self.container_filter, + 'container_names': self.filter_names, + 'container_images': self.filter_images, + 'container_state': self.container_state, + 'container_volumes': self.filter_volumes, + 'container_mounts': self.filter_mounts, + 'container_environnments': self.filter_environnments, + 'container_ignore_state': self.container_ignore_state, + 'container_with_states': self.container_with_states, + 'container_filter_by': self.container_filter_by, + 'container_facts': self.container_facts, + # 'remove_values': self.remove_values, + 'remove_custom_fields': self.remove_custom_fields, + 'remove_source_handling': self.remove_source_handling, + 'changed': self.filter_changed, + # 'properties_changed': self.filter_properties_changed, + 'update': self.filter_update, + 'files_available': self.files_available, + 'reporting': self.reporting, + 'combine_registries': self.combine_registries, + 'validate_mountpoints': self.validate_mountpoints, + } + + def filter_hashes(self, data): + """ + return basic information about containers + """ + seen = {} + data = {} + + if isinstance(data, list): + data = data.get('results', []) + + for i in data: + if isinstance(i, dict): + cont = {} + item = {} + + if 'container' in i: + cont = i.get('container') + if 'item' in i: + item = i.get('item') + + if cont: + name = cont.get('Name').strip("/") + # display.vv("found: {}".format(name)) + image = cont.get('Config').get('Image') + created = cont.get('Created') + elif item: + name = item.get('name') + # display.vv("found: {}".format(name)) + image = item.get('image') + created = "None" + else: + pass + else: + pass + + registry = image.split('/')[0] + container = image.split('/')[1].split(':')[0] + container_tag = image.split(':')[1] + + seen[name] = { + "container": container, + "registry": registry, + "tag": container_tag, + "created": created, + } + + # display.v("return : {}".format(seen)) + return seen + + def filter_compare_dict(self, left_dict, right_dict): + """ + """ + result = {} + + if isinstance(left_dict, list): + _dict = {} + + for e in left_dict: + name = e.get('name') + image = e.get('image') + + registry = image.split('/')[0] + container = image.split('/')[1].split(':')[0] + container_tag = image.split(':')[1] + + _dict[name] = { + "container": container, + "registry": registry, 
+ "tag": container_tag, + "created": "None", + } + + left_dict = _dict + + for k in left_dict: + l_dict = left_dict[k] + r_dict = right_dict[k] + _ = l_dict.pop('created') + _ = r_dict.pop('created') + + if (k not in right_dict): + result[k] = l_dict + else: + left = json.dumps(l_dict, sort_keys=True) + right = json.dumps(r_dict, sort_keys=True) + + if (left != right): + result[k] = l_dict + + # display.v(f"= return : {result}") + return result + + def container_filter(self, data, state): + """ + """ + result = {} + + _data = data.copy() + + container_launch = [] + container_names = [] + container_images = [] + container_mounts = [] + container_volumes = [] + container_env = [] + + if len(state) > 0: + container_launch = self.container_ignore_state(_data, state) + + container_names = self.filter_names(_data) + container_images = self.container_state(_data) + container_mounts = self.filter_mounts(_data) + container_volumes = self.filter_volumes(_data) + container_env = self.filter_environnments(_data) + + result = dict( + names=container_names, + images=container_images, + launch=container_launch, + mounts=container_mounts, + volumes=container_volumes, + environnments=container_env, + ) + + return result + + def filter_names(self, data): + """ + """ + return self._get_keys_from_dict(data, 'name') + + def filter_images(self, data): + """ + """ + return self._get_keys_from_dict(data, 'image') + + def filter_environnments(self, data, want_list = ["name", "hostname", "environments", "properties", "property_files"]): + """ + """ + # display.v(f"filter_environnments(self, data, {want_list})") + result = [] + _data = data.copy() + + for i in _data: + # display.v(f" - {i.get('name')}") + res = {} + for k, v in i.items(): + if k in want_list: + res[k] = v + + result.append(res) + + # display.v(f" - result: {result}") + + return result + + def container_state(self, data, state='present', return_value='image'): + """ + state can be + - absent + - present + - stopped + - started ← (default) + """ + # display.v(f"container_state(self, data, {state}, {return_value})") + + result = [] + _defaults_present = ['started', 'present'] + _defaults_absent = ['stopped', 'absent'] + state_filter = [] + + if state in _defaults_present: + state_filter = _defaults_present + else: + state_filter = _defaults_absent + + for i in data: + if isinstance(i, dict): + _state = i.get('state', 'started') + image = i.get(return_value, None) + + if _state in state_filter: + if image: + result.append(image) + + # deduplicate + result = list(set(result)) + result = sorted(result) + + # display.v(f" = result {result}") + return result + + def remove_values(self, data, values): + """ + """ + return self._del_keys_from_dict(data, values) + + def filter_changed(self, data): + """ + """ + result = [] + if isinstance(data, dict): + data = data['results'] + + for i in data: + if isinstance(i, dict): + changed = i.get('changed', False) + item = i.get('item', None) + + if changed: + result.append(item) + + return result + + def filter_properties_changed(self, data): + """ + """ + result = [] + # display.v("filter_properties_changed({})".format({})) + + if isinstance(data, dict): + data = data['results'] + + for i in data: + if isinstance(i, dict): + changed = i.get('changed', False) + item = i.get('item', {}).get('name', None) + + if changed: + result.append(item) + + # display.v(" = result {}".format(result)) + + return result + + def filter_update(self, data, update): + """ + add recreate to changed container entry + """ + # 
display.v("filter_update(data, {})".format(update)) + for change in update: + for d in data: + if d.get('image') == change or d.get('name') == change: + d['recreate'] = "true" + + return data + + def filter_volumes(self, data): + """ + return volumes + """ + result = [] + volumes = self._get_keys_from_dict(data, 'volumes') + merged = list(itertools.chain(*volumes)) + + # - testing5:/var/tmp/testing5|{owner="1001",mode="0700",ignore=True} + # local : testing5 + # remote : /var/tmp/testing5 + # mount : - + # custom_fields: {owner="1001",mode="0700",ignore=True} + + # filter volumes with this endings + volume_block_list_ends = ( + '.pid', + '.sock', + '.socket', + '.conf', + '.config', + ) + volume_block_list_starts = ( + '/sys', + '/dev', + '/run', + ) + + yaml = YAML() + + def custom_fields(d): + """ + returns only custom fileds as json + """ + d = d.replace('=', ': ') + + if d.startswith("[") and d.endswith("]"): + d = d.replace("[", "") + d = d.replace("]", "") + + if not (d.startswith("{") and d.endswith("}")): + d = "{" + d + "}" + + code = yaml.load(d) + + return dict(code) + + for v in merged: + c_fields = dict() + values = v.split('|') + + if len(values) == 2 and values[1]: + c_fields = custom_fields(values[1]) + v = values[0] + + values = v.split(':') + count = len(values) + + local_volume = values[0] + remote_volume = values[1] + + if not ( + local_volume.endswith(volume_block_list_ends) or local_volume.startswith(volume_block_list_starts) + ): + res = dict( + # docker = "{}:{}".format(values[0], values[1]) + ":{}".format(values[2]) if values[2] + local = local_volume, # values[0], + remote = remote_volume # values[1], + ) + if count == 3 and values[2]: + res['mount'] = values[2] + + if c_fields and len(c_fields) > 0: + res['ansible'] = c_fields + + result.append(res) + + # display.v("return : {}".format(json.dumps(result, indent=4, sort_keys=True))) + + return result + + def filter_mounts(self, data): + """ + return mounts + """ + result = [] + mounts = self._get_keys_from_dict(data, 'mounts') + merged = list(itertools.chain(*mounts)) + + # remove all entries with + # "source_handling": { + # "create": false + # } + for item in merged: + if item.get('source_handling', {}) and item.get('source_handling', {}).get('create'): + result.append(item) + + # display.v("return : {}".format(json.dumps(result, indent=4, sort_keys=True))) + + return result + + def container_with_states(self, data, states=["present"], includes_undefined=True): + """ + """ + _data = data.copy() + + result = [i for i in _data if (i.get('state', 'started') in states)] + # names = [i.get("name") for i in result] + # display.v(f" = result: {names}") + + return result + + def container_ignore_state(self, data, ignore_states=["present"]): + """ + """ + _data = data.copy() + + ignore = [i for i in _data if (i.get('state', 'started') in ignore_states)] + result = [i for i in _data if not (i.get('state', 'started') in ignore_states)] + + ignore_container = [i.get("name") for i in ignore] + launch_container = [i.get("name") for i in result] + + display.v(f" = ignore container: {ignore_container}") + display.v(f" = launch container: {launch_container}") + + return result + + def container_filter_by(self, data, filter_by, filter_values): + """ + :param data: + :param filter_by: + :return: + """ + # display.v(f"container_filter_by(self, data, {filter_by}, {filter_values})") + + if filter_by not in ["name", "hostname", "image"]: + return data + + d = data.copy() + + for entry in d: + if filter_by == "name": + name = 
entry.get("name") + if name not in filter_values: + # display.v(f" = drop: {name}") + data.remove(entry) + + elif filter_by == "hostname": + hostname = entry.get("hostname") + if hostname not in filter_values: + # display.v(f" = drop: {hostname}") + data.remove(entry) + + elif filter_by == "image": + image = entry.get("image") + if image not in filter_values: + # display.v(f" = drop: {image}") + data.remove(entry) + + return data + + def container_facts(self, data): + """ + """ + display.v("container_facts(self, data)") + display.v(f" {data}") + display.v(f" type {type(data)}") + result = [] + + display.v(f" = result {result}") + + return result + + def remove_custom_fields(self, data): + """ + """ + # display.v(f"remove_custom_fields({data})") + result = [] + + if isinstance(data, list): + for v in data: + result.append(v.split('|')[0]) + else: + result = data + + # display.v(f"= return : {result}") + + return result + + def remove_source_handling(self, data): + """ + """ + # display.v(f"remove_source_handling({data})") + if isinstance(data, list): + data = self._del_keys_from_dict(data, 'source_handling') + + # display.v("return : {}".format(data)) + + return data + + def files_available(self, data): + """ + """ + result = [] + + for k in data: + if k.get('stat', {}).get('exists', False): + result.append(k.get('item')) + + return result + + def reporting(self, data, report_for): + """ + """ + states = [] + result = [] + + if isinstance(data, dict): + results = data.get("results", []) + + for r in results: + failed = r.get('failed', False) + changed = r.get('changed', False) + + if report_for == "failed" and failed: + states.append(r) + + if report_for == "changed" and changed: + states.append(r) + + # display.v(f"states: => {len(states)}") + + for item in states: + """ + """ + data = item.get('item', {}) + name = data.get('name', None) + hostname = data.get('hostname', None) + image = data.get('image', None) + msg = item.get('msg', None) + + # display.v(f" - name {name}") + # display.v(f" - hostname {hostname}") + # display.v(f" - image {image}") + # display.v(f" - msg {msg}") + + if report_for == "changed": + if hostname: + result.append(hostname) + elif name: + result.append(name) + else: + result.append(image) + + if report_for == "failed": + res = {} + if hostname: + res[hostname] = msg + elif name: + res[name] = msg + else: + res[image] = msg + + result.append(res) + + # display.v(f"result: => {result}") + + return result + + def combine_registries(self, data, defaults): + """ + """ + result = [] + + _default = defaults[0].copy() + _data = data.copy() + + if isinstance(_data, dict): + """ + old style for single registry + """ + # merge dictionary with defaults + _default.update(_data) + # remove empty entries + d = {i: j for i, j in _default.items() if j} + result.append(d) + + elif isinstance(_data, list): + """ + """ + for e in _data: + # merge dictionaries + _default.update(e) + # remove empty entries + d = {i: j for i, j in _default.items() if j} + result.append(d) + + # display.v(f"result: => {result}") + + return result + + def validate_mountpoints(self, data): + """ + """ + result = [] + + valid_mount_types = ['bind', 'tmpfs', 'volume'] + + for d in data: + name = d.get("name", None) + mounts = d.get("mounts", []) + + if len(mounts) > 0: + for m in mounts: + error = [] + _source = m.get("source", None) + _target = m.get("target", None) + _type = m.get("type", None) + + if not _source: + error.append("missing source") + + if not _target: + error.append("missing target") + + if 
not _type: + error.append("missing type") + elif (_type not in valid_mount_types): + error.append("wrong type") + + if len(error) > 0: + _definition = m.copy() + _ = _definition.pop("source_handling", None) + + res = dict( + container = name, + mount_definition = _definition, + error = ", ".join(error) + ) + result.append(res) + # display.v(f" = result {result}") + return result + + def _get_keys_from_dict(self, dictionary, key): + """ + """ + result = [] + for i in dictionary: + if isinstance(i, dict): + k = i.get(key, None) + if k: + result.append(k) + + return result + + def _del_keys_from_dict(self, dictionary, key): + """ + """ + for i in dictionary: + if isinstance(i, dict): + _ = i.pop(key, None) + + return dictionary diff --git a/plugins/filter/docker.py b/plugins/filter/docker.py new file mode 100644 index 0000000..0c281cf --- /dev/null +++ b/plugins/filter/docker.py @@ -0,0 +1,55 @@ +# python 3 headers, required if submitting to Ansible + +from __future__ import (absolute_import, print_function) +__metaclass__ = type + +from ansible.utils.display import Display + +display = Display() + + +class FilterModule(object): + """ + Ansible file jinja2 tests + """ + + def filters(self): + return { + 'validate_log_driver': self.validate_log_driver, + } + + def validate_log_driver(self, data): + """ + """ + build_in_driver = [ + "awslogs", "fluentd", "gcplogs", + "gelf", "journald", "json-file", + "local", "logentries", "splunk", + "syslog", + ] + + log_driver = data.get("log_driver", None) + + if log_driver and log_driver not in build_in_driver: + """ + custom plugin + """ + if ":" not in log_driver: + return dict( + valid = False, + msg = "The format for the desired log driver is wrong!\nPlease use the following format: $driver:$driver_version" + ) + else: + # plugin_name = log_driver.split(":")[0] + plugin_version = log_driver.split(":")[1] + + if len(plugin_version) == 0: + return dict( + valid = False, + msg = "A plugin version is missing!\nPlease use the following format: $driver:$driver_version" + ) + + return dict( + valid = True, + msg = "valid" + ) diff --git a/plugins/modules/container_directories.py b/plugins/modules/container_directories.py new file mode 100644 index 0000000..92f96ec --- /dev/null +++ b/plugins/modules/container_directories.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# (c) 2021-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, division, print_function +import os + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.directory import create_directory, current_state +from ansible_collections.bodsch.core.plugins.module_utils.lists import compare_two_lists + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: container_directories +version_added: 1.0.0 +author: "Bodo Schulz (@bodsch) " + +short_description: TBD + +description: + - TBD +""" + +EXAMPLES = """ +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + +class ContainerDirectories(object): + """ + """ + + def __init__(self, module): + """ + """ + self.module = module + + self.base_directory = module.params.get("base_directory") + self.container = module.params.get("container") + self.owner = module.params.get("owner") + self.group = module.params.get("group") + 
self.mode = module.params.get("mode") + + def run(self): + """ + """ + result = dict( + changed=False, + failed=True, + msg="initial" + ) + + created_directories = [] + + changed = False + + if not os.path.isdir(self.base_directory): + create_directory(directory=self.base_directory, mode="0755") + + for directory in self.container: + d = os.path.join(self.base_directory, directory) + + self.module.log(f" - directory: {d}") + + if not os.path.isdir(d): + pre = self.__analyse_directory(d) + create_directory( + directory=d, + owner=self.owner, + group=self.group, + mode=self.mode + ) + post = self.__analyse_directory(d) + + changed, diff, _ = compare_two_lists(pre, post) + + self.module.log(f" changed: {changed}, diff: {diff}") + + if changed: + created_directories.append(d) + changed = True + + # if not changed and not diff: + + return dict( + changed = changed, + failed = False, + created_directories = created_directories + ) + + return result + + def __analyse_directory(self, directory): + """ + """ + result = [] + + res = {} + + current_owner = None + current_group = None + current_mode = None + + res[directory] = {} + + current_owner, current_group, current_mode = current_state(directory) + + res[directory].update({ + "owner": current_owner, + "group": current_group, + "mode": current_mode, + }) + + result.append(res) + + return result + +# =========================================== +# Module execution. + + +def main(): + """ + """ + args = dict( + base_directory = dict( + required=True, + type='str' + ), + container=dict( + required=True, + type='list' + ), + owner=dict( + required=False + ), + group=dict( + required=False + ), + mode=dict( + required=False, + type="str" + ), + ) + + module = AnsibleModule( + argument_spec=args, + supports_check_mode=True, + ) + + p = ContainerDirectories(module) + result = p.run() + + module.log(msg=f"= result: {result}") + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/container_environments.py b/plugins/modules/container_environments.py new file mode 100644 index 0000000..9d64d8b --- /dev/null +++ b/plugins/modules/container_environments.py @@ -0,0 +1,318 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# (c) 2021-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, division, print_function +import os +import shutil + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.directory import create_directory +from ansible_collections.bodsch.core.plugins.module_utils.checksum import Checksum +# from ansible_collections.bodsch.core.plugins.module_utils.diff import SideBySide +from ansible_collections.bodsch.core.plugins.module_utils.module_results import results +from ansible_collections.bodsch.core.plugins.module_utils.template.template import write_template + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: container_environments +version_added: 1.0.0 +author: "Bodo Schulz (@bodsch) " + +short_description: TBD + +description: + - TBD +""" + +EXAMPLES = """ +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + +TPL_ENV = """# generated by ansible + +{% for key, value in item.items() %} +{{ key }}={{ value }} +{% endfor %} + +""" + +TPL_PROP = """# generated by ansible + +{% for key, 
value in item.items() %} +{{ key.ljust(30) }} = {{ value }} +{% endfor %} + +""" + + +class ContainerEnvironments(object): + """ + """ + + def __init__(self, module): + """ + """ + self.module = module + + self.base_directory = module.params.get("base_directory") + self.container = module.params.get("container") + self.owner = module.params.get("owner") + self.group = module.params.get("group") + self.mode = module.params.get("mode") + self.diff = module.params.get("diff") + + pid = os.getpid() + + self.tmp_directory = os.path.join("/run/.ansible", f"container_environments.{str(pid)}") + + def run(self): + """ + """ + result = dict( + changed=False, + failed=True, + msg="initial" + ) + + self.checksum = Checksum(self.module) + + create_directory(directory=self.tmp_directory, mode="0750") + + result_state = [] + + for c in self.container: + """ + """ + name = c.get("name") + environments = c.get("environments", {}) + properties = c.get("properties", {}) + property_files = c.get("property_files", []) + defined_environments = (len(environments) > 0) + defined_properties = (len(properties) > 0) + defined_property_files = (len(property_files) > 0) + + tmp_directory = os.path.join(self.tmp_directory, name) + + create_directory(directory=tmp_directory, mode="0750") + + changed = False + e_changed = False + p_changed = False + + state = [] + + """ + write environments + """ + e_changed, difference = self._write_environments( + container_name=name, + environments=environments + ) + + if defined_environments: + _ = c.pop("environments") + + if e_changed: + state.append("container.env") + + if defined_properties or defined_property_files: + """ + write properties + """ + property_filename = f"{name}.properties" + + property_files.append({"name": property_filename, "properties": properties}) + + for prop in property_files: + property_filename = prop.get("name", None) + properties = prop.get("properties", {}) + + _changed, difference = self._write_properties( + container_name=name, + property_filename=property_filename, + properties=properties + ) + + if _changed: + p_changed = True + state.append(property_filename) + + if defined_properties: + _ = c.pop("properties") + + if defined_property_files: + _ = c.pop("property_files") + + if e_changed or p_changed: + changed = True + + if changed: + # add recreate to dictionary + c['recreate'] = True + + res = {} + state = ", ".join(state) + state += " successful written" + + res[name] = dict( + # changed=True, + state=state, + changed=True + ) + + result_state.append(res) + + # define changed for the running tasks + _state, _changed, _failed, state, changed, failed = results(self.module, result_state) + + result = dict( + changed = _changed, + failed = False, + container_data = self.container, + msg = result_state + ) + + shutil.rmtree(self.tmp_directory) + + return result + + def _write_environments(self, container_name, environments = {}): + """ + """ + tmp_directory = os.path.join(self.tmp_directory, container_name) + + checksum_file = os.path.join(self.base_directory, container_name, "container.env.checksum") + data_file = os.path.join(self.base_directory, container_name, "container.env") + difference = "" + + if os.path.exists(checksum_file): + os.remove(checksum_file) + + """ + write temporary file and generate checksum + """ + tmp_file = os.path.join(tmp_directory, f"{container_name}.env") + self.__write_template("environments", environments, tmp_file) + new_checksum = self.checksum.checksum_from_file(tmp_file) + + """ + read checksum from real file + 
""" + old_checksum = self.checksum.checksum_from_file(data_file) + + changed = not (new_checksum == old_checksum) + + if changed: + # if self.diff: + # difference = self.__create_diff(data_file, tmp_file) + self.__write_template("environments", environments, data_file) + + return changed, difference + + def _write_properties(self, container_name, property_filename, properties = {}): + """ + """ + tmp_directory = os.path.join(self.tmp_directory, container_name) + + checksum_file = os.path.join(self.base_directory, container_name, f"{property_filename}.checksum") + data_file = os.path.join(self.base_directory, container_name, property_filename) + difference = "" + + if os.path.exists(checksum_file): + os.remove(checksum_file) + + if len(properties) == 0: + if os.path.exists(data_file): + os.remove(data_file) + + return False, difference + + tmp_file = os.path.join(tmp_directory, property_filename) + self.__write_template("properties", properties, tmp_file) + new_checksum = self.checksum.checksum_from_file(tmp_file) + + old_checksum = self.checksum.checksum_from_file(data_file) + + changed = not (new_checksum == old_checksum) + + if changed: + # if self.diff: + # difference = self.__create_diff(data_file, tmp_file) + self.__write_template("properties", properties, data_file) + + return changed, difference + + def __write_template(self, env, data, data_file, checksum = None, checksum_file = None): + """ + """ + if env == "environments": + tpl = TPL_ENV + if env == "properties": + tpl = TPL_PROP + + write_template(data_file, tpl, data) + + if checksum and checksum_file: + self.checksum.write_checksum(checksum_file, checksum) + + def __create_diff(self, data_file, tmp_file): + """ + """ + return None + +# =========================================== +# Module execution. 
+ + +def main(): + """ + """ + args = dict( + base_directory = dict( + required=True, + type='str' + ), + container = dict( + required=True, + type='list' + ), + owner=dict( + required=False + ), + group=dict( + required=False + ), + mode=dict( + required=False, + type="str" + ), + diff=dict( + required=False, + type="bool", + default = False + ), + ) + + module = AnsibleModule( + argument_spec=args, + supports_check_mode=True, + ) + + p = ContainerEnvironments(module) + result = p.run() + + module.log(msg=f"= result: {result}") + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/container_mounts.py b/plugins/modules/container_mounts.py new file mode 100644 index 0000000..cc066e4 --- /dev/null +++ b/plugins/modules/container_mounts.py @@ -0,0 +1,344 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# (c) 2021-2023, Bodo Schulz +# Apache-2.0 (see LICENSE or https://opensource.org/license/apache-2-0) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import absolute_import, division, print_function + +from ansible.module_utils.basic import AnsibleModule +from ruamel.yaml import YAML +from ansible_collections.bodsch.core.plugins.module_utils.lists import compare_two_lists +from ansible_collections.bodsch.core.plugins.module_utils.directory import create_directory_tree, current_state + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: container_mounts +version_added: 1.0.0 +author: "Bodo Schulz (@bodsch) " + +short_description: TBD + +description: + - TBD +""" + +EXAMPLES = """ +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + +class ContainerMounts(object): + """ + """ + + def __init__(self, module): + """ + """ + self.module = module + + self.data = module.params.get("data") + self.volumes = module.params.get("volumes") + self.mounts = module.params.get("mounts") + self.debug = module.params.get("debug") + self.owner = module.params.get("owner") + self.group = module.params.get("group") + self.mode = module.params.get("mode") + + self.volume_block_list_ends = ( + '.pid', + '.sock', + '.socket', + '.conf', + '.config', + ) + self.volume_block_list_starts = ( + '/sys', + '/dev', + '/run', + ) + + self.read_only = { + 'rw': False, + 'ro': True + } + + def run(self): + """ + """ + result = dict( + changed=False, + failed=True, + msg="initial" + ) + + all_mounts = [] + all_volumes = [] + migrated_volumes = [] + + if self.volumes: + all_volumes = self.__volumes() + migrated_volumes = self.__migrate_volumes_to_mounts(all_volumes) + + if self.mounts: + all_mounts = self.__mounts() + + full_list = migrated_volumes + all_mounts + + if len(full_list) == 0: + return dict( + changed=False, + failed=False, + msg="nothing to do" + ) + + current_state = self.__analyse_directories(full_list) + create_directory_tree(full_list, current_state) + final_state = self.__analyse_directories(full_list) + + changed, diff, error_msg = compare_two_lists(list1=current_state, list2=final_state) + + # self.module.log(f" changed: {changed}, diff: {diff}") + + # TODO + # remove custom fields from 'volumes' + if changed: + result['msg'] = "changed or created directories" + msg = "" + for i in diff: + msg += f"- {i}\n" + result['created_directories'] = msg + else: + result['msg'] = "nothing to do" + + result['changed'] = changed + result['failed'] = False + + return result + + def __volumes(self): + """ + return all volume 
definitions + """ + all_volumes = [] + + for d in self.data: + _v = d.get('volumes', []) + if len(_v) > 0: + all_volumes.append(_v) + + return all_volumes + + def __mounts(self): + """ + get only mountspoint when we add source_handling and set create to True + """ + all_mounts = [] + + for d in self.data: + """ + """ + if self.debug: + self.module.log(f"- {d.get('name')}") + + mount_defintions = d.get('mounts', []) + + for mount in mount_defintions: + if self.debug: + self.module.log(f" mount: {mount}") + + source_handling = mount.get('source_handling', {}).get("create", False) + + if len(mount_defintions) > 0 and source_handling: + all_mounts.append(mount) + + return all_mounts + + def __migrate_volumes_to_mounts(self, volumes): + """ + migrate old volume definition into mount + ignore some definitions like: + - *.sock + - *.conf + etc. see self.volume_block_list_ends and self.volume_block_list_starts! + + for example: + from: /tmp/testing5:/var/tmp/testing5|{owner="1001",mode="0700",ignore=True} + to: + - source: /tmp/testing5 + target: /var/tmp/testing5 + source_handling: + create: false + owner: "1001" + mode: "0700" + + from: /tmp/testing3:/var/tmp/testing3:rw|{owner="999",group="1000"} + to: + - source: /tmp/testing3 + target: /var/tmp/testing3 + source_handling: + create: true + owner: "999" + group: "1000" + """ + if self.debug: + self.module.log("__migrate_volumes_to_mounts(volumes)") + + result = [] + yaml = YAML() + + def custom_fields(d): + """ + returns only custom fileds as json + """ + d = d.replace('=', ': ') + + if d.startswith("[") and d.endswith("]"): + d = d.replace("[", "") + d = d.replace("]", "") + + if not (d.startswith("{") and d.endswith("}")): + d = "{" + d + "}" + + code = yaml.load(d) + + for key, value in code.items(): + # transform ignore=True into create=False + if key == "ignore": + code.insert(0, 'create', not value) + del code[key] + + if self.debug: + self.module.log(f" custom_fields: {dict(code)}") + + return dict(code) + + for d in volumes: + for entry in d: + """ + """ + if self.debug: + self.module.log(f" - {entry}") + + read_mode = None + c_fields = dict() + values = entry.split('|') + + if len(values) == 2 and values[1]: + c_fields = custom_fields(values[1]) + entry = values[0] + + values = entry.split(':') + count = len(values) + + local_volume = values[0] + remote_volume = values[1] + + if count == 3 and values[2]: + read_mode = values[2] + + valid = (local_volume.endswith(self.volume_block_list_ends) or local_volume.startswith( + self.volume_block_list_starts)) + + if not valid: + """ + """ + res = dict( + source=local_volume, # values[0], + target=remote_volume, # values[1], + type="bind", + source_handling=c_fields + ) + + if read_mode is not None: + res['read_only'] = self.read_only.get(read_mode) + + result.append(res) + + return result + + def __analyse_directories(self, directory_tree): + """ + set current owner, group and mode to source entry + """ + result = [] + for entry in directory_tree: + """ + """ + res = {} + + source = entry.get('source') + current_owner = None + current_group = None + current_mode = None + + res[source] = {} + + current_owner, current_group, current_mode = current_state(source) + + res[source].update({ + "owner": current_owner, + "group": current_group, + "mode": current_mode, + }) + + result.append(res) + + return result + + +# =========================================== +# Module execution. 
+ +def main(): + """ + """ + args = dict( + data=dict( + required=True, + type='list' + ), + volumes=dict( + required=True, + type='bool' + ), + mounts=dict( + required=True, + type='bool' + ), + debug=dict( + required=False, + default=False, + type='bool' + ), + owner=dict( + required=False + ), + group=dict( + required=False + ), + mode=dict( + required=False, + type="str" + ), + ) + + module = AnsibleModule( + argument_spec=args, + supports_check_mode=True, + ) + + p = ContainerMounts(module) + result = p.run() + + module.log(msg=f"= result: {result}") + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/docker_client_configs.py b/plugins/modules/docker_client_configs.py new file mode 100644 index 0000000..339642a --- /dev/null +++ b/plugins/modules/docker_client_configs.py @@ -0,0 +1,476 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2023, Bodo Schulz +# BSD 2-clause (see LICENSE or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +import os +import pwd +import grp +import shutil +import json +import base64 +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.directory import create_directory +from ansible_collections.bodsch.core.plugins.module_utils.checksum import Checksum +from ansible_collections.bodsch.core.plugins.module_utils.module_results import results + +__metaclass__ = type + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: docker_client_configs +version_added: 1.0.0 +author: "Bodo Schulz (@bodsch) " + +short_description: TBD + +description: + - TBD +""" + +EXAMPLES = """ +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + +""" + creates an user configuration like this: + +{ + "auths": { + "registry.gitlab.com": { + "auth": "amVua2luczpydWJiZWwtZGllLWthdHotZHUtZHVtbXNjaHfDpHR6ZXIxCg==" + } + }, + "psFormat": "table {{.ID}}:\\t{{.Names}}\\t{{.Status}}\\t{{.RunningFor}}\\t{{.Ports}}"", + "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}" +} + +""" + +# --------------------------------------------------------------------------------------- + +class DockerClientConfigs(object): + """ + """ + module = None + + def __init__(self, module): + """ + Initialize all needed Variables + """ + self.module = module + self.configs = module.params.get("configs") + pid = os.getpid() + self.tmp_directory = os.path.join("/run/.ansible", f"docker_client_configs.{str(pid)}") + self.cache_directory = "/var/cache/ansible/docker" + + # TODO + # maybe later? 
+ # valid_formate_entries = [ + # '.ID', '.Repository', '.Tag', '.CreatedAt', '.Names', '.Image', '.Command', '.Labels', + # '.Status', '.RunningFor', '.Ports' + # ] + + def run(self): + """ + run + """ + create_directory(directory=self.tmp_directory, mode="0750") + + self.checksum = Checksum(self.module) + + result_state = [] + + if isinstance(self.configs, list): + """ + """ + for conf in self.configs: + destination = conf.get("location", None) + + if destination: + res = {} + res[destination] = self.client(conf) + + result_state.append(res) + + # define changed for the running tasks + _state, _changed, _failed, state, changed, failed = results(self.module, result_state) + + result = dict( + changed = _changed, + failed = _failed, + msg = result_state + ) + + shutil.rmtree(self.tmp_directory) + + return result + + def client(self, client_data): + """ + { + 'location': '/root/.docker/config.json', + 'enabled': True, + 'auths': { + 'registry.gitfoo.tld': { + 'auth': 'amVua2luczpydWJiZWwtZGllLWthdHotZHUtZHVtbXNjaHfDpHR6ZXIxCg==' + }, + 'test.tld': {'username': 'FOO-was-sonst', 'passwort': 'ja-toll-schon-wieder-alles-scheisse!'}}, + 'formats': {} + } + + """ + destination = client_data.get("location", None) + state = client_data.get("state", "present") + auths = client_data.get("auths", {}) + formats = client_data.get("formats", {}) + enabled = client_data.get("enabled", True) + owner = client_data.get("owner", None) + group = client_data.get("group", None) + mode = client_data.get("mode", "0644") + + location_directory = os.path.dirname(destination) + + hashed_dest = self.checksum.checksum(destination) + # checksum_file is obsolete + checksum_file_name = os.path.join(self.cache_directory, f"client_{hashed_dest}.checksum") + + if os.path.exists(checksum_file_name): + os.remove(checksum_file_name) + + if state == 'absent': + """ + remove created files + """ + config_file_exist = False + config_checksum_exists = False + msg = "The Docker Client configuration does not exist." + + if os.path.isfile(destination): + config_file_exist = True + os.remove(destination) + msg = "The Docker Client configuration has been removed." + + if os.path.isfile(checksum_file_name): + config_checksum_exists = True + os.remove(checksum_file_name) + + return dict( + changed = (config_file_exist & config_checksum_exists), + failed = False, + msg = msg + ) + + if not enabled: + msg = "The creation of the Docker Client configuration has been deactivated." + + if os.path.isfile(destination): + msg += "\nBut the configuration file has already been created!\nTo finally remove it, the 'state' must be configured to 'absent'." + + return dict( + failed=False, + changed=False, + msg=msg + ) + + if not destination: + return dict( + failed=True, + msg="No location has been configured." + ) + + if state not in ["absent", "present"]: + return dict( + failed=True, + msg=f"Wrong state '{state}'. Only these are supported: 'absent', 'present'." + ) + + if not isinstance(auths, dict): + return dict( + failed = True, + msg = "'auths' must be an dictionary." + ) + + if not isinstance(formats, dict): + return dict( + failed = True, + msg = "'formats' must be an dictionary." 
+ ) + + # create destination directory + create_directory(directory=location_directory, mode="0750", owner=owner, group=group) + create_directory(directory=self.tmp_directory, mode="0750") + + if not os.path.isfile(destination): + """ + clean manual removements + """ + if os.path.isfile(checksum_file_name): + os.remove(checksum_file_name) + + invalid_authentications, authentications = self._handle_authentications(auths) + formats = self._handle_formats(formats) + + if len(invalid_authentications) > 0: + return dict( + failed = True, + msg = invalid_authentications + ) + + data = { + **authentications, + **formats + } + + tmp_file = os.path.join(self.tmp_directory, f"client_{hashed_dest}") + + self.__write_config(tmp_file, data) + new_checksum = self.checksum.checksum_from_file(tmp_file) + old_checksum = self.checksum.checksum_from_file(destination) + changed = not (new_checksum == old_checksum) + new_file = False + msg = "The Docker Client configuration has not been changed." + + if changed: + new_file = (old_checksum is None) + self.__write_config(destination, data) + msg = "The Docker Client configuration was successfully changed." + + if new_file: + msg = "The Docker Client configuration was successfully created." + + if os.path.isfile(destination): + self.change_owner(destination, owner, group, mode) + + return dict( + changed = changed, + failed = False, + msg = msg + ) + + def _handle_authentications(self, auths): + """ + possible values: + auths: + registry.gitlab.com: + auth: amVua2luczpydWJiZWwtZGllLWthdHotZHUtZHVtbXNjaHfDpHR6ZXIxCg== + registry.githu.com: + user: foobar + password: vaulted_freaking_password + + """ + invalid_authentications = [] + + copy_auths = auths.copy() + + for k, v in auths.items(): + """ + filter broken configs + """ + res = {} + valide, validate_msg = self.__validate_auth(v) + + if not valide: + self.module.log(f" validation error: {validate_msg}") + copy_auths.pop(k) + + res[k] = dict( + failed = True, + state = validate_msg + ) + + invalid_authentications.append(res) + + auths_dict = dict() + auths_dict["auths"] = dict() + + for k, v in copy_auths.items(): + """ + Ensure that the auth string is a base64 encoded thing. + the content of an existing base64 string is not checked here! 
+ """ + auth = self.__base64_auth(v) + # self.module.log(f" - {k} -> {auth}") + auths_dict["auths"].update({ + k: {"auth": auth} + }) + + return invalid_authentications, auths_dict + + def _handle_formats(self, formats): + """ + "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", + "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", + "pluginsFormat": "table {{.ID}}\t{{.Name}}\t{{.Enabled}}", + "statsFormat": "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}", + "servicesFormat": "table {{.ID}}\t{{.Name}}\t{{.Mode}}", + "secretFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}", + "configFormat": "table {{.ID}}\t{{.Name}}\t{{.CreatedAt}}\t{{.UpdatedAt}}", + "nodesFormat": "table {{.ID}}\t{{.Hostname}}\t{{.Availability}}", + + possible values: + formats: + ps: + - ".ID" + - ".Names" + - ".Status" + - ".RunningFor" + - ".Ports" + - ".Image" + - ".Command" + - ".Labels" + images: + - ".ID" + - ".Image" + - ".Command" + - ".Labels" + """ + def __format_to_string(t): + """ + input: + images: + - ".ID" + - ".Image" + - ".Command" + - ".Labels" + result: + - 'imagesFormat': 'table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}' + """ + _result = "table " + for i, item in enumerate(t): + _result += "{{{{{0}}}}}".format(item) + if not i == len(t) - 1: + _result += "\\t" + + return _result + + result = {} + + for k, v in formats.items(): + if k in ["ps", "images", "plugins", "stats", "services", "secret", "config", "nodes"] and len(v) != 0: + result[f"{k}Format"] = __format_to_string(v) + + return result + + def __validate_auth(self, data): + """ + """ + auth = data.get("auth", None) + username = data.get("username", None) + password = data.get("password", None) + + return_result = False + return_message = None + + if not auth and not username and not password: + return_result = True + return_message = "not authentication defined" + + if auth and (not username and not password): + return_result = True + return_message = "base64 authentication defined" + + if auth and (username and password): + return_result = False + return_message = "Only one variant can be defined!\nPlease choose between 'auth' or the combination of 'username' and 'password'!" + + if not auth and (not username or not password): + return_result = False + return_message = "Either the 'username' or the 'password' is missing!" 
+ + if not auth and (username and password): + return_result = True + return_message = "combination of 'username' and 'password' authentication defined" + + # self.module.log(f"= {return_result}, {return_message})") + return return_result, return_message + + def __base64_auth(self, data): + """ + """ + auth = data.get("auth", None) + username = data.get("username", None) + password = data.get("password", None) + + if auth: + return auth + + d_bytes = f"{username}:{password}".encode('utf-8') + + base64_bytes = base64.standard_b64encode(d_bytes) + base64_message = base64_bytes.decode('utf8') + + return base64_message + + def __write_config(self, file_name, data): + """ + """ + with open(file_name, 'w') as fp: + json_data = json.dumps(data, indent=2, sort_keys=False) + fp.write(f'{json_data}\n') + + def change_owner(self, destination, owner=None, group=None, mode=None): + """ + """ + if mode is not None: + os.chmod(destination, int(mode, base=8)) + + if owner is not None: + try: + owner = pwd.getpwnam(owner).pw_uid + except KeyError: + owner = int(owner) + pass + else: + owner = 0 + + if group is not None: + try: + group = grp.getgrnam(group).gr_gid + except KeyError: + group = int(group) + pass + else: + group = 0 + + if os.path.exists(destination) and owner and group: + os.chown(destination, int(owner), int(group)) + + +# --------------------------------------------------------------------------------------- +# Module execution. +# + + +def main(): + + args = dict( + configs = dict( + required=True, + type=list + ) + ) + + module = AnsibleModule( + argument_spec = args, + supports_check_mode = True, + ) + + dcc = DockerClientConfigs(module) + result = dcc.run() + + module.log(msg=f"= result: {result}") + + module.exit_json(**result) + + +# import module snippets +if __name__ == '__main__': + main() diff --git a/plugins/modules/docker_common_config.py b/plugins/modules/docker_common_config.py new file mode 100644 index 0000000..e0a77cd --- /dev/null +++ b/plugins/modules/docker_common_config.py @@ -0,0 +1,657 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# (c) 2020-2022, Bodo Schulz +# BSD 2-clause (see LICENSE or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +import os +import json +import docker + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.directory import create_directory +from ansible_collections.bodsch.core.plugins.module_utils.checksum import Checksum +from ansible_collections.bodsch.core.plugins.module_utils.diff import SideBySide +from ansible_collections.bodsch.core.plugins.module_utils.validate import validate + +__metaclass__ = type + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: docker_common_config +version_added: 1.0.0 +author: "Bodo Schulz (@bodsch) " + +short_description: TBD + +description: + - TBD +""" + +EXAMPLES = """ +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + +class DockerCommonConfig(object): + """ + """ + module = None + + def __init__(self, module): + """ + Initialize all needed Variables + """ + self.module = module + self.state = module.params.get("state") + self.diff_output = module.params.get("diff_output") + # + # + self.authorization_plugins = module.params.get("authorization_plugins") + self.bip = module.params.get("bip") + self.bridge = module.params.get("bridge") 
+ self.data_root = module.params.get("data_root") + self.debug = module.params.get("debug") + self.default_gateway = module.params.get("default_gateway") + self.default_gateway_v6 = module.params.get("default_gateway_v6") + self.default_shm_size = module.params.get("default_shm_size") + self.default_ulimits = module.params.get("default_ulimits") + self.dns = module.params.get("dns") + self.dns_opts = module.params.get("dns_opts") + self.dns_search = module.params.get("dns_search") + self.experimental = module.params.get("experimental") + self.fixed_cidr = module.params.get("fixed_cidr") + self.fixed_cidr_v6 = module.params.get("fixed_cidr_v6") + self.group = module.params.get("group") + self.hosts = module.params.get("hosts") + self.insecure_registries = module.params.get("insecure_registries") + self.ip = module.params.get("ip") + self.ip6tables = module.params.get("ip6tables") + self.ip_forward = module.params.get("ip_forward") + self.ip_masq = module.params.get("ip_masq") + self.iptables = module.params.get("iptables") + self.ipv6 = module.params.get("ipv6") + self.labels = module.params.get("labels") + self.log_driver = module.params.get("log_driver") + self.log_level = module.params.get("log_level") + self.log_opts = module.params.get("log_opts") + self.max_concurrent_downloads = module.params.get("max_concurrent_downloads") + self.max_concurrent_uploads = module.params.get("max_concurrent_uploads") + self.max_download_attempts = module.params.get("max_download_attempts") + self.metrics_addr = module.params.get("metrics_addr") + self.oom_score_adjust = module.params.get("oom_score_adjust") + self.pidfile = module.params.get("pidfile") + self.raw_logs = module.params.get("raw_logs") + self.registry_mirrors = module.params.get("registry_mirrors") + self.seccomp_profile = module.params.get("seccomp_profile") + self.selinux_enabled = module.params.get("selinux_enabled") + self.shutdown_timeout = module.params.get("shutdown_timeout") + self.storage_driver = module.params.get("storage_driver") + self.storage_opts = module.params.get("storage_opts") + self.tls_ca_cert = module.params.get("tls_ca_cert") + self.tls_cert = module.params.get("tls_cert") + self.tls_key = module.params.get("tls_key") + self.tls_verify = module.params.get("tls_verify") + + self.config_file = "/etc/docker/daemon.json" + # self.checksum_file_name = "/etc/docker/.checksum" + + self.cache_directory = "/var/cache/ansible/docker" + self.checksum_file_name = os.path.join(self.cache_directory, "daemon.checksum") + + pid = os.getpid() + self.tmp_directory = os.path.join("/run/.ansible", f"docker_common_config.{str(pid)}") + + def run(self): + """ + run + """ + create_directory(self.cache_directory) + + checksum = Checksum(self.module) + + if self.state == 'absent': + """ + remove created files + """ + if os.path.isfile(self.config_file): + os.remove(self.config_file) + + if os.path.isfile(self.checksum_file_name): + os.remove(self.checksum_file_name) + + return dict( + changed = True, + failed = False, + msg = "config removed" + ) + + if not os.path.isfile(self.config_file): + if os.path.isfile(self.checksum_file_name): + os.remove(self.checksum_file_name) + + _diff = [] + + self.__docker_client() + + data = self.config_opts() + + create_directory(directory=self.tmp_directory, mode="0750") + tmp_file = os.path.join(self.tmp_directory, "daemon.json") + self.__write_config(tmp_file, data) + new_checksum = checksum.checksum_from_file(tmp_file) + old_checksum = checksum.checksum_from_file(self.config_file) + changed = not 
(new_checksum == old_checksum) + new_file = False + msg = "The configuration has not been changed." + + # self.module.log(f" changed : {changed}") + # self.module.log(f" new_checksum : {new_checksum}") + # self.module.log(f" old_checksum : {old_checksum}") + + if changed: + new_file = (old_checksum is None) + + if self.diff_output: + difference = self.create_diff(self.config_file, data) + _diff = difference + + self.__write_config(self.config_file, data) + msg = "The configuration has been successfully updated." + + if new_file: + msg = "The configuration was successfully created." + + return dict( + changed = changed, + failed = False, + msg = msg, + diff = _diff + ) + + def config_opts(self): + + data = dict() + + if validate(self.authorization_plugins): + data["authorization-plugins"] = self.authorization_plugins + + if validate(self.bip): + data["bip"] = self.bip + + if validate(self.bridge): + data["bridge"] = self.bridge + + if validate(self.data_root): + data["data-root"] = self.data_root + + if validate(self.debug): + data["debug"] = self.debug + + if validate(self.default_gateway): + data["default-gateway"] = self.default_gateway + + if validate(self.default_gateway_v6): + data["default-gateway-v6"] = self.default_gateway_v6 + + if validate(self.default_shm_size): + data["default-shm-size"] = self.default_shm_size + + if validate(self.default_ulimits): + data["default-ulimits"] = self.default_ulimits + + if validate(self.dns): + data["dns"] = self.dns + + if validate(self.dns_opts): + data["dns-opts"] = self.dns_opts + + if validate(self.dns_search): + data["dns-search"] = self.dns_search + + if validate(self.experimental): + data["experimental"] = self.experimental + + if validate(self.fixed_cidr): + data["fixed-cidr"] = self.fixed_cidr + + if validate(self.fixed_cidr_v6): + data["fixed-cidr-v6"] = self.fixed_cidr_v6 + + if validate(self.group): + data["group"] = self.group + + if validate(self.hosts): + data["hosts"] = self.hosts + + if validate(self.insecure_registries): + data["insecure-registries"] = self.insecure_registries + + if validate(self.ip): + data["ip"] = self.ip + + if validate(self.ip_forward): + data["ip-forward"] = self.ip_forward + + if validate(self.ip_masq): + data["ip-masq"] = self.ip_masq + + if validate(self.iptables): + data["iptables"] = self.iptables + + if validate(self.ip6tables): + data["ip6tables"] = self.ip6tables + + if validate(self.ipv6): + data["ipv6"] = self.ipv6 + + if validate(self.labels): + data["labels"] = self.labels + + if validate(self.log_level) and self.log_level in ["debug", "info", "warn", "error", "fatal"]: + data["log-level"] = self.log_level + + if validate(self.log_driver): + if "loki" in self.log_driver: + plugin_valid, plugin_state_message = self.__check_plugin() + + if not plugin_valid: + self.module.log(msg="ERROR: log_driver are not valid!") + self.module.log(msg=f"ERROR: {plugin_state_message}") + self.log_driver = "json-file" + + data["log-driver"] = self.log_driver + + if validate(self.log_opts): + data["log-opts"] = self.__values_as_string(self.log_opts) + + if validate(self.max_concurrent_downloads): + data["max-concurrent-downloads"] = self.max_concurrent_downloads + + if validate(self.max_concurrent_uploads): + data["max-concurrent-uploads"] = self.max_concurrent_uploads + + if validate(self.max_download_attempts): + data["max-download-attempts"] = self.max_download_attempts + + if validate(self.metrics_addr): + data["metrics-addr"] = self.metrics_addr + data["experimental"] = True + + if 
validate(self.oom_score_adjust): + data["oom-score-adjust"] = self.oom_score_adjust + + if validate(self.pidfile): + data["pidfile"] = self.pidfile + + if validate(self.raw_logs): + data["raw-logs"] = self.raw_logs + + if validate(self.registry_mirrors): + data["registry-mirrors"] = self.registry_mirrors + + if validate(self.seccomp_profile): + data["seccomp-profile"] = self.seccomp_profile + + if validate(self.selinux_enabled): + data["selinux-enabled"] = self.selinux_enabled + + if validate(self.shutdown_timeout): + data["shutdown-timeout"] = self.shutdown_timeout + + if validate(self.storage_driver): + self.module.log(msg=f" - {self.storage_driver}") + self.module.log(msg=f" - {self.storage_opts}") + valid_storage_drivers = ["aufs", "devicemapper", "btrfs", "zfs", "overlay", "overlay2", "fuse-overlayfs"] + if self.storage_driver in valid_storage_drivers: + data["storage-driver"] = self.storage_driver + + if validate(self.storage_opts): + """ + # TODO + # validate storage_opts + # -> https://docs.docker.com/engine/reference/commandline/dockerd/#options-per-storage-driver + # Options for + # - devicemapper are prefixed with dm + # - zfs start with zfs + # - btrfs start with btrfs + # - overlay2 start with ... + """ + data["storage-opts"] = self.storage_opts + + if self.tls_ca_cert and self.tls_cert and self.tls_key: + """ + """ + data["tls"] = True + + if validate(self.tls_verify): + data["tlsverify"] = self.tls_verify + + if validate(self.tls_ca_cert): + data["tlscacert"] = self.tls_ca_cert + + if validate(self.tls_cert): + data["tlscert"] = self.tls_cert + + if validate(self.tls_key): + data["tlskey"] = self.tls_key + + return data + + def create_diff(self, config_file, data): + """ + """ + old_data = dict() + + if os.path.isfile(config_file): + with open(config_file) as json_file: + old_data = json.load(json_file) + + side_by_side = SideBySide(self.module, old_data, data) + diff_side_by_side = side_by_side.diff(width=140, left_title=" Original", right_title= " Update") + + return diff_side_by_side + + def __values_as_string(self, values): + """ + """ + result = {} + # self.module.log(msg=f"{json.dumps(values, indent=2, sort_keys=False)}") + + if isinstance(values, dict): + for k, v in sorted(values.items()): + if isinstance(v, bool): + v = str(v).lower() + result[k] = str(v) + + # self.module.log(msg=f"{json.dumps(result, indent=2, sort_keys=False)}") + + return result + + def __docker_client(self): + """ + """ + docker_status = False + docker_socket = "/var/run/docker.sock" + # TODO + # with broken ~/.docker/daemon.json will this fail! 
+ try: + if os.path.exists(docker_socket): + # self.module.log("use docker.sock") + self.docker_client = docker.DockerClient(base_url=f"unix://{docker_socket}") + else: + self.docker_client = docker.from_env() + + docker_status = self.docker_client.ping() + except docker.errors.APIError as e: + self.module.log( + msg=f" exception: {e}" + ) + except Exception as e: + self.module.log( + msg=f" exception: {e}" + ) + + if not docker_status: + return dict( + changed = False, + failed = True, + msg = "no running docker found" + ) + + def __check_plugin(self): + """ + """ + installed_plugin_name = None + installed_plugin_shortname = None + installed_plugin_version = None + installed_plugin_enabled = None + + plugin_valid = False + + msg = f"plugin {self.log_driver} ist not installed" + + try: + p_list = self.docker_client.plugins.list() + + for plugin in p_list: + + installed_plugin_enabled = plugin.enabled + + if installed_plugin_enabled: + installed_plugin_name = plugin.name + installed_plugin_shortname = plugin.name.split(':')[0] + installed_plugin_version = plugin.name.split(':')[1] + + break + + except docker.errors.APIError as e: + error = str(e) + self.module.log(msg=f"{error}") + + except Exception as e: + error = str(e) + self.module.log(msg=f"{error}") + + if installed_plugin_name and installed_plugin_version: + msg = f"plugin {installed_plugin_shortname} is installed in version '{installed_plugin_version}'" + + if self.log_driver == installed_plugin_name: + plugin_valid = True + else: + plugin_valid = False + msg += ", but versions are not equal!" + + return plugin_valid, msg + else: + return plugin_valid, msg + + def __write_config(self, file_name, data): + """ + """ + with open(file_name, 'w') as fp: + json_data = json.dumps(data, indent=2, sort_keys=False) + fp.write(f'{json_data}\n') + +# --------------------------------------------------------------------------------------- +# Module execution. 
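+#
+# Illustrative usage sketch (an assumption, not taken from the module's DOCUMENTATION;
+# the task name and all values are examples, and the FQCN prefix depends on the collection
+# namespace defined in galaxy.yml):
+#
+#   - name: write /etc/docker/daemon.json
+#     docker_common_config:
+#       state: present
+#       diff_output: true
+#       log_driver: json-file
+#       log_opts:
+#         max-size: "10m"
+#         max-file: "5"
+#       storage_driver: overlay2
+#       insecure_registries:
+#         - registry.internal:5000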
+# + + +def main(): + + args = dict( + state = dict( + default="present", + choices=[ + "absent", + "present" + ] + ), + diff_output = dict( + required=False, + type='bool', + default=False + ), + # + authorization_plugins = dict(required=False, type='list'), + bip = dict(required=False, type='str'), + bridge = dict(required=False, type='str'), + data_root = dict(required=False, type='str'), + debug = dict(required=False, type="bool", default=False), + default_gateway = dict(required=False, type='str'), + default_gateway_v6 = dict(required=False, type='str'), + default_shm_size = dict(required=False, type='str'), + default_ulimits = dict(required=False, type='dict'), + dns = dict(required=False, type='list'), + dns_opts = dict(required=False, type='list'), + dns_search = dict(required=False, type='list'), + experimental = dict(required=False, type="bool", default=False), + fixed_cidr = dict(required=False, type='str'), + fixed_cidr_v6 = dict(required=False, type='str'), + group = dict(required=False, type='str'), + hosts = dict(required=False, type='list'), + insecure_registries = dict(required=False, type='list'), + ip = dict(required=False, type='str'), + ip_forward = dict(required=False, type='bool'), + ip_masq = dict(required=False, type='bool'), + iptables = dict(required=False, type='bool'), + ip6tables = dict(required=False, type='bool'), + ipv6 = dict(required=False, type='bool'), + labels = dict(required=False, type='list'), + log_driver = dict(required=False, type='str'), + log_level = dict(required=False, type='str'), + log_opts = dict(required=False, type='dict'), + max_concurrent_downloads = dict(required=False, type="int"), + max_concurrent_uploads = dict(required=False, type='int'), + max_download_attempts = dict(required=False, type='int'), + metrics_addr = dict(required=False, type='str'), + oom_score_adjust = dict(required=False, type='int'), + pidfile = dict(required=False, type="str"), + raw_logs = dict(required=False, type='bool'), + registry_mirrors = dict(required=False, type='list'), + seccomp_profile = dict(required=False, type='str'), + selinux_enabled = dict(required=False, type="bool", default=False), + shutdown_timeout = dict(required=False, type='int'), + storage_driver = dict(required=False, type='str'), + storage_opts = dict(required=False, type='list'), + tls_ca_cert = dict(required=False, type='str'), + tls_cert = dict(required=False, type='str'), + tls_key = dict(required=False, type='str'), + tls_verify = dict(required=False, type="bool", default=False), + ) + + module = AnsibleModule( + argument_spec = args, + supports_check_mode = True, + ) + + dcc = DockerCommonConfig(module) + result = dcc.run() + + module.exit_json(**result) + + +# import module snippets +if __name__ == '__main__': + main() + + +""" +{ + "allow-nondistributable-artifacts": [], + "api-cors-header": "", + "authorization-plugins": [], + "bip": "", + "bridge": "", + "cgroup-parent": "", + "cluster-advertise": "", + "cluster-store": "", + "cluster-store-opts": {}, + "containerd": "/run/containerd/containerd.sock", + "containerd-namespace": "docker", + "containerd-plugin-namespace": "docker-plugins", + "data-root": "", + "debug": true, + "default-address-pools": [ + { + "base": "172.30.0.0/16", + "size": 24 + }, + { + "base": "172.31.0.0/16", + "size": 24 + } + ], + "default-cgroupns-mode": "private", + "default-gateway": "", + "default-gateway-v6": "", + "default-runtime": "runc", + "default-shm-size": "64M", + "default-ulimits": { + "nofile": { + "Hard": 64000, + "Name": "nofile", + 
"Soft": 64000 + } + }, + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": [], + "exec-root": "", + "experimental": false, + "features": {}, + "fixed-cidr": "", + "fixed-cidr-v6": "", + "group": "", + "hosts": [], + "icc": false, + "init": false, + "init-path": "/usr/libexec/docker-init", + "insecure-registries": [], + "ip": "0.0.0.0", + "ip-forward": false, + "ip-masq": false, + "iptables": false, + "ip6tables": false, + "ipv6": false, + "labels": [], + "live-restore": true, + "log-driver": "json-file", + "log-level": "", + "log-opts": { + "cache-disabled": "false", + "cache-max-file": "5", + "cache-max-size": "20m", + "cache-compress": "true", + "env": "os,customer", + "labels": "somelabel", + "max-file": "5", + "max-size": "10m" + }, + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "max-download-attempts": 5, + "mtu": 0, + "no-new-privileges": false, + "node-generic-resources": [ + "NVIDIA-GPU=UUID1", + "NVIDIA-GPU=UUID2" + ], + "oom-score-adjust": -500, + "pidfile": "", + "raw-logs": false, + "registry-mirrors": [], + "runtimes": { + "cc-runtime": { + "path": "/usr/bin/cc-runtime" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + }, + "seccomp-profile": "", + "selinux-enabled": false, + "shutdown-timeout": 15, + "storage-driver": "", + "storage-opts": [], + "swarm-default-advertise-addr": "", + "tls": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "tlsverify": true, + "userland-proxy": false, + "userland-proxy-path": "/usr/libexec/docker-proxy", + "userns-remap": "" +} +""" diff --git a/plugins/modules/docker_plugins.py b/plugins/modules/docker_plugins.py new file mode 100644 index 0000000..931d325 --- /dev/null +++ b/plugins/modules/docker_plugins.py @@ -0,0 +1,486 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# (c) 2020, Bodo Schulz +# BSD 2-clause (see LICENSE or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +import os +import json +import docker + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.bodsch.core.plugins.module_utils.directory import create_directory + +__metaclass__ = type + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: docker_plugins +version_added: 1.0.0 +author: "Bodo Schulz (@bodsch) " + +short_description: TBD + +description: + - TBD +""" + +EXAMPLES = """ +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + +class DockerPlugins(): + """ + Main Class to implement the installation of docker plugins + """ + module = None + + def __init__(self, module): + """ + Initialize all needed Variables + """ + self.module = module + self.state = module.params.get("state") + # + self.plugin_source = module.params.get("plugin_source") + self.plugin_version = module.params.get("plugin_version") + self.plugin_alias = module.params.get("plugin_alias") + self.docker_data_root = module.params.get("data_root") + + self.cache_directory = "/var/cache/ansible/docker" + self.plugin_information_file = os.path.join(self.cache_directory, f"plugin_{self.plugin_alias}") + + self.docker_socket = "/var/run/docker.sock" + + def run(self): + """ + run + """ + docker_status = False + # TODO + # with broken ~/.docker/daemon.json will this fail! 
+ try: + if os.path.exists(self.docker_socket): + # self.module.log("use docker.sock") + self.docker_client = docker.DockerClient(base_url=f"unix://{self.docker_socket}") + else: + self.docker_client = docker.from_env() + + docker_status = self.docker_client.ping() + docker_info = self.docker_client.info() + + self.docker_current_data_root = docker_info.get('DockerRootDir', None) + + except docker.errors.APIError as e: + self.module.log( + msg=f" exception: {e}" + ) + except Exception as e: + self.module.log( + msg=f" exception: {e}" + ) + + if not docker_status: + return dict( + changed = False, + failed = True, + msg = "no running docker found" + ) + + create_directory(self.cache_directory) + + self.plugin_state, plugin_id, self.plugin_version_equal, plugin_state_message = self.check_plugin() + + # self.module.log(msg=f" plugin_state : {self.plugin_state}") + # self.module.log(msg=f" plugin_version_equal : {self.plugin_version_equal}") + # self.module.log(msg=f" plugin_state_message : {plugin_state_message}") + # self.module.log(msg=f" data_root : {self.docker_data_root}") + # self.module.log(msg=f" current_data_root : {self.docker_current_data_root}") + + if self.state == "test": + """ + """ + if plugin_id: + plugin_config_file = os.path.join(self.docker_data_root, "plugins", plugin_id, "config.json") + + if not os.path.exists(plugin_config_file): + self.module.log(msg=f"The plugin {self.plugin_alias} is not installed under the expected data-root path {self.docker_data_root}.") + + return dict( + changed = False, + installed = self.plugin_state, + equal_versions = self.plugin_version_equal, + msg = plugin_state_message + ) + + if self.state == "absent": + return self.uninstall_plugin() + + return self.install_plugin() + + def check_plugin(self): + """ + """ + installed_plugin_enabled = False + installed_plugin_name = None + installed_plugin_shortname = None + installed_plugin_version = None + installed_plugin_id = None + installed_plugin_short_id = None + + equal_versions = True + + msg = f"plugin {self.plugin_alias} ist not installed" + + try: + p_list = self.docker_client.plugins.list() + + for plugin in p_list: + installed_plugin_enabled = plugin.enabled + installed_plugin_shortname = plugin.name.split(':')[0] + + if installed_plugin_shortname == self.plugin_alias: + installed_plugin_name = plugin.name + installed_plugin_shortname = plugin.name.split(':')[0] + installed_plugin_version = plugin.name.split(':')[1] + installed_plugin_id = plugin.id + installed_plugin_short_id = plugin.short_id + + break + + except docker.errors.APIError as e: + error = str(e) + self.module.log(msg=f"{error}") + + except Exception as e: + error = str(e) + self.module.log(msg=f"{error}") + + # self.module.log(msg=f" name : {installed_plugin_name}") + # self.module.log(msg=f" shortname: {installed_plugin_shortname}") + # self.module.log(msg=f" version : {installed_plugin_version}") + # self.module.log(msg=f" short_id : {installed_plugin_short_id}") + # self.module.log(msg=f" enabled : {installed_plugin_enabled}") + # + # self.module.log(msg=f" version wanted: {self.plugin_version}") + + self.installed_plugin_data = dict( + id = installed_plugin_id, + short_id = installed_plugin_short_id, + name = installed_plugin_name, + short_name = installed_plugin_shortname, + version = installed_plugin_version, + enabled = installed_plugin_enabled + ) + + if installed_plugin_name and installed_plugin_version: + msg = f"plugin {installed_plugin_shortname} is installed in version '{installed_plugin_version}'" + + if 
self.plugin_version == installed_plugin_version: + self.__write_plugin_information(self.installed_plugin_data) + else: + equal_versions = False + msg += f", but versions are not equal! (your choise {self.plugin_version} vs. installed {installed_plugin_version})" + + return True, installed_plugin_id, equal_versions, msg + else: + return False, None, False, msg + + def plugin_information(self, plugin_data): + """ + """ + self.module.log(msg=f" name : {plugin_data.name}") + self.module.log(msg=f" enabled : {plugin_data.enabled}") + self.module.log(msg=f" shortname: {plugin_data.name.split(':')[0]}") + self.module.log(msg=f" version : {plugin_data.name.split(':')[1]}") + self.module.log(msg=f" short_id : {plugin_data.short_id}") + self.module.log(msg=f" id : {plugin_data.id}") + + self.module.log(msg=f" version wanted: {self.plugin_version}") + + def install_plugin(self): + """ + """ + installed_plugin = self.installed_plugin_data.get('name', None) + + if not self.plugin_version_equal and installed_plugin: + """ + disable old plugin + """ + self.module.log(msg=f"disable other plugin version ({installed_plugin})") + try: + installed_plugin = self.docker_client.plugins.get(f"{installed_plugin}") + + if installed_plugin: + installed_plugin.disable(force=True) + + except docker.errors.APIError as e: + error = str(e) + self.module.log(msg=f"{error}") + + except Exception as e: + error = str(e) + self.module.log(msg=f"{error}") + + self.module.log(msg=f"Check whether the plugin {self.plugin_alias} is already installed in version {self.plugin_version}") + + try: + installed_plugin = self.docker_client.plugins.get(f"{self.plugin_alias}:{self.plugin_version}") + + except docker.errors.APIError as e: + error = str(e) + self.module.log(msg=f"{error}") + installed_plugin = None + pass + + if installed_plugin: + + # _installed_plugin = installed_plugin + self.plugin_information(installed_plugin) + + # self.module.log(msg=f"{self.docker_data_root}") + # self.module.log(msg=f"{str(installed_plugin.id)}") + # self.module.log(msg=f"{installed_plugin.id}") + + plugin_config_file = os.path.join(self.docker_data_root, "plugins", installed_plugin.id, "config.json") + + # self.module.log(msg=f"{plugin_config_file}") + + if not os.path.exists(plugin_config_file): + self.module.log(msg=f"The plugin {self.plugin_alias} is not installed under the expected data-root path {self.docker_data_root}.") + self.uninstall_plugin() + + try: + self.module.log(msg="re-enable plugin") + installed_plugin.enable(timeout=10) + except docker.errors.APIError as e: + error = str(e) + self.module.log(msg=f"{error}") + pass + + try: + self.module.log(msg="reload plugin attrs") + installed_plugin.reload() + except docker.errors.APIError as e: + error = str(e) + self.module.log(msg=f"{error}") + pass + + result = dict( + changed = True, + failed = False, + msg = f"plugin {self.plugin_alias} was successfully re-enabled in version {self.plugin_version}" + ) + + else: + try: + self.module.log(msg=f"install plugin in version {self.plugin_version}") + + plugin = self.docker_client.plugins.install( + remote_name=f"{self.plugin_source}:{self.plugin_version}", + local_name=f"{self.plugin_alias}:{self.plugin_version}") + + try: + self.module.log(msg="enable plugin") + plugin.enable(timeout=10) + except docker.errors.APIError as e: + error = str(e) + self.module.log(msg=f"{error}") + pass + + try: + self.module.log(msg="reload plugin attrs") + plugin.reload() + except docker.errors.APIError as e: + error = str(e) + self.module.log(msg=f"{error}") + 
pass
+
+                installed_plugin_shortname = plugin.name.split(':')[0]
+                installed_plugin_version = plugin.name.split(':')[1]
+
+                result = dict(
+                    changed = True,
+                    failed = False,
+                    msg = f"plugin {installed_plugin_shortname} was successfully installed in version {installed_plugin_version}"
+                )
+
+            except docker.errors.APIError as e:
+                error = str(e)
+                self.module.log(msg=f"{error}")
+
+                result = dict(
+                    changed = False,
+                    failed = True,
+                    msg = error
+                )
+
+            except Exception as e:
+                error = str(e)
+                self.module.log(msg=f"{error}")
+
+                result = dict(
+                    changed = False,
+                    failed = True,
+                    msg = error
+                )
+
+        return result
+
+    def uninstall_plugin(self):
+        """
+            disable and remove an installed plugin
+        """
+        installed_plugin = self.installed_plugin_data.get('name', None)
+
+        if installed_plugin:
+            """
+                disable old plugin
+            """
+            try:
+                installed_plugin = self.docker_client.plugins.get(f"{installed_plugin}")
+
+                if installed_plugin:
+                    self.module.log(msg=f"disable plugin version ({installed_plugin})")
+                    installed_plugin.disable(force=True)
+
+                    self.module.log(msg="remove plugin")
+                    installed_plugin.remove(force=True)
+
+                self.__remove_plugin_information()
+
+                result = dict(
+                    changed = True,
+                    failed = False,
+                    msg = f"plugin {installed_plugin} was successfully removed."
+                )
+
+            except docker.errors.APIError as e:
+                error = str(e)
+                self.module.log(msg=f"{error}")
+
+                result = dict(
+                    changed = False,
+                    failed = True,
+                    msg = error
+                )
+
+            except Exception as e:
+                error = str(e)
+                self.module.log(msg=f"{error}")
+
+                result = dict(
+                    changed = False,
+                    failed = True,
+                    msg = error
+                )
+        else:
+            result = dict(
+                changed = False,
+                failed = False,
+                msg = "plugin is not installed."
+            )
+
+        return result
+
+    def docker_config_value(self, value, default):
+        """
+            read a single value from the docker daemon configuration
+        """
+        config_data = self.__read_docker_config()
+
+        config_value = config_data.get(value, None)
+        if config_value:
+            return config_value
+        else:
+            return default
+
+    def __read_docker_config(self, config_file="/etc/docker/daemon.json"):
+        """
+            read the docker daemon configuration
+        """
+        self.module.log(msg=f"__read_docker_config(self, {config_file})")
+        data = dict()
+
+        if os.path.exists(config_file):
+            self.module.log(" read")
+            with open(config_file) as json_file:
+                self.module.log(f" {json_file}")
+                data = json.load(json_file)
+        else:
+            self.module.log(f" {config_file} doesn't exist.")
+
+        self.module.log(msg=f" {data}")
+
+        return data
+
+    def __write_plugin_information(self, data):
+        """
+            persist the plugin information as json
+        """
+        self.module.log(msg=f"persist plugin information in '{self.plugin_information_file}'")
+
+        with open(self.plugin_information_file, 'w') as fp:
+            json_data = json.dumps(data, indent=2, sort_keys=False)
+            fp.write(f'{json_data}\n')
+
+    def __remove_plugin_information(self):
+        """
+            remove the persisted plugin information
+        """
+        if os.path.exists(self.plugin_information_file):
+            os.remove(self.plugin_information_file)
+
+
+# ---------------------------------------------------------------------------------------
+# Module execution.
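+#
+# Illustrative usage sketch (an assumption, not taken from the module's DOCUMENTATION;
+# the plugin shown and all values are examples only):
+#
+#   - name: install the loki docker log driver plugin
+#     docker_plugins:
+#       state: present
+#       plugin_source: grafana/loki-docker-driver
+#       plugin_version: latest
+#       plugin_alias: loki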
+# + + +def main(): + + args = dict( + state = dict( + default="present", + choices=[ + "absent", + "present", + "test" + ] + ), + # + plugin_source = dict( + required = True, + type='str' + ), + plugin_version = dict( + required = False, + type="str", + default = "latest" + ), + plugin_alias = dict( + required = True, + type='str' + ), + data_root=dict( + type='str', + default="/var/lib/docker" + ) + ) + + module = AnsibleModule( + argument_spec = args, + supports_check_mode = True, + ) + + dp = DockerPlugins(module) + result = dp.run() + + module.log(msg=f"= result: {result}") + + module.exit_json(**result) + + +# import module snippets +if __name__ == '__main__': + main() diff --git a/plugins/modules/docker_version.py b/plugins/modules/docker_version.py new file mode 100644 index 0000000..f99e7f5 --- /dev/null +++ b/plugins/modules/docker_version.py @@ -0,0 +1,136 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# (c) 2020, Bodo Schulz +# BSD 2-clause (see LICENSE or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import absolute_import, division, print_function +import os +import json +import docker + +from ansible.module_utils.basic import AnsibleModule + +__metaclass__ = type + +# --------------------------------------------------------------------------------------- + +DOCUMENTATION = """ +module: docker_version +version_added: 1.0.0 +author: "Bodo Schulz (@bodsch) " + +short_description: TBD + +description: + - TBD +""" + +EXAMPLES = """ +""" + +RETURN = """ +""" + +# --------------------------------------------------------------------------------------- + +class DockerVersion(): + """ + Main Class to implement the installation of docker plugins + """ + module = None + + def __init__(self, module): + """ + Initialize all needed Variables + """ + self.module = module + self.state = module.params.get("state") + self.docker_socket = module.params.get("docker_socket") + + def run(self): + """ + run + """ + docker_status = False + docker_version = None + docker_versions = dict() + + error_msg = None + + # TODO + # with broken ~/.docker/daemon.json will this fail! 
+ try: + if os.path.exists(self.docker_socket): + # self.module.log("use docker.sock") + self.docker_client = docker.DockerClient(base_url=f"unix://{self.docker_socket}") + else: + self.docker_client = docker.from_env() + + docker_status = self.docker_client.ping() + + except docker.errors.APIError as e: + error_msg = f"APIError : {e}" + self.module.log(error_msg) + except Exception as e: + error_msg = f"Exception: {e}" + self.module.log(error_msg) + + if not docker_status: + return dict( + changed = False, + failed = True, + msg = f"{error_msg} (no running docker found)" + ) + + docker_version = self.docker_client.version() + + # self.module.log(msg=f" = {json.dumps(docker_version, sort_keys=True)}") + + if docker_version: + docker_versions.update({"api_version": docker_version.get("ApiVersion", None)}) + docker_versions.update({"docker_version": docker_version.get("Version", None)}) + + self.module.log(msg=f" = {json.dumps(docker_versions, sort_keys=True)}") + + return dict( + failed = False, + changed = False, + versions = docker_versions + ) + + +def main(): + + args = dict( + state = dict( + default="present", + choices=[ + "absent", + "present", + "test" + ] + ), + docker_socket=dict( + required = False, + type="str", + default = "/run/docker.sock" + ) + ) + + module = AnsibleModule( + argument_spec = args, + supports_check_mode = True, + ) + + dp = DockerVersion(module) + result = dp.run() + + module.log(msg=f"= result: {result}") + + module.exit_json(**result) + + +# import module snippets +if __name__ == '__main__': + main() diff --git a/roles/container/.ansible-lint b/roles/container/.ansible-lint new file mode 100644 index 0000000..7d8f692 --- /dev/null +++ b/roles/container/.ansible-lint @@ -0,0 +1,7 @@ +--- + +skip_list: + - name[casing] + - name[template] + - args[module] + - command-instead-of-module diff --git a/roles/container/.editorconfig b/roles/container/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/roles/container/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/roles/container/.flake8 b/roles/container/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/roles/container/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/roles/container/.github/workflows/clean-workflows.yml b/roles/container/.github/workflows/clean-workflows.yml new file mode 100644 index 0000000..18ce16d --- /dev/null +++ b/roles/container/.github/workflows/clean-workflows.yml @@ -0,0 +1,31 @@ +--- + +name: delete workflow runs + +on: + schedule: + - cron: "10 4 * * 0" + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + +jobs: + delete-workflow-runs: + 
runs-on: ubuntu-latest + name: delete old workflow runs + steps: + - name: Delete workflow runs + uses: MajorScruffy/delete-old-workflow-runs@v0.3.0 + with: + repository: bodsch/ansible-container + older-than-seconds: 2592000 # remove all workflow runs older than 30 day + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/roles/container/.github/workflows/configured.yml b/roles/container/.github/workflows/configured.yml new file mode 100644 index 0000000..1902590 --- /dev/null +++ b/roles/container/.github/workflows/configured.yml @@ -0,0 +1,58 @@ +--- +name: configured + +on: + workflow_run: + workflows: + - "CI" + types: + - completed + +defaults: + run: + working-directory: 'ansible-container' + +jobs: + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:12 + python_version: + - "3.11.3" + ansible-version: + - '6.7' + scenario: + - configured + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + path: 'ansible-container' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/container/.github/workflows/galaxy.yml b/roles/container/.github/workflows/galaxy.yml new file mode 100644 index 0000000..35dea1c --- /dev/null +++ b/roles/container/.github/workflows/galaxy.yml @@ -0,0 +1,30 @@ +--- + +name: push to ansible galaxy + +on: + workflow_dispatch: + workflow_run: + workflows: + - "CI" + branches: + - main + types: + - completed + +jobs: + galaxy: + name: galaxy + runs-on: ubuntu-20.04 + if: ${{ github.event.workflow_run.conclusion == 'success' }} + steps: + - name: Check out the codebase + uses: actions/checkout@v3 + with: + path: 'ansible-container' + + - name: galaxy + uses: robertdebock/galaxy-action@1.2.1 + with: + galaxy_api_key: ${{ secrets.galaxy_api_key }} + git_branch: main diff --git a/roles/container/.github/workflows/linter.yml b/roles/container/.github/workflows/linter.yml new file mode 100644 index 0000000..4267521 --- /dev/null +++ b/roles/container/.github/workflows/linter.yml @@ -0,0 +1,56 @@ +--- + +name: code linter + +on: + schedule: + - cron: "20 4 * * 0" + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + push: + branches: + - 'main' + - 'feature/**' + - '!doc/**' + paths: + - "!Makefile" + - "!README.md" + - "tasks/**" + pull_request: + branches: + - 'main' + - 'feature/**' + - '!doc/**' + paths: + - "!Makefile" + - "!README.md" + - "tasks/**" + +jobs: + lint: + name: linting + runs-on: ubuntu-latest + steps: + - name: 🛎 Checkout + uses: actions/checkout@v3 + + - name: lint + uses: docker://ghcr.io/github/super-linter:slim-v4 + env: + DEFAULT_BRANCH: main + GITHUB_TOKEN: ${{ secrets.GH_REGISTRY_TOKEN }} + VALIDATE_ALL_CODEBASE: true + VALIDATE_ANSIBLE: true + # VALIDATE_MARKDOWN: true 
+ VALIDATE_YAML: true + +... diff --git a/roles/container/.github/workflows/main.yml b/roles/container/.github/workflows/main.yml new file mode 100644 index 0000000..9fbeea2 --- /dev/null +++ b/roles/container/.github/workflows/main.yml @@ -0,0 +1,106 @@ +--- +name: CI + +on: + workflow_run: + workflows: + - "code linter" + types: + - completed + +defaults: + run: + working-directory: 'ansible-container' + +jobs: + arch: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - archlinux:latest + python_version: + - "3.10.11" + - "3.11.3" + ansible-version: + - '6.7' + scenario: + - default + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + path: 'ansible-container' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} + + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:10 + - debian:11 + - debian:12 + python_version: + - "3.10.11" + - "3.11.3" + ansible-version: + - '6.7' + scenario: + - default + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + path: 'ansible-container' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/container/.github/workflows/many-property-files.yml b/roles/container/.github/workflows/many-property-files.yml new file mode 100644 index 0000000..6d1361c --- /dev/null +++ b/roles/container/.github/workflows/many-property-files.yml @@ -0,0 +1,59 @@ +--- +name: many property files + +on: + workflow_run: + workflows: + - "CI" + types: + - completed + +defaults: + run: + working-directory: 'ansible-container' + +jobs: + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:12 + ansible-version: + - '6.7' + python_version: + - "3.11.3" + scenario: + - many-properties + + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + path: 'ansible-container' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/container/.github/workflows/multiple-container-with-filter.yml b/roles/container/.github/workflows/multiple-container-with-filter.yml new file mode 100644 index 0000000..155734d --- /dev/null +++ b/roles/container/.github/workflows/multiple-container-with-filter.yml @@ -0,0 +1,58 @@ +--- +name: multiple container with filter by + +on: + workflow_run: + workflows: + - "CI" + types: + - completed + +defaults: + run: + working-directory: 'ansible-container' + +jobs: + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:12 + ansible-version: + - '6.7' + python_version: + - "3.11.3" + scenario: + - multiple-container-with-filter + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + path: 'ansible-container' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/container/.github/workflows/multiple-container.yml b/roles/container/.github/workflows/multiple-container.yml new file mode 100644 index 0000000..53af1cf --- /dev/null +++ b/roles/container/.github/workflows/multiple-container.yml @@ -0,0 +1,58 @@ +--- +name: multiple container + +on: + workflow_run: + workflows: + - "CI" + types: + - completed + +defaults: + run: + working-directory: 'ansible-container' + +jobs: + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:12 + ansible-version: + - '6.7' + python_version: + - "3.11.3" + scenario: + - multiple-container + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + path: 'ansible-container' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/container/.github/workflows/update-container.yml b/roles/container/.github/workflows/update-container.yml new file mode 100644 index 0000000..a8793e7 --- /dev/null +++ b/roles/container/.github/workflows/update-container.yml @@ -0,0 +1,58 @@ +--- +name: update container + +on: + workflow_run: + workflows: + - "CI" + types: + - completed + +defaults: + run: + working-directory: 'ansible-container' + +jobs: + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:12 + ansible-version: + - '6.7' + python_version: + - "3.11.3" + scenario: + - update-container + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + path: 'ansible-container' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/container/.github/workflows/update-properties.yml b/roles/container/.github/workflows/update-properties.yml new file mode 100644 index 0000000..db2e2f3 --- /dev/null +++ b/roles/container/.github/workflows/update-properties.yml @@ -0,0 +1,58 @@ +--- +name: update properties + +on: + workflow_run: + workflows: + - "CI" + types: + - completed + +defaults: + run: + working-directory: 'ansible-container' + +jobs: + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:12 + ansible-version: + - '6.7' + python_version: + - "3.11.3" + scenario: + - update-properties + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + path: 'ansible-container' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/container/.gitignore b/roles/container/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/roles/container/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/roles/container/.yamllint b/roles/container/.yamllint new file mode 100644 index 0000000..e3f52af --- /dev/null +++ b/roles/container/.yamllint @@ -0,0 +1,36 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable diff --git a/roles/container/LICENSE b/roles/container/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/roles/container/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/roles/container/Makefile b/roles/container/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/roles/container/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/roles/container/README.md b/roles/container/README.md new file mode 100644 index 0000000..00baf63 --- /dev/null +++ b/roles/container/README.md @@ -0,0 +1,454 @@ + +# Ansible Role: `container` + + +ansible role for docker deployment of generic container applications + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-container/main.yml?branch=main)][ci] +[![GitHub issues](https://img.shields.io/github/issues/bodsch/ansible-container)][issues] +[![GitHub release (latest by date)](https://img.shields.io/github/v/release/bodsch/ansible-container)][releases] +[![Ansible Quality Score](https://img.shields.io/ansible/quality/50067?label=role%20quality)][quality] + +[ci]: https://github.com/bodsch/ansible-container/actions +[issues]: https://github.com/bodsch/ansible-container/issues?q=is%3Aopen+is%3Aissue +[releases]: https://github.com/bodsch/ansible-container/releases +[quality]: https://galaxy.ansible.com/bodsch/container + + +## Requirements & Dependencies + +- pip module `ruamel.yaml` + +Ansible Collections + +- [bodsch.core](https://github.com/bodsch/ansible-collection-core) in Version > 1.0.14 + +```bash +ansible-galaxy collection install bodsch.core +``` +or +```bash +ansible-galaxy collection install --requirements-file collections.yml +``` + + +### Operating systems + +Tested on + +* Arch Linux +* Debian based + - Debian 10 / 11 + - Ubuntu 20.10 + +## usage + +```yaml +container_reporting: + changes: true + failed: true + +container_fail: + error_at_launch: true + +container_env_directory: /opt/container + +container_registry: + host: '' + username: '' + password: '' + +container: [] + +container_pre_tasks: [] +container_post_tasks: [] + +container_use_network: true +container_network: [] + +container_comparisons: + # '*': ignore + image: strict # don't restart containers with older versions of the image + env: strict # we want precisely this environment + labels: ignore + +# filter container by +container_filter: + # ["name", "hostname", "image"] + by: "" + names: [] + +container_default_behavior: "compatibility" +container_clean_update_fact: true +``` + +### `container_reporting` + +If there is a change in the started containers, a report can be issued. +This can concern both `changes` and `failures`. + +```yaml +container_reporting: + changes: true + failed: true +``` + +### `container_fail` + +If there was an error when starting a container, you can define here whether you want to ignore the error. + +```yaml +container_fail: + error_at_launch: true +``` + +### `container_env_directory` + +Defines the directory in which the environment data and the properties are persisted. + +```yaml +container_env_directory: /opt/container +``` + + +### `container registry` + +Configures a container registry. +If `host`, `username` and `password` are defined, a corresponding login to the registry is also carried out. 
+
+```yaml
+container_registry:
+  host: ''
+  username: ''
+  password: ''
+```
+
+If you need to obtain containers from more than one registry, you can also configure them accordingly (only available from version > 2.5.1):
+
+```yaml
+container_registry:
+  # Log into DockerHub
+  - username: 'docker'
+    password: 'rekcod'
+  # Log into private registry and force re-authorization
+  - username: 'yourself'
+    password: 'secrets3'
+    host: 'your.private.registry.io'
+    reauthorize: true
+```
+
+### `container_pre_tasks` and `container_post_tasks`
+
+You can define your own pre- or post-tasks.
+The individual scripts are executed before or after (re)starting the containers.
+For example, you can use them to remove old container images, volumes or other things.
+
+```yaml
+container_pre_tasks: []
+container_post_tasks: []
+```
+
+A few example scripts can be found under [`files`](./files):
+
+- `prune.sh`
+- `list_all_container.sh`
+- `list_all_images.sh`
+- `remove_stopped_container.sh`
+- `remove_untagged_images.sh`
+- `parse_container_fact.sh`
+
+### `container_network` / `container_use_network`
+
+It is possible to allow the respective containers to use one (or more) networks.
+
+```yaml
+container_use_network: true
+container_network:
+  - name: docker_network
+    subnet: 172.3.27.0/24
+    gateway: 172.3.27.2
+    iprange: 172.3.27.0/26
+
+  - name: monitoring
+    state: absent
+    enable_ipv6: false
+    subnet: 172.9.27.0/24
+    gateway: 172.9.27.2
+    iprange: 172.9.27.0/26
+```
+
+### `container_comparisons`
+
+The default configuration for `docker_container.comparisons`.
+Allows you to specify how properties of existing containers are compared with module options to
+decide whether or not to recreate/update the container.
+
+[see also](https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#parameter-comparisons)
+
+```yaml
+container_comparisons:
+  # '*': ignore
+  image: strict # don't restart containers with older versions of the image
+  env: strict # we want precisely this environment
+  labels: ignore
+```
+
+### `container_filter`
+
+In a large environment, there are many containers that need to be considered during a run.
+To reduce the runtime, or to roll out only certain containers, these can be filtered.
+The filter criteria available here are `name`, `hostname` and `image`.
+
+```yaml
+container_filter:
+  # ["name", "hostname", "image"]
+  by: ""
+  names: []
+```
+
+**For example:**
+
+```yaml
+container_filter:
+  by: hostname
+  names:
+    - hello-world-1
+```
+or
+
+```yaml
+container_filter:
+  by: image
+  names:
+    - busybox:latest
+```
+
+
+### `container_default_behavior`
+
+> In older versions of the `docker_container` module, various module options used to have default values.
+> This caused problems with containers which use different values for these options.
+>
+> The default value is now `no_defaults`.
+> To restore the old behavior, set it to `compatibility`, which will ensure that the default values are
+> used when the values are not explicitly specified by the user.
+>
+> This affects the *auto_remove*, *detach*, *init*, *interactive*, *memory*, *paused*, *privileged*, *read_only* and *tty* options.
+
+[see also](https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#parameter-container_default_behavior)
+
+```yaml
+container_default_behavior: "compatibility"
+```
+
+### `container_clean_update_fact`
+
+To enable the necessary restart of a container even in the event of an error, a corresponding Ansible fact is created.
+This fact can be evaluated in a post-task, for example. +By default, the created fact is removed after a successful run. +For test and development purposes, the deletion can be deactivated. + +> **Please note that containers may be restarted with each new run of the role!** + +```yaml +container_clean_update_fact: true +``` + +### `container` + +A list with the definition of all containers served by this role. + +> **However, not all parameters of the [`docker_container`](https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html) module have been implemented!** + +For all supported parameters you should have a look at [`tasks/launch/launch_container.yml`](tasks/launch/launch_container.yml). + + +#### Simple example: + +```yaml +container: + - name: workflow + image: "{{ container_registry.host }}/workflow:{{ container_tag }}" + pull: true + state: started + restart_policy: always + dns_servers: + - "{{ ansible_default_ipv4.address }}" + networks_cli_compatible: true + networks: + - name: coremedia + capabilities: + - SYS_ADMIN + volumes: + - heapdumps:/coremedia/heapdumps + published_ports: + - 40380:8080 + - 40381:8081 + - 40383:40383 + - 40305:5005 + environments: + DEBUG_ENTRYPOINT: "false" + # ... + HEAP_DUMP_FILENAME: workflow-server-heapdump + properties: + publisher.maxRecursionDepth: 600 +``` + +More examples can be found here: + +- [`molecule/default`](molecule/default/group_vars/all/vars.yml) +- [`molecule/multiple-containe`](molecule/multiple-container/group_vars/all/vars.yml) +- [`molecule/update-container`](molecule/update-container/group_vars/all/vars.yml) +- [`molecule/update-properties`](molecule/update-properties/group_vars/all/vars.yml) +- [`molecule/many-properties`](molecule/many-properties/group_vars/all/vars.yml) + +#### environments + +All `environments` entries are persisted to a separate environments file on the target system. + +E.g. under `/opt/container/${CONTAINER_NAME}/container.env` + +The target directory for persistence can be customized via `container_env_directory`. + +#### properties + +All `properties` entries are persisted to a separate properties file on the target system. + +E.g. under `/opt/container/${CONTAINER_NAME}/${CONTAINER_NAME}.properties` + +The target directory for persistence can be customized via `container_env_directory`. + +Since version 2.3, several separate properties files can be created per container. +To do this, a list must be created under `property_files`. + +> Both `properties` and `property_files` can be used in parallel. + +**For example:** + +```yaml + + property_files: + - name: publisher.properties + properties: + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'replicator' + # replicator.domain: '' + replicator.tmp_dir: 'var/tmp' + # + publisher.maxRecursionDepth: 200 + - name: database.properties +``` + +If no `properties` is defined here, the associated file is deleted. + + +#### volumes and mounts + +##### custom fileds for volumes + +The idea behind the cutom_fields is to define corresponding rights in addition to the optional +creation of the directories. + +**For example:** + +One can persist the data directory in the host system for a solr container and also assign the +correct rights to this directory. + +However, since it is also possible to mount files or sockets in the container via volumes, it is +possible here to prevent the creation of a directory using `ignore`. 
+ +The following variables can be used: + +- `owner` +- `group` +- `mode` +- `ignore` + +**Example** + +```yaml + + volumes: + - /run/docker.sock:/run/docker.sock:ro + - /tmp/nginx:/tmp/nginx:ro + - /dev/foo:/dev/foo:ro + - testing3:/var/tmp/testing3:rw|{owner="999",group="1000"} + - testing4:/var/tmp/testing4|{owner="1001",mode="0700"} +``` + +##### custom fields for mounts + +The `mounts` are similar to the `volumes`. +Here, too, it is possible to create persistent directories in the host system via an extension `source_handling`. + +With `create`, you can control whether the source directory should be created or not. +The specification of `owner` and `group` enables the setting of access rights. + +**Example** + +```yaml + + mounts: + - source: /tmp/testing1 + target: /var/tmp/testing1 + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + - source: /tmp/testing2 + target: /var/tmp/testing2 + type: bind + source_handling: + create: true + owner: "800" + group: "999" + mode: "0700" + - source: /tmp/testing5 + target: /var/tmp/testing5 + type: bind +``` + + +## tests + +Local tests are executed in a docker container. +Note that this container must provide its own docker daemon (*docker-in-docker*). + +```bash +make +make verify +make destroy +``` + +You can call these tests with different Ansible versions: + +```bash +make -e TOX_ANSIBLE=ansible_6.4 +make destroy -e TOX_ANSIBLE=ansible_6.4 +``` + +The currently testable Ansible versions are defined in [`tox.ini`](./tox.ini). + + +Below `molecule`, various tests are provided. If none is explicitly specified, `default` is used. +To call a special test, you can define it via `-e TOX_SCENARIO=$TEST`. + +```bash +make -e TOX_SCENARIO=multiple-container +make destroy -e TOX_SCENARIO=multiple-container +``` + +--- + +## Author and License + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +**FREE SOFTWARE, HELL YEAH!** diff --git a/roles/container/collections.yml b/roles/container/collections.yml new file mode 100644 index 0000000..c41bb2e --- /dev/null +++ b/roles/container/collections.yml @@ -0,0 +1,10 @@ +--- + +collections: + - name: community.general + version: ">=6.5.0" + - name: community.docker + version: ">=3.4.5" + - name: bodsch.core + version: ">=1.0.18" + type: galaxy diff --git a/roles/container/defaults/main.yml b/roles/container/defaults/main.yml new file mode 100644 index 0000000..98fd835 --- /dev/null +++ b/roles/container/defaults/main.yml @@ -0,0 +1,52 @@ +--- + +container_reporting: + changes: true + failed: true + +container_fail: + error_at_launch: true + +container_env_directory: /opt/container + +container_registry: [] +# - username: '' +# password: '' +# host: '' + +container: [] + +container_pre_tasks: [] +container_post_tasks: [] + +container_custom_tasks: + - prune.sh + - list_all_container.sh + - list_all_images.sh + - remove_stopped_container.sh + - remove_untagged_images.sh + - parse_container_fact.sh + +container_use_network: true +container_network: [] +# - name: docker_network +# subnet: 172.3.27.0/24 +# gateway: 172.3.27.2 +# iprange: 172.3.27.0/26 + +# see: https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#parameter-comparisons +container_comparisons: + # '*': ignore + image: strict # don't restart containers with older versions of the image + env: strict # we want precisely this environment + labels: ignore + +# filter by ["name", "hostname", "image"] +container_filter: + by: "" + names: [] + 
+container_default_behavior: "compatibility" +container_clean_update_fact: true + +... diff --git a/roles/container/files/list_all_container.sh b/roles/container/files/list_all_container.sh new file mode 100644 index 0000000..8f7d4f4 --- /dev/null +++ b/roles/container/files/list_all_container.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +docker ps --all diff --git a/roles/container/files/list_all_images.sh b/roles/container/files/list_all_images.sh new file mode 100644 index 0000000..85a623c --- /dev/null +++ b/roles/container/files/list_all_images.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +docker image ls --all diff --git a/roles/container/files/parse_container_fact.sh b/roles/container/files/parse_container_fact.sh new file mode 100644 index 0000000..f63ecce --- /dev/null +++ b/roles/container/files/parse_container_fact.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +FACTS_FILE="/etc/ansible/facts.d/update_container.fact" + +if [ -f "${FACTS_FILE}" ] +then + + data=$(bash "${FACTS_FILE}") + + recreate=$(echo "${data}" | jq -r '.update_needed[] | select(.recreate)') + + # image=$(echo "${recreate}" | jq -r '.image') + names=$(echo "${recreate}" | jq -r '.name') + + if [ -n "${names}" ] + then + echo "" + echo "special update hook for:" + + for n in ${names} + do + echo " - ${n}" + + if [ "${n}" = "busybox-2" ] + then + echo "cat environments:" + if [ -f "/opt/container/${n}/container.env" ] + then + cat "/opt/container/${n}/container.env" + fi + fi + done + fi + + +fi diff --git a/roles/container/files/prune.sh b/roles/container/files/prune.sh new file mode 100644 index 0000000..cfff83c --- /dev/null +++ b/roles/container/files/prune.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +output() { + + while read -r line + do + echo -e " $line" + done < <(${1}) + + echo "" +} + +echo "- remove all stopped containers" +output "docker container prune --force" + +echo "- remove all unused images, not just dangling ones" +output "docker image prune --all --force" + +echo "- remove all unused networks" +output "docker network prune --force" + +echo "- remove all unused local volumes. 
Unused local volumes are those which are not referenced by any containers" +output "docker volume prune --force" + +echo "- remove all unused containers, networks, images (both dangling and unreferenced), and optionally, volumes" +output "docker system prune --all --force" diff --git a/roles/container/files/remove_stopped_container.sh b/roles/container/files/remove_stopped_container.sh new file mode 100644 index 0000000..7dde4b4 --- /dev/null +++ b/roles/container/files/remove_stopped_container.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +container=$(docker ps --all --quiet | grep Exit | cut -d ' ' -f 1) + +if [ -n "${container}" ] +then + echo "deleting stopped containers" + docker rm ${container} +fi diff --git a/roles/container/files/remove_untagged_images.sh b/roles/container/files/remove_untagged_images.sh new file mode 100644 index 0000000..dff87b3 --- /dev/null +++ b/roles/container/files/remove_untagged_images.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +container=$(docker images --all --quiet --filter dangling=true) + +if [ -n "${container}" ] +then + echo "deleting untagged images" + docker rmi --force ${container} +fi diff --git a/roles/container/handlers/main.yml b/roles/container/handlers/main.yml new file mode 100644 index 0000000..ea8e5a7 --- /dev/null +++ b/roles/container/handlers/main.yml @@ -0,0 +1,53 @@ +--- + +- name: clean apt cache (ansible < 2.13) # noqa command-instead-of-module + listen: clean apt cache + ansible.builtin.command: | + apt-get clean + register: apt_clean + changed_when: apt_clean.rc != 0 + failed_when: apt_clean.rc != 0 + when: "ansible_version.full is version_compare('2.13', '<')" + +- name: clean apt cache (ansible >= 2.13) + listen: clean apt cache + ansible.builtin.apt: + clean: true + when: "ansible_version.full is version_compare('2.13', '>=')" + +- name: created application directories # noqa no-handler + ansible.builtin.debug: + msg: "{{ _created_directories.created_directories }}" + when: + - _created_directories.changed + - _created_directories.created_directories is defined + +- name: created container volumes and mountpoints # noqa no-handler + ansible.builtin.debug: + msg: "{{ _created_directories.created_directories }}" + when: + - _created_directories.changed + - _created_directories.created_directories is defined + +- name: container restart necessary # noqa no-handler + ansible.builtin.set_fact: + container_update_needed: true + when: + - _container_data.changed + +- name: update container for recreate running docker instance + ansible.builtin.set_fact: + container: "{{ _container_data.container_data }}" + when: + - _container_data is defined + - _container_data.container_data is defined + +- name: created environnments or properties # noqa no-handler + ansible.builtin.debug: + msg: "{{ _container_data.msg }}" + when: + - _container_data is defined + - _container_data.msg is defined + - _container_data.changed + +... 
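The helper scripts above (`prune.sh`, `list_all_container.sh`, `remove_stopped_container.sh`, `remove_untagged_images.sh`, `parse_container_fact.sh`) are shipped with the role and can then be referenced from `container_pre_tasks` / `container_post_tasks`. A minimal sketch, assuming the scripts end up under `/usr/local/bin` as in the molecule scenarios:

```yaml
# Hedged example: wire the shipped helper scripts into the pre-/post-task hooks.
# The /usr/local/bin paths are an assumption taken from the molecule group_vars.
container_pre_tasks:
  - /usr/local/bin/list_all_images.sh          # report images before (re)starting containers

container_post_tasks:
  - /usr/local/bin/remove_untagged_images.sh   # clean up dangling images afterwards
  - /usr/local/bin/parse_container_fact.sh     # evaluate the update_container fact written by the role
```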
diff --git a/roles/container/hooks/converge b/roles/container/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/roles/container/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/roles/container/hooks/destroy b/roles/container/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/roles/container/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/roles/container/hooks/lint b/roles/container/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/roles/container/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/roles/container/hooks/molecule.rc b/roles/container/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/roles/container/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" + fi +} diff --git a/roles/container/hooks/test b/roles/container/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/roles/container/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/roles/container/hooks/tox.sh b/roles/container/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/roles/container/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? 
in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/roles/container/hooks/verify b/roles/container/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/roles/container/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/roles/container/meta/main.yml b/roles/container/meta/main.yml new file mode 100644 index 0000000..b884a7f --- /dev/null +++ b/roles/container/meta/main.yml @@ -0,0 +1,22 @@ +--- + +galaxy_info: + role_name: container + author: Bodo Schulz + description: A simple and generic role for deploying container + license: Apache + min_ansible_version: "2.9" + platforms: + - name: ArchLinux + - name: Debian + versions: + - buster + - bullseye + - bookworm + + galaxy_tags: + - container + - docker + - deployment + +dependencies: [] diff --git a/roles/container/molecule/configured/converge.yml b/roles/container/molecule/configured/converge.yml new file mode 100644 index 0000000..51cf3fd --- /dev/null +++ b/roles/container/molecule/configured/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.docker.container diff --git a/roles/container/molecule/configured/group_vars/all/vars.yml b/roles/container/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..846fb93 --- /dev/null +++ b/roles/container/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,91 @@ +--- + +container_registry: [] + # - username: 'bar1' + # - username: 'bodsch' + # password: 'XXX' + # description: login into dockerhub + # - username: 'foo' + # password: 'bar' + # description: login into my private registry + # host: 'hubby' + # - username: 'foo2' + # password: 'bar2' + +container_network: + - name: test + subnet: 172.3.27.0/24 + gateway: 172.3.27.2 + iprange: 172.3.27.0/26 + + - name: monitoring + state: absent + enable_ipv6: false + subnet: 172.9.27.0/24 + gateway: 172.9.27.2 + iprange: 172.9.27.0/26 + +container_fail: + error_at_launch: false + +container_pre_tasks: + - /usr/local/bin/list_all_images.sh + +container_post_tasks: + - /usr/local/bin/remove_untagged_images.sh + - /usr/local/bin/parse_container_fact.sh + +container: + - name: hello-world + hostname: hello-world + image: hello-world:latest + volumes: + - /run/docker.sock:/tmp/docker.sock:ro + - /tmp/nginx:/tmp/nginx:ro + - /dev/foo:/dev/foo:ro + - /tmp/testing3:/var/tmp/testing3:rw|{owner="999",group="1000"} + - /tmp/testing4:/var/tmp/testing4|{owner="1001",mode="0700"} + - /tmp/testing5:/var/tmp/testing5|{owner="1001",mode="0700",ignore=True} + - /tmp/testing6:/var/tmp/testing6:ro|{owner="999",group="1000"} + mounts: + - source: /tmp/testing1 + target: /var/tmp/testing1 + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + - source: /tmp/testing2 + target: /var/tmp/testing2 + type: bind + source_handling: + create: true + owner: "800" + group: "999" + mode: "0700" + - source: /opt/registry + target: /opt/data + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + - source: "{{ 
container_env_directory }}/registry/config.yml" + target: /etc/docker/registry/config.yml + type: bind + read_only: true + source_handling: + create: false + properties: + publisher.maxRecursionDepth: 600 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'replicator' + # replicator.domain: '' + replicator.tmp_dir: 'var/tmp' + environments: + VIRTUAL_HOST: hello-world.local + +... diff --git a/roles/container/molecule/configured/molecule.yml b/roles/container/molecule/configured/molecule.yml new file mode 100644 index 0000000..efb1007 --- /dev/null +++ b/roles/container/molecule/configured/molecule.yml @@ -0,0 +1,67 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . + +platforms: + - name: "${INSTANCE:-instance}" + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + # - /lib/modules:/lib/modules:ro + # - /var/lib/docker/overlay2:/var/lib/docker/overlay2:rw + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + published_ports: + - 8080:80 + - 8443:443 + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + #- lint + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/container/molecule/configured/prepare.yml b/roles/container/molecule/configured/prepare.yml new file mode 100644 index 0000000..3dbc903 --- /dev/null +++ b/roles/container/molecule/configured/prepare.yml @@ -0,0 +1,53 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: bodsch.docker.docker diff --git a/roles/container/molecule/configured/tests/test_default.py b/roles/container/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..82a9aa4 --- /dev/null +++ 
b/roles/container/molecule/configured/tests/test_default.py @@ -0,0 +1,190 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_env_directory(host, get_vars): + dir = host.file(get_vars.get('container_env_directory')) + assert dir.exists + assert dir.is_directory + + +@pytest.mark.parametrize("directories", [ + "/tmp/testing1", + "/tmp/testing2", + "/tmp/testing3", + "/tmp/testing4", + "/tmp/testing6", +]) +def test_volumes_directories(host, directories): + dir = host.file(directories) + assert dir.is_directory + + +@pytest.mark.parametrize("directories", [ + "/tmp/testing5", +]) +def test_volume_directory(host, directories): + dir = host.file(directories) + assert not dir.is_directory + + 
+@pytest.mark.parametrize("directories", [ + "/tmp/testing1", + "/opt/registry", +]) +def test_mountpoint_directories(host, directories): + dir = host.file(directories) + assert dir.is_directory + + +@pytest.mark.parametrize("files", [ + "hello-world" +]) +def test_environments(host, get_vars, files): + dir = host.file(get_vars.get('container_env_directory')) + + for file in [ + f"{dir.linked_to}/{files}/container.env", + ]: + f = host.file(file) + assert f.is_file + + +@pytest.mark.parametrize("files", [ + "hello-world" +]) +def test_properties(host, get_vars, files): + dir = host.file(get_vars.get('container_env_directory')) + + for file in [ + f"{dir.linked_to}/{files}/{files}.properties", + ]: + f = host.file(file) + assert f.is_file + + +@pytest.mark.parametrize("files", [ + "/usr/local/bin/list_all_container.sh", + "/usr/local/bin/list_all_images.sh", + "/usr/local/bin/parse_container_fact.sh", + "/usr/local/bin/prune.sh", + "/usr/local/bin/remove_stopped_container.sh", + "/usr/local/bin/remove_untagged_images.sh", +]) +def test_pre_and_post_task_files(host, get_vars, files): + f = host.file(files) + assert f.is_file + + +def test_environment_file(host, get_vars): + """ + """ + dir = host.file(get_vars.get('container_env_directory')) + + virtual_host = "hello-world.local" + + environment_file = host.file(f"{dir.linked_to}/hello-world/container.env") + + assert environment_file.is_file + assert virtual_host in environment_file.content_string + + +def test_property_file(host, get_vars): + """ + """ + dir = host.file(get_vars.get('container_env_directory')) + + repl_user_key = "replicator.user" + repl_user_val = "replicator" + + property_file = host.file(f"{dir.linked_to}/hello-world/hello-world.properties") + + assert property_file.is_file + assert repl_user_key in property_file.content_string + assert repl_user_val in property_file.content_string diff --git a/roles/container/molecule/default/converge.yml b/roles/container/molecule/default/converge.yml new file mode 100644 index 0000000..51cf3fd --- /dev/null +++ b/roles/container/molecule/default/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.docker.container diff --git a/roles/container/molecule/default/group_vars/all/vars.yml b/roles/container/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/roles/container/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/roles/container/molecule/default/molecule.yml b/roles/container/molecule/default/molecule.yml new file mode 100644 index 0000000..efb1007 --- /dev/null +++ b/roles/container/molecule/default/molecule.yml @@ -0,0 +1,67 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
+ +platforms: + - name: "${INSTANCE:-instance}" + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + # - /lib/modules:/lib/modules:ro + # - /var/lib/docker/overlay2:/var/lib/docker/overlay2:rw + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + published_ports: + - 8080:80 + - 8443:443 + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + #- lint + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/container/molecule/default/prepare.yml b/roles/container/molecule/default/prepare.yml new file mode 100644 index 0000000..3dbc903 --- /dev/null +++ b/roles/container/molecule/default/prepare.yml @@ -0,0 +1,53 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: bodsch.docker.docker diff --git a/roles/container/molecule/default/tests/test_default.py b/roles/container/molecule/default/tests/test_default.py new file mode 100644 index 0000000..a096365 --- /dev/null +++ b/roles/container/molecule/default/tests/test_default.py @@ -0,0 +1,94 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_env_directory(host, get_vars): + dir = host.file(get_vars.get('container_env_directory')) + assert dir.exists + assert dir.is_directory diff --git a/roles/container/molecule/many-properties/converge.yml b/roles/container/molecule/many-properties/converge.yml new file mode 100644 index 0000000..3bb25c5 --- /dev/null +++ b/roles/container/molecule/many-properties/converge.yml @@ -0,0 +1,14 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: ansible-container + +... 
diff --git a/roles/container/molecule/many-properties/group_vars/all/vars.yml b/roles/container/molecule/many-properties/group_vars/all/vars.yml new file mode 100644 index 0000000..5ff6ed9 --- /dev/null +++ b/roles/container/molecule/many-properties/group_vars/all/vars.yml @@ -0,0 +1,62 @@ +--- + +container_fail: + error_at_launch: false + +_container_defaults: &CONTAINER_DEFAULTS + restart_policy: always + # labels: + # watchdog: "false" + # log_options: + # labels: "service" + # max-size: "1m" + # max-file: "2" + +_images: + image_1: "busybox:latest" + image_2: "busybox:latest" + image_3: "hello-world:latest" + image_4: "busybox:latest" + image_5: "busybox:latest" + +container: "{{ + container_1 | + union(container_2 | default([])) | + union(container_3 | default([])) | + union(container_4 | default([])) | + union(container_5 | default([])) }}" + +container_1: + - name: busybox-1 + hostname: busybox-1 + image: "{{ _images.image_1 }}" + property_files: + - name: publisher.properties + properties: + replicator.user: 'replicator' + replicator.password: 'replicator' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + - name: database.properties + properties: + publisher.maxRecursionDepth: 200 + replicator.tmp_dir: 'var/tmp' + environments: + VIRTUAL_HOST: busybox-1.local + TEST_WORF: true + DBA_HOST: database + DBA_USER: username + +container_3: + - name: hello-world-1 + <<: *CONTAINER_DEFAULTS + # state: stopped + hostname: hello-world-1 + image: "{{ _images.image_3 }}" + properties: + publisher.enabled: false + replicator.tmp_dir: '/tmp/hello-world-1' + environments: + VIRTUAL_HOST: hello-world-1.local + +... diff --git a/roles/container/molecule/many-properties/molecule.yml b/roles/container/molecule/many-properties/molecule.yml new file mode 100644 index 0000000..581e74a --- /dev/null +++ b/roles/container/molecule/many-properties/molecule.yml @@ -0,0 +1,61 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
+ +platforms: + - name: "${INSTANCE:-instance}" + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + # - /lib/modules:/lib/modules:ro + # - /var/lib/docker/overlay2:/var/lib/docker/overlay2:rw + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - dependency + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/container/molecule/many-properties/prepare.yml b/roles/container/molecule/many-properties/prepare.yml new file mode 100644 index 0000000..39026bd --- /dev/null +++ b/roles/container/molecule/many-properties/prepare.yml @@ -0,0 +1,53 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: docker diff --git a/roles/container/molecule/many-properties/requirements.yml b/roles/container/molecule/many-properties/requirements.yml new file mode 100644 index 0000000..7cdb61f --- /dev/null +++ b/roles/container/molecule/many-properties/requirements.yml @@ -0,0 +1,7 @@ +--- + +- name: docker + src: bodsch.docker + version: 3.7.0 + +... diff --git a/roles/container/molecule/many-properties/tests/test_default.py b/roles/container/molecule/many-properties/tests/test_default.py new file mode 100644 index 0000000..48b1fb8 --- /dev/null +++ b/roles/container/molecule/many-properties/tests/test_default.py @@ -0,0 +1,129 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +@pytest.mark.parametrize("files", [ + "busybox-1", + "hello-world-1" +]) +def test_properties(host, get_vars, files): + dir = host.file(get_vars.get('container_env_directory')) + + for file in [ + f"{dir.linked_to}/{files}/{files}.properties", + ]: + f = host.file(file) + assert f.is_file + + +def test_default_property_file(host, get_vars): + """ + """ + dir = host.file(get_vars.get('container_env_directory')) + + repl_user_key = "replicator.tmp_dir" + repl_user_val = "var/tmp" + + property_file = host.file(f"{dir.linked_to}/busybox-1/busybox-1.properties") + + assert property_file.is_file + assert repl_user_key in property_file.content_string + assert repl_user_val in property_file.content_string + + +def test_custom_property_file(host, get_vars): + """ + """ + dir = host.file(get_vars.get('container_env_directory')) + + repl_user_key = "replicator.user" + repl_user_val = "replicator" + + property_file = host.file(f"{dir.linked_to}/busybox-1/publisher.properties") + + assert property_file.is_file + assert repl_user_key in property_file.content_string + assert repl_user_val in property_file.content_string diff --git a/roles/container/molecule/multiple-container-with-filter/converge.yml b/roles/container/molecule/multiple-container-with-filter/converge.yml new 
file mode 100644 index 0000000..51cf3fd --- /dev/null +++ b/roles/container/molecule/multiple-container-with-filter/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + gather_facts: true + + environment: + NETRC: '' + + roles: + - role: bodsch.docker.container diff --git a/roles/container/molecule/multiple-container-with-filter/group_vars/all/vars.yml b/roles/container/molecule/multiple-container-with-filter/group_vars/all/vars.yml new file mode 100644 index 0000000..6c40107 --- /dev/null +++ b/roles/container/molecule/multiple-container-with-filter/group_vars/all/vars.yml @@ -0,0 +1,157 @@ +--- + +container_network: + - name: test + subnet: 172.3.27.0/24 + gateway: 172.3.27.2 + iprange: 172.3.27.0/26 + +container_fail: + error_at_launch: false + +container_pre_tasks: + - /usr/local/bin/list_all_images.sh + +container_post_tasks: + - /usr/local/bin/remove_untagged_images.sh + - /usr/local/bin/parse_container_fact.sh + +container_filter: + by: "name" + names: + - busybox-2 + +_images: + image_1: "busybox:latest" + image_2: "busybox:latest" + image_3: "hello-world:latest" + image_4: "busybox:latest" + image_5: "busybox:latest" + +_container_defaults: &CONTAINER_DEFAULTS + restart_policy: always + labels: + watchdog: "false" + log_options: + labels: "service" + max-size: "1m" + max-file: "2" + +container: "{{ + container_1 | + union(container_2 | default([])) | + union(container_3 | default([])) | + union(container_4 | default([])) | + union(container_5 | default([])) }}" + +container_1: + - name: busybox-1 + hostname: busybox-1 + image: "{{ _images.image_1 }}" + volumes: + - /run/docker.sock:/tmp/docker.sock:ro + - /dev/foo:/dev/foo:ro + - /tmp/busybox-1/nginx:/tmp/nginx:ro + - /tmp/busybox-1/testing3:/var/tmp/testing3:rw|{owner="1001",group="1000"} + - /tmp/busybox-1/testing4:/var/tmp/testing4|{owner="1001",mode="0700"} + - /tmp/busybox-1/testing5:/var/tmp/testing5|{owner="1001",mode="0700",ignore=True} + - /tmp/busybox-1/testing6:/var/tmp/testing6:ro|{owner="1001",group="1000"} + mounts: + - source: /tmp/busybox-1/testing1 + target: /var/tmp/testing1 + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + - source: /tmp/busybox-1/testing2 + target: /var/tmp/testing2 + type: bind + source_handling: + create: true + owner: "800" + group: "800" + mode: "0700" + - source: /opt/busybox-1/registry + target: /opt/data + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + - source: "{{ container_env_directory }}/registry/config.yml" + target: /etc/docker/registry/config.yml + type: bind + read_only: true + source_handling: + create: false + +container_2: + - name: busybox-2 + <<: *CONTAINER_DEFAULTS + hostname: busybox-2 + image: "{{ _images.image_2 }}" + mounts: + - source: /tmp/busybox-2/testing1 + target: /var/tmp/testing1 + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + - source: /tmp/busybox-2/testing2 + target: /var/tmp/testing2 + type: bind + source_handling: + create: true + owner: "800" + group: "999" + mode: "0700" + environments: + VIRTUAL_HOST: busybox-2.local + # TEST_WORF: true + DBA_HOST: database + DBA_USER: username + properties: + publisher.enabled: "true" + +container_3: + - name: hello-world-1 + <<: *CONTAINER_DEFAULTS + # state: stopped + hostname: hello-world-1 + image: "{{ _images.image_3 }}" + properties: + publisher.maxRecursionDepth: 600 + # user and password for login to the staging serve + 
replicator.user: 'replicator' + replicator.password: 'replicator' + # replicator.domain: '' + replicator.tmp_dir: 'var/tmp' + environments: + VIRTUAL_HOST: hello-world-1.local + +container_4: + - name: busybox-4 + <<: *CONTAINER_DEFAULTS + state: absent + hostname: busybox-4 + image: "{{ _images.image_4 }}" + environments: + VIRTUAL_HOST: busybox-4.local + +container_5: + - name: busybox-5 + <<: *CONTAINER_DEFAULTS + state: present + hostname: busybox-5 + image: "{{ _images.image_5 }}" + restart: "false" + restart_policy: "no" + environments: + VIRTUAL_HOST: busybox-5.local + +... diff --git a/roles/container/molecule/multiple-container-with-filter/molecule.yml b/roles/container/molecule/multiple-container-with-filter/molecule.yml new file mode 100644 index 0000000..f015373 --- /dev/null +++ b/roles/container/molecule/multiple-container-with-filter/molecule.yml @@ -0,0 +1,64 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . + +platforms: + - name: "${INSTANCE:-instance}" + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + published_ports: + - 8080:80 + - 8443:443 + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/container/molecule/multiple-container-with-filter/prepare.yml b/roles/container/molecule/multiple-container-with-filter/prepare.yml new file mode 100644 index 0000000..3dbc903 --- /dev/null +++ b/roles/container/molecule/multiple-container-with-filter/prepare.yml @@ -0,0 +1,53 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: bodsch.docker.docker diff --git 
a/roles/container/molecule/multiple-container-with-filter/tests/test_default.py b/roles/container/molecule/multiple-container-with-filter/tests/test_default.py new file mode 100644 index 0000000..5ec241f --- /dev/null +++ b/roles/container/molecule/multiple-container-with-filter/tests/test_default.py @@ -0,0 +1,108 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + cwd = os.getcwd() + + if ('group_vars' in os.listdir(cwd)): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = "molecule/{}".format(os.environ.get('MOLECULE_SCENARIO_NAME')) + + return directory, molecule_directory + + +@pytest.fixture() +def get_vars(host): + """ + + """ + base_dir, molecule_dir = base_directory() + + file_defaults = "file={}/defaults/main.yml name=role_defaults".format(base_dir) + file_vars = "file={}/vars/main.yml name=role_vars".format(base_dir) + file_molecule = "file={}/group_vars/all/vars.yml name=test_vars".format(molecule_dir) + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(molecule_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directory(host, get_vars): + dir = host.file(get_vars.get('container_env_directory')) + assert dir.exists + assert dir.is_directory + +@pytest.mark.parametrize("directories", [ + "/tmp/busybox-2", + # mounts + "/tmp/busybox-2/testing1", + "/tmp/busybox-2/testing2", +]) +def test_volumes_directories(host, directories): + dir = host.file(directories) + assert dir.is_directory + +@pytest.mark.parametrize("directories", [ + "/tmp/busybox-1", + # volumes + "/tmp/busybox-1/nginx", + "/tmp/busybox-1/testing3", + "/tmp/busybox-1/testing4", + "/tmp/busybox-1/testing6", + # mounts + "/tmp/busybox-1/testing1", + "/tmp/busybox-1/testing2", + "/opt/busybox-1/registry", +]) +def test_no_volumes_directories(host, directories): + dir = host.file(directories) + assert not dir.is_directory + + +@pytest.mark.parametrize("files", [ + "busybox-2", +]) +def test_environments(host, get_vars, files): + dir = host.file(get_vars.get('container_env_directory')) + + for file in [ + f"{dir.linked_to}/{files}/container.env", + ]: + f = host.file(file) + assert f.is_file + + +@pytest.mark.parametrize("files", [ + "busybox-2" +]) +def test_properties(host, get_vars, files): + dir = host.file(get_vars.get('container_env_directory')) + + for file in [ + f"{dir.linked_to}/{files}/{files}.properties", + ]: + f = host.file(file) + assert f.is_file diff --git a/roles/container/molecule/multiple-container/converge.yml b/roles/container/molecule/multiple-container/converge.yml new file mode 100644 index 0000000..51cf3fd --- /dev/null +++ b/roles/container/molecule/multiple-container/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + gather_facts: true + + 
environment: + NETRC: '' + + roles: + - role: bodsch.docker.container diff --git a/roles/container/molecule/multiple-container/group_vars/all/vars.yml b/roles/container/molecule/multiple-container/group_vars/all/vars.yml new file mode 100644 index 0000000..c876e7d --- /dev/null +++ b/roles/container/molecule/multiple-container/group_vars/all/vars.yml @@ -0,0 +1,375 @@ +--- + +container_network: + - name: test + subnet: 172.3.27.0/24 + gateway: 172.3.27.2 + iprange: 172.3.27.0/26 + +container_fail: + error_at_launch: false + +container_custom_tasks: [] + +container_pre_tasks: [] +# - /usr/local/bin/list_all_images.sh + +container_post_tasks: [] +# - /usr/local/bin/remove_untagged_images.sh +# - /usr/local/bin/parse_container_fact.sh + +_images: + image_1: "busybox:latest" + image_2: "busybox:uclibc" + image_3: "hello-world:linux" + image_4: "busybox:stable" + image_5: "busybox:1.36.1-uclibc" + image_6: "busybox:1.36.1" + image_7: "bash:devel-alpine3.18" + image_8: "hello-world:latest" + image_9: "busybox:latest" + image_10: "busybox:latest" + image_11: "bash:4.4.23" + +_container_defaults: &CONTAINER_DEFAULTS + restart_policy: always + labels: + watchdog: "false" + log_options: + labels: "service" + max-size: "1m" + max-file: "2" + +container: "{{ + container_1 | + union(container_2 | default([])) | + union(container_3 | default([])) | + union(container_4 | default([])) | + union(container_5 | default([])) | + union(container_6 | default([])) | + union(container_7 | default([])) | + union(container_8 | default([])) | + union(container_9 | default([])) | + union(container_10 | default([])) | + union(container_11 | default([])) + }}" + +container_1: + - name: busybox-1 + hostname: busybox-1 + image: "{{ _images.image_1 }}" + volumes: + - /run/docker.sock:/tmp/docker.sock:ro + - /dev/foo:/dev/foo:ro + - /tmp/busybox-1/nginx:/tmp/nginx:ro + - /tmp/busybox-1/testing3:/var/tmp/testing3:rw|{owner="1001",group="1000"} + - /tmp/busybox-1/testing4:/var/tmp/testing4|{owner="1001",mode="0700"} + - /tmp/busybox-1/testing5:/var/tmp/testing5|{owner="1001",mode="0700",ignore=True} + - /tmp/busybox-1/testing6:/var/tmp/testing6:ro|{owner="1001",group="1000"} + mounts: + - source: /tmp/busybox-1/testing1 + target: /var/tmp/testing1 + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + - source: /tmp/busybox-1/testing2 + target: /var/tmp/testing2 + type: bind + source_handling: + create: true + owner: "800" + group: "800" + mode: "0700" + - source: /opt/busybox-1/registry + target: /opt/data + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + - source: "{{ container_env_directory }}/registry/config.yml" + target: /etc/docker/registry/config.yml + type: bind + read_only: true + source_handling: + create: false + +container_2: + - name: busybox-2 + <<: *CONTAINER_DEFAULTS + hostname: busybox-2 + image: "{{ _images.image_2 }}" + mounts: + - source: /tmp/busybox-2/testing1 + target: /var/tmp/testing1 + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + - source: /tmp/busybox-2/testing2 + target: /var/tmp/testing2 + type: bind + source_handling: + create: true + owner: "800" + group: "999" + mode: "0700" + environments: + VIRTUAL_HOST: busybox-2.local + # TEST_WORF: true + DBA_HOST: database + DBA_USER: username + +container_3: + - name: hello-world-1 + <<: *CONTAINER_DEFAULTS + state: stopped + hostname: hello-world-1 + image: "{{ _images.image_3 }}" + properties: + 
publisher.maxRecursionDepth: 600 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'replicator' + # replicator.domain: '' + replicator.tmp_dir: 'var/tmp' + environments: + VIRTUAL_HOST: hello-world-1.local + +container_4: + - name: busybox-4 + <<: *CONTAINER_DEFAULTS + state: absent + hostname: busybox-4 + image: "{{ _images.image_4 }}" + environments: + VIRTUAL_HOST: busybox-4.local + +container_5: + - name: busybox-5 + <<: *CONTAINER_DEFAULTS + hostname: busybox-5 + image: "{{ _images.image_5 }}" + restart: "false" + restart_policy: "no" + environments: + VIRTUAL_HOST: busybox-5.local + property_files: + - name: publisher.properties + properties: + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + - name: database.properties + properties: + # + publisher.maxRecursionDepth: 900 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + +container_6: + - name: busybox-6 + <<: *CONTAINER_DEFAULTS + hostname: busybox-6 + image: "{{ _images.image_6 }}" + restart: "false" + environments: + VIRTUAL_HOST: busybox-6.local + property_files: + - name: publisher.properties + properties: + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + - name: database.properties + properties: + # + publisher.maxRecursionDepth: 900 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + +container_7: + - name: busybox-7 + <<: *CONTAINER_DEFAULTS + hostname: busybox-7 + image: "{{ _images.image_7 }}" + restart: "false" + environments: + VIRTUAL_HOST: busybox-7.local + property_files: + - name: publisher.properties + properties: + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + - name: database.properties + properties: + # + publisher.maxRecursionDepth: 900 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + +container_8: + - name: busybox-8 + <<: *CONTAINER_DEFAULTS + state: present + hostname: busybox-8 + image: "{{ _images.image_8 }}" + restart: "false" + environments: + VIRTUAL_HOST: busybox-8.local + property_files: + - name: publisher.properties + properties: + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + - name: database.properties + properties: + # + publisher.maxRecursionDepth: 900 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + +container_9: + - name: busybox-9 + <<: *CONTAINER_DEFAULTS + state: present + hostname: busybox-9 + image: "{{ _images.image_9 }}" + mounts: + - source: /tmp/busybox-9/testing1 + target: /var/tmp/testing1 + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: 
"0750" + - source: /tmp/busybox-9/testing2 + target: /var/tmp/testing2 + type: bind + source_handling: + create: true + owner: "800" + group: "999" + mode: "0700" + environments: + VIRTUAL_HOST: busybox-9.local + property_files: + - name: publisher.properties + properties: + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + - name: database.properties + properties: + # + publisher.maxRecursionDepth: 900 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + +container_10: + - name: busybox-10 + <<: *CONTAINER_DEFAULTS + state: only_present + hostname: busybox-10 + image: "{{ _images.image_10 }}" + restart: "false" + environments: + VIRTUAL_HOST: busybox-10.local + property_files: + - name: publisher.properties + properties: + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + - name: database.properties + properties: + # + publisher.maxRecursionDepth: 900 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + +container_11: + - name: busybox-11 + <<: *CONTAINER_DEFAULTS + state: present + hostname: busybox-11 + image: "{{ _images.image_11 }}" + restart: "false" + environments: + VIRTUAL_HOST: busybox-11.local + property_files: + - name: publisher.properties + properties: + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + - name: database.properties + properties: + # + publisher.maxRecursionDepth: 900 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + mounts: + - source: /tmp/busybox-11/testing1 + target: /var/tmp/testing1 + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + - source: /tmp/busybox-11/testing2 + target: /var/tmp/testing2 + type: bind + source_handling: + create: true + owner: "800" + group: "800" + mode: "0700" + - source: /opt/busybox-11/registry + target: /opt/data + type: bind + source_handling: + create: true + owner: "1000" + group: "1000" + mode: "0750" + +... diff --git a/roles/container/molecule/multiple-container/molecule.yml b/roles/container/molecule/multiple-container/molecule.yml new file mode 100644 index 0000000..7aef013 --- /dev/null +++ b/roles/container/molecule/multiple-container/molecule.yml @@ -0,0 +1,62 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
+ +platforms: + - name: "${INSTANCE:-instance}" + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + published_ports: + - 8080:80 + - 8443:443 + +provisioner: + name: ansible + ansible_args: + - --diff + # - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - dependency + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/container/molecule/multiple-container/prepare.yml b/roles/container/molecule/multiple-container/prepare.yml new file mode 100644 index 0000000..3dbc903 --- /dev/null +++ b/roles/container/molecule/multiple-container/prepare.yml @@ -0,0 +1,53 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: bodsch.docker.docker diff --git a/roles/container/molecule/multiple-container/tests/test_default.py b/roles/container/molecule/multiple-container/tests/test_default.py new file mode 100644 index 0000000..ce786de --- /dev/null +++ b/roles/container/molecule/multiple-container/tests/test_default.py @@ -0,0 +1,106 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + cwd = os.getcwd() + + if ('group_vars' in os.listdir(cwd)): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = "molecule/{}".format(os.environ.get('MOLECULE_SCENARIO_NAME')) + + return directory, molecule_directory + + +@pytest.fixture() +def get_vars(host): + """ + + """ + base_dir, molecule_dir = base_directory() + + file_defaults = "file={}/defaults/main.yml name=role_defaults".format(base_dir) + file_vars = "file={}/vars/main.yml name=role_vars".format(base_dir) + file_molecule = "file={}/group_vars/all/vars.yml name=test_vars".format(molecule_dir) + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(molecule_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directory(host, get_vars): + dir = host.file(get_vars.get('container_env_directory')) + assert dir.exists + assert dir.is_directory + + +@pytest.mark.parametrize("directories", [ + "/tmp/busybox-1", + "/tmp/busybox-2", + # volumes + "/tmp/busybox-1/nginx", + "/tmp/busybox-1/testing3", + "/tmp/busybox-1/testing4", + "/tmp/busybox-1/testing6", + # mounts + "/tmp/busybox-1/testing1", + "/tmp/busybox-1/testing2", + "/opt/busybox-1/registry", + # mounts + "/tmp/busybox-2/testing1", + "/tmp/busybox-2/testing2", +]) +def test_volumes_directories(host, directories): + dir = host.file(directories) + assert dir.is_directory + + +@pytest.mark.parametrize("files", [ + "busybox-2", + "busybox-4", + "busybox-5", + "hello-world-1" +]) +def test_environments(host, get_vars, files): + dir = host.file(get_vars.get('container_env_directory')) + + for file in [ + f"{dir.linked_to}/{files}/container.env", + ]: + f = host.file(file) + assert f.is_file + + +@pytest.mark.parametrize("files", [ + "hello-world-1" +]) +def test_properties(host, get_vars, files): + dir = host.file(get_vars.get('container_env_directory')) + + for file in [ + f"{dir.linked_to}/{files}/{files}.properties", + ]: + f = host.file(file) + assert f.is_file diff --git a/roles/container/molecule/update-container/converge.yml b/roles/container/molecule/update-container/converge.yml new file mode 100644 index 0000000..5d4d7ac --- /dev/null +++ b/roles/container/molecule/update-container/converge.yml @@ -0,0 +1,20 @@ +--- + +- name: update + hosts: instance + any_errors_fatal: false + gather_facts: true + + vars: + container: + - name: busybox + hostname: busybox + image: busybox:latest + - name: busybox-2 + hostname: busybox-2 + image: busybox:latest + + roles: + - role: bodsch.docker.container + +... diff --git a/roles/container/molecule/update-container/group_vars/all/vars.yml b/roles/container/molecule/update-container/group_vars/all/vars.yml new file mode 100644 index 0000000..d73f99b --- /dev/null +++ b/roles/container/molecule/update-container/group_vars/all/vars.yml @@ -0,0 +1,20 @@ +--- + +# container_clean_update_fact: false + +container_fail: + error_at_launch: false + +container: + - name: busybox + hostname: busybox + image: busybox:latest + - name: busybox-2 + hostname: busybox-2 + image: busybox:latest + +container_post_tasks: + - /usr/local/bin/remove_untagged_images.sh + - /usr/local/bin/list_all_images.sh + +... 
diff --git a/roles/container/molecule/update-container/molecule.yml b/roles/container/molecule/update-container/molecule.yml new file mode 100644 index 0000000..46d07a1 --- /dev/null +++ b/roles/container/molecule/update-container/molecule.yml @@ -0,0 +1,55 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . + +platforms: + - name: "${INSTANCE:-instance}" + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + - /lib/modules:/lib/modules:ro + - /var/lib/docker/overlay2:/var/lib/docker/overlay2:rw + capabilities: ALL + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - dependency + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/container/molecule/update-container/prepare.yml b/roles/container/molecule/update-container/prepare.yml new file mode 100644 index 0000000..f568fb6 --- /dev/null +++ b/roles/container/molecule/update-container/prepare.yml @@ -0,0 +1,92 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: bodsch.docker.docker + +- name: deploy container + hosts: instance + any_errors_fatal: false + gather_facts: true + + vars: + container: + - name: busybox-1 + hostname: busybox-1 + image: busybox:1.35.0 + property_files: + - name: publisher.properties + properties: + replicator.user: 'replicator' + replicator.password: 'replicator' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + - name: database.properties + properties: + publisher.maxRecursionDepth: 200 + replicator.tmp_dir: 'var/tmp' + environments: + VIRTUAL_HOST: busybox-1.local + TEST_WORF: true + DBA_HOST: database + DBA_USER: username + + - name: busybox-2 + hostname: busybox-2 + image: busybox:1.36.0 + + - name: busybox-3 + hostname: busybox-3 + image: busybox:latest + state: present + + roles: + - role: bodsch.docker.container diff --git 
a/roles/container/molecule/update-properties/converge.yml b/roles/container/molecule/update-properties/converge.yml new file mode 100644 index 0000000..bc6c740 --- /dev/null +++ b/roles/container/molecule/update-properties/converge.yml @@ -0,0 +1,11 @@ +--- + +- name: update + hosts: instance + any_errors_fatal: true + gather_facts: true + + roles: + - role: bodsch.docker.container + +... diff --git a/roles/container/molecule/update-properties/group_vars/all/vars.yml b/roles/container/molecule/update-properties/group_vars/all/vars.yml new file mode 100644 index 0000000..c20de05 --- /dev/null +++ b/roles/container/molecule/update-properties/group_vars/all/vars.yml @@ -0,0 +1,33 @@ +--- + +container_fail: + error_at_launch: false + +container: + - name: hello-world + hostname: hello-world + image: hello-world:latest + environments: + VIRTUAL_HOST: hello-world.local + properties: + # + publisher.maxRecursionDepth: 900 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + property_files: + - name: publisher.properties + properties: + replicator.user: 'replicator' + replicator.password: 'RSActgjk3shTcgosJzn4vT2.Z9i47QLV' + replicator.domain: 'localhost' + replicator.tmp_dir: 'var/tmp' + - name: database.properties + +container_post_tasks: + - /usr/local/bin/list_all_images.sh + - /usr/local/bin/remove_untagged_images.sh + +... diff --git a/roles/container/molecule/update-properties/molecule.yml b/roles/container/molecule/update-properties/molecule.yml new file mode 100644 index 0000000..5c18499 --- /dev/null +++ b/roles/container/molecule/update-properties/molecule.yml @@ -0,0 +1,61 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
+ +platforms: + - name: "${INSTANCE:-instance}" + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + # - /lib/modules:/lib/modules:ro + # - /var/lib/docker/overlay2:/var/lib/docker/overlay2:rw + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 30 + fact_caching_connection: "${MOLECULE_EPHEMERAL_DIRECTORY}/ansible_facts" + +scenario: + test_sequence: + - dependency + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/container/molecule/update-properties/prepare.yml b/roles/container/molecule/update-properties/prepare.yml new file mode 100644 index 0000000..62ed999 --- /dev/null +++ b/roles/container/molecule/update-properties/prepare.yml @@ -0,0 +1,72 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: make sure python3-apt is installed (only debian based) + ansible.builtin.package: + name: + - python3-apt + state: present + when: + - ansible_os_family | lower == 'debian' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: bodsch.docker.docker + +- name: deploy first container + hosts: instance + any_errors_fatal: true + gather_facts: true + + roles: + - role: ansible-container + vars: + container: + - name: hello-world + hostname: hello-world + image: hello-world:latest + properties: + # + publisher.maxRecursionDepth: 200 + # user and password for login to the staging serve + replicator.user: 'replicator' + replicator.password: 'replicator' diff --git a/roles/container/molecule/update-properties/tests/test_default.py b/roles/container/molecule/update-properties/tests/test_default.py new file mode 100644 index 0000000..5934ec2 --- /dev/null +++ b/roles/container/molecule/update-properties/tests/test_default.py @@ -0,0 +1,127 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + 
if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +@pytest.mark.parametrize("files", [ + "hello-world" +]) +def test_properties(host, get_vars, files): + dir = host.file(get_vars.get('container_env_directory')) + + for file in [ + f"{dir.linked_to}/{files}/{files}.properties", + ]: + f = host.file(file) + assert f.is_file + + +def test_property_file(host, get_vars): + """ + """ + dir = host.file(get_vars.get('container_env_directory')) + + repl_user_key = "replicator.user" + repl_user_val = "replicator" + + property_file = host.file(f"{dir.linked_to}/hello-world/hello-world.properties") + + assert property_file.is_file + assert repl_user_key in property_file.content_string + assert repl_user_val in property_file.content_string + + +def test_property_changes(host, get_vars): + """ + """ + import re + dir = host.file(get_vars.get('container_env_directory')) + + property_file = host.file(f"{dir.linked_to}/hello-world/hello-world.properties") + content = property_file.content_string.split("\n") + + re_recursion_depth = re.compile("publisher.maxRecursionDepth.*= 900") + + assert (len(list(filter(re_recursion_depth.match, content))) > 0) diff --git a/roles/container/tasks/custom-tasks/main.yml 
b/roles/container/tasks/custom-tasks/main.yml
new file mode 100644
index 0000000..35c1938
--- /dev/null
+++ b/roles/container/tasks/custom-tasks/main.yml
@@ -0,0 +1,63 @@
+---
+
+- name: copy custom tasks
+  become: true
+  ansible.builtin.copy:
+    src: "{{ item }}"
+    dest: "/usr/local/bin"
+    mode: 0755
+  loop: "{{ container_custom_tasks | default([]) }}"
+  tags:
+    - container_install_pre_or_post_tasks
+
+- name: validate pre tasks
+  when:
+    - container_pre_tasks is defined
+    - container_pre_tasks | count > 0
+  tags:
+    - container_install_pre_or_post_tasks
+  block:
+    - name: ensure pre tasks are available
+      ansible.builtin.stat:
+        path: "{{ item }}"
+        get_checksum: false
+        get_mime: false
+        get_attributes: false
+      register: _pre_task_available
+      loop: "{{ container_pre_tasks }}"
+
+    - name: assert missing pre task
+      ansible.builtin.assert:
+        that:
+          - _pre_task_available.results | files_available | count == container_pre_tasks | count
+        msg:
+          - not all pre tasks are available
+          - missing {{ container_pre_tasks | difference(_pre_task_available.results | files_available) }}
+        quiet: true
+
+- name: validate post tasks
+  when:
+    - container_post_tasks is defined
+    - container_post_tasks | count > 0
+  tags:
+    - container_install_pre_or_post_tasks
+  block:
+    - name: ensure post tasks are available
+      ansible.builtin.stat:
+        path: "{{ item }}"
+        get_checksum: false
+        get_mime: false
+        get_attributes: false
+      register: _post_task_available
+      loop: "{{ container_post_tasks }}"
+
+    - name: assert missing post task
+      ansible.builtin.assert:
+        that:
+          - _post_task_available.results | files_available | count == container_post_tasks | count
+        msg:
+          - not all post tasks are available
+          - missing {{ container_post_tasks | difference(_post_task_available.results | files_available) }}
+        quiet: true
+
+...
diff --git a/roles/container/tasks/custom-tasks/post-tasks.yml b/roles/container/tasks/custom-tasks/post-tasks.yml
new file mode 100644
index 0000000..7930cdf
--- /dev/null
+++ b/roles/container/tasks/custom-tasks/post-tasks.yml
@@ -0,0 +1,22 @@
+---
+
+- name: execute post tasks
+  ansible.builtin.command: |
+    {{ item }}
+  changed_when: false
+  register: _post_task_output
+  loop: "{{ container_post_tasks }}"
+  tags:
+    - container_exec_post_tasks
+
+- name: result
+  ansible.builtin.debug:
+    msg:
+      - "{{ item.stdout }}"
+  loop: "{{ _post_task_output.results }}"
+  loop_control:
+    label: "{{ item.item }}"
+  tags:
+    - container_exec_post_tasks
+
+...
diff --git a/roles/container/tasks/custom-tasks/pre-tasks.yml b/roles/container/tasks/custom-tasks/pre-tasks.yml
new file mode 100644
index 0000000..52a985f
--- /dev/null
+++ b/roles/container/tasks/custom-tasks/pre-tasks.yml
@@ -0,0 +1,22 @@
+---
+
+- name: execute pre tasks
+  ansible.builtin.command: |
+    {{ item }}
+  changed_when: false
+  register: _pre_task_output
+  loop: "{{ container_pre_tasks }}"
+  tags:
+    - container_exec_pre_tasks
+
+- name: result
+  ansible.builtin.debug:
+    msg:
+      - "{{ item.stdout }}"
+  loop: "{{ _pre_task_output.results }}"
+  loop_control:
+    label: "{{ item.item }}"
+  tags:
+    - container_exec_pre_tasks
+
+...
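For reference, the hook machinery above is driven by three list variables: container_custom_tasks are files copied to /usr/local/bin, while container_pre_tasks and container_post_tasks are the commands validated here and executed by pre-tasks.yml / post-tasks.yml around the container launch. A minimal sketch of wiring them together, reusing the script paths that already appear in the molecule group_vars of this patch (the custom task source file name is a hypothetical example):

container_custom_tasks:
  - list_all_images.sh            # hypothetical source file; installed to /usr/local/bin with mode 0755

container_pre_tasks:
  - /usr/local/bin/list_all_images.sh

container_post_tasks:
  - /usr/local/bin/remove_untagged_images.sh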
diff --git a/roles/container/tasks/download.yml b/roles/container/tasks/download.yml new file mode 100644 index 0000000..a57bb9e --- /dev/null +++ b/roles/container/tasks/download.yml @@ -0,0 +1,91 @@ +--- + +- name: "pull container from container registry" + community.general.docker_image: + name: "{{ item }}" + source: pull + force_tag: true + force_source: true + register: _container_images_information_pulled + ignore_errors: false + loop: "{{ container_images_needed }}" + loop_control: + label: "{{ item }}" + tags: + - container_pull + +- name: update needed | true # noqa no-handler + ansible.builtin.set_fact: + container_update_needed: true + container_changed: "{{ _container_images_information_pulled | changed }}" + when: + - _container_images_information_pulled.changed + tags: + - container_pull + +- name: all images are up to date + ansible.builtin.debug: + msg: + - all images are up to date ... + when: + - not container_update_needed + tags: + - container_pull + +- name: container restart needed + ansible.builtin.debug: + msg: + - container restart needed ... + - "changed container(s) {{ container_changed | list }}" + when: + - container_update_needed + tags: + - container_pull + +- name: update container for recreate running docker instance + ansible.builtin.set_fact: + container: "{{ container | update(container_changed) }}" + when: + - container_update_needed + tags: + - container_pull + +# READ local_facts +# read a present update_container from ansible facts +# +- name: set local fact for container + ansible.builtin.set_fact: + local_container: "{{ ansible_local.update_container.update_needed }}" + when: + - ansible_local.update_container is defined + - ansible_local.update_container.update_needed is defined + - ansible_local.update_container.update_needed | length != 0 + tags: + - container_pull + +- name: save changed containers + when: + - container_update_needed + - container | length != 0 + tags: + - container_pull + block: + - name: create custom fact file + bodsch.core.facts: + name: update_container + facts: + update_needed: "{{ container }}" + + - name: do facts module to get latest information + ansible.builtin.setup: + +- name: redefine container from local fact + ansible.builtin.set_fact: + container: "{{ local_container }}" + when: + - local_container is defined + - local_container | length != 0 + tags: + - container_pull + +... 
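download.yml persists the list of containers that received a new image in a local custom fact named update_container, so the information survives a failed launch; tasks/main.yml later removes /etc/ansible/facts.d/update_container.fact once container_clean_update_fact is enabled. A minimal sketch of inspecting that fact from a play, assuming it is exposed under ansible_local exactly as the read task above expects:

- name: show containers that still need to be (re)created
  ansible.builtin.debug:
    var: ansible_local.update_container.update_needed
  when:
    - ansible_local.update_container is defined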
diff --git a/roles/container/tasks/get_information.yml b/roles/container/tasks/get_information.yml
new file mode 100644
index 0000000..d027cd9
--- /dev/null
+++ b/roles/container/tasks/get_information.yml
@@ -0,0 +1,27 @@
+---
+
+- name: "get container information from host '{{ ansible_play_hosts[0] }}'"
+  community.docker.docker_container_info:
+    name: "{{ item.name }}"
+  register: container_info
+  no_log: true
+  with_items: "{{ container }}"
+
+- name: information
+  when:
+    - container_info.results is defined
+  block:
+    - name: set local docker data
+      ansible.builtin.set_fact:
+        _local_container_data: "{{ container_info | container_hashes }}"
+
+    - name: set diffed container information
+      ansible.builtin.set_fact:
+        _diffed_container_data: "{{ _local_container_data | compare_dict(_pulled_container_data) }}"
+
+- name: debug
+  ansible.builtin.debug:
+    msg: "{{ item }}"
+  when: item is defined
+  loop:
+    - "{{ _diffed_container_data }}"
diff --git a/roles/container/tasks/launch/launch_container.yml b/roles/container/tasks/launch/launch_container.yml
new file mode 100644
index 0000000..7a6c5ad
--- /dev/null
+++ b/roles/container/tasks/launch/launch_container.yml
@@ -0,0 +1,113 @@
+---
+
+- name: "launching container"
+  community.docker.docker_container:
+    name: "{{ item.name }}"
+    hostname: "{{ item.hostname | default(item.name) }}"
+    image: "{{ item.image }}"
+    auto_remove: "{{ item.auto_remove | default(omit) }}"
+    blkio_weight: "{{ item.blkio_weight | default(omit) }}"
+    cap_drop: "{{ item.cap_drop | default(omit) }}"
+    capabilities: "{{ item.capabilities | default(omit) }}"
+    cgroup_parent: "{{ item.cgroup_parent | default(omit) }}"
+    command: "{{ item.command | default(omit) }}"
+    command_handling: "{{ item.command_handling | default(omit) }}"
+    comparisons: "{{ container_comparisons }}"
+    container_default_behavior: "{{ item.container_default_behavior | default(container_default_behavior) }}"
+    cpu_period: "{{ item.cpu_period | default(omit) }}"
+    cpu_quota: "{{ item.cpu_quota | default(omit) }}"
+    cpu_shares: "{{ item.cpu_shares | default(omit) }}"
+    cpus: "{{ item.cpus | default(omit) }}"
+    cpuset_cpus: "{{ item.cpuset_cpus | default(omit) | string }}"
+    cpuset_mems: "{{ item.cpuset_mems | default(omit) | string }}"
+    debug: "{{ item.debug | default(False) | bool }}"
+    default_host_ip: "{{ item.default_host_ip | default(omit) | string }}"
+    device_read_bps: "{{ item.device_read_bps | default(omit) }}"
+    device_read_iops: "{{ item.device_read_iops | default(omit) }}"
+    device_requests: "{{ item.device_requests | default(omit) }}"
+    device_write_bps: "{{ item.device_write_bps | default(omit) }}"
+    device_write_iops: "{{ item.device_write_iops | default(omit) }}"
+    devices: "{{ item.devices | default(omit) }}"
+    dns_opts: "{{ item.dns_opts | default(omit) }}"
+    dns_search_domains: "{{ item.dns_search_domains | default(omit) }}"
+    dns_servers: "{{ item.dns_servers | default(omit) }}"
+    docker_host: "{{ item.docker_host | default(omit) }}"
+    domainname: "{{ item.domainname | default(omit) }}"
+    entrypoint: "{{ item.entrypoint | default(omit) }}"
+    env: "{{ item.env | default(omit) }}"
+    env_file: "{{ container_env_directory }}/{{ item.name }}/container.env"
+    etc_hosts: "{{ item.etc_hosts | default(omit) }}"
+    exposed_ports: "{{ item.exposed_ports | default(omit) }}"
+    force_kill: "{{ item.force_kill | default(omit) }}"
+    groups: "{{ item.groups | default(omit) }}"
+    healthcheck: "{{ item.healthcheck | default(omit) }}"
+    kernel_memory: "{{ item.kernel_memory | default(omit) }}"
+    labels: "{{ 
item.labels | default({}) }}" + links: "{{ item.links | default(omit) }}" + log_driver: "{{ item.log_driver | default('json-file') }}" + log_options: "{{ item.log_options | default({}) }}" + memory: "{{ item.memory | default(omit) }}" + memory_reservation: "{{ item.memory_reservation | default(omit) }}" + memory_swap: "{{ item.memory_swap | default(omit) }}" + memory_swappiness: "{{ item.memory_swappiness | default(omit) }}" + mounts: "{{ item.mounts | default([]) | remove_source_handling }}" + network_mode: "{{ item.network_mode | default('bridge') }}" + networks: "{{ item.networks | default(omit) }}" + networks_cli_compatible: "{{ item.networks_cli_compatible | default(omit) }}" + oom_killer: "{{ item.oom_killer | default(omit) }}" + oom_score_adj: "{{ item.oom_score_adj | default(omit) }}" + output_logs: "{{ item.output_logs | default(omit) }}" # Only effective when log_driver is set to json-file, journald, or local. + privileged: "{{ item.privileged | default(omit) }}" + publish_all_ports: "{{ item.publish_all_ports | default(omit) }}" + published_ports: "{{ item.published_ports | default(omit) }}" + read_only: "{{ item.read_only | default(omit) }}" + recreate: "{{ item.recreate | default(omit) }}" # <== RESTARTS container after EVERY ansible run + restart: "{{ item.restart | default(omit) }}" + restart_policy: "{{ item.restart_policy | default('on-failure') }}" + restart_retries: "{{ item.restart_retries | default(omit) }}" + security_opts: "{{ item.security_opts | default(omit) }}" + shm_size: "{{ item.shm_size | default(omit) }}" + state: "{{ item.state | default('started') }}" + storage_opts: "{{ item.storage_opts | default(omit) }}" + sysctls: "{{ item.sysctls | default(omit) }}" + timeout: "{{ item.timeout | default(omit) }}" + tmpfs: "{{ item.tmpfs | default(omit) }}" + ulimits: "{{ item.ulimits | default(omit) }}" + user: "{{ item.user | default(omit) }}" + volumes: "{{ item.volumes | default([]) | remove_custom_fields }}" + volumes_from: "{{ item.volumes_from | default([]) }}" + working_dir: "{{ item.working_dir | default(omit) }}" + register: create_container_instances + ignore_errors: true + loop: "{{ container_to_launch }}" + loop_control: + label: "{{ item.name.ljust(12) }} - hostname: {{ item.name }} - image: {{ item.image }}" + tags: + - container_launch + +- name: define container states + ansible.builtin.set_fact: + changed: "{{ create_container_instances | reporting('changed') }}" + failed: "{{ create_container_instances | reporting('failed') }}" + +- name: reporting changes + ansible.builtin.debug: + msg: "{{ changed }}" + when: + - container_reporting.changes + +- name: reporting failed + ansible.builtin.debug: + msg: "{{ failed }}" + when: + - container_reporting.failed + - not container_fail.error_at_launch | default('true') + +- name: Exit if the start of a container has failed + ansible.builtin.fail: + msg: "{{ failed }}" + when: + - failed | default([]) | count > 0 + - container_fail.error_at_launch | default('true') + +... 
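The volumes and mounts handed to community.docker.docker_container above are first stripped of role-specific metadata: the |{owner=...,group=...,mode=...} suffix on volume strings and the source_handling block under mounts only drive host directory creation in prepare.yml. A rough before/after sketch, where the stripped form is an assumption based on the remove_custom_fields and remove_source_handling filter names:

# as written in the molecule group_vars (role-specific syntax)
volumes:
  - /tmp/busybox-1/testing4:/var/tmp/testing4|{owner="1001",mode="0700"}
mounts:
  - source: /tmp/busybox-1/testing1
    target: /var/tmp/testing1
    type: bind
    source_handling:
      create: true
      owner: "1000"
      group: "1000"
      mode: "0750"

# roughly what docker_container receives after filtering
volumes:
  - /tmp/busybox-1/testing4:/var/tmp/testing4
mounts:
  - source: /tmp/busybox-1/testing1
    target: /var/tmp/testing1
    type: bind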
diff --git a/roles/container/tasks/launch/launch_for_older_ansible.yml b/roles/container/tasks/launch/launch_for_older_ansible.yml new file mode 100644 index 0000000..47d826f --- /dev/null +++ b/roles/container/tasks/launch/launch_for_older_ansible.yml @@ -0,0 +1,82 @@ +--- + +- name: "launching container (ansible < 2.10)" + community.docker.docker_container: + name: "{{ item.name }}" + hostname: "{{ item.name }}" + image: "{{ item.image }}" + capabilities: "{{ item.capabilities | default(omit) }}" + command: "{{ item.command | default(omit) }}" + comparisons: + # '*': ignore + image: strict # don't restart containers with older versions of the image + env: strict # we want precisely this environment + labels: ignore + cpu_period: "{{ item.cpu_period | default(omit) }}" + cpu_quota: "{{ item.cpu_quota | default(omit) }}" + cpu_shares: "{{ item.cpu_shares | default(omit) }}" + cpus: "{{ item.cpus | default(omit) }}" + cpuset_cpus: "{{ item.cpuset_cpus | default(omit) | string }}" + cpuset_mems: "{{ item.cpuset_mems | default(omit) | string }}" + devices: "{{ item.devices | default(omit) }}" + dns_servers: "{{ item.dns_servers | default(omit) }}" + entrypoint: "{{ item.entrypoint | default(omit) }}" + env_file: "{{ container_env_directory }}/{{ item.name }}/container.env" + etc_hosts: "{{ item.etc_hosts | default(omit) }}" + exposed_ports: "{{ item.exposed_ports | default(omit) }}" + force_kill: "{{ item.force_kill | default(omit) }}" + groups: "{{ item.groups | default(omit) }}" + healthcheck: "{{ item.healthcheck | default(omit) }}" + kernel_memory: "{{ item.kernel_memory | default(omit) }}" + labels: "{{ item.labels | default({}) }}" + links: "{{ item.links | default(omit) }}" + log_driver: "{{ item.log_driver | default('json-file') }}" + log_options: "{{ item.log_options | default({}) }}" + memory: "{{ item.memory | default(omit) }}" + memory_reservation: "{{ item.memory_reservation | default(omit) }}" + memory_swap: "{{ item.memory_swap | default(omit) }}" + memory_swappiness: "{{ item.memory_swappiness | default(omit) }}" + mounts: "{{ item.mounts | default([]) | remove_source_handling }}" + network_mode: "{{ item.network_mode | default('bridge') }}" + # ansible 2.10 + # container_default_behavior: "compatibility" + networks: "{{ item.networks | default(omit) }}" + networks_cli_compatible: true + output_logs: true + privileged: "{{ item.privileged | default(omit) }}" + published_ports: "{{ item.published_ports | default(omit) }}" + read_only: "{{ item.read_only | default(omit) }}" + recreate: "{{ item.recreate | default(omit) }}" # <== RESTARTS container after EVERY ansible run + restart: "{{ item.restart | default(omit) }}" + restart_policy: "{{ item.restart_policy | default('on-failure') }}" + restart_retries: "{{ item.restart_retries | default(omit) }}" + state: "{{ item.state | default('started') }}" + ulimits: "{{ item.ulimits | default(omit) }}" + volumes: "{{ item.volumes | default([]) | remove_custom_fields }}" + volumes_from: "{{ item.volumes_from | default([]) }}" + register: create_container_instances + ignore_errors: true + with_items: "{{ container }}" + loop_control: + label: "{{ item.name }} - hostname: {{ item.name }} - image: {{ item.image }}" + when: "ansible_version.full is version_compare('2.10', '<')" + tags: + - container_launch + +- name: reporting changes + ansible.builtin.debug: + msg: "{{ create_container_instances.results | selectattr('changed', 'equalto', True) | reporting }}" + when: + - container_reporting_changes + tags: + - container_launch + +- name: 
failed changes + ansible.builtin.debug: + msg: "{{ create_container_instances.results | selectattr('failed', 'equalto', True) | reporting }}" + when: + - container_reporting_changes + tags: + - container_launch + +... diff --git a/roles/container/tasks/launch/main.yml b/roles/container/tasks/launch/main.yml new file mode 100644 index 0000000..4bee3f6 --- /dev/null +++ b/roles/container/tasks/launch/main.yml @@ -0,0 +1,23 @@ +--- + +- name: launch containers + ansible.builtin.debug: + msg: "{{ container_to_launch | container_names }}" + +- name: launch container + ansible.builtin.include_tasks: launch_for_older_ansible.yml + when: "ansible_version.full is version_compare('2.10', '<')" + tags: + - container_launch + +- name: launch container + ansible.builtin.include_tasks: launch_container.yml + when: "ansible_version.full is version_compare('2.10', '>=')" + tags: + - container_launch + +- name: create custom fact file + bodsch.core.facts: + name: update_container + facts: + update_needed: "{{ container }}" diff --git a/roles/container/tasks/login.yml b/roles/container/tasks/login.yml new file mode 100644 index 0000000..101c0f3 --- /dev/null +++ b/roles/container/tasks/login.yml @@ -0,0 +1,22 @@ +--- + +- name: description for registry login + ansible.builtin.debug: + msg: "{{ registry.description if registry.description is defined else 'login as ' + registry.username }}" + +- name: log into private registry and force re-authorization + community.general.docker_login: + state: present + registry: "{{ registry.host | default(omit) }}" + username: "{{ registry.username }}" + password: "{{ registry.password }}" + reauthorize: "{{ registry.reauthorize | default('false') | bool }}" + debug: "{{ registry.debug | default('false') | bool }}" + tls: "{{ registry.tls | default('false') | bool }}" + ca_cert: "{{ registry.ca_cert | default(omit) }}" + client_cert: "{{ registry.client_cert | default(omit) }}" + client_key: "{{ registry.client_key | default(omit) }}" + validate_certs: "{{ registry.validate_certs | default('false') | bool }}" + register: registry_login + tags: + - container_registry_login diff --git a/roles/container/tasks/main.yml b/roles/container/tasks/main.yml new file mode 100644 index 0000000..85b8e13 --- /dev/null +++ b/roles/container/tasks/main.yml @@ -0,0 +1,83 @@ +--- + +- name: preparement + ansible.builtin.include_tasks: prepare.yml + tags: + - always + +- name: pre and post tasks + ansible.builtin.include_tasks: custom-tasks/main.yml + when: + - container_custom_tasks | default([]) | count > 0 + - (container_pre_tasks | default([]) | count > 0) or + (container_post_tasks | default([]) | count > 0) + tags: + - container_install_pre_or_post_tasks + +- name: login into container registry + ansible.builtin.include_tasks: login.yml + no_log: true + loop: + "{{ container_reg }}" + loop_control: + index_var: index + loop_var: registry + label: "" + when: + - container_reg is defined + - container_reg | bodsch.core.type == "list" + - container_reg | count > 0 + - registry.username is defined + - registry.password is defined + - registry.username | string | length > 0 + - registry.password | string | length > 0 + tags: + - container_registry_login + - container_pull + +- name: download new container + ansible.builtin.include_tasks: download.yml + when: + - container_images_needed | default([]) | count > 0 + tags: + - container_pull + +- name: network + ansible.builtin.include_tasks: network.yml + when: + - container_network | default([]) | count > 0 + - container_use_network + tags: 
+ - container_network + +- name: execute pre-tasks + ansible.builtin.include_tasks: custom-tasks/pre-tasks.yml + when: + - container_pre_tasks | default([]) | count > 0 + tags: + - container_install_pre_or_post_tasks + - container_exec_pre_tasks + +- name: launch container + ansible.builtin.include_tasks: launch/main.yml + when: + - container_to_launch | default([]) | count > 0 + tags: + - container_launch + +- name: execute post-tasks + ansible.builtin.include_tasks: custom-tasks/post-tasks.yml + when: + - container_post_tasks | default([]) | count > 0 + tags: + - container_install_pre_or_post_tasks + - container_exec_post_tasks + +- name: remove update fact + ansible.builtin.file: + state: absent + path: /etc/ansible/facts.d/update_container.fact + when: + - container_clean_update_fact + +... diff --git a/roles/container/tasks/network.yml b/roles/container/tasks/network.yml new file mode 100644 index 0000000..cb21db1 --- /dev/null +++ b/roles/container/tasks/network.yml @@ -0,0 +1,19 @@ +--- + +- name: create docker network + community.general.docker_network: + name: "{{ item.name }}" + state: "{{ item.state | default('present') }}" + enable_ipv6: false + ipam_config: + - subnet: "{{ item.subnet }}" + gateway: "{{ item.gateway }}" + iprange: "{{ item.iprange }}" + loop: "{{ container_network }}" + loop_control: + label: "{{ item.name.ljust(12) }}: {{ item.iprange }} - state: {{ item.state | default('present') }}" + when: container_use_network + tags: + - container_network + +... diff --git a/roles/container/tasks/prepare.yml b/roles/container/tasks/prepare.yml new file mode 100644 index 0000000..f2b8ad7 --- /dev/null +++ b/roles/container/tasks/prepare.yml @@ -0,0 +1,164 @@ +--- + +- name: "include OS specific configuration ({{ ansible_distribution }} ({{ ansible_os_family }}) {{ ansible_distribution_major_version }})" + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_distribution | lower }}-{{ ansible_service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_distribution | lower }}.yml" + # eg. 
redhat / debian / archlinux + - "{{ ansible_os_family | lower }}.yml" + - default.yaml + skip: true + +- name: python support + when: + - container_python_packages is defined + - container_python_packages | count > 0 + block: + - name: create pip requirements file + bodsch.core.pip_requirements: + name: container + requirements: "{{ container_python_packages }}" + register: pip_requirements + + - name: fail if pip not installed + ansible.builtin.fail: + msg: python pip is not installed + when: + - not pip_requirements.pip.present + + - name: install container python packages # noqa no-handler + ansible.builtin.pip: + state: present + requirements: "{{ pip_requirements.requirements_file }}" + extra_args: "{{ container_python_extra_args | default([]) | bodsch.core.python_extra_args(python_version=ansible_python.version) | default(omit) }}" + when: + - pip_requirements.requirements_file is defined + - pip_requirements.changed + + - name: do facts module to get latest information + ansible.builtin.setup: + +- name: install dependency + ansible.builtin.package: + name: "{{ container_packages }}" + state: present + +- name: filter container by {{ container_filter.by }} + ansible.builtin.set_fact: + container: "{{ container | bodsch.docker.container_filter_by(container_filter.by, container_filter.names) }}" + when: + - container_filter is defined + - container_filter.by is defined + - container_filter.by in ["name", "hostname", "image"] + - container_filter.names is defined + - container_filter.names | count > 0 + +- name: define container registry fact + ansible.builtin.set_fact: + container_reg: "{{ container_registry | bodsch.docker.combine_registries(container_registry_default) }}" + +- name: organise some information about the containers + ansible.builtin.set_fact: + container_information: "{{ container | bodsch.docker.container_filter(['present', 'only_present']) }}" + when: + - container | default([]) | count > 0 + +- name: define important facts about containers + ansible.builtin.set_fact: + container_names: "{{ container_information.names }}" + container_to_launch: "{{ container_information.launch }}" + container_mounts: "{{ container_information.mounts }}" + container_volumes: "{{ container_information.volumes }}" + container_environnments: "{{ container_information.environnments }}" + container_images_needed: "{{ container_information.images }}" + when: + - container_information | default({}) + +- name: validate container mountpoints + when: + - container_mounts | count > 0 + block: + - name: validate container mountpoints + ansible.builtin.set_fact: + container_validate_mounts: "{{ container | bodsch.docker.validate_mountpoints }}" + # no_log: true + + - name: fail + when: + - container_validate_mounts | count > 0 + block: + - name: broken mountpoint definitions + ansible.builtin.debug: + msg: "{{ container_validate_mounts }}" + + - name: fail with broken mountpoint definitions + ansible.builtin.fail: + msg: "your mounts definition is not valid!\n + The following parameters are required:\n + - source\n + - target\n + - type\n + 'type' may only have one of the following values: 'bind','tmpfs' or 'volume'\n + read: https://docs.ansible.com/ansible/latest/collections/community/docker/docker_container_module.html#parameter-mounts" + +- name: create base directory to persist docker environnments + become: true + ansible.builtin.file: + name: "{{ container_env_directory }}" + state: directory + mode: 0755 + +- name: create application directory to persist docker environnments + become: true 
+ when: + - container_names | default([]) | count > 0 + container_directories: + base_directory: "{{ container_env_directory }}" + container: "{{ container_names }}" + owner: '1000' + group: '1000' + mode: "0775" + register: _created_directories + notify: + - created application directories + +- name: persist application docker environnments or properties + become: true + when: + - container_environnments | default([]) | count > 0 + container_environments: + base_directory: "{{ container_env_directory }}" + container: "{{ container_environnments }}" + diff: false + register: _container_data + notify: + - container restart necessary + - update container for recreate running docker instance + - created environnments or properties + +- name: create container volumes and mountpoints + become: true + when: + - container_mounts | default([]) | count > 0 or + container_volumes | default([]) | count > 0 + container_mounts: + data: "{{ container }}" + volumes: true + mounts: true + register: _created_directories + notify: + - created container volumes and mountpoints + +- name: flush handlers + ansible.builtin.meta: flush_handlers + +... diff --git a/roles/container/templates/container.env.j2 b/roles/container/templates/container.env.j2 new file mode 100644 index 0000000..2eafdb7 --- /dev/null +++ b/roles/container/templates/container.env.j2 @@ -0,0 +1,7 @@ +# {{ ansible_managed }} +# +{% if item.environments is defined %} +{% for k in item.environments | sort %} +{{ k }}={{ item.environments[k] }} +{% endfor %} +{% endif %} diff --git a/roles/container/templates/coremedia_importer.sh.j2 b/roles/container/templates/coremedia_importer.sh.j2 new file mode 100644 index 0000000..6d260bc --- /dev/null +++ b/roles/container/templates/coremedia_importer.sh.j2 @@ -0,0 +1,108 @@ +#!/bin/bash + +ENV_DIRECTORY="{{ container_env_directory }}" + +if [ ! -e "${ENV_DIRECTORY}/management-tools/import.rc" ] +then + echo "config '${ENV_DIRECTORY}/management-tools/import.rc' not found!" + exit 1 +fi + +. 
"${ENV_DIRECTORY}/management-tools/import.rc" + +extract_content_archive() { + + if [ -e ${CONTENT_ARCHIVE} ] + then + pushd ${ENV_DIRECTORY}/management-tools/import > /dev/null + + [ -d users ] && rm -rf users + + unzip -u -o -qq $(basename ${CONTENT_ARCHIVE_URL}) + + for file in ${USER_BLACKLIST} + do + [ -e users/${file} ] && rm -f users/${file} + done + fi +} + +# ------------------------------------------------------------------------------------------------- + +run_importer() { + + ${DOCKER} run \ + --rm \ + ${DOCKER_VOLUMES} \ + ${DOCKER_LINKS} \ + ${DOCKER_NETWORK} \ + ${DOCKER_ENV} \ + ${DOCKER_ADD_HOSTS} \ + ${DOCKER_CONTAINER} \ + patches/patch \ + ${TASKS} +} + +# ------------------------------------------------------------------------------------------------- + +TASKS= + +while [[ $# -gt 0 ]] +do + key="$1" + case "${key}" in + -u|--user) + TASKS="${TASKS} patches/import-users" + shift # past argument + ;; + -t|--themes) + TASKS="${TASKS} patches/import-themes" + shift + ;; + -c|--content) + TASKS="${TASKS} patches/import-content" + shift + ;; + -p|--publish) + TASKS="${TASKS} patches/publish-content" + shift + ;; + *) # unknown option + POSITIONAL+=("$1") # save it in an array for later + shift # past argument + ;; + esac +done +set -- "${POSITIONAL[@]}" # restore positional parameters + +# ------------------------------------------------------------------------------------------------- + +run() { +# echo "${TASKS}" + extract_content_archive + run_importer "${TASKS}" +} + +run + +# ------------------------------------------------------------------------------------------------- + +# ${DOCKER} run \ +# --rm \ +# --add-host rls.cm.local:192.168.124.35 \ +# --entrypoint /coremedia/tools/bin/cm \ +# harbor.cm.local/coremedia/management-tools:2007.1 \ +# runlevel -url http://rls.cm.local:42080/ior --user admin --password admin +# +# docker run \ +# -ti \ +# --volume /etc/coremedia/management-tools/patches:/coremedia/patches:ro \ +# --volume /etc/coremedia/management-tools/import:/coremedia/import/ \ +# --env-file /etc/coremedia/management-tools/container.env \ +# --network coremedia \ +# --link content-management-server \ +# --link workflow-server \ +# --add-host rls.cm.local:192.168.124.35 \ +# --entrypoint "" \ +# harbor.cm.local/coremedia/management-tools:2007.1 \ +# bash diff --git a/roles/container/templates/import-users.sh.j2 b/roles/container/templates/import-users.sh.j2 new file mode 100644 index 0000000..fbaec8a --- /dev/null +++ b/roles/container/templates/import-users.sh.j2 @@ -0,0 +1,67 @@ +#!/bin/bash + +DOCKER=$(command -v docker) + +# ------------------------------------------------------------------------------------------------- +# environments + +ENV_DIRECTORY="{{ container_env_directory }}" + +SKIP_CONTENT="false" +FORCE_REIMPORT_CONTENT="true" +FORCE_REIMPORT_THEMES="false" + +BLOB_STORAGE_URL="/coremedia/import/content-blobs.zip" +CONTENT_ARCHIVE_URL="/coremedia/import/content-users.zip" +THEMES_ARCHIVE_URL="/coremedia/import/frontend.zip" + +#CMS_IOR_URL="{{ container_cms_ior }}" +#MLS_IOR_URL="{{ container_mls_ior }}" +#WFS_IOR_URL="{{ container_wfs_ior }}" + +DOCKER_VOL="--volume {{ volumes | join(' --volume ' ) }}" +DOCKER_LINKS="--link {{ links | join(' --link ' ) }}" + +# {{ networks }} +{% set values = [] %} +{% for k in networks -%} +{{ values.append( k.name ) or "" }} +{% endfor -%} + +# {{ values }} + +DOCKER_NETWORK="--network {{ values | join(' --network ' ) }}" + +# {{ etc_hosts }} + +DOCKER_ADD_HOSTS="--add-host {{ etc_hosts | join(' --add-host ' 
) }}" + +DOCKER_ENV="--env CMS_IOR_URL="{{ container_cms_ior }}" \ + --env MLS_IOR_URL="{{ container_mls_ior }}" \ + --env WFS_IOR_URL="{{ container_wfs_ior }}" \ + --env CONTENT_ARCHIVE_URL="/coremedia/import/content-users.zip"" + +# if [ ${ENV_DIRECTORY} + + +# ------------------------------------------------------------------------------------------------- + +# ${DOCKER} run \ +# --rm \ +# --add-host rls.cm.local:192.168.124.35 \ +# --entrypoint /coremedia/tools/bin/cm \ +# harbor.cm.local/coremedia/management-tools:2007.1 \ +# runlevel -url http://rls.cm.local:42080/ior --user admin --password admin +# +# docker run \ +# -ti \ +# --volume /etc/coremedia/management-tools/patches:/coremedia/patches:ro \ +# --volume /etc/coremedia/management-tools/import:/coremedia/import/ \ +# --env-file /etc/coremedia/management-tools/container.env \ +# --network coremedia \ +# --link content-management-server \ +# --link workflow-server \ +# --add-host rls.cm.local:192.168.124.35 \ +# --entrypoint "" \ +# harbor.cm.local/coremedia/management-tools:2007.1 \ +# bash diff --git a/roles/container/templates/management-tools.rc.j2 b/roles/container/templates/management-tools.rc.j2 new file mode 100644 index 0000000..5227dd1 --- /dev/null +++ b/roles/container/templates/management-tools.rc.j2 @@ -0,0 +1,57 @@ +# + +DOCKER=$(command -v docker) + +CMS_IOR_URL="{{ container_cms_ior }}" +MLS_IOR_URL="{{ container_mls_ior }}" +WFS_IOR_URL="{{ container_wfs_ior }}" + +USER_BLACKLIST="{{ user_blacklist | join(' ') }}" + +SKIP_CONTENT="false" +FORCE_REIMPORT_CONTENT="true" +FORCE_REIMPORT_THEMES="true" + +BLOB_STORAGE_URL= # "/coremedia/import/content-blobs.zip" +CONTENT_ARCHIVE_URL="/coremedia/import/content-users.zip" +THEMES_ARCHIVE_URL="/coremedia/import/frontend.zip" + +DOCKER_ADD_HOSTS= +DOCKER_NETWORK= +DOCKER_VOLUMES="--volume {{ volumes | default([]) | join(' --volume ') }}" +DOCKER_LINKS="--link {{ links | default([]) | join(' --link ' ) }}" + +{% if networks is defined %} +{%- set values = [] -%} +{%- for k in networks -%} +{{ values.append( k.name ) or "" }} +{% endfor -%} +DOCKER_NETWORK="--network {{ values | join(' --network ' ) }}" +{% endif %} + +{% if etc_hosts is defined %} +{%- set values = [] -%} +{%- for k,v in etc_hosts.items() -%} +{{ values.append( k + ':' + v ) or "" }} +{% endfor -%} +DOCKER_ADD_HOSTS="--add-host {{ values | join(' --add-host ' ) }}" +{% endif %} + +DOCKER_ENV="--env CMS_IOR_URL="{{ container_cms_ior }}" \ + --env MLS_IOR_URL="{{ container_mls_ior }}" \ + --env WFS_IOR_URL="{{ container_wfs_ior }}" \ + --env CONTENT_ARCHIVE_URL="${CONTENT_ARCHIVE_URL}" \ + --env THEMES_ARCHIVE_URL="${THEMES_ARCHIVE_URL}" \ + --env BLOB_STORAGE_URL="${BLOB_STORAGE_URL}" \ + --env FORCE_REIMPORT_CONTENT="${FORCE_REIMPORT_CONTENT}" \ + --env FORCE_REIMPORT_THEMES="${FORCE_REIMPORT_THEMES}" +" +DOCKER_CONTAINER={{ container }} + +# echo "${DOCKER_VOLUMES}" +# echo "${DOCKER_LINKS}" +# echo "${DOCKER_NETWORK}" +# echo "${DOCKER_ADD_HOSTS}" +# echo "${DOCKER_ENV}" + +CONTENT_ARCHIVE="${ENV_DIRECTORY}/management-tools/import/$(basename ${CONTENT_ARCHIVE_URL})" diff --git a/roles/container/templates/prometheus.yml.j2 b/roles/container/templates/prometheus.yml.j2 new file mode 100644 index 0000000..356011c --- /dev/null +++ b/roles/container/templates/prometheus.yml.j2 @@ -0,0 +1,87 @@ + +global: + scrape_interval: 30s # By default, scrape targets every 15 seconds. 
+ evaluation_interval: 30s + + # Attach these labels to any time series or alerts when communicating with + # external systems (federation, remote storage, Alertmanager). + external_labels: + monitor: 'development' + +{% set valid = [ + 'solr', + 'content-management-server', + 'content-feeder', + 'caefeeder-preview', + 'workflow-server', + 'user-changes', + 'elastic-worker', + 'studio-server', + 'cae-preview', + 'cae-preview-01', + 'cae-preview-02', + 'headless-server-preview', + 'master-live-server', + 'caefeeder-live', + 'replication-live-server', + 'cae-live', + 'cae-live-01', + 'cae-live-02', + 'cae-live-03', + 'cae-live-04', + 'cae-live-05', + 'cae-live-06', + 'headless-server-live', + 'headless-server-live-01', + 'headless-server-live-02' +] %} + +scrape_configs: + - job_name: prometheus + scrape_interval: 15s + static_configs: + - targets: ['localhost:9090'] + labels: + environment: monitoring + zone: monitoring + +{% if 'node_exporter' in container %} + # node exporter + - job_name: 'nodeexporter' + scrape_interval: 15s + static_configs: + - targets: ["{{ ansible_fqdn }}:9100"] + labels: + environment: monitoring + zone: monitoring + collector_type: node_exporter +{% endif %} + +{% if 'cadvisor' in container %} + # cadvisor + - job_name: cadvisor + scrape_interval: 15s + static_configs: + - targets: ["{{ ansible_fqdn }}:8081"] + labels: + environment: monitoring + zone: monitoring + collector_type: cadvisor +{% endif %} + +{% if container %} + +{% for item in container -%} +{% if item['name'] in valid %} + # {{ item['name'] }} + - job_name: {{ item['name'] }} + scrape_interval: 15s + static_configs: + - targets: [ "{{ item['name'] }}:8199" ] + labels: + environment: coremedia + hostname: {{ ansible_fqdn }} + collector_type: coremedia +{% endif %} +{% endfor %} +{% endif %} diff --git a/roles/container/templates/requirements.txt.j2 b/roles/container/templates/requirements.txt.j2 new file mode 100644 index 0000000..0e5ede9 --- /dev/null +++ b/roles/container/templates/requirements.txt.j2 @@ -0,0 +1,4 @@ +{% for k in container_python_packages %} +{{ k.name }}{% if k.version is defined %}>={{ k.version }}{% endif %} + +{% endfor %} diff --git a/roles/container/templates/resetcaefeeder.properties.j2 b/roles/container/templates/resetcaefeeder.properties.j2 new file mode 100644 index 0000000..ddf41eb --- /dev/null +++ b/roles/container/templates/resetcaefeeder.properties.j2 @@ -0,0 +1,11 @@ +# {{ ansible_managed }} +# +{% set caefeeder = (container | selectattr('name', 'search', 'caefeeder') | first ) | default({}) %} +{% set env = caefeeder.environments | default({}) %} +# {{ caefeeder.name }} +{# # {{ caefeeder | to_nice_json(indent=2) }} #} + +jdbc.driver={{ env.JDBC_DRIVER | default('com.mysql.cj.jdbc.Driver') }} +jdbc.url={{ env.JDBC_URL | default('jdbc:mysql://:3306/cm_mcaefeeder') }} +jdbc.user={{ env.JDBC_USER | default('cm_mcaefeeder') }} +jdbc.password={{ env.JDBC_PASSWORD | default('cm_mcaefeeder') }} diff --git a/roles/container/test-requirements.txt b/roles/container/test-requirements.txt new file mode 100644 index 0000000..267ec92 --- /dev/null +++ b/roles/container/test-requirements.txt @@ -0,0 +1,12 @@ +ansible-lint +docker +dnspython +flake8 +molecule>=5.0.1 +molecule-plugins[docker] +netaddr +pytest +pytest-testinfra +tox +tox-gh-actions +yamllint diff --git a/roles/container/tox.ini b/roles/container/tox.ini new file mode 100644 index 0000000..a485358 --- /dev/null +++ b/roles/container/tox.ini @@ -0,0 +1,35 @@ +[tox] +minversion = 3.25 +toxworkdir = /tmp/.tox/ + 
+envlist = ansible_{2.9,2.10,3.4,4.10,5.1,5.2,6.1} + +skipsdist = true + +[testenv] +passenv = * + +# allowlist_externals = +# /usr/bin/find +# /bin/sh +# rm + +deps = + -r test-requirements.txt + ansible_4.10: ansible>=4.10,<4.11 + ansible_5.1: ansible>=5.1,<5.2 + ansible_5.2: ansible>=5.2,<5.3 + ansible_5.10: ansible>=5.10,<5.11 + ansible_6.1: ansible>=6.1,<6.2 + ansible_6.7: ansible>=6.7,<6.8 + ansible_7.0: ansible>=7.0,<7.1 + ansible_7.5: ansible>=7.5,<7.6 + ansible_8.0: ansible>=8.0,<8.1 + ansible_8.5: ansible>=8.5,<8.6 + +#commands_pre = +# /usr/bin/find {toxinidir} -type f -not -path '{toxworkdir}/*' -path '*/__pycache__/*' -name '*.py[c|o]' -delete +# /bin/sh -c '/usr/bin/find {homedir}/.cache -type d -path "*/molecule_*" -exec rm -rfv \{\} +;' + +commands = + {posargs:molecule test --all --destroy always} diff --git a/roles/container/vars/alpine.yml b/roles/container/vars/alpine.yml new file mode 100644 index 0000000..3243c01 --- /dev/null +++ b/roles/container/vars/alpine.yml @@ -0,0 +1,8 @@ +--- + +container_packages: + - openssl + - docker-py + - jq + +... diff --git a/roles/container/vars/archlinux.yml b/roles/container/vars/archlinux.yml new file mode 100644 index 0000000..dde4e52 --- /dev/null +++ b/roles/container/vars/archlinux.yml @@ -0,0 +1,7 @@ +--- + +container_packages: + - openssl + - jq + +... diff --git a/roles/container/vars/debian-10.yml b/roles/container/vars/debian-10.yml new file mode 100644 index 0000000..06f82c9 --- /dev/null +++ b/roles/container/vars/debian-10.yml @@ -0,0 +1,13 @@ +--- + +container_packages: + - openssl + - python3-docker + - python3-pip + - python3-ruamel.yaml + - python3-jinja2 + - jq + +container_python_packages: [] + +... diff --git a/roles/container/vars/debian.yml b/roles/container/vars/debian.yml new file mode 100644 index 0000000..fd0c117 --- /dev/null +++ b/roles/container/vars/debian.yml @@ -0,0 +1,10 @@ +--- + +container_packages: + - openssl + - python3-docker + - python3-pip + - python3-ruamel.yaml + - jq + +... diff --git a/roles/container/vars/main.yml b/roles/container/vars/main.yml new file mode 100644 index 0000000..7cfb076 --- /dev/null +++ b/roles/container/vars/main.yml @@ -0,0 +1,39 @@ +--- + +python_virtualenv_path: "${HOME}/.venv" + +container_packages: [] + +container_python_extra_args: [] +container_python_packages: + - name: ruamel.yaml + version: 0.17 + - name: jinja2 + version: 3.0.3 + +container_update_needed: false + +container_registry_default: + - username: '' + password: '' + host: + reauthorize: + debug: + tls: + ca_cert: + client_cert: + client_key: + validate_certs: + +_container_images_information_pulled: {} + +container_changed: [] + +container_names: [] +container_to_launch: [] +container_mounts: [] +container_volumes: [] +container_environnments: [] +container_images_needed: [] + +... 
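The `requirements.txt.j2` template above emits one pip requirement per entry in `container_python_packages` and pins `>={{ k.version }}` whenever a version is given. A minimal sketch (not part of the role; it only assumes a local `jinja2` install) that reproduces the template logic outside of Ansible to preview the generated file:

```python
# Minimal sketch (not part of the role): reproduce the logic of
# roles/container/templates/requirements.txt.j2 outside of Ansible to
# preview the requirements.txt that the role would generate.
from jinja2 import Template

REQUIREMENTS_TEMPLATE = (
    "{% for k in container_python_packages %}"
    "{{ k.name }}{% if k.version is defined %}>={{ k.version }}{% endif %}\n"
    "{% endfor %}"
)

# values taken from roles/container/vars/main.yml
container_python_packages = [
    {"name": "ruamel.yaml", "version": "0.17"},
    {"name": "jinja2", "version": "3.0.3"},
]

print(Template(REQUIREMENTS_TEMPLATE).render(
    container_python_packages=container_python_packages))
# ruamel.yaml>=0.17
# jinja2>=3.0.3
```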
diff --git a/roles/docker/.ansible-lint b/roles/docker/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/roles/docker/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/roles/docker/.editorconfig b/roles/docker/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/roles/docker/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/roles/docker/.flake8 b/roles/docker/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/roles/docker/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/roles/docker/.github/linters/.markdown-lint.yml b/roles/docker/.github/linters/.markdown-lint.yml new file mode 100644 index 0000000..10cb7c3 --- /dev/null +++ b/roles/docker/.github/linters/.markdown-lint.yml @@ -0,0 +1,36 @@ +--- +########################### +########################### +## Markdown Linter rules ## +########################### +########################### + +# Linter rules doc: +# - https://github.com/DavidAnson/markdownlint +# +# Note: +# To comment out a single error: +# +# any violations you want +# +# + +############### +# Rules by id # +############### +MD001: false +MD004: false # Unordered list style +MD007: + indent: 2 # Unordered list indentation +MD013: + line_length: 400 # Line length 80 is far to short +MD026: + punctuation: ".,;:!。,;:" # List of not allowed +MD029: false # Ordered list item prefix +MD033: false # Allow inline HTML +MD036: false # Emphasis used instead of a heading + +################# +# Rules by tags # +################# +blank_lines: false # Error on blank lines diff --git a/roles/docker/.github/linters/.yaml-lint.yml b/roles/docker/.github/linters/.yaml-lint.yml new file mode 100644 index 0000000..867ad39 --- /dev/null +++ b/roles/docker/.github/linters/.yaml-lint.yml @@ -0,0 +1,24 @@ +--- +extends: default +rules: + line-length: + max: 195 + level: warning + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 1 + indentation: + spaces: 2 + comments: disable + comments-indentation: disable + truthy: disable + +ignore: | + .github/ + .molecule/ + .tox + molecule/ + tests/ diff --git a/roles/docker/.github/workflows/clean-workflows.yml b/roles/docker/.github/workflows/clean-workflows.yml new file mode 100644 index 0000000..3b03995 --- /dev/null +++ b/roles/docker/.github/workflows/clean-workflows.yml @@ -0,0 +1,31 @@ +--- + +name: delete workflow runs + +on: + schedule: + - cron: "10 4 * * 0" + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + +jobs: + delete-workflow-runs: + runs-on: ubuntu-latest + 
name: delete old workflow runs + steps: + - name: Delete workflow runs + uses: MajorScruffy/delete-old-workflow-runs@v0.3.0 + with: + repository: bodsch/ansible-role-docker + older-than-seconds: 2592000 # remove all workflow runs older than 30 day + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/roles/docker/.github/workflows/dockerd-with-plugin.yml b/roles/docker/.github/workflows/dockerd-with-plugin.yml new file mode 100644 index 0000000..d9de17f --- /dev/null +++ b/roles/docker/.github/workflows/dockerd-with-plugin.yml @@ -0,0 +1,106 @@ +--- + +name: dockerd with installed plugin + +on: + workflow_dispatch: + workflow_run: + workflows: + - "CI" + types: + - completed + +defaults: + run: + working-directory: 'ansible-role-docker' + +jobs: + arch: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - archlinux:latest + python_version: + - "3.11.3" + ansible-version: + - "6.7" + scenario: + - dockerd-with-plugin + + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + path: 'ansible-role-docker' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} + + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:12 + python_version: + - "3.11.3" + ansible-version: + - "6.7" + scenario: + - dockerd-with-plugin + + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + path: 'ansible-role-docker' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/docker/.github/workflows/dockerd-with-tls.yml b/roles/docker/.github/workflows/dockerd-with-tls.yml new file mode 100644 index 0000000..260f104 --- /dev/null +++ b/roles/docker/.github/workflows/dockerd-with-tls.yml @@ -0,0 +1,106 @@ +--- + +name: dockerd with tls + +on: + workflow_dispatch: + workflow_run: + workflows: + - "CI" + types: + - completed + +defaults: + run: + working-directory: 'ansible-role-docker' + +jobs: + arch: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - archlinux:latest + python_version: + - "3.11.3" + ansible-version: + - "6.7" + scenario: + - dockerd-with-tls + + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + path: 'ansible-role-docker' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} + + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:12 + python_version: + - "3.11.3" + ansible-version: + - "6.7" + scenario: + - dockerd-with-tls + + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + path: 'ansible-role-docker' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/docker/.github/workflows/galaxy.yml b/roles/docker/.github/workflows/galaxy.yml new file mode 100644 index 0000000..983c760 --- /dev/null +++ b/roles/docker/.github/workflows/galaxy.yml @@ -0,0 +1,30 @@ +--- + +name: push to ansible galaxy + +on: + workflow_dispatch: + workflow_run: + workflows: + - "CI" + branches: + - main + types: + - completed + +jobs: + galaxy: + name: galaxy + runs-on: ubuntu-20.04 + if: ${{ github.event.workflow_run.conclusion == 'success' }} + steps: + - name: Check out the codebase + uses: actions/checkout@v3 + with: + path: 'ansible-role-docker' + + - name: galaxy + uses: robertdebock/galaxy-action@1.2.1 + with: + galaxy_api_key: ${{ secrets.galaxy_api_key }} + git_branch: main diff --git a/roles/docker/.github/workflows/linter.yml b/roles/docker/.github/workflows/linter.yml new file mode 100644 index 0000000..952d376 --- /dev/null +++ b/roles/docker/.github/workflows/linter.yml @@ -0,0 +1,56 @@ +--- + +name: code linter + +on: + schedule: + - cron: "10 3 * * 0" + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + push: + branches: + - 'main' + - 'feature/**' + - '!doc/**' + paths: + - "!Makefile" + - "!README.md" + - "tasks/**" + pull_request: + branches: + - 'main' + - 'feature/**' + - '!doc/**' + paths: + - "!Makefile" + - "!README.md" + - "tasks/**" + +jobs: + lint: + name: linting + runs-on: ubuntu-latest + steps: + - name: 🛎 Checkout + uses: actions/checkout@v3 + + - name: lint + uses: docker://ghcr.io/github/super-linter:slim-v4 + env: + DEFAULT_BRANCH: main + GITHUB_TOKEN: ${{ secrets.GH_REGISTRY_TOKEN }} + VALIDATE_ALL_CODEBASE: true + VALIDATE_ANSIBLE: true + # VALIDATE_MARKDOWN: true + VALIDATE_YAML: true + +... diff --git a/roles/docker/.github/workflows/main.yml b/roles/docker/.github/workflows/main.yml new file mode 100644 index 0000000..5b63f67 --- /dev/null +++ b/roles/docker/.github/workflows/main.yml @@ -0,0 +1,159 @@ +--- + +name: CI + +on: + workflow_run: + workflows: + - "code linter" + types: + - completed + +defaults: + run: + working-directory: 'ansible-role-docker' + +jobs: + arch: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - archlinux:latest + - artixlinux:latest + python_version: + - "3.10.11" + - "3.11.3" + ansible-version: + - "6.7" + scenario: + - default + + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + path: 'ansible-role-docker' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} + + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:11 + - debian:12 + - ubuntu:20.04 + - ubuntu:22.04 + python_version: + - "3.10.11" + - "3.11.3" + ansible-version: + - "6.7" + scenario: + - default + + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + path: 'ansible-role-docker' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} + + # rpm: + # name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + # runs-on: ubuntu-20.04 + # if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + # strategy: + # fail-fast: false + # matrix: + # image: + # - oraclelinux:8 + # - rockylinux:8 + # - almalinux:8 + # - oraclelinux:9 + # - rockylinux:9 + # - almalinux:9 + # ansible-version: + # - '6.1' + # scenario: + # - default + # + # steps: + # - name: check out the codebase. 
+ # uses: actions/checkout@v3 + # with: + # path: 'ansible-role-docker' + # ref: ${{ github.event.workflow_run.head_branch }} + # + # - name: 🐍 set up python + # uses: actions/setup-python@v4 + # with: + # python-version: '3.x' + # + # - name: install dependencies + # run: | + # python -m pip install --upgrade pip + # pip install -r test-requirements.txt + # + # - name: test with tox + # run: | + # make \ + # test \ + # -e TOX_SCENARIO="${{ matrix.scenario }}" \ + # -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + # -e DISTRIBUTION="${{ matrix.image }}" + # env: + # PY_COLORS: '1' + # ANSIBLE_FORCE_COLOR: '1' + # DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/docker/.gitignore b/roles/docker/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/roles/docker/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/roles/docker/.pycodestyle b/roles/docker/.pycodestyle new file mode 100644 index 0000000..d1826b0 --- /dev/null +++ b/roles/docker/.pycodestyle @@ -0,0 +1,5 @@ +[pycodestyle] +count = False +ignore = E226,E302,E71,E501,W391 +max-line-length = 160 +statistics = True diff --git a/roles/docker/.yamllint b/roles/docker/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/roles/docker/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/roles/docker/LICENSE b/roles/docker/LICENSE new file mode 100644 index 0000000..4925f94 --- /dev/null +++ b/roles/docker/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 George Bolo + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/roles/docker/Makefile b/roles/docker/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/roles/docker/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/roles/docker/README.md b/roles/docker/README.md new file mode 100644 index 0000000..2d8ecc5 --- /dev/null +++ b/roles/docker/README.md @@ -0,0 +1,399 @@ + +# Ansible Role: `docker` + +This role will fully configure and install [dockerd](https://www.docker.com/). + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-role-docker/main.yml?branch=main)][ci] +[![GitHub issues](https://img.shields.io/github/issues/bodsch/ansible-role-docker)][issues] +[![GitHub release (latest by date)](https://img.shields.io/github/v/release/bodsch/ansible-role-docker)][releases] +[![Ansible Quality Score](https://img.shields.io/ansible/quality/50067?label=role%20quality)][quality] + +[ci]: https://github.com/bodsch/ansible-role-docker/actions +[issues]: https://github.com/bodsch/ansible-role-docker/issues?q=is%3Aopen+is%3Aissue +[releases]: https://github.com/bodsch/ansible-role-docker/releases +[quality]: https://galaxy.ansible.com/bodsch/docker + + +## Requirements & Dependencies + +Ansible Collections + +- [bodsch.core](https://github.com/bodsch/ansible-collection-core) + +```bash +ansible-galaxy collection install bodsch.core +``` +or +```bash +ansible-galaxy collection install --requirements-file collections.yml +``` + +- Connectivity to docker-ce package [repository](https://download.docker.com) + +### Operating systems + +Tested on + +* ArchLinux +* ArtixLinux +* Debian based + - Debian 10 / 11 + - Ubuntu 20.04 / 22.04 + +> **RedHat-based systems are no longer officially supported! May work, but does not have to.** + +## Role Variables + +The following variables can be used to customize the docker installation: + +```yaml +## choose centos docker repo channel enable status +docker_repo: + channel: + stable_enabled: true + test_enabled: false + nightly_enabled: false + +## state of package (present, absent, etc.) +docker_state: present + +## should docker daemon start on boot? +docker_service: + enable: true + ## name of docker service + name: docker + +## install docker-compose in version +docker_compose: {} +# as example: +# docker_compose: +# install: true +# version: 1.29.2 + +docker_users: [] + +docker_plugins: [] + +docker_client_config: [] + +docker_config: {} + +docker_config_diff: true + +docker_python_packages: [] +``` + +### Proxy related +should docker daemon use a proxy for outbound connections? + +```yaml +docker_proxy: + enabled: false + ## list of env variables we should set (comment out the ones you don't need) + env: + - "HTTP_PROXY=http://proxy.example.com:80/" + - "HTTP_PROXY=https://proxy.example.com:443/" + - "NO_PROXY=localhost,127.0.0.1,internalhub.example.com" +``` + +### docker client configuration + +Enable authentication for the Docker Registry. +Here it is possible to create a configuration for different users. 
+**The password stored here is base64 encoded and not encrypted!** +The creation of a corresponding string can be carried out as follows: + +```bash +echo "jenkins$robot:rubbel-die-katz-du-dummschwätzer1" | base64 +amVua2luczpydWJiZWwtZGllLWthdHotZHUtZHVtbXNjaHfDpHR6ZXIxCg== +``` + +```yaml +docker_client_config: + ## the location we should push client configuration + - location: "/root/.docker/config.json" + enabled: false + auths: + registry.gitfoo.tld: + auth: amVua2luczpydWJiZWwtZGllLWthdHotZHUtZHVtbXNjaHfDpHR6ZXIxCg== +``` + +Alternatively, you can also enter your `username` and `password`. +The Ansible module will make a valid Base64 encoded string out of it. + +```yaml +docker_client_config: + ## the location we should push client configuration + - location: "/var/tmp/foo/config.json" + enabled: false + auths: + "test.tld": + username: "FOO-was-sonst" + passwort: "ja-toll-schon-wieder-alles-scheisse!" +``` + +Since version 3.1.0 it is now also possible to configure the output format of `docker ps` or `docker image`. +Here the fed parameters have to be defined as a list: + +```yaml +docker_client_config: + ## the location we should push client configuration + - location: "/root/.docker/config.json" + enabled: false + auths: + registry.gitfoo.tld: + auth: amVua2luczpydWJiZWwtZGllLWthdHotZHUtZHVtbXNjaHfDpHR6ZXIxCg== + formats: + ps: + - ".ID" + - ".Names" + - ".Status" + - ".Labels" + - ".RunningFor" + - ".Ports" + images: + - ".ID" + - ".Size" + - ".Repository" + - ".Tag" + - ".CreatedAt" +``` + +### default dockerd configuration options + +[configuration reference](https://docs.docker.com/engine/reference/commandline/dockerd/#/linux-configuration-file) + +currently supported options: + +| options | type | default | description | +| :----- | :---- | :---- | :----- | +| `authorization_plugins` | `list` | `[]` | | +| `bip` | `string` | `-` | Specify network bridge IP | +| `bridge` | `string` | `-` | Attach containers to a network bridge | +| `data_root` | `string` | `/var/lib/docker` | Root directory of persistent Docker state | +| `debug` | `bool` | `false` | Enable debug mode | +| `default_gateway` | `string` | `-` | Container default gateway IPv4 address | +| `default_gateway_v6` | `string` | `-` | Container default gateway IPv6 address | +| `default_shm_size` | `string` | `-` | Default shm size for containers (default `64MiB`) | +| `default_ulimits` | `dict` | `{}` | Default ulimits for containers (default []) | +| `dns` | `list` | `[]` | DNS server to use | +| `dns_opts` | `list` | `[]` | DNS options to use | +| `dns_search` | `list` | `[]` | DNS search domains to use | +| `experimental` | `bool` | `false` | Enable experimental features | +| `fixed_cidr` | `string` | `-` | IPv4 subnet for fixed IPs | +| `fixed_cidr_v6` | `string` | `-` | IPv6 subnet for fixed IPs | +| `group` | `group` | `docker` | Group for the unix socket | +| `hosts` | `list` | `[]` | Daemon socket(s) to connect to | +| `insecure_registries` | `list` | `[]` | Enable insecure registry communication | +| `ip` | `string` | `0.0.0.0` | Default IP when binding container ports | +| `ip_forward` | `bool` | `true` | Enable net.ipv4.ip_forward (default true) | +| `ip_masq` | `bool` | `true` | Enable IP masquerading (default true) | +| `iptables` | `bool` | `true` | Enable addition of iptables rules (default true) | +| `ip6tables` | `bool` | `false` | Enable addition of ip6tables rules (default false) | +| `ipv6` | `bool` | `false` | Enable IPv6 networking | +| `labels` | `list` | `[]` | Set key=value labels to the daemon 
| +| `log_driver` | `string` | `json-file` | Default driver for container logs | +| `log_level` | `string` | `info` | Set the logging level (`debug`,`info`,`warn`,`error`,`fatal`) | +| `log_opts` | `dict` | `{}` | Default log driver options for containers | +| `max_concurrent_downloads` | `int` | `3` | Set the max concurrent downloads for each pull | +| `max_concurrent_uploads` | `int` | `5` | Set the max concurrent uploads for each push | +| `max_download_attempts` | `int` | `5` | Set the max download attempts for each pull | +| `metrics_addr` | `string` | `-` | Set default address and port to serve the metrics api on | +| `oom_score_adjust` | `int` | `-500` | Set the oom_score_adj for the daemon (default -500) | +| `pidfile` | `string` | `/var/run/docker.pid` | Path to use for daemon PID file (default "/var/run/docker.pid") | +| `raw_logs` | `bool` | `false` | Full timestamps without ANSI coloring | +| `registry_mirrors` | `list` | `[]` | Preferred Docker registry mirror | +| `seccomp_profile` | `string` | `-` | Path to seccomp profile | +| `selinux_enabled` | `bool` | `false` | Enable selinux support | +| `shutdown_timeout` | `int` | `15` | Set the default shutdown timeout | +| `storage_driver` | `string` | `overlay2` | [Storage driver](https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-storage-driver) to use (`aufs`, `devicemapper`, `btrfs`, `zfs`, `overlay`, `overlay2`, `fuse-overlayfs`) | +| `storage_opts` | `list` | `[]` | [Storage driver options](https://docs.docker.com/engine/reference/commandline/dockerd/#options-per-storage-driver) | +| `tls.verify` | `bool` | `false` | Use TLS and verify the remote | +| `tls.ca_cert` | `string` | `~/.docker/ca.pem` | Trust certs signed only by this CA (default "~/.docker/ca.pem") | +| `tls.cert` | `string` | `~/.docker/cert.pem` | Path to TLS certificate file (default "~/.docker/cert.pem") | +| `tls.key` | `string` | `~/.docker/key.pem` | Path to TLS key file (default "~/.docker/key.pem") | + + +#### Examples + +```yaml +docker_config: + log_driver: "" + log_opts: {} + # env: "os,customer" + # "max-file": "5" + # "max-size": "10m" + max_concurrent_downloads: 3 + max_concurrent_uploads: 5 + debug: false + log_level: "" + bridge: "" + bip: "172.16.0.1/24" + fixed_cidr: "172.16.0.0/24" + fixed_cidr_v6: "" + default_gateway: "" + default_gateway_v6: "" + selinux_enabled: false + experimental: false + ip: "0.0.0.0" + group: "{{ docker_group }}" + insecure_registries: [] +``` + +When creating the configuration, a diff to the original version can optionally be created and output. + +To do this, the variable `docker_config_diff` must be set to `true`. + + +There are more examples in the molecule tests: + +- [default](molecule/default/group_vars/all/vars.yml) +- [dockerd-with-plugin](molecule/dockerd-with-plugin/group_vars/all/vars.yml) +- [dockerd-with-tls](molecule/dockerd-with-tls/group_vars/all/vars.yml) +- [update-config](molecule/update-config/group_vars/all/vars.yml) + + +### docker_users options + +Adds an **existing user** to the `docker` group. + +Furthermore, it tries to set the access rights to the docker socker by means of `setfacl`. + +```yaml +docker_users: + - jenkins +``` + +### docker_plugins options + +Install and activate custom plugins. + +(Currently only tested with [Loki](https://grafana.com/docs/loki/latest/clients/docker-driver/)!) 
+ +```yaml +docker_plugins: + - alias: loki + source: grafana/loki-docker-driver + version: 2.7.0 + state: present +``` + +### python Support + +Some of the modules in this role require suitable python extensions. +In recent times, there have been a few incompatibilities here, which is why this point is now also configurable. + +The default configuration is as follows: + +```yaml +docker_python_packages: + - name: docker + - name: requests + - name: urllib3 +``` + +If other pip module versions are installed here, you can overwrite these defaults. +It is possible to add versions to each module: + +```yaml +docker_python_packages: + - name: docker + version: 6.1.1 +``` + +If a version is specified, an attempt will be made to install exactly this version. +However, there is also the possibility to influence this behaviour via `compare_direction`. + +```yaml +docker_python_packages: + - name: docker + compare_direction: ">" + version: 6.0.0 +``` + +Or to define a corresponding window via `versions`: + +```yaml +docker_python_packages: + - name: docker + - name: requests + versions: + - ">= 2.27.0" + - "< 2.29.0" + - name: urllib3 + versions: + - ">= 1.26.0" + - "< 2.0.0" +``` + +## Examples + +Install latest docker **stable** release on your local centos server + +```yaml +- hosts: localhost + roles: + - role: docker +``` + +Install latest docker **edge** release on your local centos server + +```yaml +- hosts: localhost + vars: + docker_repo: + channel: + nightly_enabled: true + roles: + - role: docker +``` + +Advanced playbook with various variables applied + +[configuration reference](https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file) + +```yaml +- hosts: localhost + vars: + docker_config: + # store docker containers/images to /opt/docker + data_root: /opt/docker + # change default docker bridge subnet + bip: 172.16.77.77/24 + # set default log driver to journald + log_driver: journald + # enable experimental mode + experimental: true + # expose docker api over socket file and tcp + hosts: + - unix:///var/run/docker.sock + - tcp://0.0.0.0:2376 + # set default search domains + dns_search: + - lab1.linuxctl.com + - lab2.linuxctl.com + # configure logging options + log_opts: + "max-size": 10m + "max-file": "3" + labels: molecule + env: "os,customer" + + roles: + - role: docker +``` +--- + +## Author and License + +- original `docker` role written by: + - George Bolo | [linuxctl.com](https://linuxctl.com) + +- modified: + - Bodo Schulz + +## License + +MIT + +**FREE SOFTWARE, HELL YEAH!** diff --git a/roles/docker/collections.yml b/roles/docker/collections.yml new file mode 100644 index 0000000..e5810e9 --- /dev/null +++ b/roles/docker/collections.yml @@ -0,0 +1,6 @@ +--- + +collections: + - name: bodsch.core + version: ">=1.0.19" + - name: bodsch.scm diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml new file mode 100644 index 0000000..25f829d --- /dev/null +++ b/roles/docker/defaults/main.yml @@ -0,0 +1,132 @@ +--- + +## choose centos docker repo channel enable status +docker_repo: + channel: + stable_enabled: true + test_enabled: false + nightly_enabled: false + +## state of package (present, absent, etc.) +docker_state: present + +## should docker daemon start on boot? 
+docker_service: + enable: true + ## name of docker service + name: docker + +## install docker-compose in version +docker_compose: {} +# as example: +# docker_compose: +# install: true +# version: 1.29.2 + +docker_users: [] + +docker_plugins: [] +# as example: +# docker_plugins: +# - alias: loki +# source: grafana/loki-docker-driver:latest +# state: present + +# Proxy related ---------------------------------------------------------------- + +## should docker daemon use a proxy for outbound connections? +docker_proxy: + enabled: false + ## list of env variables we should set (comment out the ones you don't need) + env: + - "HTTP_PROXY=http://proxy.example.com:80/" + - "HTTP_PROXY=https://proxy.example.com:443/" + - "NO_PROXY=localhost,127.0.0.1,internalhub.example.com" + +# docker client configuration -------------------------------------------------- + +## enable authentication for docker registry +docker_client_config: [] +# as example: +# docker_client_config: +# - username: jenkins +# enabled: true +# owner: jenkins +# group: jeinkins +# ## the location we should push client configuration +# location: "/var/jenkins_home/.docker/config.json" +# auths: +# "https://harbor.deployment.tld": +# auth: "amVua2luczpydWJiZWwtZGllLWthdHotZHUtZHVtbXNjaHfDpHR6ZXIxCg==" +# email: "jenkins@deployment.tld" +# formats: +# ps: +# - ".ID" +# - ".Names" +# - ".Status" +# - ".Labels" +# - ".RunningFor" +# - ".Ports" +# images: +# - ".ID" +# - ".Size" +# - ".Repository" +# - ".Tag" +# - ".CreatedAt" + +# default dockerd configuration options ---------------------------------------- + +# for full doku, see: README.md +docker_config: + authorization_plugins: [] + bip: "" + bridge: "" + data_root: "/var/lib/docker" + debug: false + default_gateway: "" + default_gateway_v6: "" + default_shm_size: "" + default_ulimits: {} + dns: [] + dns_opts: [] + dns_search: [] + experimental: false + fixed_cidr: "" + fixed_cidr_v6: "" + group: "" + hosts: [] + insecure_registries: [] + ip: "" + ip_forward: + ip_masq: + iptables: + ip6tables: + ipv6: + labels: [] + log_driver: "" + log_level: "" + log_opts: {} + max_concurrent_downloads: 3 + max_concurrent_uploads: 5 + max_download_attempts: + metrics_addr: "" + oom_score_adjust: + pidfile: + raw_logs: + registry_mirrors: [] + seccomp_profile: "" + selinux_enabled: false + shutdown_timeout: + storage_driver: "" + storage_opts: [] + tls: + verify: false + ca_cert: "" + cert: "" + key: "" + +docker_config_diff: false + +docker_python_packages: [] + +... 
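Most keys in the `docker_config` defaults above are intentionally empty placeholders; only the options that are actually set should end up in `/etc/docker/daemon.json`. The filtering itself is done by the `bodsch.docker.docker_common_config` module, so the following Python snippet is only an illustrative sketch of that idea, not the module's implementation:

```python
# Illustrative sketch only: the actual daemon.json is written by the
# bodsch.docker.docker_common_config module. This just shows the idea of
# keeping only the options from docker_config that are explicitly set.
import json

docker_config = {
    "data_root": "/var/lib/docker",
    "log_driver": "",      # empty placeholder -> dropped
    "dns": [],             # empty placeholder -> dropped
    "debug": False,
    "max_concurrent_downloads": 3,
    "bip": "172.16.0.1/24",
    "hosts": [],           # empty placeholder -> dropped
}


def compact(config: dict) -> dict:
    """Drop keys whose value is None, an empty string, list or dict."""
    return {k: v for k, v in config.items() if v not in (None, "", [], {})}


print(json.dumps(compact(docker_config), indent=2))
# {
#   "data_root": "/var/lib/docker",
#   "debug": false,
#   "max_concurrent_downloads": 3,
#   "bip": "172.16.0.1/24"
# }
```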
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml new file mode 100644 index 0000000..46a8597 --- /dev/null +++ b/roles/docker/handlers/main.yml @@ -0,0 +1,76 @@ +--- + +- name: start containerd + ansible.builtin.service: + name: containerd + state: started + when: + - ansible_os_family | lower == 'archlinux' + +- name: restart docker + ansible.builtin.service: + name: docker + state: restarted + +- name: reload docker + ansible.builtin.service: + name: docker + state: reloaded + +- name: validate config # noqa no-changed-when + ansible.builtin.command: | + dockerd --validate + +- name: daemon reload + ansible.builtin.systemd: + daemon_reload: true + force: true + when: + - ansible_service_mgr == 'systemd' + +- name: information about config changes # noqa no-handler + ansible.builtin.debug: + msg: "{{ _changed_docker_configuration.diff }}" + when: + - _changed_docker_configuration.changed + +- name: create daemon.json + bodsch.docker.docker_common_config: + state: present + log_driver: "{{ docker_config.log_driver | default(omit) }}" + log_opts: "{{ docker_config.log_opts | default(omit) }}" + log_level: "{{ docker_config.log_level | default(omit) }}" + dns: "{{ docker_config.dns | default(omit) }}" + dns_opts: "{{ docker_config.dns_opts | default(omit) }}" + dns_search: "{{ docker_config.dns_search | default(omit) }}" + data_root: "{{ docker_config.data_root | default(omit) }}" + max_concurrent_downloads: "{{ docker_config.max_concurrent_downloads | int | default(omit) }}" + max_concurrent_uploads: "{{ docker_config.max_concurrent_uploads | int | default(omit) }}" + max_download_attempts: "{{ docker_config.max_download_attempts | int | default(omit) }}" + metrics_addr: "{{ docker_config.metrics_addr | default(omit) }}" + debug: "{{ docker_config.debug | default('false') | bool }}" + selinux_enabled: "{{ docker_config.selinux_enabled | default('false') | bool }}" + seccomp_profile: "{{ docker_config.seccomp_profile | default(omit) }}" + experimental: "{{ docker_config.experimental | default('false') | bool }}" + storage_driver: "{{ docker_config.storage_driver | default(omit) }}" + storage_opts: "{{ docker_config.storage_opts | default(omit) }}" + group: "{{ docker_config.group | default(omit) }}" + bridge: "{{ docker_config.bridge | default(omit) }}" + bip: "{{ docker_config.bip | default(omit) }}" + ip: "{{ docker_config.ip | default(omit) }}" + fixed_cidr: "{{ docker_config.fixed_cidr | default(omit) }}" + fixed_cidr_v6: "{{ docker_config.fixed_cidr_v6 | default(omit) }}" + default_gateway: "{{ docker_config.default_gateway | default(omit) }}" + default_gateway_v6: "{{ docker_config.default_gateway_v6 | default(omit) }}" + hosts: "{{ docker_config.hosts | default(omit) }}" + insecure_registries: "{{ docker_config.insecure_registries | default(omit) }}" + shutdown_timeout: "{{ docker_config.shutdown_timeout | int | default(omit) }}" + tls_verify: "{{ docker_config.tls.verify | default('false') | bool }}" + tls_ca_cert: "{{ docker_config.tls.ca_cert | default(omit) }}" + tls_cert: "{{ docker_config.tls.cert | default(omit) }}" + tls_key: "{{ docker_config.tls.key | default(omit) }}" + register: _changed_docker_configuration + notify: + - information about config changes + - restart docker +... 
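The `create daemon.json` handler above routes every option through `| default(omit)`, so anything missing from `docker_config` never reaches the module at all. A rough Python analogue of that pattern (purely illustrative; this is not how Ansible implements `omit` internally):

```python
# Rough analogue of the "| default(omit)" pattern in the handler: keys that
# are not present in docker_config are simply left out of the module call
# instead of being passed as empty values.
OMIT = object()  # stand-in for Ansible's special omit value

docker_config = {"data_root": "/opt/docker", "bip": "172.16.77.77/24"}

module_args = {
    "state": "present",
    "data_root": docker_config.get("data_root", OMIT),
    "log_driver": docker_config.get("log_driver", OMIT),
    "bip": docker_config.get("bip", OMIT),
}

# strip omitted parameters before handing the arguments to the module
module_args = {k: v for k, v in module_args.items() if v is not OMIT}

print(module_args)
# {'state': 'present', 'data_root': '/opt/docker', 'bip': '172.16.77.77/24'}
```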
diff --git a/roles/docker/helper_scripts/docker_config_to_ansible_yaml.sh b/roles/docker/helper_scripts/docker_config_to_ansible_yaml.sh new file mode 100644 index 0000000..fefe4a1 --- /dev/null +++ b/roles/docker/helper_scripts/docker_config_to_ansible_yaml.sh @@ -0,0 +1,16 @@ +# https://docs.docker.com/engine/reference/commandline/dockerd/#/linux-configuration-file + +# Set top-level variables +while read l; do + key=$(echo $l | cut -d : -f 1 | tr -d \" | tr - _); + value=$(echo $l | cut -d : -f 2 | sed 's/.$//'); + echo docker_config_$key: $value; +done < json + +# Set dictionary variable +echo "docker_config_full:" +while read l; do + key=$(echo $l | cut -d : -f 1 | tr -d \" | tr - _); + orig=$(echo $l | cut -d : -f 1); + echo " "$orig: \"{{ docker_config_$key }}\"; +done < json diff --git a/roles/docker/hooks/converge b/roles/docker/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/roles/docker/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/roles/docker/hooks/destroy b/roles/docker/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/roles/docker/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/roles/docker/hooks/lint b/roles/docker/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/roles/docker/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/roles/docker/hooks/molecule.rc b/roles/docker/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/roles/docker/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" + fi +} diff --git a/roles/docker/hooks/test b/roles/docker/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/roles/docker/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/roles/docker/hooks/tox.sh b/roles/docker/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/roles/docker/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. 
hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/roles/docker/hooks/verify b/roles/docker/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/roles/docker/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml new file mode 100644 index 0000000..0eca2bb --- /dev/null +++ b/roles/docker/meta/main.yml @@ -0,0 +1,45 @@ +--- + +galaxy_info: + role_name: docker + + author: George Bolo / Bodo Schulz + description: install and configure docker on various linux systems + + # Some suggested licenses: + # - BSD (default) + # - MIT + # - GPLv2 + # - GPLv3 + # - Apache + # - CC-BY + license: MIT + min_ansible_version: "2.10" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + # 11 + - bullseye + # 12 + - bookworm + - name: Ubuntu + versions: + # 20.04 LTS (Focal Fossa) + - focal + # 22.04 LTS (Jammy Jellyfish) + - jammy + # 22.10 (Kinetic Kudu) + # - kinetic + # 23.04 (Lunar Lobster) + # - lunar + + galaxy_tags: + - docker + - container + - microservices + +dependencies: [] diff --git a/roles/docker/molecule/default/converge.yml b/roles/docker/molecule/default/converge.yml new file mode 100644 index 0000000..6732a9c --- /dev/null +++ b/roles/docker/molecule/default/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + become: false + + environment: + NETRC: '' + + roles: + - role: bodsch.docker.docker diff --git a/roles/docker/molecule/default/group_vars/all/vars.yml b/roles/docker/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..e5383ea --- /dev/null +++ b/roles/docker/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,17 @@ +--- + +# docker_python_packages: +# - compare_direction: "==" +# name: docker +# version: 6.0.0 +# +# - version: 39.1.0 +# name: setuptools +# +# - name: requests +# versions: +# - ">= 2.28.0" +# - "< 2.30.0" +# - "!~ 1.1.0" + +... diff --git a/roles/docker/molecule/default/molecule.yml b/roles/docker/molecule/default/molecule.yml new file mode 100644 index 0000000..a9238e5 --- /dev/null +++ b/roles/docker/molecule/default/molecule.yml @@ -0,0 +1,61 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . 
+ ansible-lint . + flake8 . + +platforms: + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/docker/molecule/default/prepare.yml b/roles/docker/molecule/default/prepare.yml new file mode 100644 index 0000000..8c5fd6f --- /dev/null +++ b/roles/docker/molecule/default/prepare.yml @@ -0,0 +1,39 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +... diff --git a/roles/docker/molecule/default/tests/test_custom_config.py b/roles/docker/molecule/default/tests/test_custom_config.py new file mode 100644 index 0000000..ab433a5 --- /dev/null +++ b/roles/docker/molecule/default/tests/test_custom_config.py @@ -0,0 +1,153 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +from packaging.version import Version + +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("docker") + + +def test_directories(host, get_vars): + """ + """ + docker_config = get_vars.get("docker_config") + docker_version = local_facts(host).get("version", {}).get("docker") + + if docker_config.get("data_root"): + data_root = docker_config.get("data_root") + d = host.file(data_root) + assert d.is_directory + + docker_directories = [ + 'buildkit', + 'containers', + 'image', + 'network', + 'plugins', + 'runtimes', + 'swarm', + 'tmp', + 'volumes'] + + if Version(docker_version) < Version("23.0.0"): + docker_directories.append('trust') + + for directory in docker_directories: + d = host.file(os.path.join(data_root, directory)) + assert d.is_directory + + +def test_listening_socket(host, get_vars): + """ + """ + print(",----------------------------------------------") + for i in host.socket.get_listening_sockets(): + pp.pprint(i) + print("`----------------------------------------------") + + distribution = host.system_info.distribution + release = host.system_info.release + docker_config = get_vars.get("docker_config") + + if 
docker_config.get("hosts"): + listeners = docker_config.get("hosts") + pp.pprint(listeners) + + for socket in listeners: + pp.pprint(socket) + + if distribution == "ubuntu" and release == "18.04" and socket.startswith("unix"): + continue + + socket = host.socket(socket) + assert socket.is_listening diff --git a/roles/docker/molecule/default/tests/test_default.py b/roles/docker/molecule/default/tests/test_default.py new file mode 100644 index 0000000..c4f8a34 --- /dev/null +++ b/roles/docker/molecule/default/tests/test_default.py @@ -0,0 +1,128 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_packages(host): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + 
pp.pprint(distribution) + pp.pprint(release) + + packages = [] + # packages.append("iptables") + + if not distribution == "artix": + if distribution == 'arch': + packages.append("docker") + else: + packages.append("docker-ce") + + for package in packages: + p = host.package(package) + assert p.is_installed + + +@pytest.mark.parametrize("dirs", [ + "/etc/docker", +]) +def test_directories(host, dirs): + + d = host.file(dirs) + assert d.is_directory + assert d.exists + + +def test_service_running_and_enabled(host): + + service = host.service('docker') + assert service.is_running + assert service.is_enabled diff --git a/roles/docker/molecule/dockerd-with-client-config/converge.yml b/roles/docker/molecule/dockerd-with-client-config/converge.yml new file mode 100644 index 0000000..ae6eed7 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-client-config/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: bodsch.docker.docker diff --git a/roles/docker/molecule/dockerd-with-client-config/group_vars/all/snakeoil.yml b/roles/docker/molecule/dockerd-with-client-config/group_vars/all/snakeoil.yml new file mode 100644 index 0000000..509ea4f --- /dev/null +++ b/roles/docker/molecule/dockerd-with-client-config/group_vars/all/snakeoil.yml @@ -0,0 +1,13 @@ +--- + +snakeoil_extract_to: /etc/snakeoil + +snakeoil_domain: docker.local + +snakeoil_life_time: 30 + +snakeoil_alt_names: + - dns: + - daemon.docker.local + +... diff --git a/roles/docker/molecule/dockerd-with-client-config/group_vars/all/vars.yml b/roles/docker/molecule/dockerd-with-client-config/group_vars/all/vars.yml new file mode 100644 index 0000000..c555194 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-client-config/group_vars/all/vars.yml @@ -0,0 +1,52 @@ +--- + +docker_config_diff: true + +docker_client_config: + ## the location we should push client configuration + - location: "/root/.docker/config.json" + enabled: true + auths: + registry.gitfoo.tld: + auth: amVua2luczpydWJiZWwtZGllLWthdHotZHUtZHVtbXNjaHfDpHR6ZXIxCg== + test.tld: + username: "FOO-was-sonst" + password: "ja-toll-schon-wieder-alles-scheisse!" + formats: + ps: + - ".ID" + - ".Names" + - ".Status" + - ".RunningFor" + images: + - ".ID" + - ".Size" + - ".Repository" + - ".Tag" + - ".CreatedAt" + + ## the location we should push client configuration + - location: "/var/tmp/foo/config.json" + state: absent + enabled: false + owner: "nobody" + group: "1000" + auths: + "test.tld": + username: "FOO-was-sonst" + password: "ja-toll-schon-wieder-alles-scheisse!" + formats: + ps: + - ".ID" + - ".Names" + - ".Status" + - ".RunningFor" + + ## must be ignored + - enabled: false + auths: + "test.tld": + username: "FOO-was-sonst" + passwort: "ja-toll-schon-wieder-alles-scheisse!" + +... diff --git a/roles/docker/molecule/dockerd-with-client-config/molecule.yml b/roles/docker/molecule/dockerd-with-client-config/molecule.yml new file mode 100644 index 0000000..a9238e5 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-client-config/molecule.yml @@ -0,0 +1,61 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
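
For reference: in a Docker client config.json, the auth value of an auths entry is conventionally the base64 encoding of "username:password"; entries supplied as plain username/password pairs, as in the test vars above, are presumably encoded the same way by the docker_client_configs module. A minimal Python sketch of that encoding, reusing the hypothetical credentials from the test data:

    import base64
    import json

    def docker_auth_entry(username: str, password: str) -> dict:
        """Build a registry 'auths' entry as found in ~/.docker/config.json."""
        token = base64.b64encode(f"{username}:{password}".encode("utf-8")).decode("ascii")
        return {"auth": token}

    # hypothetical credentials taken from the molecule test vars above
    print(json.dumps(
        {"auths": {"test.tld": docker_auth_entry("FOO-was-sonst",
                                                 "ja-toll-schon-wieder-alles-scheisse!")}},
        indent=2))
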
+ +platforms: + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/docker/molecule/dockerd-with-client-config/prepare.yml b/roles/docker/molecule/dockerd-with-client-config/prepare.yml new file mode 100644 index 0000000..e46b599 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-client-config/prepare.yml @@ -0,0 +1,48 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + + + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: snakeoil + +... diff --git a/roles/docker/molecule/dockerd-with-client-config/requirements.yml b/roles/docker/molecule/dockerd-with-client-config/requirements.yml new file mode 100644 index 0000000..751d345 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-client-config/requirements.yml @@ -0,0 +1,7 @@ +--- + +- name: snakeoil + src: https://github.com/bodsch/ansible-snakeoil + version: 1.5.0 + +... diff --git a/roles/docker/molecule/dockerd-with-client-config/tests/test_custom_config.py b/roles/docker/molecule/dockerd-with-client-config/tests/test_custom_config.py new file mode 100644 index 0000000..59f7022 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-client-config/tests/test_custom_config.py @@ -0,0 +1,156 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +from packaging.version import Version + +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("docker") + + +def test_directories(host, get_vars): + """ + """ + docker_config = get_vars.get("docker_config") + docker_version = local_facts(host).get("version", {}).get("docker") + + if docker_config.get("data_root"): + data_root = docker_config.get("data_root") + d = host.file(data_root) + assert d.is_directory + + docker_directories = [ + 'buildkit', + 'containers', + 'image', + 'network', + 'plugins', + 'runtimes', + 'swarm', + 'tmp', + 'volumes'] + + if Version(docker_version) < Version("23.0.0"): + docker_directories.append('trust') + + for directory in docker_directories: + d = host.file(os.path.join(data_root, directory)) + assert d.is_directory + + +def test_listening_socket(host, get_vars): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + pp.pprint(distribution) + pp.pprint(release) + + for i in host.socket.get_listening_sockets(): + pp.pprint(i) + + docker_config = get_vars.get("docker_config") + + if docker_config.get("hosts"): + + listeners = docker_config.get("hosts") + 
pp.pprint(listeners) + + for socket in listeners: + pp.pprint(socket) + + if distribution == "ubuntu" and release == "18.04" and socket.startswith("unix"): + continue + + socket = host.socket(socket) + assert socket.is_listening diff --git a/roles/docker/molecule/dockerd-with-client-config/tests/test_default.py b/roles/docker/molecule/dockerd-with-client-config/tests/test_default.py new file mode 100644 index 0000000..62ba736 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-client-config/tests/test_default.py @@ -0,0 +1,128 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_packages(host): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + pp.pprint(distribution) + 
pp.pprint(release) + + packages = [] + packages.append("iptables") + + if not distribution == "artix": + if distribution == 'arch': + packages.append("docker") + else: + packages.append("docker-ce") + + for package in packages: + p = host.package(package) + assert p.is_installed + + +@pytest.mark.parametrize("dirs", [ + "/etc/docker", +]) +def test_directories(host, dirs): + + d = host.file(dirs) + assert d.is_directory + assert d.exists + + +def test_service_running_and_enabled(host): + + service = host.service('docker') + assert service.is_running + assert service.is_enabled diff --git a/roles/docker/molecule/dockerd-with-plugin/converge.yml b/roles/docker/molecule/dockerd-with-plugin/converge.yml new file mode 100644 index 0000000..6732a9c --- /dev/null +++ b/roles/docker/molecule/dockerd-with-plugin/converge.yml @@ -0,0 +1,12 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + become: false + + environment: + NETRC: '' + + roles: + - role: bodsch.docker.docker diff --git a/roles/docker/molecule/dockerd-with-plugin/group_vars/all/loki.yml b/roles/docker/molecule/dockerd-with-plugin/group_vars/all/loki.yml new file mode 100644 index 0000000..4f997e8 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-plugin/group_vars/all/loki.yml @@ -0,0 +1,9 @@ +--- + +loki_version: "2.7.0" + +loki_config_server: + http_listen_address: "0.0.0.0" + http_listen_port: 3100 + +... diff --git a/roles/docker/molecule/dockerd-with-plugin/group_vars/all/vars.yml b/roles/docker/molecule/dockerd-with-plugin/group_vars/all/vars.yml new file mode 100644 index 0000000..92ee45c --- /dev/null +++ b/roles/docker/molecule/dockerd-with-plugin/group_vars/all/vars.yml @@ -0,0 +1,73 @@ +--- + +docker_plugins: + - alias: loki + source: grafana/loki-docker-driver + version: "{{ loki_version }}" + state: present + +docker_client_config: + ## the location we should push client configuration + - location: "/root/.docker/config.json" + enabled: false + auths: + registry.gitfoo.tld: + auth: amVua2luczpydWJiZWwtZGllLWthdHotZHUtZHVtbXNjaHfDpHR6ZXIxCg== + formats: + ps: + - ".ID" + - ".Names" + - ".Status" + - ".Labels" + - ".RunningFor" + - ".Ports" + images: + - ".ID" + - ".Size" + - ".Repository" + - ".Tag" + - ".CreatedAt" + + ## the location we should push client configuration + - location: "/var/tmp/foo/config.json" + enabled: true + auths: + "test.tld": + username: "FOO-was-sonst" + password: "ja-toll-schon-wieder-alles-scheisse!" + +docker_config: + # data_root: /opt/docker + max_concurrent_downloads: 10 + debug: false + # enable experimental mode + experimental: false + # expose docker api over socket file and tcp + hosts: + - unix:///var/run/docker.sock + - tcp://0.0.0.0:3485 + # bip: "192.168.9.0/24" + fixed_cidr: "192.168.9.0/24" + # set default search domains + dns_search: + - docker.local + # Binding to IP address without --tlsverify is insecure and gives root access on this machine + # to everyone who has access to your network. 
+ # Binding to an IP address without --tlsverify is deprecated + # Support for listening on TCP without authentication or explicit intent to run without + # authentication will be removed in the next release + tls: false + tlsverify: false + # storage_driver: overlay2 + # logg driver + log_driver: "loki:{{ loki_version }}" + log_opts: + # https://grafana.com/docs/loki/latest/clients/docker-driver/configuration/ + loki-url: "http://loki:3100/loki/api/v1/push" + loki-retries: "3" + loki-batch-size: 400 + labels: "environment" + no-file: "false" + max-size: "10m" + max-file: "3" + env: "molecule,test" diff --git a/roles/docker/molecule/dockerd-with-plugin/molecule.yml b/roles/docker/molecule/dockerd-with-plugin/molecule.yml new file mode 100644 index 0000000..9385b8f --- /dev/null +++ b/roles/docker/molecule/dockerd-with-plugin/molecule.yml @@ -0,0 +1,75 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . + +platforms: + - name: loki + image: "bodsch/ansible-debian:12" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + capabilities: + - ALL + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + - /lib/modules:/lib/modules:Z + # needs only by ubuntu 20.04 + - /var/lib/docker/overlay2:/var/lib/docker/overlay2:Z + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: "${MOLECULE_EPHEMERAL_DIRECTORY}/ansible_facts" + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/docker/molecule/dockerd-with-plugin/prepare.yml b/roles/docker/molecule/dockerd-with-plugin/prepare.yml new file mode 100644 index 0000000..90468f9 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-plugin/prepare.yml @@ -0,0 +1,49 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare loki + hosts: loki + any_errors_fatal: false + + environment: + 
NETRC: '' + + roles: + - role: loki + +... diff --git a/roles/docker/molecule/dockerd-with-plugin/requirements.yml b/roles/docker/molecule/dockerd-with-plugin/requirements.yml new file mode 100644 index 0000000..aa1cfbb --- /dev/null +++ b/roles/docker/molecule/dockerd-with-plugin/requirements.yml @@ -0,0 +1,7 @@ +--- + +- name: loki + src: https://github.com/bodsch/ansible-loki + version: 2.1.0 + +... diff --git a/roles/docker/molecule/dockerd-with-plugin/tests/test_custom_config.py b/roles/docker/molecule/dockerd-with-plugin/tests/test_custom_config.py new file mode 100644 index 0000000..e3d16fe --- /dev/null +++ b/roles/docker/molecule/dockerd-with-plugin/tests/test_custom_config.py @@ -0,0 +1,156 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +from packaging.version import Version + +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('instance') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = 
templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("docker") + + +def test_directories(host, get_vars): + """ + """ + docker_config = get_vars.get("docker_config") + docker_version = local_facts(host).get("version", {}).get("docker") + + if docker_config.get("data_root"): + data_root = docker_config.get("data_root") + d = host.file(data_root) + assert d.is_directory + + docker_directories = [ + 'buildkit', + 'containers', + 'image', + 'network', + 'plugins', + 'runtimes', + 'swarm', + 'tmp', + 'volumes'] + + if Version(docker_version) < Version("23.0.0"): + docker_directories.append('trust') + + for directory in docker_directories: + d = host.file(os.path.join(data_root, directory)) + assert d.is_directory + + +def test_listening_socket(host, get_vars): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + pp.pprint(distribution) + pp.pprint(release) + + for i in host.socket.get_listening_sockets(): + pp.pprint(i) + + docker_config = get_vars.get("docker_config") + + if docker_config.get("hosts"): + + listeners = docker_config.get("hosts") + pp.pprint(listeners) + + for socket in listeners: + pp.pprint(socket) + + if distribution == "ubuntu" and release == "18.04" and socket.startswith("unix"): + continue + + socket = host.socket(socket) + assert socket.is_listening diff --git a/roles/docker/molecule/dockerd-with-plugin/tests/test_default.py b/roles/docker/molecule/dockerd-with-plugin/tests/test_default.py new file mode 100644 index 0000000..cb13796 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-plugin/tests/test_default.py @@ -0,0 +1,128 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('instance') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_packages(host): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + pp.pprint(distribution) + pp.pprint(release) + + packages = [] + packages.append("iptables") + + if not distribution == "artix": + if distribution == 'arch': + packages.append("docker") + else: + packages.append("docker-ce") + + for package in packages: + p = host.package(package) + assert p.is_installed + + +@pytest.mark.parametrize("dirs", [ + "/etc/docker", +]) +def test_directories(host, dirs): + + d = host.file(dirs) + assert d.is_directory + assert d.exists + + +def test_service_running_and_enabled(host): + + service = host.service('docker') + assert service.is_running + assert service.is_enabled diff --git a/roles/docker/molecule/dockerd-with-tls/converge.yml b/roles/docker/molecule/dockerd-with-tls/converge.yml new file mode 100644 index 0000000..ae6eed7 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-tls/converge.yml @@ -0,0 +1,10 @@ +--- +- name: converge + hosts: all + any_errors_fatal: false + + environment: + NETRC: '' + + roles: + - role: bodsch.docker.docker diff --git 
a/roles/docker/molecule/dockerd-with-tls/group_vars/all/snakeoil.yml b/roles/docker/molecule/dockerd-with-tls/group_vars/all/snakeoil.yml new file mode 100644 index 0000000..509ea4f --- /dev/null +++ b/roles/docker/molecule/dockerd-with-tls/group_vars/all/snakeoil.yml @@ -0,0 +1,13 @@ +--- + +snakeoil_extract_to: /etc/snakeoil + +snakeoil_domain: docker.local + +snakeoil_life_time: 30 + +snakeoil_alt_names: + - dns: + - daemon.docker.local + +... diff --git a/roles/docker/molecule/dockerd-with-tls/group_vars/all/vars.yml b/roles/docker/molecule/dockerd-with-tls/group_vars/all/vars.yml new file mode 100644 index 0000000..2e49b6d --- /dev/null +++ b/roles/docker/molecule/dockerd-with-tls/group_vars/all/vars.yml @@ -0,0 +1,65 @@ +--- + +docker_client_defaults: &DOCKER_CLIENTS_DEFAULTS + formats: + ps: + - ".ID" + - ".Names" + - ".Status" + - ".RunningFor" + - ".Ports" + images: + - ".ID" + - ".Size" + - ".Repository" + - ".Tag" + - ".CreatedAt" + +docker_client_config: + ## the location we should push client configuration + - location: "/root/.docker/config.json" + enabled: true + <<: *DOCKER_CLIENTS_DEFAULTS + auths: + "registry.gitfoo.tld": + auth: amVua2luczpydWJiZWwtZGllLWthdHotZHUtZHVtbXNjaHfDpHR6ZXIxCg== + "test.tld": + username: "FOO-was-sonst" + password: "ja-toll-schon-wieder-alles-scheisse!" + + ## the location we should push client configuration + - location: "/var/tmp/foo/config.json" + enabled: false + <<: *DOCKER_CLIENTS_DEFAULTS + auths: + "test.tld": + username: "FOO-was-sonst" + password: "ja-toll-schon-wieder-alles-scheisse!" + +docker_config: + data_root: /opt/docker + max_concurrent_downloads: 10 + debug: false + log_opts: + "cache-disabled": true + "cache-compress": "true" + "cache-max-file": 5 + experimental: false + hosts: + - unix:///run/docker.sock + - tcp://0.0.0.0:3485 + fixed_cidr: "192.168.9.0/24" + dns_search: + - docker.local + metrics_addr: "127.0.0.1:9999" + tls: + verify: true + ca_cert: "{{ snakeoil_extract_to }}/{{ snakeoil_domain }}/{{ snakeoil_domain }}.crt" + cert: "{{ snakeoil_extract_to }}/{{ snakeoil_domain }}/{{ snakeoil_domain }}.pem" + key: "{{ snakeoil_extract_to }}/{{ snakeoil_domain }}/{{ snakeoil_domain }}.key" + # works not in the molecule docker environment + # storage_driver: overlay2 + # storage_opts: + # - overlay2.size=10G + +... diff --git a/roles/docker/molecule/dockerd-with-tls/molecule.yml b/roles/docker/molecule/dockerd-with-tls/molecule.yml new file mode 100644 index 0000000..a9238e5 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-tls/molecule.yml @@ -0,0 +1,61 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
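
For reference: the tls block in the vars above (verify, ca_cert, cert, key) presumably maps onto the standard dockerd daemon.json keys tlsverify, tlscacert, tlscert and tlskey; the exact mapping is decided by the docker_common_config module. A rough Python sketch of the daemon.json fragment one would expect from these test vars, with the certificate paths expanded from snakeoil_extract_to and snakeoil_domain (assumed key names, verify against the rendered /etc/docker/daemon.json):

    import json

    snakeoil_dir = "/etc/snakeoil/docker.local"  # snakeoil_extract_to + snakeoil_domain

    # Assumed daemon.json key spellings for the role's tls.* options.
    expected_tls = {
        "tlsverify": True,
        "tlscacert": f"{snakeoil_dir}/docker.local.crt",
        "tlscert": f"{snakeoil_dir}/docker.local.pem",
        "tlskey": f"{snakeoil_dir}/docker.local.key",
    }
    print(json.dumps(expected_tls, indent=2))
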
+ +platforms: + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/docker/molecule/dockerd-with-tls/prepare.yml b/roles/docker/molecule/dockerd-with-tls/prepare.yml new file mode 100644 index 0000000..e46b599 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-tls/prepare.yml @@ -0,0 +1,48 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + + + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: snakeoil + +... diff --git a/roles/docker/molecule/dockerd-with-tls/requirements.yml b/roles/docker/molecule/dockerd-with-tls/requirements.yml new file mode 100644 index 0000000..751d345 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-tls/requirements.yml @@ -0,0 +1,7 @@ +--- + +- name: snakeoil + src: https://github.com/bodsch/ansible-snakeoil + version: 1.5.0 + +... diff --git a/roles/docker/molecule/dockerd-with-tls/tests/test_custom_config.py b/roles/docker/molecule/dockerd-with-tls/tests/test_custom_config.py new file mode 100644 index 0000000..59f7022 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-tls/tests/test_custom_config.py @@ -0,0 +1,156 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +from packaging.version import Version + +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("docker") + + +def test_directories(host, get_vars): + """ + """ + docker_config = get_vars.get("docker_config") + docker_version = local_facts(host).get("version", {}).get("docker") + + if docker_config.get("data_root"): + data_root = docker_config.get("data_root") + d = host.file(data_root) + assert d.is_directory + + docker_directories = [ + 'buildkit', + 'containers', + 'image', + 'network', + 'plugins', + 'runtimes', + 'swarm', + 'tmp', + 'volumes'] + + if Version(docker_version) < Version("23.0.0"): + docker_directories.append('trust') + + for directory in docker_directories: + d = host.file(os.path.join(data_root, directory)) + assert d.is_directory + + +def test_listening_socket(host, get_vars): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + pp.pprint(distribution) + pp.pprint(release) + + for i in host.socket.get_listening_sockets(): + pp.pprint(i) + + docker_config = get_vars.get("docker_config") + + if docker_config.get("hosts"): + + listeners = docker_config.get("hosts") + 
pp.pprint(listeners) + + for socket in listeners: + pp.pprint(socket) + + if distribution == "ubuntu" and release == "18.04" and socket.startswith("unix"): + continue + + socket = host.socket(socket) + assert socket.is_listening diff --git a/roles/docker/molecule/dockerd-with-tls/tests/test_default.py b/roles/docker/molecule/dockerd-with-tls/tests/test_default.py new file mode 100644 index 0000000..62ba736 --- /dev/null +++ b/roles/docker/molecule/dockerd-with-tls/tests/test_default.py @@ -0,0 +1,128 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_packages(host): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + pp.pprint(distribution) + pp.pprint(release) + + packages 
= [] + packages.append("iptables") + + if not distribution == "artix": + if distribution == 'arch': + packages.append("docker") + else: + packages.append("docker-ce") + + for package in packages: + p = host.package(package) + assert p.is_installed + + +@pytest.mark.parametrize("dirs", [ + "/etc/docker", +]) +def test_directories(host, dirs): + + d = host.file(dirs) + assert d.is_directory + assert d.exists + + +def test_service_running_and_enabled(host): + + service = host.service('docker') + assert service.is_running + assert service.is_enabled diff --git a/roles/docker/molecule/update-config/converge.yml b/roles/docker/molecule/update-config/converge.yml new file mode 100644 index 0000000..4110919 --- /dev/null +++ b/roles/docker/molecule/update-config/converge.yml @@ -0,0 +1,48 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + become: false + + environment: + NETRC: '' + + roles: + - role: bodsch.docker.docker + + +- name: update daemon config + hosts: instance + any_errors_fatal: false + become: false + + environment: + NETRC: '' + + vars: + docker_config: + # data_root: /opt/docker + max_concurrent_downloads: 10 + max_concurrent_uploads: 5 + debug: false + # enable experimental mode + experimental: false + # expose docker api over socket file and tcp + hosts: + - unix:///var/run/docker.sock + - tcp://0.0.0.0:3485 + fixed_cidr: "192.168.9.0/24" + # set default search domains + dns_search: + - docker.local + log_opts: + "max-size": 10m + "max-file": "3" + labels: molecule + env: "os,customer" + + roles: + - role: bodsch.docker.docker + +... diff --git a/roles/docker/molecule/update-config/group_vars/all/vars.yml b/roles/docker/molecule/update-config/group_vars/all/vars.yml new file mode 100644 index 0000000..2d8670d --- /dev/null +++ b/roles/docker/molecule/update-config/group_vars/all/vars.yml @@ -0,0 +1,25 @@ +--- + +docker_config: + # data_root: /opt/docker + max_concurrent_downloads: 10 + debug: false + # enable experimental mode + experimental: false + # expose docker api over socket file and tcp + hosts: + - unix:///var/run/docker.sock + - tcp://0.0.0.0:3485 + # bip: "192.168.9.0/24" + fixed_cidr: "192.168.9.0/24" + # set default search domains + dns_search: + - docker.local + # Binding to IP address without --tlsverify is insecure and gives root access on this machine + # to everyone who has access to your network. + # Binding to an IP address without --tlsverify is deprecated + # Support for listening on TCP without authentication or explicit intent to run without + # authentication will be removed in the next release + tls: false + tlsverify: false + # storage_driver: overlay2 diff --git a/roles/docker/molecule/update-config/molecule.yml b/roles/docker/molecule/update-config/molecule.yml new file mode 100644 index 0000000..084403d --- /dev/null +++ b/roles/docker/molecule/update-config/molecule.yml @@ -0,0 +1,61 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
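
The update-config scenario converges the role twice, the second time with a changed docker_config (adding max_concurrent_uploads and new log_opts), to exercise the configuration update path. The shipped tests only check packages, /etc/docker and the service; the following testinfra sketch is an illustrative extra check, not code from this patch, and it assumes the standard dockerd key spellings in /etc/docker/daemon.json:

    import json

    def test_updated_daemon_json(host):
        """Illustrative only: verify the second converge run updated daemon.json."""
        daemon_json = host.file("/etc/docker/daemon.json")
        assert daemon_json.exists

        config = json.loads(daemon_json.content_string)
        assert config.get("max-concurrent-downloads") == 10
        assert config.get("max-concurrent-uploads") == 5
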
+ +platforms: + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:10}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + cgroupns_mode: host + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + - /var/lib/containerd + capabilities: + - SYS_ADMIN + tmpfs: + - /run + - /tmp + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/docker/molecule/update-config/prepare.yml b/roles/docker/molecule/update-config/prepare.yml new file mode 100644 index 0000000..8c5fd6f --- /dev/null +++ b/roles/docker/molecule/update-config/prepare.yml @@ -0,0 +1,39 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +... diff --git a/roles/docker/molecule/update-config/tests/test_custom_config.py b/roles/docker/molecule/update-config/tests/test_custom_config.py new file mode 100644 index 0000000..5b8538d --- /dev/null +++ b/roles/docker/molecule/update-config/tests/test_custom_config.py @@ -0,0 +1,144 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_directories(host, get_vars): + """ + """ + docker_config = get_vars.get("docker_config") + + if docker_config.get("data_root"): + data_root = docker_config.get("data_root") + d = host.file(data_root) + assert d.is_directory + + docker_directories = [ + 'buildkit', + 'containers', + 'image', + 'network', + 'plugins', + 'runtimes', + 'swarm', + 'tmp', + 'trust', + 'volumes'] + + for directory in docker_directories: + d = host.file(os.path.join(data_root, directory)) + assert d.is_directory + + +def test_listening_socket(host, get_vars): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + pp.pprint(distribution) + pp.pprint(release) + + for i in host.socket.get_listening_sockets(): + pp.pprint(i) + + docker_config = get_vars.get("docker_config") + + if docker_config.get("hosts"): + + listeners = docker_config.get("hosts") + pp.pprint(listeners) + + for socket in listeners: + pp.pprint(socket) + + if distribution == "ubuntu" and release == "18.04" and socket.startswith("unix"): + continue + + socket = host.socket(socket) + assert socket.is_listening diff --git a/roles/docker/molecule/update-config/tests/test_default.py 
b/roles/docker/molecule/update-config/tests/test_default.py new file mode 100644 index 0000000..62ba736 --- /dev/null +++ b/roles/docker/molecule/update-config/tests/test_default.py @@ -0,0 +1,128 @@ + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar +import pytest +import os +import testinfra.utils.ansible_runner + +import pprint +pp = pprint.PrettyPrinter() + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def test_packages(host): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + pp.pprint(distribution) + pp.pprint(release) + + packages = [] + packages.append("iptables") + + if not distribution == "artix": + if distribution == 'arch': + packages.append("docker") + else: + packages.append("docker-ce") + + for package in packages: + p = host.package(package) + assert p.is_installed + + +@pytest.mark.parametrize("dirs", [ + "/etc/docker", +]) 
+def test_directories(host, dirs): + + d = host.file(dirs) + assert d.is_directory + assert d.exists + + +def test_service_running_and_enabled(host): + + service = host.service('docker') + assert service.is_running + assert service.is_enabled diff --git a/roles/docker/tasks/client.obsolete b/roles/docker/tasks/client.obsolete new file mode 100644 index 0000000..8fd7775 --- /dev/null +++ b/roles/docker/tasks/client.obsolete @@ -0,0 +1,30 @@ +--- + +- name: "remove docker client configuration {{ item.location }}" + docker_client_config: + state: absent + dest: "{{ item.location }}" + when: + - not item.enabled | default('false') | bool + +- name: client configuration + when: + - item.enabled | default('false') | bool + block: + - name: "create docker client directory" + ansible.builtin.file: + state: directory + path: "{{ item.location | dirname }}" + owner: "{{ item.owner | default(omit) }}" + group: "{{ item.group | default(omit) }}" + mode: 0770 + + - name: create client configuration {{ item.location }} + docker_client_config: + state: present + dest: "{{ item.location }}" + auths: "{{ item.auths | default({}) }}" + formats: "{{ item.formats | default({}) }}" + # no_log: true + +... diff --git a/roles/docker/tasks/compose.yml b/roles/docker/tasks/compose.yml new file mode 100644 index 0000000..31db4c0 --- /dev/null +++ b/roles/docker/tasks/compose.yml @@ -0,0 +1,35 @@ +--- + +- name: download and install docker compose + when: + - not stat_docker_compose_binary.stat.exists + block: + - name: download docker_compose archive to local folder + become: false + delegate_to: localhost + ansible.builtin.get_url: + url: "https://github.com/docker/compose/releases/download/{{ docker_compose.version }}/docker-compose-Linux-x86_64" + dest: "/tmp/docker-compose-{{ docker_compose.version }}" + mode: 0640 + register: _download_archive + until: _download_archive is succeeded + retries: 5 + delay: 2 + check_mode: false + + - name: propagate docker_compose + ansible.builtin.copy: + src: "/tmp/docker-compose-{{ docker_compose.version }}" + dest: "/usr/local/bin/" + mode: 0755 + owner: root + group: root + +- name: create link from docker_compose_{{ docker_compose.version }} + ansible.builtin.file: + src: "/usr/local/bin/docker-compose-{{ docker_compose.version }}" + dest: "/usr/local/bin/docker-compose" + state: link + force: true + +... 
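
The compose.yml tasks above download a versioned docker-compose binary on the control node, copy it to /usr/local/bin/ and link it to /usr/local/bin/docker-compose. The following testinfra sketch is an illustrative way to verify that result in a molecule scenario; the function name and the 0755 mode check are assumptions, not code from this patch:

    def test_docker_compose_symlink(host):
        """Illustrative only: the link target name depends on docker_compose.version."""
        link = host.file("/usr/local/bin/docker-compose")
        assert link.is_symlink
        assert link.linked_to.startswith("/usr/local/bin/docker-compose-")

        target = host.file(link.linked_to)
        assert target.exists
        assert target.mode == 0o755
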
diff --git a/roles/docker/tasks/configure.yml b/roles/docker/tasks/configure.yml new file mode 100644 index 0000000..1a1f897 --- /dev/null +++ b/roles/docker/tasks/configure.yml @@ -0,0 +1,119 @@ +--- + +- name: create docker config directory + ansible.builtin.file: + path: /etc/docker + state: directory + mode: 0775 + owner: root + group: root + +- name: manage users for docker + ansible.builtin.include_tasks: users.yml + loop: "{{ docker_users }}" + loop_control: + index_var: index + label: "username: {{ item }}" + when: + - docker_users is defined + - docker_users | count > 0 + +- name: manage docker client configuration + bodsch.docker.docker_client_configs: + configs: "{{ docker_client_config }}" + when: + - docker_client_config is defined + - docker_client_config | count > 0 + +# configure proxy settings if enabled +- name: configure systemd + when: + - ansible_service_mgr == 'systemd' + block: + - name: create dropin directory + ansible.builtin.file: + path: "/etc/systemd/system/docker.service.d" + state: "directory" + mode: 0750 + owner: root + group: root + + - name: create systemd dropin to using configuration + ansible.builtin.template: + src: systemd/overwrite.conf.j2 + dest: "/etc/systemd/system/docker.service.d/overwrite.conf" + mode: 0644 + owner: root + group: root + notify: + - daemon reload + + - name: create systemd dropin for proxy + ansible.builtin.template: + src: "systemd/proxy.conf.j2" + dest: "/etc/systemd/system/docker.service.d/proxy.conf" + mode: 0644 + owner: root + group: root + notify: + - daemon reload + when: + - docker_proxy.enabled + +- name: create openrc config file + ansible.builtin.template: + src: openrc/conf.d/docker.j2 + dest: /etc/conf.d/docker + mode: 0644 + owner: root + group: root + notify: + - daemon reload + when: + - ansible_service_mgr == 'openrc' + +- name: flush handlers at this point to avoid double restart + ansible.builtin.meta: flush_handlers + +- name: create docker config file daemon.json + bodsch.docker.docker_common_config: + state: present + diff_output: "{{ docker_config_diff }}" + log_driver: "{{ docker_config.log_driver | default(omit) }}" + log_opts: "{{ docker_config.log_opts | default(omit) }}" + log_level: "{{ docker_config.log_level | default(omit) }}" + dns: "{{ docker_config.dns | default(omit) }}" + dns_opts: "{{ docker_config.dns_opts | default(omit) }}" + dns_search: "{{ docker_config.dns_search | default(omit) }}" + data_root: "{{ docker_config.data_root | default(omit) }}" + max_concurrent_downloads: "{{ docker_config.max_concurrent_downloads | int | default(omit) }}" + max_concurrent_uploads: "{{ docker_config.max_concurrent_uploads | int | default(omit) }}" + max_download_attempts: "{{ docker_config.max_download_attempts | int | default(omit) }}" + metrics_addr: "{{ docker_config.metrics_addr | default(omit) }}" + debug: "{{ docker_config.debug | default('false') | bool }}" + selinux_enabled: "{{ docker_config.selinux_enabled | default('false') | bool }}" + seccomp_profile: "{{ docker_config.seccomp_profile | default(omit) }}" + experimental: "{{ docker_config.experimental | default('false') | bool }}" + storage_driver: "{{ docker_config.storage_driver | default(omit) }}" + storage_opts: "{{ docker_config.storage_opts | default(omit) }}" + group: "{{ docker_config.group | default(omit) }}" + bridge: "{{ docker_config.bridge | default(omit) }}" + bip: "{{ docker_config.bip | default(omit) }}" + ip: "{{ docker_config.ip | default(omit) }}" + fixed_cidr: "{{ docker_config.fixed_cidr | default(omit) }}" + 
fixed_cidr_v6: "{{ docker_config.fixed_cidr_v6 | default(omit) }}" + default_gateway: "{{ docker_config.default_gateway | default(omit) }}" + default_gateway_v6: "{{ docker_config.default_gateway_v6 | default(omit) }}" + hosts: "{{ docker_config.hosts | default(omit) }}" + insecure_registries: "{{ docker_config.insecure_registries | default(omit) }}" + shutdown_timeout: "{{ docker_config.shutdown_timeout | int | default(omit) }}" + tls_verify: "{{ docker_config.tls.verify | default('false') | bool }}" + tls_ca_cert: "{{ docker_config.tls.ca_cert | default(omit) }}" + tls_cert: "{{ docker_config.tls.cert | default(omit) }}" + tls_key: "{{ docker_config.tls.key | default(omit) }}" + register: _changed_docker_configuration + notify: + - restart docker + - information about config changes + +... diff --git a/roles/docker/tasks/install.yml b/roles/docker/tasks/install.yml new file mode 100644 index 0000000..429f798 --- /dev/null +++ b/roles/docker/tasks/install.yml @@ -0,0 +1,114 @@ +--- + +- name: remove default packaged docker + ansible.builtin.package: + name: + - docker + - docker-engine + state: absent + when: + - not (ansible_os_family | lower == 'archlinux' or + ansible_os_family | lower | replace(' ', '') == 'artixlinux') + +- name: install docker + ansible.builtin.package: + name: "{{ docker_packages }}" + state: "{{ docker_state }}" + +- name: create docker run configuration + ansible.builtin.template: + src: "docker.j2" + dest: "{{ docker_defaults_directory }}/docker" + force: true + owner: root + group: root + mode: 0640 + notify: + - validate config + - reload docker + +- name: ensure containerd is running and enabled on archlinux + ansible.builtin.service: + name: containerd + state: started + enabled: "{{ docker_service.enable }}" + when: + - ansible_os_family | lower == 'archlinux' + - ansible_service_mgr | lower == 'systemd' + +- name: ensure {{ docker_service.name }} is running + ansible.builtin.service: + name: "{{ docker_service.name }}" + state: started + register: running_service + ignore_errors: true + +- name: daemon start has failed + when: + - running_service.failed + block: + # - name: journalctl entries from this module + # bodsch.systemd.journalctl: + # identifier: docker + # lines: 10 + # register: journalctl_docker + # when: + # - ansible_service_mgr == 'systemd' + # + # - name: + # debug: + # msg: "{{ journalctl_docker.stdout }}" + # # when: "'failed to start daemon' in journalctl_docker.stdout" + + - name: ensure {{ docker_service.name }} is stopped + ansible.builtin.service: + name: "{{ docker_service.name }}" + state: stopped + + - name: re-create safe docker config file daemon.json + bodsch.docker.docker_common_config: + state: present + diff_output: "{{ docker_config_diff }}" + data_root: "{{ docker_config.data_root | default(omit) }}" + max_concurrent_downloads: "{{ docker_config.max_concurrent_downloads | int | default(omit) }}" + max_concurrent_uploads: "{{ docker_config.max_concurrent_uploads | int | default(omit) }}" + max_download_attempts: "{{ docker_config.max_download_attempts | int | default(omit) }}" + metrics_addr: "{{ docker_config.metrics_addr | default(omit) }}" + debug: "{{ docker_config.debug | default('false') | bool }}" + selinux_enabled: "{{ docker_config.selinux_enabled | default('false') | bool }}" + seccomp_profile: "{{ docker_config.seccomp_profile | default(omit) }}" + experimental: "{{ docker_config.experimental | default('false') | bool }}" + hosts: "{{ docker_config.hosts | default(omit) }}" + insecure_registries: "{{ 
docker_config.insecure_registries | default(omit) }}" + shutdown_timeout: "{{ docker_config.shutdown_timeout | int | default(omit) }}" + tls_verify: "{{ docker_config.tls.verify | default('false') | bool }}" + tls_ca_cert: "{{ docker_config.tls.ca_cert | default(omit) }}" + tls_cert: "{{ docker_config.tls.cert | default(omit) }}" + tls_key: "{{ docker_config.tls.key | default(omit) }}" + + - name: ensure {{ docker_service.name }} is running + ansible.builtin.service: + name: "{{ docker_service.name }}" + state: started + +- name: wait for running docker + ansible.builtin.wait_for: + path: /run/docker.pid + state: present + delay: 2 + sleep: 2 + msg: Timeout to find file /run/docker.pid + +- name: define docker_version + bodsch.docker.docker_version: + register: docker_version + +- name: create custom fact file + bodsch.core.facts: + name: docker + facts: + version: + docker: "{{ docker_version.versions.docker_version }}" + api: "{{ docker_version.versions.api_version }}" + +... diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml new file mode 100644 index 0000000..79ea9c4 --- /dev/null +++ b/roles/docker/tasks/main.yml @@ -0,0 +1,47 @@ +--- + +- name: prepare + ansible.builtin.import_tasks: prepare.yml + tags: + - always + +- name: repositories + ansible.builtin.import_tasks: repositories.yml + when: + - ansible_os_family | lower in ['debian', 'redhat'] + tags: + - docker_repo + +- name: install + ansible.builtin.import_tasks: install.yml + tags: + - docker_install + +- name: plugins + ansible.builtin.import_tasks: plugins.yml + when: + - docker_plugins is defined + - docker_plugins | count > 0 + tags: + - docker_plugins + +- name: configure + ansible.builtin.import_tasks: configure.yml + tags: + - docker_config + +- name: service + ansible.builtin.import_tasks: service.yml + tags: + - docker_service + +- name: compose + ansible.builtin.import_tasks: compose.yml + tags: + - docker_compose + when: + - docker_compose is defined + - docker_compose.install is defined + - docker_compose.install + +... diff --git a/roles/docker/tasks/plugins.yml b/roles/docker/tasks/plugins.yml new file mode 100644 index 0000000..649f60b --- /dev/null +++ b/roles/docker/tasks/plugins.yml @@ -0,0 +1,38 @@ +--- + +- name: ensure {{ docker_service.name }} is running + ansible.builtin.service: + name: "{{ docker_service.name }}" + state: started + +- name: test docker plugins + become: true + remote_user: root + docker_plugins: + state: test + plugin_source: "{{ item.source }}" + plugin_version: "{{ item.version }}" + plugin_alias: "{{ item.alias }}" + data_root: "{{ docker_config.data_root | default(omit) }}" + loop: "{{ docker_plugins }}" + loop_control: + label: "{{ item.alias }}" + register: _plugin_state + +- name: install docker plugins + docker_plugins: + state: present + plugin_source: "{{ item.source }}" + plugin_version: "{{ item.version }}" + plugin_alias: "{{ item.alias }}" + data_root: "{{ docker_config.data_root | default(omit) }}" + loop: "{{ docker_plugins }}" + loop_control: + label: "{{ item.alias }}" + notify: + - create daemon.json + when: + - not _plugin_state.results[0].installed or + not _plugin_state.results[0].equal_versions + +... 
diff --git a/roles/docker/tasks/prepare.yml b/roles/docker/tasks/prepare.yml new file mode 100644 index 0000000..8d1338f --- /dev/null +++ b/roles/docker/tasks/prepare.yml @@ -0,0 +1,86 @@ +--- + +- name: include OS specific configuration ({{ ansible_distribution }} ({{ ansible_os_family }}) {{ ansible_distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version }}.yaml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_distribution | lower }}-{{ ansible_service_mgr | lower }}.yaml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_distribution | lower }}.yaml" + # eg. redhat / debian + - "{{ ansible_os_family | lower }}.yaml" + # artixlinux + - "{{ ansible_os_family | lower | replace(' ', '') }}.yaml" + - default.yaml + skip: true + +- name: merge docker defaults between defaults and custom + ansible.builtin.set_fact: + docker_config: "{{ docker_defaults_config | combine(docker_config, recursive=True) }}" + docker_compose: "{{ docker_defaults_compose | combine(docker_compose, recursive=True) }}" + docker_python_packages: "{{ docker_python_packages | bodsch.core.union_by(docker_defaults_python_packages, union_by='name') }}" + +- name: validate log_driver in docker config + ansible.builtin.set_fact: + valid_docker_config: "{{ docker_config | bodsch.docker.validate_log_driver }}" + +- name: Fail if the log_driver is not properly defined + ansible.builtin.fail: + msg: "{{ valid_docker_config.msg }}" + when: + - valid_docker_config is defined + - not valid_docker_config.valid + +- name: install docker dependencies + ansible.builtin.package: + name: "{{ docker_dependencies }}" + state: present + when: + - docker_dependencies is defined + - docker_dependencies | count > 0 + +- name: python support + when: + - docker_python_packages is defined + - docker_python_packages | count > 0 + block: + - name: create pip requirements file + bodsch.core.pip_requirements: + name: docker + requirements: "{{ docker_python_packages }}" + register: pip_requirements + + - name: fail if pip not installed + ansible.builtin.fail: + msg: python pip is not installed + when: + - not pip_requirements.pip.present + + - name: install docker python packages # noqa no-handler + ansible.builtin.pip: + state: present + requirements: "{{ pip_requirements.requirements_file }}" + extra_args: "{{ docker_python_extra_args | default([]) | bodsch.core.python_extra_args(python_version=ansible_python.version) | default(omit) }}" + when: + - pip_requirements.requirements_file is defined + - pip_requirements.changed + + - name: do facts module to get latest information + ansible.builtin.setup: + +- name: detect installed docker_compose + ansible.builtin.stat: + path: "/usr/local/bin/docker-compose-{{ docker_compose.version }}" + register: stat_docker_compose_binary + when: + - docker_compose is defined + - docker_compose.install is defined + - docker_compose.install + +... 
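Because `docker_config` is merged recursively into `docker_defaults_config` (see the `set_fact` in prepare.yml above), an inventory only has to override the keys it actually needs. A small sketch with assumed values, limited to keys that exist in the defaults:

```yaml
docker_config:
  log_driver: json-file
  log_opts:
    max-size: "10m"
    max-file: "3"
  data_root: /var/lib/docker
  metrics_addr: "127.0.0.1:9323"
```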
diff --git a/roles/docker/tasks/repositories.yml b/roles/docker/tasks/repositories.yml new file mode 100644 index 0000000..f3fd99c --- /dev/null +++ b/roles/docker/tasks/repositories.yml @@ -0,0 +1,47 @@ +--- + +- name: debian based + when: + - ansible_os_family | lower == 'debian' + block: + - name: add apt signing key (debian) + ansible.builtin.apt_key: + id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 + url: https://download.docker.com/linux/ubuntu/gpg + become: true + + - name: install official docker repositories for debian based + ansible.builtin.template: + src: apt/docker-ce.list.j2 + dest: /etc/apt/sources.list.d/docker-ce.list + mode: 0644 + owner: root + group: root + +- name: redhat based + when: + - ansible_os_family | lower == 'redhat' + block: + - name: install official docker repositories for redhat based + ansible.builtin.yum_repository: + name: "docker-ce-{{ item.channel }}" + description: "Docker Repository {{ item.channel }} Channel" + baseurl: "https://download.docker.com/linux/centos/$releasever/$basearch/{{ item.channel }}" + gpgkey: https://download.docker.com/linux/centos/gpg + gpgcheck: true + enabled: "{{ item.enabled }}" + loop: + - channel: stable + enabled: "{{ docker_repo.channel.stable_enabled }}" + - channel: nightly + enabled: "{{ docker_repo.channel.nightly_enabled }}" + - channel: test + enabled: "{{ docker_repo.channel.test_enabled }}" + loop_control: + label: "{{ item.channel }}" + +- name: update package cache + ansible.builtin.package: + update_cache: true + +... diff --git a/roles/docker/tasks/service.yml b/roles/docker/tasks/service.yml new file mode 100644 index 0000000..00ba0ef --- /dev/null +++ b/roles/docker/tasks/service.yml @@ -0,0 +1,12 @@ +--- + +- name: flush handlers at this point to avoid double restart + ansible.builtin.meta: flush_handlers + +- name: ensure {{ docker_service.name }} is running and enabled + ansible.builtin.service: + name: "{{ docker_service.name }}" + state: started + enabled: "{{ docker_service.enable }}" + +... diff --git a/roles/docker/tasks/users.yml b/roles/docker/tasks/users.yml new file mode 100644 index 0000000..1a15223 --- /dev/null +++ b/roles/docker/tasks/users.yml @@ -0,0 +1,18 @@ +--- + +- name: add user {{ item }} to docker group + ansible.builtin.user: + name: "{{ item }}" + groups: docker + append: true + state: present + +- name: set ACL for {{ item }} on /run/docker.sock + ansible.posix.acl: + path: /run/docker.sock + entity: "{{ item }}" + etype: user + permissions: rw + state: present + +... 
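The `docker_users` list that configure.yml feeds into these tasks is a plain list of user names; the names below are hypothetical:

```yaml
docker_users:
  - deployer    # hypothetical user: added to the docker group and granted rw on /run/docker.sock
  - jenkins
```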
diff --git a/roles/docker/templates/apt/docker-ce.list.j2 b/roles/docker/templates/apt/docker-ce.list.j2 new file mode 100644 index 0000000..62dc5be --- /dev/null +++ b/roles/docker/templates/apt/docker-ce.list.j2 @@ -0,0 +1,12 @@ +# docker-ce apt repositories +# managed via Ansible docker role: https://github.com/gbolo/ansible-role-docker + +{% set repo = "stable" %} +{% if docker_repo.channel.nightly_enabled %} +{% set repo = "edge" %} +{% endif %} +{% if docker_repo.channel.test_enabled %} +{% set repo = "test" %} +{% endif %} + +deb [arch={{ docker_apt_arch | default('amd64') }}] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ repo }} diff --git a/roles/docker/templates/docker.j2 b/roles/docker/templates/docker.j2 new file mode 100644 index 0000000..91510f6 --- /dev/null +++ b/roles/docker/templates/docker.j2 @@ -0,0 +1,44 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +# Docker Upstart and SysVinit configuration file + +# +# THIS FILE DOES NOT APPLY TO SYSTEMD +# +# Please see the documentation for "systemd drop-ins": +# https://docs.docker.com/engine/admin/systemd/ +# + +# Customize location of Docker binary (especially for development testing). +#DOCKERD="/usr/local/bin/dockerd" + +# Use DOCKER_OPTS to modify the daemon startup options. +#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" + +# If you need Docker to use an HTTP proxy, it can also be specified here. +#export http_proxy="http://127.0.0.1:3128/" + +# This is also a handy place to tweak where Docker's temporary files go. +#export DOCKER_TMPDIR="/mnt/bigdrive/docker-tmp" + +OPTIONS=" +{% if docker_tls is defined and + docker_tls | count > 0 %} + {% if docker_tls.verify is defined and + docker_tls.verify %} + --tlsverify \ + {% endif %} + {% if docker_tls.ca_cert is defined and + docker_tls.cert is defined and + docker_tls.key is defined and + docker_tls.ca_cert | string | length > 0 and + docker_tls.cert | string | length > 0 and + docker_tls.key | string | length > 0 %} + --tlscacert={{ docker_tls.ca_cert }} \ + --tlscert={{ docker_tls.cert }} \ + --tlskey={{ docker_tls.key }} \ + {% endif %} +{% endif %} + --containerd={{ docker_containerd.socket }} +" diff --git a/roles/docker/templates/openrc/conf.d/docker.j2 b/roles/docker/templates/openrc/conf.d/docker.j2 new file mode 100644 index 0000000..1d102ce --- /dev/null +++ b/roles/docker/templates/openrc/conf.d/docker.j2 @@ -0,0 +1,29 @@ +# /etc/conf.d/docker: config file for /etc/init.d/docker + +# where the docker daemon output gets piped +# this contains both stdout and stderr. 
If you need to separate them, +# see the settings below +#DOCKER_LOGFILE="/var/log/docker.log" + +# where the docker daemon stdout gets piped +# if this is not set, DOCKER_LOGFILE is used +#DOCKER_OUTFILE="/var/log/docker-out.log" + +# where the docker daemon stderr gets piped +# if this is not set, DOCKER_LOGFILE is used +#DOCKER_ERRFILE="/var/log/docker-err.log" + +# where docker's pid get stored +#DOCKER_PIDFILE="/run/docker.pid" + +# Settings for process limits (ulimit) +#DOCKER_ULIMIT="-c unlimited -n 1048576 -u unlimited" + +# seconds to wait for sending SIGTERM and SIGKILL signals when stopping docker +#DOCKER_RETRY="TERM/60/KILL/10" + +# where the docker daemon itself is run from +#DOCKERD_BINARY="/usr/bin/dockerd" + +# any other random options you want to pass to docker +DOCKER_OPTS="--tls=false" diff --git a/roles/docker/templates/openrc/init.d/docker.j2 b/roles/docker/templates/openrc/init.d/docker.j2 new file mode 100644 index 0000000..8d1c8b6 --- /dev/null +++ b/roles/docker/templates/openrc/init.d/docker.j2 @@ -0,0 +1 @@ + diff --git a/roles/docker/templates/systemd/overwrite.conf.j2 b/roles/docker/templates/systemd/overwrite.conf.j2 new file mode 100644 index 0000000..f276bad --- /dev/null +++ b/roles/docker/templates/systemd/overwrite.conf.j2 @@ -0,0 +1,9 @@ +{{ ansible_managed | comment }} + +[Service] + +EnvironmentFile = {{ docker_defaults_directory }}/docker +ExecStart = +ExecStart = /usr/bin/dockerd $OPTIONS + +SyslogIdentifier = docker diff --git a/roles/docker/templates/systemd/proxy.conf.j2 b/roles/docker/templates/systemd/proxy.conf.j2 new file mode 100644 index 0000000..1eefbf9 --- /dev/null +++ b/roles/docker/templates/systemd/proxy.conf.j2 @@ -0,0 +1,4 @@ +[Service] +{% for env in docker_proxy.env %} +Environment="{{ env }}" +{% endfor %} diff --git a/roles/docker/test-requirements.txt b/roles/docker/test-requirements.txt new file mode 100644 index 0000000..267ec92 --- /dev/null +++ b/roles/docker/test-requirements.txt @@ -0,0 +1,12 @@ +ansible-lint +docker +dnspython +flake8 +molecule>=5.0.1 +molecule-plugins[docker] +netaddr +pytest +pytest-testinfra +tox +tox-gh-actions +yamllint diff --git a/roles/docker/tests/inventory b/roles/docker/tests/inventory new file mode 100644 index 0000000..d18580b --- /dev/null +++ b/roles/docker/tests/inventory @@ -0,0 +1 @@ +localhost \ No newline at end of file diff --git a/roles/docker/tests/test.yml b/roles/docker/tests/test.yml new file mode 100644 index 0000000..7935c86 --- /dev/null +++ b/roles/docker/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - ansible-role-docker diff --git a/roles/docker/tox.ini b/roles/docker/tox.ini new file mode 100644 index 0000000..a485358 --- /dev/null +++ b/roles/docker/tox.ini @@ -0,0 +1,35 @@ +[tox] +minversion = 3.25 +toxworkdir = /tmp/.tox/ + +envlist = ansible_{2.9,2.10,3.4,4.10,5.1,5.2,6.1} + +skipsdist = true + +[testenv] +passenv = * + +# allowlist_externals = +# /usr/bin/find +# /bin/sh +# rm + +deps = + -r test-requirements.txt + ansible_4.10: ansible>=4.10,<4.11 + ansible_5.1: ansible>=5.1,<5.2 + ansible_5.2: ansible>=5.2,<5.3 + ansible_5.10: ansible>=5.10,<5.11 + ansible_6.1: ansible>=6.1,<6.2 + ansible_6.7: ansible>=6.7,<6.8 + ansible_7.0: ansible>=7.0,<7.1 + ansible_7.5: ansible>=7.5,<7.6 + ansible_8.0: ansible>=8.0,<8.1 + ansible_8.5: ansible>=8.5,<8.6 + +#commands_pre = +# /usr/bin/find {toxinidir} -type f -not -path '{toxworkdir}/*' -path '*/__pycache__/*' -name '*.py[c|o]' -delete +# /bin/sh -c '/usr/bin/find {homedir}/.cache -type d 
-path "*/molecule_*" -exec rm -rfv \{\} +;' + +commands = + {posargs:molecule test --all --destroy always} diff --git a/roles/docker/vars/archlinux-openrc.yaml b/roles/docker/vars/archlinux-openrc.yaml new file mode 100644 index 0000000..8353af7 --- /dev/null +++ b/roles/docker/vars/archlinux-openrc.yaml @@ -0,0 +1,24 @@ +--- + +docker_dependencies: + - iproute2 + - python-pip + - python-certifi + - python-idna + - python-charset-normalizer + - python-websocket-client + - python-packaging + - python-urllib3 + - python-requests + - python-docker + +docker_defaults_python_packages: [] + +docker_packages: + - docker + - docker-openrc + +docker_containerd: + socket: /run/docker/containerd/containerd.sock + +... diff --git a/roles/docker/vars/archlinux.yaml b/roles/docker/vars/archlinux.yaml new file mode 100644 index 0000000..f7a6b57 --- /dev/null +++ b/roles/docker/vars/archlinux.yaml @@ -0,0 +1,21 @@ +--- + +docker_dependencies: + - iproute2 + - python-pip + # - python-virtualenv + - python-certifi + - python-idna + - python-charset-normalizer + - python-websocket-client + - python-packaging + - python-urllib3 + - python-requests + - python-docker + +docker_defaults_python_packages: [] + +docker_packages: + - docker + +... diff --git a/roles/docker/vars/artixlinux.yaml b/roles/docker/vars/artixlinux.yaml new file mode 100644 index 0000000..8353af7 --- /dev/null +++ b/roles/docker/vars/artixlinux.yaml @@ -0,0 +1,24 @@ +--- + +docker_dependencies: + - iproute2 + - python-pip + - python-certifi + - python-idna + - python-charset-normalizer + - python-websocket-client + - python-packaging + - python-urllib3 + - python-requests + - python-docker + +docker_defaults_python_packages: [] + +docker_packages: + - docker + - docker-openrc + +docker_containerd: + socket: /run/docker/containerd/containerd.sock + +... diff --git a/roles/docker/vars/debian.yaml b/roles/docker/vars/debian.yaml new file mode 100644 index 0000000..f457927 --- /dev/null +++ b/roles/docker/vars/debian.yaml @@ -0,0 +1,16 @@ +--- + +docker_dependencies: + - acl + - python3-pip + - python3-docker + - python3-requests + - python3-urllib3 + # - python3-virtualenv + - net-tools + - iproute2 + - gpg + +docker_defaults_python_packages: [] + +... diff --git a/roles/docker/vars/default.yaml b/roles/docker/vars/default.yaml new file mode 100644 index 0000000..e6080d9 --- /dev/null +++ b/roles/docker/vars/default.yaml @@ -0,0 +1,7 @@ +--- + +docker_dependencies: [] + +docker_packages: [] + +... 
diff --git a/roles/docker/vars/main.yaml b/roles/docker/vars/main.yaml new file mode 100644 index 0000000..b97e512 --- /dev/null +++ b/roles/docker/vars/main.yaml @@ -0,0 +1,96 @@ +--- + +docker_packages: + - acl + - docker-ce + +docker_python_extra_args: [] + +docker_defaults_python_packages: + - name: docker + #- name: setuptools + # version: 39.1.0 + - name: requests + # versions: + # - ">= 2.27.0" + # - "< 2.29.0" + - name: urllib3 + # versions: + # - ">= 1.26.0" + # - "< 2.0.0" + +docker_defaults_compose: + install: false + version: 1.29.2 + +docker_defaults_service: + tls: + verify: false + ca_cert: "" + cert: "" + key: "" + storage: + opts: [] + driver: "" + config_file: "/etc/docker/daemon.json" + +docker_defaults_tls: + verify: false + ca_cert: "" + cert: "" + key: "" + +docker_defaults_config: + authorization_plugins: [] + bip: "" + bridge: "" + data_root: "/var/lib/docker" + debug: false + default_gateway: "" + default_gateway_v6: "" + default_shm_size: "" + default_ulimits: {} + dns: [] + dns_opts: [] + dns_search: [] + experimental: false + fixed_cidr: "" + fixed_cidr_v6: "" + group: "" + hosts: [] + insecure_registries: [] + ip: "" + ip_forward: + ip_masq: + iptables: + ip6tables: + ipv6: + labels: [] + log_driver: "" + log_level: "" + log_opts: {} + max_concurrent_downloads: 3 + max_concurrent_uploads: 5 + max_download_attempts: + metrics_addr: "" + oom_score_adjust: + pidfile: + raw_logs: + registry_mirrors: [] + seccomp_profile: "" + selinux_enabled: false + shutdown_timeout: + storage_driver: "" + storage_opts: [] + tls: + verify: false + ca_cert: "" + cert: "" + key: "" + +docker_containerd: + socket: /run/containerd/containerd.sock + +docker_defaults_directory: /etc/default + +... diff --git a/roles/docker/vars/redhat.yaml b/roles/docker/vars/redhat.yaml new file mode 100644 index 0000000..11a7063 --- /dev/null +++ b/roles/docker/vars/redhat.yaml @@ -0,0 +1,13 @@ +--- + +docker_dependencies: + - iproute + - iptables + # - iptables-ebtables + - python3-docker + - python3-requests + - python3-urllib3 + +docker_defaults_python_packages: [] + +... 
diff --git a/roles/registry/.ansible-lint b/roles/registry/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/roles/registry/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/roles/registry/.editorconfig b/roles/registry/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/roles/registry/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/roles/registry/.flake8 b/roles/registry/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/roles/registry/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/roles/registry/.github/workflows/clean-workflows.yml b/roles/registry/.github/workflows/clean-workflows.yml new file mode 100644 index 0000000..6597352 --- /dev/null +++ b/roles/registry/.github/workflows/clean-workflows.yml @@ -0,0 +1,31 @@ +--- + +name: delete workflow runs + +on: + schedule: + - cron: "10 4 * * 0" + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + +jobs: + delete-workflow-runs: + runs-on: ubuntu-latest + name: delete old workflow runs + steps: + - name: Delete workflow runs + uses: MajorScruffy/delete-old-workflow-runs@v0.3.0 + with: + repository: bodsch/ansible-registry + older-than-seconds: 2592000 # remove all workflow runs older than 30 day + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/roles/registry/.github/workflows/configured.yml b/roles/registry/.github/workflows/configured.yml new file mode 100644 index 0000000..07f8d16 --- /dev/null +++ b/roles/registry/.github/workflows/configured.yml @@ -0,0 +1,60 @@ +--- +name: registry with configuration + +on: + workflow_dispatch: + workflow_run: + workflows: + - "CI" + types: + - completed + +defaults: + run: + working-directory: 'ansible-registry' + +jobs: + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:12 + python_version: + - "3.11.3" + ansible-version: + - '6.7' + scenario: + - configured + + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + path: 'ansible-registry' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/registry/.github/workflows/galaxy.yml b/roles/registry/.github/workflows/galaxy.yml new file mode 100644 index 0000000..906bb2c --- /dev/null +++ b/roles/registry/.github/workflows/galaxy.yml @@ -0,0 +1,30 @@ +--- + +name: push to ansible galaxy + +on: + workflow_dispatch: + workflow_run: + workflows: + - "CI" + branches: + - main + types: + - completed + +jobs: + galaxy: + name: galaxy + runs-on: ubuntu-20.04 + if: ${{ github.event.workflow_run.conclusion == 'success' }} + steps: + - name: Check out the codebase + uses: actions/checkout@v3 + with: + path: 'ansible-registry' + + - name: galaxy + uses: robertdebock/galaxy-action@1.2.1 + with: + galaxy_api_key: ${{ secrets.galaxy_api_key }} + git_branch: main diff --git a/roles/registry/.github/workflows/linter.yml b/roles/registry/.github/workflows/linter.yml new file mode 100644 index 0000000..3209ad2 --- /dev/null +++ b/roles/registry/.github/workflows/linter.yml @@ -0,0 +1,56 @@ +--- + +name: code linter + +on: + schedule: + - cron: "40 1 * * 0" + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + push: + branches: + - 'main' + - 'feature/**' + - '!doc/**' + paths: + - "!Makefile" + - "!README.md" + - "tasks/**" + pull_request: + branches: + - 'main' + - 'feature/**' + - '!doc/**' + paths: + - "!Makefile" + - "!README.md" + - "tasks/**" + +jobs: + lint: + name: linting + runs-on: ubuntu-latest + steps: + - name: 🛎 Checkout + uses: actions/checkout@v3 + + - name: lint + uses: docker://ghcr.io/github/super-linter:slim-v4 + env: + DEFAULT_BRANCH: main + GITHUB_TOKEN: ${{ secrets.GH_REGISTRY_TOKEN }} + VALIDATE_ALL_CODEBASE: true + VALIDATE_ANSIBLE: true + # VALIDATE_MARKDOWN: true + VALIDATE_YAML: true + +... diff --git a/roles/registry/.github/workflows/main.yml b/roles/registry/.github/workflows/main.yml new file mode 100644 index 0000000..651d586 --- /dev/null +++ b/roles/registry/.github/workflows/main.yml @@ -0,0 +1,111 @@ +--- +name: CI + +on: + workflow_run: + workflows: + - "code linter" + types: + - completed + +defaults: + run: + working-directory: 'ansible-registry' + +jobs: + + arch: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - archlinux:latest + - artixlinux:latest + python_version: + - "3.10.11" + - "3.11.3" + ansible-version: + - '6.7' + scenario: + - default + + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + path: 'ansible-registry' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} + + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:11 + - debian:12 + - ubuntu:20.04 + - ubuntu:22.04 + python_version: + - "3.10.11" + - "3.11.3" + ansible-version: + - '6.7' + scenario: + - default + + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + path: 'ansible-registry' + ref: ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: "${{ matrix.python_version }}" + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/registry/.gitignore b/roles/registry/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/roles/registry/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/roles/registry/.yamllint b/roles/registry/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/roles/registry/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/roles/registry/CONTRIBUTING.md b/roles/registry/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/roles/registry/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies and +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. 
+- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. +- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/roles/registry/LICENSE b/roles/registry/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/roles/registry/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/registry/Makefile b/roles/registry/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/roles/registry/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/roles/registry/README.md b/roles/registry/README.md new file mode 100644 index 0000000..b41b1b9 --- /dev/null +++ b/roles/registry/README.md @@ -0,0 +1,211 @@ + +# Ansible Role: `registry` + +Ansible role to install and configure docker [registry](https://github.com/distribution/distribution). 
+ +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-registry/main.yml?branch=main)][ci] +[![GitHub issues](https://img.shields.io/github/issues/bodsch/ansible-registry)][issues] +[![GitHub release (latest by date)](https://img.shields.io/github/v/release/bodsch/ansible-registry)][releases] +[![Ansible Quality Score](https://img.shields.io/ansible/quality/50067?label=role%20quality)][quality] + +[ci]: https://github.com/bodsch/ansible-registry/actions +[issues]: https://github.com/bodsch/ansible-registry/issues?q=is%3Aopen+is%3Aissue +[releases]: https://github.com/bodsch/ansible-registry/releases +[quality]: https://galaxy.ansible.com/bodsch/registry + +If `latest` is set for `registry_version`, the role tries to install the latest release version. +**Please use this with caution, as incompatibilities between releases may occur!** + +The binaries are installed below `/usr/local/bin/registry/${registry_version}` and later linked to `/usr/bin`. +This should make it possible to downgrade relatively safely. + +The downloaded archive is stored on the Ansible controller, unpacked, and the binaries are then copied to the target system. +The cache directory can be defined via the environment variable `CUSTOM_LOCAL_TMP_DIRECTORY`. +By default it is `${HOME}/.cache/ansible/registry`. +If this type of installation is not desired, the download can take place directly on the target system. +However, this must be explicitly activated by setting `registry_direct_download` to `true`. + +## Requirements & Dependencies + +Ansible Collections + +- [bodsch.core](https://github.com/bodsch/ansible-collection-core) +- [bodsch.scm](https://github.com/bodsch/ansible-collection-scm) + +```bash +ansible-galaxy collection install bodsch.core +ansible-galaxy collection install bodsch.scm +``` +or +```bash +ansible-galaxy collection install --requirements-file collections.yml +``` + +## Operating systems + +Tested on + +* Arch Linux +* Debian based + - Debian 10 / 11 + - Ubuntu 20.10 + + +## Contribution + +Please read [Contribution](CONTRIBUTING.md) + +## Development, Branches (Git Tags) + +The `master` branch is my *working horse*; it includes the "latest, hot shit" and can be completely broken! + +If you want to use something stable, please use a [Tagged Version](https://github.com/bodsch/ansible-registry/tags)! 
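## Example Playbook

A minimal sketch; the fully qualified role name matches the one used in the molecule scenarios below, and the host group is an assumption:

```yaml
- hosts: registry
  roles:
    - role: bodsch.docker.registry
```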
+ +## Configuration + +```yaml +registry_version: 2.8.1 + +registry_release_download_url: https://github.com/distribution/distribution/releases + +registry_system_user: registry +registry_system_group: registry +registry_config_dir: /etc/docker/registry + +registry_direct_download: false + +registry_service: {} +registry_log: {} +registry_storage: {} +registry_auth: {} +registry_middleware: {} +registry_reporting: {} +registry_http: {} +registry_notifications: {} +registry_redis: {} +registry_health: {} +registry_proxy: {} +registry_compatibility: {} +registry_validation: {} +``` + +### `registry_log` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#log) + +```yaml +registry_log: + accesslog: + disabled: true + level: info + formatter: text + fields: {} +``` + +### `registry_storage` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#storage) +```yaml +registry_storage: + filesystem: + rootdirectory: /var/lib/registry + maxthreads: 100 + delete: + enabled: false + cache: + blobdescriptorsize: 10000 + +``` + +### `registry_auth` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#auth) +```yaml +registry_auth: {} +``` + +### `registry_middleware` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#middleware) +```yaml +registry_middleware: {} +``` + +### `registry_reporting` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#reporting) +```yaml +registry_reporting: {} +``` + +### `registry_http` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#http) +```yaml + +registry_http: + addr: localhost:5000 + secret: "{{ ansible_host | b64encode }}" + relativeurls: true + debug: + addr: localhost:5001 + prometheus: + enabled: true + path: /metrics +``` + +### `registry_notifications` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#notifications) +```yaml +registry_notifications: {} +``` + +### `registry_redis` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#redis) +```yaml +registry_redis: {} +``` + +### `registry_health` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#health) +```yaml +registry_health: {} +``` + +### `registry_proxy` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#proxy) + +```yaml +registry_proxy: {} +``` + +### `registry_compatibility` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#compatibility) +```yaml +registry_compatibility: {} +``` + +### `registry_validation` + +[upstream doku](https://github.com/distribution/distribution/blob/main/docs/configuration.md#validation) +```yaml +registry_validation: {} +``` + + +--- + +## Author and License + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +**FREE SOFTWARE, HELL YEAH!** diff --git a/roles/registry/collections.yml b/roles/registry/collections.yml new file mode 100644 index 0000000..3a866a7 --- /dev/null +++ b/roles/registry/collections.yml @@ -0,0 +1,5 @@ +--- + +collections: + - name: bodsch.core + - name: bodsch.scm diff --git a/roles/registry/defaults/main.yml b/roles/registry/defaults/main.yml new file mode 100644 index 0000000..aac55b9 --- /dev/null +++ b/roles/registry/defaults/main.yml @@ -0,0 +1,27 @@ +--- + 
+registry_version: 2.8.1 + +registry_release_download_url: https://github.com/distribution/distribution/releases + +registry_system_user: registry +registry_system_group: registry +registry_config_dir: /etc/docker/registry + +registry_direct_download: false + +registry_service: {} +registry_log: {} +registry_storage: {} +registry_auth: {} +registry_middleware: {} +registry_reporting: {} +registry_http: {} +registry_notifications: {} +registry_redis: {} +registry_health: {} +registry_proxy: {} +registry_compatibility: {} +registry_validation: {} + +... diff --git a/roles/registry/handlers/main.yml b/roles/registry/handlers/main.yml new file mode 100644 index 0000000..1d54c80 --- /dev/null +++ b/roles/registry/handlers/main.yml @@ -0,0 +1,26 @@ +--- + +- name: restart registry + become: true + ansible.builtin.service: + name: registry + state: restarted + +- name: reload registry + become: true + ansible.builtin.service: + name: registry + state: reloaded + +- name: validate config + ansible.builtin.command: /bin/true + +- name: daemon-reload + become: true + ansible.builtin.systemd: + daemon_reload: true + force: true + when: + - ansible_service_mgr | lower == "systemd" + +... diff --git a/roles/registry/hooks/converge b/roles/registry/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/roles/registry/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/roles/registry/hooks/destroy b/roles/registry/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/roles/registry/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/roles/registry/hooks/lint b/roles/registry/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/roles/registry/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/roles/registry/hooks/molecule.rc b/roles/registry/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/roles/registry/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" 
+ fi +} diff --git a/roles/registry/hooks/test b/roles/registry/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/roles/registry/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/roles/registry/hooks/tox.sh b/roles/registry/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/roles/registry/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/roles/registry/hooks/verify b/roles/registry/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/roles/registry/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/roles/registry/meta/main.yml b/roles/registry/meta/main.yml new file mode 100644 index 0000000..c1118bd --- /dev/null +++ b/roles/registry/meta/main.yml @@ -0,0 +1,27 @@ +--- + +galaxy_info: + role_name: registry + + author: Bodo Schulz + description: ansible role to setup "the" docker registry + license: Apache + min_ansible_version: "2.9" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + # 11 + - bullseye + - bookworm + + galaxy_tags: + - registry + - docker + +dependencies: [] + +... diff --git a/roles/registry/molecule/configured/converge.yml b/roles/registry/molecule/configured/converge.yml new file mode 100644 index 0000000..2400f1c --- /dev/null +++ b/roles/registry/molecule/configured/converge.yml @@ -0,0 +1,9 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + become: false + + roles: + - role: bodsch.docker.registry diff --git a/roles/registry/molecule/configured/group_vars/all/redis.yml b/roles/registry/molecule/configured/group_vars/all/redis.yml new file mode 100644 index 0000000..e0f81b3 --- /dev/null +++ b/roles/registry/molecule/configured/group_vars/all/redis.yml @@ -0,0 +1,5 @@ +--- + +redis_network_port: 6379 + +... 
diff --git a/roles/registry/molecule/configured/group_vars/all/vars.yml b/roles/registry/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..fb1b829 --- /dev/null +++ b/roles/registry/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,72 @@ +--- + +registry_log: + level: info + formatter: json + +registry_storage: + filesystem: + rootdirectory: /opt/registry + maxthreads: 100 + delete: + enabled: true +# redirect: +# disable: false + cache: + blobdescriptor: redis + blobdescriptorsize: 10000 + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 10m + dryrun: false + +registry_http: + addr: localhost:5000 + relativeurls: true + secret: ZRhgqhaAjdbuFXj2PLJTzYy5PrRsStNaeYWd9c3Ze3 + debug: + addr: localhost:5001 + prometheus: + enabled: true + path: /metrics + +registry_redis: + addr: localhost:6379 + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + tls: + enabled: false + +registry_notifications: + events: + includereferences: true + endpoints: + - name: alistener + disabled: false + url: http://127.0.0.1:8080/api/events + # headers: + headers: + Content-Type: + - application/json + Authorization: + - "Bearer 74RwH03rOPh8kTnIgcCqAWhgV3cGMAuz" + timeout: 1s + threshold: 10 + backoff: 10s + ignoredmediatypes: + - application/octet-stream + ignore: + mediatypes: + - application/octet-stream + actions: + - pull + +... diff --git a/roles/registry/molecule/configured/molecule.yml b/roles/registry/molecule/configured/molecule.yml new file mode 100644 index 0000000..b1af11d --- /dev/null +++ b/roles/registry/molecule/configured/molecule.yml @@ -0,0 +1,59 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
+ +platforms: + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + published_ports: + - 80:80 + - 443:443 + - 5000:5000 + - 5001:5001 + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/registry/molecule/configured/prepare.yml b/roles/registry/molecule/configured/prepare.yml new file mode 100644 index 0000000..f8500aa --- /dev/null +++ b/roles/registry/molecule/configured/prepare.yml @@ -0,0 +1,47 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: instance + gather_facts: true + + roles: + - role: redis + +... diff --git a/roles/registry/molecule/configured/requirements.yml b/roles/registry/molecule/configured/requirements.yml new file mode 100644 index 0000000..3ccea38 --- /dev/null +++ b/roles/registry/molecule/configured/requirements.yml @@ -0,0 +1,6 @@ +--- + +- name: redis + src: bodsch.redis + +... diff --git a/roles/registry/molecule/configured/tests/test_default.py b/roles/registry/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..8f03dd1 --- /dev/null +++ b/roles/registry/molecule/configured/tests/test_default.py @@ -0,0 +1,186 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +import json +import pytest +import os + +import testinfra.utils.ansible_runner + +HOST = 'instance' + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("registry") + + +def test_directories(host, get_vars): + """ + """ + root_dir = get_vars.get("registry_storage", {}).get("filesystem", {}).get("rootdirectory", {}) + + directories = [] + directories.append("/etc/docker/registry") + + if root_dir: + directories.append(root_dir) + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + + version = local_facts(host).get("version") + + install_dir = get_vars.get("registry_install_path") + defaults_dir = get_vars.get("registry_defaults_directory") + config_dir = get_vars.get("registry_config_dir") + + if 'latest' in install_dir: + install_dir = install_dir.replace('latest', version) + + files = [] + files.append("/usr/bin/registry") + + if install_dir: + files.append(f"{install_dir}/registry") + if defaults_dir and not distribution == "artix": + 
files.append(f"{defaults_dir}/registry") + if config_dir: + files.append(f"{config_dir}/config.yml") + + print(files) + + for _file in files: + f = host.file(_file) + assert f.exists + assert f.is_file + + +def test_user(host, get_vars): + """ + """ + user = get_vars.get("registry_system_user", "registry") + group = get_vars.get("registry_system_group", "registry") + + assert host.group(group).exists + assert host.user(user).exists + assert group in host.user(user).groups + assert host.user(user).home == "/nonexistent" + + +def test_service(host, get_vars): + service = host.service("registry") + assert service.is_enabled + assert service.is_running + + +def test_open_port(host, get_vars): + """ + """ + listen_address = "127.0.0.1:5000" + + service = host.socket(f"tcp://{listen_address}") + assert service.is_listening diff --git a/roles/registry/molecule/default/converge.yml b/roles/registry/molecule/default/converge.yml new file mode 100644 index 0000000..2400f1c --- /dev/null +++ b/roles/registry/molecule/default/converge.yml @@ -0,0 +1,9 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + become: false + + roles: + - role: bodsch.docker.registry diff --git a/roles/registry/molecule/default/group_vars/all/vars.yml b/roles/registry/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/roles/registry/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,3 @@ +--- + +... diff --git a/roles/registry/molecule/default/molecule.yml b/roles/registry/molecule/default/molecule.yml new file mode 100644 index 0000000..1da6797 --- /dev/null +++ b/roles/registry/molecule/default/molecule.yml @@ -0,0 +1,57 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
+ +platforms: + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + published_ports: + - 5000:5000 + - 5001:5001 + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/registry/molecule/default/prepare.yml b/roles/registry/molecule/default/prepare.yml new file mode 100644 index 0000000..d095763 --- /dev/null +++ b/roles/registry/molecule/default/prepare.yml @@ -0,0 +1,40 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +... diff --git a/roles/registry/molecule/default/tests/test_default.py b/roles/registry/molecule/default/tests/test_default.py new file mode 100644 index 0000000..8f03dd1 --- /dev/null +++ b/roles/registry/molecule/default/tests/test_default.py @@ -0,0 +1,186 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +import json +import pytest +import os + +import testinfra.utils.ansible_runner + +HOST = 'instance' + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("registry") + + +def test_directories(host, get_vars): + """ + """ + root_dir = get_vars.get("registry_storage", {}).get("filesystem", {}).get("rootdirectory", {}) + + directories = [] + directories.append("/etc/docker/registry") + + if root_dir: + directories.append(root_dir) + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + + version = local_facts(host).get("version") + + install_dir = get_vars.get("registry_install_path") + defaults_dir = get_vars.get("registry_defaults_directory") + config_dir = get_vars.get("registry_config_dir") + + if 'latest' in install_dir: + install_dir = install_dir.replace('latest', version) + + files = [] + files.append("/usr/bin/registry") + + if install_dir: + files.append(f"{install_dir}/registry") + if defaults_dir and not distribution == "artix": + 
files.append(f"{defaults_dir}/registry") + if config_dir: + files.append(f"{config_dir}/config.yml") + + print(files) + + for _file in files: + f = host.file(_file) + assert f.exists + assert f.is_file + + +def test_user(host, get_vars): + """ + """ + user = get_vars.get("registry_system_user", "registry") + group = get_vars.get("registry_system_group", "registry") + + assert host.group(group).exists + assert host.user(user).exists + assert group in host.user(user).groups + assert host.user(user).home == "/nonexistent" + + +def test_service(host, get_vars): + service = host.service("registry") + assert service.is_enabled + assert service.is_running + + +def test_open_port(host, get_vars): + """ + """ + listen_address = "127.0.0.1:5000" + + service = host.socket(f"tcp://{listen_address}") + assert service.is_listening diff --git a/roles/registry/molecule/latest/converge.yml b/roles/registry/molecule/latest/converge.yml new file mode 100644 index 0000000..2400f1c --- /dev/null +++ b/roles/registry/molecule/latest/converge.yml @@ -0,0 +1,9 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + become: false + + roles: + - role: bodsch.docker.registry diff --git a/roles/registry/molecule/latest/group_vars/all/vars.yml b/roles/registry/molecule/latest/group_vars/all/vars.yml new file mode 100644 index 0000000..6c0c22d --- /dev/null +++ b/roles/registry/molecule/latest/group_vars/all/vars.yml @@ -0,0 +1,5 @@ +--- + +registry_version: latest + +... diff --git a/roles/registry/molecule/latest/molecule.yml b/roles/registry/molecule/latest/molecule.yml new file mode 100644 index 0000000..1da6797 --- /dev/null +++ b/roles/registry/molecule/latest/molecule.yml @@ -0,0 +1,57 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
+ +platforms: + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + published_ports: + - 5000:5000 + - 5001:5001 + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/registry/molecule/latest/prepare.yml b/roles/registry/molecule/latest/prepare.yml new file mode 100644 index 0000000..d095763 --- /dev/null +++ b/roles/registry/molecule/latest/prepare.yml @@ -0,0 +1,40 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +... diff --git a/roles/registry/molecule/latest/tests/test_default.py b/roles/registry/molecule/latest/tests/test_default.py new file mode 100644 index 0000000..8f03dd1 --- /dev/null +++ b/roles/registry/molecule/latest/tests/test_default.py @@ -0,0 +1,186 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +import json +import pytest +import os + +import testinfra.utils.ansible_runner + +HOST = 'instance' + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("registry") + + +def test_directories(host, get_vars): + """ + """ + root_dir = get_vars.get("registry_storage", {}).get("filesystem", {}).get("rootdirectory", {}) + + directories = [] + directories.append("/etc/docker/registry") + + if root_dir: + directories.append(root_dir) + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + + version = local_facts(host).get("version") + + install_dir = get_vars.get("registry_install_path") + defaults_dir = get_vars.get("registry_defaults_directory") + config_dir = get_vars.get("registry_config_dir") + + if 'latest' in install_dir: + install_dir = install_dir.replace('latest', version) + + files = [] + files.append("/usr/bin/registry") + + if install_dir: + files.append(f"{install_dir}/registry") + if defaults_dir and not distribution == "artix": + 
files.append(f"{defaults_dir}/registry") + if config_dir: + files.append(f"{config_dir}/config.yml") + + print(files) + + for _file in files: + f = host.file(_file) + assert f.exists + assert f.is_file + + +def test_user(host, get_vars): + """ + """ + user = get_vars.get("registry_system_user", "registry") + group = get_vars.get("registry_system_group", "registry") + + assert host.group(group).exists + assert host.user(user).exists + assert group in host.user(user).groups + assert host.user(user).home == "/nonexistent" + + +def test_service(host, get_vars): + service = host.service("registry") + assert service.is_enabled + assert service.is_running + + +def test_open_port(host, get_vars): + """ + """ + listen_address = "127.0.0.1:5000" + + service = host.socket(f"tcp://{listen_address}") + assert service.is_listening diff --git a/roles/registry/tasks/configure.yml b/roles/registry/tasks/configure.yml new file mode 100644 index 0000000..644d6de --- /dev/null +++ b/roles/registry/tasks/configure.yml @@ -0,0 +1,26 @@ +--- + +- name: create registry configuration directory + ansible.builtin.file: + path: "{{ registry_config_dir }}" + state: directory + owner: root + group: "{{ registry_system_group }}" + mode: 0775 + +- name: create config.yml + ansible.builtin.template: + src: "registry/config.yml.j2" + dest: "{{ registry_config_dir }}/config.yml" + force: true + owner: root + group: "{{ registry_system_group }}" + mode: 0664 + notify: + - validate config + - reload registry + +- name: restart registry if needed + ansible.builtin.meta: flush_handlers + +... diff --git a/roles/registry/tasks/download.yml b/roles/registry/tasks/download.yml new file mode 100644 index 0000000..2ebc767 --- /dev/null +++ b/roles/registry/tasks/download.yml @@ -0,0 +1,52 @@ +--- + +- name: checksum + become: false + delegate_to: localhost + run_once: true + block: + - name: get checksum list + bodsch.scm.github_checksum: + project: distribution + repository: distribution + checksum_file: "registry_{{ registry_version }}_linux_{{ system_architecture }}.tar.gz.sha256" + user: "{{ lookup('env', 'GH_USER') | default(omit) }}" + password: "{{ lookup('env', 'GH_TOKEN') | default(omit) }}" + architecture: "{{ ansible_architecture }}" + system: "{{ ansible_facts.system }}" + version: "v{{ registry_version }}" + register: _latest_checksum + + - name: define checksum for {{ go_arch }} architecture + ansible.builtin.set_fact: + __registry_checksum: "{{ _latest_checksum.checksum }}" + when: + - _latest_checksum.rc == 0 + - _latest_checksum.checksum is defined + - _latest_checksum.checksum | string | length > 0 + +- name: download registry binary archive + become: false + delegate_to: "{{ registry_delegate_to }}" + ansible.builtin.get_url: + url: "{{ registry_release_download_url }}/download/v{{ registry_version }}/registry_{{ registry_version }}_{{ ansible_facts.system }}_{{ system_architecture }}.tar.gz" + dest: "{{ registry_local_tmp_directory }}/registry-{{ system_architecture }}.tar.gz" + checksum: "sha256:{{ __registry_checksum }}" + mode: 0660 + register: _download_archive + until: _download_archive is succeeded + retries: 5 + delay: 2 + check_mode: false + +- name: extract registry archive + become: false + delegate_to: "{{ registry_delegate_to }}" + run_once: "{{ 'false' if registry_direct_download else 'true' }}" + ansible.builtin.unarchive: + src: "{{ registry_local_tmp_directory }}/registry-{{ system_architecture }}.tar.gz" + dest: "{{ registry_local_tmp_directory }}" + copy: false + register: _extract_archive + 
+... diff --git a/roles/registry/tasks/install.yml b/roles/registry/tasks/install.yml new file mode 100644 index 0000000..69ce7cc --- /dev/null +++ b/roles/registry/tasks/install.yml @@ -0,0 +1,99 @@ +--- + +- name: detect binary file for registry on {{ registry_ui_delegate_to }} + become: false + delegate_to: "{{ registry_delegate_to }}" + ansible.builtin.stat: + path: "{{ registry_local_tmp_directory }}/registry" + register: stat_file_binary + +- name: propagate registry binaries + ansible.builtin.copy: + src: "{{ registry_local_tmp_directory }}/registry" + dest: "{{ registry_install_path }}/registry" + mode: 0755 + owner: "{{ registry_system_user }}" + group: "{{ registry_system_group }}" + remote_src: "{{ 'true' if registry_direct_download else 'false' }}" + when: + - stat_file_binary.stat.exists + +- name: make files executable + ansible.builtin.file: + path: "{{ registry_install_path }}/registry" + mode: 0755 + owner: "{{ registry_system_user }}" + group: "{{ registry_system_group }}" + +- name: create custom fact file + bodsch.core.facts: + name: registry + facts: + version: "{{ registry_version }}" + +- name: create registry data directory + ansible.builtin.file: + state: directory + path: "{{ registry_data_dir }}" + mode: 0770 + owner: "{{ registry_system_user }}" + group: "{{ registry_system_group }}" + +- name: create link to binary + ansible.builtin.file: + src: "{{ registry_install_path }}/registry" + dest: "/usr/bin/registry" + state: link + force: true + follow: false + notify: + - restart registry + +- name: systemd + when: + - ansible_service_mgr | lower == "systemd" + block: + - name: create systemd service unit + ansible.builtin.template: + src: "init/systemd/registry.service.j2" + dest: "{{ systemd_lib_directory }}/registry.service" + owner: root + group: root + mode: 0644 + notify: + - daemon-reload + - restart registry + + - name: create systemd service configuration + ansible.builtin.template: + src: "registry.j2" + dest: "{{ registry_defaults_directory }}/registry" + force: true + owner: root + group: "{{ registry_system_group }}" + mode: 0640 + notify: + - validate config + - reload registry + +- name: openrc + when: + - ansible_service_mgr | lower == "openrc" + block: + - name: create openrc service configuration + ansible.builtin.template: + src: "init/openrc/conf.d/registry.j2" + dest: "/etc/conf.d/registry" + owner: root + group: root + mode: 0644 + + - name: create openrc init file + ansible.builtin.template: + src: "init/openrc/init.d/registry.j2" + dest: "/etc/init.d/registry" + owner: root + group: root + mode: 0750 + +... diff --git a/roles/registry/tasks/main.yml b/roles/registry/tasks/main.yml new file mode 100644 index 0000000..4fc3dd6 --- /dev/null +++ b/roles/registry/tasks/main.yml @@ -0,0 +1,20 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yml + +- name: download + ansible.builtin.include_tasks: download.yml + when: + - not stat_registry_binary.stat.exists + +- name: install + ansible.builtin.include_tasks: install.yml + +- name: configure + ansible.builtin.include_tasks: configure.yml + +- name: service + ansible.builtin.include_tasks: service.yml + +... 
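For reference, a short sketch of the inventory variables that switch the download handling in `download.yml` from the controller to the managed host itself. `registry_direct_download` and `registry_version` come from `defaults/main.yml`; the commented cache path is only an assumption (the role also honours the `CUSTOM_LOCAL_TMP_DIRECTORY` environment variable):

```yaml
# group_vars sketch (hypothetical group name)
registry_direct_download: true   # fetch the release archive on the managed node instead of localhost
registry_version: 2.8.1          # pin instead of resolving "latest" via the GitHub API

# registry_local_tmp_directory: /var/cache/ansible/registry/2.8.1   # assumed override
```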
diff --git a/roles/registry/tasks/prepare.yml b/roles/registry/tasks/prepare.yml new file mode 100644 index 0000000..31b0a7b --- /dev/null +++ b/roles/registry/tasks/prepare.yml @@ -0,0 +1,163 @@ +--- + +- name: include OS specific configuration ({{ ansible_distribution }} ({{ ansible_os_family }}) {{ ansible_distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_distribution | lower }}-{{ ansible_service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_distribution | lower }}.yml" + # eg. redhat / debian / archlinux + - "{{ ansible_os_family | lower }}.yml" + # artixlinux + - "{{ ansible_os_family | lower | replace(' ', '') }}.yml" + - default.yaml + skip: true + +- name: define delegate instance for download handling + ansible.builtin.set_fact: + registry_delegate_to: "{{ ansible_host }}" + registry_local_tmp_directory: "{{ + lookup('env', 'CUSTOM_LOCAL_TMP_DIRECTORY') | + default('/var/cache/ansible/registry', true) }}/{{ registry_version }}" + when: + - registry_direct_download + +- name: install dependency + ansible.builtin.package: + name: "{{ registry_requirements }}" + state: present + when: + - registry_requirements | default([]) | count > 0 + +- name: get latest release + delegate_to: localhost + become: false + run_once: true + when: + - registry_version == "latest" + block: + - name: get latest release + delegate_to: localhost + become: false + run_once: true + bodsch.scm.github_latest: + project: distribution + repository: distribution + user: "{{ lookup('env', 'GH_USER') | default(omit) }}" + password: "{{ lookup('env', 'GH_TOKEN') | default(omit) }}" + register: _latest_release + + - name: re-define registry_version + ansible.builtin.set_fact: + registry_version: "{{ _latest_release.latest_release }}" + +- name: detect our installation path + ansible.builtin.stat: + path: "{{ registry_install_path | dirname }}" + get_checksum: false + register: stat_registry_path + ignore_errors: true + +- name: old installation + when: + - stat_registry_path is defined + - stat_registry_path.stat is defined + - stat_registry_path.stat.isdir is defined + - not stat_registry_path.stat.isdir + block: + - name: stop old service + ansible.builtin.service: + name: registry + state: stopped + enabled: false + notify: + - daemon-reload + + - name: remove installed systemd unit + ansible.builtin.file: + state: absent + path: /etc/systemd/system/registry.service + when: + - ansible_service_mgr | lower == "systemd" + + - name: remove installed registry + ansible.builtin.file: + state: absent + path: "{{ stat_registry_path.stat.path }}" + +- name: flush handlers + ansible.builtin.meta: flush_handlers + +- name: detect installed registry binary + ansible.builtin.stat: + path: "{{ registry_install_path }}/registry" + register: stat_registry_binary + +- name: create download directory + become: false + delegate_to: "{{ registry_delegate_to }}" + ansible.builtin.file: + path: "{{ registry_local_tmp_directory }}" + state: directory + mode: 0750 + +- name: user and group handling + when: + - registry_system_user != "root" or registry_system_group != "root" + block: + - name: create registry group + ansible.builtin.group: + name: "{{ registry_system_group }}" + state: present + 
system: true + when: + - registry_system_group != "root" + + - name: create registry user + ansible.builtin.user: + name: "{{ registry_system_user }}" + groups: "{{ registry_system_group }}" + append: true + shell: /usr/sbin/nologin + system: true + createhome: false + home: /nonexistent + when: + - registry_system_user != "root" + +- name: create install directory + ansible.builtin.file: + path: "{{ registry_install_path }}" + state: directory + owner: "{{ registry_system_user }}" + group: "{{ registry_system_group }}" + mode: 0755 + +- name: merge registry configuration between defaults and custom + ansible.builtin.set_fact: + registry_service: "{{ registry_defaults_service | combine(registry_service, recursive=True) }}" + registry_log: "{{ registry_defaults_log | combine(registry_log, recursive=True) }}" + registry_storage: "{{ registry_defaults_storage | combine(registry_storage, recursive=True) }}" + registry_auth: "{{ registry_defaults_auth | combine(registry_auth, recursive=True) }}" + registry_middleware: "{{ registry_defaults_middleware | combine(registry_middleware, recursive=True) }}" + registry_reporting: "{{ registry_defaults_reporting | combine(registry_reporting, recursive=True) }}" + registry_http: "{{ registry_defaults_http | combine(registry_http, recursive=True) }}" + registry_notifications: "{{ registry_defaults_notifications | combine(registry_notifications, recursive=True) }}" + registry_redis: "{{ registry_defaults_redis | combine(registry_redis, recursive=True) }}" + registry_health: "{{ registry_defaults_health | combine(registry_health, recursive=True) }}" + registry_proxy: "{{ registry_defaults_proxy | combine(registry_proxy, recursive=True) }}" + registry_compatibility: "{{ registry_defaults_compatibility | combine(registry_compatibility, recursive=True) }}" + registry_validation: "{{ registry_defaults_validation | combine(registry_validation, recursive=True) }}" + +- name: define registry data directory + ansible.builtin.set_fact: + registry_data_dir: "{{ registry_storage.filesystem.rootdirectory | default('/var/lib/registry') }}" + +... diff --git a/roles/registry/tasks/service.yml b/roles/registry/tasks/service.yml new file mode 100644 index 0000000..ca44699 --- /dev/null +++ b/roles/registry/tasks/service.yml @@ -0,0 +1,10 @@ +--- + +- name: ensure registry is enabled on boot + become: true + ansible.builtin.service: + name: registry + enabled: true + state: started + +... 
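The final `set_fact` block in `prepare.yml` merges every user-supplied `registry_*` dictionary into its `registry_defaults_*` counterpart from `vars/main.yml` with `combine(recursive=True)`, so a host only needs to declare the keys it wants to change. A minimal sketch, assuming the HTTP defaults shown further down in `vars/main.yml`:

```yaml
# group_vars sketch: override a single key
registry_http:
  addr: 0.0.0.0:5000

# after combine(recursive=True) the effective registry_http keeps the
# defaults for secret, relativeurls and the debug/prometheus endpoint
# and only replaces addr.
```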
diff --git a/roles/registry/templates/init/openrc/conf.d/registry.j2 b/roles/registry/templates/init/openrc/conf.d/registry.j2 new file mode 100644 index 0000000..32df4af --- /dev/null +++ b/roles/registry/templates/init/openrc/conf.d/registry.j2 @@ -0,0 +1,9 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +# GOMAXPROCS={{ ansible_processor_vcpus | default(ansible_processor_count) }} + +user="{{ registry_system_user }}" +group="{{ registry_system_group }}" + +command_args="serve /etc/docker/registry/config.yml" diff --git a/roles/registry/templates/init/openrc/init.d/registry.j2 b/roles/registry/templates/init/openrc/init.d/registry.j2 new file mode 100644 index 0000000..8c99cf2 --- /dev/null +++ b/roles/registry/templates/init/openrc/init.d/registry.j2 @@ -0,0 +1,41 @@ +#!/usr/bin/openrc-run + +description="registry, the Open Source Registry implementation for storing and distributing container images using the OCI Distribution Specification" +pidfile=${pidfile:-"/run/${RC_SVCNAME}.pid"} +user=${user:-${SVCNAME}} +group=${group:-${SVCNAME}} + +command="/usr/bin/registry" +supervisor="supervise-daemon" + +command_args="${command_args:-serve /etc/docker/registry/config.yml}" +command_user="${user}:${group}" + +command_args_background="--background" +required_files="/etc/docker/registry/config.yml" + +output_log="/var/log/${RC_SVCNAME}/${RC_SVCNAME}.log" +error_log="${output_log}" +extra_started_commands="reload" + +depend() { + # need localmount net + # use dns + after sysfs net +} + +start_pre() { + [ -d $(dirname ${output_log}) ] || mkdir $(dirname ${output_log}) + chown -R ${command_user} $(dirname ${output_log}) + chown -R ${command_user} {{ registry_data_dir }} +} + +reload() { + ebegin "Reloading ${SVCNAME}" + if [ ! -r "${pidfile}" ]; then + eend 1 "${RC_SVCNAME} not running" + else + kill -s HUP $(cat "${pidfile}") 2> /dev/null + eend $? 
+ fi +} diff --git a/roles/registry/templates/init/systemd/registry.service.j2 b/roles/registry/templates/init/systemd/registry.service.j2 new file mode 100644 index 0000000..7cdab97 --- /dev/null +++ b/roles/registry/templates/init/systemd/registry.service.j2 @@ -0,0 +1,25 @@ +{{ ansible_managed | comment }} + +[Unit] +Description = registry, the Open Source Registry implementation for storing and distributing container images using the OCI Distribution Specification +After = network-online.target + +[Service] +Type = simple +Environment = "GOMAXPROCS={{ ansible_processor_vcpus | default(ansible_processor_count) }}" +EnvironmentFile = {{ registry_defaults_directory }}/registry + +User = {{ registry_system_user }} +Group = {{ registry_system_group }} + +ExecReload = /bin/kill -HUP $MAINPID +ExecStart = /usr/bin/registry $OPTIONS + +LimitNOFILE = 65000 +SyslogIdentifier = registry +Restart = on-failure +RestartSec = 30s +RestartSteps = 20 + +[Install] +WantedBy = multi-user.target diff --git a/roles/registry/templates/registry.j2 b/roles/registry/templates/registry.j2 new file mode 100644 index 0000000..a5e0835 --- /dev/null +++ b/roles/registry/templates/registry.j2 @@ -0,0 +1,25 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +OPTIONS="serve /etc/docker/registry/config.yml" + +{# +# registry --help +`registry` + +Usage: + registry [flags] + registry [command] + +Available Commands: + serve `serve` stores and distributes Docker images + garbage-collect `garbage-collect` deletes layers not referenced by any manifests + help Help about any command + +Flags: + -h, --help=false: help for registry + -v, --version=false: show the version and exit + + +Use "registry help [command]" for more information about a command. +#} diff --git a/roles/registry/templates/registry/config.yml.j2 b/roles/registry/templates/registry/config.yml.j2 new file mode 100644 index 0000000..be4f88f --- /dev/null +++ b/roles/registry/templates/registry/config.yml.j2 @@ -0,0 +1,225 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +--- +# {{ ansible_managed }} +version: 0.1 + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#log #} +{%- if registry_log is defined and + registry_log | count > 0 %} +log: + {% if registry_log.accesslog is defined and + registry_log.accesslog | count > 0 %} + accesslog: + {% if registry_log.accesslog.disabled is defined %} + disabled: {{ registry_log.accesslog.disabled | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} + {% endif %} + {% endif %} + {% if registry_log.level is defined and + registry_log.level | string | length > 0 %} + {% set _log_level = "info" %} + {% if registry_log.level in ["error", "warn", "info", "debug"] %} + {% set _log_level = registry_log.level %} + {% endif %} + level: {{ _log_level }} + {% endif %} + {% if registry_log.formatter is defined and + registry_log.formatter | string | length > 0 %} + {% set _log_formatter = "text" %} + {% if registry_log.formatter in ["text", "json", "logstash"] %} + {% set _log_formatter = registry_log.formatter %} + {% endif %} + formatter: {{ _log_formatter }} + {% endif %} + {% if registry_log.fields is defined and + registry_log.fields | count > 0 %} + fields: + {% for k, v in registry_log.fields.items() %} + {{ k }}: {{ v }} + {% endfor %} + {% endif %} +{% endif -%} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#storage #} +{%- if registry_storage is defined and + registry_storage | count > 0 %} +storage: + {% if 
registry_storage.filesystem is defined and + registry_storage.filesystem | count > 0 %} + filesystem: + {% if registry_storage.filesystem.rootdirectory is defined and + registry_storage.filesystem.rootdirectory | string | length > 0 %} + rootdirectory: {{ registry_storage.filesystem.rootdirectory }} + {% endif %} + {% endif %} + {% if registry_storage.delete is defined and + registry_storage.delete.enabled is defined and + registry_storage.delete.enabled %} + delete: + enabled: true + {% endif %} + {% if registry_storage.cache is defined and + registry_storage.cache | count > 0 %} + cache: + {% if registry_storage.cache.blobdescriptor is defined and + registry_storage.cache.blobdescriptor | string | length > 0 %} + {% set _cache_blobdescriptor = "inmemory" %} + {% if registry_storage.cache.blobdescriptor in ["redis", "inmemory"] %} + {% set _cache_blobdescriptor = registry_storage.cache.blobdescriptor %} + {% endif %} + blobdescriptor: {{ _cache_blobdescriptor }} + {% endif %} + {% if registry_storage.cache.blobdescriptorsize is defined and + registry_storage.cache.blobdescriptorsize | string | length > 0 %} + blobdescriptorsize: {{ registry_storage.cache.blobdescriptorsize }} + {% endif %} + {% endif %} + {% if registry_storage.maintenance is defined and + registry_storage.maintenance | count > 0 %} + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 24h + dryrun: false + readonly: + enabled: false + {% endif %} + {% if registry_storage.redirect is defined and + registry_storage.redirect.disable is defined %} + redirect: + disable: {{ registry_storage.redirect.disable | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} + {% endif %} +{% endif -%} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#auth #} +{%- if registry_auth is defined and + registry_auth | count > 0 %} +auth: + {% if registry_auth.htpasswd is defined and + registry_auth.htpasswd | count > 0 %} + htpasswd: + {% if registry_auth.htpasswd.realm is defined and + registry_auth.htpasswd.realm | string | length > 0 %} + realm: {{ registry_auth.htpasswd.realm }} + {% endif %} + {% if registry_auth.htpasswd.path is defined and + registry_auth.htpasswd.path | string | length > 0 %} + path: {{ registry_auth.htpasswd.path }} + {% endif %} + {% endif %} +{% endif -%} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#middleware #} +{%- if registry_middleware is defined and registry_middleware | count > 0 %} +middleware: + {{ registry_middleware | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} +{% endif -%} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#reporting #} +{%- if registry_reporting is defined and registry_reporting | count > 0 %} +reporting: + {{ registry_reporting | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} +{% endif -%} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#http #} +{% if registry_http is defined and registry_http | count > 0 %} +http: + {{ registry_http | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} +{% endif %} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#notifications #} +{% if registry_notifications is defined %} +notifications: + {# registry_notifications | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) #} + {% if registry_notifications.events is defined and + registry_notifications.events.includereferences is defined and + 
registry_notifications.events.includereferences | string | length > 0 %} + events: + includereferences: {{ registry_notifications.events.includereferences | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} + {% endif %} + {% if registry_notifications.endpoints is defined and + registry_notifications.endpoints | count > 0 %} + endpoints: + {% for e in registry_notifications.endpoints %} + - name: {{ e.name }} + {% if e.disabled is defined %} + disabled: {{ e.disabled | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} + {% endif %} + {% if e.url is defined and e.url | string | length > 0 %} + url: {{ e.url }} + {% endif %} + {% if e.headers is defined and e.headers | count > 0 %} + headers: + {% for k, v in e.headers.items() %} + {{ k }}: + {% for xx in v %} + - {{ xx }} + {% endfor %} + {% endfor %} + {% endif %} + {% if e.timeout is defined and e.timeout | string | length > 0 %} + timeout: {{ e.timeout }} + {% endif %} + {% if e.threshold is defined and e.threshold | string | length > 0 %} + threshold: {{ e.threshold }} + {% endif %} + {% if e.backoff is defined and e.backoff | string | length > 0 %} + backoff: {{ e.backoff }} + {% endif %} + {% if e.ignoredmediatypes is defined and e.ignoredmediatypes | count > 0 %} + ignoredmediatypes: + {% for i in e.ignoredmediatypes %} + - {{ i }} + {% endfor %} + {% endif %} + {% if e.ignore is defined and e.ignore | count > 0 %} + ignore: + {% if e.ignore.mediatypes is defined and e.ignore.mediatypes | count > 0 %} + mediatypes: + {% for i in e.ignore.mediatypes %} + - {{ i }} + {% endfor %} + {% endif %} + {% if e.ignore.actions is defined and e.ignore.actions | count > 0 %} + actions: + {% for i in e.ignore.actions %} + - {{ i }} + {% endfor %} + {% endif %} + {% endif %} + {% endfor %} + {% endif %} +{% endif %} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#redis #} +{%- if registry_redis is defined and registry_redis | count > 0 %} +redis: + {{ registry_redis | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} +{% endif %} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#health #} +{%- if registry_health is defined and registry_health | count > 0 %} +health: + {{ registry_health | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} +{% endif -%} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#proxy #} +{%- if registry_proxy is defined and + registry_proxy | count > 0 %} +proxy: + {{ registry_proxy | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} +{% endif -%} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#compatibility #} +{%- if registry_compatibility is defined and + registry_compatibility | count > 0 %} +compatibility: + {{ registry_compatibility | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} +{% endif -%} + +{# https://github.com/distribution/distribution/blob/main/docs/configuration.md#validation #} +{%- if registry_validation is defined and + registry_validation | count > 0 %} +validation: + {{ registry_validation | to_nice_yaml(indent=2,sort_keys=False) | indent(2, False) }} +{% endif -%} diff --git a/roles/registry/test-requirements.txt b/roles/registry/test-requirements.txt new file mode 100644 index 0000000..267ec92 --- /dev/null +++ b/roles/registry/test-requirements.txt @@ -0,0 +1,12 @@ +ansible-lint +docker +dnspython +flake8 +molecule>=5.0.1 +molecule-plugins[docker] +netaddr +pytest +pytest-testinfra +tox 
+tox-gh-actions +yamllint diff --git a/roles/registry/tox.ini b/roles/registry/tox.ini new file mode 100644 index 0000000..a485358 --- /dev/null +++ b/roles/registry/tox.ini @@ -0,0 +1,35 @@ +[tox] +minversion = 3.25 +toxworkdir = /tmp/.tox/ + +envlist = ansible_{2.9,2.10,3.4,4.10,5.1,5.2,6.1} + +skipsdist = true + +[testenv] +passenv = * + +# allowlist_externals = +# /usr/bin/find +# /bin/sh +# rm + +deps = + -r test-requirements.txt + ansible_4.10: ansible>=4.10,<4.11 + ansible_5.1: ansible>=5.1,<5.2 + ansible_5.2: ansible>=5.2,<5.3 + ansible_5.10: ansible>=5.10,<5.11 + ansible_6.1: ansible>=6.1,<6.2 + ansible_6.7: ansible>=6.7,<6.8 + ansible_7.0: ansible>=7.0,<7.1 + ansible_7.5: ansible>=7.5,<7.6 + ansible_8.0: ansible>=8.0,<8.1 + ansible_8.5: ansible>=8.5,<8.6 + +#commands_pre = +# /usr/bin/find {toxinidir} -type f -not -path '{toxworkdir}/*' -path '*/__pycache__/*' -name '*.py[c|o]' -delete +# /bin/sh -c '/usr/bin/find {homedir}/.cache -type d -path "*/molecule_*" -exec rm -rfv \{\} +;' + +commands = + {posargs:molecule test --all --destroy always} diff --git a/roles/registry/vars/archlinux-openrc.yml b/roles/registry/vars/archlinux-openrc.yml new file mode 100644 index 0000000..86998d9 --- /dev/null +++ b/roles/registry/vars/archlinux-openrc.yml @@ -0,0 +1,6 @@ +--- + +registry_requirements: + - iproute + +... diff --git a/roles/registry/vars/archlinux.yml b/roles/registry/vars/archlinux.yml new file mode 100644 index 0000000..86998d9 --- /dev/null +++ b/roles/registry/vars/archlinux.yml @@ -0,0 +1,6 @@ +--- + +registry_requirements: + - iproute + +... diff --git a/roles/registry/vars/artixlinux.yml b/roles/registry/vars/artixlinux.yml new file mode 100644 index 0000000..86998d9 --- /dev/null +++ b/roles/registry/vars/artixlinux.yml @@ -0,0 +1,6 @@ +--- + +registry_requirements: + - iproute + +... diff --git a/roles/registry/vars/debian.yml b/roles/registry/vars/debian.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/roles/registry/vars/debian.yml @@ -0,0 +1,3 @@ +--- + +... 
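The `config.yml.j2` template above only renders an `auth:` section when `registry_auth` is populated. A minimal sketch for basic authentication with htpasswd, mirroring the commented example in `vars/main.yml`; the htpasswd file itself is assumed to be created outside this role, and the path shown is only an example:

```yaml
registry_auth:
  htpasswd:
    realm: basic-realm
    path: /etc/docker/registry/htpasswd   # assumed location, not managed by the role
```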
diff --git a/roles/registry/vars/main.yml b/roles/registry/vars/main.yml new file mode 100644 index 0000000..935e86c --- /dev/null +++ b/roles/registry/vars/main.yml @@ -0,0 +1,295 @@ +--- + +registry_main_version: "{{ registry_version[0:3] }}" + +registry_install_path: /usr/local/bin/registry/{{ registry_version }} + +registry_delegate_to: localhost + +registry_local_tmp_directory: "{{ + lookup('env', 'CUSTOM_LOCAL_TMP_DIRECTORY') | + default(lookup('env', 'HOME') ~ '/.cache/ansible/registry', true) }}/{{ registry_version }}" + +registry_requirements: + - iproute2 + - gzip + +# ---------------------------------------------------------------------------------------- + +registry_defaults_service: {} + +# https://github.com/registry/registry/blob/master/v2/pkg/config/config.go + +registry_defaults_log: + accesslog: + disabled: true + level: info + formatter: text + fields: {} + # service: registry + # environment: staging + # hooks: + # - type: mail + # disabled: true + # levels: + # - panic + # options: + # smtp: + # addr: mail.example.com:25 + # username: mailuser + # password: password + # insecure: true + # from: sender@example.com + # to: + # - errors@example.com + +registry_defaults_storage: + filesystem: + rootdirectory: /var/lib/registry + maxthreads: 100 + # inmemory: # This driver takes no parameters + delete: + enabled: false + # redirect: + # disable: false + cache: + # blobdescriptor: redis + blobdescriptorsize: 10000 +# maintenance: +# uploadpurging: +# enabled: true +# age: 168h +# interval: 24h +# dryrun: false +# readonly: +# enabled: false + +# azure: +# accountname: accountname +# accountkey: base64encodedaccountkey +# container: containername +# gcs: +# bucket: bucketname +# keyfile: /path/to/keyfile +# credentials: +# type: service_account +# project_id: project_id_string +# private_key_id: private_key_id_string +# private_key: private_key_string +# client_email: client@example.com +# client_id: client_id_string +# auth_uri: http://example.com/auth_uri +# token_uri: http://example.com/token_uri +# auth_provider_x509_cert_url: http://example.com/provider_cert_url +# client_x509_cert_url: http://example.com/client_cert_url +# rootdirectory: /gcs/object/name/prefix +# chunksize: 5242880 +# s3: +# accesskey: awsaccesskey +# secretkey: awssecretkey +# region: us-west-1 +# regionendpoint: http://myobjects.local +# forcepathstyle: true +# accelerate: false +# bucket: bucketname +# encrypt: true +# keyid: mykeyid +# secure: true +# v4auth: true +# chunksize: 5242880 +# multipartcopychunksize: 33554432 +# multipartcopymaxconcurrency: 100 +# multipartcopythresholdsize: 33554432 +# rootdirectory: /s3/object/name/prefix +# usedualstack: false +# swift: +# username: username +# password: password +# authurl: https://storage.myprovider.com/auth/v1.0 or https://storage.myprovider.com/v2.0 or https://storage.myprovider.com/v3/auth +# tenant: tenantname +# tenantid: tenantid +# domain: domain name for Openstack Identity v3 API +# domainid: domain id for Openstack Identity v3 API +# insecureskipverify: true +# region: fr +# container: containername +# rootdirectory: /swift/object/name/prefix +# oss: +# accesskeyid: accesskeyid +# accesskeysecret: accesskeysecret +# region: OSS region name +# endpoint: optional endpoints +# internal: optional internal endpoint +# bucket: OSS bucket +# encrypt: optional enable server-side encryption +# encryptionkeyid: optional KMS key id for encryption +# secure: optional ssl setting +# chunksize: optional size valye +# rootdirectory: optional root 
directory + +registry_defaults_auth: {} +# silly: +# realm: silly-realm +# service: silly-service +# token: +# autoredirect: true +# realm: token-realm +# service: token-service +# issuer: registry-token-issuer +# rootcertbundle: /root/certs/bundle +# htpasswd: +# realm: basic-realm +# path: /path/to/htpasswd + +registry_defaults_middleware: {} +# registry: +# - name: ARegistryMiddleware +# options: +# foo: bar +# repository: +# - name: ARepositoryMiddleware +# options: +# foo: bar +# storage: +# - name: cloudfront +# options: +# baseurl: https://my.cloudfronted.domain.com/ +# privatekey: /path/to/pem +# keypairid: cloudfrontkeypairid +# duration: 3000s +# ipfilteredby: awsregion +# awsregion: us-east-1, use-east-2 +# updatefrequency: 12h +# iprangesurl: https://ip-ranges.amazonaws.com/ip-ranges.json +# storage: +# - name: redirect +# options: +# baseurl: https://example.com/ + +registry_defaults_reporting: {} +# bugsnag: +# apikey: bugsnagapikey +# releasestage: bugsnagreleasestage +# endpoint: bugsnagendpoint +# newrelic: +# licensekey: newreliclicensekey +# name: newrelicname +# verbose: true + +registry_defaults_http: + addr: localhost:5000 + # prefix: /my/nested/registry/ + # host: https://myregistryaddress.org:5000 + secret: "{{ ansible_host | b64encode }}" + relativeurls: true + ## draintimeout: 60s + ## tls: + ## certificate: /path/to/x509/public + ## key: /path/to/x509/private + ## clientcas: + ## - /path/to/ca.pem + ## - /path/to/another/ca.pem + ## letsencrypt: + ## cachefile: /path/to/cache-file + ## email: emailused@letsencrypt.com + ## hosts: [myregistryaddress.org] + debug: + addr: localhost:5001 + prometheus: + enabled: true + path: /metrics +# headers: +# X-Content-Type-Options: [nosniff] +# http2: +# disabled: false + +registry_defaults_notifications: {} +# events: +# includereferences: true +# endpoints: +# - name: alistener +# disabled: false +# url: https://my.listener.com/event +# headers: [] +# timeout: 1s +# threshold: 10 +# backoff: 1s +# ignoredmediatypes: +# - application/octet-stream +# ignore: +# mediatypes: +# - application/octet-stream +# actions: +# - pull + +registry_defaults_redis: {} +# addr: localhost:6379 +# password: asecret +# db: 0 +# dialtimeout: 10ms +# readtimeout: 10ms +# writetimeout: 10ms +# pool: +# maxidle: 16 +# maxactive: 64 +# idletimeout: 300s +# tls: +# enabled: false + +registry_defaults_health: + storagedriver: + enabled: true + interval: 10s + threshold: 3 +# file: +# - file: /path/to/checked/file +# interval: 10s +# http: +# - uri: http://server.to.check/must/return/200 +# headers: +# Authorization: [Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==] +# statuscode: 200 +# timeout: 3s +# interval: 10s +# threshold: 3 +# tcp: +# - addr: redis-server.domain.com:6379 +# timeout: 3s +# interval: 10s +# threshold: 3 + +registry_defaults_proxy: {} +# remoteurl: https://registry-1.docker.io +# username: [username] +# password: [password] + +registry_defaults_compatibility: {} +# schema1: +# signingkeyfile: /etc/registry/key.json +# enabled: true + +registry_defaults_validation: {} +# manifests: +# urls: +# allow: +# - ^https?://([^/]+\.)*example\.com/ +# deny: +# - ^https?://www\.example\.com/ + +# ---------------------------------------------------------------------------------------- + +registry_checksum_url: "{{ registry_release_download_url }}/download/v{{ registry_version }}/registry_{{ registry_version }}_linux_{{ system_architecture }}.tar.gz.sha256" + +registry_defaults_directory: /etc/default + +go_arch_map: + x86_64: 'amd64' + aarch64: 'arm64' + 
armv7l: 'armv7' + armv6l: 'armv6' + +system_architecture: "{{ go_arch_map[ansible_architecture] | default(ansible_architecture) }}" + +systemd_lib_directory: /lib/systemd/system + +... diff --git a/roles/registry_ui/.ansible-lint b/roles/registry_ui/.ansible-lint new file mode 100644 index 0000000..48763f0 --- /dev/null +++ b/roles/registry_ui/.ansible-lint @@ -0,0 +1,5 @@ +--- + +skip_list: + - name[casing] + - name[template] diff --git a/roles/registry_ui/.editorconfig b/roles/registry_ui/.editorconfig new file mode 100644 index 0000000..898cdbd --- /dev/null +++ b/roles/registry_ui/.editorconfig @@ -0,0 +1,23 @@ +# EditorConfig helps developers define and maintain consistent +# coding styles between different editors and IDEs +# https://editorconfig.org/ + + +root = true + +[*] +indent_style = space +indent_size = 2 + +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true +max_line_length = 100 + +[*.py] +indent_style = space +indent_size = 4 + +[*.md] +trim_trailing_whitespace = false diff --git a/roles/registry_ui/.flake8 b/roles/registry_ui/.flake8 new file mode 100644 index 0000000..1962f7e --- /dev/null +++ b/roles/registry_ui/.flake8 @@ -0,0 +1,19 @@ +[flake8] + +# E221 multiple spaces before operator +# E251 unexpected spaces around keyword / parameter equals + +ignore = E221,E251 + +exclude = + # No need to traverse our git directory + .git, + # There's no value in checking cache directories + __pycache__, + .tox + +# E203: https://github.com/python/black/issues/315 +# ignore = D,E741,W503,W504,H,E501,E203 + +max-line-length = 195 + diff --git a/roles/registry_ui/.github/workflows/clean-workflows.yml b/roles/registry_ui/.github/workflows/clean-workflows.yml new file mode 100644 index 0000000..d0a63a5 --- /dev/null +++ b/roles/registry_ui/.github/workflows/clean-workflows.yml @@ -0,0 +1,31 @@ +--- + +name: delete workflow runs + +on: + schedule: + - cron: "10 4 * * 0" + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + +jobs: + delete-workflow-runs: + runs-on: ubuntu-latest + name: delete old workflow runs + steps: + - name: Delete workflow runs + uses: MajorScruffy/delete-old-workflow-runs@v0.3.0 + with: + repository: bodsch/ansible-registry-ui + older-than-seconds: 2592000 # remove all workflow runs older than 30 day + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/roles/registry_ui/.github/workflows/configured.yml b/roles/registry_ui/.github/workflows/configured.yml new file mode 100644 index 0000000..798c77d --- /dev/null +++ b/roles/registry_ui/.github/workflows/configured.yml @@ -0,0 +1,57 @@ +--- +name: registry-ui with configuration + +on: + workflow_dispatch: + workflow_run: + workflows: + - "CI" + types: + - completed + +defaults: + run: + working-directory: 'ansible-registry-ui' + +jobs: + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:11 + ansible-version: + - '6.1' + scenario: + - configured + + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + path: 'ansible-registry-ui' + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/registry_ui/.github/workflows/galaxy.yml b/roles/registry_ui/.github/workflows/galaxy.yml new file mode 100644 index 0000000..7e9f5f9 --- /dev/null +++ b/roles/registry_ui/.github/workflows/galaxy.yml @@ -0,0 +1,30 @@ +--- + +name: push to ansible galaxy + +on: + workflow_dispatch: + workflow_run: + workflows: + - "CI" + branches: + - main + types: + - completed + +jobs: + galaxy: + name: galaxy + runs-on: ubuntu-20.04 + if: ${{ github.event.workflow_run.conclusion == 'success' }} + steps: + - name: Check out the codebase + uses: actions/checkout@v3 + with: + path: 'ansible-registry-ui' + + - name: galaxy + uses: robertdebock/galaxy-action@1.2.1 + with: + galaxy_api_key: ${{ secrets.galaxy_api_key }} + git_branch: main diff --git a/roles/registry_ui/.github/workflows/linter.yml b/roles/registry_ui/.github/workflows/linter.yml new file mode 100644 index 0000000..3209ad2 --- /dev/null +++ b/roles/registry_ui/.github/workflows/linter.yml @@ -0,0 +1,56 @@ +--- + +name: code linter + +on: + schedule: + - cron: "40 1 * * 0" + workflow_dispatch: + inputs: + logLevel: + description: 'Log level' + required: true + default: 'warning' + type: choice + options: + - info + - warning + - debug + push: + branches: + - 'main' + - 'feature/**' + - '!doc/**' + paths: + - "!Makefile" + - "!README.md" + - "tasks/**" + pull_request: + branches: + - 'main' + - 'feature/**' + - '!doc/**' + paths: + - "!Makefile" + - "!README.md" + - "tasks/**" + +jobs: + lint: + name: linting + runs-on: ubuntu-latest + steps: + - name: 🛎 Checkout + uses: actions/checkout@v3 + + - name: lint + uses: docker://ghcr.io/github/super-linter:slim-v4 + env: + DEFAULT_BRANCH: main + GITHUB_TOKEN: ${{ secrets.GH_REGISTRY_TOKEN }} + VALIDATE_ALL_CODEBASE: true + VALIDATE_ANSIBLE: true + # VALIDATE_MARKDOWN: true + VALIDATE_YAML: true + +... diff --git a/roles/registry_ui/.github/workflows/main.yml b/roles/registry_ui/.github/workflows/main.yml new file mode 100644 index 0000000..6a0ca8b --- /dev/null +++ b/roles/registry_ui/.github/workflows/main.yml @@ -0,0 +1,104 @@ +--- +name: CI + +on: + workflow_run: + workflows: + - "code linter" + types: + - completed + +defaults: + run: + working-directory: 'ansible-registry-ui' + +jobs: + + arch: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - archlinux:latest + - artixlinux:latest + ansible-version: + - '5.1' + - '6.1' + scenario: + - default + + steps: + - name: check out the codebase. 
+ uses: actions/checkout@v3 + with: + path: 'ansible-registry-ui' + # ${{ github.event.workflow_run.head_branch }} + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} + + deb: + name: "${{ matrix.image }} / ansible: ${{ matrix.ansible-version }}" + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'schedule' || github.event.workflow_run.conclusion == 'success' }} + strategy: + fail-fast: false + matrix: + image: + - debian:11 + - ubuntu:20.04 + ansible-version: + - '5.1' + - '6.1' + scenario: + - default + + steps: + - name: check out the codebase. + uses: actions/checkout@v3 + with: + path: 'ansible-registry-ui' + + - name: 🐍 set up python + uses: actions/setup-python@v4 + with: + python-version: '3.x' + + - name: install dependencies + run: | + python -m pip install --upgrade pip + pip install -r test-requirements.txt + + - name: test with tox + run: | + make \ + test \ + -e TOX_SCENARIO="${{ matrix.scenario }}" \ + -e TOX_ANSIBLE="ansible_${{ matrix.ansible-version }}" \ + -e DISTRIBUTION="${{ matrix.image }}" + env: + PY_COLORS: '1' + ANSIBLE_FORCE_COLOR: '1' + DISTRIBUTION: ${{ matrix.image }} diff --git a/roles/registry_ui/.gitignore b/roles/registry_ui/.gitignore new file mode 100644 index 0000000..3d35e6a --- /dev/null +++ b/roles/registry_ui/.gitignore @@ -0,0 +1,6 @@ +.tox +.galaxy_install_info +*kate-swp +__pycache__ +.cache +.directory diff --git a/roles/registry_ui/.yamllint b/roles/registry_ui/.yamllint new file mode 100644 index 0000000..20fd7aa --- /dev/null +++ b/roles/registry_ui/.yamllint @@ -0,0 +1,40 @@ +--- +# Based on ansible-lint config +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + colons: + max-spaces-after: -1 + level: error + commas: + max-spaces-after: -1 + level: error + comments: disable + comments-indentation: disable + document-start: disable + empty-lines: + max: 3 + level: error + hyphens: + level: error + indentation: + spaces: 2 + key-duplicates: enable + line-length: + max: 195 + level: warning + new-line-at-end-of-file: disable + new-lines: + type: unix + trailing-spaces: disable + truthy: disable + +ignore: | + molecule/ + .github diff --git a/roles/registry_ui/CONTRIBUTING.md b/roles/registry_ui/CONTRIBUTING.md new file mode 100644 index 0000000..e3cd4cc --- /dev/null +++ b/roles/registry_ui/CONTRIBUTING.md @@ -0,0 +1,31 @@ +Contributing +============ +If you want to contribute to a project and make it better, your help is very welcome. +Contributing is also a great way to learn more about social coding on Github, new technologies and +and their ecosystems and how to make constructive, helpful bug reports, feature requests and the +noblest of all contributions: a good, clean pull request. + +### How to make a clean pull request + +Look for a project's contribution instructions. If there are any, follow them. + +- Create a personal fork of the project on Github. +- Clone the fork on your local machine. Your remote repo on Github is called `origin`. +- Add the original repository as a remote called `upstream`. 
+- If you created your fork a while ago be sure to pull upstream changes into your local repository. +- Create a new branch to work on! Branch from `develop` if it exists, else from `master`. +- Implement/fix your feature, comment your code. +- Follow the code style of the project, including indentation. +- If the project has tests run them! +- Write or adapt tests as needed. +- Add or change the documentation as needed. +- Squash your commits into a single commit. Create a new branch if necessary. +- Push your branch to your fork on Github, the remote `origin`. +- From your fork open a pull request in the correct branch. Target the project's `develop` branch if there is one, else go for `master`! +- If the maintainer requests further changes just push them to your branch. The PR will be updated automatically. +- Once the pull request is approved and merged you can pull the changes from `upstream` to your local repo and delete + your extra branch(es). + +And last but not least: Always write your commit messages in the present tense. +Your commit message should describe what the commit, when applied, does to the +code – not what you did to the code. diff --git a/roles/registry_ui/LICENSE b/roles/registry_ui/LICENSE new file mode 100644 index 0000000..8c8472f --- /dev/null +++ b/roles/registry_ui/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Bodo Schulz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/roles/registry_ui/Makefile b/roles/registry_ui/Makefile new file mode 100644 index 0000000..3abaf48 --- /dev/null +++ b/roles/registry_ui/Makefile @@ -0,0 +1,22 @@ +# +export TOX_SCENARIO ?= default +export TOX_ANSIBLE ?= ansible_6.1 + +.PHONY: converge destroy verify test lint + +default: converge + +converge: + @hooks/converge + +destroy: + @hooks/destroy + +verify: + @hooks/verify + +test: + @hooks/test + +lint: + @hooks/lint diff --git a/roles/registry_ui/README.md b/roles/registry_ui/README.md new file mode 100644 index 0000000..8fbb94c --- /dev/null +++ b/roles/registry_ui/README.md @@ -0,0 +1,206 @@ + +# Ansible Role: `registry-ui` + +Ansible role for installing and configuring Docker [registry-ui](https://github.com/Quiq/docker-registry-ui) +without dependencies on a container. +Natively supports systemd and openrc as init system. 
+ +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/bodsch/ansible-registry-ui/main.yml?branch=main)][ci] +[![GitHub issues](https://img.shields.io/github/issues/bodsch/ansible-registry-ui)][issues] +[![GitHub release (latest by date)](https://img.shields.io/github/v/release/bodsch/ansible-registry-ui)][releases] +[![Ansible Quality Score](https://img.shields.io/ansible/quality/50067?label=role%20quality)][quality] + +[ci]: https://github.com/bodsch/ansible-registry-ui/actions +[issues]: https://github.com/bodsch/ansible-registry-ui/issues?q=is%3Aopen+is%3Aissue +[releases]: https://github.com/bodsch/ansible-registry-ui/releases +[quality]: https://galaxy.ansible.com/bodsch/registry_ui + +If `latest` is set for `registry_ui_version`, the role tries to install the latest release version. +**Please use this with caution, as incompatibilities between releases may occur!** + +The binaries are installed below `/usr/local/bin/registry-ui/${registry_ui_version}` and later linked to `/usr/bin`. +This should make it possible to downgrade relatively safely. + +The downloaded archive is stored on the Ansible controller, unpacked and then the binaries are copied to the target system. +The cache directory can be defined via the environment variable `CUSTOM_LOCAL_TMP_DIRECTORY`. +By default it is `${HOME}/.cache/ansible/registry-ui`. +If this type of installation is not desired, the download can take place directly on the target system. +However, this must be explicitly activated by setting `registry_ui_direct_download` to `true`. + +## Requirements & Dependencies + +Ansible Collections + +- [bodsch.core](https://github.com/bodsch/ansible-collection-core) +- [bodsch.scm](https://github.com/bodsch/ansible-collection-scm) + +```bash +ansible-galaxy collection install bodsch.core +ansible-galaxy collection install bodsch.scm +``` +or +```bash +ansible-galaxy collection install --requirements-file collections.yml +``` + +## Operating systems + +Tested on + +* Arch Linux +* Debian based + - Debian 10 / 11 + - Ubuntu 20.10 + +## Requirements + +A running Docker Registry. + + +## Contribution + +Please read [Contribution](CONTRIBUTING.md) + +## Development, Branches (Git Tags) + +The `master` branch is my *working horse*; it includes the "latest, hot shit" and can be completely broken! + +If you want to use something stable, please use a [Tagged Version](https://github.com/bodsch/ansible-registry-ui/tags)! + +## Configuration + +> **Please note:** The release of the registry-ui binary is done from a fork and not from the [original](https://github.com/Quiq/docker-registry-ui), because the original repository does not provide a go-binary yet! + + +```yaml +registry_ui_version: 0.9.5 + +registry_ui_release_download_url: https://github.com/bodsch/docker-registry-ui/releases + +registry_ui_system_user: registry-ui +registry_ui_system_group: registry-ui +registry_ui_config_dir: /etc/registry-ui +registry_ui_data_dir: /var/lib/registry-ui + +registry_ui_direct_download: false + +registry_ui_service: + log_level: info + +registry_ui_listen: + address: 127.0.0.1 + port: 8000 + +registry_ui_base_path: /ui + +registry_ui_debug: false + +registry_ui_registry: {} + +registry_ui_event: {} + +registry_ui_cache: {} + +registry_ui_admins: [] + +registry_ui_purge: {} +``` + +### `registry_ui_listen` + +Listen interface and port. + +```yaml +registry_ui_listen: + address: 127.0.0.1 + port: 8000 +``` + +### `registry_ui_registry` + +Registry URL with schema and port.
+ +Verify TLS certificate when using https. + +Docker registry credentials. +They need to have a full access to the registry. +If token authentication service is enabled, it will be auto-discovered and those credentials +will be used to obtain access tokens. +When the `password_file` entry is used, the password can be passed as a docker secret +and read from file. This overides the `password` entry. + +```yaml +registry_ui_registry: + url: https://docker-registry.local:5000 + verify_tls: true + username: "" + password: "" + password_file: "" +``` + +### `registry_ui_event` + +Event listener. + +The same one should be configured on Docker registry as Authorization Bearer token. + + +```yaml +registry_ui_event: + listener_token: "" # token + retention_days: 7 + database: + driver: sqlite3 # sqlite3 or mysql + location: "" # data/registry_events.db + username: + password: + hostname: 127.0.0.1:3306 + schemaname: docker_events + deletion_enabled: true + anyone_can_view: true +``` + +### `registry_ui_cache` + +```yaml +registry_ui_cache: + refresh_interval: 10 +``` + +### `registry_ui_admins` + +```yaml +registry_ui_admins: + anyone_can_delete: false + admins: [] +``` + +### `registry_ui_purge` + +Enable built-in cron to schedule purging tags in server mode. +Empty string disables this feature. +Example: `25 54 17 * * *` will run it at 17:54:25 daily. + +Note, the cron schedule format includes seconds! See [robfig/cron](https://godoc.org/github.com/robfig/cron) + +```yaml +registry_ui_purge: + tags_keep_days: 90 + tags_keep_count: 2 + tags_keep_regexp: '' + tags_keep_from_file: '' + tags_schedule: '' +``` + + +--- + +## Author and License + +- Bodo Schulz + +## License + +[Apache](LICENSE) + +**FREE SOFTWARE, HELL YEAH!** diff --git a/roles/registry_ui/collections.yml b/roles/registry_ui/collections.yml new file mode 100644 index 0000000..3a866a7 --- /dev/null +++ b/roles/registry_ui/collections.yml @@ -0,0 +1,5 @@ +--- + +collections: + - name: bodsch.core + - name: bodsch.scm diff --git a/roles/registry_ui/defaults/main.yml b/roles/registry_ui/defaults/main.yml new file mode 100644 index 0000000..266ad8f --- /dev/null +++ b/roles/registry_ui/defaults/main.yml @@ -0,0 +1,35 @@ +--- + +registry_ui_version: 0.9.5 + +registry_ui_release_download_url: https://github.com/bodsch/docker-registry-ui/releases + +registry_ui_system_user: registry-ui +registry_ui_system_group: registry-ui +registry_ui_config_dir: /etc/registry-ui +registry_ui_data_dir: /var/lib/registry-ui + +registry_ui_direct_download: false + +registry_ui_service: + log_level: info + +registry_ui_listen: + address: 127.0.0.1 + port: 8000 + +registry_ui_base_path: / + +registry_ui_debug: false + +registry_ui_registry: {} + +registry_ui_event: {} + +registry_ui_cache: {} + +registry_ui_admins: [] + +registry_ui_purge: {} + +... diff --git a/roles/registry_ui/handlers/main.yml b/roles/registry_ui/handlers/main.yml new file mode 100644 index 0000000..704f99b --- /dev/null +++ b/roles/registry_ui/handlers/main.yml @@ -0,0 +1,26 @@ +--- + +- name: restart registry-ui + become: true + ansible.builtin.service: + name: registry-ui + state: restarted + +- name: reload registry-ui + become: true + ansible.builtin.service: + name: registry-ui + state: reloaded + +- name: validate config + ansible.builtin.command: /bin/true + +- name: daemon-reload + become: true + ansible.builtin.systemd: + daemon_reload: true + force: true + when: + - ansible_service_mgr | lower == "systemd" + +... 
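Taken together, the defaults and handlers above mean the role mostly needs to know where the registry lives and where the UI should listen. A minimal usage sketch, assuming the collections are installed as described in the README; the host group `registry_servers` and the playbook file name are illustrative, not part of the role:

```bash
# Sketch only: install the required collections, then apply the role to an
# illustrative "registry_servers" host group.
ansible-galaxy collection install --requirements-file collections.yml

cat > registry-ui.yml <<'EOF'
---
- hosts: registry_servers
  become: true
  roles:
    - role: bodsch.docker.registry_ui
      vars:
        registry_ui_listen:
          address: 127.0.0.1
          port: 8000
        registry_ui_registry:
          url: http://127.0.0.1:5000
          verify_tls: false
EOF

ansible-playbook --inventory inventory.ini registry-ui.yml
```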
diff --git a/roles/registry_ui/hooks/converge b/roles/registry_ui/hooks/converge new file mode 100755 index 0000000..0c50932 --- /dev/null +++ b/roles/registry_ui/hooks/converge @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "converge" diff --git a/roles/registry_ui/hooks/destroy b/roles/registry_ui/hooks/destroy new file mode 100755 index 0000000..b4a3f8d --- /dev/null +++ b/roles/registry_ui/hooks/destroy @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "destroy" diff --git a/roles/registry_ui/hooks/lint b/roles/registry_ui/hooks/lint new file mode 100755 index 0000000..ef226a0 --- /dev/null +++ b/roles/registry_ui/hooks/lint @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "lint" diff --git a/roles/registry_ui/hooks/molecule.rc b/roles/registry_ui/hooks/molecule.rc new file mode 100644 index 0000000..78c8621 --- /dev/null +++ b/roles/registry_ui/hooks/molecule.rc @@ -0,0 +1,74 @@ + +TOX_ARGS= + +if [ -n "${TOX_SCENARIO}" ] +then + TOX_ARGS="--scenario-name ${TOX_SCENARIO}" +fi + +TOX_OPTS="-e ${TOX_ANSIBLE}" + +vercomp() { + + [[ $1 == $2 ]] && return 0 + v1=$(echo "$1" | sed -e 's|-|.|g') + v2=$(echo "$2" | sed -e 's|-|.|g') + + local IFS=. + local i ver1=($1) ver2=($2) + # fill empty fields in ver1 with zeros + for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)) + do + ver1[i]=0 + done + + for ((i=0; i<${#ver1[@]}; i++)) + do + if [[ -z ${ver2[i]} ]] + then + # fill empty fields in ver2 with zeros + ver2[i]=0 + fi + if ((10#${ver1[i]} > 10#${ver2[i]})) + then + return 1 + fi + if ((10#${ver1[i]} < 10#${ver2[i]})) + then + return 2 + fi + done + return 0 +} + +install_collection() { + local collection="${1}" + + echo "Install the required collection '${collection}'" + ansible-galaxy collection install ${collection} +} + +remove_collection() { + + local collection="${1}" + + namespace="$(echo "${collection}" | cut -d '.' -f1)" + name="$(echo "${collection}" | cut -d '.' -f2)" + + collection="${HOME}/.ansible/collections/ansible_collections/${namespace}/${name}" + + rm \ + --recursive \ + --force \ + "${collection}" +} + +publish() { + + TOKEN="${HOME}/.ansible/galaxy_token" + + if [ -e "${TOKEN}" ] + then + ansible-galaxy import --token=$(cat "${TOKEN}") bodsch # "???" + fi +} diff --git a/roles/registry_ui/hooks/test b/roles/registry_ui/hooks/test new file mode 100755 index 0000000..2869139 --- /dev/null +++ b/roles/registry_ui/hooks/test @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "test" diff --git a/roles/registry_ui/hooks/tox.sh b/roles/registry_ui/hooks/tox.sh new file mode 100755 index 0000000..c93de29 --- /dev/null +++ b/roles/registry_ui/hooks/tox.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. hooks/molecule.rc + +TOX_TEST="${1}" + +if [ -f "./collections.yml" ] +then + for collection in $(grep -v "#" collections.yml | grep "^ - name: " | awk -F ': ' '{print $2}') + do + collections_installed="$(ansible-galaxy collection list | grep ${collection} 2> /dev/null)" + + if [ -z "${collections_installed}" ] + then + install_collection ${collection} + else + collection_version=$(echo "${collections_installed}" | awk -F ' ' '{print $2}') + version="$(grep -v "#" collections.yml | grep -A1 "^ - name: ${collection}" | grep "version: " 2> /dev/null | awk -F ': ' '{print $2}' | sed -e 's|=||' -e 's|>||' -e 's|"||g')" + + echo "The required collection '${collection}' is installed in version ${collection_version}." + + if [ ! -z "${version}" ] + then + + vercomp "${version}" "${collection_version}" + + case $? 
in + 0) op='=' ;; + 1) op='>' ;; + 2) op='<' ;; + esac + + if [[ $op = "=" ]] || [[ $op = ">" ]] + then + # echo "FAIL: Expected '$3', Actual '$op', Arg1 '$1', Arg2 '$2'" + echo "re-install for version ${version}" + + remove_collection ${collection} + install_collection ${collection} + else + : + # echo "Pass: '$1 $op $2'" + fi + else + : + fi + fi + done + echo "" +fi + +tox ${TOX_OPTS} -- molecule ${TOX_TEST} ${TOX_ARGS} diff --git a/roles/registry_ui/hooks/verify b/roles/registry_ui/hooks/verify new file mode 100755 index 0000000..5f436af --- /dev/null +++ b/roles/registry_ui/hooks/verify @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +hooks/tox.sh "verify" diff --git a/roles/registry_ui/meta/main.yml b/roles/registry_ui/meta/main.yml new file mode 100644 index 0000000..8a80433 --- /dev/null +++ b/roles/registry_ui/meta/main.yml @@ -0,0 +1,27 @@ +--- + +galaxy_info: + role_name: registry_ui + + author: Bodo Schulz + description: ansible role to setup a Web UI for a docker registry + license: Apache + min_ansible_version: "2.10" + + platforms: + - name: ArchLinux + - name: Debian + versions: + # 10 + - buster + # 11 + - bullseye + + galaxy_tags: + - registry + - web + - webui + +dependencies: [] + +... diff --git a/roles/registry_ui/molecule/configured/converge.yml b/roles/registry_ui/molecule/configured/converge.yml new file mode 100644 index 0000000..5190178 --- /dev/null +++ b/roles/registry_ui/molecule/configured/converge.yml @@ -0,0 +1,11 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + become: false + + roles: + - role: bodsch.docker.registry_ui + +... diff --git a/roles/registry_ui/molecule/configured/group_vars/all/htpasswd.yml b/roles/registry_ui/molecule/configured/group_vars/all/htpasswd.yml new file mode 100644 index 0000000..6a852ef --- /dev/null +++ b/roles/registry_ui/molecule/configured/group_vars/all/htpasswd.yml @@ -0,0 +1,21 @@ +--- + +htpasswd_credentials_path: /etc/nginx/htpasswd.d + +htpasswd_credentials: + + - path: "{{ htpasswd_credentials_path }}/.admin-passwdfile" + users: + - username: admin + password: ZRhgqhaAjdbuFXj2PLJTzYy5PrRsStNaeYWd9c3Ze3 + - username: registry + password: registry + + - path: /etc/docker/registry/.registry-passwdfile + mode: "u=rw,g=r,o-r" + group: registry + users: + - username: registry + password: registry + crypt_scheme: md5_crypt +... diff --git a/roles/registry_ui/molecule/configured/group_vars/all/nginx.yml b/roles/registry_ui/molecule/configured/group_vars/all/nginx.yml new file mode 100644 index 0000000..aa1c45c --- /dev/null +++ b/roles/registry_ui/molecule/configured/group_vars/all/nginx.yml @@ -0,0 +1,136 @@ +--- + +nginx_events: + multi_accept: true + +nginx_gzip: + enabled: true + +nginx_logformat: + json_combined: + format: | + '{' + '"time_local": "$time_local",' + '"remote_addr": "$remote_addr",' + '"remote_user": "$remote_user",' + '"request": "$request",' + '"status": "$status",' + '"body_bytes_sent": "$body_bytes_sent",' + '"request_time": "$request_time",' + '"http_referrer": "$http_referer",' + '"http_user_agent": "$http_user_agent"' + '}'; + # escape: json + +# The client_max_body_size parameter is now set to 16384m, making the maximum upload size equal to 16GB. +nginx_http: + rewrite_log: true + client_max_body_size: 16384m + + extra_options: | + ## Set a variable to help us decide if we need to add the + ## 'Docker-Distribution-Api-Version' header. + ## The registry always sets this header. 
+ ## In the case of nginx performing auth, the header is unset + ## since nginx is auth-ing before proxying. + map $upstream_http_docker_distribution_api_version $docker_distribution_api_version { + '' 'registry/2.0'; + } + + +nginx_custom_includes: + registry.conf: | + # https://nginx.org/en/docs/http/ngx_http_core_module.html#satisfy + satisfy any; + # Die IP des HAProxy + # damit ist es möglich das Jobs, die über den Jenkins aufgerufen werden und gegen + # die admin URLs (https://admin.*.DOMAIN.TLD/admin/*) gehen + # ohne Username/Passwort zu nutzen + # Alle anderen Requests benötigen weiterhin die Authentifizierung via BA + allow 192.168.0.0/24; + deny all; + + auth_basic "Administrator’s Area"; + auth_basic_user_file "{{ htpasswd_credentials_path }}/.admin-passwdfile"; + + +nginx_vhosts: + - name: registry + filename: 00-registry.conf + state: present # default: present + enabled: true # default: true + + domains: + - registry.molecule.lan + - molecule.molecule.lan + + # creates also an HTTP vhost only for redirect to HTTPS + #redirect: + # from_port: 80 + + listen: + - "80" + # - "443 ssl http2" + + upstreams: + - name: registry + servers: + - 127.0.0.1:5000 + - name: registry_ui_metrics + servers: + - 127.0.0.1:5001 + - name: registry_ui_ui + servers: + - 127.0.0.1:8000 + + logfiles: + access: + file: /var/log/nginx/registry.molecule.lan/access.log + # loglevel: json_combined + error: + file: /var/log/nginx/registry.molecule.lan/error.log + loglevel: notice + + # # enable ssl + # ssl: + # enabled: false + # certificate: /etc/snakeoil/matrix.lan/matrix.lan.crt + # certificate_key: /etc/snakeoil/matrix.lan/matrix.lan.key + # dhparam: /etc/snakeoil/matrix.lan/dh.pem + + locations: + "/metrics": + options: | + add_header X-Backend "registry-metrics"; + + proxy_pass http://registry_ui_metrics; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + "/": + options: | + add_header X-Backend "registry-ui"; + + proxy_pass http://registry_ui_ui; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + "/v2/": + options: | + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + add_header X-Backend "registry"; + add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always; + + proxy_pass http://registry; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 900; diff --git a/roles/registry_ui/molecule/configured/group_vars/all/redis.yml b/roles/registry_ui/molecule/configured/group_vars/all/redis.yml new file mode 100644 index 0000000..e0f81b3 --- /dev/null +++ b/roles/registry_ui/molecule/configured/group_vars/all/redis.yml @@ -0,0 +1,5 @@ +--- + +redis_network_port: 6379 + +... 
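The vhost above fans incoming traffic out to three upstreams: the registry API under `/v2/` (with the `Docker-Distribution-Api-Version` header re-added by nginx), the registry's debug/metrics listener under `/metrics`, and the UI on `/`. A rough smoke test of that wiring could look like the sketch below; the hostname and the `registry:registry` credentials come from these molecule fixtures and are assumptions, not part of the role:

```bash
# Rough smoke test of the nginx fan-out defined above (molecule fixture names).
HOST_HEADER="registry.molecule.lan"

# Registry API: expect a "Docker-Distribution-Api-Version: registry/2.0" header.
curl -si -u registry:registry -H "Host: ${HOST_HEADER}" http://127.0.0.1/v2/ \
  | grep -i 'docker-distribution-api-version'

# Registry metrics, proxied from the debug listener on 127.0.0.1:5001.
curl -s -H "Host: ${HOST_HEADER}" http://127.0.0.1/metrics | head -n 5

# The UI itself, proxied from 127.0.0.1:8000.
curl -sI -H "Host: ${HOST_HEADER}" http://127.0.0.1/
```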
diff --git a/roles/registry_ui/molecule/configured/group_vars/all/registry.yml b/roles/registry_ui/molecule/configured/group_vars/all/registry.yml new file mode 100644 index 0000000..8c5c46f --- /dev/null +++ b/roles/registry_ui/molecule/configured/group_vars/all/registry.yml @@ -0,0 +1,72 @@ +--- + +registry_log: + level: info + formatter: json + +registry_storage: + filesystem: + # rootdirectory: /opt/registry + maxthreads: 100 + delete: + enabled: true +# redirect: +# disable: false + cache: + blobdescriptor: redis + blobdescriptorsize: 10000 + maintenance: + uploadpurging: + enabled: true + age: 168h + interval: 10m + dryrun: false + +registry_http: + addr: localhost:5000 + relativeurls: true + secret: ZRhgqhaAjdbuFXj2PLJTzYy5PrRsStNaeYWd9c3Ze3 + debug: + addr: localhost:5001 + prometheus: + enabled: true + path: /metrics + +registry_redis: + addr: localhost:6379 + db: 0 + dialtimeout: 10ms + readtimeout: 10ms + writetimeout: 10ms + pool: + maxidle: 16 + maxactive: 64 + idletimeout: 300s + tls: + enabled: false + +registry_notifications: + events: + includereferences: true + endpoints: + - name: registry_ui + disabled: false + url: http://127.0.0.1:8000/api/events + # headers: + headers: + Content-Type: + - application/json + Authorization: + - "Bearer 74RwH03rOPh8kTnIgcCqAWhgV3cGMAuz" + timeout: 1s + threshold: 10 + backoff: 10s + ignoredmediatypes: + - application/octet-stream + ignore: + mediatypes: + - application/octet-stream + actions: + - pull + +... diff --git a/roles/registry_ui/molecule/configured/group_vars/all/snakeoil.yml b/roles/registry_ui/molecule/configured/group_vars/all/snakeoil.yml new file mode 100644 index 0000000..81457ec --- /dev/null +++ b/roles/registry_ui/molecule/configured/group_vars/all/snakeoil.yml @@ -0,0 +1,16 @@ +--- + +snakeoil_extract_to: /etc/snakeoil + +# snakeoil_force: true + +snakeoil_domain: matrix.lan + +snakeoil_life_time: 30 + +snakeoil_alt_names: + - dns: + - registry.matrix.lan + - molecule.matrix.lan + +... diff --git a/roles/registry_ui/molecule/configured/group_vars/all/vars.yml b/roles/registry_ui/molecule/configured/group_vars/all/vars.yml new file mode 100644 index 0000000..d175623 --- /dev/null +++ b/roles/registry_ui/molecule/configured/group_vars/all/vars.yml @@ -0,0 +1,29 @@ +--- + +registry_ui_service: + log_level: debug + +registry_ui_debug: true + +registry_ui_base_path: / + +registry_ui_listen: + address: 127.0.0.1 + port: 8000 + +registry_ui_registry: + url: http://127.0.0.1:5000 + verify_tls: false + +registry_ui_delete: + anyone_can_delete: true + +registry_ui_event: + deletion_enabled: true + anyone_can_view: true + token: 74RwH03rOPh8kTnIgcCqAWhgV3cGMAuz + +registry_ui_cache: + refresh_interval: 10 + +... diff --git a/roles/registry_ui/molecule/configured/molecule.yml b/roles/registry_ui/molecule/configured/molecule.yml new file mode 100644 index 0000000..0dac280 --- /dev/null +++ b/roles/registry_ui/molecule/configured/molecule.yml @@ -0,0 +1,61 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
+ +platforms: + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + published_ports: + - 80:80 + - 443:443 + - 5000:5000 + - 5001:5001 + - 8080:8080 + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + # - lint + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/registry_ui/molecule/configured/prepare.yml b/roles/registry_ui/molecule/configured/prepare.yml new file mode 100644 index 0000000..9e09376 --- /dev/null +++ b/roles/registry_ui/molecule/configured/prepare.yml @@ -0,0 +1,51 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: syslog-ng + - role: snakeoil + - role: redis + - role: nginx + - role: bodsch.docker.registry + +... diff --git a/roles/registry_ui/molecule/configured/requirements.yml b/roles/registry_ui/molecule/configured/requirements.yml new file mode 100644 index 0000000..3876bf5 --- /dev/null +++ b/roles/registry_ui/molecule/configured/requirements.yml @@ -0,0 +1,15 @@ +--- + +- name: snakeoil + src: bodsch.snakeoil + +- name: syslog-ng + src: bodsch.syslog_ng + +- name: nginx + src: bodsch.nginx + +- name: redis + src: bodsch.redis + +... 
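The `configured` scenario converges registry_ui against the full fixture stack listed above (syslog-ng, snakeoil, redis, nginx and the registry role from this collection). Locally it can be driven the same way the CI workflow drives it, through the Makefile and hooks; a sketch, with the distribution and Ansible version as examples:

```bash
# Sketch: run the "configured" scenario locally via the Makefile and hooks
# (requires Docker plus the Python packages from test-requirements.txt).
export TOX_SCENARIO="configured"
export TOX_ANSIBLE="ansible_6.1"
export DISTRIBUTION="debian:11"    # consumed by molecule.yml's image template

make test        # full cycle: destroy, create, prepare, converge, verify, destroy
# or step through it:
make converge
make verify
make destroy
```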
diff --git a/roles/registry_ui/molecule/configured/tests/test_default.py b/roles/registry_ui/molecule/configured/tests/test_default.py new file mode 100644 index 0000000..ce4689c --- /dev/null +++ b/roles/registry_ui/molecule/configured/tests/test_default.py @@ -0,0 +1,188 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +import json +import pytest +import os + +import testinfra.utils.ansible_runner + +HOST = 'instance' + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." + molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return 
host.ansible("setup").get("ansible_facts").get("ansible_local").get("registry_ui") + + +def test_directories(host, get_vars): + """ + """ + directories = [] + directories.append("/etc/registry-ui") + directories.append("/var/lib/registry-ui") + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + + version = local_facts(host).get("version") + + install_dir = get_vars.get("registry_ui_install_path") + defaults_dir = get_vars.get("registry_ui_defaults_directory") + config_dir = get_vars.get("registry_ui_config_dir") + + if 'latest' in install_dir: + install_dir = install_dir.replace('latest', version) + + files = [] + files.append("/usr/bin/registry-ui") + + if install_dir: + files.append(f"{install_dir}/registry-ui") + if defaults_dir and not distribution == "artix": + files.append(f"{defaults_dir}/registry-ui") + if config_dir: + files.append(f"{config_dir}/config.yml") + + print(files) + + for _file in files: + f = host.file(_file) + assert f.exists + assert f.is_file + + +def test_user(host, get_vars): + """ + """ + user = get_vars.get("registry_ui_system_user", "registry") + group = get_vars.get("registry_ui_system_group", "registry") + + assert host.group(group).exists + assert host.user(user).exists + assert group in host.user(user).groups + assert host.user(user).home == "/var/lib/registry-ui" + + +def test_service(host, get_vars): + """ + """ + service = host.service("registry-ui") + assert service.is_enabled + assert service.is_running + + +def test_open_port(host, get_vars): + """ + """ + listen = get_vars.get("registry_ui_listen", None) + + if listen: + address = listen.get("address", "127.0.0.1") + port = listen.get("port", "8080") + + service = host.socket(f"tcp://{address}:{port}") + assert service.is_listening diff --git a/roles/registry_ui/molecule/default/converge.yml b/roles/registry_ui/molecule/default/converge.yml new file mode 100644 index 0000000..5190178 --- /dev/null +++ b/roles/registry_ui/molecule/default/converge.yml @@ -0,0 +1,11 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + become: false + + roles: + - role: bodsch.docker.registry_ui + +... diff --git a/roles/registry_ui/molecule/default/group_vars/all/vars.yml b/roles/registry_ui/molecule/default/group_vars/all/vars.yml new file mode 100644 index 0000000..e3e210e --- /dev/null +++ b/roles/registry_ui/molecule/default/group_vars/all/vars.yml @@ -0,0 +1,14 @@ +--- + +registry_ui_service: + log_level: info + +registry_ui_listen: + address: 0.0.0.0 + port: 8080 + +registry_ui_registry: + url: http://127.0.0.1:5000 + verify_tls: false + +... diff --git a/roles/registry_ui/molecule/default/molecule.yml b/roles/registry_ui/molecule/default/molecule.yml new file mode 100644 index 0000000..0dac280 --- /dev/null +++ b/roles/registry_ui/molecule/default/molecule.yml @@ -0,0 +1,61 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
+ +platforms: + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + published_ports: + - 80:80 + - 443:443 + - 5000:5000 + - 5001:5001 + - 8080:8080 + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + # - lint + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/registry_ui/molecule/default/prepare.yml b/roles/registry_ui/molecule/default/prepare.yml new file mode 100644 index 0000000..4040d4c --- /dev/null +++ b/roles/registry_ui/molecule/default/prepare.yml @@ -0,0 +1,48 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: syslog-ng + - role: bodsch.docker.registry + +... diff --git a/roles/registry_ui/molecule/default/requirements.yml b/roles/registry_ui/molecule/default/requirements.yml new file mode 100644 index 0000000..ecf8dce --- /dev/null +++ b/roles/registry_ui/molecule/default/requirements.yml @@ -0,0 +1,6 @@ +--- + +- name: syslog-ng + src: bodsch.syslog_ng + +... diff --git a/roles/registry_ui/molecule/default/tests/test_default.py b/roles/registry_ui/molecule/default/tests/test_default.py new file mode 100644 index 0000000..ce4689c --- /dev/null +++ b/roles/registry_ui/molecule/default/tests/test_default.py @@ -0,0 +1,188 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +import json +import pytest +import os + +import testinfra.utils.ansible_runner + +HOST = 'instance' + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("registry_ui") + + +def test_directories(host, get_vars): + """ + """ + directories = [] + directories.append("/etc/registry-ui") + directories.append("/var/lib/registry-ui") + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + + version = local_facts(host).get("version") + + install_dir = get_vars.get("registry_ui_install_path") + defaults_dir = get_vars.get("registry_ui_defaults_directory") + config_dir = get_vars.get("registry_ui_config_dir") + + if 'latest' in install_dir: + install_dir = install_dir.replace('latest', version) + + files = [] + files.append("/usr/bin/registry-ui") + + if install_dir: + files.append(f"{install_dir}/registry-ui") + if defaults_dir and not distribution == "artix": + files.append(f"{defaults_dir}/registry-ui") + if config_dir: + files.append(f"{config_dir}/config.yml") + + 
print(files) + + for _file in files: + f = host.file(_file) + assert f.exists + assert f.is_file + + +def test_user(host, get_vars): + """ + """ + user = get_vars.get("registry_ui_system_user", "registry") + group = get_vars.get("registry_ui_system_group", "registry") + + assert host.group(group).exists + assert host.user(user).exists + assert group in host.user(user).groups + assert host.user(user).home == "/var/lib/registry-ui" + + +def test_service(host, get_vars): + """ + """ + service = host.service("registry-ui") + assert service.is_enabled + assert service.is_running + + +def test_open_port(host, get_vars): + """ + """ + listen = get_vars.get("registry_ui_listen", None) + + if listen: + address = listen.get("address", "127.0.0.1") + port = listen.get("port", "8080") + + service = host.socket(f"tcp://{address}:{port}") + assert service.is_listening diff --git a/roles/registry_ui/molecule/latest/converge.yml b/roles/registry_ui/molecule/latest/converge.yml new file mode 100644 index 0000000..5190178 --- /dev/null +++ b/roles/registry_ui/molecule/latest/converge.yml @@ -0,0 +1,11 @@ +--- + +- name: converge + hosts: instance + any_errors_fatal: false + become: false + + roles: + - role: bodsch.docker.registry_ui + +... diff --git a/roles/registry_ui/molecule/latest/group_vars/all/vars.yml b/roles/registry_ui/molecule/latest/group_vars/all/vars.yml new file mode 100644 index 0000000..58323f8 --- /dev/null +++ b/roles/registry_ui/molecule/latest/group_vars/all/vars.yml @@ -0,0 +1,16 @@ +--- + +registry_ui_version: latest + +registry_ui_service: + log_level: info + +registry_ui_listen: + address: 0.0.0.0 + port: 8080 + +registry_ui_registry: + url: http://127.0.0.1:5000 + verify_tls: false + +... diff --git a/roles/registry_ui/molecule/latest/molecule.yml b/roles/registry_ui/molecule/latest/molecule.yml new file mode 100644 index 0000000..0dac280 --- /dev/null +++ b/roles/registry_ui/molecule/latest/molecule.yml @@ -0,0 +1,61 @@ +--- + +role_name_check: 1 + +dependency: + name: galaxy + +driver: + name: docker + +lint: | + set -e + yamllint . + ansible-lint . + flake8 . 
+ +platforms: + - name: instance + image: "bodsch/ansible-${DISTRIBUTION:-debian:12}" + command: ${MOLECULE_DOCKER_COMMAND:-""} + docker_host: "${DOCKER_HOST:-unix://run/docker.sock}" + privileged: true + pre_build_image: true + published_ports: + - 80:80 + - 443:443 + - 5000:5000 + - 5001:5001 + - 8080:8080 + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:ro + +provisioner: + name: ansible + ansible_args: + - --diff + - -v + config_options: + defaults: + deprecation_warnings: true + stdout_callback: yaml + callbacks_enabled: profile_tasks + gathering: smart + fact_caching: jsonfile + fact_caching_timeout: 8640 + fact_caching_connection: ansible_facts + +scenario: + test_sequence: + # - lint + - destroy + - dependency + - syntax + - create + - prepare + - converge + - verify + - destroy + +verifier: + name: testinfra diff --git a/roles/registry_ui/molecule/latest/prepare.yml b/roles/registry_ui/molecule/latest/prepare.yml new file mode 100644 index 0000000..4040d4c --- /dev/null +++ b/roles/registry_ui/molecule/latest/prepare.yml @@ -0,0 +1,48 @@ +--- + +- name: information + hosts: all + gather_facts: true + + pre_tasks: + - name: arch- / artixlinux + when: + - ansible_distribution | lower == 'archlinux' or + ansible_os_family | lower == 'artix linux' + block: + - name: update pacman system + ansible.builtin.command: | + pacman --refresh --sync --sysupgrade --noconfirm + + - name: create depends service + ansible.builtin.copy: + mode: 0755 + dest: /etc/init.d/net + content: | + #!/usr/bin/openrc-run + true + when: + - ansible_os_family | lower == 'artix linux' + + - name: update package cache + become: true + ansible.builtin.package: + update_cache: true + + - name: environment + ansible.builtin.debug: + msg: + - "os family : {{ ansible_distribution }} ({{ ansible_os_family }})" + - "distribution version : {{ ansible_distribution_major_version }}" + - "ansible version : {{ ansible_version.full }}" + - "python version : {{ ansible_python.version.major }}.{{ ansible_python.version.minor }}" + +- name: prepare container + hosts: all + gather_facts: true + + roles: + - role: syslog-ng + - role: bodsch.docker.registry + +... diff --git a/roles/registry_ui/molecule/latest/requirements.yml b/roles/registry_ui/molecule/latest/requirements.yml new file mode 100644 index 0000000..ecf8dce --- /dev/null +++ b/roles/registry_ui/molecule/latest/requirements.yml @@ -0,0 +1,6 @@ +--- + +- name: syslog-ng + src: bodsch.syslog_ng + +... diff --git a/roles/registry_ui/molecule/latest/tests/test_default.py b/roles/registry_ui/molecule/latest/tests/test_default.py new file mode 100644 index 0000000..ce4689c --- /dev/null +++ b/roles/registry_ui/molecule/latest/tests/test_default.py @@ -0,0 +1,188 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from ansible.parsing.dataloader import DataLoader +from ansible.template import Templar + +import json +import pytest +import os + +import testinfra.utils.ansible_runner + +HOST = 'instance' + +testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( + os.environ['MOLECULE_INVENTORY_FILE']).get_hosts(HOST) + + +def pp_json(json_thing, sort=True, indents=2): + if type(json_thing) is str: + print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents)) + else: + print(json.dumps(json_thing, sort_keys=sort, indent=indents)) + return None + + +def base_directory(): + """ + """ + cwd = os.getcwd() + + if 'group_vars' in os.listdir(cwd): + directory = "../.." + molecule_directory = "." + else: + directory = "." 
+ molecule_directory = f"molecule/{os.environ.get('MOLECULE_SCENARIO_NAME')}" + + return directory, molecule_directory + + +def read_ansible_yaml(file_name, role_name): + """ + """ + read_file = None + + for e in ["yml", "yaml"]: + test_file = "{}.{}".format(file_name, e) + if os.path.isfile(test_file): + read_file = test_file + break + + return f"file={read_file} name={role_name}" + + +@pytest.fixture() +def get_vars(host): + """ + parse ansible variables + - defaults/main.yml + - vars/main.yml + - vars/${DISTRIBUTION}.yaml + - molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml + """ + base_dir, molecule_dir = base_directory() + distribution = host.system_info.distribution + operation_system = None + + if distribution in ['debian', 'ubuntu']: + operation_system = "debian" + elif distribution in ['redhat', 'ol', 'centos', 'rocky', 'almalinux']: + operation_system = "redhat" + elif distribution in ['arch', 'artix']: + operation_system = f"{distribution}linux" + + # print(" -> {} / {}".format(distribution, os)) + # print(" -> {}".format(base_dir)) + + file_defaults = read_ansible_yaml(f"{base_dir}/defaults/main", "role_defaults") + file_vars = read_ansible_yaml(f"{base_dir}/vars/main", "role_vars") + file_distibution = read_ansible_yaml(f"{base_dir}/vars/{operation_system}", "role_distibution") + file_molecule = read_ansible_yaml(f"{molecule_dir}/group_vars/all/vars", "test_vars") + # file_host_molecule = read_ansible_yaml("{}/host_vars/{}/vars".format(base_dir, HOST), "host_vars") + + defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults") + vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars") + distibution_vars = host.ansible("include_vars", file_distibution).get("ansible_facts").get("role_distibution") + molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars") + # host_vars = host.ansible("include_vars", file_host_molecule).get("ansible_facts").get("host_vars") + + ansible_vars = defaults_vars + ansible_vars.update(vars_vars) + ansible_vars.update(distibution_vars) + ansible_vars.update(molecule_vars) + # ansible_vars.update(host_vars) + + templar = Templar(loader=DataLoader(), variables=ansible_vars) + result = templar.template(ansible_vars, fail_on_undefined=False) + + return result + + +def local_facts(host): + """ + return local facts + """ + return host.ansible("setup").get("ansible_facts").get("ansible_local").get("registry_ui") + + +def test_directories(host, get_vars): + """ + """ + directories = [] + directories.append("/etc/registry-ui") + directories.append("/var/lib/registry-ui") + + for dirs in directories: + d = host.file(dirs) + assert d.is_directory + + +def test_files(host, get_vars): + """ + """ + distribution = host.system_info.distribution + release = host.system_info.release + + print(f"distribution: {distribution}") + print(f"release : {release}") + + version = local_facts(host).get("version") + + install_dir = get_vars.get("registry_ui_install_path") + defaults_dir = get_vars.get("registry_ui_defaults_directory") + config_dir = get_vars.get("registry_ui_config_dir") + + if 'latest' in install_dir: + install_dir = install_dir.replace('latest', version) + + files = [] + files.append("/usr/bin/registry-ui") + + if install_dir: + files.append(f"{install_dir}/registry-ui") + if defaults_dir and not distribution == "artix": + files.append(f"{defaults_dir}/registry-ui") + if config_dir: + files.append(f"{config_dir}/config.yml") + + 
print(files) + + for _file in files: + f = host.file(_file) + assert f.exists + assert f.is_file + + +def test_user(host, get_vars): + """ + """ + user = get_vars.get("registry_ui_system_user", "registry") + group = get_vars.get("registry_ui_system_group", "registry") + + assert host.group(group).exists + assert host.user(user).exists + assert group in host.user(user).groups + assert host.user(user).home == "/var/lib/registry-ui" + + +def test_service(host, get_vars): + """ + """ + service = host.service("registry-ui") + assert service.is_enabled + assert service.is_running + + +def test_open_port(host, get_vars): + """ + """ + listen = get_vars.get("registry_ui_listen", None) + + if listen: + address = listen.get("address", "127.0.0.1") + port = listen.get("port", "8080") + + service = host.socket(f"tcp://{address}:{port}") + assert service.is_listening diff --git a/roles/registry_ui/tasks/configure.yml b/roles/registry_ui/tasks/configure.yml new file mode 100644 index 0000000..9685326 --- /dev/null +++ b/roles/registry_ui/tasks/configure.yml @@ -0,0 +1,26 @@ +--- + +- name: create registry-ui configuration directory + ansible.builtin.file: + path: "{{ registry_ui_config_dir }}" + state: directory + owner: root + group: "{{ registry_ui_system_group }}" + mode: 0775 + +- name: create config.yml + ansible.builtin.template: + src: "registry-ui/config.yml.j2" + dest: "{{ registry_ui_config_dir }}/config.yml" + force: true + owner: root + group: "{{ registry_ui_system_group }}" + mode: 0664 + notify: + - validate config + - reload registry-ui + +- name: restart registry if needed + ansible.builtin.meta: flush_handlers + +... diff --git a/roles/registry_ui/tasks/download.yml b/roles/registry_ui/tasks/download.yml new file mode 100644 index 0000000..250e315 --- /dev/null +++ b/roles/registry_ui/tasks/download.yml @@ -0,0 +1,53 @@ +--- + +- name: checksum + become: false + delegate_to: localhost + run_once: true + block: + - name: get checksum list + bodsch.scm.github_checksum: + project: bodsch + repository: docker-registry-ui + checksum_file: "{{ registry_ui_archive }}.sha256" + user: "{{ lookup('env', 'GH_USER') | default(omit) }}" + password: "{{ lookup('env', 'GH_TOKEN') | default(omit) }}" + architecture: "{{ ansible_architecture }}" + system: "{{ ansible_facts.system }}" + version: "{{ registry_ui_version }}" + register: _latest_checksum + + - name: define checksum for {{ go_arch }} architecture + ansible.builtin.set_fact: + __registry_ui_checksum: "{{ _latest_checksum.checksum }}" + when: + - _latest_checksum.rc == 0 + - _latest_checksum.checksum is defined + - _latest_checksum.checksum | string | length > 0 + +- name: download registry-ui binary archive + become: false + delegate_to: "{{ registry_ui_delegate_to }}" + run_once: "{{ 'false' if registry_ui_direct_download else 'true' }}" + ansible.builtin.get_url: + url: "{{ registry_ui_release_download_url }}/download/{{ registry_ui_version }}/registry-ui-{{ registry_ui_version }}-{{ ansible_facts.system }}-{{ system_architecture }}.tar.gz" + dest: "{{ registry_ui_local_tmp_directory }}/registry-ui-{{ system_architecture }}.tar.gz" + checksum: "sha256:{{ __registry_ui_checksum }}" + mode: 0660 + register: _download_archive + until: _download_archive is succeeded + retries: 5 + delay: 2 + check_mode: false + +- name: extract registry-ui archive + become: false + delegate_to: "{{ registry_ui_delegate_to }}" + run_once: "{{ 'false' if registry_ui_direct_download else 'true' }}" + ansible.builtin.unarchive: + src: "{{ 
registry_ui_local_tmp_directory }}/registry-ui-{{ system_architecture }}.tar.gz" + dest: "{{ registry_ui_local_tmp_directory }}" + copy: false + register: _extract_archive + +... diff --git a/roles/registry_ui/tasks/install.yml b/roles/registry_ui/tasks/install.yml new file mode 100644 index 0000000..f747c61 --- /dev/null +++ b/roles/registry_ui/tasks/install.yml @@ -0,0 +1,108 @@ +--- + +- name: detect extracted binary file for registry on {{ registry_ui_delegate_to }} + become: false + delegate_to: "{{ registry_ui_delegate_to }}" + run_once: "{{ 'false' if registry_ui_direct_download else 'true' }}" + ansible.builtin.stat: + path: "{{ registry_ui_local_tmp_directory }}/registry-ui" + register: stat_file_binary + +- name: copy files + when: + - stat_file_binary.stat.exists + block: + - name: propagate registry binaries + ansible.builtin.copy: + src: "{{ registry_ui_local_tmp_directory }}/registry-ui" + dest: "{{ registry_ui_install_path }}/registry-ui" + mode: 0755 + owner: "{{ registry_ui_system_user }}" + group: "{{ registry_ui_system_group }}" + remote_src: "{{ 'true' if registry_ui_direct_download else 'false' }}" + + - name: propagate static files + ansible.builtin.copy: + src: "{{ registry_ui_local_tmp_directory }}/{{ item }}/" + dest: "{{ registry_ui_data_dir }}/{{ item }}/" + mode: 0644 + owner: "{{ registry_ui_system_user }}" + group: "{{ registry_ui_system_group }}" + remote_src: "{{ 'true' if registry_ui_direct_download else 'false' }}" + loop: + - static + - templates + notify: + - reload registry-ui + +- name: make files executable + ansible.builtin.file: + path: "{{ registry_ui_install_path }}/registry-ui" + mode: 0755 + owner: "{{ registry_ui_system_user }}" + group: "{{ registry_ui_system_group }}" + +- name: create custom fact file + bodsch.core.facts: + name: registry_ui + facts: + version: "{{ registry_ui_version }}" + +- name: create link to binary + ansible.builtin.file: + src: "{{ registry_ui_install_path }}/registry-ui" + dest: "/usr/bin/registry-ui" + state: link + force: true + follow: false + notify: + - restart registry-ui + +- name: systemd + when: + - ansible_service_mgr | lower == "systemd" + block: + - name: create systemd service unit + ansible.builtin.template: + src: "init/systemd/registry-ui.service.j2" + dest: "{{ systemd_lib_directory }}/registry-ui.service" + owner: root + group: root + mode: 0644 + notify: + - daemon-reload + - restart registry-ui + + - name: create systemd service configuration + ansible.builtin.template: + src: "registry-ui.j2" + dest: "{{ registry_ui_defaults_directory }}/registry-ui" + force: true + owner: root + group: "{{ registry_ui_system_group }}" + mode: 0640 + notify: + - validate config + - reload registry-ui + +- name: openrc + when: + - ansible_service_mgr | lower == "openrc" + block: + - name: create openrc service configuration + ansible.builtin.template: + src: "init/openrc/conf.d/registry-ui.j2" + dest: "/etc/conf.d/registry-ui" + owner: root + group: root + mode: 0644 + + - name: create openrc init file + ansible.builtin.template: + src: "init/openrc/init.d/registry-ui.j2" + dest: "/etc/init.d/registry-ui" + owner: root + group: root + mode: 0750 + +... 
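The download step above resolves the published .sha256 for the requested release, pins ansible.builtin.get_url to that checksum, and unpacks the archive into the local cache directory that install.yml later copies from. A minimal Python sketch of the same verify-then-extract idea, assuming the GitHub-style release URL layout implied by the role variables (registry_ui_release_download_url and the archive name); it is illustrative only and not part of the role:

    # Illustrative sketch of the checksum-pinned download in tasks/download.yml.
    # release_url and the archive naming are assumptions based on the role variables above.
    import hashlib
    import tarfile
    import urllib.request
    from pathlib import Path

    def fetch(url: str) -> bytes:
        with urllib.request.urlopen(url) as resp:
            return resp.read()

    def download_and_extract(release_url: str, version: str, system: str, arch: str, cache: Path) -> Path:
        archive = f"registry-ui-{version}-{system}-{arch}.tar.gz"
        expected = fetch(f"{release_url}/download/{version}/{archive}.sha256").split()[0].decode()

        data = fetch(f"{release_url}/download/{version}/{archive}")
        actual = hashlib.sha256(data).hexdigest()
        if actual != expected:
            raise ValueError(f"checksum mismatch: {actual} != {expected}")

        cache.mkdir(parents=True, exist_ok=True)
        archive_path = cache / archive
        archive_path.write_bytes(data)
        with tarfile.open(archive_path) as tar:
            tar.extractall(cache)  # yields the registry-ui binary plus static/ and templates/
        return cache / "registry-ui"

The role itself does the equivalent with bodsch.scm.github_checksum, the checksum argument of get_url and unarchive, retried up to five times and delegated to localhost unless registry_ui_direct_download is set.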
diff --git a/roles/registry_ui/tasks/main.yml b/roles/registry_ui/tasks/main.yml new file mode 100644 index 0000000..7d99bb3 --- /dev/null +++ b/roles/registry_ui/tasks/main.yml @@ -0,0 +1,20 @@ +--- + +- name: prepare + ansible.builtin.include_tasks: prepare.yml + +- name: download + ansible.builtin.include_tasks: download.yml + when: + - not stat_registry_ui_binary.stat.exists + +- name: install + ansible.builtin.include_tasks: install.yml + +- name: configure + ansible.builtin.include_tasks: configure.yml + +- name: service + ansible.builtin.include_tasks: service.yml + +... diff --git a/roles/registry_ui/tasks/prepare.yml b/roles/registry_ui/tasks/prepare.yml new file mode 100644 index 0000000..e5774d4 --- /dev/null +++ b/roles/registry_ui/tasks/prepare.yml @@ -0,0 +1,116 @@ +--- + +- name: include OS specific configuration ({{ ansible_distribution }} ({{ ansible_os_family }}) {{ ansible_distribution_major_version }}) + ansible.builtin.include_vars: "{{ lookup('first_found', params) }}" + vars: + params: + paths: + - "vars" + files: + # eg. debian-10 / ubuntu-20.04 / centos-8 / oraclelinux-8 + - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version }}.yml" + # eg. archlinux-systemd / archlinux-openrc + - "{{ ansible_distribution | lower }}-{{ ansible_service_mgr | lower }}.yml" + # eg. debian / ubuntu / centos / oraclelinux + - "{{ ansible_distribution | lower }}.yml" + # eg. redhat / debian / archlinux + - "{{ ansible_os_family | lower }}.yml" + # artixlinux + - "{{ ansible_os_family | lower | replace(' ', '') }}.yml" + - default.yaml + skip: true + +- name: define delegate instance for download handling + ansible.builtin.set_fact: + registry_ui_delegate_to: "{{ ansible_host }}" + registry_ui_local_tmp_directory: "{{ + lookup('env', 'CUSTOM_LOCAL_TMP_DIRECTORY') | + default('/var/cache/ansible/registry', true) }}/{{ registry_ui_version }}" + when: + - registry_ui_direct_download + +- name: install dependency + ansible.builtin.package: + name: "{{ registry_ui_requirements }}" + state: present + when: + - registry_ui_requirements | default([]) | count > 0 + +- name: get latest release + delegate_to: localhost + become: false + run_once: true + when: + - registry_ui_version == "latest" + block: + - name: get latest release + delegate_to: localhost + become: false + run_once: true + bodsch.scm.github_latest: + project: bodsch + repository: docker-registry-ui + user: "{{ lookup('env', 'GH_USER') | default(omit) }}" + password: "{{ lookup('env', 'GH_TOKEN') | default(omit) }}" + register: _latest_release + + - name: re-define registry_ui_version + ansible.builtin.set_fact: + registry_ui_version: "{{ _latest_release.latest_release }}" + +- name: detect installed registry-ui binary + ansible.builtin.stat: + path: "{{ registry_ui_install_path }}/registry-ui" + register: stat_registry_ui_binary + +- name: create download directory + become: false + delegate_to: "{{ registry_ui_delegate_to }}" + ansible.builtin.file: + path: "{{ registry_ui_local_tmp_directory }}" + state: directory + mode: 0750 + +- name: user and group handling + when: + - registry_ui_system_user != "root" or registry_ui_system_group != "root" + block: + - name: create registry group + ansible.builtin.group: + name: "{{ registry_ui_system_group }}" + state: present + system: true + when: + - registry_ui_system_group != "root" + + - name: create registry user + ansible.builtin.user: + name: "{{ registry_ui_system_user }}" + groups: "{{ registry_ui_system_group }}" + append: true + shell: /usr/sbin/nologin + 
system: true + createhome: false + home: "{{ registry_ui_data_dir }}" + when: + - registry_ui_system_user != "root" + +- name: create install directory + ansible.builtin.file: + path: "{{ registry_ui_install_path }}" + state: directory + owner: "{{ registry_ui_system_user }}" + group: "{{ registry_ui_system_group }}" + mode: 0755 + +- name: merge registry configuration between defaults and custom + ansible.builtin.set_fact: + registry_ui_service: "{{ registry_ui_defaults_service | combine(registry_ui_service, recursive=True) }}" + registry_ui_listen: "{{ registry_ui_defaults_listen | combine(registry_ui_listen, recursive=True) }}" + registry_ui_registry: "{{ registry_ui_defaults_registry | combine(registry_ui_registry, recursive=True) }}" + registry_ui_event: "{{ registry_ui_defaults_event | combine(registry_ui_event, recursive=True) }}" + registry_ui_cache: "{{ registry_ui_defaults_cache | combine(registry_ui_cache, recursive=True) }}" + registry_ui_admins: "{{ registry_ui_defaults_admins | combine(registry_ui_admins, recursive=True) }}" + registry_ui_purge: "{{ registry_ui_defaults_purge | combine(registry_ui_purge, recursive=True) }}" + +... diff --git a/roles/registry_ui/tasks/service.yml b/roles/registry_ui/tasks/service.yml new file mode 100644 index 0000000..7b375f8 --- /dev/null +++ b/roles/registry_ui/tasks/service.yml @@ -0,0 +1,10 @@ +--- + +- name: ensure registry-ui is enabled on boot + become: true + ansible.builtin.service: + name: registry-ui + enabled: true + state: started + +... diff --git a/roles/registry_ui/templates/init/openrc/conf.d/registry-ui.j2 b/roles/registry_ui/templates/init/openrc/conf.d/registry-ui.j2 new file mode 100644 index 0000000..5736c7f --- /dev/null +++ b/roles/registry_ui/templates/init/openrc/conf.d/registry-ui.j2 @@ -0,0 +1,16 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% set _log_level = "info" %} +{% if registry_ui_service.log_level is defined and + registry_ui_service.log_level | string | length > 0 and + registry_ui_service.log_level in ["error", "warn", "info", "debug" ] %} + {% set _log_level = registry_ui_service.log_level %} +{% endif %} + +user="{{ registry_ui_system_user }}" +group="{{ registry_ui_system_group }}" + +command_args=" + -config-file {{ registry_ui_config_dir }}/config.yml \ + -log-level {{ _log_level }}" diff --git a/roles/registry_ui/templates/init/openrc/init.d/registry-ui.j2 b/roles/registry_ui/templates/init/openrc/init.d/registry-ui.j2 new file mode 100644 index 0000000..9bdc871 --- /dev/null +++ b/roles/registry_ui/templates/init/openrc/init.d/registry-ui.j2 @@ -0,0 +1,42 @@ +#!/usr/bin/openrc-run + +description="registry, the Open Source Registry implementation for storing and distributing container images using the OCI Distribution Specification" +pidfile=${pidfile:-"/run/${RC_SVCNAME}.pid"} +user=${user:-${SVCNAME}} +group=${group:-${SVCNAME}} + +command="/usr/bin/registry-ui" +supervisor="supervise-daemon" + +command_args="${command_args:--config-file {{ registry_ui_config_dir }}/config.yml}" +command_user="${user}:${group}" + +command_args_background="--background" +required_files="/etc/registry-ui/config.yml" +directory="{{ registry_ui_data_dir }}" + +output_log="/var/log/${RC_SVCNAME}/${RC_SVCNAME}.log" +error_log="${output_log}" +extra_started_commands="reload" + +depend() { + # need localmount net + # use dns + after sysfs net +} + +start_pre() { + [ -d $(dirname ${output_log}) ] || mkdir $(dirname ${output_log}) + chown -R ${command_user} $(dirname ${output_log}) + 
chown -R ${command_user} {{ registry_ui_data_dir }} +} + +reload() { + ebegin "Reloading ${SVCNAME}" + if [ ! -r "${pidfile}" ]; then + eend 1 "${RC_SVCNAME} not running" + else + kill -s HUP $(cat "${pidfile}") 2> /dev/null + eend $? + fi +} diff --git a/roles/registry_ui/templates/init/systemd/registry-ui.service.j2 b/roles/registry_ui/templates/init/systemd/registry-ui.service.j2 new file mode 100644 index 0000000..82daeba --- /dev/null +++ b/roles/registry_ui/templates/init/systemd/registry-ui.service.j2 @@ -0,0 +1,27 @@ +{{ ansible_managed | comment }} + +[Unit] +Description = registry ui, a Web UI for Docker Registry +After = network-online.target + +[Service] +Type = simple +Environment = GOMAXPROCS={{ ansible_processor_vcpus | default(ansible_processor_count) }} +Environment = HOME={{ registry_ui_data_dir }} +EnvironmentFile = {{ registry_ui_defaults_directory }}/registry-ui + +User = {{ registry_ui_system_user }} +Group = {{ registry_ui_system_group }} +WorkingDirectory = {{ registry_ui_data_dir }} + +ExecReload = /bin/kill -HUP $MAINPID +ExecStart = /usr/bin/registry-ui $OPTIONS + +LimitNOFILE = 65000 +SyslogIdentifier = registry-ui +Restart = on-failure +RestartSec = 30s +RestartSteps = 20 + +[Install] +WantedBy = multi-user.target diff --git a/roles/registry_ui/templates/registry-ui.j2 b/roles/registry_ui/templates/registry-ui.j2 new file mode 100644 index 0000000..5230be9 --- /dev/null +++ b/roles/registry_ui/templates/registry-ui.j2 @@ -0,0 +1,27 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% set _log_level = "info" %} +{% if registry_ui_service.log_level is defined and + registry_ui_service.log_level | string | length > 0 and + registry_ui_service.log_level in ["error", "warn", "info", "debug" ] %} + {% set _log_level = registry_ui_service.log_level %} +{% endif %} + +OPTIONS=" + -config-file {{ registry_ui_config_dir }}/config.yml \ + -log-level {{ _log_level }}" + +{# +# registry-ui --help +Usage of registry-ui: + -config-file string + path to the config file (default "config.yml") + -dry-run + dry-run for purging task, does not delete anything + -log-level string + logging level (default "info") + -purge-tags + purge old tags instead of running a web server + +#} diff --git a/roles/registry_ui/templates/registry-ui/config.yml.j2 b/roles/registry_ui/templates/registry-ui/config.yml.j2 new file mode 100644 index 0000000..15ea4b2 --- /dev/null +++ b/roles/registry_ui/templates/registry-ui/config.yml.j2 @@ -0,0 +1,159 @@ +#jinja2: trim_blocks: True, lstrip_blocks: True +# {{ ansible_managed }} + +{% if registry_ui_listen is defined and + registry_ui_listen | count > 0 %} +# Listen interface. +listen_addr: {{ registry_ui_listen.address | default('127.0.0.1') }}:{{ registry_ui_listen.port | default('8000') }} +{% endif %} + +{% if registry_ui_base_path is defined and + registry_ui_base_path | string | length > 0 %} +# Base path of Docker Registry UI. +base_path: {{ registry_ui_base_path }} +{% endif %} + +{% if registry_ui_registry is defined and + registry_ui_registry | count > 0 %} + {% if registry_ui_registry.url is defined and + registry_ui_registry.url | string | length > 0 %} +# Registry URL with schema and port. +registry_url: {{ registry_ui_registry.url }} + {% endif %} + {% if registry_ui_registry.verify_tls is defined and + registry_ui_registry.verify_tls %} +# Verify TLS certificate when using https. 
+verify_tls: {{ registry_ui_registry.verify_tls | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} + {% endif %} +# Docker registry credentials. +# They need to have a full access to the registry. +# If token authentication service is enabled, it will be auto-discovered and those credentials +# will be used to obtain access tokens. +# When the registry_password_file entry is used, the password can be passed as a docker secret +# and read from file. This overides the registry_password entry. + {% if registry_ui_registry.username is defined and + registry_ui_registry.username | string | length > 0 %} +registry_username: {{ registry_ui_registry.username }} + {% endif %} + {% if registry_ui_registry.password is defined and + registry_ui_registry.password | string | length > 0 %} +registry_password: {{ registry_ui_registry.password }} + {% endif %} + {% if registry_ui_registry.password_file is defined and + registry_ui_registry.password_file | string | length > 0 %} +registry_password_file: {{ registry_ui_registry.password_file }} + {% endif %} +{% endif %} + +{% if registry_ui_event is defined and + registry_ui_event | count > 0 %} +# Event listener token. +# The same one should be configured on Docker registry as Authorization Bearer token. + {% if registry_ui_event.token is defined and + registry_ui_event.token | string | length > 0 %} +event_listener_token: {{ registry_ui_event.token }} + {% endif %} + {% if registry_ui_event.retention_days is defined and + registry_ui_event.retention_days | string | length > 0 %} +# Retention of records to keep. +event_retention_days: {{ registry_ui_event.retention_days }} + {% endif %} + + {% if registry_ui_event.database is defined and + registry_ui_event.database | count > 0 and + registry_ui_event.database.driver is defined and + registry_ui_event.database.driver | string | length > 0 %} + {% if registry_ui_event.database.driver in ["sqlite3", "mysql"] %} + {% if registry_ui_event.database.driver == "sqlite3" %} +# Event listener storage. +event_database_driver: sqlite3 + {% set _database_location = registry_ui_data_dir ~ "/registry_events.db" %} + {% if registry_ui_event.database.location is defined and + registry_ui_event.database.location | string | length > 0 %} + {% set _database_location = registry_ui_event.database.location %} + {% endif %} +event_database_location: {{ _database_location }} + {% endif %} + {% if registry_ui_event.database.driver == "mysql" %} +event_database_driver: mysql + {% if registry_ui_event.database.username is defined and + registry_ui_event.database.username | string | length > 0 and + registry_ui_event.database.password is defined and + registry_ui_event.database.password | string | length > 0 %} + # user:password@tcp(localhost:3306)/docker_events +event_database_location: {{ registry_ui_event.database.username }}:{{ registry_ui_event.database.password }}@tcp({{ registry_ui_event.database.hostname | default('127.0.0.1:3306') }})/{{ registry_ui_event.database.schemaname }} + {% endif %} + {% endif%} + {% endif %} + {% endif %} + +# You can disable event deletion on some hosts when you are running docker-registry on master-master or +# cluster setup to avoid deadlocks or replication break. +event_deletion_enabled: {{ registry_ui_event.deletion_enabled | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} + +# If all users can view the event log. If set to false, then only admins listed below. 
+anyone_can_view_events: {{ registry_ui_event.anyone_can_view | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} +{% endif %} + +{% if registry_ui_cache is defined and + registry_ui_cache | count > 0 %} + {% if registry_ui_cache.refresh_interval is defined and + registry_ui_cache.refresh_interval | string | length > 0 %} +# Cache refresh interval in minutes. +# How long to cache repository list and tag counts. +cache_refresh_interval: {{ registry_ui_cache.refresh_interval }} + {% endif %} +{% endif %} + +{% if registry_ui_delete is defined and + registry_ui_delete | count > 0 %} + {% if registry_ui_delete.anyone_can_delete is defined and + registry_ui_delete.anyone_can_delete %} +# If all users can delete tags. If set to false, then only admins listed below. +anyone_can_delete: {{ registry_ui_delete.anyone_can_delete | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} + {% endif %} + {% if registry_ui_delete.admins is defined and + registry_ui_delete.admins %} +# Users allowed to delete tags. +# This should be sent via X-WEBAUTH-USER header from your proxy. +admins: [" {{ registry_ui_delete.admins | join(",") }}"] + {% endif %} +{% endif %} + +# Debug mode. Affects only templates. +debug: {{ registry_ui_debug | default('false') | bool | bodsch.core.config_bool(true_as='true', false_as='false') }} + +{% if registry_ui_purge is defined and + registry_ui_purge | count > 0 %} + {% if registry_ui_purge.tags_keep_days is defined and + registry_ui_purge.tags_keep_days | string | length > 0 %} +# How many days to keep tags but also keep the minimal count provided no matter how old. +purge_tags_keep_days: {{ registry_ui_purge.tags_keep_days }} + {% endif %} + {% if registry_ui_purge.tags_keep_count is defined and + registry_ui_purge.tags_keep_count | string | length > 0 %} +purge_tags_keep_count: {{ registry_ui_purge.tags_keep_count }} + {% endif %} + {% if registry_ui_purge.tags_keep_regexp is defined and + registry_ui_purge.tags_keep_regexp | string | length > 0 %} +# Keep tags matching regexp no matter how old, e.g. '^latest$' +# Empty string disables this feature. +purge_tags_keep_regexp: {{ registry_ui_purge.tags_keep_regexp }} + {% endif %} + + {% if registry_ui_purge.tags_keep_from_file is defined and + registry_ui_purge.tags_keep_from_file | string | length > 0 %} +# Keep tags listed in the file no matter how old. +# File format is JSON: {"repo1": ["tag1", "tag2"], "repoX": ["tagX"]} +# Empty string disables this feature. +purge_tags_keep_from_file: {{ registry_ui_purge.tags_keep_from_file }} + {%endif %} + {% if registry_ui_purge.tags_schedule is defined and + registry_ui_purge.tags_schedule | string | length > 0 %} +# Enable built-in cron to schedule purging tags in server mode. +# Empty string disables this feature. +# Example: '25 54 17 * * *' will run it at 17:54:25 daily. +# Note, the cron schedule format includes seconds! 
See https://godoc.org/github.com/robfig/cron +purge_tags_schedule: "{{ registry_ui_purge.tags_schedule }}" + {% endif %} +{% endif %} diff --git a/roles/registry_ui/test-requirements.txt b/roles/registry_ui/test-requirements.txt new file mode 100644 index 0000000..267ec92 --- /dev/null +++ b/roles/registry_ui/test-requirements.txt @@ -0,0 +1,12 @@ +ansible-lint +docker +dnspython +flake8 +molecule>=5.0.1 +molecule-plugins[docker] +netaddr +pytest +pytest-testinfra +tox +tox-gh-actions +yamllint diff --git a/roles/registry_ui/tox.ini b/roles/registry_ui/tox.ini new file mode 100644 index 0000000..a485358 --- /dev/null +++ b/roles/registry_ui/tox.ini @@ -0,0 +1,35 @@ +[tox] +minversion = 3.25 +toxworkdir = /tmp/.tox/ + +envlist = ansible_{2.9,2.10,3.4,4.10,5.1,5.2,6.1} + +skipsdist = true + +[testenv] +passenv = * + +# allowlist_externals = +# /usr/bin/find +# /bin/sh +# rm + +deps = + -r test-requirements.txt + ansible_4.10: ansible>=4.10,<4.11 + ansible_5.1: ansible>=5.1,<5.2 + ansible_5.2: ansible>=5.2,<5.3 + ansible_5.10: ansible>=5.10,<5.11 + ansible_6.1: ansible>=6.1,<6.2 + ansible_6.7: ansible>=6.7,<6.8 + ansible_7.0: ansible>=7.0,<7.1 + ansible_7.5: ansible>=7.5,<7.6 + ansible_8.0: ansible>=8.0,<8.1 + ansible_8.5: ansible>=8.5,<8.6 + +#commands_pre = +# /usr/bin/find {toxinidir} -type f -not -path '{toxworkdir}/*' -path '*/__pycache__/*' -name '*.py[c|o]' -delete +# /bin/sh -c '/usr/bin/find {homedir}/.cache -type d -path "*/molecule_*" -exec rm -rfv \{\} +;' + +commands = + {posargs:molecule test --all --destroy always} diff --git a/roles/registry_ui/vars/archlinux-openrc.yml b/roles/registry_ui/vars/archlinux-openrc.yml new file mode 100644 index 0000000..89267f8 --- /dev/null +++ b/roles/registry_ui/vars/archlinux-openrc.yml @@ -0,0 +1,6 @@ +--- + +registry_ui_requirements: + - iproute + +... diff --git a/roles/registry_ui/vars/archlinux.yml b/roles/registry_ui/vars/archlinux.yml new file mode 100644 index 0000000..89267f8 --- /dev/null +++ b/roles/registry_ui/vars/archlinux.yml @@ -0,0 +1,6 @@ +--- + +registry_ui_requirements: + - iproute + +... diff --git a/roles/registry_ui/vars/artixlinux.yml b/roles/registry_ui/vars/artixlinux.yml new file mode 100644 index 0000000..89267f8 --- /dev/null +++ b/roles/registry_ui/vars/artixlinux.yml @@ -0,0 +1,6 @@ +--- + +registry_ui_requirements: + - iproute + +... diff --git a/roles/registry_ui/vars/debian.yml b/roles/registry_ui/vars/debian.yml new file mode 100644 index 0000000..c81cf5b --- /dev/null +++ b/roles/registry_ui/vars/debian.yml @@ -0,0 +1,3 @@ +--- + +... 
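The testinfra suites above all assemble their variable view the same way: role defaults are loaded first, then vars/main.yml, then the distribution-specific vars file, then the scenario's group_vars, and each later layer overwrites the earlier one before the merged dict is passed through the Ansible Templar. A simplified sketch of that precedence, using plain dicts in place of host.ansible("include_vars", ...); the sample values are made up for illustration:

    # Simplified illustration of the merge order in the get_vars fixture above.
    # Later layers win; nested dicts are replaced, not deep-merged.
    def merge_vars(defaults: dict, role_vars: dict, distribution_vars: dict, molecule_vars: dict) -> dict:
        merged = dict(defaults)            # defaults/main.yml
        merged.update(role_vars)           # vars/main.yml
        merged.update(distribution_vars)   # vars/<distribution>.yml
        merged.update(molecule_vars)       # molecule/<scenario>/group_vars/all/vars.yml
        return merged

    if __name__ == "__main__":
        # sample values only, chosen to show the override behaviour
        merged = merge_vars(
            {"registry_ui_listen": {"address": "127.0.0.1", "port": 8000}},
            {"registry_ui_install_path": "/usr/local/bin/registry-ui/latest"},
            {"registry_ui_requirements": ["iproute2", "gzip"]},
            {"registry_ui_listen": {"address": "0.0.0.0", "port": 8080}},
        )
        print(merged["registry_ui_listen"])  # scenario group_vars win: 0.0.0.0:8080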
diff --git a/roles/registry_ui/vars/main.yml b/roles/registry_ui/vars/main.yml new file mode 100644 index 0000000..2c488d5 --- /dev/null +++ b/roles/registry_ui/vars/main.yml @@ -0,0 +1,81 @@ +--- + +registry_ui_main_version: "{{ registry_ui_version[0:3] }}" + +registry_ui_install_path: /usr/local/bin/registry-ui/{{ registry_ui_version }} + +registry_ui_delegate_to: localhost + +registry_ui_local_tmp_directory: "{{ + lookup('env', 'CUSTOM_LOCAL_TMP_DIRECTORY') | + default(lookup('env', 'HOME') ~ '/.cache/ansible/registry-ui', true) }}/{{ registry_ui_version }}" + +registry_ui_requirements: + - iproute2 + - gzip + +# ---------------------------------------------------------------------------------------- + +registry_ui_defaults_service: + log_level: info + +registry_ui_defaults_listen: + address: 127.0.0.1 + port: 8000 + +registry_ui_defaults_base_path: /ui + +registry_ui_defaults_registry: + url: https://docker-registry.local:5000 + verify_tls: true + username: "" # user + password: "" # pass + password_file: "" # /run/secrets/registry_password_file + +registry_ui_defaults_event: + listener_token: "" # token + retention_days: 7 + database: + driver: sqlite3 # sqlite3 or mysql + location: "" # data/registry_events.db + username: + password: + hostname: 127.0.0.1:3306 + schemaname: docker_events + deletion_enabled: true + anyone_can_view: true + +registry_ui_defaults_cache: + refresh_interval: 10 + +registry_ui_defaults_delete: + anyone_can_delete: false + admins: [] + +registry_ui_defaults_admins: [] + +registry_ui_defaults_purge: + tags_keep_days: 90 + tags_keep_count: 2 + tags_keep_regexp: '' + tags_keep_from_file: '' + tags_schedule: '' + +# ---------------------------------------------------------------------------------------- + +registry_ui_archive: "registry-ui-{{ registry_ui_version }}-{{ ansible_facts.system }}-{{ system_architecture }}.tar.gz" +registry_ui_checksum_url: "{{ registry_ui_release_download_url }}/download/{{ registry_ui_version }}/{{ registry_ui_archive }}.sha256" + +registry_ui_defaults_directory: /etc/default + +go_arch_map: + x86_64: 'amd64' + aarch64: 'arm64' + armv7l: 'armv7' + armv6l: 'armv6' + +system_architecture: "{{ go_arch_map[ansible_architecture] | default(ansible_architecture) }}" + +systemd_lib_directory: /lib/systemd/system + +... diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 0000000..13763a6 --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,13 @@ +ansible-lint +docker +dnspython +flake8 +molecule>=5.0.1 +molecule-plugins[docker] +netaddr +pytest +pytest-testinfra +tox +tox-gh-actions +yamllint +jmespath
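vars/main.yml above derives the release asset name from host facts: ansible_architecture is translated through go_arch_map into the Go-style architecture used in the published archive names, falling back to the reported value for unmapped machines. A short sketch of that lookup with the same mapping; the version string in the example call is only a placeholder:

    # Mirrors go_arch_map / system_architecture / registry_ui_archive from vars/main.yml.
    GO_ARCH_MAP = {
        "x86_64": "amd64",
        "aarch64": "arm64",
        "armv7l": "armv7",
        "armv6l": "armv6",
    }

    def system_architecture(ansible_architecture: str) -> str:
        # default(ansible_architecture): unknown machines keep their reported name
        return GO_ARCH_MAP.get(ansible_architecture, ansible_architecture)

    def archive_name(version: str, system: str, ansible_architecture: str) -> str:
        return f"registry-ui-{version}-{system}-{system_architecture(ansible_architecture)}.tar.gz"

    print(archive_name("0.9.5", "Linux", "x86_64"))  # registry-ui-0.9.5-Linux-amd64.tar.gz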