diff --git a/.github/scs-compliance-check/openstack/clouds.yaml b/.github/scs-compliance-check/openstack/clouds.yaml index 325f6e33b..63a2b9805 100644 --- a/.github/scs-compliance-check/openstack/clouds.yaml +++ b/.github/scs-compliance-check/openstack/clouds.yaml @@ -89,6 +89,14 @@ clouds: auth: auth_url: https://identity.l1a.cloudandheat.com/v3 application_credential_id: "7ab4e3339ea04255bc131868974cfe63" + scaleup-occ2: + auth_type: v3applicationcredential + auth: + auth_url: https://keystone.occ2.scaleup.cloud + application_credential_id: "5d2eea4e8bf8448092490b4190d4430a" + region_name: "RegionOne" + interface: "public" + identity_api_version: 3 syseleven-dus2: interface: public identity_api_verion: 3 diff --git a/.github/workflows/check-scaleup-occ2-v4.yml b/.github/workflows/check-scaleup-occ2-v4.yml new file mode 100644 index 000000000..b5bf70a2d --- /dev/null +++ b/.github/workflows/check-scaleup-occ2-v4.yml @@ -0,0 +1,23 @@ +name: "Compliance IaaS v4 of scaleup-occ2" + +on: + # Trigger compliance check every day at 4:30 UTC + schedule: + - cron: '30 4 * * *' + # Trigger compliance check after Docker image has been built + workflow_run: + workflows: [Build and publish scs-compliance-check Docker image] + types: + - completed + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + check-scaleup-occ2: + uses: ./.github/workflows/scs-compliance-check-with-application-credential.yml + with: + version: v4 + layer: iaas + cloud: scaleup-occ2 + secret_name: OS_PASSWORD_SCALEUP_OCC2 + secrets: inherit diff --git a/.gitignore b/.gitignore index 4595d59ea..2b83a0983 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ **/__pycache__/ .venv/ .idea +.sandbox .DS_Store node_modules Tests/kaas/results/ diff --git a/.zuul.d/secure.yaml b/.zuul.d/secure.yaml index 318a1b6b0..78dbb906f 100644 --- a/.zuul.d/secure.yaml +++ b/.zuul.d/secure.yaml @@ -233,6 +233,28 @@ VCsXjf0qBBMrzz6HP9z95Bk44fiJ3L/LkA3Iij961dYrQXbZKDrKOiX/QPwrcSrVmjmew UbPexJFHgvTCqjadoLejSt9cUd9lVzhuzLJ8CS+CcCMbZOno6qathrd2B88riQaPNIGNu gfkNT9R63ZzKB1qIA2n5RZi7SH9DPIUd0AwLMn2bhp3uok5pNAPP/4/1RkQiCA= + scaleup_occ2_ac_id: !encrypted/pkcs1-oaep + - N2duwkcMdOXw6wF0deE/0BPM1M/URt3eWmrnBJ89VHeCDENGfTfDHcWPYs3wW4rSRCG6t + gqgNuA049OvOhL7rtjNHZ6yIj6xEHH/YdqT4UxjXPS9GFwoJXDtE8rIGjK3KU8GfUgKnG + DLplyyzGzx5j39rJAS628InmC56aip47rO1J4HQE9Ku25Wb06R7ykx+0ZOWr0HXjV/VsV + uwfyL+DPgewbL+4u8/XkcI0FwAM9/KkF/CcYUq5aVMdQS2foatTQW0C2idg+pffSTRaau + VF44rkVfzsCOz4MYAFpLIaL9Zxx1FifaPOd0oi6rEFjGd6vFtFCHk1BRpKmOITLyx3Te5 + zVffSkQAsqpn/4er8800bjQzxXvqmQmR0QwPM7dhvRnrNbTSCA/Awm5BPaUgeCZFN3MPN + Mc0XIaEwjuJvDK6fqj5tJrVIs5bxAmqRDj8d76AlJcOdDxHicTHgR3aUG4AKOWkUsskgQ + 3xR8lPh31O/HgzG9tq6o/DCPA1O9wyyOyT7KwJAaRASPCA1O80ZAzhZUNUVyut6dYEwaS + QXP4IaEJOxP8EkxR7FDEuO99UFZ7TXQ1CF7ots4wIs5tEpQvcdLnvBjJckp0fNBFTuGMm + FCvhgBK30NC93U4DxQv6xZBhqtvHYjHcTOXvz2fryRJT2teMN+eI+RDdV1Jj8Y= + scaleup_occ2_ac_secret: !encrypted/pkcs1-oaep + - LfUHhslK41JDp3CpslWGGA4bZ3udZh4KnytcXohkdbchb8QVt8eNc4nD0ti0/XS18YKwq + DlHOWw2rDJZ8RGIXENVUYzDbECoBErE8IAqQE0q3oS/8Oq0NYOFTGvvlKuue7U4s87Pwi + YFi+Q0Rv7vO8cWFVtbRHK+Hw6pC42Biq2T+tuVBCLqylIMViXpuEy9UpFLEv59zr6EHa9 + uB3xkjnpWuabe7vrG+LQHc0pJ5tNhcLiOnJggU5Ef02FBy+t6xvuJW8f6cXCnRRj1q0fl + D/vTmC7avwHnWC+J4WLL69HCwW05I7iHftVSWOXQgRzMBd4D4ND2OXfsWElu0eOV5XG6X + JsQH8lDnVN/lqaDAOYR4fk4+9yt3RURwvNL5FUnDK1t7LAI4X0gcvLrQAfzgOlpBYDXSK + 0kbUzqwivuw1v2zO/gxQU+J28PsOfZaKf/7ZZyj3e/tiq4wBpvPb0mVBwWXigKqzr+QED + Iy2u/g3x2qdcTpXR/RPq+xiXM2B2rw1V5gdkscdL+avXtTF7hT9HrcayHx3HDZ/h6aGPD + 
RWIJ8bstl+x2Q4zExgR13amWM8ZR1iLGCN20U/ZAaqANCqjDbrSVSTjTPzYtNFwAXwxkB + 3NHhPDHZ1MIdr6IJE4IZ4TCMsIeTA2UHNfF4RCzeDSIJ+CXOQxUFWOxZkf97WY= syseleven_dus2_ac_id: !encrypted/pkcs1-oaep - SjwtIvJO7DkLJDmS+T/Z5utFBa22hmPRBd8mzonJHGgURB2W7fmXFreD9NPrLfbt7ujKi KNqJm8k1Vr1F3Mu+Osr0BWSnq5makwVt2ikBY4qPbL8iyVXsByaT/HNPLCOokqy+REpfu diff --git a/Standards/scs-0100-w1-flavor-naming-implementation-testing.md b/Standards/scs-0100-w1-flavor-naming-implementation-testing.md index d9f5f62b2..868215476 100644 --- a/Standards/scs-0100-w1-flavor-naming-implementation-testing.md +++ b/Standards/scs-0100-w1-flavor-naming-implementation-testing.md @@ -2,7 +2,7 @@ title: "SCS Flavor Naming Standard: Implementation and Testing Notes" type: Supplement track: IaaS -status: Proposal +status: Draft supplements: - scs-0100-v1-flavor-naming.md - scs-0100-v2-flavor-naming.md diff --git a/Standards/scs-0101-w1-entropy-implementation-testing.md b/Standards/scs-0101-w1-entropy-implementation-testing.md index 432a25fec..19e1f43dc 100644 --- a/Standards/scs-0101-w1-entropy-implementation-testing.md +++ b/Standards/scs-0101-w1-entropy-implementation-testing.md @@ -2,7 +2,7 @@ title: "SCS Entropy: Implementation and Testing Notes" type: Supplement track: IaaS -status: Proposal +status: Draft supplements: - scs-0101-v1-entropy.md --- diff --git a/Standards/scs-0102-v1-image-metadata.md b/Standards/scs-0102-v1-image-metadata.md index 8b0ab98ba..18d42adf7 100644 --- a/Standards/scs-0102-v1-image-metadata.md +++ b/Standards/scs-0102-v1-image-metadata.md @@ -1,5 +1,5 @@ --- -title: SCS Image Metadata Standard +title: SCS Image Metadata type: Standard stabilized_at: 2022-10-31 status: Stable diff --git a/Standards/scs-0102-w1-image-metadata-implementation-testing.md b/Standards/scs-0102-w1-image-metadata-implementation-testing.md index 05fb05831..b2d9f5b75 100644 --- a/Standards/scs-0102-w1-image-metadata-implementation-testing.md +++ b/Standards/scs-0102-w1-image-metadata-implementation-testing.md @@ -2,7 +2,7 @@ title: "SCS Image Metadata: Implementation and Testing Notes" type: Supplement track: IaaS -status: Proposal +status: Draft supplements: - scs-0102-v1-image-metadata.md --- diff --git a/Standards/scs-0104-w1-standard-images-implementation.md b/Standards/scs-0104-w1-standard-images-implementation.md index 07b5715ee..9a18a9056 100644 --- a/Standards/scs-0104-w1-standard-images-implementation.md +++ b/Standards/scs-0104-w1-standard-images-implementation.md @@ -2,7 +2,7 @@ title: "SCS Standard Images: Implementation Notes" type: Supplement track: IaaS -status: Proposal +status: Draft supplements: - scs-0104-v1-standard-images.md --- diff --git a/Standards/scs-0114-v1-volume-type-standard.md b/Standards/scs-0114-v1-volume-type-standard.md index 9ed0d730c..003db9a24 100644 --- a/Standards/scs-0114-v1-volume-type-standard.md +++ b/Standards/scs-0114-v1-volume-type-standard.md @@ -1,8 +1,9 @@ --- -title: Volume Type Standard +title: SCS Volume Types type: Standard -status: Draft -track: IaaS +status: Stable +stabilized_at: 2024-11-13 +track: IaaS --- ## Introduction diff --git a/Standards/scs-0115-v1-default-rules-for-security-groups.md b/Standards/scs-0115-v1-default-rules-for-security-groups.md index b118dcf1f..8809a2857 100644 --- a/Standards/scs-0115-v1-default-rules-for-security-groups.md +++ b/Standards/scs-0115-v1-default-rules-for-security-groups.md @@ -1,7 +1,8 @@ --- title: Default Rules for Security Groups type: Standard -status: Draft +status: Stable +stabilized_at: 2024-11-13 track: IaaS --- @@ -25,7 +26,7 @@ 
Administrator (abbr. Admin)
 
 ### Default Security Groups, Custom Security Groups and default Security Group Rules
 
-To properly understand the concepts in this standard and avoid ambiguity, is very important to distinguish between the following similar-sounding but different resources in the OpenStack Networking API:
+To properly understand the concepts in this standard and avoid ambiguity, it is very important to distinguish between the following similar-sounding but different resources in the OpenStack Networking API:
 
 1. default Security Group
 2. custom Security Group
@@ -59,10 +60,10 @@ Therefore, this standard proposes default Security Group rules that MUST be set
 
 ## Design Considerations
 
-Up to the 2023.1 release (antelope) the default Security Group rules are hardcoded in the OpenStack code.
-We should not require to change this behavior through code changes in deployments.
+Up to the 2023.1 release (Antelope), the default Security Group rules are defined in the OpenStack code.
+We should not require changing this behavior through code changes in deployments.
 
-Beginning with the 2023.2 release (bobcat) the default Security Group rules can now be edited by administrators through an API.
+Beginning with the 2023.2 release (Bobcat), the default Security Group rules can be edited by administrators through an API.
 All rules that should be present as default in Security Groups have to be configured by admins through this API.
 
 There are two ways to approach a standard for the default rules of Security Groups.
diff --git a/Standards/scs-0116-v1-key-manager-standard.md b/Standards/scs-0116-v1-key-manager-standard.md
index 55d74f0d0..b0dd19139 100644
--- a/Standards/scs-0116-v1-key-manager-standard.md
+++ b/Standards/scs-0116-v1-key-manager-standard.md
@@ -1,7 +1,8 @@
 ---
-title: Key Manager Standard
+title: SCS Key Manager Standard
 type: Standard
-status: Draft
+status: Stable
+stabilized_at: 2024-11-13
 track: IaaS
 ---
diff --git a/Standards/scs-0116-w1-key-manager-implementation-testing.md b/Standards/scs-0116-w1-key-manager-implementation-testing.md
index 0ca20bf2e..d3acc6b4c 100644
--- a/Standards/scs-0116-w1-key-manager-implementation-testing.md
+++ b/Standards/scs-0116-w1-key-manager-implementation-testing.md
@@ -2,7 +2,7 @@
 title: "SCS Key Manager Standard: Implementation and Testing Notes"
 type: Supplement
 track: IaaS
-status: Proposal
+status: Draft
 supplements:
   - scs-0116-v1-key-manager-standard.md
 ---
@@ -44,6 +44,11 @@ This can be done with a small change in the policy.yaml file. The `creator` has
 The check for the presence of a Key Manager is done with a test script, that checks the presence of a Key Manager service in the catalog endpoint of Openstack.
 This check can eventually be moved to the checks for the mandatory an supported service/API list, in case of a promotion of the Key Manager to the mandatory list.
 
+### Implementation
+
+The script [`check-for-key-manager.py`](https://github.com/SovereignCloudStack/standards/blob/main/Tests/iaas/key-manager/check-for-key-manager.py)
+connects to OpenStack and performs the checks described in this section.
+
 ## Manual Tests
 
 It is not possible to check a deployment for a correctly protected Master KEK automatically from the outside.
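+
+For illustration, the automated presence check described above essentially boils down to a service catalog lookup.
+A minimal sketch of such a lookup (assuming the `openstacksdk` library and a `clouds.yaml` entry named `mycloud`; this is not the official test script) could look like this:
+
+```python
+import openstack
+from keystoneauth1.exceptions import EndpointNotFound
+
+# Connect using credentials from clouds.yaml; the cloud name is an assumption.
+conn = openstack.connect(cloud="mycloud")
+try:
+    # Query the token's service catalog for a "key-manager" endpoint.
+    endpoint = conn.session.get_endpoint(service_type="key-manager")
+    print(f"Key Manager endpoint found: {endpoint}")
+except EndpointNotFound:
+    print("No Key Manager service registered in the catalog.")
+```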
diff --git a/Standards/scs-0117-v1-volume-backup-service.md b/Standards/scs-0117-v1-volume-backup-service.md
index d272dfa05..9838536fa 100644
--- a/Standards/scs-0117-v1-volume-backup-service.md
+++ b/Standards/scs-0117-v1-volume-backup-service.md
@@ -1,7 +1,8 @@
 ---
 title: Volume Backup Functionality
 type: Standard
-status: Draft
+status: Stable
+stabilized_at: 2024-11-13
 track: IaaS
 ---
diff --git a/Standards/scs-0118-v1-taxonomy-of-failsafe-levels.md b/Standards/scs-0118-v1-taxonomy-of-failsafe-levels.md
index 069fdfc52..45f494368 100644
--- a/Standards/scs-0118-v1-taxonomy-of-failsafe-levels.md
+++ b/Standards/scs-0118-v1-taxonomy-of-failsafe-levels.md
@@ -1,5 +1,5 @@
 ---
-title: Taxonomy of Failsafe Levels
+title: SCS Taxonomy of Failsafe Levels
 type: Decision Record
 status: Draft
 track: IaaS
diff --git a/Standards/scs-0118-w1-example-impacts-of-failure-scenarios.md b/Standards/scs-0118-w1-example-impacts-of-failure-scenarios.md
index 5bd84e76d..a41ceb6ea 100644
--- a/Standards/scs-0118-w1-example-impacts-of-failure-scenarios.md
+++ b/Standards/scs-0118-w1-example-impacts-of-failure-scenarios.md
@@ -2,7 +2,7 @@
 title: "SCS Taxonomy of Failsafe Levels: Examples of Failure Cases and their impact on IaaS and KaaS resources"
 type: Supplement
 track: IaaS
-status: Proposal
+status: Draft
 supplements:
   - scs-0118-v1-taxonomy-of-failsafe-levels.md
 ---
diff --git a/Standards/scs-0121-v1-Availability-Zones-Standard.md b/Standards/scs-0121-v1-Availability-Zones-Standard.md
index ea1f6e937..0dc9ed698 100644
--- a/Standards/scs-0121-v1-Availability-Zones-Standard.md
+++ b/Standards/scs-0121-v1-Availability-Zones-Standard.md
@@ -1,7 +1,8 @@
 ---
-title: Availability Zones Standard
+title: SCS Availability Zones
 type: Standard
-status: Draft
+status: Stable
+stabilized_at: 2024-11-13
 track: IaaS
 ---
diff --git a/Standards/scs-0121-w1-Availability-Zones-Standard.md b/Standards/scs-0121-w1-Availability-Zones-Standard.md
index 5fbb88cc1..9ec3dbc82 100644
--- a/Standards/scs-0121-w1-Availability-Zones-Standard.md
+++ b/Standards/scs-0121-w1-Availability-Zones-Standard.md
@@ -1,5 +1,5 @@
 ---
-title: "SCS Availability Zone Standard: Implementation and Testing Notes"
+title: "SCS Availability Zones: Implementation and Testing Notes"
 type: Supplement
 track: IaaS
 status: Draft
diff --git a/Drafts/node-to-node-encryption.md b/Standards/scs-0122-v1-node-to-node-encryption.md
similarity index 99%
rename from Drafts/node-to-node-encryption.md
rename to Standards/scs-0122-v1-node-to-node-encryption.md
index 4234b64db..f3d298706 100644
--- a/Drafts/node-to-node-encryption.md
+++ b/Standards/scs-0122-v1-node-to-node-encryption.md
@@ -1,7 +1,7 @@
 ---
 title: _End-to-End Encryption between Customer Workloads_
 type: Decision Record
-status: Proposal
+status: Draft
 track: IaaS
 ---
diff --git a/Standards/scs-0123-v1-mandatory-and-supported-IaaS-services.md b/Standards/scs-0123-v1-mandatory-and-supported-IaaS-services.md
new file mode 100644
index 000000000..1d94990bc
--- /dev/null
+++ b/Standards/scs-0123-v1-mandatory-and-supported-IaaS-services.md
@@ -0,0 +1,82 @@
+---
+title: Mandatory and Supported IaaS Services
+type: Standard
+status: Draft
+track: IaaS
+---
+
+## Introduction
+
+To be SCS-compliant, a Cloud Service Provider (CSP) has to fulfill all SCS standards.
+Some of those standards are broad and consider all APIs of all services on the IaaS layer, such as the [role standard](https://github.com/SovereignCloudStack/issues/issues/396) that is currently under consideration.
+Many services exist on that layer, and as a first step their number needs to be limited so that both the standards and the Cloud Service Providers following them have a clear scope.
+For this purpose, this standard will establish lists of mandatory services, whose APIs have to be present in an SCS cloud, as well as supported services, whose APIs are considered by some standards and may even be tested for their integration, but are optional in the sense that their omission will not violate SCS conformance.
+
+## Motivation
+
+There are many OpenStack APIs and corresponding services that can be deployed on the IaaS level.
+These services differ in the quality of their implementation and liveness, and some of them may be easily omitted when creating an IaaS deployment.
+To fulfill all SCS-provided standards, only a subset of these APIs is required.
+Some, but not all, of the remaining OpenStack APIs are additionally supported by the SCS project and may be part of its reference implementation.
+This results in different levels of support for specific services.
+This document will give readers insight into how SCS classifies the OpenStack APIs accordingly.
+If a cloud provides all mandatory and any number of supported OpenStack APIs, it can be tested for SCS compliance.
+Any unsupported APIs will not be tested.
+
+## Mandatory IaaS APIs
+
+The following IaaS APIs MUST be present in SCS-compliant IaaS deployments and could be implemented with the corresponding OpenStack services:
+
+| Mandatory API | corresponding OpenStack Service | description |
+|-----|-----|-----|
+| **block-storage** | Cinder | Block Storage service |
+| **compute** | Nova | Compute service |
+| **identity** | Keystone | Identity service |
+| **image** | Glance | Image service |
+| **load-balancer** | Octavia | Load-balancer service |
+| **network** | Neutron | Networking service |
+| **s3** | S3 API object storage | Object Storage service |
+
+:::caution
+
+S3 API implementations may differ in certain offered features.
+CSPs must publicly describe which implementation they use in their deployment.
+Users should always research whether a needed feature is supported in the offered implementation.
+
+:::
+
+The endpoints of services MUST be findable through the `catalog list` of the identity API[^1].
+
+[^1]: [Integrate into the service catalog of Keystone](https://docs.openstack.org/keystone/latest/contributor/service-catalog.html)
+
+## Supported IaaS APIs
+
+The following IaaS APIs MAY be present in SCS-compliant IaaS deployments, e.g. implemented through the corresponding OpenStack services, and are considered in the SCS standards.
+
+| Supported API | corresponding OpenStack Service | description |
+|-----|-----|-----|
+| **bare-metal** | Ironic | Bare Metal provisioning service |
+| **billing** | CloudKitty | Rating/Billing service |
+| **dns** | Designate | DNS service |
+| **ha** | Masakari | Instances High Availability service |
+| **key-manager** | Barbican | Key Manager service |
+| **object-store** | Swift | Object Store with different possible backends |
+| **orchestration** | Heat | Orchestration service |
+| **shared-file-systems** | Manila | Shared File Systems service |
+| **telemetry** | Ceilometer | Telemetry service |
+| **time-series-database** | Gnocchi | Time Series Database service |
+
+## Unsupported IaaS APIs
+
+All other OpenStack services whose APIs are not mentioned in the mandatory or supported lists will not be tested for compatibility and conformance in SCS clouds by the SCS community.
+Those services MAY be integrated into IaaS deployments by a Cloud Service Provider at their own responsibility, but SCS will not assume their presence, and potential issues that occur during deployment or usage have to be handled by the CSP of their own accord.
+The SCS standard offers no guarantees for compatibility or reliability of services categorized as unsupported.
+
+## Related Documents
+
+[The OpenStack Services](https://www.openstack.org/software/)
+
+## Conformance Tests
+
+The presence of the mandatory OpenStack APIs will be tested in [this test-script](https://github.com/SovereignCloudStack/standards/blob/mandatory-and-supported-IaaS-services/Tests/iaas/mandatory-services/mandatory-iaas-services.py).
+The test will further check whether the object store endpoint is compatible with S3.
diff --git a/Standards/scs-0211-w1-kaas-default-storage-class-implementation-testing.md b/Standards/scs-0211-w1-kaas-default-storage-class-implementation-testing.md
index d8112f299..1eeb89e48 100644
--- a/Standards/scs-0211-w1-kaas-default-storage-class-implementation-testing.md
+++ b/Standards/scs-0211-w1-kaas-default-storage-class-implementation-testing.md
@@ -2,7 +2,7 @@
 title: "SCS KaaS default storage class: Implementation and Testing Notes"
 type: Supplement
 track: KaaS
-status: Proposal
+status: Draft
 supplements:
   - scs-0211-v1-kaas-default-storage-class.md
 ---
diff --git a/Standards/scs-0214-w1-k8s-node-distribution-implementation-testing.md b/Standards/scs-0214-w1-k8s-node-distribution-implementation-testing.md
index 79282fbd7..4366365a0 100644
--- a/Standards/scs-0214-w1-k8s-node-distribution-implementation-testing.md
+++ b/Standards/scs-0214-w1-k8s-node-distribution-implementation-testing.md
@@ -2,7 +2,7 @@
 title: "Kubernetes Node Distribution and Availability: Implementation and Testing Notes"
 type: Supplement
 track: KaaS
-status: Proposal
+status: Draft
 supplements:
   - scs-0214-v1-k8s-node-distribution.md
   - scs-0214-v2-k8s-node-distribution.md
diff --git a/Standards/scs-0219-v1-kaas-networking.md b/Standards/scs-0219-v1-kaas-networking.md
new file mode 100644
index 000000000..8f35f7925
--- /dev/null
+++ b/Standards/scs-0219-v1-kaas-networking.md
@@ -0,0 +1,99 @@
+---
+title: KaaS Networking Standard
+type: Standard
+status: Draft
+track: KaaS
+---
+
+## Introduction
+
+Kubernetes defines a networking model that needs to be implemented by a separate CNI plugin.
+Beyond basic connectivity within the cluster, however, there are many networking features that are specified but optional.
+Some of these optional features provide vital functionality, such as the NetworkPolicy API and the Ingress API.
+
+This standard specifies a minimal set of networking features that users can expect in clusters created by an SCS-compliant KaaS provider.
+
+## Terminology
+
+The following terms are used throughout this document:
+
+| Term | Meaning |
+|------|---------|
+| KaaS, managed Kubernetes | Kubernetes as a Service, automated on-demand deployment of Kubernetes clusters. |
+| CSP | Cloud Service Provider, the provider of the KaaS infrastructure. |
+| CNI | Container Network Interface, a standardized networking interface for container runtimes. |
+| CNI plugin, networking plugin | Kubernetes bindings for a CNI implementation, translates Kubernetes API concepts into more basic container networking concepts. |
+| network policy | A set of rules to restrict network traffic in a Kubernetes cluster. |
+
+## Motivation
+
+KaaS providers will typically support additional networking functionality beyond basic Kubernetes networking.
+The specific range of features depends on the CNI plugin in use, but may also be extended by additional operators.
+Users may expect certain optional functionality, so we should define a baseline feature set that has to be available in an SCS-compliant KaaS cluster.
+
+## Design Considerations
+
+The Kubernetes API can be extended arbitrarily.
+Many CNI plugins will define custom resources to enable functionality that is not covered in the official [API specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/).
+Sometimes they will even reuse names from different API groups, such as `NetworkPolicy`, which exists in the basic `networking.k8s.io/v1` API, but also in `projectcalico.org/v3`.
+
+To avoid any ambiguity, we should therefore be explicit about the API groups and versions of resources.
+We should also avoid mandating third-party API extensions, to avoid dependencies on specific third-party software and keep the standard as generic as possible.
+
+### Options considered
+
+#### NetworkPolicy API
+
+Kubernetes network policies are used to restrict network traffic between pods in a cluster, but also between pods and external network resources.
+The policy rules can filter based on port and address ranges, but also on Kubernetes-specific target attributes such as namespaces and labels.
+They must be implemented by the CNI plugin, and though they are widely supported, they are still technically optional, and there are some lightweight networking plugins, such as Flannel, that do not enforce them.
+
+Nonetheless, network policies are widely used and most users will expect them in a managed Kubernetes cluster.
+The wide but varying support among CNI plugins makes them a good target for SCS standardization.
+
+#### Default Network Policies in Namespaces
+
+Basic network policies are namespaced resources, and can only filter traffic to and from pods in their own namespace.
+In a newly created namespace without policies, the default behavior applies, which is to not restrict traffic at all.
+
+It can be desirable to automatically create default network policies in new namespaces, using a policy operator such as Kyverno.
+A CSP could provide such an operator and offer a number of default policies, like blocking connections to other namespaces by default, or blocking access to the OpenStack metadata service.
+
+Any user with permissions to manage their own network policies in a namespace will of course be able to remove or modify any default network policies in that namespace.
+CSP-provided network policies should thus only be viewed as a safety default, and should only be deployed if they are actually beneficial to users.
+
+#### AdminNetworkPolicy API
+
+An alternative to automatically created default network policies is API extensions that allow cluster-wide networking rules.
+Some CNI plugins have implemented such extensions, e.g. Calico's `GlobalNetworkPolicy` and Cilium's `CiliumClusterwideNetworkPolicy`.
+
+The Kubernetes Network Special Interest Group is currently working on an [official API extension](https://network-policy-api.sigs.k8s.io/api-overview/) to cover this functionality.
+This API extension introduces the new `AdminNetworkPolicy` and `BaselineAdminNetworkPolicy` resources, which represent cluster-wide network policies with respectively higher or lower precedence than namespaced network policies.
+
+This API is also a good candidate for standardization because it consolidates a number of vendor-specific workarounds to limitations of the NetworkPolicy API.
+It has not been stabilized yet, so currently we can at most recommend CNI plugins where there is ongoing work to support these features.
+
+#### Ingress API
+
+The Ingress API allows the external exposure of HTTP/HTTPS-based services running in the cluster.
+Unlike the L3/L4-based LoadBalancer Service type, Ingress provides L7 load balancing, HTTP routing, and TLS termination for services.
+This functionality can be provided within the cluster by a pod-based ingress controller such as `ingress-nginx`, which exposes Ingress resources as Services.
+
+However, there are also Ingress controllers that integrate with the underlying infrastructure and may help to reduce overhead.
+Examples of this are the Cilium CNI plugin, which comes with built-in Ingress support, and the Octavia Ingress controller, which may be a good choice if OpenStack Octavia is already used to provide L3/L4 load balancing.
+
+The CSPs that manage the underlying infrastructure can of course make the best choice for such an integrated Ingress controller, so they should be encouraged to do so.
+Even with a CSP-provided default Ingress controller present, users will be able to use alternative Ingress controllers by creating a new `IngressClass`, which can then be referenced in Ingress resources.
+
+## Decision
+
+CSPs MUST provide a network plugin that fully supports `NetworkPolicy` resources in the API version `networking.k8s.io/v1`.
+CSPs SHOULD provide a network plugin that supports or is working on support for the `AdminNetworkPolicy` and `BaselineAdminNetworkPolicy` resources of the `policy.networking.k8s.io` API group, in their latest version, up to `v1`.
+
+CSPs SHOULD offer the option for a managed, `networking.k8s.io/v1`-compliant Ingress controller and a default `IngressClass` resource for this controller.
+
+CSPs MAY add default networking restrictions, using either `networking.k8s.io/v1`-compliant `NetworkPolicy` resources with a policy operator, or alternatively any cluster-wide network policy extensions provided by the CNI plugin.
+
+## Conformance Tests
+
+Required support for network policies will be tested using the upstream e2e tests via Sonobuoy.
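+
+For orientation, the following sketch shows the kind of `networking.k8s.io/v1` resource whose support is being tested: a minimal default-deny ingress policy (name and namespace are placeholders; this particular policy is not mandated by the standard):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: default-deny-ingress
+  namespace: example
+spec:
+  # An empty pod selector applies the policy to all pods in the namespace.
+  podSelector: {}
+  policyTypes:
+    # No ingress rules are listed, so all ingress traffic is denied.
+    - Ingress
+```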
diff --git a/Standards/scs-0219-w1-kaas-networking.md b/Standards/scs-0219-w1-kaas-networking.md
new file mode 100644
index 000000000..3e34948d2
--- /dev/null
+++ b/Standards/scs-0219-w1-kaas-networking.md
@@ -0,0 +1,27 @@
+---
+title: "KaaS Networking Standard: Implementation Notes"
+type: Supplement
+track: KaaS
+status: Draft
+supplements:
+  - scs-0219-v1-kaas-networking.md
+---
+## List of compliant CNI Plugins
+
+The Kubernetes Network Policy API working group maintains a [list of work-in-progress implementations](https://network-policy-api.sigs.k8s.io/implementations/) of the AdminNetworkPolicy and BaselineAdminNetworkPolicy resources.
+Besides their own proof-of-concept implementation of [kube-network-policies](https://github.com/kubernetes-sigs/kube-network-policies), at the time of writing they list the following CNI plugins:
+
+- [OVN-Kubernetes](https://github.com/ovn-org/ovn-kubernetes/)
+- [Antrea](https://github.com/antrea-io/antrea/)
+- [KubeOVN](https://github.com/kubeovn/kube-ovn)
+- [Calico](https://github.com/projectcalico/calico)
+- [Cilium](https://github.com/cilium/cilium)
+
+All of these plugins also implement the basic NetworkPolicy API, and are therefore compliant both with the standard's requirements and recommendations.
+
+The CNI plugin [Flannel](https://github.com/flannel-io/flannel) does not support network policies by itself, but can be combined with Calico for policy enforcement.
+This configuration is known as [Canal](https://docs.tigera.io/calico/latest/getting-started/kubernetes/flannel/install-for-flannel) and will likely profit from Calico's support for AdminNetworkPolicy.
+
+There are more CNI plugins that support the NetworkPolicy API but are not known to be working on support for the AdminNetworkPolicy extensions.
+As such, they are still compliant with the current version of the standard.
+However, these seem to be either vendor-specific, like the [Azure CNI](https://learn.microsoft.com/de-de/azure/aks/configure-azure-cni), or unmaintained, like [Weave](https://github.com/weaveworks/weave).
diff --git a/Standards/scs-0302-v1-domain-manager-role.md b/Standards/scs-0302-v1-domain-manager-role.md
index 29ffa5a7c..a418a23b7 100644
--- a/Standards/scs-0302-v1-domain-manager-role.md
+++ b/Standards/scs-0302-v1-domain-manager-role.md
@@ -1,17 +1,27 @@
 ---
 title: Domain Manager configuration for Keystone
 type: Standard
-status: Draft
+status: Stable
+stabilized_at: 2024-11-13
 track: IAM
 ---
 
 ## Introduction
 
 SCS Clouds should provide a way to grant Domain Manager rights to SCS Customers which provides IAM self-service capabilities within an OpenStack domain.
-This is not properly implemented in the default OpenStack configuration and requires specific adjustments to the Keystone identity management configuration.
+Such capabilities should enable the SCS customer to manage identity resources within their domain without involving the provider of the cloud.
 
 To avoid conflict with the unscoped `admin` role in OpenStack we want to refer to this new persona as "Domain Manager", introducing the `manager` role in the API for domains.
 
-### Glossary
+:::info
+
+The Domain Manager functionality will be a native part of official OpenStack Keystone beginning with release 2024.2 ("Dalmatian").
+
+To implement the Domain Manager in SCS clouds using an OpenStack release older than 2024.2, please refer to the supplemental [implementation notes for this standard](https://github.com/SovereignCloudStack/standards/blob/main/Standards/scs-0302-w1-domain-manager-implementation-notes.md).
+The implementation notes document describes an alternative implementation that can be used for OpenStack 2024.1 and older releases. + +::: + +## Terminology The following special terms are used throughout this standard document: @@ -31,16 +41,6 @@ The following special terms are used throughout this standard document: [^1]: [OpenStack Documentation: Role-Based Access Control Overview](https://static.opendev.org/docs/patrole/latest/rbac-overview.html) -### Impact - -Applying this standard modifies the API policy configuration of Keystone and introduces a new persona to Keystone to enable IAM self-service for customers within a domain. -Once assigned, this persona allows special Domain Manager users within a domain to manage users, project, groups and role assignments as part of the IAM self-service. - -However, the configuration change introduced by this standard does not automatically assign the Domain Manager persona to any users per default. -Assigning the new persona and granting customers the resulting self-service capabilities is a deliberate action to be taken by the CSP on a per-tenant (i.e. per domain) basis. - -Omitting the provisioning of any Domain Manager users (i.e. not assigning the new persona to any user) will result in an OpenStack cloud that behaves identically to a configuration without the standard applied, making the actual usage of the functionality a CSP's choice and entirely optional. - ## Motivation In the default configuration of Keystone, only users with the `admin` role may manage the IAM resources such as projects, groups and users and their relation through role assignments. @@ -94,180 +94,52 @@ This means that by creating a new role and extending Keystone's API policy confi [^4]: [OpenStack Documentation: Administering Applications that use oslo.policy](https://docs.openstack.org/oslo.policy/latest/admin/index.html) -## Open questions - -### Limitations - -The approach described in this standard imposes the following limitations: +## Decision -1. as a result of the "`identity:list_domains`" rule (see below), Domain Managers are able to see all domains[^5] via "`openstack domain list`" and can inspect the metadata of other domains with "`openstack domain show`" -2. as a result of the "`identity:list_roles`" rule (see below), Domain Managers are able to see all roles via "`openstack role list`" and can inspect the metadata of other roles with "`openstack role show`" +A role named "`manager`" MUST be present in the identity service. -**As a result of points 1 and 2, metadata of all domains and roles will be exposed to all Domain Managers!** +The identity service MUST implement the Domain Manager functionality for this role. +The implementation details depend on the OpenStack Keystone version used. +See the sections below for reference. -If a CSP deems either of these points critical, they may abstain from granting the `"manager"` role to any user in a domain scope, effectively disabling the Domain Manager functionality. See [Impact](#impact). +### For OpenStack Keystone 2024.2 or later -[^5]: see the [corresponding Launchpad bug at Keystone](https://bugs.launchpad.net/keystone/+bug/2041611) +For OpenStack Keystone 2024.2 or later the Domain Manager persona is already integrated natively. +To guarantee proper scope protection, the Identity API MUST be configured with "`enforce_scope`" and "`enforce_new_defaults`" enabled for the oslo.policy library. 
-## Decision +Example entries for the `keystone.conf` configuration file: -A role named "`manager`" is to be created via the Keystone API and the policy adjustments quoted below are to be applied. - -### Policy adjustments - -The following policy has to be applied to Keystone in a verbatim fashion. -The only parts of the policy definitions that may be changed are: - -1. The "`base_*`" definitions to align them to the correct OpenStack defaults matching the OpenStack release of the environment in case those differ from this template. -2. The "`is_domain_managed_role`" definition (see next section below). - -```yaml -# SCS Domain Manager policy configuration - -# Section A: OpenStack base definitions -# The entries beginning with "base_" should be exact copies of the -# default "identity:" definitions for the target OpenStack release. -# They will be extended upon for the manager role below this section. -"base_get_domain": "(role:reader and system_scope:all) or token.domain.id:%(target.domain.id)s or token.project.domain.id:%(target.domain.id)s" -"base_list_domains": "(role:reader and system_scope:all)" -"base_list_roles": "(role:reader and system_scope:all)" -"base_get_role": "(role:reader and system_scope:all)" -"base_list_users": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.domain_id)s)" -"base_get_user": "(role:reader and system_scope:all) or (role:reader and token.domain.id:%(target.user.domain_id)s) or user_id:%(target.user.id)s" -"base_create_user": "(role:admin and system_scope:all) or (role:admin and token.domain.id:%(target.user.domain_id)s)" -"base_update_user": "(role:admin and system_scope:all) or (role:admin and token.domain.id:%(target.user.domain_id)s)" -"base_delete_user": "(role:admin and system_scope:all) or (role:admin and token.domain.id:%(target.user.domain_id)s)" -"base_list_projects": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.domain_id)s)" -"base_get_project": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.project.domain_id)s) or project_id:%(target.project.id)s" -"base_create_project": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.project.domain_id)s)" -"base_update_project": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.project.domain_id)s)" -"base_delete_project": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.project.domain_id)s)" -"base_list_user_projects": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.user.domain_id)s) or user_id:%(target.user.id)s" -"base_check_grant": "(role:reader and system_scope:all) or ((role:reader and domain_id:%(target.user.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:reader and domain_id:%(target.user.domain_id)s and domain_id:%(target.domain.id)s) or (role:reader and domain_id:%(target.group.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:reader and domain_id:%(target.group.domain_id)s and domain_id:%(target.domain.id)s)) and (domain_id:%(target.role.domain_id)s or None:%(target.role.domain_id)s)" -"base_list_grants": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.user.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:reader and domain_id:%(target.user.domain_id)s and domain_id:%(target.domain.id)s) or (role:reader and domain_id:%(target.group.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:reader and domain_id:%(target.group.domain_id)s and 
domain_id:%(target.domain.id)s)" -"base_create_grant": "(role:admin and system_scope:all) or ((role:admin and domain_id:%(target.user.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:admin and domain_id:%(target.user.domain_id)s and domain_id:%(target.domain.id)s) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.domain.id)s)) and (domain_id:%(target.role.domain_id)s or None:%(target.role.domain_id)s)" -"base_revoke_grant": "(role:admin and system_scope:all) or ((role:admin and domain_id:%(target.user.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:admin and domain_id:%(target.user.domain_id)s and domain_id:%(target.domain.id)s) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.domain.id)s)) and (domain_id:%(target.role.domain_id)s or None:%(target.role.domain_id)s)" -"base_list_role_assignments": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.domain_id)s)" -"base_list_groups": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.group.domain_id)s)" -"base_get_group": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.group.domain_id)s)" -"base_create_group": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.group.domain_id)s)" -"base_update_group": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.group.domain_id)s)" -"base_delete_group": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.group.domain_id)s)" -"base_list_groups_for_user": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.user.domain_id)s) or user_id:%(user_id)s" -"base_list_users_in_group": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.group.domain_id)s)" -"base_remove_user_from_group": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.user.domain_id)s)" -"base_check_user_in_group": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.group.domain_id)s and domain_id:%(target.user.domain_id)s)" -"base_add_user_to_group": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.user.domain_id)s)" - -# Section B: Domain Manager Extensions - -# classify domain managers with a special role -"is_domain_manager": "role:manager" - -# specify a rule that whitelists roles which domain admins are permitted -# to assign and revoke within their domain -"is_domain_managed_role": "'member':%(target.role.name)s or 'load-balancer_member':%(target.role.name)s" - -# allow domain admins to retrieve their own domain (does not need changes) -"identity:get_domain": "rule:base_get_domain or rule:admin_required" - -# list_domains is needed for GET /v3/domains?name=... requests -# this is mandatory for things like -# `create user --domain $DOMAIN_NAME $USER_NAME` to correctly discover -# domains by name -"identity:list_domains": "rule:is_domain_manager or rule:base_list_domains or rule:admin_required" - -# list_roles is needed for GET /v3/roles?name=... requests -# this is mandatory for things like `role add ... 
$ROLE_NAME`` to correctly -# discover roles by name -"identity:list_roles": "rule:is_domain_manager or rule:base_list_roles or rule:admin_required" - -# get_role is needed for GET /v3/roles/{role_id} requests -# this is mandatory for the OpenStack SDK to properly process role assignments -# which are issued by role id instead of name -"identity:get_role": "(rule:is_domain_manager and rule:is_domain_managed_role) or rule:base_get_role or rule:admin_required" - -# allow domain admins to manage users within their domain -"identity:list_users": "(rule:is_domain_manager and token.domain.id:%(target.domain_id)s) or rule:base_list_users or rule:admin_required" -"identity:get_user": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_get_user or rule:admin_required" -"identity:create_user": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_create_user or rule:admin_required" -"identity:update_user": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_update_user or rule:admin_required" -"identity:delete_user": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_delete_user or rule:admin_required" - -# allow domain admins to manage projects within their domain -"identity:list_projects": "(rule:is_domain_manager and token.domain.id:%(target.domain_id)s) or rule:base_list_projects or rule:admin_required" -"identity:get_project": "(rule:is_domain_manager and token.domain.id:%(target.project.domain_id)s) or rule:base_get_project or rule:admin_required" -"identity:create_project": "(rule:is_domain_manager and token.domain.id:%(target.project.domain_id)s) or rule:base_create_project or rule:admin_required" -"identity:update_project": "(rule:is_domain_manager and token.domain.id:%(target.project.domain_id)s) or rule:base_update_project or rule:admin_required" -"identity:delete_project": "(rule:is_domain_manager and token.domain.id:%(target.project.domain_id)s) or rule:base_delete_project or rule:admin_required" -"identity:list_user_projects": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_list_user_projects or rule:admin_required" - -# allow domain managers to manage role assignments within their domain -# (restricted to specific roles by the 'is_domain_managed_role' rule) -# -# project-level role assignment to user within domain -"is_domain_user_project_grant": "token.domain.id:%(target.user.domain_id)s and token.domain.id:%(target.project.domain_id)s" -# project-level role assignment to group within domain -"is_domain_group_project_grant": "token.domain.id:%(target.group.domain_id)s and token.domain.id:%(target.project.domain_id)s" -# domain-level role assignment to group -"is_domain_level_group_grant": "token.domain.id:%(target.group.domain_id)s and token.domain.id:%(target.domain.id)s" -# domain-level role assignment to user -"is_domain_level_user_grant": "token.domain.id:%(target.user.domain_id)s and token.domain.id:%(target.domain.id)s" -"domain_manager_grant": "rule:is_domain_manager and (rule:is_domain_user_project_grant or rule:is_domain_group_project_grant or rule:is_domain_level_group_grant or rule:is_domain_level_user_grant)" -"identity:check_grant": "rule:domain_manager_grant or rule:base_check_grant or rule:admin_required" -"identity:list_grants": "rule:domain_manager_grant or rule:base_list_grants or rule:admin_required" -"identity:create_grant": "(rule:domain_manager_grant and rule:is_domain_managed_role) or 
rule:base_create_grant or rule:admin_required" -"identity:revoke_grant": "(rule:domain_manager_grant and rule:is_domain_managed_role) or rule:base_revoke_grant or rule:admin_required" -"identity:list_role_assignments": "(rule:is_domain_manager and token.domain.id:%(target.domain_id)s) or rule:base_list_role_assignments or rule:admin_required" - - -# allow domain managers to manage groups within their domain -"identity:list_groups": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or (role:reader and system_scope:all) or rule:base_list_groups or rule:admin_required" -"identity:get_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or (role:reader and system_scope:all) or rule:base_get_group or rule:admin_required" -"identity:create_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or rule:base_create_group or rule:admin_required" -"identity:update_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or rule:base_update_group or rule:admin_required" -"identity:delete_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or rule:base_delete_group or rule:admin_required" -"identity:list_groups_for_user": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_list_groups_for_user or rule:admin_required" -"identity:list_users_in_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or rule:base_list_users_in_group or rule:admin_required" -"identity:remove_user_from_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s and token.domain.id:%(target.user.domain_id)s) or rule:base_remove_user_from_group or rule:admin_required" -"identity:check_user_in_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s and token.domain.id:%(target.user.domain_id)s) or rule:base_check_user_in_group or rule:admin_required" -"identity:add_user_to_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s and token.domain.id:%(target.user.domain_id)s) or rule:base_add_user_to_group or rule:admin_required" +```ini +[oslo_policy] +enforce_new_defaults = True +enforce_scope = True ``` -Note that the policy file begins with a list of "`base_*`" rule definitions ("Section A"). -These mirror the default policies of recent OpenStack releases. -They are used as a basis for the domain-manager-specific changes which are implemented in "Section B" where they are referenced to via "`or rule:base_*`" accordingly. -The section of "`base_*`" rules is meant for easy maintenance/update of default rules while keeping the domain-manager-specific rules separate. - -> **Note:** -> The "`or rule:admin_required`" appendix to the rule definitions in "Section B" is included for backwards compatibility with environments not yet fully configured for the new secure RBAC standard[^6]. - -[^6]: [OpenStack Technical Committee Governance Documents: Consistent and Secure Default RBAC](https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html) +The "`is_domain_managed_role`" policy rule MAY be adjusted using a dedicated `policy.yaml` file for the Identity API in order to adjust the set of roles a Domain Manager is able to assign/revoke. +When doing so, the `admin` role MUST NOT be added to this set. 
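+
+For example, a `policy.yaml` override that additionally permits the `reader` role could look like this (a sketch; which roles to allow beyond the default `member` and `load-balancer_member` roles is up to the CSP):
+
+```yaml
+# Roles a Domain Manager may assign/revoke; "admin" MUST NOT appear here.
+"is_domain_managed_role": "'member':%(target.role.name)s or 'load-balancer_member':%(target.role.name)s or 'reader':%(target.role.name)s"
+```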
-#### Specifying manageable roles via "`is_domain_managed_role`" +#### Note about upgrading from SCS Domain Manager to native integration -The "`is_domain_managed_role`" rule of the above policy template may be adjusted according to the requirements of the CSP and infrastructure architecture to specify different or multiple roles as manageable by Domain Managers as long as the policy rule adheres to the following: +In case the Identity API was upgraded from an older version where the policy-based Domain Manager implementation of SCS described in the [implementation notes for this standard](https://github.com/SovereignCloudStack/standards/blob/main/Standards/scs-0302-w1-domain-manager-implementation-notes.md) was still in use, the policies described there MUST be removed. +The only exception to this is the "`is_domain_managed_role`" rule in case any adjustments have been made to that rule and the CSP wants to preserve them. -- the "`is_domain_managed_role`" rule MUST NOT contain the "`admin`" role, neither directly nor transitively -- the "`is_domain_managed_role`" rule MUST define all applicable roles directly, it MUST NOT contain a "`rule:`" reference within itself +### For OpenStack Keystone 2024.1 or below -##### Example: permitting multiple roles +For OpenStack Keystone 2024.1 or below, the Domain Manager functionality MUST be implemented using API policies. +For details, refer to the [implementation notes for this standard](https://github.com/SovereignCloudStack/standards/blob/main/Standards/scs-0302-w1-domain-manager-implementation-notes.md). -The following example permits the "`reader`" role to be assigned/revoked by a Domain Manager in addition to the default "`member`" and "`load-balancer_member`" roles. -Further roles can be appended using the logical `or` directive. +For the release 2024.1 and below, changing the "`enforce_scope`" and "`enforce_new_defaults`" options for the Identity API is not necessary for the Domain Manager implementation. -```yaml -"is_domain_managed_role": "'member':%(target.role.name)s or 'load-balancer_member':%(target.role.name)s or 'reader':%(target.role.name)s" -``` - -**Note regarding the `manager` role** +## Related Documents -When adjusting the "`is_domain_managed_role`" rule a CSP might opt to also include the "`manager`" role itself in the manageable roles, resulting in Domain Managers being able to propagate the Domain Manager capabilities to other users within their domain. -This increases the self-service capabilities of the customer but introduces risks of Domain Managers also being able to revoke this role from themselves or each other (within their domain) in an unintended fashion. +### Upstream contribution spec for the Domain Manager functionality -CSPs have to carefully evaluate whether Domain Manager designation authority should reside solely on their side or be part of the customer self-service scope and decide about adding "`'manager':%(target.role.name)s`" to the rule accordingly. +**Description:** Upstream Identity service specification to introduce the Domain Manager functionality natively in OpenStack Keystone. +After implementing the Domain Manager functionality as described in the [implementation notes for this standard](https://github.com/SovereignCloudStack/standards/blob/main/Standards/scs-0302-w1-domain-manager-implementation-notes.md), the SCS project contributed the functionality to the official OpenStack project. 
+This eventually resulted in the feature being integrated natively in OpenStack Keystone starting with the 2024.2 release. +The specification was the starting point of the contribution. -## Related Documents +**Link:** [OpenStack Identity Specs: Domain Manager Persona for domain-scoped self-service administration](https://specs.openstack.org/openstack/keystone-specs/specs/keystone/2024.1/domain-manager-persona.html) ### "admin"-ness not properly scoped diff --git a/Standards/scs-0302-w1-domain-manager-implementation-notes.md b/Standards/scs-0302-w1-domain-manager-implementation-notes.md new file mode 100644 index 000000000..6e2c60298 --- /dev/null +++ b/Standards/scs-0302-w1-domain-manager-implementation-notes.md @@ -0,0 +1,194 @@ +--- +title: Domain Manager implementation notes +type: Supplement +track: IAM +status: Draft +supplements: + - scs-0302-v1-domain-manager-role.md +--- + +## Implementation notes + +:::caution + +If a Keystone release of OpenStack 2024.2 or later is used, **the policy configuration described in this document MUST be removed again** in case it was applied in the past prior to the upgrade. + +::: + +:::info + +The implementation described in this document only applies to Keystone releases prior to the OpenStack release 2024.2 ("Dalmatian"). +This document describes a transitional solution to offer the Domain Manager functionality for SCS clouds based on an OpenStack release earlier than 2024.2. + +Beginning with the 2024.2 release of OpenStack, the Domain Manager persona is integrated natively into Keystone and the implementation described below is unnecessary and might conflict with the native implementation. + +::: + +### Policy adjustments + +The following policy can be applied to Keystone releases older than 2024.2 ("Dalmatian"). +It mimics the Domain Manager persona implemented by Keystone starting with version 2024.2 and makes the functionality available for earlier releases of Keystone. + +The only parts of the policy definitions below that may be changed are: + +1. The "`base_*`" definitions to align them to the correct OpenStack defaults matching the OpenStack release of the environment in case those differ from this template. +2. The "`is_domain_managed_role`" definition (see next section below). + +```yaml +# SCS Domain Manager policy configuration + +# Section A: OpenStack base definitions +# The entries beginning with "base_" should be exact copies of the +# default "identity:" definitions for the target OpenStack release. +# They will be extended upon for the manager role below this section. 
+"base_get_domain": "(role:reader and system_scope:all) or token.domain.id:%(target.domain.id)s or token.project.domain.id:%(target.domain.id)s" +"base_list_domains": "(role:reader and system_scope:all)" +"base_list_roles": "(role:reader and system_scope:all)" +"base_get_role": "(role:reader and system_scope:all)" +"base_list_users": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.domain_id)s)" +"base_get_user": "(role:reader and system_scope:all) or (role:reader and token.domain.id:%(target.user.domain_id)s) or user_id:%(target.user.id)s" +"base_create_user": "(role:admin and system_scope:all) or (role:admin and token.domain.id:%(target.user.domain_id)s)" +"base_update_user": "(role:admin and system_scope:all) or (role:admin and token.domain.id:%(target.user.domain_id)s)" +"base_delete_user": "(role:admin and system_scope:all) or (role:admin and token.domain.id:%(target.user.domain_id)s)" +"base_list_projects": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.domain_id)s)" +"base_get_project": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.project.domain_id)s) or project_id:%(target.project.id)s" +"base_create_project": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.project.domain_id)s)" +"base_update_project": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.project.domain_id)s)" +"base_delete_project": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.project.domain_id)s)" +"base_list_user_projects": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.user.domain_id)s) or user_id:%(target.user.id)s" +"base_check_grant": "(role:reader and system_scope:all) or ((role:reader and domain_id:%(target.user.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:reader and domain_id:%(target.user.domain_id)s and domain_id:%(target.domain.id)s) or (role:reader and domain_id:%(target.group.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:reader and domain_id:%(target.group.domain_id)s and domain_id:%(target.domain.id)s)) and (domain_id:%(target.role.domain_id)s or None:%(target.role.domain_id)s)" +"base_list_grants": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.user.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:reader and domain_id:%(target.user.domain_id)s and domain_id:%(target.domain.id)s) or (role:reader and domain_id:%(target.group.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:reader and domain_id:%(target.group.domain_id)s and domain_id:%(target.domain.id)s)" +"base_create_grant": "(role:admin and system_scope:all) or ((role:admin and domain_id:%(target.user.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:admin and domain_id:%(target.user.domain_id)s and domain_id:%(target.domain.id)s) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.domain.id)s)) and (domain_id:%(target.role.domain_id)s or None:%(target.role.domain_id)s)" +"base_revoke_grant": "(role:admin and system_scope:all) or ((role:admin and domain_id:%(target.user.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:admin and domain_id:%(target.user.domain_id)s and domain_id:%(target.domain.id)s) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.project.domain_id)s) or (role:admin 
and domain_id:%(target.group.domain_id)s and domain_id:%(target.domain.id)s)) and (domain_id:%(target.role.domain_id)s or None:%(target.role.domain_id)s)" +"base_list_role_assignments": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.domain_id)s)" +"base_list_groups": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.group.domain_id)s)" +"base_get_group": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.group.domain_id)s)" +"base_create_group": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.group.domain_id)s)" +"base_update_group": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.group.domain_id)s)" +"base_delete_group": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.group.domain_id)s)" +"base_list_groups_for_user": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.user.domain_id)s) or user_id:%(user_id)s" +"base_list_users_in_group": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.group.domain_id)s)" +"base_remove_user_from_group": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.user.domain_id)s)" +"base_check_user_in_group": "(role:reader and system_scope:all) or (role:reader and domain_id:%(target.group.domain_id)s and domain_id:%(target.user.domain_id)s)" +"base_add_user_to_group": "(role:admin and system_scope:all) or (role:admin and domain_id:%(target.group.domain_id)s and domain_id:%(target.user.domain_id)s)" + +# Section B: Domain Manager Extensions + +# classify domain managers with a special role +"is_domain_manager": "role:manager" + +# specify a rule that whitelists roles which domain admins are permitted +# to assign and revoke within their domain +"is_domain_managed_role": "'member':%(target.role.name)s or 'load-balancer_member':%(target.role.name)s" + +# allow domain admins to retrieve their own domain (does not need changes) +"identity:get_domain": "rule:base_get_domain or rule:admin_required" + +# list_domains is needed for GET /v3/domains?name=... requests +# this is mandatory for things like +# `create user --domain $DOMAIN_NAME $USER_NAME` to correctly discover +# domains by name +"identity:list_domains": "rule:is_domain_manager or rule:base_list_domains or rule:admin_required" + +# list_roles is needed for GET /v3/roles?name=... requests +# this is mandatory for things like `role add ... 
$ROLE_NAME`` to correctly +# discover roles by name +"identity:list_roles": "rule:is_domain_manager or rule:base_list_roles or rule:admin_required" + +# get_role is needed for GET /v3/roles/{role_id} requests +# this is mandatory for the OpenStack SDK to properly process role assignments +# which are issued by role id instead of name +"identity:get_role": "(rule:is_domain_manager and rule:is_domain_managed_role) or rule:base_get_role or rule:admin_required" + +# allow domain admins to manage users within their domain +"identity:list_users": "(rule:is_domain_manager and token.domain.id:%(target.domain_id)s) or rule:base_list_users or rule:admin_required" +"identity:get_user": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_get_user or rule:admin_required" +"identity:create_user": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_create_user or rule:admin_required" +"identity:update_user": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_update_user or rule:admin_required" +"identity:delete_user": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_delete_user or rule:admin_required" + +# allow domain admins to manage projects within their domain +"identity:list_projects": "(rule:is_domain_manager and token.domain.id:%(target.domain_id)s) or rule:base_list_projects or rule:admin_required" +"identity:get_project": "(rule:is_domain_manager and token.domain.id:%(target.project.domain_id)s) or rule:base_get_project or rule:admin_required" +"identity:create_project": "(rule:is_domain_manager and token.domain.id:%(target.project.domain_id)s) or rule:base_create_project or rule:admin_required" +"identity:update_project": "(rule:is_domain_manager and token.domain.id:%(target.project.domain_id)s) or rule:base_update_project or rule:admin_required" +"identity:delete_project": "(rule:is_domain_manager and token.domain.id:%(target.project.domain_id)s) or rule:base_delete_project or rule:admin_required" +"identity:list_user_projects": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_list_user_projects or rule:admin_required" + +# allow domain managers to manage role assignments within their domain +# (restricted to specific roles by the 'is_domain_managed_role' rule) +# +# project-level role assignment to user within domain +"is_domain_user_project_grant": "token.domain.id:%(target.user.domain_id)s and token.domain.id:%(target.project.domain_id)s" +# project-level role assignment to group within domain +"is_domain_group_project_grant": "token.domain.id:%(target.group.domain_id)s and token.domain.id:%(target.project.domain_id)s" +# domain-level role assignment to group +"is_domain_level_group_grant": "token.domain.id:%(target.group.domain_id)s and token.domain.id:%(target.domain.id)s" +# domain-level role assignment to user +"is_domain_level_user_grant": "token.domain.id:%(target.user.domain_id)s and token.domain.id:%(target.domain.id)s" +"domain_manager_grant": "rule:is_domain_manager and (rule:is_domain_user_project_grant or rule:is_domain_group_project_grant or rule:is_domain_level_group_grant or rule:is_domain_level_user_grant)" +"identity:check_grant": "rule:domain_manager_grant or rule:base_check_grant or rule:admin_required" +"identity:list_grants": "rule:domain_manager_grant or rule:base_list_grants or rule:admin_required" +"identity:create_grant": "(rule:domain_manager_grant and rule:is_domain_managed_role) or 
rule:base_create_grant or rule:admin_required"
+"identity:revoke_grant": "(rule:domain_manager_grant and rule:is_domain_managed_role) or rule:base_revoke_grant or rule:admin_required"
+"identity:list_role_assignments": "(rule:is_domain_manager and token.domain.id:%(target.domain_id)s) or rule:base_list_role_assignments or rule:admin_required"
+
+# allow domain managers to manage groups within their domain
+"identity:list_groups": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or (role:reader and system_scope:all) or rule:base_list_groups or rule:admin_required"
+"identity:get_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or (role:reader and system_scope:all) or rule:base_get_group or rule:admin_required"
+"identity:create_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or rule:base_create_group or rule:admin_required"
+"identity:update_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or rule:base_update_group or rule:admin_required"
+"identity:delete_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or rule:base_delete_group or rule:admin_required"
+"identity:list_groups_for_user": "(rule:is_domain_manager and token.domain.id:%(target.user.domain_id)s) or rule:base_list_groups_for_user or rule:admin_required"
+"identity:list_users_in_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s) or rule:base_list_users_in_group or rule:admin_required"
+"identity:remove_user_from_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s and token.domain.id:%(target.user.domain_id)s) or rule:base_remove_user_from_group or rule:admin_required"
+"identity:check_user_in_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s and token.domain.id:%(target.user.domain_id)s) or rule:base_check_user_in_group or rule:admin_required"
+"identity:add_user_to_group": "(rule:is_domain_manager and token.domain.id:%(target.group.domain_id)s and token.domain.id:%(target.user.domain_id)s) or rule:base_add_user_to_group or rule:admin_required"
+```
+
+Note that the policy file begins with a list of "`base_*`" rule definitions ("Section A").
+These mirror the default policies of recent OpenStack releases.
+They serve as the basis for the domain-manager-specific changes implemented in "Section B", where they are referenced via "`or rule:base_*`".
+Keeping the "`base_*`" rules in a separate section eases maintenance and updates of the default rules while leaving the domain-manager-specific rules untouched.
+
+> **Note:**
+> The "`or rule:admin_required`" suffix appended to the rule definitions in "Section B" is included for backwards compatibility with environments not yet fully configured for the new secure RBAC standard[^1].
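+How the policy file is deployed depends on the installation; the following is a minimal sketch assuming a plain Keystone installation that reads its policy from "`/etc/keystone/policy.yaml`" (the file names, ownership and the service restart command are assumptions and will differ with other deployment tooling):
+
+```bash
+# install the policy snippet shown above as Keystone's oslo.policy file
+# ("domain-manager-policy.yaml" is a hypothetical local file name)
+install -m 0640 -o keystone -g keystone domain-manager-policy.yaml /etc/keystone/policy.yaml
+
+# keystone.conf must point oslo.policy at this file, e.g.:
+#   [oslo_policy]
+#   policy_file = policy.yaml
+
+# restart the Keystone service (often served via Apache/httpd) to apply
+systemctl restart apache2
+```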
+
+[^1]: [OpenStack Technical Committee Governance Documents: Consistent and Secure Default RBAC](https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html)
+
+#### Specifying manageable roles via "`is_domain_managed_role`"
+
+The "`is_domain_managed_role`" rule of the above policy template may be adjusted according to the requirements of the CSP and infrastructure architecture to specify different or multiple roles as manageable by Domain Managers, as long as the policy rule adheres to the following:
+
+- the "`is_domain_managed_role`" rule MUST NOT contain the "`admin`" role, either directly or transitively
+- the "`is_domain_managed_role`" rule MUST define all applicable roles directly; it MUST NOT contain a "`rule:`" reference within itself
+
+##### Example: permitting multiple roles
+
+The following example permits the "`reader`" role to be assigned/revoked by a Domain Manager in addition to the default "`member`" and "`load-balancer_member`" roles.
+Further roles can be appended using the logical `or` operator.
+
+```yaml
+"is_domain_managed_role": "'member':%(target.role.name)s or 'load-balancer_member':%(target.role.name)s or 'reader':%(target.role.name)s"
+```
+
+**Note regarding the `manager` role**
+
+When adjusting the "`is_domain_managed_role`" rule, a CSP might opt to also include the "`manager`" role itself in the manageable roles, allowing Domain Managers to propagate the Domain Manager capabilities to other users within their domain.
+This increases the self-service capabilities of the customer but introduces the risk of Domain Managers revoking this role from themselves or each other (within their domain) in an unintended fashion.
+
+CSPs have to carefully evaluate whether Domain Manager designation authority should reside solely on their side or be part of the customer self-service scope, and decide whether to add "`'manager':%(target.role.name)s`" to the rule accordingly.
+
+### Impact
+
+Applying this implementation modifies Keystone's API policy configuration and introduces a new persona to enable IAM self-service for customers within a domain.
+Once assigned, this persona allows special Domain Manager users within a domain to manage users, projects, groups and role assignments as part of the IAM self-service.
+
+However, the configuration change introduced by this implementation does not automatically assign the Domain Manager persona to any users by default.
+Assigning the new persona and granting customers the resulting self-service capabilities is a deliberate action to be taken by the CSP on a per-tenant (i.e. per domain) basis.
+
+If no Domain Manager users are provisioned (i.e. the new persona is not assigned to any user), the OpenStack cloud behaves identically to a configuration without the implementation applied; actual usage of the functionality is thus a CSP's choice and entirely optional.
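+For illustration, a possible workflow might look as follows (all user, domain and project names are hypothetical):
+
+```bash
+# CSP side: designate a customer user as Domain Manager for their domain
+openstack role add --user alice --domain customer-a manager
+
+# customer side: "alice", using a domain-scoped token for "customer-a",
+# can now perform IAM self-service within the domain
+openstack user create --domain customer-a bob
+openstack project create --domain customer-a project-b
+openstack role add --user bob --project project-b member
+```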
+
+#### Security implications
+
+As a result of the "`identity:list_roles`" rule (see above), Domain Managers are able to see all roles via "`openstack role list`" and can inspect the metadata of any role with "`openstack role show`".
diff --git a/Standards/scs-XXXX-vN-decision-record-template.md b/Standards/scs-XXXX-vN-decision-record-template.md
index 4b73c1ca0..774bd10b6 100644
--- a/Standards/scs-XXXX-vN-decision-record-template.md
+++ b/Standards/scs-XXXX-vN-decision-record-template.md
@@ -1,7 +1,7 @@
 ---
 title: _Descriptive title_
 type: Decision Record
-status: Proposal
+status: Draft
 track: Global  # | IaaS | Ops | KaaS | IAM
 ---
 
diff --git a/Standards/scs-XXXX-vN-standard-template.md b/Standards/scs-XXXX-vN-standard-template.md
index 52a4e7c6e..1b8afaf22 100644
--- a/Standards/scs-XXXX-vN-standard-template.md
+++ b/Standards/scs-XXXX-vN-standard-template.md
@@ -1,7 +1,7 @@
 ---
 title: _Descriptive title_
 type: Standard  # | Procedural
-status: Proposal
+status: Draft
 track: Global  # | IaaS | Ops | KaaS | IAM
 ---
 
diff --git a/Tests/config.toml b/Tests/config.toml
index 0f270cd4b..a0173c25d 100644
--- a/Tests/config.toml
+++ b/Tests/config.toml
@@ -26,6 +26,7 @@ subjects = [
     "poc-kdo",
     "poc-wgcloud",
     "regio-a",
+    "scaleup-occ2",
     "syseleven-dus2",
     "syseleven-ham1",
     "wavestack",
diff --git a/Tests/iaas/key-manager/check-for-key-manager.py b/Tests/iaas/key-manager/check-for-key-manager.py
old mode 100644
new mode 100755
index 6b5a5b70a..dae49acdd
--- a/Tests/iaas/key-manager/check-for-key-manager.py
+++ b/Tests/iaas/key-manager/check-for-key-manager.py
@@ -1,135 +1,84 @@
-"""Mandatory APIs checker
+#!/usr/bin/env python3
+"""Key Manager service checker for scs-0116-v1-key-manager-standard.md
+
 This script retrieves the endpoint catalog from Keystone using the OpenStack
-SDK and checks whether a key manager APi endpoint is present.
+SDK and checks whether a key manager API endpoint is present.
+It then checks whether a user whose highest-privilege role is member can create secrets.
+This will only work after policy adjustments or with the new secure RBAC roles and policies.
 The script relies on an OpenStack SDK compatible clouds.yaml file for
 authentication with Keystone.
 """
 
 import argparse
-import json
 import logging
 import os
+import sys
 
 import openstack
 
-
 logger = logging.getLogger(__name__)
 
 
-def connect(cloud_name: str) -> openstack.connection.Connection:
-    """Create a connection to an OpenStack cloud
-    :param string cloud_name:
-        The name of the configuration to load from clouds.yaml.
-    :returns: openstack.connnection.Connection
-    """
-    return openstack.connect(
-        cloud=cloud_name,
-    )
+def initialize_logging():
+    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
 
 
-def check_for_member_role(conn: openstack.connection.Connection
-                          ) -> None:
-    """Checks whether the current user has at maximum privileges
-    of the member role.
-    :param connection:
-        The current connection to an OpenStack cloud.
-    :returns: boolean, when role with most priviledges is member
-    """
+def check_for_member_role(conn: openstack.connection.Connection) -> bool:
+    """Checks whether the current user has at maximum privileges of the member role.
-    auth_data = conn.auth
-    auth_dict = {
-        "identity": {
-            "methods": ["password"],
-            "password": {
-                "user": {
-                    "name": auth_data['username'],
-                    "domain": {"name": auth_data['project_domain_name']},
-                    "password": auth_data['password']
-                }
-            },
-        },
-        "scope": {
-            "project": {
-                "domain": {"name": auth_data['project_domain_name']},
-                "name": auth_data['project_name']
-            }
-        }
-    }
-
-    has_member_role = False
-    request = conn.session.request(auth_data['auth_url'] + '/v3/auth/tokens',
-                                   'POST',
-                                   json={'auth': auth_dict})
-    for role in json.loads(request.content)["token"]["roles"]:
-        role_name = role["name"]
-        if role_name == "admin" or role_name == "manager":
-            return False
-        elif role_name == "member":
-            print("User has member role.")
-            has_member_role = True
-        elif role_name == "reader":
-            print("User has reader role.")
-        else:
-            print("User has custom role.")
-            return False
-    return has_member_role
-
-
-def check_presence_of_key_manager(cloud_name: str):
+    :param conn: connection to an OpenStack cloud.
+    :returns: True if the role with the most privileges is member
+    """
+    # derive the caller's role names from the token of the current session
+    role_names = set(conn.session.auth.get_access(conn.session).role_names)
+    if role_names & {"admin", "manager"}:
+        return False
+    if "reader" in role_names:
+        logger.info("User has reader role.")
+    custom_roles = sorted(role_names - {"reader", "member"})
+    if custom_roles:
+        logger.info(f"User has custom roles {', '.join(custom_roles)}.")
+    return "member" in role_names
+
+
+def check_presence_of_key_manager(conn: openstack.connection.Connection) -> bool:
    try:
-        connection = connect(cloud_name)
-        services = connection.service_catalog
-    except Exception as e:
-        print(str(e))
-        raise Exception(
-            f"Connection to cloud '{cloud_name}' was not successful. "
-            f"The Catalog endpoint could not be accessed. "
-            f"Please check your cloud connection and authorization."
-        )
+        services = conn.service_catalog
+    except Exception:
+        logger.critical("Could not access Catalog endpoint.")
+        raise
 
     for svc in services:
-        svc_type = svc['type']
+        svc_type = svc["type"]
         if svc_type == "key-manager":
             # key-manager is present
             # now we want to check whether a user with member role
             # can create and access secrets
-            check_key_manager_permissions(connection)
-            return 0
+            logger.info("Key Manager is present")
+            return True
+    return False
+
 
-    # we did not find the key-manager service
-    logger.warning("There is no key-manager endpoint in the cloud.")
-    # we do not fail, until a key-manager MUST be present
-    return 0
+def _find_secret(conn: openstack.connection.Connection, secret_name_or_id: str):
+    """Replacement method for finding secrets.
+
+    Mimics the behavior of Connection.key_manager.find_secret()
+    but fixes an issue with the internal implementation raising an
+    exception due to an unexpected microversion parameter.
+    """
+    secrets = conn.key_manager.secrets()
+    for s in secrets:
+        if s.name == secret_name_or_id or s.id == secret_name_or_id:
+            return s
 
 
-def check_key_manager_permissions(conn: openstack.connection.Connection
-                                  ) -> None:
+def check_key_manager_permissions(conn: openstack.connection.Connection) -> int:
     """
     After checking that the current user only has the member and maybe the
     reader role, this method verifies that the user with a member role
     has sufficient access to the Key Manager API functionality.
     """
     secret_name = "scs-member-role-test-secret"
-    if not check_for_member_role(conn):
-        logger.warning("Cannot test key-manager permissions. "
-                       "User has wrong roles")
-        return None
-
-    def _find_secret(secret_name_or_id: str):
-        """Replacement method for finding secrets.
-
-        Mimicks the behavior of Connection.key_manager.find_secret()
-        but fixes an issue with the internal implementation raising an
-        exception due to an unexpected microversion parameter.
-        """
-        secrets = conn.key_manager.secrets()
-        for s in secrets:
-            if s.name == secret_name_or_id or s.id == secret_name_or_id:
-                return s
-        return None
-
     try:
-        existing_secret = _find_secret(secret_name)
+        existing_secret = _find_secret(conn, secret_name)
         if existing_secret:
             conn.key_manager.delete_secret(existing_secret)
 
@@ -137,54 +86,71 @@ def _find_secret(secret_name_or_id: str):
             name=secret_name,
             payload_content_type="text/plain",
             secret_type="opaque",
-            payload="foo"
-        )
-
-        new_secret = _find_secret(secret_name)
-        assert new_secret, (
-            f"Secret created with name '{secret_name}' was not discoverable by "
-            f"the user"
-        )
-        conn.key_manager.delete_secret(new_secret)
-    except openstack.exceptions.ForbiddenException as e:
-        print(
-            "Users of the 'member' role can use Key Manager API: FAIL"
+            payload="foo",
         )
-        print(
-            f"ERROR: {str(e)}"
+        new_secret = None
+        try:
+            new_secret = _find_secret(conn, secret_name)
+            if not new_secret:
+                raise ValueError(f"Secret '{secret_name}' was not discoverable by the user")
+        finally:
+            # only delete the secret if it was actually found; otherwise the
+            # reference would be None and the delete call would fail
+            if new_secret:
+                conn.key_manager.delete_secret(new_secret)
+    except openstack.exceptions.ForbiddenException:
+        logger.debug('exception details', exc_info=True)
+        logger.error(
+            "Users with the 'member' role can use Key Manager API: FAIL"
         )
-        exit(1)
-    print(
-        "Users of the 'member' role can use Key Manager API: PASS"
+        return 1
+    logger.info(
+        "Users with the 'member' role can use Key Manager API: PASS"
     )
+    return 0
 
 
 def main():
-    parser = argparse.ArgumentParser(
-        description="SCS Mandatory IaaS Service Checker")
+    initialize_logging()
+    parser = argparse.ArgumentParser(description="SCS Mandatory IaaS Service Checker")
     parser.add_argument(
-        "--os-cloud", type=str,
+        "--os-cloud",
+        type=str,
         help="Name of the cloud from clouds.yaml, alternative "
-             "to the OS_CLOUD environment variable"
+        "to the OS_CLOUD environment variable",
     )
     parser.add_argument(
-        "--debug", action="store_true",
-        help="Enable OpenStack SDK debug logging"
+        "--debug", action="store_true", help="Enable OpenStack SDK debug logging"
     )
     args = parser.parse_args()
-    openstack.enable_logging(debug=args.debug)
+    # @mbuechse: I think this output is so verbose as to be unusable!
+    # (If necessary, a developer can always uncomment)
+    # openstack.enable_logging(debug=args.debug)
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
 
     # parse cloud name for lookup in clouds.yaml
-    cloud = os.environ.get("OS_CLOUD", None)
-    if args.os_cloud:
-        cloud = args.os_cloud
-    assert cloud, (
-        "You need to have the OS_CLOUD environment variable set to your cloud "
-        "name or pass it via --os-cloud"
-    )
-
-    return check_presence_of_key_manager(cloud)
+    cloud = args.os_cloud or os.environ.get("OS_CLOUD", None)
+    if not cloud:
+        logger.critical(
+            "You need to have the OS_CLOUD environment variable set to your cloud "
+            "name or pass it via --os-cloud"
+        )
+        return 2
+
+    with openstack.connect(cloud=cloud) as conn:
+        if not check_for_member_role(conn):
+            logger.critical("Cannot test key-manager permissions. User has wrong roles")
+            return 2
+        if check_presence_of_key_manager(conn):
+            return check_key_manager_permissions(conn)
+        else:
+            # not an error, because key manager is merely recommended
+            logger.warning("There is no key-manager endpoint in the cloud.")
 
 
 if __name__ == "__main__":
-    main()
+    try:
+        sys.exit(main() or 0)
+    except SystemExit as e:
+        if e.code < 2:
+            print("key-manager-check: " + ('PASS', 'FAIL')[min(1, e.code)])
+        raise
+    except BaseException:
+        logger.critical("exception", exc_info=True)
+        sys.exit(2)
diff --git a/Tests/iaas/mandatory-services/README.md b/Tests/iaas/mandatory-services/README.md
new file mode 100644
index 000000000..33a66d7f4
--- /dev/null
+++ b/Tests/iaas/mandatory-services/README.md
@@ -0,0 +1,66 @@
+# Mandatory IaaS Service APIs Test Suite
+
+## Test Environment Setup
+
+### Test Execution Environment
+
+> **NOTE:** The test execution procedure does not require cloud admin rights.
+
+To execute the test suite, a valid cloud configuration for the OpenStack SDK in the shape of "`clouds.yaml`" is mandatory[^1].
+**The file is expected to be located in the current working directory where the test script is executed unless configured otherwise.**
+
+[^1]: [OpenStack Documentation: Configuring OpenStack SDK Applications](https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html)
+
+The test execution environment can be located on any system outside of the cloud infrastructure that has OpenStack API access.
+Make sure that the API access is configured properly in "`clouds.yaml`".
+
+It is recommended to use a Python virtual environment[^2].
+Next, install the OpenStack SDK and boto3, which are required by the test suite:
+
+```bash
+pip3 install openstacksdk
+pip3 install boto3
+```
+
+Within this environment, execute the test suite.
+
+[^2]: [Python 3 Documentation: Virtual Environments and Packages](https://docs.python.org/3/tutorial/venv.html)
+
+## Test Execution
+
+The test suite is executed as follows:
+
+```bash
+python3 mandatory-iaas-services.py --os-cloud mycloud
+```
+
+As an alternative to "`--os-cloud`", the "`OS_CLOUD`" environment variable may be specified.
+The parameter is used to look up the correct cloud configuration in "`clouds.yaml`".
+For the example command above, this file should contain a `clouds.mycloud` section like this:
+
+```yaml
+---
+clouds:
+  mycloud:
+    auth:
+      auth_url: ...
+      ...
+    ...
+```
+
+If the deployment uses s3 only and does not have the object store endpoint specified in the service catalog, the "`--s3-endpoint`" flag may be used to specify the s3 endpoint.
+In that case, the "`--s3-access`" and "`--s3-access-secret`" flags must also be set to provide all necessary credentials to the test suite:
+
+```bash
+python3 mandatory-iaas-services.py --os-cloud mycloud2 --s3-endpoint "http://s3-endpoint:9000" --s3-access test-user --s3-access-secret test-user-secret
+```
+
+For any further options consult the output of "`python3 mandatory-iaas-services.py --help`".
+
+### Script Behavior & Test Results
+
+If all tests pass, the script will return with an exit code of `0`.
+
+If any test fails, the script will halt, print the exact error to `stderr` and return with a non-zero exit code.
+
+This test does not require any cleanup, as it mainly inspects the service catalog; only for the object store does it create a bucket, which is then promptly deleted.
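+As can be seen in the script's source, s3 credentials discovered via the service catalog can additionally be overridden through the environment variables "`S3_HOSTNAME`", "`S3_ACCESS_KEY_ID`" and "`S3_SECRET_ACCESS_KEY`" (the values below are placeholders; the `https://` scheme prefix for "`S3_HOSTNAME`" is added by the script):
+
+```bash
+export S3_HOSTNAME=s3-endpoint:9000
+export S3_ACCESS_KEY_ID=test-user
+export S3_SECRET_ACCESS_KEY=test-user-secret
+python3 mandatory-iaas-services.py --os-cloud mycloud
+```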
diff --git a/Tests/iaas/mandatory-services/mandatory-iaas-services.py b/Tests/iaas/mandatory-services/mandatory-iaas-services.py
new file mode 100644
index 000000000..ab5cc0a2f
--- /dev/null
+++ b/Tests/iaas/mandatory-services/mandatory-iaas-services.py
@@ -0,0 +1,299 @@
+"""Mandatory APIs checker
+This script retrieves the endpoint catalog from Keystone using the OpenStack
+SDK and checks whether all mandatory API endpoints are present.
+The script relies on an OpenStack SDK compatible clouds.yaml file for
+authentication with Keystone.
+As the s3 endpoint might differ, a missing one will only result in a warning.
+"""
+
+import argparse
+import boto3
+from collections import Counter
+import logging
+import os
+import re
+import sys
+import uuid
+
+import openstack
+
+
+TESTCONTNAME = "scs-test-container"
+
+logger = logging.getLogger(__name__)
+mandatory_services = ["compute", "identity", "image", "network",
+                      "load-balancer", "placement", "object-store"]
+block_storage_service = ["volume", "volumev3", "block-storage"]
+
+
+def connect(cloud_name: str) -> openstack.connection.Connection:
+    """Create a connection to an OpenStack cloud
+    :param string cloud_name:
+        The name of the configuration to load from clouds.yaml.
+    :returns: openstack.connection.Connection
+    """
+    return openstack.connect(
+        cloud=cloud_name,
+    )
+
+
+def check_presence_of_mandatory_services(cloud_name: str, s3_credentials=None):
+    try:
+        connection = connect(cloud_name)
+        services = connection.service_catalog
+    except Exception as e:
+        print(str(e))
+        raise Exception(
+            f"Connection to cloud '{cloud_name}' was not successful. "
+            f"The Catalog endpoint could not be accessed. "
+            f"Please check your cloud connection and authorization."
+        )
+
+    if s3_credentials:
+        mandatory_services.remove("object-store")
+    for svc in services:
+        svc_type = svc['type']
+        if svc_type in mandatory_services:
+            mandatory_services.remove(svc_type)
+            continue
+        if svc_type in block_storage_service:
+            block_storage_service.remove(svc_type)
+
+    bs_service_not_present = 0
+    if len(block_storage_service) == 3:
+        # neither block-storage nor volume nor volumev3 is present
+        # we must assume that there is no volume service
+        logger.error("FAIL: No block-storage (volume) endpoint found.")
+        mandatory_services.append(block_storage_service[0])
+        bs_service_not_present = 1
+    if not mandatory_services:
+        # every mandatory service API had an endpoint
+        return 0 + bs_service_not_present
+    else:
+        # some mandatory API endpoints were not found
+        logger.error(f"FAIL: The following endpoints are missing: "
+                     f"{mandatory_services}")
+        return len(mandatory_services) + bs_service_not_present
+
+
+def list_containers(conn):
+    "Gets a list of buckets"
+    return [cont.name for cont in conn.object_store.containers()]
+
+
+def create_container(conn, name):
+    "Creates a test container"
+    conn.object_store.create_container(name)
+    return list_containers(conn)
+
+
+def del_container(conn, name):
+    "Deletes a test container"
+    conn.object_store.delete_container(name)
+    # return list_containers(conn)
+
+
+def s3_conn(creds, conn=None):
+    "Return an s3 client conn"
+    vrfy = True
+    if conn:
+        cacert = conn.config.config.get("cacert")
+        # TODO: Handle self-signed certs (from ca_cert in openstack config)
+        if cacert:
+            print("WARNING: Trust all Certificates in S3, "
+                  f"OpenStack uses {cacert}", file=sys.stderr)
+            vrfy = False
+    return boto3.resource('s3', aws_access_key_id=creds["AK"],
+                          aws_secret_access_key=creds["SK"],
+                          endpoint_url=creds["HOST"],
+                          verify=vrfy)
+
+
+def 
list_s3_buckets(s3):
+    "Get a list of s3 buckets"
+    return [buck.name for buck in s3.buckets.all()]
+
+
+def create_bucket(s3, name):
+    "Create an s3 bucket"
+    # bucket = s3.Bucket(name)
+    # bucket.create()
+    s3.create_bucket(Bucket=name)
+    return list_s3_buckets(s3)
+
+
+def del_bucket(s3, name):
+    "Delete an s3 bucket"
+    buck = s3.Bucket(name=name)
+    buck.delete()
+    # s3.delete_bucket(Bucket=name)
+
+
+def s3_from_env(creds, fieldnm, env, prefix=""):
+    "Set creds[fieldnm] to os.environ[env] if set"
+    if env in os.environ:
+        creds[fieldnm] = prefix + os.environ[env]
+    if fieldnm not in creds:
+        print(f"WARNING: s3_creds[{fieldnm}] not set", file=sys.stderr)
+
+
+def s3_from_ostack(creds, conn, endpoint):
+    "Set creds from openstack swift/keystone"
+    rgx = re.compile(r"^(https*://[^/]*)/")
+    match = rgx.match(endpoint)
+    if match:
+        creds["HOST"] = match.group(1)
+    # Use first ec2 cred if one exists
+    ec2_creds = [cred for cred in conn.identity.credentials()
+                 if cred.type == "ec2"]
+    if len(ec2_creds):
+        # FIXME: Assume cloud is not evil
+        ec2_dict = eval(ec2_creds[0].blob, {"null": None})
+        creds["AK"] = ec2_dict["access"]
+        creds["SK"] = ec2_dict["secret"]
+        return
+    # Generate keyid and secret
+    ak = uuid.uuid4().hex
+    sk = uuid.uuid4().hex
+    blob = f'{{"access": "{ak}", "secret": "{sk}"}}'
+    try:
+        conn.identity.create_credential(type="ec2", blob=blob,
+                                        user_id=conn.current_user_id,
+                                        project_id=conn.current_project_id)
+        creds["AK"] = ak
+        creds["SK"] = sk
+    except BaseException as exc:
+        print(f"WARNING: ec2 creds creation failed: {exc!s}", file=sys.stderr)
+    # pass
+
+
+def check_for_s3_and_swift(cloud_name: str, s3_credentials=None):
+    # If we get credentials, we assume that there is no Swift and only test s3
+    if s3_credentials:
+        try:
+            s3 = s3_conn(s3_credentials)
+        except Exception as e:
+            print(str(e))
+            logger.error("FAIL: Connection to s3 failed.")
+            return 1
+        s3_buckets = list_s3_buckets(s3)
+        if not s3_buckets:
+            s3_buckets = create_bucket(s3, TESTCONTNAME)
+            assert s3_buckets
+        if s3_buckets == [TESTCONTNAME]:
+            del_bucket(s3, TESTCONTNAME)
+        # everything worked, and we don't need to test for Swift:
+        print("SUCCESS: S3 exists")
+        return 0
+    # there were no credentials given, so we assume s3 is accessible via
+    # the service catalog and Swift might exist too
+    try:
+        connection = connect(cloud_name)
+        connection.authorize()
+    except Exception as e:
+        print(str(e))
+        raise Exception(
+            f"Connection to cloud '{cloud_name}' was not successful. "
+            f"The Catalog endpoint could not be accessed. "
+            f"Please check your cloud connection and authorization."
+        )
+    s3_creds = {}
+    try:
+        endpoint = connection.object_store.get_endpoint()
+    except Exception as e:
+        logger.error(
+            f"FAIL: No object store endpoint found in cloud "
+            f"'{cloud_name}'. No testing for the s3 service possible. 
" + f"Details: %s", e + ) + return 1 + # Get S3 endpoint (swift) and ec2 creds from OpenStack (keystone) + s3_from_ostack(s3_creds, connection, endpoint) + # Overrides (var names are from libs3, in case you wonder) + s3_from_env(s3_creds, "HOST", "S3_HOSTNAME", "https://") + s3_from_env(s3_creds, "AK", "S3_ACCESS_KEY_ID") + s3_from_env(s3_creds, "SK", "S3_SECRET_ACCESS_KEY") + + s3 = s3_conn(s3_creds, connection) + s3_buckets = list_s3_buckets(s3) + if not s3_buckets: + s3_buckets = create_bucket(s3, TESTCONTNAME) + assert s3_buckets + + # If we got till here, s3 is working, now swift + swift_containers = list_containers(connection) + # if not swift_containers: + # swift_containers = create_container(connection, TESTCONTNAME) + result = 0 + if Counter(s3_buckets) != Counter(swift_containers): + print("WARNING: S3 buckets and Swift Containers differ:\n" + f"S3: {sorted(s3_buckets)}\nSW: {sorted(swift_containers)}") + result = 1 + else: + print("SUCCESS: S3 and Swift exist and agree") + # Clean up + # FIXME: Cleanup created EC2 credential + # if swift_containers == [TESTCONTNAME]: + # del_container(connection, TESTCONTNAME) + # Cleanup created S3 bucket + if s3_buckets == [TESTCONTNAME]: + del_bucket(s3, TESTCONTNAME) + return result + + +def main(): + parser = argparse.ArgumentParser( + description="SCS Mandatory IaaS Service Checker") + parser.add_argument( + "--os-cloud", type=str, + help="Name of the cloud from clouds.yaml, alternative " + "to the OS_CLOUD environment variable" + ) + parser.add_argument( + "--s3-endpoint", type=str, + help="URL to the s3 service." + ) + parser.add_argument( + "--s3-access", type=str, + help="Access Key to connect to the s3 service." + ) + parser.add_argument( + "--s3-access-secret", type=str, + help="Access secret to connect to the s3 service." + ) + parser.add_argument( + "--debug", action="store_true", + help="Enable OpenStack SDK debug logging" + ) + args = parser.parse_args() + openstack.enable_logging(debug=args.debug) + + # parse cloud name for lookup in clouds.yaml + cloud = os.environ.get("OS_CLOUD", None) + if args.os_cloud: + cloud = args.os_cloud + assert cloud, ( + "You need to have the OS_CLOUD environment variable set to your cloud " + "name or pass it via --os-cloud" + ) + + s3_credentials = None + if args.s3_endpoint: + if (not args.s3_access) or (not args.s3_access_secret): + print("WARNING: test for external s3 needs access key and access secret.") + s3_credentials = { + "AK": args.s3_access, + "SK": args.s3_access_secret, + "HOST": args.s3_endpoint + } + elif args.s3_access or args.s3_access_secret: + print("WARNING: access to s3 was given, but no endpoint provided.") + + result = check_presence_of_mandatory_services(cloud, s3_credentials) + result = result + check_for_s3_and_swift(cloud, s3_credentials) + + return result + + +if __name__ == "__main__": + main() diff --git a/Tests/iaas/security-groups/default-security-group-rules.py b/Tests/iaas/security-groups/default-security-group-rules.py old mode 100644 new mode 100755 index 773cf0bb8..def511956 --- a/Tests/iaas/security-groups/default-security-group-rules.py +++ b/Tests/iaas/security-groups/default-security-group-rules.py @@ -1,130 +1,181 @@ +#!/usr/bin/env python3 """Default Security Group Rules Checker This script tests the absence of any ingress default security group rule except for ingress rules from the same Security Group. Furthermore the presence of default rules for egress traffic is checked. 
""" +import argparse +from collections import Counter +import logging +import os +import sys import openstack -import os -import argparse +from openstack.exceptions import ResourceNotFound +logger = logging.getLogger(__name__) -def connect(cloud_name: str) -> openstack.connection.Connection: - """Create a connection to an OpenStack cloud +SG_NAME = "scs-test-default-sg" +DESCRIPTION = "scs-test-default-sg" - :param string cloud_name: - The name of the configuration to load from clouds.yaml. - :returns: openstack.connnection.Connection +def check_default_rules(rules, short=False): """ - return openstack.connect( - cloud=cloud_name, - ) + counts all verall ingress rules and egress rules, depending on the requested testing mode - -def test_rules(cloud_name: str): - try: - connection = connect(cloud_name) - rules = connection.network.default_security_group_rules() - except Exception as e: - print(str(e)) - raise Exception( - f"Connection to cloud '{cloud_name}' was not successfully. " - f"The default Security Group Rules could not be accessed. " - f"Please check your cloud connection and authorization." - ) - - # count all overall ingress rules and egress rules. - ingress_rules = 0 - ingress_from_same_sg = 0 - egress_rules = 0 - egress_ipv4_default_sg = 0 - egress_ipv4_custom_sg = 0 - egress_ipv6_default_sg = 0 - egress_ipv6_custom_sg = 0 + :param bool short + if short is True, the testing mode is set on short for older OpenStack versions + """ + ingress_rules = egress_rules = 0 + egress_vars = {'IPv4': {}, 'IPv6': {}} + for key, value in egress_vars.items(): + value['default'] = 0 + if not short: + value['custom'] = 0 if not rules: - print("No default security group rules defined.") - else: - for rule in rules: - direction = rule.direction - ethertype = rule.ethertype - r_custom_sg = rule.used_in_non_default_sg - r_default_sg = rule.used_in_default_sg - if direction == "ingress": - ingress_rules += 1 + logger.info("No default security group rules defined.") + for rule in rules: + direction = rule["direction"] + ethertype = rule["ethertype"] + if direction == "ingress": + if not short: # we allow ingress from the same security group # but only for the default security group - r_group_id = rule.remote_group_id - if (r_group_id == "PARENT" and not r_custom_sg): - ingress_from_same_sg += 1 - elif direction == "egress" and ethertype == "IPv4": - egress_rules += 1 - if rule.remote_ip_prefix: - # this rule does not allow traffic to all external ips - continue - if r_custom_sg: - egress_ipv4_custom_sg += 1 - if r_default_sg: - egress_ipv4_default_sg += 1 - elif direction == "egress" and ethertype == "IPv6": - egress_rules += 1 - if rule.remote_ip_prefix: - # this rule does not allow traffic to all external ips + if rule.remote_group_id == "PARENT" and not rule["used_in_non_default_sg"]: continue - if r_custom_sg: - egress_ipv6_custom_sg += 1 - if r_default_sg: - egress_ipv6_default_sg += 1 - - # test whether there are no other than the allowed ingress rules - assert ingress_rules == ingress_from_same_sg, ( - f"Expected only ingress rules for default security groups, " - f"that allow ingress traffic from the same group. " - f"But there are more - in total {ingress_rules} ingress rules. 
" - f"There should be only {ingress_from_same_sg} ingress rules.") - assert egress_rules > 0, ( - f"Expected to have more than {egress_rules} egress rules present.") - var_list = [egress_ipv4_default_sg, egress_ipv4_custom_sg, - egress_ipv6_default_sg, egress_ipv6_custom_sg] - assert all([var > 0 for var in var_list]), ( - "Not all expected egress rules are present. " - "Expected rules for egress for IPv4 and IPv6 " - "both for default and custom security groups.") - - result_dict = { - "Ingress Rules": ingress_rules, - "Egress Rules": egress_rules - } - return result_dict + ingress_rules += 1 + elif direction == "egress" and ethertype in egress_vars: + egress_rules += 1 + if short: + egress_vars[ethertype]['default'] += 1 + continue + if rule.remote_ip_prefix: + # this rule does not allow traffic to all external ips + continue + # note: these two are not mutually exclusive + if rule["used_in_default_sg"]: + egress_vars[ethertype]['default'] += 1 + if rule["used_in_non_default_sg"]: + egress_vars[ethertype]['custom'] += 1 + # test whether there are no unallowed ingress rules + if ingress_rules: + logger.error(f"Expected no default ingress rules, found {ingress_rules}.") + # test whether all expected egress rules are present + missing = [(key, key2) for key, val in egress_vars.items() for key2, val2 in val.items() if not val2] + if missing: + logger.error( + "Expected rules for egress for IPv4 and IPv6 both for default and custom security groups. " + f"Missing rule types: {', '.join(str(x) for x in missing)}" + ) + logger.info(str({ + "Unallowed Ingress Rules": ingress_rules, + "Egress Rules": egress_rules, + })) + + +def create_security_group(conn, sg_name: str = SG_NAME, description: str = DESCRIPTION): + """Create security group in openstack + + :returns: + ~openstack.network.v2.security_group.SecurityGroup: The new security group or None + """ + sg = conn.network.create_security_group(name=sg_name, description=description) + return sg.id + + +def delete_security_group(conn, sg_id): + conn.network.delete_security_group(sg_id) + # in case of a successful delete finding the sg will throw an exception + try: + conn.network.find_security_group(name_or_id=sg_id) + except ResourceNotFound: + logger.debug(f"Security group {sg_id} was deleted successfully.") + except Exception: + logger.critical(f"Security group {sg_id} was not deleted successfully") + raise + + +def altern_test_rules(connection: openstack.connection.Connection): + sg_id = create_security_group(connection) + try: + sg = connection.network.find_security_group(name_or_id=sg_id) + check_default_rules(sg.security_group_rules, short=True) + finally: + delete_security_group(connection, sg_id) + + +def test_rules(connection: openstack.connection.Connection): + try: + rules = list(connection.network.default_security_group_rules()) + except ResourceNotFound: + logger.info( + "API call failed. OpenStack components might not be up to date. " + "Falling back to old-style test method. 
" + ) + logger.debug("traceback", exc_info=True) + altern_test_rules(connection) + else: + check_default_rules(rules) + + +class CountingHandler(logging.Handler): + def __init__(self, level=logging.NOTSET): + super().__init__(level=level) + self.bylevel = Counter() + + def handle(self, record): + self.bylevel[record.levelno] += 1 def main(): parser = argparse.ArgumentParser( - description="SCS Default Security Group Rules Checker") + description="SCS Default Security Group Rules Checker", + ) parser.add_argument( - "--os-cloud", type=str, + "--os-cloud", + type=str, help="Name of the cloud from clouds.yaml, alternative " - "to the OS_CLOUD environment variable" + "to the OS_CLOUD environment variable", ) parser.add_argument( - "--debug", action="store_true", - help="Enable OpenStack SDK debug logging" + "--debug", action="store_true", help="Enable debug logging", ) args = parser.parse_args() openstack.enable_logging(debug=args.debug) + logging.basicConfig( + format="%(levelname)s: %(message)s", + level=logging.DEBUG if args.debug else logging.INFO, + ) + + # count the number of log records per level (used for summary and return code) + counting_handler = CountingHandler(level=logging.INFO) + logger.addHandler(counting_handler) # parse cloud name for lookup in clouds.yaml - cloud = os.environ.get("OS_CLOUD", None) - if args.os_cloud: - cloud = args.os_cloud - assert cloud, ( - "You need to have the OS_CLOUD environment variable set to your cloud " - "name or pass it via --os-cloud" - ) + cloud = args.os_cloud or os.environ.get("OS_CLOUD", None) + if not cloud: + raise ValueError( + "You need to have the OS_CLOUD environment variable set to your cloud " + "name or pass it via --os-cloud" + ) - print(test_rules(cloud)) + with openstack.connect(cloud) as conn: + test_rules(conn) + + c = counting_handler.bylevel + logger.debug(f"Total critical / error / warning: {c[logging.CRITICAL]} / {c[logging.ERROR]} / {c[logging.WARNING]}") + if not c[logging.CRITICAL]: + print("security-groups-default-rules-check: " + ('PASS', 'FAIL')[min(1, c[logging.ERROR])]) + return min(127, c[logging.CRITICAL] + c[logging.ERROR]) # cap at 127 due to OS restrictions if __name__ == "__main__": - main() + try: + sys.exit(main()) + except SystemExit: + raise + except BaseException as exc: + logging.debug("traceback", exc_info=True) + logging.critical(str(exc)) + sys.exit(1) diff --git a/Tests/iaas/volume-backup/volume-backup-tester.py b/Tests/iaas/volume-backup/volume-backup-tester.py old mode 100644 new mode 100755 index f4fa9522d..bcbb89664 --- a/Tests/iaas/volume-backup/volume-backup-tester.py +++ b/Tests/iaas/volume-backup/volume-backup-tester.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 """Volume Backup API tester for Block Storage API This test script executes basic operations on the Block Storage API centered @@ -14,7 +15,9 @@ import argparse import getpass +import logging import os +import sys import time import typing @@ -29,28 +32,23 @@ WAIT_TIMEOUT = 60 -def connect(cloud_name: str, password: typing.Optional[str] = None - ) -> openstack.connection.Connection: - """Create a connection to an OpenStack cloud - - :param string cloud_name: - The name of the configuration to load from clouds.yaml. - - :param string password: - Optional password override for the connection. 
- - :returns: openstack.connnection.Connection - """ - - if password: - return openstack.connect( - cloud=cloud_name, - password=password - ) - else: - return openstack.connect( - cloud=cloud_name, - ) +def wait_for_resource( + get_func: typing.Callable[[str], openstack.resource.Resource], + resource_id: str, + expected_status=("available", ), + timeout=WAIT_TIMEOUT, +) -> None: + seconds_waited = 0 + resource = get_func(resource_id) + while resource is None or resource.status not in expected_status: + time.sleep(1.0) + seconds_waited += 1 + if seconds_waited >= timeout: + raise RuntimeError( + f"Timed out after {seconds_waited} s: waiting for resource {resource_id} " + f"to be in status {expected_status} (current: {resource and resource.status})" + ) + resource = get_func(resource_id) def test_backup(conn: openstack.connection.Connection, @@ -64,133 +62,90 @@ def test_backup(conn: openstack.connection.Connection, """ # CREATE VOLUME - print("Creating volume ...") - volume = conn.block_storage.create_volume( - name=f"{prefix}volume", - size=1 - ) - assert volume is not None, ( - "Initial volume creation failed" - ) + volume_name = f"{prefix}volume" + logging.info(f"Creating volume '{volume_name}' ...") + volume = conn.block_storage.create_volume(name=volume_name, size=1) + if volume is None: + raise RuntimeError(f"Creation of initial volume '{volume_name}' failed") volume_id = volume.id - assert conn.block_storage.get_volume(volume_id) is not None, ( - "Retrieving initial volume by ID failed" - ) + if conn.block_storage.get_volume(volume_id) is None: + raise RuntimeError(f"Retrieving initial volume by ID '{volume_id}' failed") - print( + logging.info( f"↳ waiting for volume with ID '{volume_id}' to reach status " f"'available' ..." ) - seconds_waited = 0 - while conn.block_storage.get_volume(volume_id).status != "available": - time.sleep(1.0) - seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for volume to reach status " - f"'available' (volume id: {volume_id}) after {seconds_waited} " - f"seconds" - ) - print("Create empty volume: PASS") + wait_for_resource(conn.block_storage.get_volume, volume_id, timeout=timeout) + logging.info("Create empty volume: PASS") # CREATE BACKUP - print("Creating backup from volume ...") - backup = conn.block_storage.create_backup( - name=f"{prefix}volume-backup", - volume_id=volume_id - ) - assert backup is not None, ( - "Backup creation failed" - ) + logging.info("Creating backup from volume ...") + backup = conn.block_storage.create_backup(name=f"{prefix}volume-backup", volume_id=volume_id) + if backup is None: + raise RuntimeError("Backup creation failed") backup_id = backup.id - assert conn.block_storage.get_backup(backup_id) is not None, ( - "Retrieving backup by ID failed" - ) + if conn.block_storage.get_backup(backup_id) is None: + raise RuntimeError("Retrieving backup by ID failed") - print(f"↳ waiting for backup '{backup_id}' to become available ...") - seconds_waited = 0 - while conn.block_storage.get_backup(backup_id).status != "available": - time.sleep(1.0) - seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for backup to reach status " - f"'available' (backup id: {backup_id}) after {seconds_waited} " - f"seconds" - ) - print("Create backup from volume: PASS") + logging.info(f"↳ waiting for backup '{backup_id}' to become available ...") + wait_for_resource(conn.block_storage.get_backup, backup_id, timeout=timeout) + logging.info("Create backup from volume: PASS") # 
RESTORE BACKUP - print("Restoring backup to volume ...") restored_volume_name = f"{prefix}restored-backup" - conn.block_storage.restore_backup( - backup_id, - name=restored_volume_name - ) + logging.info(f"Restoring backup to volume '{restored_volume_name}' ...") + conn.block_storage.restore_backup(backup_id, name=restored_volume_name) - print( + logging.info( f"↳ waiting for restoration target volume '{restored_volume_name}' " f"to be created ..." ) - seconds_waited = 0 - while conn.block_storage.find_volume(restored_volume_name) is None: - time.sleep(1.0) - seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for restored volume to be created " - f"(volume name: {restored_volume_name}) after {seconds_waited} " - f"seconds" - ) + wait_for_resource(conn.block_storage.find_volume, restored_volume_name, timeout=timeout) # wait for the volume restoration to finish - print( + logging.info( f"↳ waiting for restoration target volume '{restored_volume_name}' " f"to reach 'available' status ..." ) volume_id = conn.block_storage.find_volume(restored_volume_name).id - while conn.block_storage.get_volume(volume_id).status != "available": - time.sleep(1.0) - seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for restored volume reach status " - f"'available' (volume id: {volume_id}) after {seconds_waited} " - f"seconds" - ) - print("Restore volume from backup: PASS") + wait_for_resource(conn.block_storage.get_volume, volume_id, timeout=timeout) + logging.info("Restore volume from backup: PASS") def cleanup(conn: openstack.connection.Connection, prefix=DEFAULT_PREFIX, - timeout=WAIT_TIMEOUT): + timeout=WAIT_TIMEOUT) -> bool: """ Looks up volume and volume backup resources matching the given prefix and deletes them. + Returns False if there were any errors during cleanup which might leave + resources behind. Otherwise returns True to indicate cleanup success. 
""" - def wait_for_resource(resource_type: str, resource_id: str, - expected_status="available") -> None: - seconds_waited = 0 - get_func = getattr(conn.block_storage, f"get_{resource_type}") - while get_func(resource_id).status != expected_status: - time.sleep(1.0) - seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for {resource_type} during " - f"cleanup to be in status '{expected_status}' " - f"({resource_type} id: {resource_id}) after {seconds_waited} " - f"seconds" - ) - - print(f"\nPerforming cleanup for resources with the " - f"'{prefix}' prefix ...") + logging.info(f"Performing cleanup for resources with the '{prefix}' prefix ...") + cleanup_issues = 0 # count failed cleanup operations backups = conn.block_storage.backups() for backup in backups: - if backup.name.startswith(prefix): - try: - wait_for_resource("backup", backup.id) - except openstack.exceptions.ResourceNotFound: - # if the resource has vanished on - # its own in the meantime ignore it - continue - print(f"↳ deleting volume backup '{backup.id}' ...") + if not backup.name.startswith(prefix): + continue + try: + # we can only delete if status is available or error, so try and wait + wait_for_resource( + conn.block_storage.get_backup, + backup.id, + expected_status=("available", "error"), + timeout=timeout, + ) + logging.info(f"↳ deleting volume backup '{backup.id}' ...") conn.block_storage.delete_backup(backup.id) + except openstack.exceptions.ResourceNotFound: + # if the resource has vanished on its own in the meantime ignore it + continue + except Exception as e: + # Most common exception would be a timeout in wait_for_resource. + # We do not need to increment cleanup_issues here since + # any remaining ones will be caught in the next loop down below anyway. + logging.debug("traceback", exc_info=True) + logging.warning(str(e)) # wait for all backups to be cleaned up before attempting to remove volumes seconds_waited = 0 @@ -200,22 +155,42 @@ def wait_for_resource(resource_type: str, resource_id: str, ) > 0: time.sleep(1.0) seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for all backups with prefix " - f"'{prefix}' to finish deletion" - ) + if seconds_waited >= timeout: + cleanup_issues += 1 + logging.warning( + f"Timeout reached while waiting for all backups with prefix " + f"'{prefix}' to finish deletion during cleanup after " + f"{seconds_waited} seconds" + ) + break volumes = conn.block_storage.volumes() for volume in volumes: - if volume.name.startswith(prefix): - try: - wait_for_resource("volume", volume.id) - except openstack.exceptions.ResourceNotFound: - # if the resource has vanished on - # its own in the meantime ignore it - continue - print(f"↳ deleting volume '{volume.id}' ...") + if not volume.name.startswith(prefix): + continue + try: + wait_for_resource( + conn.block_storage.get_volume, + volume.id, + expected_status=("available", "error"), + timeout=timeout, + ) + logging.info(f"↳ deleting volume '{volume.id}' ...") conn.block_storage.delete_volume(volume.id) + except openstack.exceptions.ResourceNotFound: + # if the resource has vanished on its own in the meantime ignore it + continue + except Exception as e: + logging.debug("traceback", exc_info=True) + logging.warning(str(e)) + cleanup_issues += 1 + + if cleanup_issues: + logging.info( + f"Some resources with the '{prefix}' prefix were not cleaned up!" 
+ ) + + return not cleanup_issues def main(): @@ -257,26 +232,43 @@ def main(): ) args = parser.parse_args() openstack.enable_logging(debug=args.debug) + logging.basicConfig( + format="%(levelname)s: %(message)s", + level=logging.DEBUG if args.debug else logging.INFO, + ) # parse cloud name for lookup in clouds.yaml - cloud = os.environ.get("OS_CLOUD", None) - if args.os_cloud: - cloud = args.os_cloud - assert cloud, ( - "You need to have the OS_CLOUD environment variable set to your " - "cloud name or pass it via --os-cloud" - ) - conn = connect( - cloud, - password=getpass.getpass("Enter password: ") if args.ask else None - ) - if args.cleanup_only: - cleanup(conn, prefix=args.prefix, timeout=args.timeout) - else: - cleanup(conn, prefix=args.prefix, timeout=args.timeout) - test_backup(conn, prefix=args.prefix, timeout=args.timeout) - cleanup(conn, prefix=args.prefix, timeout=args.timeout) + cloud = args.os_cloud or os.environ.get("OS_CLOUD", None) + if not cloud: + raise Exception( + "You need to have the OS_CLOUD environment variable set to your " + "cloud name or pass it via --os-cloud" + ) + password = getpass.getpass("Enter password: ") if args.ask else None + + with openstack.connect(cloud, password=password) as conn: + if not cleanup(conn, prefix=args.prefix, timeout=args.timeout): + raise RuntimeError("Initial cleanup failed") + if args.cleanup_only: + logging.info("Cleanup-only run finished.") + return + try: + test_backup(conn, prefix=args.prefix, timeout=args.timeout) + except BaseException: + print('volume-backup-check: FAIL') + raise + else: + print('volume-backup-check: PASS') + finally: + cleanup(conn, prefix=args.prefix, timeout=args.timeout) if __name__ == "__main__": - main() + try: + sys.exit(main()) + except SystemExit: + raise + except BaseException as exc: + logging.debug("traceback", exc_info=True) + logging.critical(str(exc)) + sys.exit(1) diff --git a/Tests/iaas/volume-types/volume-types-check.py b/Tests/iaas/volume-types/volume-types-check.py old mode 100644 new mode 100755 index 444755816..4b1945fb8 --- a/Tests/iaas/volume-types/volume-types-check.py +++ b/Tests/iaas/volume-types/volume-types-check.py @@ -141,6 +141,8 @@ def main(argv): "Total critical / error / warning: " f"{c[logging.CRITICAL]} / {c[logging.ERROR]} / {c[logging.WARNING]}" ) + if not c[logging.CRITICAL]: + print("volume-types-check: " + ('PASS', 'FAIL')[min(1, c[logging.ERROR])]) return min(127, c[logging.CRITICAL] + c[logging.ERROR]) # cap at 127 due to OS restrictions diff --git a/Tests/iam/domain-manager/domain-manager-check.py b/Tests/iam/domain-manager/domain-manager-check.py old mode 100644 new mode 100755 index e56aad884..41040122b --- a/Tests/iam/domain-manager/domain-manager-check.py +++ b/Tests/iam/domain-manager/domain-manager-check.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 """Domain Manager policy configuration checker This script uses the OpenStack SDK to validate the proper implementation diff --git a/Tests/kaas/k8s-version-policy/k8s-eol-data.yml b/Tests/kaas/k8s-version-policy/k8s-eol-data.yml index 6a549c464..3a3d3b2eb 100644 --- a/Tests/kaas/k8s-version-policy/k8s-eol-data.yml +++ b/Tests/kaas/k8s-version-policy/k8s-eol-data.yml @@ -1,5 +1,7 @@ # https://kubernetes.io/releases/patch-releases/#detailed-release-history-for-active-branches +- branch: '1.31' + end-of-life: '2025-10-28' - branch: '1.30' end-of-life: '2025-06-28' - branch: '1.29' diff --git a/Tests/scs-compatible-iaas.yaml b/Tests/scs-compatible-iaas.yaml index 0d9c0ee61..5ad119fbf 100644 --- 
a/Tests/scs-compatible-iaas.yaml
+++ b/Tests/scs-compatible-iaas.yaml
@@ -154,7 +154,75 @@ modules:
         tags: [mandatory]
         description: >
           Must fulfill all requirements of
+  - id: scs-0114-v1
+    name: Volume Types
+    url: https://docs.scs.community/standards/scs-0114-v1-volume-type-standard
+    run:
+      - executable: ./iaas/volume-types/volume-types-check.py
+        args: -c {os_cloud} -d
+    testcases:
+      - id: volume-types-check
+        tags: [mandatory]
+        description: >
+          Must fulfill all requirements of <https://docs.scs.community/standards/scs-0114-v1-volume-type-standard>
+  - id: scs-0115-v1
+    name: Default rules for security groups
+    url: https://docs.scs.community/standards/scs-0115-v1-default-rules-for-security-groups
+    run:
+      - executable: ./iaas/security-groups/default-security-group-rules.py
+        args: --os-cloud {os_cloud} --debug
+    testcases:
+      - id: security-groups-default-rules-check
+        tags: [mandatory]
+        description: >
+          Must fulfill all requirements of <https://docs.scs.community/standards/scs-0115-v1-default-rules-for-security-groups>
+  - id: scs-0116-v1
+    name: Key manager
+    url: https://docs.scs.community/standards/scs-0116-v1-key-manager-standard
+    run:
+      - executable: ./iaas/key-manager/check-for-key-manager.py
+        args: --os-cloud {os_cloud} --debug
+    testcases:
+      - id: key-manager-check
+        tags: [mandatory]
+        description: >
+          Must fulfill all requirements of <https://docs.scs.community/standards/scs-0116-v1-key-manager-standard>
+  - id: scs-0117-v1
+    name: Volume backup
+    url: https://docs.scs.community/standards/scs-0117-v1-volume-backup-service
+    run:
+      - executable: ./iaas/volume-backup/volume-backup-tester.py
+        args: --os-cloud {os_cloud} --debug
+    testcases:
+      - id: volume-backup-check
+        tags: [mandatory]
+        description: >
+          Must fulfill all requirements of <https://docs.scs.community/standards/scs-0117-v1-volume-backup-service>
+  - id: scs-0121-v1
+    name: Availability Zones
+    url: https://docs.scs.community/standards/scs-0121-v1-Availability-Zones-Standard
+    testcases:
+      - id: availability-zones-check
+        tags: [availability-zones]
+        description: >
+          Note: manual check!
+          Must fulfill all requirements of <https://docs.scs.community/standards/scs-0121-v1-Availability-Zones-Standard>
+  - id: scs-0302-v1
+    name: Domain Manager Role
+    url: https://docs.scs.community/standards/scs-0302-v1-domain-manager-role
+    # run:
+    #   - executable: ./iam/domain-manager/domain-manager-check.py
+    #     args: --os-cloud {os_cloud} --debug --domain-config ...
+    testcases:
+      - id: domain-manager-check
+        tags: [domain-manager]
+        description: >
+          Note: manual check!
+          Must fulfill all requirements of <https://docs.scs.community/standards/scs-0302-v1-domain-manager-role>
 timeline:
+  - date: 2024-11-08
+    versions:
+      v5: draft
+      v4: effective
+      v3: deprecated
   - date: 2024-08-23
     versions:
       v5: draft
@@ -202,8 +270,15 @@ versions:
       - ref: scs-0104-v1
         parameters:
           image_spec: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Tests/iaas/scs-0104-v1-images-v5.yaml
+      - scs-0114-v1
+      - scs-0115-v1
+      - scs-0116-v1
+      - scs-0117-v1
+      - scs-0121-v1
+      - scs-0302-v1
     targets:
       main: mandatory
+      preview: domain-manager/availability-zones
   - version: v4
     stabilized_at: 2024-02-28
     include:
diff --git a/compliance-monitor/bootstrap.yaml b/compliance-monitor/bootstrap.yaml
index 50b722703..8339c422d 100644
--- a/compliance-monitor/bootstrap.yaml
+++ b/compliance-monitor/bootstrap.yaml
@@ -50,6 +50,9 @@ accounts:
       - public_key: "AAAAC3NzaC1lZDI1NTE5AAAAILufk4C7e0eQQIkmUDK8GB2IoiDjYtv6mx2eE8wZ3VWT"
         public_key_type: "ssh-ed25519"
         public_key_name: "primary"
+  - subject: scaleup-occ2
+    delegates:
+      - zuul_ci
   - subject: syseleven-dus2
     delegates:
       - zuul_ci
diff --git a/compliance-monitor/monitor.py b/compliance-monitor/monitor.py
index aa02cbae1..c6dcb2a41 100755
--- a/compliance-monitor/monitor.py
+++ b/compliance-monitor/monitor.py
@@ -96,6 +96,11 @@ class ViewType(Enum):
     fragment = "fragment"


+VIEW_REPORT = {
+    ViewType.markdown: 'report.md',
+    ViewType.fragment: 'report.md',
+    ViewType.page: 'overview.html',
+}
 VIEW_DETAIL = {
     ViewType.markdown: 'details.md',
     ViewType.fragment: 'details.md',
@@ -111,7 +116,7 @@ class ViewType(Enum):
     ViewType.fragment: 'scope.md',
     ViewType.page: 'overview.html',
 }
-REQUIRED_TEMPLATES = tuple(set(fn for view in (VIEW_DETAIL, VIEW_TABLE, VIEW_SCOPE) for fn in view.values()))
+REQUIRED_TEMPLATES = tuple(set(fn for view in (VIEW_REPORT, VIEW_DETAIL, VIEW_TABLE, VIEW_SCOPE) for fn in view.values()))


 # do I hate these globals, but I don't see another way with these frameworks
@@ -276,18 +281,16 @@ def evaluate(self, scope_results):
             by_validity[self.versions[vname].validity].append(vname)
         # go through worsening validity values until a passing version is found
         relevant = []
+        best_passed = None
         for validity in ('effective', 'warn', 'deprecated'):
             vnames = by_validity[validity]
             relevant.extend(vnames)
             if any(version_results[vname]['result'] == 1 for vname in vnames):
+                best_passed = validity
                 break
         # always include draft (but only at the end)
         relevant.extend(by_validity['draft'])
         passed = [vname for vname in relevant if version_results[vname]['result'] == 1]
-        if passed:
-            summary = 1 if self.versions[passed[0]].validity in ('effective', 'warn') else -1
-        else:
-            summary = 0
         return {
             'name': self.name,
             'versions': version_results,
@@ -297,7 +300,7 @@ def evaluate(self, scope_results):
                 vname + ASTERISK_LOOKUP[self.versions[vname].validity]
                 for vname in passed
             ]),
-            'summary': summary,
+            'best_passed': best_passed,
         }

     def update_lookup(self, target_dict):
@@ -544,14 +547,23 @@ async def get_status(
     return convert_result_rows_to_dict2(rows2, get_scopes(), include_report=True)


-def render_view(view, view_type, base_url='/', title=None, **kwargs):
+def _build_report_url(base_url, report, *args, **kwargs):
+    if kwargs.get('download'):
+        return f"{base_url}reports/{report}"
+    url = f"{base_url}page/report/{report}"
+    if len(args) == 2:  # version, testcase_id --> add corresponding fragment specifier
+        url += f"#{args[0]}_{args[1]}"
+    return url
+
+
+def render_view(view, view_type, detail_page='detail', base_url='/', title=None, **kwargs):
     media_type = {ViewType.markdown: 'text/markdown'}.get(view_type, 'text/html')
stage1 = stage2 = view[view_type] if view_type is ViewType.page: stage1 = view[ViewType.fragment] def scope_url(uuid): return f"{base_url}page/scope/{uuid}" # noqa: E306,E704 - def detail_url(subject, scope): return f"{base_url}page/detail/{subject}/{scope}" # noqa: E306,E704 - def report_url(report): return f"{base_url}reports/{report}" # noqa: E306,E704 + def detail_url(subject, scope): return f"{base_url}page/{detail_page}/{subject}/{scope}" # noqa: E306,E704 + def report_url(report, *args, **kwargs): return _build_report_url(base_url, report, *args, **kwargs) # noqa: E306,E704 fragment = templates_map[stage1].render(detail_url=detail_url, report_url=report_url, scope_url=scope_url, **kwargs) if view_type != ViewType.markdown and stage1.endswith('.md'): fragment = markdown(fragment, extensions=['extra']) @@ -560,6 +572,23 @@ def report_url(report): return f"{base_url}reports/{report}" # noqa: E306,E704 return Response(content=fragment, media_type=media_type) +@app.get("/{view_type}/report/{report_uuid}") +async def get_report_view( + request: Request, + account: Annotated[Optional[tuple[str, str]], Depends(auth)], + conn: Annotated[connection, Depends(get_conn)], + view_type: ViewType, + report_uuid: str, +): + with conn.cursor() as cur: + specs = db_get_report(cur, report_uuid) + if not specs: + raise HTTPException(status_code=404) + spec = specs[0] + check_role(account, spec['subject'], ROLES['read_any']) + return render_view(VIEW_REPORT, view_type, report=spec, base_url=settings.base_url, title=f'Report {report_uuid}') + + @app.get("/{view_type}/detail/{subject}/{scopeuuid}") async def get_detail( request: Request, @@ -618,7 +647,11 @@ async def get_table_full( with conn.cursor() as cur: rows2 = db_get_relevant_results2(cur, approved_only=False) results2 = convert_result_rows_to_dict2(rows2, get_scopes()) - return render_view(VIEW_TABLE, view_type, results=results2, base_url=settings.base_url, title="SCS compliance overview") + return render_view( + VIEW_TABLE, view_type, results=results2, + detail_page='detail_full', base_url=settings.base_url, + title="SCS compliance overview", + ) @app.get("/{view_type}/scope/{scopeuuid}") @@ -692,8 +725,13 @@ def pick_filter(results, subject, scope): def summary_filter(scope_results): """Jinja filter to construct summary from `scope_results`""" passed_str = scope_results.get('passed_str', '') or '–' - summary = scope_results.get('summary', 0) - color = {1: '✅'}.get(summary, '🛑') # instead of 🟢🔴 (hard to distinguish for color-blind folks) + best_passed = scope_results.get('best_passed') + # avoid simple 🟢🔴 (hard to distinguish for color-blind folks) + color = { + 'effective': '✅', + 'warn': '✅', # forgo differentiation here in favor of simplicity (will be apparent in version list) + 'deprecated': '🟧', + }.get(best_passed, '🛑') return f'{color} {passed_str}' diff --git a/compliance-monitor/templates/details.md.j2 b/compliance-monitor/templates/details.md.j2 index e812cd741..30136b149 100644 --- a/compliance-monitor/templates/details.md.j2 +++ b/compliance-monitor/templates/details.md.j2 @@ -24,7 +24,7 @@ No recent test results available. 
{% set res = version_result.results[testcase_id] if testcase_id in version_result.results else dict(result=0) -%}
| {% if res.result != 1 %}⚠️ {% endif %}{{ testcase.id }} | {#- #}
{% if res.report -%}
-[{{ res.result | verdict_check }}]({{ report_url(res.report) }})
+[{{ res.result | verdict_check }}]({{ report_url(res.report, version, testcase_id) }})
{%- else -%}
{{ res.result | verdict_check }}
{%- endif -%}
diff --git a/compliance-monitor/templates/overview.html.j2 b/compliance-monitor/templates/overview.html.j2
index 154bd0cb2..830b94121 100644
--- a/compliance-monitor/templates/overview.html.j2
+++ b/compliance-monitor/templates/overview.html.j2
@@ -1,16 +1,18 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>{{ title or 'SCS compliance overview' }}</title>
+<meta charset="utf-8">
+</head>
+<body>
-<!DOCTYPE html>
-<html>
-<title>{{ title or 'SCS compliance overview' }}</title>
-<meta charset="utf-8">
-<body>
 {% if title %}
<h1>
{{title}}
</h1>
{% endif %}{{fragment}}
diff --git a/compliance-monitor/templates/overview.md.j2 b/compliance-monitor/templates/overview.md.j2
index 36e3ced23..77ba6bcc9 100644
--- a/compliance-monitor/templates/overview.md.j2
+++ b/compliance-monitor/templates/overview.md.j2
@@ -2,6 +2,9 @@
 we could of course iterate over results etc., but hardcode the table
 (except the actual results, of course)
 for the time being to have the highest degree of control
 -#}
+
+Version numbers are suffixed by a symbol depending on their state: * for _draft_, † for _warn_ (soon to be deprecated), and †† for _deprecated_.
+
 {% set iaas = '50393e6f-2ae1-4c5c-a62c-3b75f2abef3f' -%}
 | Name | Description | Operator | [SCS-compatible IaaS](https://docs.scs.community/standards/scs-compatible-iaas/) | HealthMon |
 |-------|--------------|-----------|----------------------|:----------:|
@@ -32,6 +35,9 @@ for the time being to have the highest degree of control
 | [REGIO.cloud](https://regio.digital) | Public cloud for customers | OSISM GmbH |
 {#- #} [{{ results | pick('regio-a', iaas) | summary }}]({{ detail_url('regio-a', iaas) }}) {# -#}
 | [HM](https://apimon.services.regio.digital/public-dashboards/17cf094a47404398a5b8e35a4a3968d4?orgId=1&refresh=5m) |
+| [ScaleUp Open Cloud](https://www.scaleuptech.com/cloud-hosting/) | Public cloud for customers | ScaleUp Technologies GmbH & Co. KG |
+{#- #} [{{ results | pick('scaleup-occ2', iaas) | summary }}]({{ detail_url('scaleup-occ2', iaas) }}) {# -#}
+| [HM](https://health.occ2.scaleup.sovereignit.cloud) |
 | [syseleven](https://www.syseleven.de/en/products-services/openstack-cloud/)<br/>(2 SCS regions) | Public OpenStack Cloud | SysEleven GmbH | {# #}
 {#- #}dus2: [{{ results | pick('syseleven-dus2', iaas) | summary }}]({{ detail_url('syseleven-dus2', iaas) }}){# -#}
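The `summary_filter` rewrite in monitor.py above derives the status icon from `best_passed` (the best validity class among passing versions) rather than the old numeric `summary`, and the new legend in overview.md.j2 documents the version suffixes produced via `ASTERISK_LOOKUP`. The following minimal sketch shows how these pieces fit together; `ASTERISK_LOOKUP` itself is not part of this patch, so its mapping here is an assumption inferred from the legend.

```python
# Sketch only: ASTERISK_LOOKUP is assumed from the overview.md.j2 legend
# (* = draft, † = warn, †† = deprecated); the actual definition in
# monitor.py may differ.
ASTERISK_LOOKUP = {'effective': '', 'draft': '*', 'warn': '†', 'deprecated': '††'}


def summary_icon(best_passed):
    """Mirror the colour choice made by the patched summary_filter."""
    return {
        'effective': '✅',
        'warn': '✅',         # no differentiation here; visible in the version list
        'deprecated': '🟧',   # passed, but only with a deprecated version
    }.get(best_passed, '🛑')  # None (nothing passed) falls through to red


# e.g. a subject whose best passing version is deprecated now gets an
# orange square instead of a plain pass/fail:
assert summary_icon('deprecated') == '🟧'
assert summary_icon(None) == '🛑'
```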
diff --git a/compliance-monitor/templates/report.md.j2 b/compliance-monitor/templates/report.md.j2
new file mode 100644
index 000000000..e46c2e086
--- /dev/null
+++ b/compliance-monitor/templates/report.md.j2
@@ -0,0 +1,66 @@
+## General info
+
+- uuid: [{{ report.run.uuid }}]({{ report_url(report.run.uuid, download=True) }})
+- subject: {{ report.subject }}
+- scope: [{{ report.spec.name }}]({{ scope_url(report.spec.uuid) }})
+- checked at: {{ report.checked_at }}
+
+## Results
+
+{% for version, version_results in report.versions.items() %}{% if version_results %}
+### {{ version }}
+
+| test case | result | invocation |
+|---|---|---|
+{% for testcase_id, result_data in version_results.items() -%}
+| {{ testcase_id }} {: #{{ version + '_' + testcase_id }} } | {{ result_data.result | verdict_check }} | [{{ result_data.invocation }}](#{{ result_data.invocation }}) |
+{% endfor %}
+{% endif %}{% endfor %}
+
+## Run
+
+### Variable assignment
+
+| key | value |
+|---|---|
+{% for key, value in report.run.assignment.items() -%}
+| `{{ key }}` | `{{ value }}` |
+{% endfor %}
+
+### Check tool invocations
+
+{% for invid, invdata in report.run.invocations.items() %}
+#### Invocation {{invid}} {: #{{ invid }} }
+
+- cmd: `{{ invdata.cmd }}`
+- rc: {{ invdata.rc }}
+- channel summary
+{%- for channel in ('critical', 'error', 'warning') %}
+{%- if invdata[channel] %}
+  - **{{ channel }}: {{ invdata[channel] }}**
+{%- else %}
+  - {{ channel }}: –
+{%- endif %}
+{%- endfor %}
+- results
+{%- for resultid, result in invdata.results.items() %}
+  - {{ resultid }}: {{ result | verdict_check }}
+{%- endfor %}
+
+{% if invdata.stdout -%}
+<details><summary>Captured stdout</summary>
+```text
+{{ '\n'.join(invdata.stdout) }}
+```
+</details>
+{%- endif %}
+
+{% if invdata.stderr -%}
+<details><summary>Captured stderr</summary>
+{%- for line in invdata.stderr %}
+<div>{% if line.split(':', 1)[0].lower() in ('warning', 'error', 'critical') %}{{ '<strong>' + line + '</strong>' }}{% else %}{{ line }}{% endif %}</div>
+{%- endfor %}
+</details>
+{%- endif %}
+
+{% endfor %}
diff --git a/playbooks/clouds.yaml.j2 b/playbooks/clouds.yaml.j2
index da0d3602d..2df1cdbd8 100644
--- a/playbooks/clouds.yaml.j2
+++ b/playbooks/clouds.yaml.j2
@@ -83,6 +83,15 @@ clouds:
       application_credential_id: "{{ clouds_conf.regio_a_ac_id }}"
       application_credential_secret: "{{ clouds_conf.regio_a_ac_secret }}"
       auth_type: "v3applicationcredential"
+  scaleup-occ2:
+    auth_type: v3applicationcredential
+    auth:
+      auth_url: https://keystone.occ2.scaleup.cloud
+      application_credential_id: "{{ clouds_conf.scaleup_occ2_ac_id }}"
+      application_credential_secret: "{{ clouds_conf.scaleup_occ2_ac_secret }}"
+    region_name: "RegionOne"
+    interface: "public"
+    identity_api_version: 3
   syseleven-dus2:
     interface: public
     identity_api_verion: 3
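Once the template above is rendered into a `clouds.yaml`, the new `scaleup-occ2` entry can be smoke-tested with a few lines of openstacksdk code. This is a minimal sketch, not part of the patch itself; it assumes the application credential secrets have been provisioned and the rendered `clouds.yaml` sits in a standard lookup location.

```python
#!/usr/bin/env python3
# Minimal sketch: verify that the scaleup-occ2 entry resolves and authenticates.
import openstack

# connect() looks up the named entry in clouds.yaml (and secure.yaml, if present)
conn = openstack.connect(cloud="scaleup-occ2")

# Listing flavors exercises the auth_url, the application credential and the
# public interface configured above.
for flavor in conn.compute.flavors():
    print(flavor.name)
```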