diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml deleted file mode 100644 index 36a9954..0000000 --- a/.github/workflows/integration_test.yaml +++ /dev/null @@ -1,14 +0,0 @@ -name: Integration tests - -on: - pull_request: - -jobs: - integration-tests: - uses: canonical/operator-workflows/.github/workflows/integration_test.yaml@main - secrets: inherit - with: - channel: 1.28-strict/stable - juju-channel: 3.4/stable - self-hosted-runner: true - self-hosted-runner-label: "edge" diff --git a/.github/workflows/publish_charm.yaml b/.github/workflows/publish_charm.yaml index e14e332..7fc9275 100644 --- a/.github/workflows/publish_charm.yaml +++ b/.github/workflows/publish_charm.yaml @@ -12,3 +12,5 @@ jobs: secrets: inherit with: channel: latest/edge + charmcraft-channel: latest/edge + integration-test-workflow-file: test.yaml diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index bd1426c..302905c 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -8,5 +8,15 @@ jobs: uses: canonical/operator-workflows/.github/workflows/test.yaml@main secrets: inherit with: + charmcraft-channel: latest/edge + self-hosted-runner: true + self-hosted-runner-label: "edge" + integration-tests: + uses: canonical/operator-workflows/.github/workflows/integration_test.yaml@main + secrets: inherit + with: + channel: 1.28-strict/stable + charmcraft-channel: latest/edge + juju-channel: 3.4/stable self-hosted-runner: true self-hosted-runner-label: "edge" diff --git a/.licenserc.yaml b/.licenserc.yaml index ef7164e..afdab38 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -9,15 +9,29 @@ header: - '**' paths-ignore: - '.github/**' + - '**/.gitkeep' + - '**/*.cfg' + - '**/*.conf' + - '**/*.j2' - '**/*.json' - '**/*.md' + - '**/*.rule' + - '**/*.tmpl' - '**/*.txt' + - '.codespellignore' + - '.dockerignore' + - '.flake8' - '.jujuignore' - '.gitignore' - '.licenserc.yaml' + - '.trivyignore' + - '.woke.yaml' + - 
'.woke.yml' - 'CODEOWNERS' + - 'icon.svg' - 'LICENSE' - - 'trivy.yaml' - 'pyproject.toml' + - 'trivy.yaml' - 'zap_rules.tsv' + - 'lib/**' comment: on-failure diff --git a/.trivyignore b/.trivyignore new file mode 100644 index 0000000..a3574f5 --- /dev/null +++ b/.trivyignore @@ -0,0 +1,15 @@ +# penpot +CVE-2022-31159 +CVE-2020-36518 +CVE-2021-46877 +CVE-2022-42003 +CVE-2022-42004 +CVE-2022-25647 +CVE-2024-5971 +CVE-2024-6162 +CVE-2023-5685 +CVE-2021-37714 +CVE-2022-1471 +CVE-2024-21634 +# nodejs +CVE-2024-37890 diff --git a/.woke.yaml b/.woke.yaml new file mode 100644 index 0000000..df1c258 --- /dev/null +++ b/.woke.yaml @@ -0,0 +1,2 @@ +ignore_files: + - lib/charms/redis_k8s/v0/redis.py diff --git a/charmcraft.yaml b/charmcraft.yaml index 04d6a48..e4b49a1 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -1,17 +1,6 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. -type: charm -bases: - - build-on: - - name: ubuntu - channel: "22.04" - architectures: [amd64] - run-on: - - name: ubuntu - channel: "24.04" - architectures: [amd64] - name: penpot title: Penpot description: | @@ -24,3 +13,77 @@ links: source: https://github.com/canonical/penpot-operator contact: - https://launchpad.net/~canonical-is-devops + +config: + options: + smtp-from-address: + description: >- + The default "from" address penpot uses to send emails. + If unspecified, it is automatically derived from the SMTP integration, typically `@`, + or `no-reply@` if the SMTP username is not provided in the SMTP integration. + For more detailed information on SMTP integration, visit https://charmhub.io/smtp-integrator/configuration. + type: string + +actions: + create-profile: + description: Create a new penpot user. + params: + email: + type: string + fullname: + type: string + + delete-profile: + description: Delete an existing penpot user. 
+ params: + email: + type: string + +peers: + penpot_peer: + interface: penpot_peer + +requires: + postgresql: + interface: postgresql_client + limit: 1 + redis: + interface: redis + limit: 1 + s3: + interface: s3 + limit: 1 + ingress: + interface: ingress + limit: 1 + smtp: + interface: smtp + limit: 1 + optional: true + +resources: + penpot-image: + type: oci-image + description: OCI image for penpot + +containers: + penpot: + resource: penpot-image + +type: charm +base: ubuntu@24.04 +build-base: ubuntu@24.04 +platforms: + amd64: + +parts: + charm: + build-packages: + - cargo + - libffi-dev + - libssl-dev + - pkg-config + - rustc + +assumes: + - juju >= 3.4 diff --git a/config.yaml b/config.yaml deleted file mode 100644 index 2d47f78..0000000 --- a/config.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -options: - # An example config option to customise the log level of the workload - log-level: - description: | - Configures the log level of gunicorn. - - Acceptable values are: "info", "debug", "warning", "error" and "critical" - default: "info" - type: string diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py new file mode 100644 index 0000000..a2162aa --- /dev/null +++ b/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -0,0 +1,3495 @@ +# Copyright 2023 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +r"""Library to manage the relation for the data-platform products. + +This library contains the Requires and Provides classes for handling the relation +between an application and multiple managed application supported by the data-team: +MySQL, Postgresql, MongoDB, Redis, and Kafka. + +### Database (MySQL, Postgresql, MongoDB, and Redis) + +#### Requires Charm +This library is a uniform interface to a selection of common database +metadata, with added custom events that add convenience to database management, +and methods to consume the application related data. + + +Following an example of using the DatabaseCreatedEvent, in the context of the +application charm code: + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Charm events defined in the database requires charm library. + self.database = DatabaseRequires(self, relation_name="database", database_name="database") + self.framework.observe(self.database.on.database_created, self._on_database_created) + + def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + + # Start application with rendered configuration + self._start_application(config_file) + + # Set active status + self.unit.status = ActiveStatus("received database credentials") +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +- database_created: event emitted when the requested database is created. +- endpoints_changed: event emitted when the read/write endpoints of the database have changed. 
+- read_only_endpoints_changed: event emitted when the read-only endpoints of the database + have changed. Event is not triggered if read/write endpoints changed too. + +If it is needed to connect multiple database clusters to the same relation endpoint +the application charm can implement the same code as if it would connect to only +one database cluster (like the above code example). + +To differentiate multiple clusters connected to the same relation endpoint +the application charm can use the name of the remote application: + +```python + +def _on_database_created(self, event: DatabaseCreatedEvent) -> None: + # Get the remote app name of the cluster that triggered this event + cluster = event.relation.app.name +``` + +It is also possible to provide an alias for each different database cluster/relation. + +So, it is possible to differentiate the clusters in two ways. +The first is to use the remote application name, i.e., `event.relation.app.name`, as above. + +The second way is to use different event handlers to handle each cluster events. +The implementation would be something like the following code: + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + DatabaseCreatedEvent, + DatabaseRequires, +) + +class ApplicationCharm(CharmBase): + # Application charm that connects to database charms. + + def __init__(self, *args): + super().__init__(*args) + + # Define the cluster aliases and one handler for each cluster database created event. 
+ self.database = DatabaseRequires( + self, + relation_name="database", + database_name="database", + relations_aliases = ["cluster1", "cluster2"], + ) + self.framework.observe( + self.database.on.cluster1_database_created, self._on_cluster1_database_created + ) + self.framework.observe( + self.database.on.cluster2_database_created, self._on_cluster2_database_created + ) + + def _on_cluster1_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster1 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + + def _on_cluster2_database_created(self, event: DatabaseCreatedEvent) -> None: + # Handle the created database on the cluster named cluster2 + + # Create configuration file for app + config_file = self._render_app_config_file( + event.username, + event.password, + event.endpoints, + ) + ... + +``` + +When it's needed to check whether a plugin (extension) is enabled on the PostgreSQL +charm, you can use the is_postgresql_plugin_enabled method. To use that, you need to +add the following dependency to your charmcraft.yaml file: + +```yaml + +parts: + charm: + charm-binary-python-packages: + - psycopg[binary] + +``` + +### Provider Charm + +Following an example of using the DatabaseRequestedEvent, in the context of the +database charm code: + +```python +from charms.data_platform_libs.v0.data_interfaces import DatabaseProvides + +class SampleCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + # Charm events defined in the database provides charm library. 
+ self.provided_database = DatabaseProvides(self, relation_name="database") + self.framework.observe(self.provided_database.on.database_requested, + self._on_database_requested) + # Database generic helper + self.database = DatabaseHelper() + + def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: + # Handle the event triggered by a new database requested in the relation + # Retrieve the database name using the charm library. + db_name = event.database + # generate a new user credential + username = self.database.generate_user() + password = self.database.generate_password() + # set the credentials for the relation + self.provided_database.set_credentials(event.relation.id, username, password) + # set other variables for the relation event.set_tls("False") +``` +As shown above, the library provides a custom event (database_requested) to handle +the situation when an application charm requests a new database to be created. +It's preferred to subscribe to this event instead of relation changed event to avoid +creating a new database when other information other than a database name is +exchanged in the relation databag. + +### Kafka + +This library is the interface to use and interact with the Kafka charm. This library contains +custom events that add convenience to manage Kafka, and provides methods to consume the +application related data. 
+ +#### Requirer Charm + +```python + +from charms.data_platform_libs.v0.data_interfaces import ( + BootstrapServerChangedEvent, + KafkaRequires, + TopicCreatedEvent, +) + +class ApplicationCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.kafka = KafkaRequires(self, "kafka_client", "test-topic") + self.framework.observe( + self.kafka.on.bootstrap_server_changed, self._on_kafka_bootstrap_server_changed + ) + self.framework.observe( + self.kafka.on.topic_created, self._on_kafka_topic_created + ) + + def _on_kafka_bootstrap_server_changed(self, event: BootstrapServerChangedEvent): + # Event triggered when a bootstrap server was changed for this application + + new_bootstrap_server = event.bootstrap_server + ... + + def _on_kafka_topic_created(self, event: TopicCreatedEvent): + # Event triggered when a topic was created for this application + username = event.username + password = event.password + tls = event.tls + tls_ca= event.tls_ca + bootstrap_server event.bootstrap_server + consumer_group_prefic = event.consumer_group_prefix + zookeeper_uris = event.zookeeper_uris + ... + +``` + +As shown above, the library provides some custom events to handle specific situations, +which are listed below: + +- topic_created: event emitted when the requested topic is created. +- bootstrap_server_changed: event emitted when the bootstrap server have changed. +- credential_changed: event emitted when the credentials of Kafka changed. + +### Provider Charm + +Following the previous example, this is an example of the provider charm. + +```python +class SampleCharm(CharmBase): + +from charms.data_platform_libs.v0.data_interfaces import ( + KafkaProvides, + TopicRequestedEvent, +) + + def __init__(self, *args): + super().__init__(*args) + + # Default charm events. + self.framework.observe(self.on.start, self._on_start) + + # Charm events defined in the Kafka Provides charm library. 
+ self.kafka_provider = KafkaProvides(self, relation_name="kafka_client") + self.framework.observe(self.kafka_provider.on.topic_requested, self._on_topic_requested) + # Kafka generic helper + self.kafka = KafkaHelper() + + def _on_topic_requested(self, event: TopicRequestedEvent): + # Handle the on_topic_requested event. + + topic = event.topic + relation_id = event.relation.id + # set connection info in the databag relation + self.kafka_provider.set_bootstrap_server(relation_id, self.kafka.get_bootstrap_server()) + self.kafka_provider.set_credentials(relation_id, username=username, password=password) + self.kafka_provider.set_consumer_group_prefix(relation_id, ...) + self.kafka_provider.set_tls(relation_id, "False") + self.kafka_provider.set_zookeeper_uris(relation_id, ...) + +``` +As shown above, the library provides a custom event (topic_requested) to handle +the situation when an application charm requests a new topic to be created. +It is preferred to subscribe to this event instead of relation changed event to avoid +creating a new topic when other information other than a topic name is +exchanged in the relation databag. 
+""" + +import copy +import json +import logging +from abc import ABC, abstractmethod +from collections import UserDict, namedtuple +from datetime import datetime +from enum import Enum +from typing import ( + Callable, + Dict, + ItemsView, + KeysView, + List, + Optional, + Set, + Tuple, + Union, + ValuesView, +) + +from ops import JujuVersion, Model, Secret, SecretInfo, SecretNotFoundError +from ops.charm import ( + CharmBase, + CharmEvents, + RelationChangedEvent, + RelationCreatedEvent, + RelationEvent, + SecretChangedEvent, +) +from ops.framework import EventSource, Object +from ops.model import Application, ModelError, Relation, Unit + +# The unique Charmhub library identifier, never change it +LIBID = "6c3e6b6680d64e9c89e611d1a15f65be" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 38 + +PYDEPS = ["ops>=2.0.0"] + +logger = logging.getLogger(__name__) + +Diff = namedtuple("Diff", "added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. 
+ +added - keys that were added +changed - keys that still exist but have new values +deleted - key that were deleted""" + + +PROV_SECRET_PREFIX = "secret-" +REQ_SECRET_FIELDS = "requested-secrets" +GROUP_MAPPING_FIELD = "secret_group_mapping" +GROUP_SEPARATOR = "@" + + +class SecretGroup(str): + """Secret groups specific type.""" + + +class SecretGroupsAggregate(str): + """Secret groups with option to extend with additional constants.""" + + def __init__(self): + self.USER = SecretGroup("user") + self.TLS = SecretGroup("tls") + self.EXTRA = SecretGroup("extra") + + def __setattr__(self, name, value): + """Setting internal constants.""" + if name in self.__dict__: + raise RuntimeError("Can't set constant!") + else: + super().__setattr__(name, SecretGroup(value)) + + def groups(self) -> list: + """Return the list of stored SecretGroups.""" + return list(self.__dict__.values()) + + def get_group(self, group: str) -> Optional[SecretGroup]: + """If the input str translates to a group name, return that.""" + return SecretGroup(group) if group in self.groups() else None + + +SECRET_GROUPS = SecretGroupsAggregate() + + +class DataInterfacesError(Exception): + """Common ancestor for DataInterfaces related exceptions.""" + + +class SecretError(DataInterfacesError): + """Common ancestor for Secrets related exceptions.""" + + +class SecretAlreadyExistsError(SecretError): + """A secret that was to be added already exists.""" + + +class SecretsUnavailableError(SecretError): + """Secrets aren't yet available for Juju version used.""" + + +class SecretsIllegalUpdateError(SecretError): + """Secrets aren't yet available for Juju version used.""" + + +class IllegalOperationError(DataInterfacesError): + """To be used when an operation is not allowed to be performed.""" + + +def get_encoded_dict( + relation: Relation, member: Union[Unit, Application], field: str +) -> Optional[Dict[str, str]]: + """Retrieve and decode an encoded field from relation data.""" + data = 
json.loads(relation.data[member].get(field, "{}")) + if isinstance(data, dict): + return data + logger.error("Unexpected datatype for %s instead of dict.", str(data)) + + +def get_encoded_list( + relation: Relation, member: Union[Unit, Application], field: str +) -> Optional[List[str]]: + """Retrieve and decode an encoded field from relation data.""" + data = json.loads(relation.data[member].get(field, "[]")) + if isinstance(data, list): + return data + logger.error("Unexpected datatype for %s instead of list.", str(data)) + + +def set_encoded_field( + relation: Relation, + member: Union[Unit, Application], + field: str, + value: Union[str, list, Dict[str, str]], +) -> None: + """Set an encoded field from relation data.""" + relation.data[member].update({field: json.dumps(value)}) + + +def diff(event: RelationChangedEvent, bucket: Optional[Union[Unit, Application]]) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + bucket: bucket of the databag (app or unit) + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the application relation databag. + if not bucket: + return Diff([], [], []) + + old_data = get_encoded_dict(event.relation, bucket, "data") + + if not old_data: + old_data = {} + + # Retrieve the new data from the event relation databag. + new_data = ( + {key: value for key, value in event.relation.data[event.app].items() if key != "data"} + if event.app + else {} + ) + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() # pyright: ignore [reportAssignmentType] + # These are the keys that were removed from the databag and triggered this event. 
+ deleted = old_data.keys() - new_data.keys() # pyright: ignore [reportAssignmentType] + # These are the keys that already existed in the databag, + # but had their values changed. + changed = { + key + for key in old_data.keys() & new_data.keys() # pyright: ignore [reportAssignmentType] + if old_data[key] != new_data[key] # pyright: ignore [reportAssignmentType] + } + # Convert the new_data to a serializable format and save it for a next diff check. + set_encoded_field(event.relation, bucket, "data", new_data) + + # Return the diff with all possible changes. + return Diff(added, changed, deleted) + + +def leader_only(f): + """Decorator to ensure that only leader can perform given operation.""" + + def wrapper(self, *args, **kwargs): + if self.component == self.local_app and not self.local_unit.is_leader(): + logger.error( + "This operation (%s()) can only be performed by the leader unit", f.__name__ + ) + return + return f(self, *args, **kwargs) + + wrapper.leader_only = True + return wrapper + + +def juju_secrets_only(f): + """Decorator to ensure that certain operations would be only executed on Juju3.""" + + def wrapper(self, *args, **kwargs): + if not self.secrets_enabled: + raise SecretsUnavailableError("Secrets unavailable on current Juju version") + return f(self, *args, **kwargs) + + return wrapper + + +def dynamic_secrets_only(f): + """Decorator to ensure that certain operations would be only executed when NO static secrets are defined.""" + + def wrapper(self, *args, **kwargs): + if self.static_secret_fields: + raise IllegalOperationError( + "Unsafe usage of statically and dynamically defined secrets, aborting." 
+ ) + return f(self, *args, **kwargs) + + return wrapper + + +def either_static_or_dynamic_secrets(f): + """Decorator to ensure that static and dynamic secrets won't be used in parallel.""" + + def wrapper(self, *args, **kwargs): + if self.static_secret_fields and set(self.current_secret_fields) - set( + self.static_secret_fields + ): + raise IllegalOperationError( + "Unsafe usage of statically and dynamically defined secrets, aborting." + ) + return f(self, *args, **kwargs) + + return wrapper + + +class Scope(Enum): + """Peer relations scope.""" + + APP = "app" + UNIT = "unit" + + +################################################################################ +# Secrets internal caching +################################################################################ + + +class CachedSecret: + """Locally cache a secret. + + The data structure is precisely re-using/simulating as in the actual Secret Storage + """ + + def __init__( + self, + model: Model, + component: Union[Application, Unit], + label: str, + secret_uri: Optional[str] = None, + legacy_labels: List[str] = [], + ): + self._secret_meta = None + self._secret_content = {} + self._secret_uri = secret_uri + self.label = label + self._model = model + self.component = component + self.legacy_labels = legacy_labels + self.current_label = None + + def add_secret( + self, + content: Dict[str, str], + relation: Optional[Relation] = None, + label: Optional[str] = None, + ) -> Secret: + """Create a new secret.""" + if self._secret_uri: + raise SecretAlreadyExistsError( + "Secret is already defined with uri %s", self._secret_uri + ) + + label = self.label if not label else label + + secret = self.component.add_secret(content, label=label) + if relation and relation.app != self._model.app: + # If it's not a peer relation, grant is to be applied + secret.grant(relation) + self._secret_uri = secret.id + self._secret_meta = secret + return self._secret_meta + + @property + def meta(self) -> Optional[Secret]: + 
"""Getting cached secret meta-information.""" + if not self._secret_meta: + if not (self._secret_uri or self.label): + return + + for label in [self.label] + self.legacy_labels: + try: + self._secret_meta = self._model.get_secret(label=label) + except SecretNotFoundError: + pass + else: + if label != self.label: + self.current_label = label + break + + # If still not found, to be checked by URI, to be labelled with the proposed label + if not self._secret_meta and self._secret_uri: + self._secret_meta = self._model.get_secret(id=self._secret_uri, label=self.label) + return self._secret_meta + + def get_content(self) -> Dict[str, str]: + """Getting cached secret content.""" + if not self._secret_content: + if self.meta: + try: + self._secret_content = self.meta.get_content(refresh=True) + except (ValueError, ModelError) as err: + # https://bugs.launchpad.net/juju/+bug/2042596 + # Only triggered when 'refresh' is set + known_model_errors = [ + "ERROR either URI or label should be used for getting an owned secret but not both", + "ERROR secret owner cannot use --refresh", + ] + if isinstance(err, ModelError) and not any( + msg in str(err) for msg in known_model_errors + ): + raise + # Due to: ValueError: Secret owner cannot use refresh=True + self._secret_content = self.meta.get_content() + return self._secret_content + + def _move_to_new_label_if_needed(self): + """Helper function to re-create the secret with a different label.""" + if not self.current_label or not (self.meta and self._secret_meta): + return + + # Create a new secret with the new label + content = self._secret_meta.get_content() + self._secret_uri = None + + # I wish we could just check if we are the owners of the secret... 
+ try: + self._secret_meta = self.add_secret(content, label=self.label) + except ModelError as err: + if "this unit is not the leader" not in str(err): + raise + self.current_label = None + + def set_content(self, content: Dict[str, str]) -> None: + """Setting cached secret content.""" + if not self.meta: + return + + # DPE-4182: do not create new revision if the content stay the same + if content == self.get_content(): + return + + if content: + self._move_to_new_label_if_needed() + self.meta.set_content(content) + self._secret_content = content + else: + self.meta.remove_all_revisions() + + def get_info(self) -> Optional[SecretInfo]: + """Wrapper function to apply the corresponding call on the Secret object within CachedSecret if any.""" + if self.meta: + return self.meta.get_info() + + def remove(self) -> None: + """Remove secret.""" + if not self.meta: + raise SecretsUnavailableError("Non-existent secret was attempted to be removed.") + try: + self.meta.remove_all_revisions() + except SecretNotFoundError: + pass + self._secret_content = {} + self._secret_meta = None + self._secret_uri = None + + +class SecretCache: + """A data structure storing CachedSecret objects.""" + + def __init__(self, model: Model, component: Union[Application, Unit]): + self._model = model + self.component = component + self._secrets: Dict[str, CachedSecret] = {} + + def get( + self, label: str, uri: Optional[str] = None, legacy_labels: List[str] = [] + ) -> Optional[CachedSecret]: + """Getting a secret from Juju Secret store or cache.""" + if not self._secrets.get(label): + secret = CachedSecret( + self._model, self.component, label, uri, legacy_labels=legacy_labels + ) + if secret.meta: + self._secrets[label] = secret + return self._secrets.get(label) + + def add(self, label: str, content: Dict[str, str], relation: Relation) -> CachedSecret: + """Adding a secret to Juju Secret.""" + if self._secrets.get(label): + raise SecretAlreadyExistsError(f"Secret {label} already exists") + + 
secret = CachedSecret(self._model, self.component, label) + secret.add_secret(content, relation) + self._secrets[label] = secret + return self._secrets[label] + + def remove(self, label: str) -> None: + """Remove a secret from the cache.""" + if secret := self.get(label): + try: + secret.remove() + self._secrets.pop(label) + except (SecretsUnavailableError, KeyError): + pass + else: + return + logging.debug("Non-existing Juju Secret was attempted to be removed %s", label) + + +################################################################################ +# Relation Data base/abstract ancestors (i.e. parent classes) +################################################################################ + + +# Base Data + + +class DataDict(UserDict): + """Python Standard Library 'dict' - like representation of Relation Data.""" + + def __init__(self, relation_data: "Data", relation_id: int): + self.relation_data = relation_data + self.relation_id = relation_id + + @property + def data(self) -> Dict[str, str]: + """Return the full content of the Abstract Relation Data dictionary.""" + result = self.relation_data.fetch_my_relation_data([self.relation_id]) + try: + result_remote = self.relation_data.fetch_relation_data([self.relation_id]) + except NotImplementedError: + result_remote = {self.relation_id: {}} + if result: + result_remote[self.relation_id].update(result[self.relation_id]) + return result_remote.get(self.relation_id, {}) + + def __setitem__(self, key: str, item: str) -> None: + """Set an item of the Abstract Relation Data dictionary.""" + self.relation_data.update_relation_data(self.relation_id, {key: item}) + + def __getitem__(self, key: str) -> str: + """Get an item of the Abstract Relation Data dictionary.""" + result = None + + # Avoiding "leader_only" error when cross-charm non-leader unit, not to report useless error + if ( + not hasattr(self.relation_data.fetch_my_relation_field, "leader_only") + or self.relation_data.component != 
self.relation_data.local_app + or self.relation_data.local_unit.is_leader() + ): + result = self.relation_data.fetch_my_relation_field(self.relation_id, key) + + if not result: + try: + result = self.relation_data.fetch_relation_field(self.relation_id, key) + except NotImplementedError: + pass + + if not result: + raise KeyError + return result + + def __eq__(self, d: dict) -> bool: + """Equality.""" + return self.data == d + + def __repr__(self) -> str: + """String representation Abstract Relation Data dictionary.""" + return repr(self.data) + + def __len__(self) -> int: + """Length of the Abstract Relation Data dictionary.""" + return len(self.data) + + def __delitem__(self, key: str) -> None: + """Delete an item of the Abstract Relation Data dictionary.""" + self.relation_data.delete_relation_data(self.relation_id, [key]) + + def has_key(self, key: str) -> bool: + """Does the key exist in the Abstract Relation Data dictionary?""" + return key in self.data + + def update(self, items: Dict[str, str]): + """Update the Abstract Relation Data dictionary.""" + self.relation_data.update_relation_data(self.relation_id, items) + + def keys(self) -> KeysView[str]: + """Keys of the Abstract Relation Data dictionary.""" + return self.data.keys() + + def values(self) -> ValuesView[str]: + """Values of the Abstract Relation Data dictionary.""" + return self.data.values() + + def items(self) -> ItemsView[str, str]: + """Items of the Abstract Relation Data dictionary.""" + return self.data.items() + + def pop(self, item: str) -> str: + """Pop an item of the Abstract Relation Data dictionary.""" + result = self.relation_data.fetch_my_relation_field(self.relation_id, item) + if not result: + raise KeyError(f"Item {item} doesn't exist.") + self.relation_data.delete_relation_data(self.relation_id, [item]) + return result + + def __contains__(self, item: str) -> bool: + """Does the Abstract Relation Data dictionary contain item?""" + return item in self.data.values() + + def 
__iter__(self): + """Iterate through the Abstract Relation Data dictionary.""" + return iter(self.data) + + def get(self, key: str, default: Optional[str] = None) -> Optional[str]: + """Safely get an item of the Abstract Relation Data dictionary.""" + try: + if result := self[key]: + return result + except KeyError: + return default + + +class Data(ABC): + """Base relation data mainpulation (abstract) class.""" + + SCOPE = Scope.APP + + # Local map to associate mappings with secrets potentially as a group + SECRET_LABEL_MAP = { + "username": SECRET_GROUPS.USER, + "password": SECRET_GROUPS.USER, + "uris": SECRET_GROUPS.USER, + "tls": SECRET_GROUPS.TLS, + "tls-ca": SECRET_GROUPS.TLS, + } + + def __init__( + self, + model: Model, + relation_name: str, + ) -> None: + self._model = model + self.local_app = self._model.app + self.local_unit = self._model.unit + self.relation_name = relation_name + self._jujuversion = None + self.component = self.local_app if self.SCOPE == Scope.APP else self.local_unit + self.secrets = SecretCache(self._model, self.component) + self.data_component = None + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return [ + relation + for relation in self._model.relations[self.relation_name] + if self._is_relation_active(relation) + ] + + @property + def secrets_enabled(self): + """Is this Juju version allowing for Secrets usage?""" + if not self._jujuversion: + self._jujuversion = JujuVersion.from_environ() + return self._jujuversion.has_secrets + + @property + def secret_label_map(self): + """Exposing secret-label map via a property -- could be overridden in descendants!""" + return self.SECRET_LABEL_MAP + + # Mandatory overrides for internal/helper methods + + @abstractmethod + def _get_relation_secret( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been 
stored in the relation databag.""" + raise NotImplementedError + + @abstractmethod + def _fetch_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. secrets) from the relation.""" + raise NotImplementedError + + @abstractmethod + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + @abstractmethod + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Update data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + @abstractmethod + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + raise NotImplementedError + + # Internal helper methods + + @staticmethod + def _is_relation_active(relation: Relation): + """Whether the relation is active based on contained data.""" + try: + _ = repr(relation.data) + return True + except (RuntimeError, ModelError): + return False + + @staticmethod + def _is_secret_field(field: str) -> bool: + """Is the field in question a secret reference (URI) field or not?""" + return field.startswith(PROV_SECRET_PREFIX) + + @staticmethod + def _generate_secret_label( + relation_name: str, relation_id: int, group_mapping: SecretGroup + ) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{relation_name}.{relation_id}.{group_mapping}.secret" + + def _generate_secret_field_name(self, group_mapping: SecretGroup) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{PROV_SECRET_PREFIX}{group_mapping}" + + def 
_relation_from_secret_label(self, secret_label: str) -> Optional[Relation]: + """Retrieve the relation that belongs to a secret label.""" + contents = secret_label.split(".") + + if not (contents and len(contents) >= 3): + return + + contents.pop() # ".secret" at the end + contents.pop() # Group mapping + relation_id = contents.pop() + try: + relation_id = int(relation_id) + except ValueError: + return + + # In case '.' character appeared in relation name + relation_name = ".".join(contents) + + try: + return self.get_relation(relation_name, relation_id) + except ModelError: + return + + def _group_secret_fields(self, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + """Helper function to arrange secret mappings under their group. + + NOTE: All unrecognized items end up in the 'extra' secret bucket. + Make sure only secret fields are passed! + """ + secret_fieldnames_grouped = {} + for key in secret_fields: + if group := self.secret_label_map.get(key): + secret_fieldnames_grouped.setdefault(group, []).append(key) + else: + secret_fieldnames_grouped.setdefault(SECRET_GROUPS.EXTRA, []).append(key) + return secret_fieldnames_grouped + + def _get_group_secret_contents( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Union[Set[str], List[str]] = [], + ) -> Dict[str, str]: + """Helper function to retrieve collective, requested contents of a secret.""" + if (secret := self._get_relation_secret(relation.id, group)) and ( + secret_data := secret.get_content() + ): + return { + k: v for k, v in secret_data.items() if not secret_fields or k in secret_fields + } + return {} + + def _content_for_secret_group( + self, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup + ) -> Dict[str, str]: + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SECRET_GROUPS.EXTRA: + return { + k: v + for k, v in content.items() + if k in secret_fields and k not in 
self.secret_label_map.keys() + } + + return { + k: v + for k, v in content.items() + if k in secret_fields and self.secret_label_map.get(k) == group_mapping + } + + @juju_secrets_only + def _get_relation_secret_data( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[Dict[str, str]]: + """Retrieve contents of a Juju Secret that's been stored in the relation databag.""" + secret = self._get_relation_secret(relation_id, group_mapping, relation_name) + if secret: + return secret.get_content() + + # Core operations on Relation Fields manipulations (regardless whether the field is in the databag or in a secret) + # Internal functions to be called directly from transparent public interface functions (+closely related helpers) + + def _process_secret_fields( + self, + relation: Relation, + req_secret_fields: Optional[List[str]], + impacted_rel_fields: List[str], + operation: Callable, + *args, + **kwargs, + ) -> Tuple[Dict[str, str], Set[str]]: + """Isolate target secret fields of manipulation, and execute requested operation by Secret Group.""" + result = {} + + # If the relation started on a databag, we just stay on the databag + # (Rolling upgrades may result in a relation starting on databag, getting secrets enabled on-the-fly) + # self.local_app is sufficient to check (ignored if Requires, never has secrets -- works if Provider) + fallback_to_databag = ( + req_secret_fields + and (self.local_unit == self._model.unit and self.local_unit.is_leader()) + and set(req_secret_fields) & set(relation.data[self.component]) + ) + + normal_fields = set(impacted_rel_fields) + if req_secret_fields and self.secrets_enabled and not fallback_to_databag: + normal_fields = normal_fields - set(req_secret_fields) + secret_fields = set(impacted_rel_fields) - set(normal_fields) + + secret_fieldnames_grouped = self._group_secret_fields(list(secret_fields)) + + for group in secret_fieldnames_grouped: + # operation() should return nothing 
when all goes well + if group_result := operation(relation, group, secret_fields, *args, **kwargs): + # If "meaningful" data was returned, we take it. (Some 'operation'-s only return success/failure.) + if isinstance(group_result, dict): + result.update(group_result) + else: + # If it wasn't found as a secret, let's give it a 2nd chance as "normal" field + # Needed when Juju3 Requires meets Juju2 Provider + normal_fields |= set(secret_fieldnames_grouped[group]) + return (result, normal_fields) + + def _fetch_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetching databag contents when no secrets are involved. + + Since the Provider's databag is the only one holding secrest, we can apply + a simplified workflow to read the Require's side's databag. + This is used typically when the Provider side wants to read the Requires side's data, + or when the Requires side may want to read its own data. + """ + if component not in relation.data or not relation.data[component]: + return {} + + if fields: + return { + k: relation.data[component][k] for k in fields if k in relation.data[component] + } + else: + return dict(relation.data[component]) + + def _fetch_relation_data_with_secrets( + self, + component: Union[Application, Unit], + req_secret_fields: Optional[List[str]], + relation: Relation, + fields: Optional[List[str]] = None, + ) -> Dict[str, str]: + """Fetching databag contents when secrets may be involved. + + This function has internal logic to resolve if a requested field may be "hidden" + within a Relation Secret, or directly available as a databag field. Typically + used to read the Provider side's databag (eigher by the Requires side, or by + Provider side itself). 
+ """ + result = {} + normal_fields = [] + + if not fields: + if component not in relation.data: + return {} + + all_fields = list(relation.data[component].keys()) + normal_fields = [field for field in all_fields if not self._is_secret_field(field)] + fields = normal_fields + req_secret_fields if req_secret_fields else normal_fields + + if fields: + result, normal_fields = self._process_secret_fields( + relation, req_secret_fields, fields, self._get_group_secret_contents + ) + + # Processing "normal" fields. May include leftover from what we couldn't retrieve as a secret. + # (Typically when Juju3 Requires meets Juju2 Provider) + if normal_fields: + result.update( + self._fetch_relation_data_without_secrets(component, relation, list(normal_fields)) + ) + return result + + def _update_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, data: Dict[str, str] + ) -> None: + """Updating databag contents when no secrets are involved.""" + if component not in relation.data or relation.data[component] is None: + return + + if relation: + relation.data[component].update(data) + + def _delete_relation_data_without_secrets( + self, component: Union[Application, Unit], relation: Relation, fields: List[str] + ) -> None: + """Remove databag fields 'fields' from Relation.""" + if component not in relation.data or relation.data[component] is None: + return + + for field in fields: + try: + relation.data[component].pop(field) + except KeyError: + logger.debug( + "Non-existing field '%s' was attempted to be removed from the databag (relation ID: %s)", + str(field), + str(relation.id), + ) + pass + + # Public interface methods + # Handling Relation Fields seamlessly, regardless if in databag or a Juju Secret + + def as_dict(self, relation_id: int) -> UserDict: + """Dict behavior representation of the Abstract Data.""" + return DataDict(self, relation_id) + + def get_relation(self, relation_name, relation_id) -> Relation: + """Safe way of 
retrieving a relation.""" + relation = self._model.get_relation(relation_name, relation_id) + + if not relation: + raise DataInterfacesError( + "Relation %s %s couldn't be retrieved", relation_name, relation_id + ) + + return relation + + def fetch_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Dict[int, Dict[str, str]]: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + Function cannot be used in `*-relation-broken` events and will raise an exception. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation ID). + """ + if not relation_name: + relation_name = self.relation_name + + relations = [] + if relation_ids: + relations = [ + self.get_relation(relation_name, relation_id) for relation_id in relation_ids + ] + else: + relations = self.relations + + data = {} + for relation in relations: + if not relation_ids or (relation_ids and relation.id in relation_ids): + data[relation.id] = self._fetch_specific_relation_data(relation, fields) + return data + + def fetch_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """Get a single field from the relation data.""" + return ( + self.fetch_relation_data([relation_id], [field], relation_name) + .get(relation_id, {}) + .get(field) + ) + + def fetch_my_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Optional[Dict[int, Dict[str, str]]]: + """Fetch data of the 'owner' (or 'this app') side of the relation. 
+ + NOTE: Since only the leader can read the relation's 'this_app'-side + Application databag, the functionality is limited to leaders + """ + if not relation_name: + relation_name = self.relation_name + + relations = [] + if relation_ids: + relations = [ + self.get_relation(relation_name, relation_id) for relation_id in relation_ids + ] + else: + relations = self.relations + + data = {} + for relation in relations: + if not relation_ids or relation.id in relation_ids: + data[relation.id] = self._fetch_my_specific_relation_data(relation, fields) + return data + + def fetch_my_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """Get a single field from the relation data -- owner side. + + NOTE: Since only the leader can read the relation's 'this_app'-side + Application databag, the functionality is limited to leaders + """ + if relation_data := self.fetch_my_relation_data([relation_id], [field], relation_name): + return relation_data.get(relation_id, {}).get(field) + + @leader_only + def update_relation_data(self, relation_id: int, data: dict) -> None: + """Update the data within the relation.""" + relation_name = self.relation_name + relation = self.get_relation(relation_name, relation_id) + return self._update_relation_data(relation, data) + + @leader_only + def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: + """Remove field from the relation.""" + relation_name = self.relation_name + relation = self.get_relation(relation_name, relation_id) + return self._delete_relation_data(relation, fields) + + +class EventHandlers(Object): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: Data, unique_key: str = ""): + """Manager of base client relations.""" + if not unique_key: + unique_key = relation_data.relation_name + super().__init__(charm, unique_key) + + self.charm = charm + self.relation_data = relation_data + + 
self.framework.observe( + charm.on[self.relation_data.relation_name].relation_changed, + self._on_relation_changed_event, + ) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + return diff(event, self.relation_data.data_component) + + @abstractmethod + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + +# Base ProviderData and RequiresData + + +class ProviderData(Data): + """Base provides-side of the data products relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + ) -> None: + super().__init__(model, relation_name) + self.data_component = self.local_app + + # Private methods handling secrets + + @juju_secrets_only + def _add_relation_secret( + self, + relation: Relation, + group_mapping: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + uri_to_databag=True, + ) -> bool: + """Add a new Juju Secret that will be registered in the relation databag.""" + secret_field = self._generate_secret_field_name(group_mapping) + if uri_to_databag and relation.data[self.component].get(secret_field): + logging.error("Secret for relation %s already exists, not adding again", relation.id) + return False + + content = self._content_for_secret_group(data, secret_fields, group_mapping) + + label = self._generate_secret_label(self.relation_name, relation.id, group_mapping) + secret = self.secrets.add(label, content, relation) + + # According to lint we may not have a Secret ID + if uri_to_databag and secret.meta and secret.meta.id: + relation.data[self.component][secret_field] = secret.meta.id + + # Return the content that was added + return True + + @juju_secrets_only + def 
_update_relation_secret( + self, + relation: Relation, + group_mapping: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + ) -> bool: + """Update the contents of an existing Juju Secret, referred in the relation databag.""" + secret = self._get_relation_secret(relation.id, group_mapping) + + if not secret: + logging.error("Can't update secret for relation %s", relation.id) + return False + + content = self._content_for_secret_group(data, secret_fields, group_mapping) + + old_content = secret.get_content() + full_content = copy.deepcopy(old_content) + full_content.update(content) + secret.set_content(full_content) + + # Return True on success + return True + + def _add_or_update_relation_secrets( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Set[str], + data: Dict[str, str], + uri_to_databag=True, + ) -> bool: + """Update contents for Secret group. If the Secret doesn't exist, create it.""" + if self._get_relation_secret(relation.id, group): + return self._update_relation_secret(relation, group, secret_fields, data) + else: + return self._add_relation_secret(relation, group, secret_fields, data, uri_to_databag) + + @juju_secrets_only + def _delete_relation_secret( + self, relation: Relation, group: SecretGroup, secret_fields: List[str], fields: List[str] + ) -> bool: + """Update the contents of an existing Juju Secret, referred in the relation databag.""" + secret = self._get_relation_secret(relation.id, group) + + if not secret: + logging.error("Can't delete secret for relation %s", str(relation.id)) + return False + + old_content = secret.get_content() + new_content = copy.deepcopy(old_content) + for field in fields: + try: + new_content.pop(field) + except KeyError: + logging.debug( + "Non-existing secret was attempted to be removed %s, %s", + str(relation.id), + str(field), + ) + return False + + # Remove secret from the relation if it's fully gone + if not new_content: + field = self._generate_secret_field_name(group) + 
try: + relation.data[self.component].pop(field) + except KeyError: + pass + label = self._generate_secret_label(self.relation_name, relation.id, group) + self.secrets.remove(label) + else: + secret.set_content(new_content) + + # Return the content that was removed + return True + + # Mandatory internal overrides + + @juju_secrets_only + def _get_relation_secret( + self, relation_id: int, group_mapping: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + if not relation_name: + relation_name = self.relation_name + + label = self._generate_secret_label(relation_name, relation_id, group_mapping) + if secret := self.secrets.get(label): + return secret + + relation = self._model.get_relation(relation_name, relation_id) + if not relation: + return + + secret_field = self._generate_secret_field_name(group_mapping) + if secret_uri := relation.data[self.local_app].get(secret_field): + return self.secrets.get(label, secret_uri) + + def _fetch_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetching relation data for Provider. 
+ + NOTE: Since all secret fields are in the Provider side of the databag, we don't need to worry about that + """ + if not relation.app: + return {} + + return self._fetch_relation_data_without_secrets(relation.app, relation, fields) + + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> dict: + """Fetching our own relation data.""" + secret_fields = None + if relation.app: + secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + return self._fetch_relation_data_with_secrets( + self.local_app, + secret_fields, + relation, + fields, + ) + + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Set values for fields not caring whether it's a secret or not.""" + req_secret_fields = [] + if relation.app: + req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + _, normal_fields = self._process_secret_fields( + relation, + req_secret_fields, + list(data), + self._add_or_update_relation_secrets, + data=data, + ) + + normal_content = {k: v for k, v in data.items() if k in normal_fields} + self._update_relation_data_without_secrets(self.local_app, relation, normal_content) + + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete fields from the Relation not caring whether it's a secret or not.""" + req_secret_fields = [] + if relation.app: + req_secret_fields = get_encoded_list(relation, relation.app, REQ_SECRET_FIELDS) + + _, normal_fields = self._process_secret_fields( + relation, req_secret_fields, fields, self._delete_relation_secret, fields=fields + ) + self._delete_relation_data_without_secrets(self.local_app, relation, list(normal_fields)) + + # Public methods - "native" + + def set_credentials(self, relation_id: int, username: str, password: str) -> None: + """Set credentials. + + This function writes in the application data bag, therefore, + only the leader unit can call it. 
+ + Args: + relation_id: the identifier for a particular relation. + username: user that was created. + password: password of the created user. + """ + self.update_relation_data(relation_id, {"username": username, "password": password}) + + def set_tls(self, relation_id: int, tls: str) -> None: + """Set whether TLS is enabled. + + Args: + relation_id: the identifier for a particular relation. + tls: whether tls is enabled (True or False). + """ + self.update_relation_data(relation_id, {"tls": tls}) + + def set_tls_ca(self, relation_id: int, tls_ca: str) -> None: + """Set the TLS CA in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + tls_ca: TLS certification authority. + """ + self.update_relation_data(relation_id, {"tls-ca": tls_ca}) + + # Public functions -- inherited + + fetch_my_relation_data = leader_only(Data.fetch_my_relation_data) + fetch_my_relation_field = leader_only(Data.fetch_my_relation_field) + + +class RequirerData(Data): + """Requirer-side of the relation.""" + + SECRET_FIELDS = ["username", "password", "tls", "tls-ca", "uris"] + + def __init__( + self, + model, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of base client relations.""" + super().__init__(model, relation_name) + self.extra_user_roles = extra_user_roles + self._secret_fields = list(self.SECRET_FIELDS) + if additional_secret_fields: + self._secret_fields += additional_secret_fields + self.data_component = self.local_unit + + @property + def secret_fields(self) -> Optional[List[str]]: + """Local access to secrets field, in case they are being used.""" + if self.secrets_enabled: + return self._secret_fields + + # Internal helper functions + + def _register_secret_to_relation( + self, relation_name: str, relation_id: int, secret_id: str, group: SecretGroup + ): + """Fetch secrets and apply local label on them. 
+ + [MAGIC HERE] + If we fetch a secret using get_secret(id=, label=), + then will be "stuck" on the Secret object, whenever it may + appear (i.e. as an event attribute, or fetched manually) on future occasions. + + This will allow us to uniquely identify the secret on Provider side (typically on + 'secret-changed' events), and map it to the corresponding relation. + """ + label = self._generate_secret_label(relation_name, relation_id, group) + + # Fetching the Secret's meta information ensuring that it's locally getting registered with + CachedSecret(self._model, self.component, label, secret_id).meta + + def _register_secrets_to_relation(self, relation: Relation, params_name_list: List[str]): + """Make sure that secrets of the provided list are locally 'registered' from the databag. + + More on 'locally registered' magic is described in _register_secret_to_relation() method + """ + if not relation.app: + return + + for group in SECRET_GROUPS.groups(): + secret_field = self._generate_secret_field_name(group) + if secret_field in params_name_list: + if secret_uri := relation.data[relation.app].get(secret_field): + self._register_secret_to_relation( + relation.name, relation.id, secret_uri, group + ) + + def _is_resource_created_for_relation(self, relation: Relation) -> bool: + if not relation.app: + return False + + data = self.fetch_relation_data([relation.id], ["username", "password"]).get( + relation.id, {} + ) + return bool(data.get("username")) and bool(data.get("password")) + + def is_resource_created(self, relation_id: Optional[int] = None) -> bool: + """Check if the resource has been created. + + This function can be used to check if the Provider answered with data in the charm code + when outside an event callback. 
+ + Args: + relation_id (int, optional): When provided the check is done only for the relation id + provided, otherwise the check is done for all relations + + Returns: + True or False + + Raises: + IndexError: If relation_id is provided but that relation does not exist + """ + if relation_id is not None: + try: + relation = [relation for relation in self.relations if relation.id == relation_id][ + 0 + ] + return self._is_resource_created_for_relation(relation) + except IndexError: + raise IndexError(f"relation id {relation_id} cannot be accessed") + else: + return ( + all( + self._is_resource_created_for_relation(relation) for relation in self.relations + ) + if self.relations + else False + ) + + # Mandatory internal overrides + + @juju_secrets_only + def _get_relation_secret( + self, relation_id: int, group: SecretGroup, relation_name: Optional[str] = None + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret that's been stored in the relation databag.""" + if not relation_name: + relation_name = self.relation_name + + label = self._generate_secret_label(relation_name, relation_id, group) + return self.secrets.get(label) + + def _fetch_specific_relation_data( + self, relation, fields: Optional[List[str]] = None + ) -> Dict[str, str]: + """Fetching Requirer data -- that may include secrets.""" + if not relation.app: + return {} + return self._fetch_relation_data_with_secrets( + relation.app, self.secret_fields, relation, fields + ) + + def _fetch_my_specific_relation_data(self, relation, fields: Optional[List[str]]) -> dict: + """Fetching our own relation data.""" + return self._fetch_relation_data_without_secrets(self.local_app, relation, fields) + + def _update_relation_data(self, relation: Relation, data: dict) -> None: + """Updates a set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation: the particular relation. 
+ data: dict containing the key-value pairs + that should be updated in the relation. + """ + return self._update_relation_data_without_secrets(self.local_app, relation, data) + + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Deletes a set of fields from the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation: the particular relation. + fields: list containing the field names that should be removed from the relation. + """ + return self._delete_relation_data_without_secrets(self.local_app, relation, fields) + + # Public functions -- inherited + + fetch_my_relation_data = leader_only(Data.fetch_my_relation_data) + fetch_my_relation_field = leader_only(Data.fetch_my_relation_field) + + +class RequirerEventHandlers(EventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: RequirerData, unique_key: str = ""): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + + self.framework.observe( + self.charm.on[relation_data.relation_name].relation_created, + self._on_relation_created_event, + ) + self.framework.observe( + charm.on.secret_changed, + self._on_secret_changed_event, + ) + + # Event handlers + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the relation is created.""" + if not self.relation_data.local_unit.is_leader(): + return + + if self.relation_data.secret_fields: # pyright: ignore [reportAttributeAccessIssue] + set_encoded_field( + event.relation, + self.relation_data.component, + REQ_SECRET_FIELDS, + self.relation_data.secret_fields, # pyright: ignore [reportAttributeAccessIssue] + ) + + @abstractmethod + def _on_secret_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation data has changed.""" + raise NotImplementedError + + 
+################################################################################ +# Peer Relation Data +################################################################################ + + +class DataPeerData(RequirerData, ProviderData): + """Represents peer relations data.""" + + SECRET_FIELDS = [] + SECRET_FIELD_NAME = "internal_secret" + SECRET_LABEL_MAP = {} + + def __init__( + self, + model, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + ): + """Manager of base client relations.""" + RequirerData.__init__( + self, + model, + relation_name, + extra_user_roles, + additional_secret_fields, + ) + self.secret_field_name = secret_field_name if secret_field_name else self.SECRET_FIELD_NAME + self.deleted_label = deleted_label + self._secret_label_map = {} + # Secrets that are being dynamically added within the scope of this event handler run + self._new_secrets = [] + self._additional_secret_group_mapping = additional_secret_group_mapping + + for group, fields in additional_secret_group_mapping.items(): + if group not in SECRET_GROUPS.groups(): + setattr(SECRET_GROUPS, group, group) + for field in fields: + secret_group = SECRET_GROUPS.get_group(group) + internal_field = self._field_to_internal_name(field, secret_group) + self._secret_label_map.setdefault(group, []).append(internal_field) + self._secret_fields.append(internal_field) + + @property + def scope(self) -> Optional[Scope]: + """Turn component information into Scope.""" + if isinstance(self.component, Application): + return Scope.APP + if isinstance(self.component, Unit): + return Scope.UNIT + + @property + def secret_label_map(self) -> Dict[str, str]: + """Property storing secret mappings.""" + return self._secret_label_map + + @property + def static_secret_fields(self) -> List[str]: + 
"""Re-definition of the property in a way that dynamically extended list is retrieved.""" + return self._secret_fields + + @property + def secret_fields(self) -> List[str]: + """Re-definition of the property in a way that dynamically extended list is retrieved.""" + return ( + self.static_secret_fields if self.static_secret_fields else self.current_secret_fields + ) + + @property + def current_secret_fields(self) -> List[str]: + """Helper method to get all currently existing secret fields (added statically or dynamically).""" + if not self.secrets_enabled: + return [] + + if len(self._model.relations[self.relation_name]) > 1: + raise ValueError(f"More than one peer relation on {self.relation_name}") + + relation = self._model.relations[self.relation_name][0] + fields = [] + + ignores = [SECRET_GROUPS.get_group("user"), SECRET_GROUPS.get_group("tls")] + for group in SECRET_GROUPS.groups(): + if group in ignores: + continue + if content := self._get_group_secret_contents(relation, group): + fields += list(content.keys()) + return list(set(fields) | set(self._new_secrets)) + + @dynamic_secrets_only + def set_secret( + self, + relation_id: int, + field: str, + value: str, + group_mapping: Optional[SecretGroup] = None, + ) -> None: + """Public interface method to add a Relation Data field specifically as a Juju Secret. 
+ + Args: + relation_id: ID of the relation + field: The secret field that is to be added + value: The string value of the secret + group_mapping: The name of the "secret group", in case the field is to be added to an existing secret + """ + full_field = self._field_to_internal_name(field, group_mapping) + if self.secrets_enabled and full_field not in self.current_secret_fields: + self._new_secrets.append(full_field) + if self._no_group_with_databag(field, full_field): + self.update_relation_data(relation_id, {full_field: value}) + + # Unlike for set_secret(), there's no harm using this operation with static secrets + # The restricion is only added to keep the concept clear + @dynamic_secrets_only + def get_secret( + self, + relation_id: int, + field: str, + group_mapping: Optional[SecretGroup] = None, + ) -> Optional[str]: + """Public interface method to fetch secrets only.""" + full_field = self._field_to_internal_name(field, group_mapping) + if ( + self.secrets_enabled + and full_field not in self.current_secret_fields + and field not in self.current_secret_fields + ): + return + if self._no_group_with_databag(field, full_field): + return self.fetch_my_relation_field(relation_id, full_field) + + @dynamic_secrets_only + def delete_secret( + self, + relation_id: int, + field: str, + group_mapping: Optional[SecretGroup] = None, + ) -> Optional[str]: + """Public interface method to delete secrets only.""" + full_field = self._field_to_internal_name(field, group_mapping) + if self.secrets_enabled and full_field not in self.current_secret_fields: + logger.warning(f"Secret {field} from group {group_mapping} was not found") + return + if self._no_group_with_databag(field, full_field): + self.delete_relation_data(relation_id, [full_field]) + + # Helpers + + @staticmethod + def _field_to_internal_name(field: str, group: Optional[SecretGroup]) -> str: + if not group or group == SECRET_GROUPS.EXTRA: + return field + return f"{field}{GROUP_SEPARATOR}{group}" + + 
@staticmethod + def _internal_name_to_field(name: str) -> Tuple[str, SecretGroup]: + parts = name.split(GROUP_SEPARATOR) + if not len(parts) > 1: + return (parts[0], SECRET_GROUPS.EXTRA) + secret_group = SECRET_GROUPS.get_group(parts[1]) + if not secret_group: + raise ValueError(f"Invalid secret field {name}") + return (parts[0], secret_group) + + def _group_secret_fields(self, secret_fields: List[str]) -> Dict[SecretGroup, List[str]]: + """Helper function to arrange secret mappings under their group. + + NOTE: All unrecognized items end up in the 'extra' secret bucket. + Make sure only secret fields are passed! + """ + secret_fieldnames_grouped = {} + for key in secret_fields: + field, group = self._internal_name_to_field(key) + secret_fieldnames_grouped.setdefault(group, []).append(field) + return secret_fieldnames_grouped + + def _content_for_secret_group( + self, content: Dict[str, str], secret_fields: Set[str], group_mapping: SecretGroup + ) -> Dict[str, str]: + """Select : pairs from input, that belong to this particular Secret group.""" + if group_mapping == SECRET_GROUPS.EXTRA: + return {k: v for k, v in content.items() if k in self.secret_fields} + return { + self._internal_name_to_field(k)[0]: v + for k, v in content.items() + if k in self.secret_fields + } + + # Backwards compatibility + + def _check_deleted_label(self, relation, fields) -> None: + """Helper function for legacy behavior.""" + current_data = self.fetch_my_relation_data([relation.id], fields) + if current_data is not None: + # Check if the secret we wanna delete actually exists + # Given the "deleted label", here we can't rely on the default mechanism (i.e. 
'key not found') + if non_existent := (set(fields) & set(self.secret_fields)) - set( + current_data.get(relation.id, []) + ): + logger.debug( + "Non-existing secret %s was attempted to be removed.", + ", ".join(non_existent), + ) + + def _remove_secret_from_databag(self, relation, fields: List[str]) -> None: + """For Rolling Upgrades -- when moving from databag to secrets usage. + + Practically what happens here is to remove stuff from the databag that is + to be stored in secrets. + """ + if not self.secret_fields: + return + + secret_fields_passed = set(self.secret_fields) & set(fields) + for field in secret_fields_passed: + if self._fetch_relation_data_without_secrets(self.component, relation, [field]): + self._delete_relation_data_without_secrets(self.component, relation, [field]) + + def _remove_secret_field_name_from_databag(self, relation) -> None: + """Making sure that the old databag URI is gone. + + This action should not be executed more than once. + """ + # Nothing to do if 'internal-secret' is not in the databag + if not (relation.data[self.component].get(self._generate_secret_field_name())): + return + + # Making sure that the secret receives its label + # (This should have happened by the time we get here, rather an extra security measure.) 
+ secret = self._get_relation_secret(relation.id) + + # Either app scope secret with leader executing, or unit scope secret + leader_or_unit_scope = self.component != self.local_app or self.local_unit.is_leader() + if secret and leader_or_unit_scope: + # Databag reference to the secret URI can be removed, now that it's labelled + relation.data[self.component].pop(self._generate_secret_field_name(), None) + + def _previous_labels(self) -> List[str]: + """Generator for legacy secret label names, for backwards compatibility.""" + result = [] + members = [self._model.app.name] + if self.scope: + members.append(self.scope.value) + result.append(f"{'.'.join(members)}") + return result + + def _no_group_with_databag(self, field: str, full_field: str) -> bool: + """Check that no secret group is attempted to be used together with databag.""" + if not self.secrets_enabled and full_field != field: + logger.error( + f"Can't access {full_field}: no secrets available (i.e. no secret groups either)." + ) + return False + return True + + # Event handlers + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + pass + + def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: + """Event emitted when the secret has changed.""" + pass + + # Overrides of Relation Data handling functions + + def _generate_secret_label( + self, relation_name: str, relation_id: int, group_mapping: SecretGroup + ) -> str: + members = [relation_name, self._model.app.name] + if self.scope: + members.append(self.scope.value) + if group_mapping != SECRET_GROUPS.EXTRA: + members.append(group_mapping) + return f"{'.'.join(members)}" + + def _generate_secret_field_name(self, group_mapping: SecretGroup = SECRET_GROUPS.EXTRA) -> str: + """Generate unique group_mappings for secrets within a relation context.""" + return f"{self.secret_field_name}" + + @juju_secrets_only + def _get_relation_secret( + self, + relation_id: int, 
+ group_mapping: SecretGroup = SECRET_GROUPS.EXTRA, + relation_name: Optional[str] = None, + ) -> Optional[CachedSecret]: + """Retrieve a Juju Secret specifically for peer relations. + + In case this code may be executed within a rolling upgrade, and we may need to + migrate secrets from the databag to labels, we make sure to stick the correct + label on the secret, and clean up the local databag. + """ + if not relation_name: + relation_name = self.relation_name + + relation = self._model.get_relation(relation_name, relation_id) + if not relation: + return + + label = self._generate_secret_label(relation_name, relation_id, group_mapping) + secret_uri = relation.data[self.component].get(self._generate_secret_field_name(), None) + + # URI or legacy label is only to applied when moving single legacy secret to a (new) label + if group_mapping == SECRET_GROUPS.EXTRA: + # Fetching the secret with fallback to URI (in case label is not yet known) + # Label would we "stuck" on the secret in case it is found + return self.secrets.get(label, secret_uri, legacy_labels=self._previous_labels()) + return self.secrets.get(label) + + def _get_group_secret_contents( + self, + relation: Relation, + group: SecretGroup, + secret_fields: Union[Set[str], List[str]] = [], + ) -> Dict[str, str]: + """Helper function to retrieve collective, requested contents of a secret.""" + secret_fields = [self._internal_name_to_field(k)[0] for k in secret_fields] + result = super()._get_group_secret_contents(relation, group, secret_fields) + if self.deleted_label: + result = {key: result[key] for key in result if result[key] != self.deleted_label} + if self._additional_secret_group_mapping: + return {self._field_to_internal_name(key, group): result[key] for key in result} + return result + + @either_static_or_dynamic_secrets + def _fetch_my_specific_relation_data( + self, relation: Relation, fields: Optional[List[str]] + ) -> Dict[str, str]: + """Fetch data available (directily or indirectly -- i.e. 
secrets) from the relation for owner/this_app.""" + return self._fetch_relation_data_with_secrets( + self.component, self.secret_fields, relation, fields + ) + + @either_static_or_dynamic_secrets + def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> None: + """Update data available (directily or indirectly -- i.e. secrets) from the relation for owner/this_app.""" + self._remove_secret_from_databag(relation, list(data.keys())) + _, normal_fields = self._process_secret_fields( + relation, + self.secret_fields, + list(data), + self._add_or_update_relation_secrets, + data=data, + uri_to_databag=False, + ) + self._remove_secret_field_name_from_databag(relation) + + normal_content = {k: v for k, v in data.items() if k in normal_fields} + self._update_relation_data_without_secrets(self.component, relation, normal_content) + + @either_static_or_dynamic_secrets + def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: + """Delete data available (directily or indirectly -- i.e. 
secrets) from the relation for owner/this_app.""" + if self.secret_fields and self.deleted_label: + # Legacy, backwards compatibility + self._check_deleted_label(relation, fields) + + _, normal_fields = self._process_secret_fields( + relation, + self.secret_fields, + fields, + self._update_relation_secret, + data={field: self.deleted_label for field in fields}, + ) + else: + _, normal_fields = self._process_secret_fields( + relation, self.secret_fields, fields, self._delete_relation_secret, fields=fields + ) + self._delete_relation_data_without_secrets(self.component, relation, list(normal_fields)) + + def fetch_relation_data( + self, + relation_ids: Optional[List[int]] = None, + fields: Optional[List[str]] = None, + relation_name: Optional[str] = None, + ) -> Dict[int, Dict[str, str]]: + """This method makes no sense for a Peer Relation.""" + raise NotImplementedError( + "Peer Relation only supports 'self-side' fetch methods: " + "fetch_my_relation_data() and fetch_my_relation_field()" + ) + + def fetch_relation_field( + self, relation_id: int, field: str, relation_name: Optional[str] = None + ) -> Optional[str]: + """This method makes no sense for a Peer Relation.""" + raise NotImplementedError( + "Peer Relation only supports 'self-side' fetch methods: " + "fetch_my_relation_data() and fetch_my_relation_field()" + ) + + # Public functions -- inherited + + fetch_my_relation_data = Data.fetch_my_relation_data + fetch_my_relation_field = Data.fetch_my_relation_field + + +class DataPeerEventHandlers(RequirerEventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: RequirerData, unique_key: str = ""): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + pass + + def _on_secret_changed_event(self, event: SecretChangedEvent) -> None: + """Event 
emitted when the secret has changed.""" + pass + + +class DataPeer(DataPeerData, DataPeerEventHandlers): + """Represents peer relations.""" + + def __init__( + self, + charm, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + unique_key: str = "", + ): + DataPeerData.__init__( + self, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerEventHandlers.__init__(self, charm, self, unique_key) + + +class DataPeerUnitData(DataPeerData): + """Unit data abstraction representation.""" + + SCOPE = Scope.UNIT + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + +class DataPeerUnit(DataPeerUnitData, DataPeerEventHandlers): + """Unit databag representation.""" + + def __init__( + self, + charm, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + unique_key: str = "", + ): + DataPeerData.__init__( + self, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerEventHandlers.__init__(self, charm, self, unique_key) + + +class DataPeerOtherUnitData(DataPeerUnitData): + """Unit data abstraction representation.""" + + def __init__(self, unit: Unit, *args, **kwargs): + super().__init__(*args, **kwargs) + self.local_unit = unit + self.component = unit + + def update_relation_data(self, relation_id: int, data: dict) -> None: + """This method makes no sense for a Other Peer Relation.""" + raise NotImplementedError("It's not possible 
to update data of another unit.") + + def delete_relation_data(self, relation_id: int, fields: List[str]) -> None: + """This method makes no sense for a Other Peer Relation.""" + raise NotImplementedError("It's not possible to delete data of another unit.") + + +class DataPeerOtherUnitEventHandlers(DataPeerEventHandlers): + """Requires-side of the relation.""" + + def __init__(self, charm: CharmBase, relation_data: DataPeerUnitData): + """Manager of base client relations.""" + unique_key = f"{relation_data.relation_name}-{relation_data.local_unit.name}" + super().__init__(charm, relation_data, unique_key=unique_key) + + +class DataPeerOtherUnit(DataPeerOtherUnitData, DataPeerOtherUnitEventHandlers): + """Unit databag representation for another unit than the executor.""" + + def __init__( + self, + unit: Unit, + charm: CharmBase, + relation_name: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + additional_secret_group_mapping: Dict[str, str] = {}, + secret_field_name: Optional[str] = None, + deleted_label: Optional[str] = None, + ): + DataPeerOtherUnitData.__init__( + self, + unit, + charm.model, + relation_name, + extra_user_roles, + additional_secret_fields, + additional_secret_group_mapping, + secret_field_name, + deleted_label, + ) + DataPeerOtherUnitEventHandlers.__init__(self, charm, self) + + +################################################################################ +# Cross-charm Relatoins Data Handling and Evenets +################################################################################ + +# Generic events + + +class ExtraRoleEvent(RelationEvent): + """Base class for data events.""" + + @property + def extra_user_roles(self) -> Optional[str]: + """Returns the extra user roles that were requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("extra-user-roles") + + +class RelationEventWithSecret(RelationEvent): + """Base class for 
Relation Events that need to handle secrets.""" + + @property + def _secrets(self) -> dict: + """Caching secrets to avoid fetching them each time a field is referrd. + + DON'T USE the encapsulated helper variable outside of this function + """ + if not hasattr(self, "_cached_secrets"): + self._cached_secrets = {} + return self._cached_secrets + + def _get_secret(self, group) -> Optional[Dict[str, str]]: + """Retrieving secrets.""" + if not self.app: + return + if not self._secrets.get(group): + self._secrets[group] = None + secret_field = f"{PROV_SECRET_PREFIX}{group}" + if secret_uri := self.relation.data[self.app].get(secret_field): + secret = self.framework.model.get_secret(id=secret_uri) + self._secrets[group] = secret.get_content() + return self._secrets[group] + + @property + def secrets_enabled(self): + """Is this Juju version allowing for Secrets usage?""" + return JujuVersion.from_environ().has_secrets + + +class AuthenticationEvent(RelationEventWithSecret): + """Base class for authentication fields for events. 
+ + The amount of logic added here is not ideal -- but this was the only way to preserve + the interface when moving to Juju Secrets + """ + + @property + def username(self) -> Optional[str]: + """Returns the created username.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("username") + + return self.relation.data[self.relation.app].get("username") + + @property + def password(self) -> Optional[str]: + """Returns the password for the created user.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("password") + + return self.relation.data[self.relation.app].get("password") + + @property + def tls(self) -> Optional[str]: + """Returns whether TLS is configured.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("tls") + if secret: + return secret.get("tls") + + return self.relation.data[self.relation.app].get("tls") + + @property + def tls_ca(self) -> Optional[str]: + """Returns TLS CA.""" + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("tls") + if secret: + return secret.get("tls-ca") + + return self.relation.data[self.relation.app].get("tls-ca") + + +# Database related events and fields + + +class DatabaseProvidesEvent(RelationEvent): + """Base class for database events.""" + + @property + def database(self) -> Optional[str]: + """Returns the database that was requested.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("database") + + +class DatabaseRequestedEvent(DatabaseProvidesEvent, ExtraRoleEvent): + """Event emitted when a new database is requested for use on this relation.""" + + @property + def external_node_connectivity(self) -> bool: + """Returns the requested external_node_connectivity field.""" + if not 
self.relation.app: + return False + + return ( + self.relation.data[self.relation.app].get("external-node-connectivity", "false") + == "true" + ) + + +class DatabaseProvidesEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_requested = EventSource(DatabaseRequestedEvent) + + +class DatabaseRequiresEvent(RelationEventWithSecret): + """Base class for database events.""" + + @property + def database(self) -> Optional[str]: + """Returns the database name.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("database") + + @property + def endpoints(self) -> Optional[str]: + """Returns a comma separated list of read/write endpoints. + + In VM charms, this is the primary's address. + In kubernetes charms, this is the service to the primary pod. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("endpoints") + + @property + def read_only_endpoints(self) -> Optional[str]: + """Returns a comma separated list of read only endpoints. + + In VM charms, this is the address of all the secondary instances. + In kubernetes charms, this is the service to all replica pod instances. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("read-only-endpoints") + + @property + def replset(self) -> Optional[str]: + """Returns the replicaset name. + + MongoDB only. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("replset") + + @property + def uris(self) -> Optional[str]: + """Returns the connection URIs. + + MongoDB, Redis, OpenSearch. 
+ """ + if not self.relation.app: + return None + + if self.secrets_enabled: + secret = self._get_secret("user") + if secret: + return secret.get("uris") + + return self.relation.data[self.relation.app].get("uris") + + @property + def version(self) -> Optional[str]: + """Returns the version of the database. + + Version as informed by the database daemon. + """ + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("version") + + +class DatabaseCreatedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when a new database is created for use on this relation.""" + + +class DatabaseEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when the read/write endpoints are changed.""" + + +class DatabaseReadOnlyEndpointsChangedEvent(AuthenticationEvent, DatabaseRequiresEvent): + """Event emitted when the read only endpoints are changed.""" + + +class DatabaseRequiresEvents(CharmEvents): + """Database events. + + This class defines the events that the database can emit. + """ + + database_created = EventSource(DatabaseCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + read_only_endpoints_changed = EventSource(DatabaseReadOnlyEndpointsChangedEvent) + + +# Database Provider and Requires + + +class DatabaseProviderData(ProviderData): + """Provider-side data of the database relations.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_database(self, relation_id: int, database_name: str) -> None: + """Set database name. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + database_name: database name. 
+ """ + self.update_relation_data(relation_id, {"database": database_name}) + + def set_endpoints(self, relation_id: int, connection_strings: str) -> None: + """Set database primary connections. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + In VM charms, only the primary's address should be passed as an endpoint. + In kubernetes charms, the service endpoint to the primary pod should be + passed as an endpoint. + + Args: + relation_id: the identifier for a particular relation. + connection_strings: database hosts and ports comma separated list. + """ + self.update_relation_data(relation_id, {"endpoints": connection_strings}) + + def set_read_only_endpoints(self, relation_id: int, connection_strings: str) -> None: + """Set database replicas connection strings. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + connection_strings: database hosts and ports comma separated list. + """ + self.update_relation_data(relation_id, {"read-only-endpoints": connection_strings}) + + def set_replset(self, relation_id: int, replset: str) -> None: + """Set replica set name in the application relation databag. + + MongoDB only. + + Args: + relation_id: the identifier for a particular relation. + replset: replica set name. + """ + self.update_relation_data(relation_id, {"replset": replset}) + + def set_uris(self, relation_id: int, uris: str) -> None: + """Set the database connection URIs in the application relation databag. + + MongoDB, Redis, and OpenSearch only. + + Args: + relation_id: the identifier for a particular relation. + uris: connection URIs. + """ + self.update_relation_data(relation_id, {"uris": uris}) + + def set_version(self, relation_id: int, version: str) -> None: + """Set the database version in the application relation databag. 
+ + Args: + relation_id: the identifier for a particular relation. + version: database version. + """ + self.update_relation_data(relation_id, {"version": version}) + + def set_subordinated(self, relation_id: int) -> None: + """Raises the subordinated flag in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + """ + self.update_relation_data(relation_id, {"subordinated": "true"}) + + +class DatabaseProviderEventHandlers(EventHandlers): + """Provider-side of the database relation handlers.""" + + on = DatabaseProvidesEvents() # pyright: ignore [reportAssignmentType] + + def __init__( + self, charm: CharmBase, relation_data: DatabaseProviderData, unique_key: str = "" + ): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + # Just to calm down pyright, it can't parse that the same type is being used in the super() call above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit a database requested event if the setup key (database name and optional + # extra user roles) was added to the relation databag by the application. 
+ if "database" in diff.added: + getattr(self.on, "database_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class DatabaseProvides(DatabaseProviderData, DatabaseProviderEventHandlers): + """Provider-side of the database relations.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + DatabaseProviderData.__init__(self, charm.model, relation_name) + DatabaseProviderEventHandlers.__init__(self, charm, self) + + +class DatabaseRequirerData(RequirerData): + """Requirer-side of the database relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + database_name: str, + extra_user_roles: Optional[str] = None, + relations_aliases: Optional[List[str]] = None, + additional_secret_fields: Optional[List[str]] = [], + external_node_connectivity: bool = False, + ): + """Manager of database client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.database = database_name + self.relations_aliases = relations_aliases + self.external_node_connectivity = external_node_connectivity + + def is_postgresql_plugin_enabled(self, plugin: str, relation_index: int = 0) -> bool: + """Returns whether a plugin is enabled in the database. + + Args: + plugin: name of the plugin to check. + relation_index: optional relation index to check the database + (default: 0 - first relation). + + PostgreSQL only. + """ + # Psycopg 3 is imported locally to avoid the need of its package installation + # when relating to a database charm other than PostgreSQL. + import psycopg + + # Return False if no relation is established. + if len(self.relations) == 0: + return False + + relation_id = self.relations[relation_index].id + host = self.fetch_relation_field(relation_id, "endpoints") + + # Return False if there is no endpoint available. 
+ if host is None: + return False + + host = host.split(":")[0] + + content = self.fetch_relation_data([relation_id], ["username", "password"]).get( + relation_id, {} + ) + user = content.get("username") + password = content.get("password") + + connection_string = ( + f"host='{host}' dbname='{self.database}' user='{user}' password='{password}'" + ) + try: + with psycopg.connect(connection_string) as connection: + with connection.cursor() as cursor: + cursor.execute( + "SELECT TRUE FROM pg_extension WHERE extname=%s::text;", (plugin,) + ) + return cursor.fetchone() is not None + except psycopg.Error as e: + logger.exception( + f"failed to check whether {plugin} plugin is enabled in the database: %s", str(e) + ) + return False + + +class DatabaseRequirerEventHandlers(RequirerEventHandlers): + """Requires-side of the relation.""" + + on = DatabaseRequiresEvents() # pyright: ignore [reportAssignmentType] + + def __init__( + self, charm: CharmBase, relation_data: DatabaseRequirerData, unique_key: str = "" + ): + """Manager of base client relations.""" + super().__init__(charm, relation_data, unique_key) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + # Define custom event names for each alias. + if self.relation_data.relations_aliases: + # Ensure the number of aliases does not exceed the maximum + # of connections allowed in the specific relation. + relation_connection_limit = self.charm.meta.requires[ + self.relation_data.relation_name + ].limit + if len(self.relation_data.relations_aliases) != relation_connection_limit: + raise ValueError( + f"The number of aliases must match the maximum number of connections allowed in the relation. 
" + f"Expected {relation_connection_limit}, got {len(self.relation_data.relations_aliases)}" + ) + + if self.relation_data.relations_aliases: + for relation_alias in self.relation_data.relations_aliases: + self.on.define_event(f"{relation_alias}_database_created", DatabaseCreatedEvent) + self.on.define_event( + f"{relation_alias}_endpoints_changed", DatabaseEndpointsChangedEvent + ) + self.on.define_event( + f"{relation_alias}_read_only_endpoints_changed", + DatabaseReadOnlyEndpointsChangedEvent, + ) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + pass + + def _assign_relation_alias(self, relation_id: int) -> None: + """Assigns an alias to a relation. + + This function writes in the unit data bag. + + Args: + relation_id: the identifier for a particular relation. + """ + # If no aliases were provided, return immediately. + if not self.relation_data.relations_aliases: + return + + # Return if an alias was already assigned to this relation + # (like when there are more than one unit joining the relation). + relation = self.charm.model.get_relation(self.relation_data.relation_name, relation_id) + if relation and relation.data[self.relation_data.local_unit].get("alias"): + return + + # Retrieve the available aliases (the ones that weren't assigned to any relation). + available_aliases = self.relation_data.relations_aliases[:] + for relation in self.charm.model.relations[self.relation_data.relation_name]: + alias = relation.data[self.relation_data.local_unit].get("alias") + if alias: + logger.debug("Alias %s was already assigned to relation %d", alias, relation.id) + available_aliases.remove(alias) + + # Set the alias in the unit relation databag of the specific relation. 
+ relation = self.charm.model.get_relation(self.relation_data.relation_name, relation_id) + if relation: + relation.data[self.relation_data.local_unit].update({"alias": available_aliases[0]}) + + # We need to set relation alias also on the application level so, + # it will be accessible in show-unit juju command, executed for a consumer application unit + if self.relation_data.local_unit.is_leader(): + self.relation_data.update_relation_data(relation_id, {"alias": available_aliases[0]}) + + def _emit_aliased_event(self, event: RelationChangedEvent, event_name: str) -> None: + """Emit an aliased event to a particular relation if it has an alias. + + Args: + event: the relation changed event that was received. + event_name: the name of the event to emit. + """ + alias = self._get_relation_alias(event.relation.id) + if alias: + getattr(self.on, f"{alias}_{event_name}").emit( + event.relation, app=event.app, unit=event.unit + ) + + def _get_relation_alias(self, relation_id: int) -> Optional[str]: + """Returns the relation alias. + + Args: + relation_id: the identifier for a particular relation. + + Returns: + the relation alias or None if the relation was not found. + """ + for relation in self.charm.model.relations[self.relation_data.relation_name]: + if relation.id == relation_id: + return relation.data[self.relation_data.local_unit].get("alias") + return None + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the database relation is created.""" + super()._on_relation_created_event(event) + + # If relations aliases were provided, assign one to the relation. + self._assign_relation_alias(event.relation.id) + + # Sets both database and extra user roles in the relation + # if the roles are provided. Otherwise, sets only the database. 
+ if not self.relation_data.local_unit.is_leader(): + return + + event_data = {"database": self.relation_data.database} + + if self.relation_data.extra_user_roles: + event_data["extra-user-roles"] = self.relation_data.extra_user_roles + + # set external-node-connectivity field + if self.relation_data.external_node_connectivity: + event_data["external-node-connectivity"] = "true" + + self.relation_data.update_relation_data(event.relation.id, event_data) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the database relation has changed.""" + is_subordinate = False + remote_unit_data = None + for key in event.relation.data.keys(): + if isinstance(key, Unit) and not key.name.startswith(self.charm.app.name): + remote_unit_data = event.relation.data[key] + elif isinstance(key, Application) and key.name != self.charm.app.name: + is_subordinate = event.relation.data[key].get("subordinated") == "true" + + if is_subordinate: + if not remote_unit_data: + return + + if remote_unit_data.get("state") != "ready": + return + + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) + + # Check if the database is created + # (the database charm shared the credentials). + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: + # Emit the default event (the one without an alias). + logger.info("database created at %s", datetime.now()) + getattr(self.on, "database_created").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Emit the aliased event (if any). 
+ self._emit_aliased_event(event, "database_created") + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “database_created“ is triggered. + return + + # Emit an endpoints changed event if the database + # added or changed this info in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("endpoints changed on %s", datetime.now()) + getattr(self.on, "endpoints_changed").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Emit the aliased event (if any). + self._emit_aliased_event(event, "endpoints_changed") + + # To avoid unnecessary application restarts do not trigger + # “read_only_endpoints_changed“ event if “endpoints_changed“ is triggered. + return + + # Emit a read only endpoints changed event if the database + # added or changed this info in the relation databag. + if "read-only-endpoints" in diff.added or "read-only-endpoints" in diff.changed: + # Emit the default event (the one without an alias). + logger.info("read-only-endpoints changed on %s", datetime.now()) + getattr(self.on, "read_only_endpoints_changed").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Emit the aliased event (if any). 
+        self._emit_aliased_event(event, "read_only_endpoints_changed")
+
+
+class DatabaseRequires(DatabaseRequirerData, DatabaseRequirerEventHandlers):
+    """Requirer-side of the database relations."""
+
+    def __init__(
+        self,
+        charm: CharmBase,
+        relation_name: str,
+        database_name: str,
+        extra_user_roles: Optional[str] = None,
+        relations_aliases: Optional[List[str]] = None,
+        additional_secret_fields: Optional[List[str]] = [],
+        external_node_connectivity: bool = False,
+    ):
+        DatabaseRequirerData.__init__(
+            self,
+            charm.model,
+            relation_name,
+            database_name,
+            extra_user_roles,
+            relations_aliases,
+            additional_secret_fields,
+            external_node_connectivity,
+        )
+        DatabaseRequirerEventHandlers.__init__(self, charm, self)
+
+
+################################################################################
+# Charm-specific Relations Data and Events
+################################################################################
+
+# Kafka Events
+
+
+class KafkaProvidesEvent(RelationEvent):
+    """Base class for Kafka events."""
+
+    @property
+    def topic(self) -> Optional[str]:
+        """Returns the topic that was requested."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("topic")
+
+    @property
+    def consumer_group_prefix(self) -> Optional[str]:
+        """Returns the consumer-group-prefix that was requested."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("consumer-group-prefix")
+
+
+class TopicRequestedEvent(KafkaProvidesEvent, ExtraRoleEvent):
+    """Event emitted when a new topic is requested for use on this relation."""
+
+
+class KafkaProvidesEvents(CharmEvents):
+    """Kafka events.
+
+    This class defines the events that the Kafka can emit.
+ """ + + topic_requested = EventSource(TopicRequestedEvent) + + +class KafkaRequiresEvent(RelationEvent): + """Base class for Kafka events.""" + + @property + def topic(self) -> Optional[str]: + """Returns the topic.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("topic") + + @property + def bootstrap_server(self) -> Optional[str]: + """Returns a comma-separated list of broker uris.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("endpoints") + + @property + def consumer_group_prefix(self) -> Optional[str]: + """Returns the consumer-group-prefix.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("consumer-group-prefix") + + @property + def zookeeper_uris(self) -> Optional[str]: + """Returns a comma separated list of Zookeeper uris.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("zookeeper-uris") + + +class TopicCreatedEvent(AuthenticationEvent, KafkaRequiresEvent): + """Event emitted when a new topic is created for use on this relation.""" + + +class BootstrapServerChangedEvent(AuthenticationEvent, KafkaRequiresEvent): + """Event emitted when the bootstrap server is changed.""" + + +class KafkaRequiresEvents(CharmEvents): + """Kafka events. + + This class defines the events that the Kafka can emit. + """ + + topic_created = EventSource(TopicCreatedEvent) + bootstrap_server_changed = EventSource(BootstrapServerChangedEvent) + + +# Kafka Provides and Requires + + +class KafkaProviderData(ProviderData): + """Provider-side of the Kafka relation.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_topic(self, relation_id: int, topic: str) -> None: + """Set topic name in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + topic: the topic name. 
+ """ + self.update_relation_data(relation_id, {"topic": topic}) + + def set_bootstrap_server(self, relation_id: int, bootstrap_server: str) -> None: + """Set the bootstrap server in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + bootstrap_server: the bootstrap server address. + """ + self.update_relation_data(relation_id, {"endpoints": bootstrap_server}) + + def set_consumer_group_prefix(self, relation_id: int, consumer_group_prefix: str) -> None: + """Set the consumer group prefix in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + consumer_group_prefix: the consumer group prefix string. + """ + self.update_relation_data(relation_id, {"consumer-group-prefix": consumer_group_prefix}) + + def set_zookeeper_uris(self, relation_id: int, zookeeper_uris: str) -> None: + """Set the zookeeper uris in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + zookeeper_uris: comma-separated list of ZooKeeper server uris. + """ + self.update_relation_data(relation_id, {"zookeeper-uris": zookeeper_uris}) + + +class KafkaProviderEventHandlers(EventHandlers): + """Provider-side of the Kafka relation.""" + + on = KafkaProvidesEvents() # pyright: ignore [reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: KafkaProviderData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + + # Check which data has changed to emit customs events. 
+ diff = self._diff(event) + + # Emit a topic requested event if the setup key (topic name and optional + # extra user roles) was added to the relation databag by the application. + if "topic" in diff.added: + getattr(self.on, "topic_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class KafkaProvides(KafkaProviderData, KafkaProviderEventHandlers): + """Provider-side of the Kafka relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + KafkaProviderData.__init__(self, charm.model, relation_name) + KafkaProviderEventHandlers.__init__(self, charm, self) + + +class KafkaRequirerData(RequirerData): + """Requirer-side of the Kafka relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + topic: str, + extra_user_roles: Optional[str] = None, + consumer_group_prefix: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of Kafka client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.topic = topic + self.consumer_group_prefix = consumer_group_prefix or "" + + @property + def topic(self): + """Topic to use in Kafka.""" + return self._topic + + @topic.setter + def topic(self, value): + # Avoid wildcards + if value == "*": + raise ValueError(f"Error on topic '{value}', cannot be a wildcard.") + self._topic = value + + +class KafkaRequirerEventHandlers(RequirerEventHandlers): + """Requires-side of the Kafka relation.""" + + on = KafkaRequiresEvents() # pyright: ignore [reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: KafkaRequirerData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. 
The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the Kafka relation is created.""" + super()._on_relation_created_event(event) + + if not self.relation_data.local_unit.is_leader(): + return + + # Sets topic, extra user roles, and "consumer-group-prefix" in the relation + relation_data = {"topic": self.relation_data.topic} + + if self.relation_data.extra_user_roles: + relation_data["extra-user-roles"] = self.relation_data.extra_user_roles + + if self.relation_data.consumer_group_prefix: + relation_data["consumer-group-prefix"] = self.relation_data.consumer_group_prefix + + self.relation_data.update_relation_data(event.relation.id, relation_data) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + pass + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the Kafka relation has changed.""" + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Check if the topic is created + # (the Kafka charm shared the credentials). + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) + + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: + # Emit the default event (the one without an alias). + logger.info("topic created at %s", datetime.now()) + getattr(self.on, "topic_created").emit(event.relation, app=event.app, unit=event.unit) + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “topic_created“ is triggered. 
+            return
+
+        # Emit an endpoints (bootstrap-server) changed event if the Kafka endpoints
+        # added or changed this info in the relation databag.
+        if "endpoints" in diff.added or "endpoints" in diff.changed:
+            # Emit the default event (the one without an alias).
+            logger.info("endpoints changed on %s", datetime.now())
+            getattr(self.on, "bootstrap_server_changed").emit(
+                event.relation, app=event.app, unit=event.unit
+            )  # here check if this is the right design
+            return
+
+
+class KafkaRequires(KafkaRequirerData, KafkaRequirerEventHandlers):
+    """Requirer-side of the Kafka relation."""
+
+    def __init__(
+        self,
+        charm: CharmBase,
+        relation_name: str,
+        topic: str,
+        extra_user_roles: Optional[str] = None,
+        consumer_group_prefix: Optional[str] = None,
+        additional_secret_fields: Optional[List[str]] = [],
+    ) -> None:
+        KafkaRequirerData.__init__(
+            self,
+            charm.model,
+            relation_name,
+            topic,
+            extra_user_roles,
+            consumer_group_prefix,
+            additional_secret_fields,
+        )
+        KafkaRequirerEventHandlers.__init__(self, charm, self)
+
+
+# Opensearch related events
+
+
+class OpenSearchProvidesEvent(RelationEvent):
+    """Base class for OpenSearch events."""
+
+    @property
+    def index(self) -> Optional[str]:
+        """Returns the index that was requested."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("index")
+
+
+class IndexRequestedEvent(OpenSearchProvidesEvent, ExtraRoleEvent):
+    """Event emitted when a new index is requested for use on this relation."""
+
+
+class OpenSearchProvidesEvents(CharmEvents):
+    """OpenSearch events.
+
+    This class defines the events that OpenSearch can emit.
+ """ + + index_requested = EventSource(IndexRequestedEvent) + + +class OpenSearchRequiresEvent(DatabaseRequiresEvent): + """Base class for OpenSearch requirer events.""" + + +class IndexCreatedEvent(AuthenticationEvent, OpenSearchRequiresEvent): + """Event emitted when a new index is created for use on this relation.""" + + +class OpenSearchRequiresEvents(CharmEvents): + """OpenSearch events. + + This class defines the events that the opensearch requirer can emit. + """ + + index_created = EventSource(IndexCreatedEvent) + endpoints_changed = EventSource(DatabaseEndpointsChangedEvent) + authentication_updated = EventSource(AuthenticationEvent) + + +# OpenSearch Provides and Requires Objects + + +class OpenSearchProvidesData(ProviderData): + """Provider-side of the OpenSearch relation.""" + + def __init__(self, model: Model, relation_name: str) -> None: + super().__init__(model, relation_name) + + def set_index(self, relation_id: int, index: str) -> None: + """Set the index in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + index: the index as it is _created_ on the provider charm. This needn't match the + requested index, and can be used to present a different index name if, for example, + the requested index is invalid. + """ + self.update_relation_data(relation_id, {"index": index}) + + def set_endpoints(self, relation_id: int, endpoints: str) -> None: + """Set the endpoints in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + endpoints: the endpoint addresses for opensearch nodes. + """ + self.update_relation_data(relation_id, {"endpoints": endpoints}) + + def set_version(self, relation_id: int, version: str) -> None: + """Set the opensearch version in the application relation databag. + + Args: + relation_id: the identifier for a particular relation. + version: database version. 
+ """ + self.update_relation_data(relation_id, {"version": version}) + + +class OpenSearchProvidesEventHandlers(EventHandlers): + """Provider-side of the OpenSearch relation.""" + + on = OpenSearchProvidesEvents() # pyright: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: OpenSearchProvidesData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the relation has changed.""" + # Leader only + if not self.relation_data.local_unit.is_leader(): + return + # Check which data has changed to emit customs events. + diff = self._diff(event) + + # Emit an index requested event if the setup key (index name and optional extra user roles) + # have been added to the relation databag by the application. + if "index" in diff.added: + getattr(self.on, "index_requested").emit( + event.relation, app=event.app, unit=event.unit + ) + + +class OpenSearchProvides(OpenSearchProvidesData, OpenSearchProvidesEventHandlers): + """Provider-side of the OpenSearch relation.""" + + def __init__(self, charm: CharmBase, relation_name: str) -> None: + OpenSearchProvidesData.__init__(self, charm.model, relation_name) + OpenSearchProvidesEventHandlers.__init__(self, charm, self) + + +class OpenSearchRequiresData(RequirerData): + """Requires data side of the OpenSearch relation.""" + + def __init__( + self, + model: Model, + relation_name: str, + index: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ): + """Manager of OpenSearch client relations.""" + super().__init__(model, relation_name, extra_user_roles, additional_secret_fields) + self.index = index + + +class OpenSearchRequiresEventHandlers(RequirerEventHandlers): + """Requires events side of the OpenSearch relation.""" 
+ + on = OpenSearchRequiresEvents() # pyright: ignore[reportAssignmentType] + + def __init__(self, charm: CharmBase, relation_data: OpenSearchRequiresData) -> None: + super().__init__(charm, relation_data) + # Just to keep lint quiet, can't resolve inheritance. The same happened in super().__init__() above + self.relation_data = relation_data + + def _on_relation_created_event(self, event: RelationCreatedEvent) -> None: + """Event emitted when the OpenSearch relation is created.""" + super()._on_relation_created_event(event) + + if not self.relation_data.local_unit.is_leader(): + return + + # Sets both index and extra user roles in the relation if the roles are provided. + # Otherwise, sets only the index. + data = {"index": self.relation_data.index} + if self.relation_data.extra_user_roles: + data["extra-user-roles"] = self.relation_data.extra_user_roles + + self.relation_data.update_relation_data(event.relation.id, data) + + def _on_secret_changed_event(self, event: SecretChangedEvent): + """Event notifying about a new value of a secret.""" + if not event.secret.label: + return + + relation = self.relation_data._relation_from_secret_label(event.secret.label) + if not relation: + logging.info( + f"Received secret {event.secret.label} but couldn't parse, seems irrelevant" + ) + return + + if relation.app == self.charm.app: + logging.info("Secret changed event ignored for Secret Owner") + + remote_unit = None + for unit in relation.units: + if unit.app != self.charm.app: + remote_unit = unit + + logger.info("authentication updated") + getattr(self.on, "authentication_updated").emit( + relation, app=relation.app, unit=remote_unit + ) + + def _on_relation_changed_event(self, event: RelationChangedEvent) -> None: + """Event emitted when the OpenSearch relation has changed. + + This event triggers individual custom events depending on the changing relation. + """ + # Check which data has changed to emit customs events. 
+ diff = self._diff(event) + + # Register all new secrets with their labels + if any(newval for newval in diff.added if self.relation_data._is_secret_field(newval)): + self.relation_data._register_secrets_to_relation(event.relation, diff.added) + + secret_field_user = self.relation_data._generate_secret_field_name(SECRET_GROUPS.USER) + secret_field_tls = self.relation_data._generate_secret_field_name(SECRET_GROUPS.TLS) + updates = {"username", "password", "tls", "tls-ca", secret_field_user, secret_field_tls} + if len(set(diff._asdict().keys()) - updates) < len(diff): + logger.info("authentication updated at: %s", datetime.now()) + getattr(self.on, "authentication_updated").emit( + event.relation, app=event.app, unit=event.unit + ) + + # Check if the index is created + # (the OpenSearch charm shares the credentials). + if ( + "username" in diff.added and "password" in diff.added + ) or secret_field_user in diff.added: + # Emit the default event (the one without an alias). + logger.info("index created at: %s", datetime.now()) + getattr(self.on, "index_created").emit(event.relation, app=event.app, unit=event.unit) + + # To avoid unnecessary application restarts do not trigger + # “endpoints_changed“ event if “index_created“ is triggered. + return + + # Emit a endpoints changed event if the OpenSearch application added or changed this info + # in the relation databag. + if "endpoints" in diff.added or "endpoints" in diff.changed: + # Emit the default event (the one without an alias). 
+ logger.info("endpoints changed on %s", datetime.now()) + getattr(self.on, "endpoints_changed").emit( + event.relation, app=event.app, unit=event.unit + ) # here check if this is the right design + return + + +class OpenSearchRequires(OpenSearchRequiresData, OpenSearchRequiresEventHandlers): + """Requires-side of the OpenSearch relation.""" + + def __init__( + self, + charm: CharmBase, + relation_name: str, + index: str, + extra_user_roles: Optional[str] = None, + additional_secret_fields: Optional[List[str]] = [], + ) -> None: + OpenSearchRequiresData.__init__( + self, + charm.model, + relation_name, + index, + extra_user_roles, + additional_secret_fields, + ) + OpenSearchRequiresEventHandlers.__init__(self, charm, self) diff --git a/lib/charms/data_platform_libs/v0/s3.py b/lib/charms/data_platform_libs/v0/s3.py new file mode 100644 index 0000000..f5614aa --- /dev/null +++ b/lib/charms/data_platform_libs/v0/s3.py @@ -0,0 +1,791 @@ +# Copyright 2023 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""A library for communicating with the S3 credentials providers and consumers. + +This library provides the relevant interface code implementing the communication +specification for fetching, retrieving, triggering, and responding to events related to +the S3 provider charm and its consumers. + +### Provider charm + +The provider is implemented in the `s3-provider` charm which is meant to be deployed +alongside one or more consumer charms. 
The provider charm is serving the s3 credentials and +metadata needed to communicate and work with an S3 compatible backend. + +Example: +```python + +from charms.data_platform_libs.v0.s3 import CredentialRequestedEvent, S3Provider + + +class ExampleProviderCharm(CharmBase): + def __init__(self, *args) -> None: + super().__init__(*args) + self.s3_provider = S3Provider(self, "s3-credentials") + + self.framework.observe(self.s3_provider.on.credentials_requested, + self._on_credential_requested) + + def _on_credential_requested(self, event: CredentialRequestedEvent): + if not self.unit.is_leader(): + return + + # get relation id + relation_id = event.relation.id + + # get bucket name + bucket = event.bucket + + # S3 configuration parameters + desired_configuration = {"access-key": "your-access-key", "secret-key": + "your-secret-key", "bucket": "your-bucket"} + + # update the configuration + self.s3_provider.update_connection_info(relation_id, desired_configuration) + + # or it is possible to set each field independently + + self.s3_provider.set_secret_key(relation_id, "your-secret-key") + + +if __name__ == "__main__": + main(ExampleProviderCharm) + + +### Requirer charm + +The requirer charm is the charm requiring the S3 credentials. 
+An example of requirer charm is the following: + +Example: +```python + +from charms.data_platform_libs.v0.s3 import ( + CredentialsChangedEvent, + CredentialsGoneEvent, + S3Requirer +) + +class ExampleRequirerCharm(CharmBase): + + def __init__(self, *args): + super().__init__(*args) + + bucket_name = "test-bucket" + # if bucket name is not provided the bucket name will be generated + # e.g., ('relation-{relation.id}') + + self.s3_client = S3Requirer(self, "s3-credentials", bucket_name) + + self.framework.observe(self.s3_client.on.credentials_changed, self._on_credential_changed) + self.framework.observe(self.s3_client.on.credentials_gone, self._on_credential_gone) + + def _on_credential_changed(self, event: CredentialsChangedEvent): + + # access single parameter credential + secret_key = event.secret_key + access_key = event.access_key + + # or as alternative all credentials can be collected as a dictionary + credentials = self.s3_client.get_s3_credentials() + + def _on_credential_gone(self, event: CredentialsGoneEvent): + # credentials are removed + pass + + if __name__ == "__main__": + main(ExampleRequirerCharm) +``` + +""" +import json +import logging +from collections import namedtuple +from typing import Dict, List, Optional, Union + +import ops.charm +import ops.framework +import ops.model +from ops.charm import ( + CharmBase, + CharmEvents, + RelationBrokenEvent, + RelationChangedEvent, + RelationEvent, + RelationJoinedEvent, +) +from ops.framework import EventSource, Object, ObjectEvents +from ops.model import Application, Relation, RelationDataContent, Unit + +# The unique Charmhub library identifier, never change it +LIBID = "fca396f6254246c9bfa565b1f85ab528" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 5 + +logger = logging.getLogger(__name__) + +Diff = namedtuple("Diff", 
"added changed deleted") +Diff.__doc__ = """ +A tuple for storing the diff between two data mappings. + +added - keys that were added +changed - keys that still exist but have new values +deleted - key that were deleted""" + + +def diff(event: RelationChangedEvent, bucket: Union[Unit, Application]) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + bucket: bucket of the databag (app or unit) + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + # Retrieve the old data from the data key in the application relation databag. + old_data = json.loads(event.relation.data[bucket].get("data", "{}")) + # Retrieve the new data from the event relation databag. + new_data = ( + {key: value for key, value in event.relation.data[event.app].items() if key != "data"} + if event.app + else {} + ) + + # These are the keys that were added to the databag and triggered this event. + added = new_data.keys() - old_data.keys() + # These are the keys that were removed from the databag and triggered this event. + deleted = old_data.keys() - new_data.keys() + # These are the keys that already existed in the databag, + # but had their values changed. + changed = {key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key]} + + # TODO: evaluate the possibility of losing the diff if some error + # happens in the charm before the diff is completely checked (DPE-412). + # Convert the new_data to a serializable format and save it for a next diff check. + event.relation.data[bucket].update({"data": json.dumps(new_data)}) + + # Return the diff with all possible changes. 
+    return Diff(added, changed, deleted)
+
+
+class BucketEvent(RelationEvent):
+    """Base class for bucket events."""
+
+    @property
+    def bucket(self) -> Optional[str]:
+        """Returns the bucket that was requested."""
+        if not self.relation.app:
+            return None
+
+        return self.relation.data[self.relation.app].get("bucket", "")
+
+
+class CredentialRequestedEvent(BucketEvent):
+    """Event emitted when a set of credentials is requested for use on this relation."""
+
+
+class S3CredentialEvents(CharmEvents):
+    """Event descriptor for events raised by S3Provider."""
+
+    credentials_requested = EventSource(CredentialRequestedEvent)
+
+
+class S3Provider(Object):
+    """A provider handler for communicating S3 credentials to consumers."""
+
+    on = S3CredentialEvents()  # pyright: ignore [reportAssignmentType]
+
+    def __init__(
+        self,
+        charm: CharmBase,
+        relation_name: str,
+    ):
+        super().__init__(charm, relation_name)
+        self.charm = charm
+        self.local_app = self.charm.model.app
+        self.local_unit = self.charm.unit
+        self.relation_name = relation_name
+
+        # monitor relation changed event for changes in the credentials
+        self.framework.observe(charm.on[relation_name].relation_changed, self._on_relation_changed)
+
+    def _on_relation_changed(self, event: RelationChangedEvent) -> None:
+        """React to the relation changed event by consuming data."""
+        if not self.charm.unit.is_leader():
+            return
+        diff = self._diff(event)
+        # emit on credential requested if bucket is provided by the requirer application
+        if "bucket" in diff.added:
+            getattr(self.on, "credentials_requested").emit(
+                event.relation, app=event.app, unit=event.unit
+            )
+
+    def _load_relation_data(self, raw_relation_data: dict) -> dict:
+        """Loads relation data from the relation data bag.
+
+        Args:
+            raw_relation_data: Relation data from the databag
+        Returns:
+            dict: Relation data in dict format.
+ """ + connection_data = {} + for key in raw_relation_data: + try: + connection_data[key] = json.loads(raw_relation_data[key]) + except (json.decoder.JSONDecodeError, TypeError): + connection_data[key] = raw_relation_data[key] + return connection_data + + # def _diff(self, event: RelationChangedEvent) -> Diff: + # """Retrieves the diff of the data in the relation changed databag. + + # Args: + # event: relation changed event. + + # Returns: + # a Diff instance containing the added, deleted and changed + # keys from the event relation databag. + # """ + # # Retrieve the old data from the data key in the application relation databag. + # old_data = json.loads(event.relation.data[self.local_app].get("data", "{}")) + # # Retrieve the new data from the event relation databag. + # new_data = { + # key: value for key, value in event.relation.data[event.app].items() if key != "data" + # } + + # # These are the keys that were added to the databag and triggered this event. + # added = new_data.keys() - old_data.keys() + # # These are the keys that were removed from the databag and triggered this event. + # deleted = old_data.keys() - new_data.keys() + # # These are the keys that already existed in the databag, + # # but had their values changed. + # changed = { + # key for key in old_data.keys() & new_data.keys() if old_data[key] != new_data[key] + # } + + # # TODO: evaluate the possibility of losing the diff if some error + # # happens in the charm before the diff is completely checked (DPE-412). + # # Convert the new_data to a serializable format and save it for a next diff check. + # event.relation.data[self.local_app].update({"data": json.dumps(new_data)}) + + # # Return the diff with all possible changes. + # return Diff(added, changed, deleted) + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. 
+ + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. + """ + return diff(event, self.local_app) + + def fetch_relation_data(self) -> dict: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation id). + """ + data = {} + for relation in self.relations: + data[relation.id] = ( + {key: value for key, value in relation.data[relation.app].items() if key != "data"} + if relation.app + else {} + ) + return data + + def update_connection_info(self, relation_id: int, connection_data: dict) -> None: + """Updates the credential data as set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + connection_data: dict containing the key-value pairs + that should be updated. 
+ """ + # check and write changes only if you are the leader + if not self.local_unit.is_leader(): + return + + relation = self.charm.model.get_relation(self.relation_name, relation_id) + + if not relation: + return + + # configuration options that are list + s3_list_options = ["attributes", "tls-ca-chain"] + + # update the databag, if connection data did not change with respect to before + # the relation changed event is not triggered + updated_connection_data = {} + for configuration_option, configuration_value in connection_data.items(): + if configuration_option in s3_list_options: + updated_connection_data[configuration_option] = json.dumps(configuration_value) + else: + updated_connection_data[configuration_option] = configuration_value + + relation.data[self.local_app].update(updated_connection_data) + logger.debug(f"Updated S3 connection info: {updated_connection_data}") + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return list(self.charm.model.relations[self.relation_name]) + + def set_bucket(self, relation_id: int, bucket: str) -> None: + """Sets bucket name in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + bucket: the bucket name. + """ + self.update_connection_info(relation_id, {"bucket": bucket}) + + def set_access_key(self, relation_id: int, access_key: str) -> None: + """Sets access-key value in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + access_key: the access-key value. + """ + self.update_connection_info(relation_id, {"access-key": access_key}) + + def set_secret_key(self, relation_id: int, secret_key: str) -> None: + """Sets the secret key value in application databag. 
+ + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + secret_key: the value of the secret key. + """ + self.update_connection_info(relation_id, {"secret-key": secret_key}) + + def set_path(self, relation_id: int, path: str) -> None: + """Sets the path value in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + path: the path value. + """ + self.update_connection_info(relation_id, {"path": path}) + + def set_endpoint(self, relation_id: int, endpoint: str) -> None: + """Sets the endpoint address in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + endpoint: the endpoint address. + """ + self.update_connection_info(relation_id, {"endpoint": endpoint}) + + def set_region(self, relation_id: int, region: str) -> None: + """Sets the region location in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + region: the region address. + """ + self.update_connection_info(relation_id, {"region": region}) + + def set_s3_uri_style(self, relation_id: int, s3_uri_style: str) -> None: + """Sets the S3 URI style in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + s3_uri_style: the s3 URI style. + """ + self.update_connection_info(relation_id, {"s3-uri-style": s3_uri_style}) + + def set_storage_class(self, relation_id: int, storage_class: str) -> None: + """Sets the storage class in application databag. 
+ + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + storage_class: the storage class. + """ + self.update_connection_info(relation_id, {"storage-class": storage_class}) + + def set_tls_ca_chain(self, relation_id: int, tls_ca_chain: List[str]) -> None: + """Sets the tls_ca_chain value in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + tls_ca_chain: the TLS Chain value. + """ + self.update_connection_info(relation_id, {"tls-ca-chain": tls_ca_chain}) + + def set_s3_api_version(self, relation_id: int, s3_api_version: str) -> None: + """Sets the S3 API version in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + s3_api_version: the S3 version value. + """ + self.update_connection_info(relation_id, {"s3-api-version": s3_api_version}) + + def set_delete_older_than_days(self, relation_id: int, days: int) -> None: + """Sets the retention days for full backups in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + days: the value. + """ + self.update_connection_info(relation_id, {"delete-older-than-days": str(days)}) + + def set_attributes(self, relation_id: int, attributes: List[str]) -> None: + """Sets the connection attributes in application databag. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + attributes: the attributes value. 
+ """ + self.update_connection_info(relation_id, {"attributes": attributes}) + + +class S3Event(RelationEvent): + """Base class for S3 storage events.""" + + @property + def bucket(self) -> Optional[str]: + """Returns the bucket name.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("bucket") + + @property + def access_key(self) -> Optional[str]: + """Returns the access key.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("access-key") + + @property + def secret_key(self) -> Optional[str]: + """Returns the secret key.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("secret-key") + + @property + def path(self) -> Optional[str]: + """Returns the path where data can be stored.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("path") + + @property + def endpoint(self) -> Optional[str]: + """Returns the endpoint address.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("endpoint") + + @property + def region(self) -> Optional[str]: + """Returns the region.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("region") + + @property + def s3_uri_style(self) -> Optional[str]: + """Returns the s3 uri style.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("s3-uri-style") + + @property + def storage_class(self) -> Optional[str]: + """Returns the storage class name.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("storage-class") + + @property + def tls_ca_chain(self) -> Optional[List[str]]: + """Returns the TLS CA chain.""" + if not self.relation.app: + return None + + tls_ca_chain = self.relation.data[self.relation.app].get("tls-ca-chain") + if tls_ca_chain is not None: + return 
json.loads(tls_ca_chain) + return None + + @property + def s3_api_version(self) -> Optional[str]: + """Returns the S3 API version.""" + if not self.relation.app: + return None + + return self.relation.data[self.relation.app].get("s3-api-version") + + @property + def delete_older_than_days(self) -> Optional[int]: + """Returns the retention days for full backups.""" + if not self.relation.app: + return None + + days = self.relation.data[self.relation.app].get("delete-older-than-days") + if days is None: + return None + return int(days) + + @property + def attributes(self) -> Optional[List[str]]: + """Returns the attributes.""" + if not self.relation.app: + return None + + attributes = self.relation.data[self.relation.app].get("attributes") + if attributes is not None: + return json.loads(attributes) + return None + + +class CredentialsChangedEvent(S3Event): + """Event emitted when S3 credential are changed on this relation.""" + + +class CredentialsGoneEvent(RelationEvent): + """Event emitted when S3 credential are removed from this relation.""" + + +class S3CredentialRequiresEvents(ObjectEvents): + """Event descriptor for events raised by the S3Provider.""" + + credentials_changed = EventSource(CredentialsChangedEvent) + credentials_gone = EventSource(CredentialsGoneEvent) + + +S3_REQUIRED_OPTIONS = ["access-key", "secret-key"] + + +class S3Requirer(Object): + """Requires-side of the s3 relation.""" + + on = S3CredentialRequiresEvents() # pyright: ignore[reportAssignmentType] + + def __init__( + self, charm: ops.charm.CharmBase, relation_name: str, bucket_name: Optional[str] = None + ): + """Manager of the s3 client relations.""" + super().__init__(charm, relation_name) + + self.relation_name = relation_name + self.charm = charm + self.local_app = self.charm.model.app + self.local_unit = self.charm.unit + self.bucket = bucket_name + + self.framework.observe( + self.charm.on[self.relation_name].relation_changed, self._on_relation_changed + ) + + 
self.framework.observe( + self.charm.on[self.relation_name].relation_joined, self._on_relation_joined + ) + + self.framework.observe( + self.charm.on[self.relation_name].relation_broken, + self._on_relation_broken, + ) + + def _generate_bucket_name(self, event: RelationJoinedEvent): + """Returns the bucket name generated from relation id.""" + return f"relation-{event.relation.id}" + + def _on_relation_joined(self, event: RelationJoinedEvent) -> None: + """Event emitted when the application joins the s3 relation.""" + if self.bucket is None: + self.bucket = self._generate_bucket_name(event) + self.update_connection_info(event.relation.id, {"bucket": self.bucket}) + + def fetch_relation_data(self) -> dict: + """Retrieves data from relation. + + This function can be used to retrieve data from a relation + in the charm code when outside an event callback. + + Returns: + a dict of the values stored in the relation data bag + for all relation instances (indexed by the relation id). + """ + data = {} + + for relation in self.relations: + data[relation.id] = self._load_relation_data(relation.data[self.charm.app]) + return data + + def update_connection_info(self, relation_id: int, connection_data: dict) -> None: + """Updates the credential data as set of key-value pairs in the relation. + + This function writes in the application data bag, therefore, + only the leader unit can call it. + + Args: + relation_id: the identifier for a particular relation. + connection_data: dict containing the key-value pairs + that should be updated. 
+ """ + # check and write changes only if you are the leader + if not self.local_unit.is_leader(): + return + + relation = self.charm.model.get_relation(self.relation_name, relation_id) + + if not relation: + return + + # update the databag, if connection data did not change with respect to before + # the relation changed event is not triggered + # configuration options that are list + s3_list_options = ["attributes", "tls-ca-chain"] + updated_connection_data = {} + for configuration_option, configuration_value in connection_data.items(): + if configuration_option in s3_list_options: + updated_connection_data[configuration_option] = json.dumps(configuration_value) + else: + updated_connection_data[configuration_option] = configuration_value + + relation.data[self.local_app].update(updated_connection_data) + logger.debug(f"Updated S3 credentials: {updated_connection_data}") + + def _load_relation_data(self, raw_relation_data: RelationDataContent) -> Dict[str, str]: + """Loads relation data from the relation data bag. + + Args: + raw_relation_data: Relation data from the databag + Returns: + dict: Relation data in dict format. + """ + connection_data = {} + for key in raw_relation_data: + try: + connection_data[key] = json.loads(raw_relation_data[key]) + except (json.decoder.JSONDecodeError, TypeError): + connection_data[key] = raw_relation_data[key] + return connection_data + + def _diff(self, event: RelationChangedEvent) -> Diff: + """Retrieves the diff of the data in the relation changed databag. + + Args: + event: relation changed event. + + Returns: + a Diff instance containing the added, deleted and changed + keys from the event relation databag. 
+ """ + return diff(event, self.local_unit) + + def _on_relation_changed(self, event: RelationChangedEvent) -> None: + """Notify the charm about the presence of S3 credentials.""" + # check if the mandatory options are in the relation data + contains_required_options = True + # get current credentials data + credentials = self.get_s3_connection_info() + # records missing options + missing_options = [] + for configuration_option in S3_REQUIRED_OPTIONS: + if configuration_option not in credentials: + contains_required_options = False + missing_options.append(configuration_option) + # emit credential change event only if all mandatory fields are present + if contains_required_options: + getattr(self.on, "credentials_changed").emit( + event.relation, app=event.app, unit=event.unit + ) + else: + logger.warning( + f"Some mandatory fields: {missing_options} are not present, do not emit credential change event!" + ) + + def get_s3_connection_info(self) -> Dict[str, str]: + """Return the s3 credentials as a dictionary.""" + for relation in self.relations: + if relation and relation.app: + return self._load_relation_data(relation.data[relation.app]) + + return {} + + def _on_relation_broken(self, event: RelationBrokenEvent) -> None: + """Notify the charm about a broken S3 credential store relation.""" + getattr(self.on, "credentials_gone").emit(event.relation, app=event.app, unit=event.unit) + + @property + def relations(self) -> List[Relation]: + """The list of Relation instances associated with this relation_name.""" + return list(self.charm.model.relations[self.relation_name]) diff --git a/lib/charms/redis_k8s/v0/redis.py b/lib/charms/redis_k8s/v0/redis.py new file mode 100644 index 0000000..a731507 --- /dev/null +++ b/lib/charms/redis_k8s/v0/redis.py @@ -0,0 +1,135 @@ +"""Library for the redis relation. + +This library contains the Requires and Provides classes for handling the +redis interface. 
+ +Import `RedisRequires` in your charm by adding the following to `src/charm.py`: +``` +from charms.redis_k8s.v0.redis import RedisRequires +``` +Define the following attributes in charm charm class for the library to be able to work with it +``` + on = RedisRelationCharmEvents() +``` +And then wherever you need to reference the relation data it will be available +in the property `relation_data`: +``` +redis_host = self.redis.relation_data.get("hostname") +redis_port = self.redis.relation_data.get("port") +``` +You will also need to add the following to `metadata.yaml`: +``` +requires: + redis: + interface: redis +``` +""" +import logging +import socket +from typing import Dict, Optional + +from ops.charm import CharmEvents +from ops.framework import EventBase, EventSource, Object + +# The unique Charmhub library identifier, never change it. +LIBID = "fe18a608cec5465fa5153e419abcad7b" + +# Increment this major API version when introducing breaking changes. +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version. 
+LIBPATCH = 6 + +logger = logging.getLogger(__name__) + +DEFAULT_REALTION_NAME = "redis" + +class RedisRelationUpdatedEvent(EventBase): + """An event for the redis relation having been updated.""" + + +class RedisRelationCharmEvents(CharmEvents): + """A class to carry custom charm events so requires can react to relation changes.""" + redis_relation_updated = EventSource(RedisRelationUpdatedEvent) + + +class RedisRequires(Object): + + def __init__(self, charm, relation_name: str = DEFAULT_REALTION_NAME): + """A class implementing the redis requires relation.""" + super().__init__(charm, relation_name) + self.framework.observe(charm.on[relation_name].relation_joined, self._on_relation_changed) + self.framework.observe(charm.on[relation_name].relation_changed, self._on_relation_changed) + self.framework.observe(charm.on[relation_name].relation_broken, self._on_relation_broken) + self.charm = charm + self.relation_name = relation_name + + def _on_relation_changed(self, event): + """Handle the relation changed event.""" + if not event.unit: + return + + # Trigger an event that our charm can react to. + self.charm.on.redis_relation_updated.emit() + + def _on_relation_broken(self, event): + """Handle the relation broken event.""" + # Trigger an event that our charm can react to. + self.charm.on.redis_relation_updated.emit() + + @property + def relation_data(self) -> Optional[Dict[str, str]]: + """Retrieve the relation data. + + Returns: + Dict: dict containing the relation data. + """ + relation = self.model.get_relation(self.relation_name) + if not relation or not relation.units: + return None + unit = next(iter(relation.units)) + return relation.data[unit] + + @property + def url(self) -> Optional[str]: + """Retrieve the Redis URL. + + Returns: + str: the Redis URL. 
+ """ + relation_data = self.relation_data + if not relation_data: + return None + redis_host = relation_data.get("hostname") + redis_port = relation_data.get("port") + return f"redis://{redis_host}:{redis_port}" + + +class RedisProvides(Object): + def __init__(self, charm, port): + """A class implementing the redis provides relation.""" + super().__init__(charm, DEFAULT_REALTION_NAME) + self.framework.observe(charm.on.redis_relation_changed, self._on_relation_changed) + self._port = port + self._charm = charm + + def _on_relation_changed(self, event): + """Handle the relation changed event.""" + event.relation.data[self.model.unit]["hostname"] = self._get_master_ip() + event.relation.data[self.model.unit]["port"] = str(self._port) + # The reactive Redis charm also exposes 'password'. When tackling + # https://github.com/canonical/redis-k8s/issues/7 add 'password' + # field so that it matches the exposed interface information from it. + # event.relation.data[self.unit]['password'] = '' + + def _bind_address(self, event): + """Convenience function for getting the unit address.""" + relation = self.model.get_relation(event.relation.name, event.relation.id) + if address := self.model.get_binding(relation).network.bind_address: + return address + return self.app.name + + def _get_master_ip(self) -> str: + """Gets the ip of the current redis master.""" + return socket.gethostbyname(self._charm.current_master) diff --git a/lib/charms/smtp_integrator/v0/smtp.py b/lib/charms/smtp_integrator/v0/smtp.py new file mode 100644 index 0000000..6238a10 --- /dev/null +++ b/lib/charms/smtp_integrator/v0/smtp.py @@ -0,0 +1,355 @@ +# Copyright 2024 Canonical Ltd. +# Licensed under the Apache2.0. See LICENSE file in charm source for details. + +"""Library to manage the integration with the SMTP Integrator charm. + +This library contains the Requires and Provides classes for handling the integration +between an application and a charm providing the `smtp` and `smtp-legacy` integrations. 
+If the requirer charm supports secrets, the preferred approach is to use the `smtp` +relation to leverage them. +This library also contains a `SmtpRelationData` class to wrap the SMTP data that will +be shared via the integration. + +### Requirer Charm + +```python + +from charms.smtp_integrator.v0.smtp import SmtpDataAvailableEvent, SmtpRequires + +class SmtpRequirerCharm(ops.CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.smtp = smtp.SmtpRequires(self) + self.framework.observe(self.smtp.on.smtp_data_available, self._handler) + ... + + def _handler(self, events: SmtpDataAvailableEvent) -> None: + ... + +``` + +As shown above, the library provides a custom event to handle the scenario in +which new SMTP data has been added or updated. + +### Provider Charm + +Following the previous example, this is an example of the provider charm. + +```python +from charms.smtp_integrator.v0.smtp import SmtpProvides + +class SmtpProviderCharm(ops.CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.smtp = SmtpProvides(self) + ... + +``` +The SmtpProvides object wraps the list of relations into a `relations` property +and provides an `update_relation_data` method to update the relation data by passing +a `SmtpRelationData` data object. + +```python +class SmtpProviderCharm(ops.CharmBase): + ... 
+ + def _on_config_changed(self, _) -> None: + for relation in self.model.relations[self.smtp.relation_name]: + self.smtp.update_relation_data(relation, self._get_smtp_data()) + +``` +""" + +# The unique Charmhub library identifier, never change it +LIBID = "09583c2f9c1d4c0f9a40244cfc20b0c2" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 9 + +PYDEPS = ["pydantic>=2"] + +# pylint: disable=wrong-import-position +import itertools +import logging +import typing +from enum import Enum +from typing import Dict, Optional + +import ops +from pydantic import BaseModel, Field, ValidationError + +logger = logging.getLogger(__name__) + +DEFAULT_RELATION_NAME = "smtp" +LEGACY_RELATION_NAME = "smtp-legacy" + + +class TransportSecurity(str, Enum): + """Represent the transport security values. + + Attributes: + NONE: none + STARTTLS: starttls + TLS: tls + """ + + NONE = "none" + STARTTLS = "starttls" + TLS = "tls" + + +class AuthType(str, Enum): + """Represent the auth type values. + + Attributes: + NONE: none + NOT_PROVIDED: not_provided + PLAIN: plain + """ + + NONE = "none" + NOT_PROVIDED = "not_provided" + PLAIN = "plain" + + +class SmtpRelationData(BaseModel): + """Represent the relation data. + + Attributes: + host: The hostname or IP address of the outgoing SMTP relay. + port: The port of the outgoing SMTP relay. + user: The SMTP AUTH user to use for the outgoing SMTP relay. + password: The SMTP AUTH password to use for the outgoing SMTP relay. + password_id: The secret ID where the SMTP AUTH password for the SMTP relay is stored. + auth_type: The type used to authenticate with the SMTP relay. + transport_security: The security protocol to use for the outgoing SMTP relay. + domain: The domain used by the sent emails from SMTP relay. 
+    """
+
+    host: str = Field(..., min_length=1)
+    port: int = Field(None, ge=1, le=65535)
+    user: Optional[str] = None
+    password: Optional[str] = None
+    password_id: Optional[str] = None
+    auth_type: AuthType
+    transport_security: TransportSecurity
+    domain: Optional[str] = None
+
+    def to_relation_data(self) -> Dict[str, str]:
+        """Convert an instance of SmtpRelationData to the relation representation.
+
+        Returns:
+            Dict containing the representation.
+        """
+        result = {
+            "host": str(self.host),
+            "port": str(self.port),
+            "auth_type": self.auth_type.value,
+            "transport_security": self.transport_security.value,
+        }
+        if self.domain:
+            result["domain"] = self.domain
+        if self.user:
+            result["user"] = self.user
+        if self.password:
+            result["password"] = self.password
+        if self.password_id:
+            result["password_id"] = self.password_id
+        return result
+
+
+class SmtpDataAvailableEvent(ops.RelationEvent):
+    """Smtp event emitted when relation data has changed.
+
+    Attributes:
+        host: The hostname or IP address of the outgoing SMTP relay.
+        port: The port of the outgoing SMTP relay.
+        user: The SMTP AUTH user to use for the outgoing SMTP relay.
+        password: The SMTP AUTH password to use for the outgoing SMTP relay.
+        password_id: The secret ID where the SMTP AUTH password for the SMTP relay is stored.
+        auth_type: The type used to authenticate with the SMTP relay.
+        transport_security: The security protocol to use for the outgoing SMTP relay.
+        domain: The domain used by the sent emails from SMTP relay.
+ """ + + @property + def host(self) -> str: + """Fetch the SMTP host from the relation.""" + assert self.relation.app + return typing.cast(str, self.relation.data[self.relation.app].get("host")) + + @property + def port(self) -> int: + """Fetch the SMTP port from the relation.""" + assert self.relation.app + return int(typing.cast(str, self.relation.data[self.relation.app].get("port"))) + + @property + def user(self) -> str: + """Fetch the SMTP user from the relation.""" + assert self.relation.app + return typing.cast(str, self.relation.data[self.relation.app].get("user")) + + @property + def password(self) -> str: + """Fetch the SMTP password from the relation.""" + assert self.relation.app + return typing.cast(str, self.relation.data[self.relation.app].get("password")) + + @property + def password_id(self) -> str: + """Fetch the SMTP password from the relation.""" + assert self.relation.app + return typing.cast(str, self.relation.data[self.relation.app].get("password_id")) + + @property + def auth_type(self) -> AuthType: + """Fetch the SMTP auth type from the relation.""" + assert self.relation.app + return AuthType(self.relation.data[self.relation.app].get("auth_type")) + + @property + def transport_security(self) -> TransportSecurity: + """Fetch the SMTP transport security protocol from the relation.""" + assert self.relation.app + return TransportSecurity(self.relation.data[self.relation.app].get("transport_security")) + + @property + def domain(self) -> str: + """Fetch the SMTP domain from the relation.""" + assert self.relation.app + return typing.cast(str, self.relation.data[self.relation.app].get("domain")) + + +class SmtpRequiresEvents(ops.CharmEvents): + """SMTP events. + + This class defines the events that a SMTP requirer can emit. + + Attributes: + smtp_data_available: the SmtpDataAvailableEvent. + """ + + smtp_data_available = ops.EventSource(SmtpDataAvailableEvent) + + +class SmtpRequires(ops.Object): + """Requirer side of the SMTP relation. 
+ + Attributes: + on: events the provider can emit. + """ + + on = SmtpRequiresEvents() + + def __init__(self, charm: ops.CharmBase, relation_name: str = DEFAULT_RELATION_NAME) -> None: + """Construct. + + Args: + charm: the provider charm. + relation_name: the relation name. + """ + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.framework.observe(charm.on[relation_name].relation_changed, self._on_relation_changed) + + def get_relation_data(self) -> Optional[SmtpRelationData]: + """Retrieve the relation data. + + Returns: + SmtpRelationData: the relation data. + """ + relation = self.model.get_relation(self.relation_name) + return self._get_relation_data_from_relation(relation) if relation else None + + def _get_relation_data_from_relation(self, relation: ops.Relation) -> SmtpRelationData: + """Retrieve the relation data. + + Args: + relation: the relation to retrieve the data from. + + Returns: + SmtpRelationData: the relation data. + """ + assert relation.app + relation_data = relation.data[relation.app] + return SmtpRelationData( + host=typing.cast(str, relation_data.get("host")), + port=typing.cast(int, relation_data.get("port")), + user=relation_data.get("user"), + password=relation_data.get("password"), + password_id=relation_data.get("password_id"), + auth_type=AuthType(relation_data.get("auth_type")), + transport_security=TransportSecurity(relation_data.get("transport_security")), + domain=relation_data.get("domain"), + ) + + def _is_relation_data_valid(self, relation: ops.Relation) -> bool: + """Validate the relation data. + + Args: + relation: the relation to validate. + + Returns: + true: if the relation data is valid. 
+        """
+        try:
+            _ = self._get_relation_data_from_relation(relation)
+            return True
+        except ValidationError as ex:
+            error_fields = set(
+                itertools.chain.from_iterable(error["loc"] for error in ex.errors())
+            )
+            error_field_str = " ".join(f"{f}" for f in error_fields)
+            logger.warning("Error validating the relation data %s", error_field_str)
+            return False
+
+    def _on_relation_changed(self, event: ops.RelationChangedEvent) -> None:
+        """Event emitted when the relation has changed.
+
+        Args:
+            event: event triggering this handler.
+        """
+        assert event.relation.app
+        relation_data = event.relation.data[event.relation.app]
+        if relation_data:
+            if relation_data["auth_type"] == AuthType.NONE.value:
+                logger.warning('Insecure setting: auth_type has a value "none"')
+            if relation_data["transport_security"] == TransportSecurity.NONE.value:
+                logger.warning('Insecure setting: transport_security has value "none"')
+            if self._is_relation_data_valid(event.relation):
+                self.on.smtp_data_available.emit(event.relation, app=event.app, unit=event.unit)
+
+
+class SmtpProvides(ops.Object):
+    """Provider side of the SMTP relation."""
+
+    def __init__(self, charm: ops.CharmBase, relation_name: str = DEFAULT_RELATION_NAME) -> None:
+        """Construct.
+
+        Args:
+            charm: the provider charm.
+            relation_name: the relation name.
+        """
+        super().__init__(charm, relation_name)
+        self.charm = charm
+        self.relation_name = relation_name
+
+    def update_relation_data(self, relation: ops.Relation, smtp_data: SmtpRelationData) -> None:
+        """Update the relation data.
+
+        Args:
+            relation: the relation for which to update the data.
+            smtp_data: a SmtpRelationData instance wrapping the data to be updated.
+ """ + relation_data = smtp_data.to_relation_data() + if relation_data["auth_type"] == AuthType.NONE.value: + logger.warning('Insecure setting: auth_type has a value "none"') + if relation_data["transport_security"] == TransportSecurity.NONE.value: + logger.warning('Insecure setting: transport_security has value "none"') + relation.data[self.charm.model.app].update(relation_data) diff --git a/lib/charms/traefik_k8s/v2/ingress.py b/lib/charms/traefik_k8s/v2/ingress.py new file mode 100644 index 0000000..407cfb5 --- /dev/null +++ b/lib/charms/traefik_k8s/v2/ingress.py @@ -0,0 +1,840 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +r"""# Interface Library for ingress. + +This library wraps relation endpoints using the `ingress` interface +and provides a Python API for both requesting and providing per-application +ingress, with load-balancing occurring across all units. + +## Getting Started + +To get started using the library, you just need to fetch the library using `charmcraft`. + +```shell +cd some-charm +charmcraft fetch-lib charms.traefik_k8s.v2.ingress +``` + +In the `metadata.yaml` of the charm, add the following: + +```yaml +requires: + ingress: + interface: ingress + limit: 1 +``` + +Then, to initialise the library: + +```python +from charms.traefik_k8s.v2.ingress import (IngressPerAppRequirer, + IngressPerAppReadyEvent, IngressPerAppRevokedEvent) + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + self.ingress = IngressPerAppRequirer(self, port=80) + # The following event is triggered when the ingress URL to be used + # by this deployment of the `SomeCharm` is ready (or changes). 
+ self.framework.observe( + self.ingress.on.ready, self._on_ingress_ready + ) + self.framework.observe( + self.ingress.on.revoked, self._on_ingress_revoked + ) + + def _on_ingress_ready(self, event: IngressPerAppReadyEvent): + logger.info("This app's ingress URL: %s", event.url) + + def _on_ingress_revoked(self, event: IngressPerAppRevokedEvent): + logger.info("This app no longer has ingress") +""" +import ipaddress +import json +import logging +import socket +import typing +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, MutableMapping, Optional, Sequence, Tuple, Union + +import pydantic +from ops.charm import CharmBase, RelationBrokenEvent, RelationEvent +from ops.framework import EventSource, Object, ObjectEvents, StoredState +from ops.model import ModelError, Relation, Unit +from pydantic import AnyHttpUrl, BaseModel, Field, validator + +# The unique Charmhub library identifier, never change it +LIBID = "e6de2a5cd5b34422a204668f3b8f90d2" + +# Increment this major API version when introducing breaking changes +LIBAPI = 2 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 13 + +PYDEPS = ["pydantic"] + +DEFAULT_RELATION_NAME = "ingress" +RELATION_INTERFACE = "ingress" + +log = logging.getLogger(__name__) +BUILTIN_JUJU_KEYS = {"ingress-address", "private-address", "egress-subnets"} + +PYDANTIC_IS_V1 = int(pydantic.version.VERSION.split(".")[0]) < 2 +if PYDANTIC_IS_V1: + + class DatabagModel(BaseModel): # type: ignore + """Base databag model.""" + + class Config: + """Pydantic config.""" + + allow_population_by_field_name = True + """Allow instantiating this class by field name (instead of forcing alias).""" + + _NEST_UNDER = None + + @classmethod + def load(cls, databag: MutableMapping): + """Load this model from a Juju databag.""" + if cls._NEST_UNDER: + return cls.parse_obj(json.loads(databag[cls._NEST_UNDER])) + + try: + data = { + k: 
json.loads(v) + for k, v in databag.items() + # Don't attempt to parse model-external values + if k in {f.alias for f in cls.__fields__.values()} # type: ignore + } + except json.JSONDecodeError as e: + msg = f"invalid databag contents: expecting json. {databag}" + log.error(msg) + raise DataValidationError(msg) from e + + try: + return cls.parse_raw(json.dumps(data)) # type: ignore + except pydantic.ValidationError as e: + msg = f"failed to validate databag: {databag}" + log.debug(msg, exc_info=True) + raise DataValidationError(msg) from e + + def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True): + """Write the contents of this model to Juju databag. + + :param databag: the databag to write the data to. + :param clear: ensure the databag is cleared before writing it. + """ + if clear and databag: + databag.clear() + + if databag is None: + databag = {} + + if self._NEST_UNDER: + databag[self._NEST_UNDER] = self.json(by_alias=True, exclude_defaults=True) + return databag + + for key, value in self.dict(by_alias=True, exclude_defaults=True).items(): # type: ignore + databag[key] = json.dumps(value) + + return databag + +else: + from pydantic import ConfigDict + + class DatabagModel(BaseModel): + """Base databag model.""" + + model_config = ConfigDict( + # tolerate additional keys in databag + extra="ignore", + # Allow instantiating this class by field name (instead of forcing alias). + populate_by_name=True, + # Custom config key: whether to nest the whole datastructure (as json) + # under a field or spread it out at the toplevel. 
+ _NEST_UNDER=None, + ) # type: ignore + """Pydantic config.""" + + @classmethod + def load(cls, databag: MutableMapping): + """Load this model from a Juju databag.""" + nest_under = cls.model_config.get("_NEST_UNDER") + if nest_under: + return cls.model_validate(json.loads(databag[nest_under])) # type: ignore + + try: + data = { + k: json.loads(v) + for k, v in databag.items() + # Don't attempt to parse model-external values + if k in {(f.alias or n) for n, f in cls.__fields__.items()} # type: ignore + } + except json.JSONDecodeError as e: + msg = f"invalid databag contents: expecting json. {databag}" + log.error(msg) + raise DataValidationError(msg) from e + + try: + return cls.model_validate_json(json.dumps(data)) # type: ignore + except pydantic.ValidationError as e: + msg = f"failed to validate databag: {databag}" + log.debug(msg, exc_info=True) + raise DataValidationError(msg) from e + + def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True): + """Write the contents of this model to Juju databag. + + :param databag: the databag to write the data to. + :param clear: ensure the databag is cleared before writing it. 
+ """ + if clear and databag: + databag.clear() + + if databag is None: + databag = {} + nest_under = self.model_config.get("_NEST_UNDER") + if nest_under: + databag[nest_under] = self.model_dump_json( # type: ignore + by_alias=True, + # skip keys whose values are default + exclude_defaults=True, + ) + return databag + + dct = self.model_dump(mode="json", by_alias=True, exclude_defaults=True) # type: ignore + databag.update({k: json.dumps(v) for k, v in dct.items()}) + return databag + + +# todo: import these models from charm-relation-interfaces/ingress/v2 instead of redeclaring them +class IngressUrl(BaseModel): + """Ingress url schema.""" + + url: AnyHttpUrl + + +class IngressProviderAppData(DatabagModel): + """Ingress application databag schema.""" + + ingress: IngressUrl + + +class ProviderSchema(BaseModel): + """Provider schema for Ingress.""" + + app: IngressProviderAppData + + +class IngressRequirerAppData(DatabagModel): + """Ingress requirer application databag model.""" + + model: str = Field(description="The model the application is in.") + name: str = Field(description="the name of the app requesting ingress.") + port: int = Field(description="The port the app wishes to be exposed.") + + # fields on top of vanilla 'ingress' interface: + strip_prefix: Optional[bool] = Field( + default=False, + description="Whether to strip the prefix from the ingress url.", + alias="strip-prefix", + ) + redirect_https: Optional[bool] = Field( + default=False, + description="Whether to redirect http traffic to https.", + alias="redirect-https", + ) + + scheme: Optional[str] = Field( + default="http", description="What scheme to use in the generated ingress url" + ) + + @validator("scheme", pre=True) + def validate_scheme(cls, scheme): # noqa: N805 # pydantic wants 'cls' as first arg + """Validate scheme arg.""" + if scheme not in {"http", "https", "h2c"}: + raise ValueError("invalid scheme: should be one of `http|https|h2c`") + return scheme + + @validator("port", 
pre=True) + def validate_port(cls, port): # noqa: N805 # pydantic wants 'cls' as first arg + """Validate port.""" + assert isinstance(port, int), type(port) + assert 0 < port < 65535, "port out of TCP range" + return port + + +class IngressRequirerUnitData(DatabagModel): + """Ingress requirer unit databag model.""" + + host: str = Field(description="Hostname at which the unit is reachable.") + ip: Optional[str] = Field( + None, + description="IP at which the unit is reachable, " + "IP can only be None if the IP information can't be retrieved from juju.", + ) + + @validator("host", pre=True) + def validate_host(cls, host): # noqa: N805 # pydantic wants 'cls' as first arg + """Validate host.""" + assert isinstance(host, str), type(host) + return host + + @validator("ip", pre=True) + def validate_ip(cls, ip): # noqa: N805 # pydantic wants 'cls' as first arg + """Validate ip.""" + if ip is None: + return None + if not isinstance(ip, str): + raise TypeError(f"got ip of type {type(ip)} instead of expected str") + try: + ipaddress.IPv4Address(ip) + return ip + except ipaddress.AddressValueError: + pass + try: + ipaddress.IPv6Address(ip) + return ip + except ipaddress.AddressValueError: + raise ValueError(f"{ip!r} is not a valid ip address") + + +class RequirerSchema(BaseModel): + """Requirer schema for Ingress.""" + + app: IngressRequirerAppData + unit: IngressRequirerUnitData + + +class IngressError(RuntimeError): + """Base class for custom errors raised by this library.""" + + +class NotReadyError(IngressError): + """Raised when a relation is not ready.""" + + +class DataValidationError(IngressError): + """Raised when data validation fails on IPU relation data.""" + + +class _IngressPerAppBase(Object): + """Base class for IngressPerUnit interface classes.""" + + def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME): + super().__init__(charm, relation_name) + + self.charm: CharmBase = charm + self.relation_name = relation_name + self.app = 
self.charm.app + self.unit = self.charm.unit + + observe = self.framework.observe + rel_events = charm.on[relation_name] + observe(rel_events.relation_created, self._handle_relation) + observe(rel_events.relation_joined, self._handle_relation) + observe(rel_events.relation_changed, self._handle_relation) + observe(rel_events.relation_departed, self._handle_relation) + observe(rel_events.relation_broken, self._handle_relation_broken) + observe(charm.on.leader_elected, self._handle_upgrade_or_leader) # type: ignore + observe(charm.on.upgrade_charm, self._handle_upgrade_or_leader) # type: ignore + + @property + def relations(self): + """The list of Relation instances associated with this endpoint.""" + return list(self.charm.model.relations[self.relation_name]) + + def _handle_relation(self, event): + """Subclasses should implement this method to handle a relation update.""" + pass + + def _handle_relation_broken(self, event): + """Subclasses should implement this method to handle a relation breaking.""" + pass + + def _handle_upgrade_or_leader(self, event): + """Subclasses should implement this method to handle upgrades or leadership change.""" + pass + + +class _IPAEvent(RelationEvent): + __args__: Tuple[str, ...] 
= () + __optional_kwargs__: Dict[str, Any] = {} + + @classmethod + def __attrs__(cls): + return cls.__args__ + tuple(cls.__optional_kwargs__.keys()) + + def __init__(self, handle, relation, *args, **kwargs): + super().__init__(handle, relation) + + if not len(self.__args__) == len(args): + raise TypeError("expected {} args, got {}".format(len(self.__args__), len(args))) + + for attr, obj in zip(self.__args__, args): + setattr(self, attr, obj) + for attr, default in self.__optional_kwargs__.items(): + obj = kwargs.get(attr, default) + setattr(self, attr, obj) + + def snapshot(self): + dct = super().snapshot() + for attr in self.__attrs__(): + obj = getattr(self, attr) + try: + dct[attr] = obj + except ValueError as e: + raise ValueError( + "cannot automagically serialize {}: " + "override this method and do it " + "manually.".format(obj) + ) from e + + return dct + + def restore(self, snapshot) -> None: + super().restore(snapshot) + for attr, obj in snapshot.items(): + setattr(self, attr, obj) + + +class IngressPerAppDataProvidedEvent(_IPAEvent): + """Event representing that ingress data has been provided for an app.""" + + __args__ = ("name", "model", "hosts", "strip_prefix", "redirect_https") + + if typing.TYPE_CHECKING: + name: Optional[str] = None + model: Optional[str] = None + # sequence of hostname, port dicts + hosts: Sequence["IngressRequirerUnitData"] = () + strip_prefix: bool = False + redirect_https: bool = False + + +class IngressPerAppDataRemovedEvent(RelationEvent): + """Event representing that ingress data has been removed for an app.""" + + +class IngressPerAppProviderEvents(ObjectEvents): + """Container for IPA Provider events.""" + + data_provided = EventSource(IngressPerAppDataProvidedEvent) + data_removed = EventSource(IngressPerAppDataRemovedEvent) + + +@dataclass +class IngressRequirerData: + """Data exposed by the ingress requirer to the provider.""" + + app: "IngressRequirerAppData" + units: List["IngressRequirerUnitData"] + + +class 
IngressPerAppProvider(_IngressPerAppBase): + """Implementation of the provider of ingress.""" + + on = IngressPerAppProviderEvents() # type: ignore + + def __init__( + self, + charm: CharmBase, + relation_name: str = DEFAULT_RELATION_NAME, + ): + """Constructor for IngressPerAppProvider. + + Args: + charm: The charm that is instantiating the instance. + relation_name: The name of the relation endpoint to bind to + (defaults to "ingress"). + """ + super().__init__(charm, relation_name) + + def _handle_relation(self, event): + # created, joined or changed: if remote side has sent the required data: + # notify listeners. + if self.is_ready(event.relation): + data = self.get_data(event.relation) + self.on.data_provided.emit( # type: ignore + event.relation, + data.app.name, + data.app.model, + [unit.dict() for unit in data.units], + data.app.strip_prefix or False, + data.app.redirect_https or False, + ) + + def _handle_relation_broken(self, event): + self.on.data_removed.emit(event.relation) # type: ignore + + def wipe_ingress_data(self, relation: Relation): + """Clear ingress data from relation.""" + assert self.unit.is_leader(), "only leaders can do this" + try: + relation.data + except ModelError as e: + log.warning( + "error {} accessing relation data for {!r}. 
" + "Probably a ghost of a dead relation is still " + "lingering around.".format(e, relation.name) + ) + return + del relation.data[self.app]["ingress"] + + def _get_requirer_units_data(self, relation: Relation) -> List["IngressRequirerUnitData"]: + """Fetch and validate the requirer's app databag.""" + out: List["IngressRequirerUnitData"] = [] + + unit: Unit + for unit in relation.units: + databag = relation.data[unit] + try: + data = IngressRequirerUnitData.load(databag) + out.append(data) + except pydantic.ValidationError: + log.info(f"failed to validate remote unit data for {unit}") + raise + return out + + @staticmethod + def _get_requirer_app_data(relation: Relation) -> "IngressRequirerAppData": + """Fetch and validate the requirer's app databag.""" + app = relation.app + if app is None: + raise NotReadyError(relation) + + databag = relation.data[app] + return IngressRequirerAppData.load(databag) + + def get_data(self, relation: Relation) -> IngressRequirerData: + """Fetch the remote (requirer) app and units' databags.""" + try: + return IngressRequirerData( + self._get_requirer_app_data(relation), self._get_requirer_units_data(relation) + ) + except (pydantic.ValidationError, DataValidationError) as e: + raise DataValidationError("failed to validate ingress requirer data") from e + + def is_ready(self, relation: Optional[Relation] = None): + """The Provider is ready if the requirer has sent valid data.""" + if not relation: + return any(map(self.is_ready, self.relations)) + + try: + self.get_data(relation) + except (DataValidationError, NotReadyError) as e: + log.debug("Provider not ready; validation error encountered: %s" % str(e)) + return False + return True + + def _published_url(self, relation: Relation) -> Optional["IngressProviderAppData"]: + """Fetch and validate this app databag; return the ingress url.""" + if not self.is_ready(relation) or not self.unit.is_leader(): + # Handle edge case where remote app name can be missing, e.g., + # 
relation_broken events. + # Also, only leader units can read own app databags. + # FIXME https://github.com/canonical/traefik-k8s-operator/issues/34 + return None + + # fetch the provider's app databag + databag = relation.data[self.app] + if not databag.get("ingress"): + raise NotReadyError("This application did not `publish_url` yet.") + + return IngressProviderAppData.load(databag) + + def publish_url(self, relation: Relation, url: str): + """Publish to the app databag the ingress url.""" + ingress_url = {"url": url} + IngressProviderAppData(ingress=ingress_url).dump(relation.data[self.app]) # type: ignore + + @property + def proxied_endpoints(self) -> Dict[str, Dict[str, str]]: + """Returns the ingress settings provided to applications by this IngressPerAppProvider. + + For example, when this IngressPerAppProvider has provided the + `http://foo.bar/my-model.my-app` URL to the my-app application, the returned dictionary + will be: + + ``` + { + "my-app": { + "url": "http://foo.bar/my-model.my-app" + } + } + ``` + """ + results: Dict[str, Dict[str, str]] = {} + + for ingress_relation in self.relations: + if not ingress_relation.app: + log.warning( + f"no app in relation {ingress_relation} when fetching proxied endpoints: skipping" + ) + continue + try: + ingress_data = self._published_url(ingress_relation) + except NotReadyError: + log.warning( + f"no published url found in {ingress_relation}: " + f"traefik didn't publish_url yet to this relation." 
+ ) + continue + + if not ingress_data: + log.warning(f"relation {ingress_relation} not ready yet: try again in some time.") + continue + if PYDANTIC_IS_V1: + results[ingress_relation.app.name] = ingress_data.ingress.dict() + else: + results[ingress_relation.app.name] = ingress_data.ingress.model_dump(mode="json") + return results + + +class IngressPerAppReadyEvent(_IPAEvent): + """Event representing that ingress for an app is ready.""" + + __args__ = ("url",) + if typing.TYPE_CHECKING: + url: Optional[str] = None + + +class IngressPerAppRevokedEvent(RelationEvent): + """Event representing that ingress for an app has been revoked.""" + + +class IngressPerAppRequirerEvents(ObjectEvents): + """Container for IPA Requirer events.""" + + ready = EventSource(IngressPerAppReadyEvent) + revoked = EventSource(IngressPerAppRevokedEvent) + + +class IngressPerAppRequirer(_IngressPerAppBase): + """Implementation of the requirer of the ingress relation.""" + + on = IngressPerAppRequirerEvents() # type: ignore + + # used to prevent spurious urls to be sent out if the event we're currently + # handling is a relation-broken one. + _stored = StoredState() + + def __init__( + self, + charm: CharmBase, + relation_name: str = DEFAULT_RELATION_NAME, + *, + host: Optional[str] = None, + ip: Optional[str] = None, + port: Optional[int] = None, + strip_prefix: bool = False, + redirect_https: bool = False, + # fixme: this is horrible UX. + # shall we switch to manually calling provide_ingress_requirements with all args when ready? + scheme: Union[Callable[[], str], str] = lambda: "http", + ): + """Constructor for IngressRequirer. + + The request args can be used to specify the ingress properties when the + instance is created. If any are set, at least `port` is required, and + they will be sent to the ingress provider as soon as it is available. + All request args must be given as keyword args. + + Args: + charm: the charm that is instantiating the library. 
+ relation_name: the name of the relation endpoint to bind to (defaults to `ingress`); + relation must be of interface type `ingress` and have "limit: 1") + host: Hostname to be used by the ingress provider to address the requiring + application; if unspecified, the default Kubernetes service name will be used. + ip: Alternative addressing method other than host to be used by the ingress provider; + if unspecified, binding address from juju network API will be used. + strip_prefix: configure Traefik to strip the path prefix. + redirect_https: redirect incoming requests to HTTPS. + scheme: callable returning the scheme to use when constructing the ingress url. + Or a string, if the scheme is known and stable at charm-init-time. + + Request Args: + port: the port of the service + """ + super().__init__(charm, relation_name) + self.charm: CharmBase = charm + self.relation_name = relation_name + self._strip_prefix = strip_prefix + self._redirect_https = redirect_https + self._get_scheme = scheme if callable(scheme) else lambda: scheme + + self._stored.set_default(current_url=None) # type: ignore + + # if instantiated with a port, and we are related, then + # we immediately publish our ingress data to speed up the process. 
+ if port: + self._auto_data = host, ip, port + else: + self._auto_data = None + + def _handle_relation(self, event): + # created, joined or changed: if we have auto data: publish it + self._publish_auto_data() + if self.is_ready(): + # Avoid spurious events, emit only when there is a NEW URL available + new_url = ( + None + if isinstance(event, RelationBrokenEvent) + else self._get_url_from_relation_data() + ) + if self._stored.current_url != new_url: # type: ignore + self._stored.current_url = new_url # type: ignore + self.on.ready.emit(event.relation, new_url) # type: ignore + + def _handle_relation_broken(self, event): + self._stored.current_url = None # type: ignore + self.on.revoked.emit(event.relation) # type: ignore + + def _handle_upgrade_or_leader(self, event): + """On upgrade/leadership change: ensure we publish the data we have.""" + self._publish_auto_data() + + def is_ready(self): + """The Requirer is ready if the Provider has sent valid data.""" + try: + return bool(self._get_url_from_relation_data()) + except DataValidationError as e: + log.debug("Requirer not ready; validation error encountered: %s" % str(e)) + return False + + def _publish_auto_data(self): + if self._auto_data: + host, ip, port = self._auto_data + self.provide_ingress_requirements(host=host, ip=ip, port=port) + + def provide_ingress_requirements( + self, + *, + scheme: Optional[str] = None, + host: Optional[str] = None, + ip: Optional[str] = None, + port: int, + ): + """Publishes the data that Traefik needs to provide ingress. + + Args: + scheme: Scheme to be used; if unspecified, use the one used by __init__. + host: Hostname to be used by the ingress provider to address the + requirer unit; if unspecified, FQDN will be used instead + ip: Alternative addressing method other than host to be used by the ingress provider. + if unspecified, binding address from juju network API will be used. 
+ port: the port of the service (required) + """ + for relation in self.relations: + self._provide_ingress_requirements(scheme, host, ip, port, relation) + + def _provide_ingress_requirements( + self, + scheme: Optional[str], + host: Optional[str], + ip: Optional[str], + port: int, + relation: Relation, + ): + if self.unit.is_leader(): + self._publish_app_data(scheme, port, relation) + + self._publish_unit_data(host, ip, relation) + + def _publish_unit_data( + self, + host: Optional[str], + ip: Optional[str], + relation: Relation, + ): + if not host: + host = socket.getfqdn() + + if ip is None: + network_binding = self.charm.model.get_binding(relation) + if ( + network_binding is not None + and (bind_address := network_binding.network.bind_address) is not None + ): + ip = str(bind_address) + else: + log.error("failed to retrieve ip information from juju") + + unit_databag = relation.data[self.unit] + try: + IngressRequirerUnitData(host=host, ip=ip).dump(unit_databag) + except pydantic.ValidationError as e: + msg = "failed to validate unit data" + log.info(msg, exc_info=True) # log to INFO because this might be expected + raise DataValidationError(msg) from e + + def _publish_app_data( + self, + scheme: Optional[str], + port: int, + relation: Relation, + ): + # assumes leadership! + app_databag = relation.data[self.app] + + if not scheme: + # If scheme was not provided, use the one given to the constructor. 
+ scheme = self._get_scheme() + + try: + IngressRequirerAppData( # type: ignore # pyright does not like aliases + model=self.model.name, + name=self.app.name, + scheme=scheme, + port=port, + strip_prefix=self._strip_prefix, # type: ignore # pyright does not like aliases + redirect_https=self._redirect_https, # type: ignore # pyright does not like aliases + ).dump(app_databag) + except pydantic.ValidationError as e: + msg = "failed to validate app data" + log.info(msg, exc_info=True) # log to INFO because this might be expected + raise DataValidationError(msg) from e + + @property + def relation(self): + """The established Relation instance, or None.""" + return self.relations[0] if self.relations else None + + def _get_url_from_relation_data(self) -> Optional[str]: + """The full ingress URL to reach the current unit. + + Returns None if the URL isn't available yet. + """ + relation = self.relation + if not relation or not relation.app: + return None + + # fetch the provider's app databag + try: + databag = relation.data[relation.app] + except ModelError as e: + log.debug( + f"Error {e} attempting to read remote app data; " + f"probably we are in a relation_departed hook" + ) + return None + + if not databag: # not ready yet + return None + + return str(IngressProviderAppData.load(databag).ingress.url) + + @property + def url(self) -> Optional[str]: + """The full ingress URL to reach the current unit. + + Returns None if the URL isn't available yet. + """ + data = ( + typing.cast(Optional[str], self._stored.current_url) # type: ignore + or self._get_url_from_relation_data() + ) + return data diff --git a/penpot_rock/rockcraft.yaml b/penpot_rock/rockcraft.yaml new file mode 100644 index 0000000..db7981c --- /dev/null +++ b/penpot_rock/rockcraft.yaml @@ -0,0 +1,230 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +name: penpot +summary: Web-based open-source design tool. 
+description: Penpot is the web-based open-source design tool that bridges the gap between designers and developers. +version: &penpot-version 2.1.1 +license: MPL-2.0 + +base: ubuntu@24.04 +build-base: ubuntu@24.04 +platforms: + amd64: + +run-user: _daemon_ + +parts: + frontend: + plugin: nil + source-type: git + source: https://github.com/penpot/penpot.git + source-tag: *penpot-version + stage-packages: + - nginx-full + - gettext-base + build-packages: + - openjdk-21-jdk + - rlwrap + - npm + - rsync + build-environment: + - VERSION: *penpot-version + override-build: | + craftctl default + + # install clojure + curl -L https://github.com/clojure/brew-install/releases/download/1.11.3.1463/linux-install.sh -o install-clojure + echo "0c41063a2fefb53a31bc1bc236899955f759c5103dc0495489cdd74bf8f114bb install-clojure" | shasum -c + chmod +x install-clojure + ./install-clojure + + cd frontend + + # install yarn + npm install --global corepack + corepack enable + corepack prepare yarn@4.2.2 + + # build assets + export NODE_ENV=production + yarn install + clojure -M:dev:shadow-cljs release main --config-merge "{:release-version \"$(git rev-parse --short HEAD)-$(date +%s)\"}" + yarn run compile + sed -i -re "s/\%version\%/${VERSION}/g" ./resources/public/index.html + sed -i -re "s/\%buildDate\%/$(date -R);/g" ./resources/public/index.html + echo ${VERSION} > ./resources/public/version.txt + + mkdir -p $CRAFT_PART_INSTALL/var/www/ + mkdir -p $CRAFT_PART_INSTALL/etc/nginx/ + + cd .. 
+ cp -r ./frontend/resources/public $CRAFT_PART_INSTALL/var/www/app + cp ./docker/images/files/nginx-mime.types $CRAFT_PART_INSTALL/etc/nginx/mime.types + cp ./docker/images/files/config.js $CRAFT_PART_INSTALL/var/www/app/js/config.js + cp ./docker/images/files/nginx.conf $CRAFT_PART_INSTALL/etc/nginx/nginx.conf.template + mkdir -p $CRAFT_PART_INSTALL/opt/penpot/frontend + cp ./docker/images/files/nginx-entrypoint.sh $CRAFT_PART_INSTALL/opt/penpot/frontend/nginx-entrypoint.sh + chmod +x $CRAFT_PART_INSTALL/opt/penpot/frontend/nginx-entrypoint.sh + + # nginx runs as non-root user + sed -i "s/listen 80 default_server;/listen 8080 default_server;/g" $CRAFT_PART_INSTALL/etc/nginx/nginx.conf.template + sed -i "s/pid \\/run\\/nginx.pid;/pid \\/opt\\/penpot\\/frontend\\/nginx.pid;/g" $CRAFT_PART_INSTALL/etc/nginx/nginx.conf.template + sed -i "s/user www-data;/user _daemon_;/g" $CRAFT_PART_INSTALL/etc/nginx/nginx.conf.template + + override-stage: | + chown -R 584792:584792 $CRAFT_PART_INSTALL/etc/nginx/ + chown -R 584792:584792 $CRAFT_PART_INSTALL/var/www/app/ + chown -R 584792:584792 $CRAFT_PART_INSTALL/var/lib/nginx/ + chown -R 584792:584792 $CRAFT_PART_INSTALL/opt/penpot/frontend/ + + craftctl default + + backend: + plugin: nil + source-type: git + source: https://github.com/penpot/penpot.git + source-tag: *penpot-version + build-packages: + - openjdk-21-jdk + - rlwrap + - npm + - rsync + stage-packages: + - curl + - openjdk-21-jdk + - imagemagick + - webp + - rlwrap + - fontconfig + - woff-tools + - woff2 + - python3 + - python3-tabulate + - fontforge + build-environment: + - VERSION: *penpot-version + override-build: | + craftctl default + + # install clojure + curl -L https://github.com/clojure/brew-install/releases/download/1.11.3.1463/linux-install.sh -o install-clojure + echo "0c41063a2fefb53a31bc1bc236899955f759c5103dc0495489cdd74bf8f114bb install-clojure" | shasum -c + chmod +x install-clojure + ./install-clojure + + # install babashka + curl -L 
https://raw.githubusercontent.com/babashka/babashka/v1.3.191/install -o install-babashka + echo "b1fa184c87f5115251cc38bcc999221c23b458df608cfeb6395a427185eb708c install-babashka" | shasum -c + chmod +x install-babashka + ./install-babashka + + cd backend + + mkdir -p target/classes + mkdir -p target/dist + echo $VERSION > target/classes/version.txt + cp ../CHANGES.md target/classes/changelog.md + + clojure -T:build jar + mv target/penpot.jar target/dist/penpot.jar + cp resources/log4j2.xml target/dist/log4j2.xml + cp scripts/run.template.sh target/dist/run.sh + cp scripts/manage.py target/dist/manage.py + chmod +x target/dist/run.sh + chmod +x target/dist/manage.py + + # Prefetch templates + mkdir builtin-templates + + bb ./scripts/prefetch-templates.clj resources/app/onboarding.edn builtin-templates/ + cp -r builtin-templates target/dist/ + + mkdir -p $CRAFT_PART_INSTALL/opt/penpot/ + cp -r target/dist/ $CRAFT_PART_INSTALL/opt/penpot/backend/ + + override-stage: | + chown -R 584792:584792 $CRAFT_PART_INSTALL/opt/penpot/backend/ + rm -rf $CRAFT_PART_INSTALL/dev + cd $CRAFT_PART_INSTALL/usr/bin + for bin in $(ls *-im6.q16) + do ln -s ./$bin $(basename $bin -im6.q16) + done + craftctl default + + exporter: + plugin: nil + source-type: git + source: https://github.com/penpot/penpot.git + source-tag: *penpot-version + build-packages: + - openjdk-21-jdk + - rlwrap + - npm + - rsync + stage-packages: + - nodejs + - libasound2t64 + - libatk-bridge2.0-0t64 + - libatk1.0-0t64 + - libatspi2.0-0t64 + - libcairo2 + - libcups2t64 + - libdbus-1-3 + - libdrm2 + - libgbm1 + - libglib2.0-0t64 + - libnspr4 + - libnss3 + - libpango-1.0-0 + - libx11-6 + - libxcb1 + - libxcomposite1 + - libxdamage1 + - libxext6 + - libxfixes3 + - libxkbcommon0 + - libxrandr2 + build-environment: + - VERSION: *penpot-version + override-build: | + craftctl default + + # install clojure + curl -L https://github.com/clojure/brew-install/releases/download/1.11.3.1463/linux-install.sh -o install-clojure + 
echo "0c41063a2fefb53a31bc1bc236899955f759c5103dc0495489cdd74bf8f114bb install-clojure" | shasum -c + chmod +x install-clojure + ./install-clojure + + cd exporter + + # install yarn + npm install --global corepack + corepack enable + corepack prepare yarn@4.2.2 + + export NODE_ENV=production + yarn install + clojure -J-Xms100M -J-Xmx1000M -J-XX:+UseSerialGC -M:dev:shadow-cljs release main + mkdir -p target + rm -rf target/app + cp ../.yarnrc.yml target/ + cp yarn.lock target/ + cp package.json target/ + sed -i -re "s/\%version\%/$VERSION/g" ./target/app.js + + mkdir -p $CRAFT_PART_INSTALL/opt/penpot/ + cp -r target/ $CRAFT_PART_INSTALL/opt/penpot/exporter + + cd $CRAFT_PART_INSTALL/opt/penpot/exporter + yarn install + + mkdir -p $CRAFT_PART_INSTALL/opt/penpot/exporter/browsers + + export PLAYWRIGHT_BROWSERS_PATH=$CRAFT_PART_INSTALL/opt/penpot/exporter/browsers + yarn run playwright install chromium + + override-stage: | + chown -R 584792:584792 $CRAFT_PART_INSTALL/opt/penpot/exporter/ + + craftctl default diff --git a/pyproject.toml b/pyproject.toml index 0fce3f3..8264620 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,7 +26,7 @@ select = ["E", "W", "F", "C", "N", "R", "D", "H"] # Ignore D107 Missing docstring in __init__ ignore = ["W503", "E501", "D107"] # D100, D101, D102, D103: Ignore missing docstrings in tests -per-file-ignores = ["tests/*:D100,D101,D102,D103,D104,D205,D212,D415"] +per-file-ignores = ["src/charm.py:DCO060", "tests/*:D100,D101,D102,D103,D104,D205,D212,D415"] docstring-convention = "google" [tool.isort] @@ -76,4 +76,5 @@ per-file-ignores = {"tests/*" = ["D100","D101","D102","D103","D104"]} max-complexity = 10 [tool.codespell] +ignore-words-list = "edn" skip = "build,lib,venv,icon.svg,.tox,.git,.mypy_cache,.ruff_cache,.coverage" diff --git a/requirements.txt b/requirements.txt index aaa16b1..d136cbf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,2 @@ -ops >= 2.2.0 +ops==2.15.0 +dnspython==2.6.1 diff --git a/src/charm.py 
# Learn more at: https://juju.is/docs/sdk

"""Penpot charm service."""

import logging
import secrets
import typing

import dns.resolver
import ops
from charms.data_platform_libs.v0.data_interfaces import DatabaseRequires
from charms.data_platform_libs.v0.s3 import S3Requirer
from charms.redis_k8s.v0.redis import RedisRelationCharmEvents, RedisRequires
from charms.smtp_integrator.v0.smtp import SmtpRequires, TransportSecurity
from charms.traefik_k8s.v2.ingress import IngressPerAppRequirer

logger = logging.getLogger(__name__)


class PenpotCharm(ops.CharmBase):
    """Charm the service."""

    # The redis charm library requires its custom events to be attached to
    # the charm's event source.
    on = RedisRelationCharmEvents()

    def __init__(self, *args: typing.Any):
        """Construct.

        Args:
            args: Arguments passed to the CharmBase parent constructor.
        """
        super().__init__(*args)
        self.container = self.unit.get_container("penpot")
        self.postgresql = DatabaseRequires(
            self, relation_name="postgresql", database_name=self.app.name
        )
        self.redis = RedisRequires(self, "redis")
        self.smtp = SmtpRequires(self)
        self.s3 = S3Requirer(self, relation_name="s3")
        self.ingress = IngressPerAppRequirer(self, port=8080)
        # Every lifecycle/integration event funnels into a single holistic
        # reconcile handler; ordering between events therefore doesn't matter.
        self.framework.observe(self.on.config_changed, self._reconcile)
        self.framework.observe(self.on.penpot_peer_relation_created, self._reconcile)
        self.framework.observe(self.on.penpot_peer_relation_changed, self._reconcile)
        self.framework.observe(self.on.penpot_peer_relation_departed, self._reconcile)
        self.framework.observe(self.on.secret_changed, self._reconcile)
        self.framework.observe(self.postgresql.on.database_created, self._reconcile)
        self.framework.observe(self.postgresql.on.endpoints_changed, self._reconcile)
        self.framework.observe(self.on.postgresql_relation_broken, self._reconcile)
        self.framework.observe(self.redis.charm.on.redis_relation_updated, self._reconcile)
        self.framework.observe(self.on.redis_relation_broken, self._reconcile)
        self.framework.observe(self.s3.on.credentials_changed, self._reconcile)
        self.framework.observe(self.s3.on.credentials_gone, self._reconcile)
        self.framework.observe(self.smtp.on.smtp_data_available, self._reconcile)
        self.framework.observe(self.on.smtp_relation_broken, self._reconcile)
        self.framework.observe(self.ingress.on.ready, self._reconcile)
        self.framework.observe(self.ingress.on.revoked, self._reconcile)
        self.framework.observe(self.on.penpot_pebble_ready, self._reconcile)
        self.framework.observe(self.on.create_profile_action, self._on_create_profile_action)
        self.framework.observe(self.on.delete_profile_action, self._on_delete_profile_action)

    def _on_create_profile_action(self, event: ops.ActionEvent) -> None:
        """Handle create-profile action.

        Args:
            event: Action event.
        """
        if (
            not self.container.can_connect()
            or "backend" not in self.container.get_plan().services
            or not self.container.get_service("backend").is_running()
        ):
            event.fail("penpot is not ready")
            return
        email = event.params["email"]
        fullname = event.params["fullname"]
        # Generate a random password; it is returned in the action result.
        password = secrets.token_urlsafe(10)
        process = self.container.exec(
            ["python3", "manage.py", "create-profile", "--email", email, "--fullname", fullname],
            service_context="backend",
            stdin=password + "\n",
            combine_stderr=True,
        )
        try:
            process.wait()
        except ops.pebble.ExecError as exc:
            event.fail(typing.cast(str, exc.stdout))
            return
        event.set_results({"email": email, "fullname": fullname, "password": password})

    def _on_delete_profile_action(self, event: ops.ActionEvent) -> None:
        """Handle delete-profile action.

        Args:
            event: Action event.
        """
        if (
            not self.container.can_connect()
            or "backend" not in self.container.get_plan().services
            or not self.container.get_service("backend").is_running()
        ):
            event.fail("penpot is not ready")
            return
        email = event.params["email"]
        process = self.container.exec(
            ["python3", "manage.py", "delete-profile", "--email", email],
            service_context="backend",
            combine_stderr=True,
        )
        try:
            process.wait()
        except ops.pebble.ExecError as exc:
            event.fail(typing.cast(str, exc.stdout))
            return
        event.set_results({"email": email})

    def _reconcile(self, _: ops.EventBase) -> None:
        """Reconcile penpot services."""
        if not self._check_ready():
            # Not all requirements are fulfilled: stop everything that may be
            # running (status was set by _check_ready).
            if self.container.can_connect() and self.container.get_services():
                self.container.stop("backend")
                self.container.stop("frontend")
                self.container.stop("exporter")
            return
        self.container.add_layer("penpot", self._gen_pebble_plan(), combine=True)
        self.container.replan()
        self.container.start("backend")
        self.container.start("frontend")
        # Only one unit in the application runs the exporter service.
        if self.unit.name == self._get_penpot_exporter_unit():
            self.container.start("exporter")
        else:
            self.container.stop("exporter")
        self.unit.status = ops.ActiveStatus()

    def _gen_pebble_plan(self) -> ops.pebble.LayerDict:
        """Generate penpot pebble plan.

        Returns:
            Penpot pebble plan.
        """
        plan = ops.pebble.LayerDict(
            summary="penpot services",
            description="penpot services",
            services={
                "frontend": {
                    "command": './nginx-entrypoint.sh nginx -g "daemon off;"',
                    "working-dir": "/opt/penpot/frontend/",
                    "override": "replace",
                    "after": ["backend"],
                    "environment": {
                        "PENPOT_BACKEND_URI": "http://127.0.0.1:6060",
                        "PENPOT_EXPORTER_URI": self._get_penpot_exporter_uri(),
                        "PENPOT_INTERNAL_RESOLVER": self._get_local_resolver(),
                        "PENPOT_FLAGS": " ".join(self._get_penpot_frontend_options()),
                    },
                },
                "backend": {
                    "command": "/opt/penpot/backend/run.sh",
                    "override": "replace",
                    "working-dir": "/opt/penpot/backend/",
                    "environment": {
                        "JAVA_HOME": "/usr/lib/jvm/java-21-openjdk-amd64",
                        "PENPOT_TELEMETRY_ENABLED": "false",
                        "PENPOT_PUBLIC_URI": typing.cast(str, self._get_public_uri()),
                        "PENPOT_FLAGS": " ".join(self._get_penpot_backend_options()),
                        **typing.cast(dict[str, str], self._get_penpot_secret_key()),
                        **typing.cast(dict[str, str], self._get_postgresql_credentials()),
                        **typing.cast(dict[str, str], self._get_redis_credentials()),
                        **typing.cast(dict[str, str], self._get_smtp_credentials()),
                        **typing.cast(dict[str, str], self._get_s3_credentials()),
                    },
                },
                "exporter": {
                    "command": "node app.js",
                    "working-dir": "/opt/penpot/exporter/",
                    "override": "replace",
                    "after": ["backend", "frontend"],
                    "environment": {
                        "PENPOT_PUBLIC_URI": "http://127.0.0.1:8080",
                        "PLAYWRIGHT_BROWSERS_PATH": "/opt/penpot/exporter/browsers",
                        **typing.cast(dict[str, str], self._get_redis_credentials()),
                    },
                },
            },
            checks={
                "backend-ready": {
                    "override": "replace",
                    "level": "alive",
                    "period": "30s",
                    "exec": {
                        # pylint: disable=line-too-long
                        "command": 'bash -c "pebble services backend | grep -q inactive || curl -f -m 5 localhost:6060/readyz"'  # noqa: E501
                    },
                }
            },
        )
        return plan

    def _check_ready(self) -> bool:
        """Check if penpot is ready to start.

        Returns:
            True if penpot is ready to start.
        """
        requirements = {
            "peer integration": self._get_penpot_secret_key(),
            "postgresql": self._get_postgresql_credentials(),
            "redis": self._get_redis_credentials(),
            "s3": self._get_s3_credentials(),
            "ingress": self._get_public_uri(),
            "penpot container": self.container.can_connect(),
        }
        unfulfilled = sorted([k for k, v in requirements.items() if not v])
        if unfulfilled:
            self.unit.status = ops.BlockedStatus(f"waiting for {', '.join(unfulfilled)}")
            return False
        return True

    def _get_penpot_secret_key(self) -> dict[str, str] | None:
        """Retrieve or generate a Penpot secret key.

        Checks if the Penpot secret key already exists within the peer relation.
        If it does not exist, a new secret key is generated and stored in the peer relation.
        This key is then returned.

        Returns:
            Penpot secret key.
        """
        peer_relation = self.model.get_relation("penpot_peer")
        if peer_relation is None:
            return None
        secret_id = peer_relation.data[self.app].get("secrets")
        if secret_id is None:
            # Only the leader may create the application secret; followers
            # wait for the secret id to appear in peer relation data.
            if self.unit.is_leader():
                new_secret = {"penpot-secret-key": secrets.token_urlsafe(64)}
                secret = self.app.add_secret(new_secret)
                secret.set_content(new_secret)
                peer_relation.data[self.app]["secrets"] = typing.cast(str, secret.id)
                return {k.replace("-", "_").upper(): v for k, v in new_secret.items()}
            return None
        secret = self.model.get_secret(id=secret_id)
        return {
            k.replace("-", "_").upper(): v for k, v in secret.get_content(refresh=True).items()
        }

    def _get_postgresql_credentials(self) -> dict[str, str] | None:
        """Get penpot postgresql credentials from the postgresql integration.

        Returns:
            Penpot postgresql credentials.
        """
        relation = self.model.get_relation("postgresql")
        if not relation or not relation.app:
            return None
        endpoint = self.postgresql.fetch_relation_field(relation.id, "endpoints")
        database = self.postgresql.fetch_relation_field(relation.id, "database")
        username = self.postgresql.fetch_relation_field(relation.id, "username")
        password = self.postgresql.fetch_relation_field(relation.id, "password")
        if not all((endpoint, database, username, password)):
            return None
        return {
            "PENPOT_DATABASE_URI": f"postgresql://{endpoint}/{database}",
            "PENPOT_DATABASE_USERNAME": username,
            "PENPOT_DATABASE_PASSWORD": password,
        }

    def _get_redis_credentials(self) -> dict[str, str] | None:
        """Get penpot redis credentials from the redis integration.

        Returns:
            Penpot redis credentials.
        """
        relation = self.model.get_relation("redis")
        if not relation or not relation.app:
            return None
        relation_data = self.redis.relation_data
        if not relation_data:
            return None
        return {"PENPOT_REDIS_URI": self.redis.url}

    def _get_smtp_credentials(self) -> dict[str, str]:
        """Get penpot smtp credentials from the smtp integration.

        Returns:
            Penpot smtp credentials.
        """
        relation = self.model.get_relation("smtp")
        if not relation or not relation.app:
            return {}
        smtp_data = self.smtp.get_relation_data()
        if not smtp_data:
            return {}
        from_address = f"{smtp_data.user or 'no-reply'}@{smtp_data.domain}"
        config_from_address = self.config.get("smtp-from-address")
        if config_from_address:
            from_address = typing.cast(str, config_from_address)
        smtp_credentials = {
            "PENPOT_SMTP_DEFAULT_FROM": from_address,
            "PENPOT_SMTP_DEFAULT_REPLY_TO": from_address,
            "PENPOT_SMTP_HOST": smtp_data.host,
            "PENPOT_SMTP_PORT": str(smtp_data.port),
            "PENPOT_SMTP_TLS": "false",
            "PENPOT_SMTP_SSL": "false",
        }
        if smtp_data.user:
            smtp_credentials["PENPOT_SMTP_USERNAME"] = smtp_data.user
        if smtp_data.password:
            smtp_credentials["PENPOT_SMTP_PASSWORD"] = smtp_data.password
        if smtp_data.password_id:
            password_secret = self.model.get_secret(id=smtp_data.password_id)
            password_secret_content = password_secret.get_content(refresh=True)
            smtp_credentials["PENPOT_SMTP_PASSWORD"] = password_secret_content["password"]
        # The smtp interface's "tls" means implicit TLS (SMTPS) and "starttls"
        # means opportunistic STARTTLS; penpot's PENPOT_SMTP_SSL enables
        # implicit SSL while PENPOT_SMTP_TLS enables STARTTLS, so the mapping
        # is TLS -> SSL flag and STARTTLS -> TLS flag (previously inverted).
        if smtp_data.transport_security == TransportSecurity.TLS:
            smtp_credentials["PENPOT_SMTP_SSL"] = "true"
        if smtp_data.transport_security == TransportSecurity.STARTTLS:
            smtp_credentials["PENPOT_SMTP_TLS"] = "true"
        return smtp_credentials

    def _get_s3_credentials(self) -> dict[str, str] | None:
        """Get penpot s3 credentials from the s3 integration.

        Returns:
            Penpot s3 credentials.
        """
        relation = self.model.get_relation("s3")
        if not relation or not relation.app:
            return None
        s3_data = self.s3.get_s3_connection_info()
        if not s3_data or "access-key" not in s3_data:
            return None
        return {
            "AWS_ACCESS_KEY_ID": s3_data["access-key"],
            "AWS_SECRET_ACCESS_KEY": s3_data["secret-key"],
            "PENPOT_ASSETS_STORAGE_BACKEND": "assets-s3",
            "PENPOT_STORAGE_ASSETS_S3_REGION": s3_data.get("region", "us-east-1"),
            "PENPOT_STORAGE_ASSETS_S3_BUCKET": s3_data["bucket"],
            "PENPOT_STORAGE_ASSETS_S3_ENDPOINT": s3_data["endpoint"],
        }

    def _get_public_uri(self) -> str | None:
        """Get penpot public URI.

        Returns:
            Penpot public URI.
        """
        return self.ingress.url

    def _get_penpot_frontend_options(self) -> list[str]:
        """Retrieve the penpot options for the penpot frontend.

        Returns:
            Penpot frontend options.
        """
        return sorted(
            [
                "enable-login-with-password",
                "disable-registration",
                "disable-onboarding-questions",
            ]
        )

    def _get_penpot_backend_options(self) -> list[str]:
        """Retrieve the penpot options for the penpot backend.

        Returns:
            Penpot backend options.
        """
        return sorted(
            [
                "enable-login-with-password",
                "enable-prepl-server",
                "disable-registration",
                "disable-telemetry",
                "disable-onboarding-questions",
                "disable-log-emails",
                ("enable" if self._get_smtp_credentials() else "disable") + "-smtp",
            ]
        )

    def _get_local_resolver(self) -> str:
        """Retrieve the current nameserver address being used.

        Returns:
            The address of the nameserver.
        """
        kube_dns = f"kube-dns.kube-system.svc.{self._get_kubernetes_cluster_domain()}"
        try:
            dns.resolver.resolve(kube_dns, search=True)
            return kube_dns
        except dns.exception.DNSException:
            # resolvers like dns-over-https, not likely to happen in Kubernetes
            return typing.cast(str, dns.resolver.Resolver().nameservers[0])

    def _get_penpot_exporter_unit(self) -> str:
        """Retrieve the name of the unit designated to run the penpot exporter.

        Returns:
            Exporter unit name.
        """
        relation = typing.cast(ops.Relation, self.model.get_relation("penpot_peer"))
        units = list(relation.units)
        units.append(self.unit)
        # Deterministically pick the unit with the smallest unit number.
        return sorted(units, key=lambda u: int(u.name.split("/")[-1]))[0].name

    def _get_penpot_exporter_uri(self) -> str:
        """Retrieve the address of the unit designated to run the penpot exporter.

        Returns:
            Exporter unit address.
        """
        unit_name = self._get_penpot_exporter_unit().replace("/", "-")
        k8s_domain = self._get_kubernetes_cluster_domain()
        hostname = f"{unit_name}.{self.app.name}-endpoints.{self.model.name}.svc.{k8s_domain}"
        return f"http://{hostname}:6061"

    def _get_kubernetes_cluster_domain(self) -> str:
        """Get Kubernetes cluster domain name.

        Returns:
            Kubernetes cluster domain name.
        """
        try:
            answers = dns.resolver.resolve("kubernetes.default.svc", search=True)
        except dns.exception.DNSException:
            # Fall back to the conventional default cluster domain.
            return "cluster.local"
        return answers.qname.to_text().removeprefix("kubernetes.default.svc").strip(".")


if __name__ == "__main__":  # pragma: nocover
    ops.main(PenpotCharm)
+ +"""Integration test fixtures.""" + +# pylint: disable=unused-argument + +import collections +import json +import logging +import time + +import boto3 +import botocore.client +import kubernetes +import pytest +import pytest_asyncio +from pytest_operator.plugin import OpsTest + +logger = logging.getLogger(__name__) + + +@pytest_asyncio.fixture(name="get_unit_ips", scope="module") +async def get_unit_ips_fixture(ops_test: OpsTest): + """A function to get unit ips of a charm application.""" + + async def _get_unit_ips(name: str): + """A function to get unit ips of a charm application. + + Args: + name: The name of the charm application. + + Returns: + A list of unit ips. + """ + _, status, _ = await ops_test.juju("status", "--format", "json") + status = json.loads(status) + units = status["applications"][name]["units"] + ip_list = [] + for key in sorted(units.keys(), key=lambda n: int(n.split("/")[-1])): + ip_list.append(units[key]["address"]) + return ip_list + + return _get_unit_ips + + +@pytest.fixture(scope="module", name="load_kube_config") +def load_kube_config_fixture(pytestconfig: pytest.Config): + """Load kubernetes config file.""" + kube_config = pytestconfig.getoption("--kube-config") + kubernetes.config.load_kube_config(config_file=kube_config) + + +@pytest_asyncio.fixture(name="minio", scope="module") +async def minio_fixture(get_unit_ips, load_kube_config, ops_test: OpsTest): + """Deploy test minio service.""" + key = "minioadmin" + assert ops_test.model + minio = await ops_test.model.deploy("minio", config={"access-key": key, "secret-key": key}) + await ops_test.model.wait_for_idle(apps=[minio.name]) + ip = (await get_unit_ips(minio.name))[0] + s3 = boto3.client( + "s3", + endpoint_url=f"http://{ip}:9000", + aws_access_key_id=key, + aws_secret_access_key=key, + config=botocore.client.Config(signature_version="s3v4"), + ) + bucket = "penpot" + s3.create_bucket(Bucket=bucket) + S3Credential = collections.namedtuple("S3Credential", "endpoint bucket 
access_key secret_key") + return S3Credential( + endpoint=f"http://minio-endpoints.{ops_test.model.name}.svc.cluster.local:9000", + bucket=bucket, + access_key=key, + secret_key=key, + ) + + +@pytest.fixture(scope="module") +def mailcatcher(load_kube_config, ops_test: OpsTest): + """Deploy test mailcatcher service.""" + assert ops_test.model + namespace = ops_test.model.name + v1 = kubernetes.client.CoreV1Api() + pod = kubernetes.client.V1Pod( + api_version="v1", + kind="Pod", + metadata=kubernetes.client.V1ObjectMeta( + name="mailcatcher", + namespace=namespace, + labels={"app.kubernetes.io/name": "mailcatcher"}, + ), + spec=kubernetes.client.V1PodSpec( + containers=[ + kubernetes.client.V1Container( + name="mailcatcher", + image="sj26/mailcatcher", + ports=[ + kubernetes.client.V1ContainerPort(container_port=1025), + kubernetes.client.V1ContainerPort(container_port=1080), + ], + ) + ], + ), + ) + v1.create_namespaced_pod(namespace=namespace, body=pod) + service = kubernetes.client.V1Service( + api_version="v1", + kind="Service", + metadata=kubernetes.client.V1ObjectMeta(name="mailcatcher-service", namespace=namespace), + spec=kubernetes.client.V1ServiceSpec( + type="ClusterIP", + ports=[ + kubernetes.client.V1ServicePort(port=1025, target_port=1025, name="tcp-1025"), + kubernetes.client.V1ServicePort(port=1080, target_port=1080, name="tcp-1080"), + ], + selector={"app.kubernetes.io/name": "mailcatcher"}, + ), + ) + v1.create_namespaced_service(namespace=namespace, body=service) + deadline = time.time() + 300 + while True: + if time.time() > deadline: + raise TimeoutError("timeout while waiting for mailcatcher pod") + try: + pod = v1.read_namespaced_pod(name="mailcatcher", namespace=namespace) + if pod.status.phase == "Running": + logger.info("mailcatcher running at %s", pod.status.pod_ip) + break + except kubernetes.client.ApiException: + pass + logger.info("waiting for mailcatcher pod") + time.sleep(1) + SmtpCredential = collections.namedtuple("SmtpCredential", 
"host port") + return SmtpCredential( + host=f"mailcatcher-service.{namespace}.svc.cluster.local", + port=1025, + ) + + +@pytest.fixture(scope="module") +def ingress_address(pytestconfig: pytest.Config): + """Get ingress address test option.""" + address = pytestconfig.getoption("--ingress-address") + if not address: + return "127.0.0.1" + return address diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 0a8d427..74bbcea 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -5,32 +5,125 @@ """Integration tests.""" -import asyncio import logging -from pathlib import Path +import time +import juju.action import pytest -import yaml +import requests from pytest_operator.plugin import OpsTest logger = logging.getLogger(__name__) -CHARMCRAFT = yaml.safe_load(Path("./charmcraft.yaml").read_text(encoding="utf-8")) -APP_NAME = CHARMCRAFT["name"] - @pytest.mark.abort_on_fail -async def test_build_and_deploy(ops_test: OpsTest, pytestconfig: pytest.Config): - """Deploy the charm together with related charms. - - Assert on the unit status before any relations/configurations take place. +async def test_build_and_deploy( + ops_test: OpsTest, pytestconfig: pytest.Config, minio, mailcatcher +): + """ + arrange: set up the test Juju model. + act: build and deploy the Penpot charm with required services. + assert: the Penpot charm becomes active. 
""" charm = pytestconfig.getoption("--charm-file") - # Deploy the charm and wait for active/idle status + penpot_image = pytestconfig.getoption("--penpot-image") + assert penpot_image + if not charm: + charm = await ops_test.build_charm(".") assert ops_test.model - await asyncio.gather( - ops_test.model.deploy(f"./{charm}", application_name=APP_NAME), - ops_test.model.wait_for_idle( - apps=[APP_NAME], status="active", raise_on_blocked=True, timeout=1000 - ), + penpot = await ops_test.model.deploy( + f"./{charm}", resources={"penpot-image": penpot_image}, num_units=2 + ) + postgresql_k8s = await ops_test.model.deploy("postgresql-k8s", channel="14/stable", trust=True) + redis_k8s = await ops_test.model.deploy("redis-k8s", channel="edge") + smtp_integrator = await ops_test.model.deploy( + "smtp-integrator", + config={ + "auth_type": "none", + "domain": "example.com", + "host": mailcatcher.host, + "port": mailcatcher.port, + }, + ) + s3_integrator = await ops_test.model.deploy( + "s3-integrator", config={"bucket": minio.bucket, "endpoint": minio.endpoint} + ) + nginx_ingress_integrator = await ops_test.model.deploy( + "nginx-ingress-integrator", + channel="edge", + config={"path-routes": "/", "service-hostname": "penpot.local"}, + trust=True, + revision=109, ) + await ops_test.model.wait_for_idle(timeout=900) + action = await s3_integrator.units[0].run_action( + "sync-s3-credentials", + **{ + "access-key": minio.access_key, + "secret-key": minio.secret_key, + }, + ) + await action.wait() + await ops_test.model.add_relation(penpot.name, postgresql_k8s.name) + await ops_test.model.add_relation(penpot.name, redis_k8s.name) + await ops_test.model.add_relation(penpot.name, s3_integrator.name) + await ops_test.model.add_relation(penpot.name, f"{smtp_integrator.name}:smtp") + await ops_test.model.add_relation(penpot.name, nginx_ingress_integrator.name) + await ops_test.model.wait_for_idle(timeout=900, status="active") + + +async def test_create_profile(ops_test: OpsTest, 
ingress_address): + """ + arrange: deploy the Penpot charm. + act: create a Penpot account using the 'create-profile' charm action. + assert: the account created can be used to log in to Penpot. + """ + email = "test@test.com" + assert ops_test.model + unit = ops_test.model.applications["penpot"].units[0] + deadline = time.time() + 300 + while time.time() < deadline: + action: juju.action.Action = await unit.run_action( + "create-profile", email=email, fullname="test" + ) + await action.wait() + if "password" in action.results: + password = action.results["password"] + break + logger.info("waiting for penpot started: %s", action.results) + time.sleep(5) + else: + raise TimeoutError("timed out waiting for profile creation success") + logger.info("create test penpot user %s with password: %s", email, password) + session = requests.Session() + deadline = time.time() + 300 + while time.time() < deadline: + response = session.post( + f"http://{ingress_address}/api/rpc/command/login-with-password", + headers={"Host": "penpot.local"}, + json={"~:email": email, "~:password": password}, + timeout=10, + ) + if response.status_code == 200: + break + logger.info("penpot login status: %s", response.status_code) + time.sleep(5) + else: + raise TimeoutError("timed out waiting for login success") + action = await unit.run_action("delete-profile", email=email) + await action.wait() + deadline = time.time() + 300 + while time.time() < deadline: + response = session.post( + f"http://{ingress_address}/api/rpc/command/login-with-password", + headers={"Host": "penpot.local"}, + json={"~:email": email, "~:password": password}, + timeout=10, + ) + if response.status_code == 400: + break + logger.info("penpot login status: %s", response.status_code) + time.sleep(5) + else: + raise TimeoutError("timed out waiting for login response") + assert response.status_code == 400 diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index e3979c0..52bba9b 100644 --- a/tests/unit/__init__.py 
# Copyright 2024 Canonical Ltd.
# See LICENSE file for licensing details.

"""Unit tests fixtures."""
import json

import ops.testing
import pytest

from src.charm import PenpotCharm


class Harness:
    """Unit test helper."""

    def __init__(self, harness: ops.testing.Harness) -> None:
        """Initialize harness.

        Args:
            harness: ops.testing.Harness object.
        """
        self.harness = harness
        self.harness.set_model_name("test")

    def setup_postgresql_integration(self):
        """Setup postgresql integration."""
        postgresql_app = "postgresql"
        # Database credentials are delivered via a juju secret.
        secret_id = self.harness.add_model_secret(
            postgresql_app, {"username": "postgresql-username", "password": "postgresql-password"}
        )
        relation_id = self.harness.add_relation("postgresql", postgresql_app)
        self.harness.grant_secret(secret_id, self.harness.charm.app)
        self.harness.add_relation_unit(relation_id, f"{postgresql_app}/0")
        self.harness.update_relation_data(
            relation_id,
            postgresql_app,
            {
                "data": json.dumps(
                    {
                        "database": "penpot",
                        "requested-secrets": json.dumps(["username", "password"]),
                    }
                ),
                "database": "penpot",
                "endpoints": "postgresql-endpoint:5432",
                "secret-user": secret_id,
                "version": "14.11",
            },
        )

    def setup_redis_integration(self):
        """Setup redis integration."""
        self.harness.add_relation(
            "redis", "redis", unit_data={"hostname": "redis-hostname", "port": "6379"}
        )

    def setup_s3_integration(self):
        """Setup s3 integration."""
        self.harness.add_relation(
            "s3",
            "s3-integrator",
            app_data={
                "access-key": "s3-access-key",
                "bucket": "penpot",
                "endpoint": "s3-endpoint",
                "secret-key": "s3-secret-key",
            },
        )

    def setup_ingress_integration(self):
        """Setup ingress integration."""
        self.harness.add_network("10.0.0.10")
        self.harness.add_relation(
            "ingress",
            "nginx-ingress-integrator",
            app_data={"ingress": '{"url": "http://penpot.local/"}'},
        )

    def setup_smtp_integration(self, use_password: bool = False):
        """Setup smtp integration.

        Args:
            use_password: use user/password authentication.
        """
        smtp_integrator_app = "smtp-integrator"
        if use_password:
            secret_id = self.harness.add_model_secret(
                smtp_integrator_app,
                {"username": "smtp-username", "password": "smtp-password"},
            )
        relation_id = self.harness.add_relation("smtp", smtp_integrator_app)
        if use_password:
            self.harness.grant_secret(secret_id, self.harness.charm.app)
        self.harness.add_relation_unit(relation_id, f"{smtp_integrator_app}/0")
        app_data = {
            "auth_type": "plain" if use_password else "none",
            "domain": "example.com",
            "host": "smtp-host",
            "port": "1025",
            "transport_security": "none",
        }
        if use_password:
            app_data["user"] = "smtp-user"
            app_data["password_id"] = secret_id
        self.harness.update_relation_data(relation_id, smtp_integrator_app, app_data)

    def setup_integration(self):
        """Setup all integrations."""
        self.setup_postgresql_integration()
        self.setup_redis_integration()
        self.setup_s3_integration()
        self.setup_ingress_integration()
        self.setup_smtp_integration()

    def __getattr__(self, attr):
        """Proxy ops.testing.Harness.

        Args:
            attr: attribute name.

        Returns:
            Proxied attribute.
        """
        return getattr(self.harness, attr)


@pytest.fixture(name="harness")
def harness_fixture(monkeypatch):
    """Harness fixture."""
    # Juju secrets require a secret-capable juju version.
    monkeypatch.setenv("JUJU_VERSION", "3.5.0")
    return Harness(ops.testing.Harness(PenpotCharm))
# Copyright 2024 Canonical Ltd.
# See LICENSE file for licensing details.

"""Unit tests."""

# pylint: disable=protected-access


def test_postgresql_config(harness):
    """
    arrange: initialize the testing harness and set up the postgresql integration.
    act: retrieve the postgresql configuration for penpot.
    assert: ensure the postgresql configuration for penpot matches the expectations.
    """
    harness.begin_with_initial_hooks()
    assert harness.charm._get_postgresql_credentials() is None
    harness.setup_postgresql_integration()
    assert harness.charm._get_postgresql_credentials() == {
        "PENPOT_DATABASE_PASSWORD": "postgresql-password",
        "PENPOT_DATABASE_URI": "postgresql://postgresql-endpoint:5432/penpot",
        "PENPOT_DATABASE_USERNAME": "postgresql-username",
    }


def test_redis_config(harness):
    """
    arrange: initialize the testing harness and set up the redis integration.
    act: retrieve the redis configuration for penpot.
    assert: ensure the redis configuration for penpot matches the expectations.
    """
    harness.begin_with_initial_hooks()
    assert harness.charm._get_redis_credentials() is None
    harness.setup_redis_integration()
    assert harness.charm._get_redis_credentials() == {
        "PENPOT_REDIS_URI": "redis://redis-hostname:6379"
    }


def test_s3_config(harness):
    """
    arrange: initialize the testing harness and set up the s3 integration.
    act: retrieve the s3 configuration for penpot.
    assert: ensure the s3 configuration for penpot matches the expectations.
    """
    harness.begin_with_initial_hooks()
    assert harness.charm._get_s3_credentials() is None
    harness.setup_s3_integration()
    assert harness.charm._get_s3_credentials() == {
        "AWS_ACCESS_KEY_ID": "s3-access-key",
        "AWS_SECRET_ACCESS_KEY": "s3-secret-key",
        "PENPOT_ASSETS_STORAGE_BACKEND": "assets-s3",
        "PENPOT_STORAGE_ASSETS_S3_BUCKET": "penpot",
        "PENPOT_STORAGE_ASSETS_S3_ENDPOINT": "s3-endpoint",
        "PENPOT_STORAGE_ASSETS_S3_REGION": "us-east-1",
    }


def test_smtp_config(harness):
    """
    arrange: initialize the testing harness and set up the smtp integration.
    act: retrieve the smtp configuration for penpot.
    assert: ensure the smtp configuration for penpot matches the expectations.
    """
    harness.begin_with_initial_hooks()
    assert harness.charm._get_smtp_credentials() == {}
    harness.setup_smtp_integration()
    assert harness.charm._get_smtp_credentials() == {
        "PENPOT_SMTP_DEFAULT_FROM": "no-reply@example.com",
        "PENPOT_SMTP_DEFAULT_REPLY_TO": "no-reply@example.com",
        "PENPOT_SMTP_HOST": "smtp-host",
        "PENPOT_SMTP_PORT": "1025",
        "PENPOT_SMTP_SSL": "false",
        "PENPOT_SMTP_TLS": "false",
    }


def test_smtp_config_with_password(harness):
    """
    arrange: set up the smtp integration with password authentication.
    act: retrieve the smtp configuration for penpot.
    assert: ensure the smtp configuration for penpot matches the expectations.
    """
    harness.begin_with_initial_hooks()
    harness.setup_smtp_integration(use_password=True)
    assert harness.charm._get_smtp_credentials() == {
        "PENPOT_SMTP_DEFAULT_FROM": "smtp-user@example.com",
        "PENPOT_SMTP_DEFAULT_REPLY_TO": "smtp-user@example.com",
        "PENPOT_SMTP_HOST": "smtp-host",
        "PENPOT_SMTP_PASSWORD": "smtp-password",
        "PENPOT_SMTP_PORT": "1025",
        "PENPOT_SMTP_SSL": "false",
        "PENPOT_SMTP_TLS": "false",
        "PENPOT_SMTP_USERNAME": "smtp-user",
    }


def test_smtp_config_override_from_address(harness):
    """
    arrange: initialize the testing harness and set up the smtp integration.
    act: set smtp-from-address configuration and retrieve the smtp configuration for penpot.
    assert: ensure the smtp configuration for penpot matches the expectations.
    """
    harness.begin_with_initial_hooks()
    harness.update_config({"smtp-from-address": "test@test.com"})
    harness.setup_smtp_integration(use_password=True)
    assert harness.charm._get_smtp_credentials() == {
        "PENPOT_SMTP_DEFAULT_FROM": "test@test.com",
        "PENPOT_SMTP_DEFAULT_REPLY_TO": "test@test.com",
        "PENPOT_SMTP_HOST": "smtp-host",
        "PENPOT_SMTP_PASSWORD": "smtp-password",
        "PENPOT_SMTP_PORT": "1025",
        "PENPOT_SMTP_SSL": "false",
        "PENPOT_SMTP_TLS": "false",
        "PENPOT_SMTP_USERNAME": "smtp-user",
    }


def test_smtp_penpot_option(harness):
    """
    arrange: initialize the testing harness.
    act: retrieve the penpot options with different smtp setup.
    assert: ensure the penpot options matches the expectations.
    """
    harness.begin_with_initial_hooks()
    assert harness.charm._get_penpot_backend_options() == [
        "disable-log-emails",
        "disable-onboarding-questions",
        "disable-registration",
        "disable-smtp",
        "disable-telemetry",
        "enable-login-with-password",
        "enable-prepl-server",
    ]
    harness.setup_smtp_integration(use_password=True)
    assert harness.charm._get_penpot_backend_options() == [
        "disable-log-emails",
        "disable-onboarding-questions",
        "disable-registration",
        "disable-telemetry",
        "enable-login-with-password",
        "enable-prepl-server",
        "enable-smtp",
    ]


def test_public_uri(harness):
    """
    arrange: initialize the testing harness and set up the ingress integration.
    act: retrieve the public URI configuration for penpot.
    assert: ensure the public URI for penpot matches the expectations.
    """
    harness.begin_with_initial_hooks()
    assert harness.charm._get_public_uri() is None
    harness.setup_ingress_integration()
    assert harness.charm._get_public_uri() == "http://penpot.local/"


def test_penpot_pebble_layer(harness):
    """
    arrange: initialize the testing harness and set up all required integration.
    act: retrieve the pebble layer for penpot.
    assert: ensure the pebble layer for penpot matches the expectations.
    """
    harness.set_leader()
    harness.begin_with_initial_hooks()
    # Readiness flips to True only once every mandatory integration is in place.
    assert not harness.charm._check_ready()
    harness.setup_postgresql_integration()
    assert not harness.charm._check_ready()
    harness.setup_redis_integration()
    assert not harness.charm._check_ready()
    harness.setup_s3_integration()
    assert not harness.charm._check_ready()
    harness.setup_ingress_integration()
    assert harness.charm._check_ready()
    harness.setup_smtp_integration()
    assert harness.charm._check_ready()
    plan = harness.charm._gen_pebble_plan()
    # Remove the entries that are random or host-dependent before comparing.
    del plan["services"]["backend"]["environment"]["PENPOT_SECRET_KEY"]
    del plan["services"]["frontend"]["environment"]["PENPOT_INTERNAL_RESOLVER"]
    assert plan == {
        "checks": {
            "backend-ready": {
                "exec": {
                    # pylint: disable=line-too-long
                    "command": 'bash -c "pebble services backend | grep -q inactive || curl -f -m 5 localhost:6060/readyz"'  # noqa: E501
                },
                "level": "alive",
                "override": "replace",
                "period": "30s",
            }
        },
        "description": "penpot services",
        "services": {
            "backend": {
                "command": "/opt/penpot/backend/run.sh",
                "environment": {
                    "AWS_ACCESS_KEY_ID": "s3-access-key",
                    "AWS_SECRET_ACCESS_KEY": "s3-secret-key",
                    "JAVA_HOME": "/usr/lib/jvm/java-21-openjdk-amd64",
                    "PENPOT_ASSETS_STORAGE_BACKEND": "assets-s3",
                    "PENPOT_DATABASE_PASSWORD": "postgresql-password",
                    "PENPOT_DATABASE_URI": "postgresql://postgresql-endpoint:5432/penpot",
                    "PENPOT_DATABASE_USERNAME": "postgresql-username",
                    "PENPOT_FLAGS": (
                        "disable-log-emails "
                        "disable-onboarding-questions "
                        "disable-registration "
                        "disable-telemetry "
                        "enable-login-with-password "
                        "enable-prepl-server "
                        "enable-smtp"
                    ),
                    "PENPOT_PUBLIC_URI": "http://penpot.local/",
                    "PENPOT_REDIS_URI": "redis://redis-hostname:6379",
                    "PENPOT_SMTP_DEFAULT_FROM": "no-reply@example.com",
                    "PENPOT_SMTP_DEFAULT_REPLY_TO": "no-reply@example.com",
                    "PENPOT_SMTP_HOST": "smtp-host",
                    "PENPOT_SMTP_PORT": "1025",
                    "PENPOT_SMTP_SSL": "false",
                    "PENPOT_SMTP_TLS": "false",
                    "PENPOT_STORAGE_ASSETS_S3_BUCKET": "penpot",
                    "PENPOT_STORAGE_ASSETS_S3_ENDPOINT": "s3-endpoint",
                    "PENPOT_STORAGE_ASSETS_S3_REGION": "us-east-1",
                    "PENPOT_TELEMETRY_ENABLED": "false",
                },
                "override": "replace",
                "working-dir": "/opt/penpot/backend/",
            },
            "exporter": {
                "after": ["backend", "frontend"],
                "command": "node app.js",
                "environment": {
                    "PENPOT_PUBLIC_URI": "http://127.0.0.1:8080",
                    "PENPOT_REDIS_URI": "redis://redis-hostname:6379",
                    "PLAYWRIGHT_BROWSERS_PATH": "/opt/penpot/exporter/browsers",
                },
                "override": "replace",
                "working-dir": "/opt/penpot/exporter/",
            },
            "frontend": {
                "after": ["backend"],
                "command": './nginx-entrypoint.sh nginx -g "daemon off;"',
                "environment": {
                    "PENPOT_BACKEND_URI": "http://127.0.0.1:6060",
                    "PENPOT_EXPORTER_URI": (
                        "http://penpot-0.penpot-endpoints.test.svc.cluster.local:6061"
                    ),
                    "PENPOT_FLAGS": (
                        "disable-onboarding-questions "
                        "disable-registration "
                        "enable-login-with-password"
                    ),
                },
                "override": "replace",
                "working-dir": "/opt/penpot/frontend/",
            },
        },
        "summary": "penpot services",
    }


def test_penpot_exporter_unit(harness):
    """
    arrange: initialize the testing harness and set up some penpot units.
    act: retrieve the penpot exporter unit.
    assert: penpot exporter unit is the unit with the least unit number.
    """
    relation_id = harness.add_relation("penpot_peer", "penpot")
    harness.add_relation_unit(relation_id, "penpot/1")
    harness.add_relation_unit(relation_id, "penpot/2")
    harness.begin()
    assert harness.charm._get_penpot_exporter_unit() == "penpot/0"
+ """ + harness.set_leader() + harness.begin_with_initial_hooks() + harness.setup_integration() + harness.set_can_connect("penpot", True) + + def test_handler(args): + assert args.command == [ + "python3", + "manage.py", + "create-profile", + "--email", + "test@test.com", + "--fullname", + "test", + ] + assert args.stdin + + harness.handle_exec("penpot", [], handler=test_handler) + harness.run_action("create-profile", {"email": "test@test.com", "fullname": "test"}) + + +def test_penpot_delete_profile_action(harness): + """ + arrange: initialize the testing harness and set up all required integration. + act: run delete-profile charm action. + assert: ensure correct commands are executed. + """ + harness.set_leader() + harness.begin_with_initial_hooks() + harness.setup_integration() + harness.set_can_connect("penpot", True) + + def test_handler(args): + assert args.command == [ + "python3", + "manage.py", + "delete-profile", + "--email", + "test@test.com", + ] + + harness.handle_exec("penpot", [], handler=test_handler) + harness.run_action("delete-profile", {"email": "test@test.com"}) diff --git a/tox.ini b/tox.ini index 8a5f784..f6e6175 100644 --- a/tox.ini +++ b/tox.ini @@ -35,19 +35,22 @@ commands = description = Check code against coding style standards deps = black + boto3 + ruff codespell - flake8<6.0.0 + flake8 flake8-builtins - flake8-copyright<6.0.0 + flake8-copyright flake8-docstrings>=1.6.0 flake8-docstrings-complete>=1.0.3 flake8-test-docs>=1.0 isort mypy + ops-scenario pep8-naming pydocstyle>=2.10 pylint - pyproject-flake8<6.0.0 + pyproject-flake8 pytest pytest-asyncio pytest-operator @@ -57,11 +60,7 @@ deps = -r{toxinidir}/requirements.txt commands = pydocstyle {[vars]src_path} - # uncomment the following line if this charm owns a lib - # codespell {[vars]lib_path} - codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \ - --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \ - --skip {toxinidir}/.mypy_cache --skip 
{toxinidir}/icon.svg + codespell {toxinidir} # pflake8 wrapper supports config from pyproject.toml pflake8 {[vars]all_path} --ignore=W503 isort --check-only --diff {[vars]all_path} @@ -73,6 +72,8 @@ commands = description = Run unit tests deps = coverage[toml] + ops-scenario + pydantic pytest -r{toxinidir}/requirements.txt commands = @@ -100,22 +101,11 @@ commands = [testenv:integration] description = Run integration tests deps = - juju==3.4.0.0 + boto3 + juju==3.5.2.0 pytest pytest-asyncio pytest-operator -r{toxinidir}/requirements.txt commands = pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} - -[testenv:src-docs] -allowlist_externals=sh -setenv = - PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path} -description = Generate documentation for src -deps = - lazydocs - -r{toxinidir}/requirements.txt -commands = - ; can't run lazydocs directly due to needing to run it on src/* which produces an invocation error in tox - sh generate-src-docs.sh