diff --git a/config.yaml b/config.yaml index d17793d1..88d97359 100644 --- a/config.yaml +++ b/config.yaml @@ -37,3 +37,13 @@ options: description: Deploy the NodePort service for MLFlow type: boolean default: true + serve_artifacts: + description: | + Enables serving of artifact requests by routing these requests to the ./mlartifacts directory. + type: boolean + default: false + artifacts_destination: + description: | + Base location URI used to resolve artifact requests. + type: string + default: './mlartifacts' diff --git a/src/charm.py b/src/charm.py index 3f58f924..46c859d9 100755 --- a/src/charm.py +++ b/src/charm.py @@ -50,6 +50,8 @@ def __init__(self, *args): self.logger = logging.getLogger(__name__) self._port = self.model.config["mlflow_port"] + self._serve_artifacts = self.model.config.get("serve_artifacts", False) + self._artifacts_destination = self.model.config.get("artifacts_destination", "") self._exporter_port = self.model.config["mlflow_prometheus_exporter_port"] self._container_name = "mlflow-server" self._exporter_container_name = "mlflow-prometheus-exporter" @@ -199,7 +201,11 @@ def _get_env_vars(self, relational_db_data, object_storage_data): def _charmed_mlflow_layer(self, env_vars, default_artifact_root) -> Layer: """Create and return Pebble framework layer.""" - + serve_artifacts = "" + if self._serve_artifacts: + serve_artifacts = ( + f"--serve-artifacts --artifacts-destination {self._artifacts_destination}" + ) layer_config = { "summary": "mlflow-server layer", "description": "Pebble config layer for mlflow-server", @@ -214,6 +220,7 @@ def _charmed_mlflow_layer(self, env_vars, default_artifact_root) -> Layer: "0.0.0.0 " "--port " f"{self._port} " + f"{serve_artifacts} " "--backend-store-uri " f"{env_vars['MLFLOW_TRACKING_URI']} " "--default-artifact-root " diff --git a/tests/unit/test_operator.py b/tests/unit/test_operator.py index f95a4c8f..c34c0c8f 100644 --- a/tests/unit/test_operator.py +++ b/tests/unit/test_operator.py @@ -21,7 +21,19 @@ 
"summary": "Entrypoint of mlflow-server image", "startup": "enabled", "override": "replace", - "command": "mlflow server --host 0.0.0.0 --port 5000 --backend-store-uri test --default-artifact-root s3:/// --expose-prometheus /metrics", # noqa: E501 + "command": "mlflow server --host 0.0.0.0 --port 5000 --backend-store-uri test --default-artifact-root s3:/// --expose-prometheus /metrics", # noqa: E501 + "environment": {"MLFLOW_TRACKING_URI": "test"}, + }, + ) +} +EXPECTED_SERVICE_ARTIFACTS = { + "mlflow-server": Service( + "mlflow-server", + raw={ + "summary": "Entrypoint of mlflow-server image", + "startup": "enabled", + "override": "replace", + "command": "mlflow server --host 0.0.0.0 --port 5000 --serve-artifacts --artifacts-destination s3:/// --backend-store-uri test --default-artifact-root s3:/// --expose-prometheus /metrics", # noqa: E501 + "environment": {"MLFLOW_TRACKING_URI": "test"}, + }, + ) @@ -425,6 +437,23 @@ def test_update_layer_success( ) assert harness.charm.container.get_plan().services == EXPECTED_SERVICE + @patch( + "charm.KubernetesServicePatch", + lambda x, y, service_name, service_type, refresh_event: None, + ) + def test_config_artifact_success( + self, + harness: Harness, + ): + harness.update_config({"serve_artifacts": True, "artifacts_destination": "s3:///"}) + harness.begin() + harness.charm._update_layer( + harness.charm.container, + harness.charm._container_name, + harness.charm._charmed_mlflow_layer({"MLFLOW_TRACKING_URI": "test"}, ""), + ) + assert harness.charm.container.get_plan().services == EXPECTED_SERVICE_ARTIFACTS + @patch( + "charm.KubernetesServicePatch", + lambda x, y, service_name, service_type, refresh_event: None,