Handle the kubeconfig_mgmnt file in the plugin internally
Signed-off-by: michal.gubricky <[email protected]>
michal-gubricky committed Nov 20, 2024
1 parent 95d813b commit a967d90
Showing 3 changed files with 22 additions and 28 deletions.
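
In short: the plugin used to move the kind management cluster's kubeconfig into a fixed kubeconfig-mgmnt.yaml inside the working directory and pass that copy around; it now works directly with the kubeconfig path that KindCluster itself maintains, stored as self.kubeconfig_mgmnt, which drops the shutil dependency and the extra cleanup step.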
44 changes: 22 additions & 22 deletions Tests/kaas/plugin/plugin_cluster_stacks.py
@@ -1,6 +1,5 @@
 import os
 import yaml
-import shutil
 import subprocess
 import base64
 import time
@@ -132,9 +131,8 @@ def __init__(self, config_file=None):
         self.clouds_yaml_path = os.path.expanduser(self.config.get('clouds_yaml_path'))
         self.cs_namespace = self.config.get('cs_namespace')
         logger.debug(f"Working from {self.working_directory}")
-        self.kubeconfig_mgmnt_path = "kubeconfig-mgmnt.yaml"
 
-    def create_cluster(self, cluster_name=None, version=None, kubeconfig_filepath=None):
+    def create_cluster(self, cluster_name="scs-cluster", version=None, kubeconfig_filepath=None):
         self.cluster_name = cluster_name
         self.cluster_version = version
         self.kubeconfig_cs_cluster = kubeconfig_filepath
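
The hunk above also gives create_cluster a usable default name. A runnable stand-in demonstrating only the new default argument (the toy class below is an illustration, not the plugin's real class):

class Plugin:
    # Same signature as the new create_cluster; the body is reduced to
    # the part that matters for the default.
    def create_cluster(self, cluster_name="scs-cluster", version=None, kubeconfig_filepath=None):
        return cluster_name

assert Plugin().create_cluster() == "scs-cluster"   # name may now be omitted
assert Plugin().create_cluster("my-cluster") == "my-cluster"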
@@ -143,41 +141,39 @@ def create_cluster(self, cluster_name=None, version=None, kubeconfig_filepath=None):
         self.cluster = KindCluster(name=cluster_name)
         self.cluster.create()
         self.kubeconfig_mgmnt = str(self.cluster.kubeconfig_path.resolve())
-        if self.kubeconfig_mgmnt:
-            shutil.move(self.kubeconfig_mgmnt, self.kubeconfig_mgmnt_path)
 
         # Initialize clusterctl with OpenStack as the infrastructure provider
         self._run_subprocess(
             ["sudo", "-E", "clusterctl", "init", "--infrastructure", "openstack"],
             "Error during clusterctl init",
-            kubeconfig=self.kubeconfig_mgmnt_path
+            kubeconfig=self.kubeconfig_mgmnt
         )
 
         # Wait for all CAPI pods to be ready
-        wait_for_pods(self, ["capi-kubeadm-bootstrap-system", "capi-kubeadm-control-plane-system", "capi-system"], kubeconfig=self.kubeconfig_mgmnt_path)
+        wait_for_pods(self, ["capi-kubeadm-bootstrap-system", "capi-kubeadm-control-plane-system", "capi-system"], kubeconfig=self.kubeconfig_mgmnt)
 
         # Apply infrastructure components
-        self._apply_yaml_with_envsubst("cso-infrastructure-components.yaml", "Error applying CSO infrastructure components", kubeconfig=self.kubeconfig_mgmnt_path)
-        self._apply_yaml_with_envsubst("cspo-infrastructure-components.yaml", "Error applying CSPO infrastructure components", kubeconfig=self.kubeconfig_mgmnt_path)
+        self._apply_yaml_with_envsubst("cso-infrastructure-components.yaml", "Error applying CSO infrastructure components", kubeconfig=self.kubeconfig_mgmnt)
+        self._apply_yaml_with_envsubst("cspo-infrastructure-components.yaml", "Error applying CSPO infrastructure components", kubeconfig=self.kubeconfig_mgmnt)
 
         # Deploy CSP-helper chart
         helm_command = (
             f"helm upgrade -i csp-helper-{self.cs_namespace} -n {self.cs_namespace} "
             f"--create-namespace https://github.com/SovereignCloudStack/openstack-csp-helper/releases/latest/download/openstack-csp-helper.tgz "
             f"-f {self.clouds_yaml_path}"
         )
-        self._run_subprocess(helm_command, "Error deploying CSP-helper chart", shell=True, kubeconfig=self.kubeconfig_mgmnt_path)
+        self._run_subprocess(helm_command, "Error deploying CSP-helper chart", shell=True, kubeconfig=self.kubeconfig_mgmnt)
 
-        wait_for_pods(self, ["cso-system"], kubeconfig=self.kubeconfig_mgmnt_path)
+        wait_for_pods(self, ["cso-system"], kubeconfig=self.kubeconfig_mgmnt)
 
         # Create Cluster Stack definition and workload cluster
-        self._apply_yaml_with_envsubst("clusterstack.yaml", "Error applying clusterstack.yaml", kubeconfig=self.kubeconfig_mgmnt_path)
-        self._apply_yaml_with_envsubst("cluster.yaml", "Error applying cluster.yaml", kubeconfig=self.kubeconfig_mgmnt_path)
+        self._apply_yaml_with_envsubst("clusterstack.yaml", "Error applying clusterstack.yaml", kubeconfig=self.kubeconfig_mgmnt)
+        self._apply_yaml_with_envsubst("cluster.yaml", "Error applying cluster.yaml", kubeconfig=self.kubeconfig_mgmnt)
 
         # Get and wait on kubeadmcontrolplane and retrieve workload cluster kubeconfig
-        kcp_name = self._get_kubeadm_control_plane_name(namespace=self.cs_namespace, kubeconfig=self.kubeconfig_mgmnt_path)
-        self._wait_kcp_ready(kcp_name, namespace=self.cs_namespace, kubeconfig=self.kubeconfig_mgmnt_path)
-        self._retrieve_kubeconfig(namespace=self.cs_namespace, kubeconfig=self.kubeconfig_mgmnt_path)
+        kcp_name = self._get_kubeadm_control_plane_name(namespace=self.cs_namespace, kubeconfig=self.kubeconfig_mgmnt)
+        self._wait_kcp_ready(kcp_name, namespace=self.cs_namespace, kubeconfig=self.kubeconfig_mgmnt)
+        self._retrieve_kubeconfig(namespace=self.cs_namespace, kubeconfig=self.kubeconfig_mgmnt)
 
         # Wait for workload system pods to be ready
         # wait_for_workload_pods_ready(kubeconfig_path=self.kubeconfig_cs_cluster)
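
Every step above goes through self._run_subprocess with a kubeconfig keyword; the helper's body is collapsed in this view. A minimal sketch of how such a wrapper can inject the kubeconfig through the KUBECONFIG environment variable (an assumed implementation, not the repository's actual helper):

import logging
import os
import subprocess

logger = logging.getLogger(__name__)

def run_subprocess(command, error_msg, shell=False, capture_output=False, text=False, kubeconfig=None):
    # Point kubectl/clusterctl/helm at the management cluster without
    # mutating the caller's environment.
    env = os.environ.copy()
    if kubeconfig:
        env["KUBECONFIG"] = kubeconfig
    try:
        return subprocess.run(command, shell=shell, capture_output=capture_output,
                              text=text, check=True, env=env)
    except subprocess.CalledProcessError:
        logger.error(error_msg)
        raise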
@@ -186,31 +182,35 @@ def create_cluster(self, cluster_name=None, version=None, kubeconfig_filepath=None):
     def delete_cluster(self, cluster_name=None, kubeconfig_filepath=None):
         self.cluster_name = cluster_name
         kubeconfig_cs_cluster_filename = kubeconfig_filepath
+
+        # Get kubeconfig of the mgmnt (kind) cluster
+        self.cluster = KindCluster(cluster_name)
+        self.kubeconfig_mgmnt = str(self.cluster.kubeconfig_path.resolve())
+
         try:
             # Check if the cluster exists
             check_cluster_command = f"kubectl get cluster {cluster_name} -n {self.cs_namespace}"
-            result = self._run_subprocess(check_cluster_command, "Failed to get cluster resource", shell=True, capture_output=True, text=True, kubeconfig=self.kubeconfig_mgmnt_path)
+            result = self._run_subprocess(check_cluster_command, "Failed to get cluster resource", shell=True, capture_output=True, text=True, kubeconfig=self.kubeconfig_mgmnt)
 
             # Proceed with deletion only if the cluster exists
             if result.returncode == 0:
                 delete_command = f"kubectl delete cluster {cluster_name} --timeout=600s -n {self.cs_namespace}"
-                self._run_subprocess(delete_command, "Timeout while deleting the cluster", shell=True, kubeconfig=self.kubeconfig_mgmnt_path)
+                self._run_subprocess(delete_command, "Timeout while deleting the cluster", shell=True, kubeconfig=self.kubeconfig_mgmnt)
 
         except subprocess.CalledProcessError as error:
             if "NotFound" in error.stderr:
                 logger.info(f"Cluster {cluster_name} not found. Skipping deletion.")
             else:
                 raise RuntimeError(f"Error checking for cluster existence: {error}")
 
-        # Delete kind cluster
-        self.cluster = KindCluster(cluster_name)
+        # Delete mgmnt (kind) cluster
         self.cluster.delete()
 
         # Remove kubeconfigs
         if os.path.exists(kubeconfig_cs_cluster_filename):
             os.remove(kubeconfig_cs_cluster_filename)
-        if os.path.exists(self.kubeconfig_mgmnt_path):
-            os.remove(self.kubeconfig_mgmnt_path)
+        if os.path.exists(self.kubeconfig_mgmnt):
+            os.remove(self.kubeconfig_mgmnt)
 
     def _apply_yaml_with_envsubst(self, yaml_file, error_msg, kubeconfig=None):
         """
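The body of _apply_yaml_with_envsubst is collapsed as well; its name points at the common envsubst-then-kubectl-apply pipeline. A self-contained sketch of that pattern, under that assumption (requires envsubst and kubectl on PATH):

import os
import subprocess

def apply_yaml_with_envsubst(yaml_file, kubeconfig):
    # Render ${VAR} placeholders from the environment, then pipe the
    # result into kubectl against the given cluster.
    env = os.environ.copy()
    env["KUBECONFIG"] = kubeconfig
    with open(yaml_file) as f:
        rendered = subprocess.run(["envsubst"], stdin=f, capture_output=True,
                                  text=True, check=True).stdout
    subprocess.run(["kubectl", "apply", "-f", "-"],
                   input=rendered, text=True, check=True, env=env)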
2 changes: 0 additions & 2 deletions Tests/kaas/plugin/plugin_kind.py
@@ -8,8 +8,6 @@
 
 logger = logging.getLogger(__name__)
 
-logger = logging.getLogger(__name__)
-
 
 class PluginKind(KubernetesClusterPlugin):
     """
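The removed duplicate was harmless at runtime: logging.getLogger returns the same Logger object for a given name, so the second module-level call added nothing. A one-line check:

import logging

# getLogger is idempotent; the same name always yields the same instance.
assert logging.getLogger("plugin_kind") is logging.getLogger("plugin_kind")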
4 changes: 0 additions & 4 deletions Tests/scs-compatible-kaas.yaml
@@ -17,11 +17,7 @@ modules:
         tags: [mandatory]
   - id: scs-0210-v2
     name: Kubernetes version policy
-<<<<<<< HEAD
     url: https://docs.scs.community/standards/scs-0210-v2-k8s-version-policy
-=======
-    url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0210-v2-k8s-version-policy.md
->>>>>>> Rearange configuration files
     run:
       - executable: ./kaas/k8s-version-policy/k8s_version_policy.py
         args: -k {subject_root}/kubeconfig.yaml
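The leftover conflict markers were not just cosmetic: they make the spec file unparseable for any YAML consumer. An illustrative check with PyYAML (a demonstration, not part of the test suite):

import yaml

broken = """\
modules:
  - id: scs-0210-v2
    name: Kubernetes version policy
<<<<<<< HEAD
    url: https://docs.scs.community/standards/scs-0210-v2-k8s-version-policy
=======
    url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0210-v2-k8s-version-policy.md
>>>>>>> Rearange configuration files
"""

try:
    yaml.safe_load(broken)
except yaml.YAMLError as exc:
    print(f"conflict markers break parsing: {exc}")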
