NO-ISSUE: Add cluster name to OCI bucket name #2605

Open · wants to merge 1 commit into master
@@ -150,8 +150,16 @@ def _create_bucket(
         # Need the namespace and bucket name
         return namespace

-    def _upload_iso_to_bucket(self, file_path: str, namespace: str, bucket_name: str) -> None:
-        log.info(f"Upload iso file to bucket object storage {file_path}")
+    def _upload_data_to_bucket(self, data: str, filename: str, namespace: str, bucket_name: str):
+        file_path = f"/tmp/{filename}"
+
+        with open(file_path, "w") as f:
+            f.write(data)
+
+        return self._upload_file_to_bucket(file_path, namespace, bucket_name)
+
+    def _upload_file_to_bucket(self, file_path: str, namespace: str, bucket_name: str):
+        log.info(f"Upload file to bucket object storage {file_path}")
         if os.path.isfile(file_path):
             try:
                 self._object_storage_client.put_object(
@@ -444,11 +452,11 @@ def setup_time(self) -> str:

     def prepare_nodes(self) -> None:
         log.info("OCI prepare all nodes")
-        bucket_name = random_name("bucket-")
+        bucket_name = f"bucket-{self._entity_config.cluster_id}"
         namespace = self._create_bucket(bucket_name)
-        self._upload_iso_to_bucket(self._entity_config.iso_download_path, namespace, bucket_name)
+        self._upload_file_to_bucket(self._entity_config.iso_download_path, namespace, bucket_name)
         url_path = self._create_pre_authenticated(
-            random_name("preauth-"), self._entity_config.iso_download_path, namespace, bucket_name
+            f"preauth-{self._entity_config.cluster_id}", self._entity_config.iso_download_path, namespace, bucket_name
         )

         terraform_variables = self._terraform_variables(
@@ -460,9 +468,16 @@ def prepare_nodes(self) -> None:
             base_dns=self._entity_config.base_dns_domain,
         )
         stack_id = self._create_stack(
-            random_name("stack-"), namespace, bucket_name, self._config.oci_infrastructure_zip_file, terraform_variables
+            f"stack-{self._entity_config.cluster_id}",
+            namespace,
+            bucket_name,
+            self._config.oci_infrastructure_zip_file,
+            terraform_variables,
         )
         terraform_output = self._apply_job_from_stack(stack_id, random_name("job-"))

adriengentil (Contributor) commented on Jan 10, 2025:

Suggested change:
-        terraform_output = self._apply_job_from_stack(stack_id, random_name("job-"))
+        terraform_output = self._apply_job_from_stack(stack_id, "create-infra")

as we applied it only once, it should be enough.

Contributor (Author) commented:

For my understanding, QE sometimes has more than one job per cluster; that is why I left it as it is.
@talhil-rh @dudyas6 can you refer to that?

adriengentil (Contributor) commented on Jan 10, 2025:

the job is attached to the stack where the cluster infra is defined /cc @bkopilov

+        self._upload_data_to_bucket(
+            terraform_output, f"terraform-output-{self._entity_config.cluster_id}.yaml", namespace, bucket_name
+        )
Comment on lines +478 to +480
Contributor commented:

Why? The bucket will be cleaned up after the test.
The Terraform output is available in the OCI stack, so it can be extracted from there directly using the API.
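
For context, a minimal sketch of what pulling the Terraform state of a stack's apply job through the OCI Python SDK could look like; the client setup and the `job_id` lookup are assumptions for illustration, not code from this repository:

```python
import oci

# Minimal sketch, assuming a standard ~/.oci/config profile;
# test-infra builds its OCI clients differently.
config = oci.config.from_file()
resource_manager = oci.resource_manager.ResourceManagerClient(config)


def terraform_state_for_job(job_id: str) -> bytes:
    """Return the raw Terraform state produced by a Resource Manager apply job."""
    # get_job_tf_state streams the state file of the given job;
    # response.data wraps the HTTP response and .content holds the body bytes.
    response = resource_manager.get_job_tf_state(job_id)
    return response.data.content
```

The outputs section of that state would carry the same values this PR writes to the bucket as YAML.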

eliorerz (Contributor, Author) commented on Jan 10, 2025:

QE in some cases deploys the nodes and leaves all the resources up.

Contributor commented:

We would like to use it with the Cypress flow. In this case, we need the ability to read the yaml from outside test-infra.
/cc @dudyas6
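
As an illustration of that flow, a rough sketch of reading the uploaded YAML back from Object Storage with the OCI Python SDK; the bucket and object names mirror this PR's naming, while the client setup and the PyYAML dependency are assumptions:

```python
import oci
import yaml  # assumes PyYAML is available to the external consumer

config = oci.config.from_file()
object_storage = oci.object_storage.ObjectStorageClient(config)


def read_terraform_output(cluster_id: str) -> dict:
    """Download and parse the terraform-output YAML uploaded by prepare_nodes()."""
    namespace = object_storage.get_namespace().data
    response = object_storage.get_object(
        namespace,
        f"bucket-{cluster_id}",
        f"terraform-output-{cluster_id}.yaml",
    )
    return yaml.safe_load(response.data.content)
```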

Contributor commented:

About the resources that are not cleaned up: we store a stack for them, and the cleanup must run in reverse order.
If we abort test-infra, it would stay there.

In another case I have a bug with cleanup; did we try this flow on OCI?

         self.cloud_provider = terraform_output

     def is_active(self, node_name) -> bool:
@@ -16,7 +16,6 @@ class BaseClusterConfig(BaseEntityConfig, ABC):
     All arguments must default to None and be type annotated.
     """

-    cluster_id: str = None
     cluster_tags: str = None
     olm_operators: List[str] = None
     vip_dhcp_allocation: bool = None
@@ -34,3 +34,4 @@ class BaseEntityConfig(BaseConfig, ABC):
     is_bonded: bool = None
     num_bonded_slaves: int = None
     bonding_mode: str = None
+    cluster_id: str = None
@@ -13,7 +13,6 @@ class BaseInfraEnvConfig(BaseEntityConfig, ABC):
     """

     infra_env_id: str = None
-    cluster_id: str = None
     static_network_config: List[dict] = None
     ignition_config_override: str = None
     verify_download_iso_ssl: bool = None