From 249d8d23c6f2c47a6b7430ccd85a2d761d0f73aa Mon Sep 17 00:00:00 2001 From: Mathieu Cloutier Date: Tue, 19 Mar 2024 14:04:34 -0600 Subject: [PATCH 1/5] add .venv to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 0d51c10..e6bad95 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # Custom .envrc .env +.venv tmp/ .DS_Store From f4a57502eed17007c30e802d1d8ecd9c3078786e Mon Sep 17 00:00:00 2001 From: Mathieu Cloutier Date: Tue, 19 Mar 2024 14:06:49 -0600 Subject: [PATCH 2/5] Set default of `use_path_style` and legacy `force_path_style` to `true` --- bin/tflocal | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/bin/tflocal b/bin/tflocal index c8034dd..1eb3695 100755 --- a/bin/tflocal +++ b/bin/tflocal @@ -60,6 +60,7 @@ terraform { bucket = "" key = "" dynamodb_table = "" + use_path_style = "" access_key = "test" secret_key = "test" @@ -220,6 +221,7 @@ def generate_s3_backend_config() -> str: "key": "terraform.tfstate", "dynamodb_table": "tf-test-state", "region": get_region(), + "use_path_style": "true", "endpoints": { "s3": get_service_endpoint("s3"), "iam": get_service_endpoint("iam"), @@ -247,6 +249,9 @@ def generate_s3_backend_config() -> str: get_or_create_bucket(configs["bucket"]) get_or_create_ddb_table(configs["dynamodb_table"], region=configs["region"]) result = TF_S3_BACKEND_CONFIG + if is_tf_legacy: + result = result.replace("use_path_style", "force_path_style") + configs["force_path_style"] = configs.pop("use_path_style") for key, value in configs.items(): if isinstance(value, bool): value = str(value).lower() From 149b08dc6b39299a55a647fdcc5079db4096bd09 Mon Sep 17 00:00:00 2001 From: Mathieu Cloutier Date: Fri, 22 Mar 2024 14:03:41 -0600 Subject: [PATCH 3/5] Set use_path_style default to false and reuse use_s3_path_style to determine if s3 path style is required --- bin/tflocal | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/bin/tflocal 
b/bin/tflocal index ff751c4..0f67238 100755 --- a/bin/tflocal +++ b/bin/tflocal @@ -221,7 +221,7 @@ def generate_s3_backend_config() -> str: "key": "terraform.tfstate", "dynamodb_table": "tf-test-state", "region": get_region(), - "use_path_style": "true", + "use_path_style": "false", "endpoints": { "s3": get_service_endpoint("s3"), "iam": get_service_endpoint("iam"), @@ -244,6 +244,11 @@ def generate_s3_backend_config() -> str: backend_config["endpoints"] = { k: backend_config["endpoints"].get(k) or v for k, v in configs["endpoints"].items()} + # If the endpoint is configured in the backend we use that endpoint + # otherwise we will use the default created for the provider. Note that the user + # can still override this value with `use_path_style` in backend tf config + if use_s3_path_style(backend_config.get("endpoints", {}).get("s3")): + backend_config["use_path_style"] = "true" configs.update(backend_config) if not DRY_RUN: get_or_create_bucket(configs["bucket"]) @@ -294,7 +299,7 @@ def check_override_file(providers_file: str) -> None: # AWS CLIENT UTILS # --- -def use_s3_path_style() -> bool: +def use_s3_path_style(endpoint: str = "") -> bool: """ Whether to use S3 path addressing (depending on the configured S3 endpoint) If the endpoint starts with the `s3.` prefix, LocalStack will recognize virtual host addressing. If the endpoint @@ -302,7 +307,9 @@ def use_s3_path_style() -> bool: inter container communications in Docker. 
""" try: - host = urlparse(get_service_endpoint("s3")).hostname + if endpoint: + endpoint = add_http_protocol(endpoint) + host = urlparse(endpoint or get_service_endpoint("s3")).hostname except ValueError: host = "" @@ -348,15 +355,20 @@ def deactivate_access_key(access_key: str) -> str: return "L" + access_key[1:] if access_key[0] == "A" else access_key +def add_http_protocol(url: str) -> str: + """Add the http:// protocol if not found in the url""" + if "://" not in url: + return f"http://{url}" + return url + + def get_service_endpoint(service: str) -> str: """Get the service endpoint URL for the given service name""" # allow configuring a custom endpoint via the environment env_name = f"{service.replace('-', '_').upper().strip()}_ENDPOINT" env_endpoint = os.environ.get(env_name, "").strip() if env_endpoint: - if "://" not in env_endpoint: - env_endpoint = f"http://{env_endpoint}" - return env_endpoint + return add_http_protocol(env_endpoint) # some services need specific hostnames hostname = LOCALSTACK_HOSTNAME From 95eab831641264cc707204e258090eb41308a011 Mon Sep 17 00:00:00 2001 From: Mathieu Cloutier Date: Fri, 22 Mar 2024 14:08:40 -0600 Subject: [PATCH 4/5] expanded test of use_s3_path_style to ensure behavior when endpoint is provided --- tests/test_apply.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/tests/test_apply.py b/tests/test_apply.py index 69a471a..9d94800 100644 --- a/tests/test_apply.py +++ b/tests/test_apply.py @@ -113,23 +113,39 @@ def test_s3_path_addressing(): def test_use_s3_path_style(monkeypatch): - monkeypatch.setenv("S3_HOSTNAME", "s3.localhost.localstack.cloud") + s3_hostname = "s3.localhost.localstack.cloud" + monkeypatch.setenv("S3_HOSTNAME", s3_hostname) import_cli_code() assert not use_s3_path_style() # noqa + assert not use_s3_path_style(s3_hostname) # noqa - monkeypatch.setenv("S3_HOSTNAME", "localhost") + s3_hostname = "localhost" + monkeypatch.setenv("S3_HOSTNAME", s3_hostname) 
import_cli_code() assert use_s3_path_style() # noqa + assert use_s3_path_style(s3_hostname) # noqa # test the case where the S3_HOSTNAME could be a Docker container name - monkeypatch.setenv("S3_HOSTNAME", "localstack") + s3_hostname = "localstack" + monkeypatch.setenv("S3_HOSTNAME", s3_hostname) import_cli_code() assert use_s3_path_style() # noqa + assert use_s3_path_style(s3_hostname) # noqa # test the case where the S3_HOSTNAME could be an arbitrary host starting with `s3.` - monkeypatch.setenv("S3_HOSTNAME", "s3.internal.host") + s3_hostname = "s3.internal.host" + monkeypatch.setenv("S3_HOSTNAME", s3_hostname) import_cli_code() assert not use_s3_path_style() # noqa + assert not use_s3_path_style(s3_hostname) # noqa + + # test the case where a provided host name differs from the S3_HOSTNAME env default + s3_hostname = "s3.internal.host" + backend_host_name = "localstack" + monkeypatch.setenv("S3_HOSTNAME", s3_hostname) + import_cli_code() + assert not use_s3_path_style() # noqa + assert use_s3_path_style(backend_host_name) # noqa def test_provider_aliases(): From 954726c02169a998861359b4fb41184f472742b0 Mon Sep 17 00:00:00 2001 From: Mathieu Cloutier Date: Fri, 22 Mar 2024 14:10:04 -0600 Subject: [PATCH 5/5] added tests for path style in backend --- tests/test_apply.py | 68 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/tests/test_apply.py b/tests/test_apply.py index 9d94800..f0c7ef2 100644 --- a/tests/test_apply.py +++ b/tests/test_apply.py @@ -215,6 +215,74 @@ def test_s3_backend(): assert result["ResponseMetadata"]["HTTPStatusCode"] == 200 +@pytest.mark.parametrize("s3_hostname", ["", "localstack", "s3.amazonaws.com"]) +def test_s3_backend_use_path_style_with_hostname(monkeypatch, s3_hostname): + monkeypatch.setenv("DRY_RUN", "1") + monkeypatch.setenv("S3_HOSTNAME", s3_hostname) + + state_bucket = f"tf-state-{short_uid()}" + config = """ + terraform { + backend "s3" { + bucket = "%s" + key = 
"terraform.tfstate" + region = "us-east-2" + skip_credentials_validation = true + } + } + """ % state_bucket + temp_dir = deploy_tf_script(config, cleanup=False, user_input="yes") + + # Assert the override file set path style appropriately + override_file = os.path.join(temp_dir, "localstack_providers_override.tf") + + assert check_override_file_exists(override_file) + with open(override_file, "r") as fp: + result = hcl2.load(fp) + result = result["terraform"][0]["backend"][0]["s3"] + + # if the hostname isn't provided or is a s3.* we shouldn't use path name + use_path_style = s3_hostname != "" and not s3_hostname.startswith("s3.") + path_style_key = "force_path_style" if is_legacy_tf_version(get_version()) else "use_path_style" + assert result[path_style_key] == json.dumps(use_path_style) + + rmtree(temp_dir) + + +@pytest.mark.parametrize("s3_endpoint", ["http://localhost:4566", "https://s3.amazonaws.com"]) +def test_s3_backend_use_path_style_with_endpoint(monkeypatch, s3_endpoint): + monkeypatch.setenv("DRY_RUN", "1") + + state_bucket = f"tf-state-{short_uid()}" + config = """ + terraform { + backend "s3" { + bucket = "%s" + key = "terraform.tfstate" + region = "us-east-2" + skip_credentials_validation = true + endpoint = "%s" + } + } + """ % (state_bucket, s3_endpoint) + temp_dir = deploy_tf_script(config, cleanup=False, user_input="yes") + + # Assert the override file set path style appropriately + override_file = os.path.join(temp_dir, "localstack_providers_override.tf") + + assert check_override_file_exists(override_file) + with open(override_file, "r") as fp: + result = hcl2.load(fp) + result = result["terraform"][0]["backend"][0]["s3"] + + # if endpoint is a s3.* we shouldn't use path name + use_path_style = s3_endpoint.find("s3.") == -1 + path_style_key = "force_path_style" if is_legacy_tf_version(get_version()) else "use_path_style" + assert result[path_style_key] == json.dumps(use_path_style) + + rmtree(temp_dir) + + def test_dry_run(monkeypatch): 
monkeypatch.setenv("DRY_RUN", "1") state_bucket = "tf-state-dry-run"