
Allow for s3 backend path style #53

Closed
wants to merge 6 commits into from
1 change: 1 addition & 0 deletions .gitignore
@@ -1,6 +1,7 @@
# Custom
.envrc
.env
.venv
tmp/

.DS_Store
27 changes: 22 additions & 5 deletions bin/tflocal
@@ -60,6 +60,7 @@ terraform {
    bucket = "<bucket>"
    key = "<key>"
    dynamodb_table = "<dynamodb_table>"
    use_path_style = "<use_path_style>"
Contributor

This must be added dynamically, see my other comment below why.

Author

To keep things transparent and the style consistent, I left it here but changed the default to false.


    access_key = "test"
    secret_key = "test"
@@ -220,6 +221,7 @@ def generate_s3_backend_config() -> str:
        "key": "terraform.tfstate",
        "dynamodb_table": "tf-test-state",
        "region": get_region(),
        "use_path_style": "false",
        "endpoints": {
            "s3": get_service_endpoint("s3"),
            "iam": get_service_endpoint("iam"),
@@ -242,11 +244,19 @@ def generate_s3_backend_config() -> str:
        backend_config["endpoints"] = {
            k: backend_config["endpoints"].get(k) or v
            for k, v in configs["endpoints"].items()}
    # If an S3 endpoint is configured in the backend config, we use that endpoint;
    # otherwise we use the default created for the provider. Note that the user
    # can still override this value with `use_path_style` in the backend tf config.
    if use_s3_path_style(backend_config.get("endpoints", {}).get("s3")):
        backend_config["use_path_style"] = "true"
    configs.update(backend_config)
    if not DRY_RUN:
        get_or_create_bucket(configs["bucket"])
        get_or_create_ddb_table(configs["dynamodb_table"], region=configs["region"])
    result = TF_S3_BACKEND_CONFIG
    if is_tf_legacy:
Contributor

Nice backward-compatibility proofing. I believe we should introduce the same logic for the override file too if we address this here. Fancy adding it somewhere here?

if use_s3_path_style():

Author

I looked at that, and if I understand properly, you are suggesting changing the configuration of the AWS provider from `s3_use_path_style` (supported in provider versions >= 4) to `s3_force_path_style` (for provider versions < 4).

It can be quite a complex problem to determine which provider version will be used, as Terraform inspects all modules before deciding on the provider version.

Let me know if I am missing something 🤔
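
By contrast, the Terraform CLI version is easy to detect, which is what the `is_tf_legacy` rename below relies on. A minimal sketch of such a check (assuming the output format of `terraform version`, and assuming the S3 backend renamed `force_path_style` to `use_path_style` in Terraform 1.6):

import re
import subprocess

def get_tf_version() -> str:
    # Parse "1.5.7" out of output like "Terraform v1.5.7" (format assumed)
    out = subprocess.check_output(["terraform", "version"], text=True)
    return re.search(r"v(\d+\.\d+\.\d+)", out).group(1)

def is_legacy_tf(version: str) -> bool:
    # Assumption: Terraform < 1.6 only understands the legacy `force_path_style` key
    major, minor, _patch = (int(p) for p in version.split("."))
    return (major, minor) < (1, 6)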

        result = result.replace("use_path_style", "force_path_style")
        configs["force_path_style"] = configs.pop("use_path_style")
    for key, value in configs.items():
        if isinstance(value, bool):
            value = str(value).lower()
@@ -289,15 +299,17 @@ def check_override_file(providers_file: str) -> None:
# AWS CLIENT UTILS
# ---

def use_s3_path_style() -> bool:
def use_s3_path_style(endpoint: str = "") -> bool:
    """
    Whether to use S3 path addressing (depending on the configured S3 endpoint).
    If the endpoint starts with the `s3.` prefix, LocalStack will recognize virtual-host addressing; if it
    does not, use path style. This also allows overriding the endpoint to always use path style in case of
    inter-container communication in Docker.
    """
    try:
        host = urlparse(get_service_endpoint("s3")).hostname
        if endpoint:
            endpoint = add_http_protocol(endpoint)
        host = urlparse(endpoint or get_service_endpoint("s3")).hostname
    except ValueError:
        host = ""
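
Pieced together (the hunk above truncates before the return), the heuristic reduces to a prefix check on the resolved hostname. A self-contained sketch, assuming the return matches the docstring and the tests below:

from urllib.parse import urlparse

def _use_s3_path_style(endpoint: str = "",
                       default: str = "http://s3.localhost.localstack.cloud:4566") -> bool:
    # An explicit backend endpoint wins over the env-derived default
    # (the default value here is illustrative)
    url = endpoint if "://" in endpoint else f"http://{endpoint}" if endpoint else default
    try:
        host = urlparse(url).hostname or ""
    except ValueError:
        host = ""
    # Virtual-host addressing only for hosts like "s3.amazonaws.com";
    # "localhost" or a container name like "localstack" means path style
    return not host.startswith("s3.")

assert _use_s3_path_style("localstack") is True
assert _use_s3_path_style("s3.internal.host") is False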

@@ -343,15 +355,20 @@ def deactivate_access_key(access_key: str) -> str:
return "L" + access_key[1:] if access_key[0] == "A" else access_key


def add_http_protocol(url: str) -> str:
    """Add the http:// protocol if not found in the URL"""
    if "://" not in url:
        return f"http://{url}"
    return url
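
The helper preserves an existing scheme; for example:

assert add_http_protocol("localhost:4566") == "http://localhost:4566"  # scheme added
assert add_http_protocol("https://s3.amazonaws.com") == "https://s3.amazonaws.com"  # left untouched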


def get_service_endpoint(service: str) -> str:
    """Get the service endpoint URL for the given service name"""
    # allow configuring a custom endpoint via the environment
    env_name = f"{service.replace('-', '_').upper().strip()}_ENDPOINT"
    env_endpoint = os.environ.get(env_name, "").strip()
    if env_endpoint:
        if "://" not in env_endpoint:
            env_endpoint = f"http://{env_endpoint}"
        return env_endpoint
        return add_http_protocol(env_endpoint)

    # some services need specific hostnames
    hostname = LOCALSTACK_HOSTNAME
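
For illustration, the environment override at the top of this function would behave as follows (endpoint value illustrative):

import os

os.environ["S3_ENDPOINT"] = "localhost:4566"  # custom endpoint without a scheme
assert get_service_endpoint("s3") == "http://localhost:4566"  # add_http_protocol supplies the scheme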
92 changes: 88 additions & 4 deletions tests/test_apply.py
@@ -113,23 +113,39 @@ def test_s3_path_addressing():


def test_use_s3_path_style(monkeypatch):
    monkeypatch.setenv("S3_HOSTNAME", "s3.localhost.localstack.cloud")
    s3_hostname = "s3.localhost.localstack.cloud"
    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
    import_cli_code()
    assert not use_s3_path_style() # noqa
    assert not use_s3_path_style(s3_hostname) # noqa

    monkeypatch.setenv("S3_HOSTNAME", "localhost")
    s3_hostname = "localhost"
    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
    import_cli_code()
    assert use_s3_path_style() # noqa
    assert use_s3_path_style(s3_hostname) # noqa

    # test the case where the S3_HOSTNAME could be a Docker container name
    monkeypatch.setenv("S3_HOSTNAME", "localstack")
    s3_hostname = "localstack"
    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
    import_cli_code()
    assert use_s3_path_style() # noqa
    assert use_s3_path_style(s3_hostname) # noqa

    # test the case where the S3_HOSTNAME could be an arbitrary host starting with `s3.`
    monkeypatch.setenv("S3_HOSTNAME", "s3.internal.host")
    s3_hostname = "s3.internal.host"
    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
    import_cli_code()
    assert not use_s3_path_style() # noqa
    assert not use_s3_path_style(s3_hostname) # noqa

    # test the case where a provided host name differs from the S3_HOSTNAME env default
    s3_hostname = "s3.internal.host"
    backend_host_name = "localstack"
    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)
    import_cli_code()
    assert not use_s3_path_style() # noqa
    assert use_s3_path_style(backend_host_name) # noqa


def test_provider_aliases():
@@ -199,6 +215,74 @@ def test_s3_backend():
    assert result["ResponseMetadata"]["HTTPStatusCode"] == 200


@pytest.mark.parametrize("s3_hostname", ["", "localstack", "s3.amazonaws.com"])
def test_s3_backend_use_path_style_with_hostname(monkeypatch, s3_hostname):
    monkeypatch.setenv("DRY_RUN", "1")
    monkeypatch.setenv("S3_HOSTNAME", s3_hostname)

    state_bucket = f"tf-state-{short_uid()}"
    config = """
    terraform {
      backend "s3" {
        bucket = "%s"
        key = "terraform.tfstate"
        region = "us-east-2"
        skip_credentials_validation = true
      }
    }
    """ % state_bucket
    temp_dir = deploy_tf_script(config, cleanup=False, user_input="yes")

    # Assert the override file sets path style appropriately
    override_file = os.path.join(temp_dir, "localstack_providers_override.tf")

    assert check_override_file_exists(override_file)
    with open(override_file, "r") as fp:
        result = hcl2.load(fp)
        result = result["terraform"][0]["backend"][0]["s3"]

    # if the hostname isn't provided or starts with "s3.", we shouldn't use path style
    use_path_style = s3_hostname != "" and not s3_hostname.startswith("s3.")
    path_style_key = "force_path_style" if is_legacy_tf_version(get_version()) else "use_path_style"
    assert result[path_style_key] == json.dumps(use_path_style)

    rmtree(temp_dir)


@pytest.mark.parametrize("s3_endpoint", ["http://localhost:4566", "https://s3.amazonaws.com"])
def test_s3_backend_use_path_style_with_endpoint(monkeypatch, s3_endpoint):
    monkeypatch.setenv("DRY_RUN", "1")

    state_bucket = f"tf-state-{short_uid()}"
    config = """
    terraform {
      backend "s3" {
        bucket = "%s"
        key = "terraform.tfstate"
        region = "us-east-2"
        skip_credentials_validation = true
        endpoint = "%s"
      }
    }
    """ % (state_bucket, s3_endpoint)
    temp_dir = deploy_tf_script(config, cleanup=False, user_input="yes")

    # Assert the override file sets path style appropriately
    override_file = os.path.join(temp_dir, "localstack_providers_override.tf")

    assert check_override_file_exists(override_file)
    with open(override_file, "r") as fp:
        result = hcl2.load(fp)
        result = result["terraform"][0]["backend"][0]["s3"]

    # if the endpoint contains "s3.", we shouldn't use path style
    use_path_style = s3_endpoint.find("s3.") == -1
    path_style_key = "force_path_style" if is_legacy_tf_version(get_version()) else "use_path_style"
    assert result[path_style_key] == json.dumps(use_path_style)

    rmtree(temp_dir)


def test_dry_run(monkeypatch):
    monkeypatch.setenv("DRY_RUN", "1")
    state_bucket = "tf-state-dry-run"