From 0c229f70ef24ac9a77ac0210e1f0a927cdb09622 Mon Sep 17 00:00:00 2001
From: Justin McCormick
Date: Sun, 11 Nov 2018 21:18:16 -0500
Subject: [PATCH] Switching to dep and locking terraform to v0.11

---
 Gopkg.lock | 396 +
 Gopkg.toml | 34 +
 README.md | 4 +
 .../github.com/agext/levenshtein/.gitignore | 2 +
 .../github.com/agext/levenshtein/.travis.yml | 70 +
 vendor/github.com/agext/levenshtein/DCO | 36 +
 vendor/github.com/agext/levenshtein/LICENSE | 201 +
 .../github.com/agext/levenshtein/MAINTAINERS | 1 +
 vendor/github.com/agext/levenshtein/NOTICE | 5 +
 vendor/github.com/agext/levenshtein/README.md | 38 +
 .../agext/levenshtein/levenshtein.go | 290 +
 vendor/github.com/agext/levenshtein/params.go | 152 +
 .../apparentlymart/go-cidr/cidr/cidr.go | 111 +-
 .../apparentlymart/go-textseg/LICENSE | 95 +
 .../go-textseg/textseg/all_tokens.go | 30 +
 .../go-textseg/textseg/generate.go | 7 +
 .../go-textseg/textseg/grapheme_clusters.go | 5276 ++++
 .../go-textseg/textseg/grapheme_clusters.rl | 132 +
 .../textseg/grapheme_clusters_table.rl | 1583 ++
 .../go-textseg/textseg/make_tables.go | 307 +
 .../go-textseg/textseg/make_test_tables.go | 212 +
 .../go-textseg/textseg/tables.go | 5700 ++++
 .../go-textseg/textseg/unicode2ragel.rb | 335 +
 .../go-textseg/textseg/utf8_seqs.go | 19 +
 vendor/github.com/armon/go-radix/.gitignore | 22 +
 vendor/github.com/armon/go-radix/.travis.yml | 3 +
 vendor/github.com/armon/go-radix/LICENSE | 20 +
 vendor/github.com/armon/go-radix/README.md | 38 +
 vendor/github.com/armon/go-radix/go.mod | 1 +
 vendor/github.com/armon/go-radix/radix.go | 540 +
 .../aws/aws-sdk-go/aws/awsutil/prettify.go | 6 +
 .../aws/aws-sdk-go/aws/client/client.go | 85 +-
 .../aws-sdk-go/aws/client/default_retryer.go | 86 +-
 .../aws/aws-sdk-go/aws/client/logger.go | 184 +
 .../aws/client/metadata/client_info.go | 1 +
 .../github.com/aws/aws-sdk-go/aws/config.go | 134 +-
 .../github.com/aws/aws-sdk-go/aws/context.go | 71 +
 .../aws/aws-sdk-go/aws/context_1_6.go | 41 +
 .../aws/aws-sdk-go/aws/context_1_7.go | 9 +
 .../aws/aws-sdk-go/aws/convert_types.go | 18 +
 .../aws-sdk-go/aws/corehandlers/handlers.go | 186 +-
 .../aws-sdk-go/aws/corehandlers/user_agent.go | 37 +
 .../aws/credentials/chain_provider.go | 20 +-
 .../aws-sdk-go/aws/credentials/credentials.go | 62 +-
 .../ec2rolecreds/ec2_role_provider.go | 8 +-
 .../aws/credentials/endpointcreds/provider.go | 7 +
 .../aws/credentials/env_provider.go | 5 +-
 .../shared_credentials_provider.go | 63 +-
 .../aws/credentials/static_provider.go | 2 -
 .../stscreds/assume_role_provider.go | 159 +-
 .../github.com/aws/aws-sdk-go/aws/csm/doc.go | 46 +
 .../aws/aws-sdk-go/aws/csm/enable.go | 67 +
 .../aws/aws-sdk-go/aws/csm/metric.go | 53 +
 .../aws/aws-sdk-go/aws/csm/metric_chan.go | 54 +
 .../aws/aws-sdk-go/aws/csm/reporter.go | 242 +
 .../aws/aws-sdk-go/aws/defaults/defaults.go | 114 +-
 .../aws-sdk-go/aws/defaults/shared_config.go | 27 +
 vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 +
 .../aws/aws-sdk-go/aws/ec2metadata/api.go | 32 +-
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 25 +
 .../aws/aws-sdk-go/aws/endpoints/decode.go | 155 +
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 3487 +++
 .../aws/aws-sdk-go/aws/endpoints/doc.go | 66 +
 .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 449 +
 .../aws/aws-sdk-go/aws/endpoints/v3model.go | 307 +
 .../aws/endpoints/v3model_codegen.go | 337 +
 .../github.com/aws/aws-sdk-go/aws/errors.go | 4 -
 .../aws/aws-sdk-go/aws/jsonvalue.go | 12 +
 .../github.com/aws/aws-sdk-go/aws/logger.go | 10 +-
 .../aws/request/connection_reset_error.go | 19 +
 .../request/connection_reset_error_other.go | 11 +
 .../aws/aws-sdk-go/aws/request/handlers.go | 115 +-
 .../aws-sdk-go/aws/request/http_request.go | 21 +-
 .../aws/request/http_request_1_4.go | 31 -
 .../aws-sdk-go/aws/request/offset_reader.go | 23 +-
 .../aws/aws-sdk-go/aws/request/request.go | 465 +-
 .../aws/aws-sdk-go/aws/request/request_1_7.go | 39 +
 .../aws/aws-sdk-go/aws/request/request_1_8.go | 33 +
 .../aws-sdk-go/aws/request/request_context.go | 14 +
 .../aws/request/request_context_1_6.go | 14 +
 .../aws/request/request_pagination.go | 192 +-
 .../aws/aws-sdk-go/aws/request/retryer.go | 100 +-
 .../aws/request/timeout_read_closer.go | 94 +
 .../aws/aws-sdk-go/aws/request/validation.go | 29 +-
 .../aws/aws-sdk-go/aws/request/waiter.go | 295 +
 .../aws/aws-sdk-go/aws/session/doc.go | 102 +-
 .../aws/aws-sdk-go/aws/session/env_config.go | 106 +-
 .../aws/aws-sdk-go/aws/session/session.go | 443 +-
 .../aws-sdk-go/aws/session/shared_config.go | 101 +-
 .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 +
 .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 +
 .../aws/aws-sdk-go/aws/signer/v4/v4.go | 313 +-
 vendor/github.com/aws/aws-sdk-go/aws/types.go | 99 +-
 vendor/github.com/aws/aws-sdk-go/aws/url.go | 12 +
 .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 +
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../aws/aws-sdk-go/internal/ini/ast.go | 120 +
 .../aws-sdk-go/internal/ini/comma_token.go | 11 +
 .../aws-sdk-go/internal/ini/comment_token.go | 35 +
 .../aws/aws-sdk-go/internal/ini/doc.go | 29 +
 .../aws-sdk-go/internal/ini/empty_token.go | 4 +
 .../aws/aws-sdk-go/internal/ini/expression.go | 24 +
 .../aws/aws-sdk-go/internal/ini/fuzz.go | 17 +
 .../aws/aws-sdk-go/internal/ini/ini.go | 51 +
 .../aws/aws-sdk-go/internal/ini/ini_lexer.go | 165 +
 .../aws/aws-sdk-go/internal/ini/ini_parser.go | 337 +
 .../aws-sdk-go/internal/ini/literal_tokens.go | 324 +
 .../aws-sdk-go/internal/ini/newline_token.go | 30 +
 .../aws-sdk-go/internal/ini/number_helper.go | 152 +
 .../aws/aws-sdk-go/internal/ini/op_tokens.go | 39 +
 .../aws-sdk-go/internal/ini/parse_error.go | 43 +
 .../aws-sdk-go/internal/ini/parse_stack.go | 60 +
 .../aws/aws-sdk-go/internal/ini/sep_tokens.go | 41 +
 .../aws/aws-sdk-go/internal/ini/skipper.go | 45 +
 .../aws/aws-sdk-go/internal/ini/statement.go | 35 +
 .../aws/aws-sdk-go/internal/ini/value_util.go | 284 +
 .../aws/aws-sdk-go/internal/ini/visitor.go | 166 +
 .../aws/aws-sdk-go/internal/ini/walker.go | 25 +
 .../aws/aws-sdk-go/internal/ini/ws_token.go | 24 +
 .../aws/aws-sdk-go/internal/s3err/error.go | 57 +
 .../aws/aws-sdk-go/internal/sdkio/io_go1.6.go | 10 +
 .../aws/aws-sdk-go/internal/sdkio/io_go1.7.go | 12 +
 .../internal/sdkrand/locked_source.go | 29 +
 .../aws/aws-sdk-go/internal/sdkuri/path.go | 23 +
 .../internal/shareddefaults/ecs_container.go | 12 +
 .../internal/shareddefaults/shared_config.go | 40 +
 .../aws-sdk-go/private/endpoints/endpoints.go | 70 -
 .../private/endpoints/endpoints.json | 78 -
 .../private/endpoints/endpoints_map.go | 91 -
 .../private/protocol/eventstream/debug.go | 144 +
 .../private/protocol/eventstream/decode.go | 199 +
 .../private/protocol/eventstream/encode.go | 114 +
 .../private/protocol/eventstream/error.go | 23 +
 .../eventstream/eventstreamapi/api.go | 196 +
 .../eventstream/eventstreamapi/error.go | 24 +
 .../private/protocol/eventstream/header.go | 166 +
 .../protocol/eventstream/header_value.go | 501 +
 .../private/protocol/eventstream/message.go | 103 +
 .../aws/aws-sdk-go/private/protocol/host.go | 21 +
 .../aws-sdk-go/private/protocol/jsonvalue.go | 76 +
 .../aws-sdk-go/private/protocol/payload.go | 81 +
 .../private/protocol/query/build.go | 4 +-
 .../protocol/query/queryutil/queryutil.go | 20 +-
 .../private/protocol/query/unmarshal.go | 8 +-
 .../private/protocol/query/unmarshal_error.go | 14 +-
 .../aws-sdk-go/private/protocol/rest/build.go | 125 +-
 .../private/protocol/rest/unmarshal.go | 41 +-
 .../private/protocol/restxml/restxml.go | 16 +-
 .../aws-sdk-go/private/protocol/timestamp.go | 72 +
 .../private/protocol/xml/xmlutil/build.go | 33 +-
 .../private/protocol/xml/xmlutil/unmarshal.go | 34 +-
 .../protocol/xml/xmlutil/xml_to_struct.go | 51 +-
 .../aws/aws-sdk-go/private/waiter/waiter.go | 134 -
 .../aws/aws-sdk-go/service/s3/api.go | 22492 ++++++++++++----
 .../aws/aws-sdk-go/service/s3/body_hash.go | 249 +
 .../aws-sdk-go/service/s3/bucket_location.go | 65 +-
 .../aws/aws-sdk-go/service/s3/content_md5.go | 36 -
 .../aws-sdk-go/service/s3/customizations.go | 27 +
 .../aws/aws-sdk-go/service/s3/doc.go | 26 +
 .../aws/aws-sdk-go/service/s3/doc_custom.go | 109 +
 .../aws/aws-sdk-go/service/s3/errors.go | 48 +
 .../service/s3/host_style_bucket.go | 74 +-
 .../aws/aws-sdk-go/service/s3/service.go | 33 +-
 .../aws/aws-sdk-go/service/s3/sse.go | 18 +-
 .../aws-sdk-go/service/s3/statusok_error.go | 12 +-
 .../aws-sdk-go/service/s3/unmarshal_error.go | 14 +-
 .../aws/aws-sdk-go/service/s3/waiters.go | 223 +-
 .../aws/aws-sdk-go/service/sts/api.go | 1311 +-
 .../aws/aws-sdk-go/service/sts/doc.go | 72 +
 .../aws/aws-sdk-go/service/sts/errors.go | 73 +
 .../aws/aws-sdk-go/service/sts/service.go | 69 +-
 .../github.com/bgentry/speakeasy/.gitignore | 2 +
 vendor/github.com/bgentry/speakeasy/LICENSE | 24 +
 .../bgentry/speakeasy/LICENSE_WINDOWS | 201 +
 vendor/github.com/bgentry/speakeasy/Readme.md | 30 +
 .../github.com/bgentry/speakeasy/speakeasy.go | 49 +
 .../bgentry/speakeasy/speakeasy_unix.go | 93 +
 .../bgentry/speakeasy/speakeasy_windows.go | 41 +
 vendor/github.com/blang/semver/.travis.yml | 21 +
 vendor/github.com/blang/semver/LICENSE | 22 +
 vendor/github.com/blang/semver/README.md | 194 +
 vendor/github.com/blang/semver/json.go | 23 +
 vendor/github.com/blang/semver/package.json | 17 +
 vendor/github.com/blang/semver/range.go | 416 +
 vendor/github.com/blang/semver/semver.go | 418 +
 vendor/github.com/blang/semver/sort.go | 28 +
 vendor/github.com/blang/semver/sql.go | 30 +
 vendor/github.com/go-ini/ini/LICENSE | 191 -
 vendor/github.com/go-ini/ini/README.md | 590 -
 vendor/github.com/go-ini/ini/README_ZH.md | 577 -
 vendor/github.com/go-ini/ini/ini.go | 1027 -
 vendor/github.com/go-ini/ini/parser.go | 312 -
 vendor/github.com/go-ini/ini/struct.go | 351 -
 .../github.com/hashicorp/go-cleanhttp/LICENSE | 363 +
 .../hashicorp/go-cleanhttp/README.md | 30 +
 .../hashicorp/go-cleanhttp/cleanhttp.go | 57 +
 .../github.com/hashicorp/go-cleanhttp/doc.go | 20 +
 .../hashicorp/go-getter/.travis.yml | 23 +
 .../github.com/hashicorp/go-getter/README.md | 64 +-
 .../hashicorp/go-getter/appveyor.yml | 2 +-
 .../github.com/hashicorp/go-getter/client.go | 46 +-
 .../hashicorp/go-getter/decompress.go | 29 +
 .../hashicorp/go-getter/decompress_gzip.go | 2 +-
 .../hashicorp/go-getter/decompress_tar.go | 138 +
 .../hashicorp/go-getter/decompress_tbz2.go | 64 +-
 .../hashicorp/go-getter/decompress_testing.go | 36 +-
 .../hashicorp/go-getter/decompress_tgz.go | 62 +-
 .../hashicorp/go-getter/decompress_txz.go | 39 +
 .../hashicorp/go-getter/decompress_xz.go | 49 +
 .../hashicorp/go-getter/decompress_zip.go | 5 +
 .../github.com/hashicorp/go-getter/detect.go | 6 +
 .../hashicorp/go-getter/detect_file.go | 2 +-
 vendor/github.com/hashicorp/go-getter/get.go | 13 +-
 .../hashicorp/go-getter/get_file.go | 24 +
 .../hashicorp/go-getter/get_file_unix.go | 2 +-
 .../github.com/hashicorp/go-getter/get_git.go | 147 +-
 .../github.com/hashicorp/go-getter/get_hg.go | 14 +-
 .../hashicorp/go-getter/get_http.go | 56 +-
 .../hashicorp/go-getter/get_mock.go | 7 +
 .../github.com/hashicorp/go-getter/get_s3.go | 112 +-
 .../github.com/hashicorp/go-getter/source.go | 26 +
 .../detect-file-symlink-pwd/syml/pwd | 1 +
 .../github.com/hashicorp/go-safetemp/LICENSE | 362 +
 .../hashicorp/go-safetemp/README.md | 10 +
 .../github.com/hashicorp/go-safetemp/go.mod | 1 +
 .../hashicorp/go-safetemp/safetemp.go | 40 +
 .../hashicorp/go-version/.travis.yml | 12 +
 .../github.com/hashicorp/go-version/README.md | 2 +-
 .../hashicorp/go-version/constraint.go | 24 +-
 .../hashicorp/go-version/version.go | 121 +-
 vendor/github.com/hashicorp/hcl/.gitignore | 9 +
 vendor/github.com/hashicorp/hcl/.travis.yml | 12 +
 vendor/github.com/hashicorp/hcl/appveyor.yml | 2 +-
 vendor/github.com/hashicorp/hcl/decoder.go | 14 +-
 .../github.com/hashicorp/hcl/hcl/ast/ast.go | 3 +-
 .../hashicorp/hcl/hcl/parser/parser.go | 57 +-
 .../hashicorp/hcl/hcl/scanner/scanner.go | 8 +-
 .../hashicorp/hcl/hcl/strconv/quote.go | 19 +-
 .../hashicorp/hcl/json/parser/flatten.go | 6 +
 .../hashicorp/hcl/json/parser/parser.go | 12 +-
 vendor/github.com/hashicorp/hcl2/LICENSE | 353 +
 .../github.com/hashicorp/hcl2/gohcl/decode.go | 304 +
 vendor/github.com/hashicorp/hcl2/gohcl/doc.go | 49 +
 .../github.com/hashicorp/hcl2/gohcl/schema.go | 174 +
 .../github.com/hashicorp/hcl2/gohcl/types.go | 16 +
 .../hashicorp/hcl2/hcl/diagnostic.go | 103 +
 .../hashicorp/hcl2/hcl/diagnostic_text.go | 168 +
 .../hashicorp/hcl2/hcl/didyoumean.go | 24 +
 vendor/github.com/hashicorp/hcl2/hcl/doc.go | 1 +
 .../hashicorp/hcl2/hcl/eval_context.go | 25 +
 .../hashicorp/hcl2/hcl/expr_call.go | 46 +
 .../hashicorp/hcl2/hcl/expr_list.go | 37 +
 .../github.com/hashicorp/hcl2/hcl/expr_map.go | 44 +
 .../hashicorp/hcl2/hcl/expr_unwrap.go | 68 +
 .../hcl2/hcl/hclsyntax/didyoumean.go | 24 +
 .../hashicorp/hcl2/hcl/hclsyntax/doc.go | 7 +
 .../hcl2/hcl/hclsyntax/expression.go | 1275 +
 .../hcl2/hcl/hclsyntax/expression_ops.go | 258 +
 .../hcl2/hcl/hclsyntax/expression_template.go | 192 +
 .../hcl2/hcl/hclsyntax/expression_vars.go | 76 +
 .../hcl2/hcl/hclsyntax/expression_vars_gen.go | 99 +
 .../hashicorp/hcl2/hcl/hclsyntax/file.go | 20 +
 .../hashicorp/hcl2/hcl/hclsyntax/generate.go | 9 +
 .../hashicorp/hcl2/hcl/hclsyntax/keywords.go | 21 +
 .../hcl2/hcl/hclsyntax/navigation.go | 41 +
 .../hashicorp/hcl2/hcl/hclsyntax/node.go | 22 +
 .../hashicorp/hcl2/hcl/hclsyntax/parser.go | 1836 ++
 .../hcl2/hcl/hclsyntax/parser_template.go | 728 +
 .../hcl2/hcl/hclsyntax/parser_traversal.go | 159 +
 .../hashicorp/hcl2/hcl/hclsyntax/peeker.go | 212 +
 .../hashicorp/hcl2/hcl/hclsyntax/public.go | 171 +
 .../hcl2/hcl/hclsyntax/scan_string_lit.go | 301 +
 .../hcl2/hcl/hclsyntax/scan_string_lit.rl | 105 +
 .../hcl2/hcl/hclsyntax/scan_tokens.go | 5443 ++++
 .../hcl2/hcl/hclsyntax/scan_tokens.rl | 376 +
 .../hashicorp/hcl2/hcl/hclsyntax/spec.md | 923 +
 .../hashicorp/hcl2/hcl/hclsyntax/structure.go | 379 +
 .../hashicorp/hcl2/hcl/hclsyntax/token.go | 272 +
 .../hcl2/hcl/hclsyntax/token_type_string.go | 69 +
 .../hcl2/hcl/hclsyntax/unicode2ragel.rb | 335 +
 .../hcl2/hcl/hclsyntax/unicode_derived.rl | 2135 ++
 .../hashicorp/hcl2/hcl/hclsyntax/variables.go | 86 +
 .../hashicorp/hcl2/hcl/hclsyntax/walk.go | 77 +
 .../github.com/hashicorp/hcl2/hcl/json/ast.go | 121 +
 .../hashicorp/hcl2/hcl/json/didyoumean.go | 33 +
 .../github.com/hashicorp/hcl2/hcl/json/doc.go | 8 +
 .../hashicorp/hcl2/hcl/json/navigation.go | 70 +
 .../hashicorp/hcl2/hcl/json/parser.go | 491 +
 .../hashicorp/hcl2/hcl/json/peeker.go | 25 +
 .../hashicorp/hcl2/hcl/json/public.go | 94 +
 .../hashicorp/hcl2/hcl/json/scanner.go | 293 +
 .../hashicorp/hcl2/hcl/json/spec.md | 405 +
 .../hashicorp/hcl2/hcl/json/structure.go | 616 +
 .../hcl2/hcl/json/tokentype_string.go | 29 +
 .../github.com/hashicorp/hcl2/hcl/merged.go | 226 +
 vendor/github.com/hashicorp/hcl2/hcl/ops.go | 147 +
 vendor/github.com/hashicorp/hcl2/hcl/pos.go | 262 +
 .../hashicorp/hcl2/hcl/pos_scanner.go | 148 +
 .../github.com/hashicorp/hcl2/hcl/schema.go | 21 +
 vendor/github.com/hashicorp/hcl2/hcl/spec.md | 691 +
 .../hashicorp/hcl2/hcl/static_expr.go | 40 +
 .../hashicorp/hcl2/hcl/structure.go | 151 +
 .../hashicorp/hcl2/hcl/traversal.go | 352 +
 .../hashicorp/hcl2/hcl/traversal_for_expr.go | 121 +
 .../hashicorp/hcl2/hcldec/block_labels.go | 21 +
 .../hashicorp/hcl2/hcldec/decode.go | 36 +
 .../github.com/hashicorp/hcl2/hcldec/doc.go | 12 +
 .../github.com/hashicorp/hcl2/hcldec/gob.go | 23 +
 .../hashicorp/hcl2/hcldec/public.go | 78 +
 .../hashicorp/hcl2/hcldec/schema.go | 36 +
 .../github.com/hashicorp/hcl2/hcldec/spec.go | 998 +
 .../hashicorp/hcl2/hcldec/variables.go | 34 +
 .../hashicorp/hcl2/hclparse/parser.go | 123 +
 vendor/github.com/hashicorp/hil/.gitignore | 3 +
 vendor/github.com/hashicorp/hil/.travis.yml | 3 +
 .../hashicorp/hil/ast/arithmetic_op.go | 13 +-
 vendor/github.com/hashicorp/hil/ast/ast.go | 25 +-
 .../hashicorp/hil/ast/conditional.go | 36 +
 vendor/github.com/hashicorp/hil/ast/index.go | 2 +
 .../github.com/hashicorp/hil/ast/literal.go | 55 +
 .../hashicorp/hil/ast/type_string.go | 28 +-
 .../github.com/hashicorp/hil/ast/unknown.go | 30 +
 .../hashicorp/hil/ast/variables_helper.go | 62 +-
 vendor/github.com/hashicorp/hil/builtins.go | 162 +
 .../github.com/hashicorp/hil/check_types.go | 345 +-
 vendor/github.com/hashicorp/hil/convert.go | 11 +
 vendor/github.com/hashicorp/hil/eval.go | 169 +-
 vendor/github.com/hashicorp/hil/eval_type.go | 2 +
 .../hashicorp/hil/evaltype_string.go | 14 +-
 vendor/github.com/hashicorp/hil/lang.y | 200 -
 vendor/github.com/hashicorp/hil/lex.go | 407 -
 vendor/github.com/hashicorp/hil/parse.go | 51 +-
 .../hashicorp/hil/parser/binary_op.go | 45 +
 .../github.com/hashicorp/hil/parser/error.go | 38 +
 .../github.com/hashicorp/hil/parser/fuzz.go | 28 +
 .../github.com/hashicorp/hil/parser/parser.go | 522 +
 .../hashicorp/hil/scanner/peeker.go | 55 +
 .../hashicorp/hil/scanner/scanner.go | 556 +
 .../github.com/hashicorp/hil/scanner/token.go | 105 +
 .../hashicorp/hil/scanner/tokentype_string.go | 51 +
 vendor/github.com/hashicorp/hil/y.go | 666 -
 vendor/github.com/hashicorp/hil/y.output | 328 -
 .../hashicorp/terraform/config/append.go | 16 +
 .../hashicorp/terraform/config/config.go | 640 +-
 .../terraform/config/config_string.go | 78 +-
 .../terraform/config/config_terraform.go | 117 +
 .../config/configschema/decoder_spec.go | 97 +
 .../terraform/config/configschema/doc.go | 14 +
 .../config/configschema/implied_type.go | 21 +
 .../config/configschema/internal_validate.go | 92 +
 .../config/configschema/nestingmode_string.go | 16 +
 .../terraform/config/configschema/schema.go | 107 +
 .../terraform/config/hcl2_shim_util.go | 134 +
 .../config/hcl2shim/single_attr_body.go | 85 +
 .../terraform/config/hcl2shim/values.go | 246 +
 .../hashicorp/terraform/config/import_tree.go | 54 +-
 .../hashicorp/terraform/config/interpolate.go | 84 +-
 .../terraform/config/interpolate_funcs.go | 757 +-
 .../terraform/config/interpolate_walk.go | 45 +-
 .../hashicorp/terraform/config/loader.go | 27 +-
 .../hashicorp/terraform/config/loader_hcl.go | 590 +-
 .../hashicorp/terraform/config/loader_hcl2.go | 473 +
 .../hashicorp/terraform/config/merge.go | 21 +
 .../hashicorp/terraform/config/module/get.go | 20 +-
 .../terraform/config/module/inode.go | 2 +-
 .../terraform/config/module/module.go | 6 +-
 .../terraform/config/module/storage.go | 346 +
 .../terraform/config/module/testing.go | 36 +
 .../hashicorp/terraform/config/module/tree.go | 406 +-
 .../config/module/validate_provider_alias.go | 118 +
 .../terraform/config/module/versions.go | 95 +
 .../hashicorp/terraform/config/providers.go | 103 +
 .../terraform/config/provisioner_enums.go | 40 +
 .../hashicorp/terraform/config/raw_config.go | 188 +-
 .../terraform/config/resource_mode_string.go | 6 +-
 .../hashicorp/terraform/config/testing.go | 17 +
 .../github.com/hashicorp/terraform/dag/dag.go | 125 +-
 .../github.com/hashicorp/terraform/dag/dot.go | 282 +
 .../hashicorp/terraform/dag/graph.go | 135 +-
 .../hashicorp/terraform/dag/marshal.go | 474 +
 .../github.com/hashicorp/terraform/dag/set.go | 36 +
 .../hashicorp/terraform/dag/walk.go | 445 +
 .../hashicorp/terraform/dot/graph.go | 224 -
 .../hashicorp/terraform/dot/graph_writer.go | 47 -
 .../hashicorp/terraform/flatmap/expand.go | 92 +-
 .../hashicorp/terraform/httpclient/client.go | 18 +
 .../terraform/httpclient/useragent.go | 40 +
 .../terraform/moduledeps/dependencies.go | 43 +
 .../hashicorp/terraform/moduledeps/doc.go | 7 +
 .../hashicorp/terraform/moduledeps/module.go | 204 +
 .../terraform/moduledeps/provider.go | 30 +
 .../terraform/plugin/discovery/error.go | 30 +
 .../terraform/plugin/discovery/find.go | 191 +
 .../terraform/plugin/discovery/get.go | 548 +
 .../terraform/plugin/discovery/get_cache.go | 48 +
 .../terraform/plugin/discovery/meta.go | 41 +
 .../terraform/plugin/discovery/meta_set.go | 195 +
 .../plugin/discovery/requirements.go | 105 +
 .../terraform/plugin/discovery/signature.go | 53 +
 .../terraform/plugin/discovery/version.go | 72 +
 .../terraform/plugin/discovery/version_set.go | 84 +
 .../hashicorp/terraform/registry/client.go | 227 +
 .../hashicorp/terraform/registry/errors.go | 23 +
 .../registry/regsrc/friendly_host.go | 140 +
 .../terraform/registry/regsrc/module.go | 205 +
 .../terraform/registry/regsrc/regsrc.go | 8 +
 .../terraform/registry/response/module.go | 93 +
 .../registry/response/module_list.go | 7 +
 .../registry/response/module_provider.go | 14 +
 .../registry/response/module_versions.go | 32 +
 .../terraform/registry/response/pagination.go | 65 +
 .../terraform/registry/response/redirect.go | 6 +
 .../hashicorp/terraform/svchost/auth/cache.go | 45 +
 .../terraform/svchost/auth/credentials.go | 63 +
 .../terraform/svchost/auth/from_map.go | 18 +
 .../terraform/svchost/auth/helper_program.go | 80 +
 .../terraform/svchost/auth/static.go | 28 +
 .../svchost/auth/token_credentials.go | 25 +
 .../terraform/svchost/disco/disco.go | 241 +
 .../hashicorp/terraform/svchost/disco/host.go | 51 +
 .../hashicorp/terraform/svchost/label_iter.go | 69 +
 .../hashicorp/terraform/svchost/svchost.go | 207 +
 .../hashicorp/terraform/terraform/context.go | 610 +-
 .../terraform/terraform/context_components.go | 65 +
 .../terraform/terraform/context_graph_type.go | 32 +
 .../terraform/terraform/context_import.go | 19 +-
 .../hashicorp/terraform/terraform/debug.go | 523 +
 .../hashicorp/terraform/terraform/diff.go | 279 +-
 .../terraform/terraform/edge_destroy.go | 17 +
 .../hashicorp/terraform/terraform/eval.go | 4 +-
 .../terraform/terraform/eval_apply.go | 151 +-
 .../terraform/eval_check_prevent_destroy.go | 12 +-
 .../terraform/terraform/eval_context.go | 17 +-
 .../terraform/eval_context_builtin.go | 144 +-
 .../terraform/terraform/eval_context_mock.go | 46 +-
 .../terraform/eval_count_boundary.go | 78 +
 .../terraform/eval_count_computed.go | 25 +
 .../terraform/terraform/eval_diff.go | 189 +-
 .../terraform/terraform/eval_interpolate.go | 40 +-
 .../terraform/terraform/eval_local.go | 86 +
 .../terraform/terraform/eval_output.go | 38 +-
 .../terraform/terraform/eval_provider.go | 54 +-
 .../terraform/terraform/eval_read_data.go | 17 +-
 .../terraform/terraform/eval_sequence.go | 4 +
 .../terraform/terraform/eval_state.go | 42 +-
 .../terraform/terraform/eval_validate.go | 85 +-
 .../terraform/eval_validate_selfref.go | 74 +
 .../terraform/terraform/eval_variable.go | 49 +-
 .../terraform/terraform/evaltree_provider.go | 41 +-
 .../hashicorp/terraform/terraform/features.go | 7 +
 .../hashicorp/terraform/terraform/graph.go | 217 +-
 .../terraform/terraform/graph_builder.go | 191 +-
 .../terraform/graph_builder_apply.go | 158 +
 .../terraform/graph_builder_destroy_plan.go | 67 +
 .../terraform/graph_builder_import.go | 24 +-
 .../terraform/graph_builder_input.go | 27 +
 .../terraform/terraform/graph_builder_plan.go | 181 +
 .../terraform/graph_builder_refresh.go | 169 +
 .../terraform/graph_builder_validate.go | 36 +
 .../terraform/terraform/graph_config_node.go | 41 -
 .../terraform/graph_config_node_module.go | 213 -
 .../terraform/graph_config_node_output.go | 106 -
 .../terraform/graph_config_node_provider.go | 131 -
 .../terraform/graph_config_node_resource.go | 531 -
 .../terraform/graph_config_node_type.go | 16 -
 .../terraform/graph_config_node_variable.go | 274 -
 .../terraform/terraform/graph_dot.go | 182 +-
 .../terraform/terraform/graph_walk.go | 32 +-
 .../terraform/terraform/graph_walk_context.go | 14 +-
 .../terraform/graphnodeconfigtype_string.go | 16 -
 .../terraform/terraform/graphtype_string.go | 16 +
 .../hashicorp/terraform/terraform/hook.go | 4 +-
 .../terraform/terraform/hook_mock.go | 10 +-
 .../terraform/terraform/hook_stop.go | 2 +-
 .../terraform/instancetype_string.go | 6 +-
 .../terraform/terraform/interpolate.go | 373 +-
 .../terraform/module_dependencies.go | 155 +
 .../terraform/node_count_boundary.go | 14 +
 .../terraform/terraform/node_data_destroy.go | 22 +
 .../terraform/terraform/node_data_refresh.go | 221 +
 .../terraform/terraform/node_local.go | 66 +
 .../terraform/node_module_removed.go | 77 +
 .../terraform/node_module_variable.go | 138 +
 .../terraform/terraform/node_output.go | 153 +
 .../terraform/terraform/node_output_orphan.go | 40 +
 .../terraform/terraform/node_provider.go | 11 +
 .../terraform/node_provider_abstract.go | 95 +
 .../terraform/node_provider_disabled.go | 34 +
 .../terraform/terraform/node_provisioner.go | 44 +
 .../terraform/node_resource_abstract.go | 247 +
 .../terraform/node_resource_abstract_count.go | 50 +
 .../terraform/node_resource_apply.go | 400 +
 .../terraform/node_resource_destroy.go | 291 +
 .../terraform/terraform/node_resource_plan.go | 85 +
 .../terraform/node_resource_plan_destroy.go | 53 +
 .../terraform/node_resource_plan_instance.go | 190 +
 .../terraform/node_resource_plan_orphan.go | 54 +
 .../terraform/node_resource_refresh.go | 266 +
 .../terraform/node_resource_validate.go | 159 +
 .../terraform/terraform/node_root_variable.go | 22 +
 .../hashicorp/terraform/terraform/path.go | 18 +-
 .../hashicorp/terraform/terraform/plan.go | 104 +-
 .../hashicorp/terraform/terraform/resource.go | 188 +-
 .../terraform/terraform/resource_address.go | 265 +-
 .../terraform/terraform/resource_provider.go | 130 +
 .../terraform/resource_provider_mock.go | 55 +-
 .../terraform/resource_provisioner.go | 20 +
 .../terraform/resource_provisioner_mock.go | 23 +-
 .../hashicorp/terraform/terraform/schemas.go | 34 +
 .../terraform/terraform/semantics.go | 56 +-
 .../hashicorp/terraform/terraform/state.go | 381 +-
 .../terraform/terraform/state_add.go | 7 +-
 .../terraform/terraform/state_filter.go | 32 +-
 .../terraform/state_upgrade_v1_to_v2.go | 21 +-
 .../terraform/state_upgrade_v2_to_v3.go | 2 +-
 .../hashicorp/terraform/terraform/testing.go | 19 +
 .../terraform/terraform/transform.go | 36 +
 .../transform_attach_config_provider.go | 18 +
 .../transform_attach_config_resource.go | 78 +
 .../terraform/transform_attach_state.go | 68 +
 .../terraform/terraform/transform_config.go | 184 +-
 .../terraform/transform_config_flat.go | 80 +
 .../terraform/transform_config_old.go | 23 +
 .../terraform/transform_count_boundary.go | 28 +
 .../terraform/terraform/transform_deposed.go | 51 +-
 .../terraform/terraform/transform_destroy.go | 239 -
 .../terraform/transform_destroy_cbd.go | 257 +
 .../terraform/transform_destroy_edge.go | 267 +
 .../terraform/terraform/transform_diff.go | 86 +
 .../terraform/terraform/transform_expand.go | 21 +-
 .../terraform/terraform/transform_flatten.go | 107 -
 .../terraform/transform_import_provider.go | 38 +
 .../terraform/transform_import_state.go | 41 +-
 .../terraform/terraform/transform_local.go | 40 +
 .../terraform/terraform/transform_module.go | 62 -
 .../terraform/transform_module_variable.go | 120 +
 .../terraform/terraform/transform_noop.go | 104 -
 .../terraform/terraform/transform_orphan.go | 418 -
 .../terraform/transform_orphan_count.go | 110 +
 .../terraform/transform_orphan_output.go | 53 +
 .../terraform/transform_orphan_resource.go | 78 +
 .../terraform/terraform/transform_output.go | 125 +-
 .../terraform/terraform/transform_provider.go | 760 +-
 .../terraform/transform_provisioner.go | 92 +-
 .../terraform/terraform/transform_proxy.go | 62 -
 .../terraform/transform_reference.go | 403 +
 .../terraform/transform_removed_modules.go | 32 +
 .../terraform/terraform/transform_resource.go | 953 -
 .../terraform/transform_resource_count.go | 53 +
 .../terraform/terraform/transform_root.go | 4 -
 .../terraform/terraform/transform_state.go | 65 +
 .../terraform/terraform/transform_targets.go | 158 +-
 .../terraform/terraform/transform_variable.go | 40 +
 .../terraform/terraform/ui_output_mock.go | 5 +
 .../terraform/terraform/user_agent.go | 13 +
 .../hashicorp/terraform/terraform/util.go | 35 +-
 .../terraform/terraform/variables.go | 35 +-
 .../hashicorp/terraform/terraform/version.go | 27 +-
 .../terraform/terraform/version_required.go | 71 +
 .../terraform/walkoperation_string.go | 6 +-
 .../hashicorp/terraform/tfdiags/diagnostic.go | 26 +
 .../terraform/tfdiags/diagnostics.go | 181 +
 .../hashicorp/terraform/tfdiags/doc.go | 16 +
 .../hashicorp/terraform/tfdiags/error.go | 23 +
 .../hashicorp/terraform/tfdiags/hcl.go | 77 +
 .../terraform/tfdiags/rpc_friendly.go | 53 +
 .../terraform/tfdiags/severity_string.go | 21 +
 .../terraform/tfdiags/simple_warning.go | 25 +
 .../terraform/tfdiags/source_range.go | 35 +
 .../hashicorp/terraform/version/version.go | 36 +
 .../jmespath/go-jmespath/.gitignore | 4 +
 .../jmespath/go-jmespath/.travis.yml | 9 +
 .../github.com/jmespath/go-jmespath/Makefile | 2 +-
 .../jmespath/go-jmespath/functions.go | 128 +-
 .../github.com/jmespath/go-jmespath/parser.go | 4 +-
 vendor/github.com/mattn/go-isatty/.travis.yml | 13 +
 vendor/github.com/mattn/go-isatty/LICENSE | 9 +
 vendor/github.com/mattn/go-isatty/README.md | 50 +
 vendor/github.com/mattn/go-isatty/doc.go | 2 +
 .../mattn/go-isatty/isatty_appengine.go | 15 +
 .../github.com/mattn/go-isatty/isatty_bsd.go | 18 +
 .../mattn/go-isatty/isatty_linux.go | 18 +
 .../mattn/go-isatty/isatty_linux_ppc64x.go | 19 +
 .../mattn/go-isatty/isatty_others.go | 10 +
 .../mattn/go-isatty/isatty_solaris.go | 16 +
 .../mattn/go-isatty/isatty_windows.go | 94 +
 vendor/github.com/mitchellh/cli/.travis.yml | 13 +
 vendor/github.com/mitchellh/cli/LICENSE | 354 +
 vendor/github.com/mitchellh/cli/Makefile | 20 +
 vendor/github.com/mitchellh/cli/README.md | 67 +
 .../github.com/mitchellh/cli/autocomplete.go | 43 +
 vendor/github.com/mitchellh/cli/cli.go | 715 +
 vendor/github.com/mitchellh/cli/command.go | 67 +
 .../github.com/mitchellh/cli/command_mock.go | 63 +
 vendor/github.com/mitchellh/cli/help.go | 79 +
 vendor/github.com/mitchellh/cli/ui.go | 187 +
 vendor/github.com/mitchellh/cli/ui_colored.go | 69 +
 .../github.com/mitchellh/cli/ui_concurrent.go | 54 +
 vendor/github.com/mitchellh/cli/ui_mock.go | 111 +
 vendor/github.com/mitchellh/cli/ui_writer.go | 18 +
 .../mitchellh/copystructure/.travis.yml | 12 +
 .../mitchellh/copystructure/copystructure.go | 186 +-
 .../mitchellh/go-homedir/homedir.go | 7 +-
 .../go-testing-interface/.travis.yml | 13 +
 .../mitchellh/go-testing-interface/LICENSE | 21 +
 .../mitchellh/go-testing-interface/README.md | 52 +
 .../mitchellh/go-testing-interface/go.mod | 1 +
 .../mitchellh/go-testing-interface/testing.go | 84 +
 .../go-testing-interface/testing_go19.go | 108 +
 .../mitchellh/go-wordwrap/LICENSE.md | 21 +
 .../mitchellh/go-wordwrap/README.md | 39 +
 .../github.com/mitchellh/go-wordwrap/go.mod | 1 +
 .../mitchellh/go-wordwrap/wordwrap.go | 73 +
 .../mitchellh/hashstructure/LICENSE | 21 +
 .../mitchellh/hashstructure/README.md | 61 +
 .../mitchellh/hashstructure/hashstructure.go | 323 +
 .../mitchellh/hashstructure/include.go | 15 +
 .../mitchellh/mapstructure/.travis.yml | 7 +
 .../mitchellh/mapstructure/decode_hooks.go | 5 +-
 .../mitchellh/mapstructure/mapstructure.go | 146 +-
 .../mitchellh/reflectwalk/.travis.yml | 1 +
 .../mitchellh/reflectwalk/location.go | 2 +
 .../mitchellh/reflectwalk/location_string.go | 8 +-
 .../mitchellh/reflectwalk/reflectwalk.go | 182 +-
 vendor/github.com/posener/complete/.gitignore | 4 +
 .../github.com/posener/complete/.travis.yml | 16 +
 .../github.com/posener/complete/LICENSE.txt | 21 +
 vendor/github.com/posener/complete/args.go | 111 +
 vendor/github.com/posener/complete/cmd/cmd.go | 128 +
 .../posener/complete/cmd/install/bash.go | 32 +
 .../posener/complete/cmd/install/fish.go | 56 +
 .../posener/complete/cmd/install/install.go | 119 +
 .../posener/complete/cmd/install/utils.go | 140 +
 .../posener/complete/cmd/install/zsh.go | 39 +
 vendor/github.com/posener/complete/command.go | 111 +
 .../github.com/posener/complete/complete.go | 109 +
 vendor/github.com/posener/complete/go.mod | 3 +
 vendor/github.com/posener/complete/go.sum | 4 +
 vendor/github.com/posener/complete/log.go | 22 +
 .../github.com/posener/complete/match/file.go | 19 +
 .../posener/complete/match/match.go | 6 +
 .../posener/complete/match/prefix.go | 9 +
 vendor/github.com/posener/complete/predict.go | 41 +
 .../posener/complete/predict_files.go | 108 +
 .../posener/complete/predict_set.go | 12 +
 vendor/github.com/posener/complete/readme.md | 118 +
 vendor/github.com/posener/complete/test.sh | 12 +
 vendor/github.com/posener/complete/utils.go | 46 +
 vendor/github.com/satori/go.uuid/LICENSE | 20 -
 vendor/github.com/satori/go.uuid/README.md | 66 -
 vendor/github.com/satori/go.uuid/uuid.go | 435 -
 vendor/github.com/ulikunitz/xz/.gitignore | 25 +
 vendor/github.com/ulikunitz/xz/LICENSE | 26 +
 vendor/github.com/ulikunitz/xz/README.md | 73 +
 vendor/github.com/ulikunitz/xz/TODO.md | 319 +
 vendor/github.com/ulikunitz/xz/bits.go | 74 +
 vendor/github.com/ulikunitz/xz/crc.go | 54 +
 vendor/github.com/ulikunitz/xz/example.go | 40 +
 vendor/github.com/ulikunitz/xz/format.go | 728 +
 vendor/github.com/ulikunitz/xz/fox.xz | Bin 0 -> 104 bytes
 .../ulikunitz/xz/internal/hash/cyclic_poly.go | 181 +
 .../ulikunitz/xz/internal/hash/doc.go | 14 +
 .../ulikunitz/xz/internal/hash/rabin_karp.go | 66 +
 .../ulikunitz/xz/internal/hash/roller.go | 29 +
 .../ulikunitz/xz/internal/xlog/xlog.go | 457 +
 .../github.com/ulikunitz/xz/lzma/bintree.go | 523 +
 vendor/github.com/ulikunitz/xz/lzma/bitops.go | 45 +
 .../github.com/ulikunitz/xz/lzma/breader.go | 39 +
 vendor/github.com/ulikunitz/xz/lzma/buffer.go | 171 +
 .../ulikunitz/xz/lzma/bytewriter.go | 37 +
 .../github.com/ulikunitz/xz/lzma/decoder.go | 277 +
 .../ulikunitz/xz/lzma/decoderdict.go | 135 +
 .../ulikunitz/xz/lzma/directcodec.go | 49 +
 .../github.com/ulikunitz/xz/lzma/distcodec.go | 156 +
 .../github.com/ulikunitz/xz/lzma/encoder.go | 268 +
 .../ulikunitz/xz/lzma/encoderdict.go | 149 +
 vendor/github.com/ulikunitz/xz/lzma/fox.lzma | Bin 0 -> 67 bytes
 .../github.com/ulikunitz/xz/lzma/hashtable.go | 309 +
 vendor/github.com/ulikunitz/xz/lzma/header.go | 167 +
 .../github.com/ulikunitz/xz/lzma/header2.go | 398 +
 .../ulikunitz/xz/lzma/lengthcodec.go | 129 +
 .../ulikunitz/xz/lzma/literalcodec.go | 132 +
 .../ulikunitz/xz/lzma/matchalgorithm.go | 52 +
 .../github.com/ulikunitz/xz/lzma/operation.go | 80 +
 vendor/github.com/ulikunitz/xz/lzma/prob.go | 53 +
 .../ulikunitz/xz/lzma/properties.go | 69 +
 .../ulikunitz/xz/lzma/rangecodec.go | 248 +
 vendor/github.com/ulikunitz/xz/lzma/reader.go | 100 +
 .../github.com/ulikunitz/xz/lzma/reader2.go | 232 +
 vendor/github.com/ulikunitz/xz/lzma/state.go | 151 +
 .../ulikunitz/xz/lzma/treecodecs.go | 133 +
 vendor/github.com/ulikunitz/xz/lzma/writer.go | 209 +
 .../github.com/ulikunitz/xz/lzma/writer2.go | 305 +
 vendor/github.com/ulikunitz/xz/lzmafilter.go | 117 +
 vendor/github.com/ulikunitz/xz/make-docs | 5 +
 vendor/github.com/ulikunitz/xz/reader.go | 373 +
 vendor/github.com/ulikunitz/xz/writer.go | 386 +
 vendor/github.com/zclconf/go-cty/LICENSE | 21 +
 .../github.com/zclconf/go-cty/cty/capsule.go | 89 +
 .../zclconf/go-cty/cty/collection.go | 34 +
 .../go-cty/cty/convert/compare_types.go | 165 +
 .../zclconf/go-cty/cty/convert/conversion.go | 136 +
 .../cty/convert/conversion_collection.go | 340 +
 .../go-cty/cty/convert/conversion_dynamic.go | 33 +
 .../go-cty/cty/convert/conversion_object.go | 76 +
 .../cty/convert/conversion_primitive.go | 50 +
 .../zclconf/go-cty/cty/convert/doc.go | 15 +
 .../go-cty/cty/convert/mismatch_msg.go | 135 +
 .../zclconf/go-cty/cty/convert/public.go | 83 +
 .../zclconf/go-cty/cty/convert/sort_types.go | 69 +
 .../zclconf/go-cty/cty/convert/unify.go | 66 +
 vendor/github.com/zclconf/go-cty/cty/doc.go | 18 +
 .../zclconf/go-cty/cty/element_iterator.go | 191 +
 vendor/github.com/zclconf/go-cty/cty/error.go | 55 +
 .../zclconf/go-cty/cty/function/argument.go | 50 +
 .../zclconf/go-cty/cty/function/doc.go | 6 +
 .../zclconf/go-cty/cty/function/error.go | 50 +
 .../zclconf/go-cty/cty/function/function.go | 291 +
 .../go-cty/cty/function/stdlib/bool.go | 73 +
 .../go-cty/cty/function/stdlib/bytes.go | 112 +
 .../go-cty/cty/function/stdlib/collection.go | 140 +
 .../zclconf/go-cty/cty/function/stdlib/csv.go | 93 +
 .../go-cty/cty/function/stdlib/datetime.go | 385 +
 .../zclconf/go-cty/cty/function/stdlib/doc.go | 13 +
 .../go-cty/cty/function/stdlib/format.go | 496 +
 .../go-cty/cty/function/stdlib/format_fsm.go | 358 +
 .../go-cty/cty/function/stdlib/format_fsm.rl | 182 +
 .../go-cty/cty/function/stdlib/general.go | 107 +
 .../go-cty/cty/function/stdlib/json.go | 72 +
 .../go-cty/cty/function/stdlib/number.go | 428 +
 .../go-cty/cty/function/stdlib/sequence.go | 130 +
 .../zclconf/go-cty/cty/function/stdlib/set.go | 195 +
 .../go-cty/cty/function/stdlib/string.go | 234 +
 .../go-cty/cty/function/unpredictable.go | 31 +
 vendor/github.com/zclconf/go-cty/cty/gob.go | 125 +
 .../zclconf/go-cty/cty/gocty/doc.go | 7 +
 .../zclconf/go-cty/cty/gocty/helpers.go | 43 +
 .../github.com/zclconf/go-cty/cty/gocty/in.go | 528 +
 .../zclconf/go-cty/cty/gocty/out.go | 705 +
 .../zclconf/go-cty/cty/gocty/type_implied.go | 108 +
 .../github.com/zclconf/go-cty/cty/helper.go | 99 +
 vendor/github.com/zclconf/go-cty/cty/json.go | 176 +
 .../github.com/zclconf/go-cty/cty/json/doc.go | 11 +
 .../zclconf/go-cty/cty/json/marshal.go | 189 +
 .../zclconf/go-cty/cty/json/simple.go | 41 +
 .../zclconf/go-cty/cty/json/type.go | 23 +
 .../zclconf/go-cty/cty/json/type_implied.go | 171 +
 .../zclconf/go-cty/cty/json/unmarshal.go | 459 +
 .../zclconf/go-cty/cty/json/value.go | 65 +
 .../zclconf/go-cty/cty/list_type.go | 74 +
 .../github.com/zclconf/go-cty/cty/map_type.go | 74 +
 vendor/github.com/zclconf/go-cty/cty/null.go | 14 +
 .../zclconf/go-cty/cty/object_type.go | 135 +
 vendor/github.com/zclconf/go-cty/cty/path.go | 190 +
 .../github.com/zclconf/go-cty/cty/path_set.go | 198 +
 .../zclconf/go-cty/cty/primitive_type.go | 122 +
 .../github.com/zclconf/go-cty/cty/set/gob.go | 76 +
 .../zclconf/go-cty/cty/set/iterator.go | 36 +
 .../github.com/zclconf/go-cty/cty/set/ops.go | 199 +
 .../zclconf/go-cty/cty/set/rules.go | 25 +
 .../github.com/zclconf/go-cty/cty/set/set.go | 62 +
 .../zclconf/go-cty/cty/set_helper.go | 126 +
 .../zclconf/go-cty/cty/set_internals.go | 158 +
 .../github.com/zclconf/go-cty/cty/set_type.go | 72 +
 .../zclconf/go-cty/cty/tuple_type.go | 121 +
 vendor/github.com/zclconf/go-cty/cty/type.go | 120 +
 .../zclconf/go-cty/cty/type_conform.go | 142 +
 .../zclconf/go-cty/cty/types_to_register.go | 57 +
 .../github.com/zclconf/go-cty/cty/unknown.go | 84 +
 .../zclconf/go-cty/cty/unknown_as_null.go | 64 +
 vendor/github.com/zclconf/go-cty/cty/value.go | 98 +
 .../zclconf/go-cty/cty/value_init.go | 276 +
 .../zclconf/go-cty/cty/value_ops.go | 1074 +
 vendor/github.com/zclconf/go-cty/cty/walk.go | 182 +
 vendor/vendor.json | 406 -
 775 files changed, 121036 insertions(+), 18694 deletions(-)
 create mode 100644 Gopkg.lock
 create mode 100644 Gopkg.toml
 create mode 100644 vendor/github.com/agext/levenshtein/.gitignore
 create mode 100644 vendor/github.com/agext/levenshtein/.travis.yml
 create mode 100644 vendor/github.com/agext/levenshtein/DCO
 create mode 100644 vendor/github.com/agext/levenshtein/LICENSE
 create mode 100644 vendor/github.com/agext/levenshtein/MAINTAINERS
 create mode 100644 vendor/github.com/agext/levenshtein/NOTICE
 create mode 100644 vendor/github.com/agext/levenshtein/README.md
 create mode 100644 vendor/github.com/agext/levenshtein/levenshtein.go
 create mode 100644 vendor/github.com/agext/levenshtein/params.go
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/LICENSE
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/generate.go
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/make_tables.go
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/tables.go
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb
 create mode 100644 vendor/github.com/apparentlymart/go-textseg/textseg/utf8_seqs.go
 create mode 100644 vendor/github.com/armon/go-radix/.gitignore
 create mode 100644 vendor/github.com/armon/go-radix/.travis.yml
 create mode 100644 vendor/github.com/armon/go-radix/LICENSE
 create mode 100644 vendor/github.com/armon/go-radix/README.md
 create mode 100644 vendor/github.com/armon/go-radix/go.mod
 create mode 100644 vendor/github.com/armon/go-radix/radix.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
 create mode 100644 vendor/github.com/bgentry/speakeasy/.gitignore
 create mode 100644 vendor/github.com/bgentry/speakeasy/LICENSE
 create mode 100644 vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS
 create mode 100644 vendor/github.com/bgentry/speakeasy/Readme.md
 create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy.go
 create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_unix.go
 create mode 100644 vendor/github.com/bgentry/speakeasy/speakeasy_windows.go
 create mode 100644 vendor/github.com/blang/semver/.travis.yml
 create mode 100644 vendor/github.com/blang/semver/LICENSE
 create mode 100644 vendor/github.com/blang/semver/README.md
 create mode 100644 vendor/github.com/blang/semver/json.go
 create mode 100644 vendor/github.com/blang/semver/package.json
 create mode 100644 vendor/github.com/blang/semver/range.go
 create mode 100644 vendor/github.com/blang/semver/semver.go
 create mode 100644 vendor/github.com/blang/semver/sort.go
 create mode 100644 vendor/github.com/blang/semver/sql.go
 delete mode 100644 vendor/github.com/go-ini/ini/LICENSE
 delete mode 100644 vendor/github.com/go-ini/ini/README.md
 delete mode 100644 vendor/github.com/go-ini/ini/README_ZH.md
 delete mode 100644 vendor/github.com/go-ini/ini/ini.go
 delete mode 100644 vendor/github.com/go-ini/ini/parser.go
 delete mode 100644 vendor/github.com/go-ini/ini/struct.go
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/README.md
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
 create mode 100644 vendor/github.com/hashicorp/go-cleanhttp/doc.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_tar.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_txz.go
 create mode 100644 vendor/github.com/hashicorp/go-getter/decompress_xz.go
 create mode 120000 vendor/github.com/hashicorp/go-getter/test-fixtures/detect-file-symlink-pwd/syml/pwd
 create mode 100644 vendor/github.com/hashicorp/go-safetemp/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-safetemp/README.md
 create mode 100644 vendor/github.com/hashicorp/go-safetemp/go.mod
 create mode 100644 vendor/github.com/hashicorp/go-safetemp/safetemp.go
 create mode 100644 vendor/github.com/hashicorp/go-version/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/hcl/.gitignore
 create mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/hcl2/LICENSE
 create mode 100644 vendor/github.com/hashicorp/hcl2/gohcl/decode.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/gohcl/doc.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/gohcl/schema.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/gohcl/types.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/doc.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/eval_context.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/expr_call.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/expr_list.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/expr_map.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
 create mode 100755 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode2ragel.rb
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/unicode_derived.rl
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/ast.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/didyoumean.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/doc.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/navigation.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/parser.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/public.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/merged.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/ops.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/pos.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/schema.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/spec.md
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/static_expr.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/structure.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/traversal.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/decode.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/doc.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/gob.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/public.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/schema.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/spec.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hcldec/variables.go
 create mode 100644 vendor/github.com/hashicorp/hcl2/hclparse/parser.go
 create mode 100644 vendor/github.com/hashicorp/hil/.gitignore
 create mode 100644 vendor/github.com/hashicorp/hil/.travis.yml
 create mode 100644 vendor/github.com/hashicorp/hil/ast/conditional.go
 create mode 100644 vendor/github.com/hashicorp/hil/ast/unknown.go
 delete mode 100644 vendor/github.com/hashicorp/hil/lang.y
 delete mode 100644 vendor/github.com/hashicorp/hil/lex.go
 create mode 100644 vendor/github.com/hashicorp/hil/parser/binary_op.go
 create mode 100644 vendor/github.com/hashicorp/hil/parser/error.go
 create mode 100644 vendor/github.com/hashicorp/hil/parser/fuzz.go
 create mode 100644 vendor/github.com/hashicorp/hil/parser/parser.go
 create mode 100644 vendor/github.com/hashicorp/hil/scanner/peeker.go
 create mode 100644 vendor/github.com/hashicorp/hil/scanner/scanner.go
 create mode 100644 vendor/github.com/hashicorp/hil/scanner/token.go
 create mode 100644 vendor/github.com/hashicorp/hil/scanner/tokentype_string.go
 delete mode 100644 vendor/github.com/hashicorp/hil/y.go
 delete mode 100644 vendor/github.com/hashicorp/hil/y.output
 create mode 100644 vendor/github.com/hashicorp/terraform/config/config_terraform.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/doc.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/configschema/schema.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/loader_hcl2.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/module/storage.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/module/testing.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/module/versions.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/providers.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
 create mode 100644 vendor/github.com/hashicorp/terraform/config/testing.go
 create mode 100644 vendor/github.com/hashicorp/terraform/dag/dot.go
 create mode 100644 vendor/github.com/hashicorp/terraform/dag/marshal.go
 create mode 100644 vendor/github.com/hashicorp/terraform/dag/walk.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/dot/graph.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/dot/graph_writer.go
 create mode 100644 vendor/github.com/hashicorp/terraform/httpclient/client.go
 create mode 100644 vendor/github.com/hashicorp/terraform/httpclient/useragent.go
 create mode 100644 vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go
 create mode 100644 vendor/github.com/hashicorp/terraform/moduledeps/doc.go
 create mode 100644 vendor/github.com/hashicorp/terraform/moduledeps/module.go
 create mode 100644 vendor/github.com/hashicorp/terraform/moduledeps/provider.go
 create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
 create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/find.go
 create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
 create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go
 create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go
 create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go
 create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
 create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
 create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/version.go
 create mode 100644 vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/client.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/errors.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/regsrc/module.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/module.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/module_list.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/module_provider.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/module_versions.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/pagination.go
 create mode 100644 vendor/github.com/hashicorp/terraform/registry/response/redirect.go
 create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/cache.go
 create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go
 create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go
 create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go
 create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/static.go
 create mode 100644 vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go
 create mode 100644 vendor/github.com/hashicorp/terraform/svchost/disco/disco.go
 create mode 100644 vendor/github.com/hashicorp/terraform/svchost/disco/host.go
 create mode 100644 vendor/github.com/hashicorp/terraform/svchost/label_iter.go
 create mode 100644 vendor/github.com/hashicorp/terraform/svchost/svchost.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_components.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/debug.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_local.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/features.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_config_node.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_config_node_module.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_config_node_output.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_config_node_provider.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_config_node_resource.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_config_node_type.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graph_config_node_variable.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/graphnodeconfigtype_string.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_local.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_output.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/schemas.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/testing.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_destroy.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_flatten.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_local.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_module.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_noop.go
 delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_orphan.go
 create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
 create mode 100644
vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_proxy.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_reference.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go delete mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_resource.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_state.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/transform_variable.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/user_agent.go create mode 100644 vendor/github.com/hashicorp/terraform/terraform/version_required.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/doc.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/error.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/hcl.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go create mode 100644 vendor/github.com/hashicorp/terraform/tfdiags/source_range.go create mode 100644 vendor/github.com/hashicorp/terraform/version/version.go create mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore create mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml create mode 100644 vendor/github.com/mattn/go-isatty/.travis.yml create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 vendor/github.com/mattn/go-isatty/README.md create mode 100644 vendor/github.com/mattn/go-isatty/doc.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_appengine.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_linux.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 vendor/github.com/mitchellh/cli/.travis.yml create mode 100644 vendor/github.com/mitchellh/cli/LICENSE create mode 100644 vendor/github.com/mitchellh/cli/Makefile create mode 100644 vendor/github.com/mitchellh/cli/README.md create mode 100644 vendor/github.com/mitchellh/cli/autocomplete.go create mode 100644 vendor/github.com/mitchellh/cli/cli.go create mode 100644 vendor/github.com/mitchellh/cli/command.go create mode 100644 vendor/github.com/mitchellh/cli/command_mock.go create mode 100644 vendor/github.com/mitchellh/cli/help.go create mode 100644 vendor/github.com/mitchellh/cli/ui.go create mode 100644 vendor/github.com/mitchellh/cli/ui_colored.go create mode 100644 vendor/github.com/mitchellh/cli/ui_concurrent.go create mode 100644 vendor/github.com/mitchellh/cli/ui_mock.go create mode 100644 vendor/github.com/mitchellh/cli/ui_writer.go create mode 100644 
vendor/github.com/mitchellh/copystructure/.travis.yml create mode 100644 vendor/github.com/mitchellh/go-testing-interface/.travis.yml create mode 100644 vendor/github.com/mitchellh/go-testing-interface/LICENSE create mode 100644 vendor/github.com/mitchellh/go-testing-interface/README.md create mode 100644 vendor/github.com/mitchellh/go-testing-interface/go.mod create mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing.go create mode 100644 vendor/github.com/mitchellh/go-testing-interface/testing_go19.go create mode 100644 vendor/github.com/mitchellh/go-wordwrap/LICENSE.md create mode 100644 vendor/github.com/mitchellh/go-wordwrap/README.md create mode 100644 vendor/github.com/mitchellh/go-wordwrap/go.mod create mode 100644 vendor/github.com/mitchellh/go-wordwrap/wordwrap.go create mode 100644 vendor/github.com/mitchellh/hashstructure/LICENSE create mode 100644 vendor/github.com/mitchellh/hashstructure/README.md create mode 100644 vendor/github.com/mitchellh/hashstructure/hashstructure.go create mode 100644 vendor/github.com/mitchellh/hashstructure/include.go create mode 100644 vendor/github.com/mitchellh/mapstructure/.travis.yml create mode 100644 vendor/github.com/mitchellh/reflectwalk/.travis.yml create mode 100644 vendor/github.com/posener/complete/.gitignore create mode 100644 vendor/github.com/posener/complete/.travis.yml create mode 100644 vendor/github.com/posener/complete/LICENSE.txt create mode 100644 vendor/github.com/posener/complete/args.go create mode 100644 vendor/github.com/posener/complete/cmd/cmd.go create mode 100644 vendor/github.com/posener/complete/cmd/install/bash.go create mode 100644 vendor/github.com/posener/complete/cmd/install/fish.go create mode 100644 vendor/github.com/posener/complete/cmd/install/install.go create mode 100644 vendor/github.com/posener/complete/cmd/install/utils.go create mode 100644 vendor/github.com/posener/complete/cmd/install/zsh.go create mode 100644 vendor/github.com/posener/complete/command.go create mode 100644 vendor/github.com/posener/complete/complete.go create mode 100644 vendor/github.com/posener/complete/go.mod create mode 100644 vendor/github.com/posener/complete/go.sum create mode 100644 vendor/github.com/posener/complete/log.go create mode 100644 vendor/github.com/posener/complete/match/file.go create mode 100644 vendor/github.com/posener/complete/match/match.go create mode 100644 vendor/github.com/posener/complete/match/prefix.go create mode 100644 vendor/github.com/posener/complete/predict.go create mode 100644 vendor/github.com/posener/complete/predict_files.go create mode 100644 vendor/github.com/posener/complete/predict_set.go create mode 100644 vendor/github.com/posener/complete/readme.md create mode 100755 vendor/github.com/posener/complete/test.sh create mode 100644 vendor/github.com/posener/complete/utils.go delete mode 100644 vendor/github.com/satori/go.uuid/LICENSE delete mode 100644 vendor/github.com/satori/go.uuid/README.md delete mode 100644 vendor/github.com/satori/go.uuid/uuid.go create mode 100644 vendor/github.com/ulikunitz/xz/.gitignore create mode 100644 vendor/github.com/ulikunitz/xz/LICENSE create mode 100644 vendor/github.com/ulikunitz/xz/README.md create mode 100644 vendor/github.com/ulikunitz/xz/TODO.md create mode 100644 vendor/github.com/ulikunitz/xz/bits.go create mode 100644 vendor/github.com/ulikunitz/xz/crc.go create mode 100644 vendor/github.com/ulikunitz/xz/example.go create mode 100644 vendor/github.com/ulikunitz/xz/format.go create mode 100644 
vendor/github.com/ulikunitz/xz/fox.xz create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/doc.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/hash/roller.go create mode 100644 vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bintree.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bitops.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/breader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/buffer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/bytewriter.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/decoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/directcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/distcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoder.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/encoderdict.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/fox.lzma create mode 100644 vendor/github.com/ulikunitz/xz/lzma/hashtable.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/header2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/literalcodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/operation.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/prob.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/properties.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/rangecodec.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/reader2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/state.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/treecodecs.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer.go create mode 100644 vendor/github.com/ulikunitz/xz/lzma/writer2.go create mode 100644 vendor/github.com/ulikunitz/xz/lzmafilter.go create mode 100755 vendor/github.com/ulikunitz/xz/make-docs create mode 100644 vendor/github.com/ulikunitz/xz/reader.go create mode 100644 vendor/github.com/ulikunitz/xz/writer.go create mode 100644 vendor/github.com/zclconf/go-cty/LICENSE create mode 100644 vendor/github.com/zclconf/go-cty/cty/capsule.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/collection.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/public.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go create 
mode 100644 vendor/github.com/zclconf/go-cty/cty/convert/unify.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/element_iterator.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/error.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/argument.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/error.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/function.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gob.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/in.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/out.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/helper.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/doc.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/marshal.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/simple.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/type_implied.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/json/value.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/list_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/map_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/null.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/object_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/path.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/path_set.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/primitive_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/gob.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/iterator.go 
create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/ops.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/rules.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set/set.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set_helper.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set_internals.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/set_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/tuple_type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/type.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/type_conform.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/types_to_register.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/unknown.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/value.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/value_init.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/value_ops.go create mode 100644 vendor/github.com/zclconf/go-cty/cty/walk.go delete mode 100644 vendor/vendor.json diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 00000000..b3b8d075 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,396 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:dc2ba13235a4c8f80a40599575a3f10acb4736cb372f1a68b88333c06564471d" + name = "github.com/agext/levenshtein" + packages = ["."] + pruneopts = "UT" + revision = "5f10fee965225ac1eecdc234c09daf5cd9e7f7b6" + version = "v1.2.1" + +[[projects]] + digest = "1:ef98942770803ae37c4787ad6bf70e401c99834dfba5e3034b97ba7c24e66c10" + name = "github.com/apparentlymart/go-cidr" + packages = ["cidr"] + pruneopts = "UT" + revision = "2bd8b58cf4275aeb086ade613de226773e29e853" + +[[projects]] + digest = "1:2bf7da77216264bb61bd8cb5f3380a03ab509e8a826fe359008d4a6ba0789b13" + name = "github.com/apparentlymart/go-textseg" + packages = ["textseg"] + pruneopts = "UT" + revision = "fb01f485ebef760e5ee06d55e1b07534dda2d295" + version = "v1.0.0" + +[[projects]] + digest = "1:c47f4964978e211c6e566596ec6246c329912ea92e9bb99c00798bb4564c5b09" + name = "github.com/armon/go-radix" + packages = ["."] + pruneopts = "UT" + revision = "1a2de0c21c94309923825da3df33a4381872c795" + version = "v1.0.0" + +[[projects]] + digest = "1:ad009afc10b82f2de510d000fad8472d13f2888716dc941c942f5cbb3a28cd57" + name = "github.com/aws/aws-sdk-go" + packages = [ + "aws", + "aws/awserr", + "aws/awsutil", + "aws/client", + "aws/client/metadata", + "aws/corehandlers", + "aws/credentials", + "aws/credentials/ec2rolecreds", + "aws/credentials/endpointcreds", + "aws/credentials/stscreds", + "aws/csm", + "aws/defaults", + "aws/ec2metadata", + "aws/endpoints", + "aws/request", + "aws/session", + "aws/signer/v4", + "internal/ini", + "internal/s3err", + "internal/sdkio", + "internal/sdkrand", + "internal/sdkuri", + "internal/shareddefaults", + "private/protocol", + "private/protocol/eventstream", + "private/protocol/eventstream/eventstreamapi", + "private/protocol/query", + "private/protocol/query/queryutil", + "private/protocol/rest", + "private/protocol/restxml", + "private/protocol/xml/xmlutil", + "service/s3", + "service/sts", + ] + pruneopts = "UT" + revision = "cf00ea20983ce38df17ab0a0814463ab8838459f" + version = "v1.15.73" + +[[projects]] + branch = "master" + digest = "1:37011b20a70e205b93ebea5287e1afa5618db54bf3998c36ff5a8e4b146a170a" + name = "github.com/bgentry/go-netrc" + 
packages = ["netrc"] + pruneopts = "UT" + revision = "9fd32a8b3d3d3f9d43c341bfe098430e07609480" + +[[projects]] + digest = "1:1343a2963481a305ca4d051e84bc2abd16b601ee22ed324f8d605de1adb291b0" + name = "github.com/bgentry/speakeasy" + packages = ["."] + pruneopts = "UT" + revision = "4aabc24848ce5fd31929f7d1e4ea74d3709c14cd" + version = "v0.1.0" + +[[projects]] + digest = "1:e8af57548b19feb1905d6816cdc6e6d91e0459a1c571c6189774cb2454099779" + name = "github.com/blang/semver" + packages = ["."] + pruneopts = "UT" + revision = "4a1e882c79dcf4ec00d2e29fac74b9c8938d5052" + +[[projects]] + digest = "1:07671f8997086ed115824d1974507d2b147d1e0463675ea5dbf3be89b1c2c563" + name = "github.com/hashicorp/errwrap" + packages = ["."] + pruneopts = "UT" + revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55" + +[[projects]] + digest = "1:7bb7fdad88218bcb5c9033fbeedb5b4c5de71c232c5ae94beabe611fd2c48172" + name = "github.com/hashicorp/go-cleanhttp" + packages = ["."] + pruneopts = "UT" + revision = "06c9ea3a335b7443026f8124b22619524420291b" + +[[projects]] + digest = "1:3aeb3431df1b40b8e416a731018a381841389dea128dcf4d52de590d6de9ecd4" + name = "github.com/hashicorp/go-getter" + packages = [ + ".", + "helper/url", + ] + pruneopts = "UT" + revision = "90bb99a48d86cf1d327cee9968f7428f90ba13c1" + +[[projects]] + digest = "1:9e6e28e4a4a21fc0e3448a519ac1b1979d03c9cb15ceeee0004e5912cf37f763" + name = "github.com/hashicorp/go-multierror" + packages = ["."] + pruneopts = "UT" + revision = "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" + +[[projects]] + digest = "1:605c47454db9040e30b20dc1b29e3e9d42d6ee742545729cdef74afb1b898ad0" + name = "github.com/hashicorp/go-safetemp" + packages = ["."] + pruneopts = "UT" + revision = "c9a55de4fe06c920a71964b53cfe3dd293a3c743" + version = "v1.0.0" + +[[projects]] + digest = "1:74edc93e0bbfc9e63b2fd7b0b9bc8355d141701d7d7b584b1cd5d6dcbdeb8750" + name = "github.com/hashicorp/go-uuid" + packages = ["."] + pruneopts = "UT" + revision = "36289988d83ca270bc07c234c36f364b0dd9c9a7" + +[[projects]] + digest = "1:e4d5ee2b25927d97e077a7f9ec3f78a72d1385e36e0152b272f4834ba94e1e78" + name = "github.com/hashicorp/go-version" + packages = ["."] + pruneopts = "UT" + revision = "4fe82ae3040f80a03d04d2cccb5606a626b8e1ee" + +[[projects]] + digest = "1:2576f6dbd3968554275733c7d7f1b81c3d8fef671d60d73cfe067bba31a1ce83" + name = "github.com/hashicorp/hcl" + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token", + ] + pruneopts = "UT" + revision = "a4b07c25de5ff55ad3b8936cea69a79a3d95a855" + +[[projects]] + digest = "1:c8b5af3be55edbe00a60cc34303ca36b5d3dab1ee50ee850ca93342d8c6197bc" + name = "github.com/hashicorp/hcl2" + packages = [ + "gohcl", + "hcl", + "hcl/hclsyntax", + "hcl/json", + "hcldec", + "hclparse", + ] + pruneopts = "UT" + revision = "5f8ed954abd873b2c09616ba0aa607892bbca7e9" + +[[projects]] + branch = "master" + digest = "1:a0fcb763bbae723b790db78143ac713c2a96c7542a4e1a2628ba3a9aedd13ae9" + name = "github.com/hashicorp/hil" + packages = [ + ".", + "ast", + "parser", + "scanner", + ] + pruneopts = "UT" + revision = "fa9f258a92500514cc8e9c67020487709df92432" + +[[projects]] + digest = "1:1f660d32bc77e644a91c2038cbe7a9fb0ae45e63d43f4f4d6410331e6767a61e" + name = "github.com/hashicorp/terraform" + packages = [ + "config", + "config/configschema", + "config/hcl2shim", + "config/module", + "dag", + "flatmap", + "helper/hilmapstructure", + "httpclient", + "moduledeps", + "plugin/discovery", + "registry", + 
"registry/regsrc", + "registry/response", + "svchost", + "svchost/auth", + "svchost/disco", + "terraform", + "tfdiags", + "version", + ] + pruneopts = "UT" + revision = "17850e9a55d33c43d7c31fd6ac122ba97a51d899" + version = "v0.11.10" + +[[projects]] + digest = "1:e22af8c7518e1eab6f2eab2b7d7558927f816262586cd6ed9f349c97a6c285c4" + name = "github.com/jmespath/go-jmespath" + packages = ["."] + pruneopts = "UT" + revision = "0b12d6b5" + +[[projects]] + digest = "1:0981502f9816113c9c8c4ac301583841855c8cf4da8c72f696b3ebedf6d0e4e5" + name = "github.com/mattn/go-isatty" + packages = ["."] + pruneopts = "UT" + revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c" + version = "v0.0.4" + +[[projects]] + digest = "1:619cc2adc92a4c54546f0a57665443874beb69fa06bf1e2b636856f2bf5f99bc" + name = "github.com/mitchellh/cli" + packages = ["."] + pruneopts = "UT" + revision = "33edc47170b5df54d2588696d590c5e20ee583fe" + +[[projects]] + digest = "1:dfb4eb6168a4e7f16e9fda5b9164013a0f5a8eb06bb5ca3a1980964a7beedde4" + name = "github.com/mitchellh/copystructure" + packages = ["."] + pruneopts = "UT" + revision = "d23ffcb85de31694d6ccaa23ccb4a03e55c1303f" + +[[projects]] + digest = "1:12ae6210bdbdad658a9a67fd95cd9c99f7fdbf12f6d36eaf0af704e69dacf4f5" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "b8bc1bf767474819792c23f32d8286a45736f1c6" + +[[projects]] + digest = "1:42eb1f52b84a06820cedc9baec2e710bfbda3ee6dac6cdb97f8b9a5066134ec6" + name = "github.com/mitchellh/go-testing-interface" + packages = ["."] + pruneopts = "UT" + revision = "6d0b8010fcc857872e42fc6c931227569016843c" + version = "v1.0.0" + +[[projects]] + digest = "1:abf08734a6527df70ed361d7c369fb580e6840d8f7a6012e5f609fdfd93b4e48" + name = "github.com/mitchellh/go-wordwrap" + packages = ["."] + pruneopts = "UT" + revision = "9e67c67572bc5dd02aef930e2b0ae3c02a4b5a5c" + version = "v1.0.0" + +[[projects]] + digest = "1:081749ce432ea7d9707f26e74d96bfe8c859c61c12f3d6f566edcae470639a87" + name = "github.com/mitchellh/hashstructure" + packages = ["."] + pruneopts = "UT" + revision = "6b17d669fac5e2f71c16658d781ec3fdd3802b69" + +[[projects]] + digest = "1:3bf49f179b730bede84bcec58587f33af244353cc7029283c82de81729e75fae" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + pruneopts = "UT" + revision = "53818660ed4955e899c0bcafa97299a388bd7c8e" + +[[projects]] + digest = "1:012bcbda750df8b57e302656a0820833eaa98009a7546b22620283c65996743b" + name = "github.com/mitchellh/reflectwalk" + packages = ["."] + pruneopts = "UT" + revision = "63d60e9d0dbc60cf9164e6510889b0db6683d98c" + +[[projects]] + digest = "1:57e168c6dfcc02c1c53bdf1589afbef59694d819cac65bfd3a855de2256d4950" + name = "github.com/posener/complete" + packages = [ + ".", + "cmd", + "cmd/install", + "match", + ] + pruneopts = "UT" + revision = "3ef9b31a6a0613ae832e7ecf208374027c3b2343" + version = "v1.2.1" + +[[projects]] + digest = "1:db345c377984d0907323c09a9b17f11d995a59649fd38f909727df358b5f9020" + name = "github.com/ulikunitz/xz" + packages = [ + ".", + "internal/hash", + "internal/xlog", + "lzma", + ] + pruneopts = "UT" + revision = "590df8077fbcb06ad62d7714da06c00e5dd2316d" + version = "v0.5.5" + +[[projects]] + branch = "master" + digest = "1:e77fdd77613344e716e0ce6c7cc28d8f6ebf02d938fad00db4b5fa73cb3589fb" + name = "github.com/zclconf/go-cty" + packages = [ + "cty", + "cty/convert", + "cty/function", + "cty/function/stdlib", + "cty/gocty", + "cty/json", + "cty/set", + ] + pruneopts = "UT" + revision = 
"01c5aba823a6c91fe3bc287fd6e493ca717a64b8" + +[[projects]] + digest = "1:cc91ea751334c85af3f6fd10c4bc34b6f82c1e214a7fd3177ce6f1297efb23e3" + name = "golang.org/x/crypto" + packages = [ + "bcrypt", + "blowfish", + "cast5", + "openpgp", + "openpgp/armor", + "openpgp/elgamal", + "openpgp/errors", + "openpgp/packet", + "openpgp/s2k", + ] + pruneopts = "UT" + revision = "453249f01cfeb54c3d549ddb75ff152ca243f9d8" + +[[projects]] + digest = "1:3168bbdd4629ada733a4b73042ea23a03c4000bc6006e462e920cc39be00ccce" + name = "golang.org/x/net" + packages = [ + "html", + "html/atom", + "idna", + ] + pruneopts = "UT" + revision = "f2499483f923065a842d38eb4c7f1927e6fc6e6d" + +[[projects]] + branch = "master" + digest = "1:7ba061af4131fb44b30448572acd0d6fefbf63a61b97b7ef1dea0be5871c2742" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "66b7b1311ac80bbafcd2daeef9a5e6e2cd1e2399" + +[[projects]] + digest = "1:8029e9743749d4be5bc9f7d42ea1659471767860f0cdc34d37c3111bd308a295" + name = "golang.org/x/text" + packages = [ + "internal/gen", + "internal/triegen", + "internal/ucd", + "transform", + "unicode/cldr", + "unicode/norm", + ] + pruneopts = "UT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = ["github.com/hashicorp/terraform/terraform"] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 00000000..1c9f6b91 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,34 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/hashicorp/terraform" + version = "0.11.10" + +[prune] + go-tests = true + unused-packages = true diff --git a/README.md b/README.md index 9355f319..b4373676 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,10 @@ $ tfjson terraform.tfplan } ``` +## Compatibility Notice + +This library is compatible up to Terraform v0.11. + ## License This project is made available under the [MIT License](http://opensource.org/licenses/MIT). 
diff --git a/vendor/github.com/agext/levenshtein/.gitignore b/vendor/github.com/agext/levenshtein/.gitignore new file mode 100644 index 00000000..404365f6 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/.gitignore @@ -0,0 +1,2 @@ +README.html +coverage.out diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml new file mode 100644 index 00000000..95be94af --- /dev/null +++ b/vendor/github.com/agext/levenshtein/.travis.yml @@ -0,0 +1,70 @@ +language: go +sudo: false +go: + - 1.8 + - 1.7.5 + - 1.7.4 + - 1.7.3 + - 1.7.2 + - 1.7.1 + - 1.7 + - tip + - 1.6.4 + - 1.6.3 + - 1.6.2 + - 1.6.1 + - 1.6 + - 1.5.4 + - 1.5.3 + - 1.5.2 + - 1.5.1 + - 1.5 + - 1.4.3 + - 1.4.2 + - 1.4.1 + - 1.4 + - 1.3.3 + - 1.3.2 + - 1.3.1 + - 1.3 + - 1.2.2 + - 1.2.1 + - 1.2 + - 1.1.2 + - 1.1.1 + - 1.1 +before_install: + - go get github.com/mattn/goveralls +script: + - $HOME/gopath/bin/goveralls -service=travis-ci +notifications: + email: + on_success: never +matrix: + fast_finish: true + allow_failures: + - go: tip + - go: 1.6.4 + - go: 1.6.3 + - go: 1.6.2 + - go: 1.6.1 + - go: 1.6 + - go: 1.5.4 + - go: 1.5.3 + - go: 1.5.2 + - go: 1.5.1 + - go: 1.5 + - go: 1.4.3 + - go: 1.4.2 + - go: 1.4.1 + - go: 1.4 + - go: 1.3.3 + - go: 1.3.2 + - go: 1.3.1 + - go: 1.3 + - go: 1.2.2 + - go: 1.2.1 + - go: 1.2 + - go: 1.1.2 + - go: 1.1.1 + - go: 1.1 diff --git a/vendor/github.com/agext/levenshtein/DCO b/vendor/github.com/agext/levenshtein/DCO new file mode 100644 index 00000000..716561d5 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/DCO @@ -0,0 +1,36 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/agext/levenshtein/LICENSE b/vendor/github.com/agext/levenshtein/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/agext/levenshtein/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/agext/levenshtein/MAINTAINERS b/vendor/github.com/agext/levenshtein/MAINTAINERS new file mode 100644 index 00000000..726c2afb --- /dev/null +++ b/vendor/github.com/agext/levenshtein/MAINTAINERS @@ -0,0 +1 @@ +Alex Bucataru (@AlexBucataru) diff --git a/vendor/github.com/agext/levenshtein/NOTICE b/vendor/github.com/agext/levenshtein/NOTICE new file mode 100644 index 00000000..eaffaab9 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/NOTICE @@ -0,0 +1,5 @@ +Alrux Go EXTensions (AGExt) - package levenshtein +Copyright 2016 ALRUX Inc. + +This product includes software developed at ALRUX Inc. +(http://www.alrux.com/). diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md new file mode 100644 index 00000000..90509c2a --- /dev/null +++ b/vendor/github.com/agext/levenshtein/README.md @@ -0,0 +1,38 @@ +# A Go package for calculating the Levenshtein distance between two strings + +[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein)  +[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein) +[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein) +[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein) + + +This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org). + +## Project Status + +v1.2.1 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. + +This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. + +## Overview + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. 
+ +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. + +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. + +## Installation + +``` +go get github.com/agext/levenshtein +``` + +## License + +Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go new file mode 100644 index 00000000..df69ce70 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/levenshtein.go @@ -0,0 +1,290 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure. + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. + +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. 
+ +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. +*/ +package levenshtein + +// Calculate determines the Levenshtein distance between two strings, using +// the given costs for each edit operation. It returns the distance along with +// the lengths of the longest common prefix and suffix. +// +// If maxCost is non-zero, the calculation stops as soon as the distance is determined +// to be greater than maxCost. Therefore, any return value higher than maxCost is a +// lower bound for the actual distance. +func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) { + l1, l2 := len(str1), len(str2) + // trim common prefix, if any, as it doesn't affect the distance + for ; prefixLen < l1 && prefixLen < l2; prefixLen++ { + if str1[prefixLen] != str2[prefixLen] { + break + } + } + str1, str2 = str1[prefixLen:], str2[prefixLen:] + l1 -= prefixLen + l2 -= prefixLen + // trim common suffix, if any, as it doesn't affect the distance + for 0 < l1 && 0 < l2 { + if str1[l1-1] != str2[l2-1] { + str1, str2 = str1[:l1], str2[:l2] + break + } + l1-- + l2-- + suffixLen++ + } + // if the first string is empty, the distance is the length of the second string times the cost of insertion + if l1 == 0 { + dist = l2 * insCost + return + } + // if the second string is empty, the distance is the length of the first string times the cost of deletion + if l2 == 0 { + dist = l1 * delCost + return + } + + // variables used in inner "for" loops + var y, dy, c, l int + + // if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited' + if maxCost > 0 { + if subCost < delCost+insCost { + if maxCost >= l1*subCost+(l2-l1)*insCost { + maxCost = 0 + } + } else { + if maxCost >= l1*delCost+l2*insCost { + maxCost = 0 + } + } + } + + if maxCost > 0 { + // prefer the longer string first, to minimize time; + // a swap also transposes the meanings of insertion and deletion. + if l1 < l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + + // the length differential times cost of deletion is a lower bound for the cost; + // if it is higher than the maxCost, there is no point going into the main calculation. 
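+	// Editorial worked example (not upstream code): with delCost=1, a
+	// 9-rune string versus a 5-rune string needs at least (9-5)*1 = 4
+	// edits, so a maxCost of 3 lets us return this floor immediately.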
+ if dist = (l1 - l2) * delCost; dist > maxCost { + return + } + + d := make([]int, l1+1) + + // offset and length of d in the current row + doff, dlen := 0, 1 + for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ { + d[y] = dy + y++ + dy = y * delCost + } + // fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + + for x := 0; x < l2; x++ { + dy, d[doff] = d[doff], d[doff]+insCost + for d[doff] > maxCost && dlen > 0 { + if str1[doff] != str2[x] { + dy += subCost + } + doff++ + dlen-- + if c = d[doff] + insCost; c < dy { + dy = c + } + dy, d[doff] = d[doff], dy + } + for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + if y < l1 { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy { + y++ + dlen++ + } + } + // fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + if dlen == 0 { + dist = maxCost + 1 + return + } + } + if doff+dlen-1 < l1 { + dist = maxCost + 1 + return + } + dist = d[l1] + } else { + // ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is + // worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space + // http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html + + // prefer the shorter string first, to minimize space; time is O(l1*l2) anyway; + // a swap also transposes the meanings of insertion and deletion. + if l1 > l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + d := make([]int, l1+1) + + for y = 1; y <= l1; y++ { + d[y] = y * delCost + } + for x := 0; x < l2; x++ { + dy, d[0] = d[0], d[0]+insCost + for y = 0; y < l1; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + } + dist = d[l1] + } + + return +} + +// Distance returns the Levenshtein distance between str1 and str2, using the +// default or provided cost values. Pass nil for the third argument to use the +// default cost of 1 for all three operations, with no maximum. +func Distance(str1, str2 string, p *Params) int { + if p == nil { + p = defaultParams + } + dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost) + return dist +} + +// Similarity returns a score in the range of 0..1 for how similar the two strings are. +// A score of 1 means the strings are identical, and 0 means they have nothing in common. +// +// A nil third argument uses the default cost of 1 for all three operations. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Similarity(str1, str2 string, p *Params) float64 { + return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus +} + +// Match returns a similarity score adjusted by the same method as proposed by Winkler for +// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their +// similarity score is already over a threshold. +// +// The score is in the range of 0..1, with 1 meaning the strings are identical, +// and 0 meaning they have nothing in common. 
+// +// A nil third argument uses the default cost of 1 for all three operations, maximum length of +// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Match(str1, str2 string, p *Params) float64 { + s1, s2 := []rune(str1), []rune(str2) + l1, l2 := len(s1), len(s2) + // two empty strings are identical; shortcut also avoids divByZero issues later on. + if l1 == 0 && l2 == 0 { + return 1 + } + + if p == nil { + p = defaultParams + } + + // a min over 1 can never be satisfied, so the score is 0. + if p.minScore > 1 { + return 0 + } + + insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0 + if l1 > l2 { + l1, l2, insCost, delCost = l2, l1, delCost, insCost + } + + if p.subCost < delCost+insCost { + maxDist = l1*p.subCost + (l2-l1)*insCost + } else { + maxDist = l1*delCost + l2*insCost + } + + // a zero min is always satisfied, so no need to set a max cost. + if p.minScore > 0 { + // if p.minScore is lower than p.bonusThreshold, we can use a simplified formula + // for the max cost, because a sim score below min cannot receive a bonus. + if p.minScore < p.bonusThreshold { + // round down the max - a cost equal to a rounded up max would already be under min. + max = int((1 - p.minScore) * float64(maxDist)) + } else { + // p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim) + // p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist)) + // p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist + // 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist + // (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist + max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale)) + } + } + + dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost) + if max > 0 && dist > max { + return 0 + } + sim := 1 - float64(dist)/float64(maxDist) + + if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 { + if pl > p.bonusPrefix { + pl = p.bonusPrefix + } + sim += float64(pl) * p.bonusScale * (1 - sim) + } + + if sim < p.minScore { + return 0 + } + + return sim +} diff --git a/vendor/github.com/agext/levenshtein/params.go b/vendor/github.com/agext/levenshtein/params.go new file mode 100644 index 00000000..a85727b3 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/params.go @@ -0,0 +1,152 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package levenshtein + +// Params represents a set of parameter values for the various formulas involved +// in the calculation of the Levenshtein string metrics. 
+type Params struct { + insCost int + subCost int + delCost int + maxCost int + minScore float64 + bonusPrefix int + bonusScale float64 + bonusThreshold float64 +} + +var ( + defaultParams = NewParams() +) + +// NewParams creates a new set of parameters and initializes it with the default values. +func NewParams() *Params { + return &Params{ + insCost: 1, + subCost: 1, + delCost: 1, + maxCost: 0, + minScore: 0, + bonusPrefix: 4, + bonusScale: .1, + bonusThreshold: .7, + } +} + +// Clone returns a pointer to a copy of the receiver parameter set, or of a new +// default parameter set if the receiver is nil. +func (p *Params) Clone() *Params { + if p == nil { + return NewParams() + } + return &Params{ + insCost: p.insCost, + subCost: p.subCost, + delCost: p.delCost, + maxCost: p.maxCost, + minScore: p.minScore, + bonusPrefix: p.bonusPrefix, + bonusScale: p.bonusScale, + bonusThreshold: p.bonusThreshold, + } +} + +// InsCost overrides the default value of 1 for the cost of insertion. +// The new value must be zero or positive. +func (p *Params) InsCost(v int) *Params { + if v >= 0 { + p.insCost = v + } + return p +} + +// SubCost overrides the default value of 1 for the cost of substitution. +// The new value must be zero or positive. +func (p *Params) SubCost(v int) *Params { + if v >= 0 { + p.subCost = v + } + return p +} + +// DelCost overrides the default value of 1 for the cost of deletion. +// The new value must be zero or positive. +func (p *Params) DelCost(v int) *Params { + if v >= 0 { + p.delCost = v + } + return p +} + +// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost. +// The calculation of Distance() stops when the result is guaranteed to exceed +// this maximum, returning a lower-bound rather than exact value. +// The new value must be zero or positive. +func (p *Params) MaxCost(v int) *Params { + if v >= 0 { + p.maxCost = v + } + return p +} + +// MinScore overrides the default value of 0 for the minimum similarity score. +// Scores below this threshold are returned as 0 by Similarity() and Match(). +// The new value must be zero or positive. Note that a minimum greater than 1 +// can never be satisfied, resulting in a score of 0 for any pair of strings. +func (p *Params) MinScore(v float64) *Params { + if v >= 0 { + p.minScore = v + } + return p +} + +// BonusPrefix overrides the default value for the maximum length of +// common prefix to be considered for bonus by Match(). +// The new value must be zero or positive. +func (p *Params) BonusPrefix(v int) *Params { + if v >= 0 { + p.bonusPrefix = v + } + return p +} + +// BonusScale overrides the default value for the scaling factor used by Match() +// in calculating the bonus. +// The new value must be zero or positive. To guarantee that the similarity score +// remains in the interval 0..1, this scaling factor is not allowed to exceed +// 1 / BonusPrefix. +func (p *Params) BonusScale(v float64) *Params { + if v >= 0 { + p.bonusScale = v + } + + // the bonus cannot exceed (1-sim), or the score may become greater than 1. + if float64(p.bonusPrefix)*p.bonusScale > 1 { + p.bonusScale = 1 / float64(p.bonusPrefix) + } + + return p +} + +// BonusThreshold overrides the default value for the minimum similarity score +// for which Match() can assign a bonus. +// The new value must be zero or positive. Note that a threshold greater than 1 +// effectively makes Match() become the equivalent of Similarity(). 
+func (p *Params) BonusThreshold(v float64) *Params {
+	if v >= 0 {
+		p.bonusThreshold = v
+	}
+	return p
+}
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
index 1583d638..75344732 100644
--- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
+++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
@@ -61,11 +61,23 @@ func Host(base *net.IPNet, num int) (net.IP, error) {
 	hostLen := addrLen - parentLen
 
 	maxHostNum := uint64(1<<uint64(hostLen)) - 1
 
-	if uint64(num) > maxHostNum {
-		return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
+
+	numUint64 := uint64(num)
+	if num < 0 {
+		numUint64 = uint64(-num) - 1
+		num = int(maxHostNum - numUint64)
 	}
-	return insertNumIntoIP(ip, num, 32), nil
+	if numUint64 > maxHostNum {
+		return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
+	}
+	var bitlength int
+	if ip.To4() != nil {
+		bitlength = 32
+	} else {
+		bitlength = 128
+	}
+	return insertNumIntoIP(ip, num, bitlength), nil
 }
 
 // AddressRange returns the first and last addresses in the given CIDR range.
@@ -103,3 +115,96 @@ func AddressCount(network *net.IPNet) uint64 {
 	prefixLen, bits := network.Mask.Size()
 	return 1 << (uint64(bits) - uint64(prefixLen))
 }
+
+//VerifyNoOverlap takes a list of subnets and a supernet (CIDRBlock) and verifies
+//that none of the subnets overlap and that all subnets are in the supernet.
+//It returns an error if any of those conditions is not satisfied.
+func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error {
+	firstLastIP := make([][]net.IP, len(subnets))
+	for i, s := range subnets {
+		first, last := AddressRange(s)
+		firstLastIP[i] = []net.IP{first, last}
+	}
+	for i, s := range subnets {
+		if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) {
+			return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String())
+		}
+		for j := i + 1; j < len(subnets); j++ {
+			first := firstLastIP[j][0]
+			last := firstLastIP[j][1]
+			if s.Contains(first) || s.Contains(last) {
+				return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String())
+			}
+		}
+	}
+	return nil
+}
+
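(Editorial aside, not part of the vendored patch: a short sketch of the two changes above, using arbitrary example addresses. `Host` now accepts negative host numbers that index backwards from the end of the block, and `VerifyNoOverlap` reports overlapping or escaping subnets as errors.)

```go
package main

import (
	"fmt"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	_, block, _ := net.ParseCIDR("10.0.0.0/16")

	// Negative host numbers count back from the end of the block,
	// so -1 resolves to the last address in it.
	first, _ := cidr.Host(block, 1)  // 10.0.0.1
	last, _ := cidr.Host(block, -1) // 10.0.255.255
	fmt.Println(first, last)

	// b lies inside a, so VerifyNoOverlap returns a non-nil error.
	_, a, _ := net.ParseCIDR("10.0.0.0/24")
	_, b, _ := net.ParseCIDR("10.0.0.128/25")
	fmt.Println(cidr.VerifyNoOverlap([]*net.IPNet{a, b}, block))
}
```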
+// PreviousSubnet returns the subnet of the desired mask in the IP space
+// just lower than the start of the provided IPNet. If the IP space rolls
+// over, the second return value is true.
+func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
+	startIP := checkIPv4(network.IP)
+	previousIP := make(net.IP, len(startIP))
+	copy(previousIP, startIP)
+	cMask := net.CIDRMask(prefixLen, 8*len(previousIP))
+	previousIP = Dec(previousIP)
+	previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask}
+	if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) {
+		return previous, true
+	}
+	return previous, false
+}
+
+// NextSubnet returns the next available subnet of the desired mask size,
+// starting from the maximum IP of the offset subnet.
+// If the IP exceeds the maximum IP, the second return value is true.
+func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) {
+	_, currentLast := AddressRange(network)
+	mask := net.CIDRMask(prefixLen, 8*len(currentLast))
+	currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask}
+	_, last := AddressRange(currentSubnet)
+	last = Inc(last)
+	next := &net.IPNet{IP: last.Mask(mask), Mask: mask}
+	if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) {
+		return next, true
+	}
+	return next, false
+}
+
+//Inc increases the IP by one; it returns a new []byte for the IP.
+func Inc(IP net.IP) net.IP {
+	IP = checkIPv4(IP)
+	incIP := make([]byte, len(IP))
+	copy(incIP, IP)
+	for j := len(incIP) - 1; j >= 0; j-- {
+		incIP[j]++
+		if incIP[j] > 0 {
+			break
+		}
+	}
+	return incIP
+}
+
+//Dec decreases the IP by one; it returns a new []byte for the IP.
+func Dec(IP net.IP) net.IP {
+	IP = checkIPv4(IP)
+	decIP := make([]byte, len(IP))
+	copy(decIP, IP)
+	decIP = checkIPv4(decIP)
+	for j := len(decIP) - 1; j >= 0; j-- {
+		decIP[j]--
+		if decIP[j] < 255 {
+			break
+		}
+	}
+	return decIP
+}
+
+func checkIPv4(ip net.IP) net.IP {
+	// Go for some reason allocs IPv6len for IPv4, so we have to correct it.
+	if v4 := ip.To4(); v4 != nil {
+		return v4
+	}
+	return ip
+}
diff --git a/vendor/github.com/apparentlymart/go-textseg/LICENSE b/vendor/github.com/apparentlymart/go-textseg/LICENSE
new file mode 100644
index 00000000..684b03b4
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/LICENSE
@@ -0,0 +1,95 @@
+Copyright (c) 2017 Martin Atkins
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+---------
+
+Unicode table generation programs are under a separate copyright and license:
+
+Copyright (c) 2014 Couchbase, Inc.
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
+except in compliance with the License. You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed under the
+License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+either express or implied. See the License for the specific language governing permissions
+and limitations under the License.
+
+---------
+
+Grapheme break data is provided as part of the Unicode character database,
+copyright 2016 Unicode, Inc., which is provided with the following license:
+
+Unicode Data Files include all data files under the directories
+http://www.unicode.org/Public/, http://www.unicode.org/reports/,
+http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
+http://www.unicode.org/utility/trac/browser/.
+
+Unicode Data Files do not include PDF online code charts under the
+directory http://www.unicode.org/Public/.
+
+Software includes any source code published in the Unicode Standard
+or under the directories
+http://www.unicode.org/Public/, http://www.unicode.org/reports/,
+http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
+http://www.unicode.org/utility/trac/browser/.
+
+NOTICE TO USER: Carefully read the following legal agreement.
+BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
+DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
+YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
+TERMS AND CONDITIONS OF THIS AGREEMENT.
+IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
+THE DATA FILES OR SOFTWARE.
+
+COPYRIGHT AND PERMISSION NOTICE
+
+Copyright © 1991-2017 Unicode, Inc. All rights reserved.
+Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Unicode data files and any associated documentation
+(the "Data Files") or Unicode software and any associated documentation
+(the "Software") to deal in the Data Files or Software
+without restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, and/or sell copies of
+the Data Files or Software, and to permit persons to whom the Data Files
+or Software are furnished to do so, provided that either
+(a) this copyright and permission notice appear with all copies
+of the Data Files or Software, or
+(b) this copyright and permission notice appear in associated
+Documentation.
+
+THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
+NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
+DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THE DATA FILES OR SOFTWARE.
+
+Except as contained in this notice, the name of a copyright holder
+shall not be used in advertising or otherwise to promote the sale,
+use or other dealings in these Data Files or Software without prior
+written authorization of the copyright holder.
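(Editorial aside, not part of the vendored patch: a brief sketch of how the token helpers defined in all_tokens.go below are typically used. `ScanGraphemeClusters` is assumed here to be the bufio.SplitFunc exported by the generated grapheme_clusters.go, and the counts in the comments assume precomposed code points.)

```go
package main

import (
	"bufio"
	"fmt"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	// TokenCount accepts any bufio.SplitFunc, including the standard
	// library's word splitter.
	words, _ := textseg.TokenCount([]byte("two words"), bufio.ScanWords)
	fmt.Println(words) // 2

	// AllTokens returns the tokens themselves; with the grapheme-cluster
	// splitter each token is one user-perceived character.
	clusters, _ := textseg.AllTokens([]byte("héllo"), textseg.ScanGraphemeClusters)
	fmt.Println(len(clusters)) // 5
}
```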
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go new file mode 100644 index 00000000..5752e9ef --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go @@ -0,0 +1,30 @@ +package textseg + +import ( + "bufio" + "bytes" +) + +// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of +// all of the recognized tokens in the given buffer. +func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret [][]byte + for scanner.Scan() { + ret = append(ret, scanner.Bytes()) + } + return ret, scanner.Err() +} + +// TokenCount is a utility that uses a bufio.SplitFunc to count the number of +// recognized tokens in the given buffer. +func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret int + for scanner.Scan() { + ret++ + } + return ret, scanner.Err() +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go new file mode 100644 index 00000000..81f3a747 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go @@ -0,0 +1,7 @@ +package textseg + +//go:generate go run make_tables.go -output tables.go +//go:generate go run make_test_tables.go -output tables_test.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl +//go:generate ragel -Z grapheme_clusters.rl +//go:generate gofmt -w grapheme_clusters.go diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go new file mode 100644 index 00000000..012bc690 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go @@ -0,0 +1,5276 @@ + +// line 1 "grapheme_clusters.rl" +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. 
DO NOT EDIT + +// line 13 "grapheme_clusters.go" +var _graphclust_actions []byte = []byte{ + 0, 1, 0, 1, 4, 1, 9, 1, 10, + 1, 11, 1, 12, 1, 13, 1, 14, + 1, 15, 1, 16, 1, 17, 1, 18, + 1, 19, 1, 20, 1, 21, 2, 1, + 7, 2, 1, 8, 2, 2, 3, 2, + 5, 1, 3, 0, 1, 8, 3, 5, + 0, 1, 3, 5, 1, 6, +} + +var _graphclust_key_offsets []int16 = []int16{ + 0, 0, 1, 3, 5, 7, 10, 15, + 17, 20, 28, 31, 33, 35, 37, 67, + 75, 77, 81, 84, 89, 94, 104, 116, + 122, 127, 137, 140, 147, 151, 159, 169, + 173, 181, 183, 191, 194, 196, 201, 203, + 210, 212, 220, 221, 242, 246, 252, 257, + 259, 263, 267, 269, 273, 275, 278, 282, + 284, 291, 293, 297, 301, 305, 307, 309, + 318, 322, 327, 329, 335, 337, 338, 340, + 341, 343, 345, 347, 349, 364, 368, 370, + 372, 377, 381, 385, 387, 389, 393, 397, + 399, 403, 410, 415, 419, 422, 423, 427, + 434, 439, 440, 441, 443, 452, 454, 477, + 481, 483, 487, 491, 492, 496, 500, 503, + 505, 510, 523, 525, 527, 529, 531, 535, + 539, 541, 543, 545, 549, 553, 557, 559, + 561, 563, 565, 566, 568, 574, 580, 586, + 588, 592, 596, 601, 604, 614, 616, 618, + 621, 623, 625, 627, 629, 632, 637, 639, + 642, 650, 653, 655, 657, 659, 690, 698, + 700, 704, 711, 723, 730, 744, 750, 768, + 779, 785, 797, 800, 809, 814, 824, 830, + 844, 850, 862, 874, 878, 880, 886, 888, + 895, 898, 906, 907, 928, 937, 945, 951, + 953, 957, 961, 966, 972, 974, 977, 990, + 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, + 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, + 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, + 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, + 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, + 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, + 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, + 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, + 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, + 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, + 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461, + 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, + 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, + 1506, 1508, 1511, 1516, 1518, 1521, 1529, 1532, + 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, + 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, + 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, + 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, + 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, + 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, + 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, + 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848, + 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, + 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, + 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, + 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, + 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, + 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, + 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, + 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, + 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, + 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, + 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, + 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, + 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, + 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, + 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, + 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, + 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, + 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, + 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, + 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, + 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, + 2744, 2749, 2751, 2760, 2765, 2766, 2775, 
2781, + 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, + 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, + 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, + 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, + 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, + 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, + 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, + 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, + 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, + 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, + 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, + 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, + 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, + 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598, + 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, + 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, + 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, + 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, + 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, + 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, + 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, + 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, + 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, + 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, + 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, + 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, + 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, + 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, + 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, + 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, + 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, + 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, + 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, + 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, + 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, + 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, + 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, + 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477, + 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, + 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, + 4617, 4631, 4637, 4649, 4661, 4665, 4667, 4673, + 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, + 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, + 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, + 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, + 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, + 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, + 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, + 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037, + 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, + 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, + 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, + 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, + 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, + 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, + 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, + 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, + 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, + 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, + 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, + 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, + 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, + 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, + 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, + 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, + 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, + 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, + 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, + 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, + 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, + 6011, 6015, 6017, 6021, 6028, 6033, 6037, 
6040, + 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, + 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, + 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, + 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, + 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, + 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, + 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, + 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, + 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, + 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, + 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, + 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, + 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, + 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616, + 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, + 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, + 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, + 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, + 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, + 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, + 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, + 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, + 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, + 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, + 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, + 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, + 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, + 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, + 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, + 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, + 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, + 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, + 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, + 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, + 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, + 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, + 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, + 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581, + 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, + 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, + 7645, 7650, 7651, 7652, 7654, 7663, 7665, 7688, + 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714, + 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746, + 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770, + 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797, + 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829, + 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847, + 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921, + 7927, 7939, 7942, 7951, 7956, 7966, 7972, 7986, + 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037, + 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095, + 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137, + 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209, + 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229, + 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272, + 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321, + 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349, + 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406, + 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462, + 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514, + 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545, + 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582, + 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609, + 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650, + 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726, + 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838, + 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876, + 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911, + 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938, + 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005, + 9017, 9023, 9028, 9038, 9041, 9048, 9052, 
9060, + 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102, + 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153, + 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179, + 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208, + 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239, + 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269, + 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294, + 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324, + 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355, + 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401, + 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432, + 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458, + 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481, + 9487, 9489, 9493, 9497, 9502, 9505, 9515, 9517, + 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538, + 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591, + 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651, + 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725, + 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787, + 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846, + 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878, + 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949, + 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984, + 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015, + 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065, + 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106, + 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151, + 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203, + 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263, + 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301, + 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332, + 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361, + 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375, + 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401, + 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469, + 10472, 10487, 10489, 10492, 10501, 10505, 10509, 10538, + 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703, + 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869, + 10890, 10911, 10932, 10952, 10972, 10992, 11012, 11032, + 11052, 11072, 11092, 11112, +} + +var _graphclust_trans_keys []byte = []byte{ + 10, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 167, 169, 171, 173, 174, 175, + 176, 177, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 166, + 170, 172, 178, 150, 153, 155, 163, 165, + 167, 169, 173, 153, 155, 148, 161, 163, + 255, 189, 132, 185, 144, 152, 161, 164, + 255, 188, 129, 131, 190, 255, 133, 134, + 137, 138, 142, 150, 152, 161, 164, 255, + 131, 134, 137, 138, 142, 144, 146, 175, + 178, 180, 182, 255, 134, 138, 142, 161, + 164, 255, 188, 129, 131, 190, 191, 128, + 132, 135, 136, 139, 141, 150, 151, 162, + 163, 130, 190, 191, 151, 128, 130, 134, + 136, 138, 141, 128, 131, 190, 255, 133, + 137, 142, 148, 151, 161, 164, 255, 128, + 132, 134, 136, 138, 141, 149, 150, 162, + 163, 129, 131, 190, 255, 133, 137, 142, + 150, 152, 161, 164, 255, 130, 131, 138, + 150, 143, 148, 152, 159, 178, 179, 177, + 179, 186, 135, 142, 177, 179, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 177, 191, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 173, 183, 185, 190, 150, 153, + 158, 160, 177, 180, 130, 141, 157, 132, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 
178, 179, 180, 255, 148, 156, 158, + 255, 139, 141, 169, 133, 134, 160, 171, + 176, 187, 151, 155, 160, 162, 191, 149, + 158, 165, 188, 176, 190, 128, 132, 180, + 255, 133, 170, 180, 255, 128, 130, 161, + 173, 166, 179, 164, 183, 173, 144, 146, + 148, 168, 178, 180, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 128, 131, 157, 179, 181, 183, 144, + 176, 164, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 163, + 167, 128, 129, 180, 255, 134, 159, 178, + 255, 166, 173, 135, 147, 128, 131, 179, + 255, 129, 164, 166, 255, 169, 182, 131, + 188, 140, 141, 176, 178, 180, 183, 184, + 190, 191, 129, 171, 175, 181, 182, 163, + 170, 172, 173, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 128, 130, 184, + 255, 135, 190, 131, 175, 187, 255, 128, + 130, 167, 180, 179, 128, 130, 179, 255, + 129, 137, 141, 255, 190, 172, 183, 159, + 170, 188, 128, 131, 190, 191, 151, 128, + 132, 135, 136, 139, 141, 162, 163, 166, + 172, 176, 180, 181, 191, 128, 134, 176, + 255, 132, 255, 175, 181, 184, 255, 129, + 155, 158, 255, 129, 255, 171, 183, 157, + 171, 175, 182, 184, 191, 146, 167, 169, + 182, 171, 172, 189, 190, 176, 180, 176, + 182, 145, 190, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 128, 182, 187, 255, + 173, 180, 182, 255, 132, 155, 159, 161, + 175, 128, 163, 165, 128, 134, 136, 152, + 155, 161, 163, 164, 166, 170, 144, 150, + 132, 138, 145, 146, 151, 166, 169, 0, + 127, 176, 255, 131, 137, 191, 145, 189, + 135, 129, 130, 132, 133, 144, 154, 176, + 139, 159, 150, 156, 159, 164, 167, 168, + 170, 173, 145, 176, 255, 139, 255, 166, + 176, 171, 179, 160, 161, 163, 164, 165, + 166, 167, 169, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 168, 170, 150, 153, 155, 163, 165, 167, + 169, 173, 153, 155, 148, 161, 163, 255, + 131, 187, 189, 132, 185, 190, 255, 141, + 144, 129, 136, 145, 151, 152, 161, 162, + 163, 164, 255, 129, 188, 190, 130, 131, + 191, 255, 141, 151, 129, 132, 133, 134, + 137, 138, 142, 161, 162, 163, 164, 255, + 131, 188, 129, 130, 190, 255, 145, 181, + 129, 130, 131, 134, 135, 136, 137, 138, + 139, 141, 142, 175, 176, 177, 178, 255, + 134, 138, 141, 129, 136, 142, 161, 162, + 163, 164, 255, 129, 188, 130, 131, 190, + 191, 128, 141, 129, 132, 135, 136, 139, + 140, 150, 151, 162, 163, 130, 190, 191, + 128, 141, 151, 129, 130, 134, 136, 138, + 140, 128, 129, 131, 190, 255, 133, 137, + 129, 132, 142, 148, 151, 161, 164, 255, + 129, 188, 190, 191, 130, 131, 130, 134, + 128, 132, 135, 136, 138, 139, 140, 141, + 149, 150, 162, 163, 129, 190, 130, 131, + 191, 255, 133, 137, 141, 151, 129, 132, + 142, 161, 162, 163, 164, 255, 138, 143, + 150, 159, 144, 145, 146, 148, 152, 158, + 178, 179, 177, 179, 180, 186, 135, 142, + 177, 179, 180, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 191, + 177, 190, 128, 132, 134, 135, 141, 151, + 153, 188, 134, 128, 129, 130, 141, 156, + 157, 158, 159, 160, 162, 164, 168, 169, + 170, 172, 173, 174, 175, 176, 179, 183, + 177, 173, 183, 185, 186, 187, 188, 189, + 190, 150, 151, 152, 153, 158, 
160, 177, + 180, 130, 132, 141, 157, 133, 134, 157, + 159, 146, 148, 178, 180, 146, 147, 178, + 179, 182, 180, 189, 190, 255, 134, 157, + 137, 147, 148, 255, 139, 141, 169, 133, + 134, 178, 160, 162, 163, 166, 167, 168, + 169, 171, 176, 184, 185, 187, 155, 151, + 152, 153, 154, 150, 160, 162, 191, 149, + 151, 152, 158, 165, 172, 173, 178, 179, + 188, 176, 190, 132, 181, 187, 128, 131, + 180, 188, 189, 255, 130, 133, 170, 171, + 179, 180, 255, 130, 161, 170, 128, 129, + 162, 165, 166, 167, 168, 173, 167, 173, + 166, 169, 170, 174, 175, 177, 178, 179, + 164, 171, 172, 179, 180, 181, 182, 183, + 161, 173, 180, 144, 146, 148, 168, 178, + 179, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 144, 176, + 175, 177, 191, 160, 191, 128, 130, 170, + 175, 153, 154, 153, 154, 155, 160, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 175, 175, 178, 180, 189, 158, 159, + 176, 177, 130, 134, 139, 167, 163, 164, + 165, 166, 132, 133, 134, 159, 160, 177, + 178, 255, 166, 173, 135, 145, 146, 147, + 131, 179, 188, 128, 130, 180, 181, 182, + 185, 186, 255, 165, 129, 255, 169, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 131, 140, 141, 188, 176, 178, 180, 183, + 184, 190, 191, 129, 171, 181, 182, 172, + 173, 174, 175, 165, 168, 172, 173, 163, + 170, 172, 184, 190, 158, 128, 143, 160, + 175, 144, 145, 150, 155, 157, 158, 159, + 135, 139, 141, 168, 171, 189, 160, 182, + 186, 191, 129, 131, 133, 134, 140, 143, + 184, 186, 165, 166, 128, 129, 130, 132, + 133, 134, 135, 136, 139, 140, 141, 144, + 145, 146, 147, 150, 151, 152, 153, 154, + 156, 176, 178, 129, 128, 130, 184, 255, + 135, 190, 130, 131, 175, 176, 178, 183, + 184, 187, 255, 172, 128, 130, 167, 180, + 179, 130, 128, 129, 179, 181, 182, 190, + 191, 255, 129, 137, 138, 140, 141, 255, + 180, 190, 172, 174, 175, 177, 178, 181, + 182, 183, 159, 160, 162, 163, 170, 188, + 190, 191, 128, 129, 130, 131, 128, 151, + 129, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 183, 184, 191, + 133, 128, 129, 130, 134, 176, 185, 189, + 177, 178, 179, 186, 187, 190, 191, 255, + 129, 132, 255, 175, 190, 176, 177, 178, + 181, 184, 187, 188, 255, 129, 155, 158, + 255, 189, 176, 178, 179, 186, 187, 190, + 191, 255, 129, 255, 172, 182, 171, 173, + 174, 175, 176, 183, 166, 157, 159, 160, + 161, 162, 171, 175, 190, 176, 182, 184, + 191, 169, 177, 180, 146, 167, 170, 182, + 171, 172, 189, 190, 176, 180, 176, 182, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 166, 173, 165, 169, 174, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 128, 182, 187, 255, 173, + 180, 182, 255, 132, 155, 159, 161, 175, + 128, 163, 165, 128, 134, 136, 152, 155, + 161, 163, 164, 166, 170, 144, 150, 132, + 138, 143, 187, 191, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 139, + 168, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 144, 145, 150, 155, + 157, 158, 128, 191, 173, 128, 159, 160, + 191, 156, 128, 133, 134, 191, 0, 127, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 
142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 
162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 
168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 128, + 191, 160, 172, 174, 191, 128, 133, 134, + 155, 157, 191, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 128, 255, 128, 129, 130, 132, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 160, 255, 128, 129, + 130, 133, 134, 135, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 160, 255, + 168, 255, 128, 129, 130, 134, 135, 141, + 156, 157, 158, 159, 160, 162, 164, 168, + 169, 170, 172, 173, 174, 175, 176, 179, + 183, 168, 255, 192, 255, 159, 139, 187, + 158, 159, 176, 255, 135, 138, 139, 187, + 188, 255, 168, 255, 153, 154, 155, 160, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 175, 177, 178, 179, 180, 181, + 182, 184, 185, 186, 187, 188, 189, 191, + 176, 190, 192, 255, 135, 147, 160, 188, + 128, 156, 184, 129, 255, 128, 129, 130, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 158, 159, 135, 255, + 148, 176, 140, 168, 132, 160, 188, 152, + 180, 144, 172, 136, 164, 192, 255, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 128, 156, 160, 255, 136, 164, + 175, 176, 255, 128, 141, 143, 191, 128, + 129, 152, 155, 156, 130, 191, 140, 141, + 128, 138, 144, 167, 175, 191, 128, 159, + 176, 191, 157, 128, 191, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 128, 156, 160, 255, 136, 164, 175, + 176, 255, 135, 138, 139, 187, 188, 191, + 192, 255, 187, 191, 128, 190, 128, 190, + 188, 128, 175, 190, 191, 145, 155, 157, + 159, 128, 191, 130, 135, 128, 191, 189, + 128, 191, 128, 129, 130, 131, 132, 191, + 178, 128, 191, 128, 159, 164, 191, 133, + 128, 191, 128, 178, 187, 191, 135, 142, + 143, 145, 146, 149, 150, 153, 154, 155, + 164, 128, 
191, 128, 165, 166, 191, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 129, 135, 132, + 134, 128, 175, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 0, 127, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 
255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 166, 167, 169, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 168, 170, 150, 153, 155, + 163, 165, 167, 169, 173, 153, 155, 148, + 161, 163, 255, 131, 187, 189, 132, 185, + 190, 255, 141, 144, 129, 136, 145, 151, + 152, 161, 162, 163, 164, 255, 129, 188, + 190, 130, 131, 191, 255, 141, 151, 129, + 132, 133, 134, 137, 138, 142, 161, 162, + 163, 164, 255, 131, 188, 129, 130, 190, + 255, 145, 181, 129, 130, 131, 134, 135, + 136, 137, 138, 139, 141, 142, 175, 176, + 177, 178, 255, 134, 138, 141, 129, 136, + 142, 161, 162, 163, 164, 255, 129, 188, + 130, 131, 190, 191, 128, 141, 129, 132, + 135, 136, 139, 140, 150, 151, 162, 163, + 130, 190, 191, 128, 141, 151, 129, 130, + 134, 136, 138, 140, 128, 129, 131, 190, + 255, 133, 137, 129, 132, 142, 148, 151, + 161, 164, 255, 129, 188, 190, 191, 130, + 131, 130, 134, 128, 132, 135, 136, 138, + 139, 140, 141, 149, 150, 162, 163, 129, + 190, 130, 131, 191, 255, 133, 137, 141, + 151, 129, 132, 142, 161, 162, 163, 164, + 255, 138, 143, 150, 159, 144, 145, 146, + 148, 152, 158, 178, 179, 177, 179, 180, + 186, 135, 142, 177, 179, 180, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 191, 177, 190, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 177, 173, 183, 185, 186, + 187, 188, 189, 190, 150, 151, 152, 153, + 158, 160, 177, 180, 130, 132, 141, 157, + 133, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 182, 180, 189, 190, + 255, 134, 157, 137, 147, 148, 255, 139, + 141, 169, 133, 134, 178, 160, 162, 163, + 166, 167, 168, 169, 171, 176, 184, 185, + 187, 155, 151, 152, 153, 154, 150, 160, + 162, 191, 149, 151, 152, 158, 165, 172, + 173, 178, 179, 188, 176, 190, 132, 181, + 187, 128, 131, 180, 188, 189, 255, 130, + 133, 170, 171, 179, 180, 255, 130, 161, + 170, 128, 129, 162, 165, 166, 167, 168, + 173, 167, 173, 166, 169, 170, 174, 175, + 177, 178, 179, 164, 171, 172, 179, 180, + 181, 182, 183, 161, 173, 180, 144, 146, + 148, 168, 178, 179, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 144, 176, 175, 177, 191, 160, 191, + 128, 130, 170, 175, 153, 154, 153, 154, + 155, 160, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 175, 175, 178, 180, + 189, 158, 159, 176, 177, 130, 134, 139, + 167, 163, 164, 165, 166, 132, 133, 134, + 159, 160, 177, 178, 255, 166, 173, 135, + 145, 146, 147, 131, 179, 188, 128, 130, + 180, 181, 182, 185, 186, 255, 165, 129, + 255, 169, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 131, 140, 141, 188, 176, + 178, 180, 183, 184, 190, 191, 129, 171, + 181, 182, 172, 173, 174, 175, 165, 168, + 172, 173, 163, 170, 172, 184, 190, 158, + 128, 143, 160, 175, 144, 145, 150, 155, + 157, 158, 159, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 
186, 165, 166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 129, 128, + 130, 184, 255, 135, 190, 130, 131, 175, + 176, 178, 183, 184, 187, 255, 172, 128, + 130, 167, 180, 179, 130, 128, 129, 179, + 181, 182, 190, 191, 255, 129, 137, 138, + 140, 141, 255, 180, 190, 172, 174, 175, + 177, 178, 181, 182, 183, 159, 160, 162, + 163, 170, 188, 190, 191, 128, 129, 130, + 131, 128, 151, 129, 132, 135, 136, 139, + 141, 162, 163, 166, 172, 176, 180, 181, + 183, 184, 191, 133, 128, 129, 130, 134, + 176, 185, 189, 177, 178, 179, 186, 187, + 190, 191, 255, 129, 132, 255, 175, 190, + 176, 177, 178, 181, 184, 187, 188, 255, + 129, 155, 158, 255, 189, 176, 178, 179, + 186, 187, 190, 191, 255, 129, 255, 172, + 182, 171, 173, 174, 175, 176, 183, 166, + 157, 159, 160, 161, 162, 171, 175, 190, + 176, 182, 184, 191, 169, 177, 180, 146, + 167, 170, 182, 171, 172, 189, 190, 176, + 180, 176, 182, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 166, 173, + 165, 169, 174, 178, 187, 255, 131, 132, + 140, 169, 174, 255, 130, 132, 128, 182, + 187, 255, 173, 180, 182, 255, 132, 155, + 159, 161, 175, 128, 163, 165, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 144, 150, 132, 138, 143, 187, 191, 160, + 128, 129, 132, 135, 133, 134, 160, 255, + 192, 255, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 128, 129, 130, + 132, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 160, 255, 128, + 129, 130, 133, 134, 135, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 160, + 255, 168, 255, 128, 129, 130, 134, 135, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 168, 255, 192, 255, 159, 139, + 187, 158, 159, 176, 255, 135, 138, 139, + 187, 188, 255, 168, 255, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 177, 178, 179, 180, + 181, 182, 184, 185, 186, 187, 188, 189, + 191, 176, 190, 192, 255, 135, 147, 160, + 188, 128, 156, 184, 129, 255, 128, 129, + 130, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 158, 159, 135, + 255, 148, 176, 140, 168, 132, 160, 188, + 152, 180, 144, 172, 136, 164, 192, 255, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 128, 156, 160, 255, 136, + 164, 175, 176, 255, 142, 128, 191, 128, + 129, 152, 155, 156, 130, 191, 139, 141, + 128, 140, 142, 143, 144, 167, 168, 174, + 175, 191, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 167, 169, 171, 173, 174, + 175, 176, 177, 179, 180, 181, 182, 183, + 184, 
185, 186, 187, 188, 189, 190, 191, + 166, 170, 172, 178, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 189, 132, 185, 144, 152, 161, + 164, 255, 188, 129, 131, 190, 255, 133, + 134, 137, 138, 142, 150, 152, 161, 164, + 255, 131, 134, 137, 138, 142, 144, 146, + 175, 178, 180, 182, 255, 134, 138, 142, + 161, 164, 255, 188, 129, 131, 190, 191, + 128, 132, 135, 136, 139, 141, 150, 151, + 162, 163, 130, 190, 191, 151, 128, 130, + 134, 136, 138, 141, 128, 131, 190, 255, + 133, 137, 142, 148, 151, 161, 164, 255, + 128, 132, 134, 136, 138, 141, 149, 150, + 162, 163, 129, 131, 190, 255, 133, 137, + 142, 150, 152, 161, 164, 255, 130, 131, + 138, 150, 143, 148, 152, 159, 178, 179, + 177, 179, 186, 135, 142, 177, 179, 185, + 187, 188, 136, 141, 181, 183, 185, 152, + 153, 190, 191, 177, 191, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 173, 183, 185, 190, 150, + 153, 158, 160, 177, 180, 130, 141, 157, + 132, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 180, 255, 148, 156, + 158, 255, 139, 141, 169, 133, 134, 160, + 171, 176, 187, 151, 155, 160, 162, 191, + 149, 158, 165, 188, 176, 190, 128, 132, + 180, 255, 133, 170, 180, 255, 128, 130, + 161, 173, 166, 179, 164, 183, 173, 144, + 146, 148, 168, 178, 180, 184, 185, 128, + 181, 187, 191, 128, 131, 179, 181, 183, + 140, 141, 144, 176, 175, 177, 191, 160, + 191, 128, 130, 170, 175, 153, 154, 153, + 154, 155, 160, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 175, 175, 178, + 180, 189, 158, 159, 176, 177, 130, 134, + 139, 163, 167, 128, 129, 180, 255, 134, + 159, 178, 255, 166, 173, 135, 147, 128, + 131, 179, 255, 129, 164, 166, 255, 169, + 182, 131, 188, 140, 141, 176, 178, 180, + 183, 184, 190, 191, 129, 171, 175, 181, + 182, 163, 170, 172, 173, 172, 184, 190, + 158, 128, 143, 160, 175, 144, 145, 150, + 155, 157, 158, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 186, 165, 166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 128, 130, + 184, 255, 135, 190, 131, 175, 187, 255, + 128, 130, 167, 180, 179, 128, 130, 179, + 255, 129, 137, 141, 255, 190, 172, 183, + 159, 170, 188, 128, 131, 190, 191, 151, + 128, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 191, 128, 134, + 176, 255, 132, 255, 175, 181, 184, 255, + 129, 155, 158, 255, 129, 255, 171, 183, + 157, 171, 175, 182, 184, 191, 146, 167, + 169, 182, 171, 172, 189, 190, 176, 180, + 176, 182, 145, 190, 143, 146, 178, 157, + 158, 133, 134, 137, 168, 169, 170, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 160, 128, 129, 132, 135, + 133, 134, 160, 255, 192, 255, 128, 131, + 157, 179, 181, 183, 164, 144, 145, 150, + 155, 157, 158, 159, 145, 146, 151, 166, + 169, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 166, 167, 169, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 168, 170, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 131, 187, 189, 132, 185, 190, + 255, 141, 144, 129, 136, 145, 
151, 152, + 161, 162, 163, 164, 255, 129, 188, 190, + 130, 131, 191, 255, 141, 151, 129, 132, + 133, 134, 137, 138, 142, 161, 162, 163, + 164, 255, 131, 188, 129, 130, 190, 255, + 145, 181, 129, 130, 131, 134, 135, 136, + 137, 138, 139, 141, 142, 175, 176, 177, + 178, 255, 134, 138, 141, 129, 136, 142, + 161, 162, 163, 164, 255, 129, 188, 130, + 131, 190, 191, 128, 141, 129, 132, 135, + 136, 139, 140, 150, 151, 162, 163, 130, + 190, 191, 128, 141, 151, 129, 130, 134, + 136, 138, 140, 128, 129, 131, 190, 255, + 133, 137, 129, 132, 142, 148, 151, 161, + 164, 255, 129, 188, 190, 191, 130, 131, + 130, 134, 128, 132, 135, 136, 138, 139, + 140, 141, 149, 150, 162, 163, 129, 190, + 130, 131, 191, 255, 133, 137, 141, 151, + 129, 132, 142, 161, 162, 163, 164, 255, + 138, 143, 150, 159, 144, 145, 146, 148, + 152, 158, 178, 179, 177, 179, 180, 186, + 135, 142, 177, 179, 180, 185, 187, 188, + 136, 141, 181, 183, 185, 152, 153, 190, + 191, 191, 177, 190, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 177, 173, 183, 185, 186, 187, + 188, 189, 190, 150, 151, 152, 153, 158, + 160, 177, 180, 130, 132, 141, 157, 133, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 178, 179, 182, 180, 189, 190, 255, + 134, 157, 137, 147, 148, 255, 139, 141, + 169, 133, 134, 178, 160, 162, 163, 166, + 167, 168, 169, 171, 176, 184, 185, 187, + 155, 151, 152, 153, 154, 150, 160, 162, + 191, 149, 151, 152, 158, 165, 172, 173, + 178, 179, 188, 176, 190, 132, 181, 187, + 128, 131, 180, 188, 189, 255, 130, 133, + 170, 171, 179, 180, 255, 130, 161, 170, + 128, 129, 162, 165, 166, 167, 168, 173, + 167, 173, 166, 169, 170, 174, 175, 177, + 178, 179, 164, 171, 172, 179, 180, 181, + 182, 183, 161, 173, 180, 144, 146, 148, + 168, 178, 179, 184, 185, 128, 181, 187, + 191, 128, 131, 179, 181, 183, 140, 141, + 144, 176, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 167, + 163, 164, 165, 166, 132, 133, 134, 159, + 160, 177, 178, 255, 166, 173, 135, 145, + 146, 147, 131, 179, 188, 128, 130, 180, + 181, 182, 185, 186, 255, 165, 129, 255, + 169, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 131, 140, 141, 188, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 181, + 182, 172, 173, 174, 175, 165, 168, 172, + 173, 163, 170, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 129, 128, 130, + 184, 255, 135, 190, 130, 131, 175, 176, + 178, 183, 184, 187, 255, 172, 128, 130, + 167, 180, 179, 130, 128, 129, 179, 181, + 182, 190, 191, 255, 129, 137, 138, 140, + 141, 255, 180, 190, 172, 174, 175, 177, + 178, 181, 182, 183, 159, 160, 162, 163, + 170, 188, 190, 191, 128, 129, 130, 131, + 128, 151, 129, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 183, + 184, 191, 133, 128, 129, 130, 134, 176, + 185, 189, 177, 178, 179, 186, 187, 190, + 191, 255, 129, 132, 255, 175, 190, 176, + 177, 178, 181, 184, 187, 188, 255, 129, + 155, 158, 255, 189, 176, 178, 179, 186, + 187, 190, 191, 255, 129, 255, 172, 182, + 171, 173, 174, 175, 176, 183, 166, 157, + 159, 160, 161, 162, 171, 175, 190, 176, + 182, 184, 191, 169, 177, 180, 146, 167, + 170, 182, 171, 
172, 189, 190, 176, 180, + 176, 182, 143, 146, 178, 157, 158, 133, + 134, 137, 168, 169, 170, 166, 173, 165, + 169, 174, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 143, 187, 191, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 139, 168, 128, 159, 160, 175, 176, + 191, 157, 128, 191, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 
172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 141, 144, 129, + 136, 145, 151, 152, 161, 162, 163, 164, + 255, 129, 188, 190, 130, 131, 191, 255, + 141, 151, 129, 132, 133, 134, 137, 138, + 142, 161, 162, 163, 164, 255, 131, 188, + 129, 130, 190, 255, 145, 181, 129, 130, + 131, 134, 135, 136, 137, 138, 139, 141, + 142, 175, 176, 177, 178, 255, 134, 138, + 141, 129, 136, 142, 161, 162, 163, 164, + 255, 129, 188, 130, 131, 190, 191, 128, + 141, 129, 132, 135, 136, 139, 140, 150, + 151, 162, 163, 130, 190, 191, 128, 141, + 151, 129, 130, 134, 136, 138, 140, 128, + 129, 131, 190, 255, 133, 137, 129, 132, + 142, 148, 151, 161, 164, 255, 129, 188, + 190, 191, 130, 131, 130, 134, 128, 132, + 135, 136, 138, 139, 140, 141, 149, 150, + 162, 163, 129, 190, 130, 131, 191, 255, + 133, 137, 141, 151, 129, 132, 142, 161, + 162, 163, 164, 255, 138, 143, 150, 159, + 144, 145, 146, 148, 152, 158, 178, 179, + 177, 179, 180, 186, 135, 142, 177, 179, + 180, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 191, 177, 190, + 128, 132, 134, 135, 141, 151, 153, 188, + 134, 128, 129, 130, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 177, 173, + 183, 185, 186, 187, 188, 189, 190, 150, + 151, 152, 153, 158, 160, 177, 180, 130, + 132, 141, 157, 133, 134, 157, 159, 146, + 148, 178, 180, 146, 147, 178, 179, 182, + 180, 189, 190, 255, 134, 157, 137, 147, + 148, 255, 139, 141, 169, 133, 134, 178, + 160, 162, 163, 166, 167, 168, 169, 171, + 176, 184, 185, 187, 155, 151, 152, 153, + 154, 150, 160, 162, 191, 149, 151, 152, + 158, 165, 172, 173, 178, 179, 188, 176, + 190, 132, 181, 187, 128, 131, 180, 188, + 189, 255, 130, 133, 170, 171, 179, 180, + 255, 130, 161, 170, 128, 129, 162, 165, + 166, 167, 168, 173, 167, 173, 166, 169, + 170, 174, 175, 177, 178, 179, 164, 171, + 172, 179, 180, 181, 182, 183, 161, 173, + 180, 144, 146, 148, 168, 178, 179, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 144, 176, 175, 177, + 191, 160, 191, 128, 130, 170, 175, 153, + 154, 153, 154, 155, 160, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 175, + 175, 178, 180, 189, 158, 159, 176, 177, + 130, 134, 139, 167, 163, 164, 165, 166, + 132, 133, 134, 159, 160, 177, 178, 255, + 166, 173, 135, 145, 146, 147, 131, 179, + 188, 128, 130, 180, 181, 182, 185, 186, + 255, 165, 129, 255, 169, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 131, 140, + 141, 188, 176, 178, 180, 183, 184, 190, + 191, 129, 171, 181, 182, 172, 173, 174, + 175, 165, 168, 172, 173, 163, 170, 172, + 184, 190, 158, 128, 143, 160, 175, 144, + 145, 150, 155, 157, 158, 159, 135, 139, + 141, 168, 171, 189, 160, 182, 186, 191, + 129, 131, 133, 134, 140, 143, 184, 186, + 165, 166, 128, 129, 130, 132, 133, 134, + 135, 136, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 156, 176, + 178, 129, 128, 130, 184, 
255, 135, 190, + 130, 131, 175, 176, 178, 183, 184, 187, + 255, 172, 128, 130, 167, 180, 179, 130, + 128, 129, 179, 181, 182, 190, 191, 255, + 129, 137, 138, 140, 141, 255, 180, 190, + 172, 174, 175, 177, 178, 181, 182, 183, + 159, 160, 162, 163, 170, 188, 190, 191, + 128, 129, 130, 131, 128, 151, 129, 132, + 135, 136, 139, 141, 162, 163, 166, 172, + 176, 180, 181, 183, 184, 191, 133, 128, + 129, 130, 134, 176, 185, 189, 177, 178, + 179, 186, 187, 190, 191, 255, 129, 132, + 255, 175, 190, 176, 177, 178, 181, 184, + 187, 188, 255, 129, 155, 158, 255, 189, + 176, 178, 179, 186, 187, 190, 191, 255, + 129, 255, 172, 182, 171, 173, 174, 175, + 176, 183, 166, 157, 159, 160, 161, 162, + 171, 175, 190, 176, 182, 184, 191, 169, + 177, 180, 146, 167, 170, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 166, 173, 165, 169, 174, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 143, + 187, 191, 160, 128, 129, 132, 135, 133, + 134, 160, 255, 192, 255, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 191, 128, 156, 161, 190, 192, + 255, 136, 164, 175, 176, 255, 135, 138, + 139, 187, 188, 191, 192, 255, 0, 127, + 192, 255, 187, 191, 128, 190, 191, 128, + 190, 188, 128, 175, 176, 189, 190, 191, + 145, 155, 157, 159, 128, 191, 130, 135, + 128, 191, 189, 128, 191, 128, 129, 130, + 131, 132, 191, 178, 128, 191, 128, 159, + 160, 163, 164, 191, 133, 128, 191, 128, + 178, 179, 186, 187, 191, 135, 142, 143, + 145, 146, 149, 150, 153, 154, 155, 164, + 128, 191, 128, 165, 166, 191, 128, 255, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 
149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 
162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 
132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 130, 131, 135, + 191, 129, 134, 136, 190, 128, 159, 160, + 191, 128, 175, 176, 255, 10, 13, 127, + 194, 216, 219, 220, 224, 225, 226, 234, + 235, 236, 237, 239, 240, 243, 0, 31, + 128, 191, 192, 223, 227, 238, 241, 247, + 248, 255, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 239, 240, 243, 204, 205, + 210, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 234, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 239, 240, 243, 194, 216, + 219, 220, 224, 225, 226, 234, 235, 236, + 237, 239, 240, 243, 32, 126, 192, 223, + 227, 238, 241, 247, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 235, 236, 237, 239, 240, 243, 204, + 205, 210, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 234, + 237, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 237, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 237, 239, 240, 243, 204, 205, + 210, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 234, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 235, 236, 237, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 239, 240, 243, 204, 205, 210, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 234, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 239, 240, 243, 204, 205, 210, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 234, 237, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 237, 239, 240, 243, 204, + 205, 210, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 234, + 237, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 
217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, +} + +var _graphclust_single_lengths []byte = []byte{ + 0, 1, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 26, 0, + 0, 0, 1, 1, 1, 0, 0, 2, + 1, 0, 1, 1, 0, 2, 0, 0, + 2, 0, 2, 1, 0, 1, 0, 3, + 0, 0, 1, 21, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 1, + 0, 5, 2, 6, 0, 1, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 2, 1, 1, 0, 3, 1, 0, 7, + 5, 1, 1, 0, 1, 0, 23, 0, + 0, 0, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 0, 0, 1, 3, 0, 0, 0, 3, + 0, 0, 0, 0, 1, 1, 0, 1, + 0, 1, 0, 0, 0, 29, 0, 0, + 0, 3, 2, 3, 2, 2, 2, 3, + 2, 2, 3, 3, 1, 2, 4, 2, + 2, 4, 4, 2, 0, 2, 0, 3, + 1, 0, 1, 21, 1, 0, 4, 0, + 0, 0, 1, 2, 0, 1, 1, 1, + 4, 0, 3, 1, 3, 2, 0, 3, + 0, 5, 2, 0, 0, 1, 0, 2, + 0, 0, 15, 0, 0, 0, 4, 0, + 0, 0, 3, 1, 0, 4, 1, 4, + 4, 3, 1, 0, 7, 5, 1, 1, + 0, 1, 0, 23, 1, 0, 1, 1, + 1, 1, 0, 2, 1, 3, 2, 0, + 1, 3, 1, 2, 0, 1, 0, 2, + 1, 2, 3, 4, 0, 0, 0, 1, + 0, 6, 2, 0, 0, 0, 0, 1, + 3, 0, 0, 0, 1, 0, 1, 4, + 0, 0, 0, 1, 1, 1, 4, 0, + 0, 0, 6, 0, 1, 1, 0, 0, + 0, 1, 1, 0, 1, 0, 1, 0, + 0, 0, 26, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 1, 0, 1, 1, + 0, 2, 0, 0, 2, 0, 2, 1, + 0, 1, 0, 3, 0, 0, 1, 21, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 1, 0, 5, 2, 6, + 0, 1, 0, 1, 0, 2, 0, 0, + 15, 0, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 0, 2, 1, 1, 0, + 3, 1, 0, 7, 5, 1, 1, 0, + 1, 0, 23, 0, 0, 0, 0, 1, + 0, 0, 1, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 4, 0, 0, 0, 0, 1, 0, + 6, 0, 0, 0, 0, 0, 1, 3, + 0, 0, 0, 3, 0, 0, 0, 0, + 1, 1, 0, 1, 0, 1, 0, 0, + 0, 29, 0, 0, 0, 3, 2, 3, + 2, 2, 2, 3, 2, 2, 3, 3, + 1, 2, 4, 2, 2, 4, 4, 2, + 0, 2, 0, 3, 1, 0, 1, 21, + 1, 0, 4, 0, 0, 0, 1, 2, + 0, 1, 1, 1, 4, 0, 3, 1, + 3, 2, 0, 3, 0, 5, 2, 0, + 0, 1, 0, 2, 0, 0, 15, 0, + 0, 0, 4, 0, 0, 0, 3, 1, + 0, 4, 1, 4, 4, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 1, 0, 1, 1, 1, 1, 0, 2, + 1, 3, 2, 0, 1, 3, 1, 2, + 0, 1, 0, 2, 1, 2, 3, 4, + 0, 0, 0, 1, 0, 6, 2, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 1, 0, 1, 4, 0, 0, 0, 1, + 1, 1, 4, 0, 0, 0, 6, 0, + 0, 0, 1, 1, 2, 1, 1, 5, + 0, 24, 0, 24, 0, 0, 23, 0, + 0, 1, 0, 2, 0, 0, 0, 28, + 0, 3, 23, 2, 0, 2, 2, 3, + 2, 2, 2, 0, 54, 54, 27, 1, + 0, 5, 2, 0, 1, 1, 0, 0, + 14, 0, 3, 2, 2, 3, 2, 2, + 2, 54, 54, 27, 1, 0, 2, 0, + 1, 4, 2, 1, 0, 1, 0, 1, + 0, 11, 0, 7, 1, 0, 1, 0, + 2, 3, 2, 1, 0, 1, 1, 3, + 0, 1, 3, 0, 1, 1, 2, 1, + 1, 5, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 26, + 0, 0, 0, 1, 1, 1, 0, 0, + 2, 1, 0, 1, 1, 0, 2, 0, + 0, 2, 0, 2, 1, 0, 1, 0, + 3, 0, 0, 1, 21, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 1, 0, 5, 2, 6, 0, 1, 0, + 1, 0, 2, 0, 0, 15, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 2, 1, 1, 0, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 0, 0, 0, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 0, + 0, 0, 0, 1, 0, 6, 0, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 3, 0, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 29, 0, + 0, 0, 3, 2, 3, 2, 2, 2, + 3, 2, 2, 3, 3, 1, 2, 4, + 2, 2, 4, 4, 2, 0, 2, 0, + 3, 1, 0, 1, 21, 1, 0, 4, + 0, 0, 0, 1, 2, 0, 1, 1, + 1, 4, 0, 3, 1, 3, 2, 0, + 3, 0, 5, 2, 0, 0, 1, 0, + 2, 0, 0, 15, 0, 0, 0, 4, + 0, 0, 0, 3, 1, 0, 4, 1, + 4, 4, 3, 1, 0, 7, 5, 1, + 1, 0, 1, 0, 23, 1, 0, 1, + 1, 1, 1, 0, 2, 1, 3, 2, + 0, 1, 3, 1, 2, 0, 1, 0, + 2, 1, 2, 3, 4, 0, 0, 0, + 1, 0, 6, 2, 0, 0, 0, 0, + 1, 3, 0, 0, 0, 1, 0, 1, + 4, 0, 0, 0, 1, 1, 1, 4, + 0, 0, 0, 6, 24, 0, 24, 0, + 0, 23, 0, 0, 1, 0, 2, 0, + 0, 
0, 28, 0, 3, 23, 2, 0, + 2, 2, 3, 2, 2, 2, 0, 54, + 54, 27, 1, 1, 5, 2, 0, 0, + 0, 1, 1, 0, 1, 0, 1, 0, + 0, 0, 26, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 1, 0, 1, 1, + 0, 2, 0, 0, 2, 0, 2, 1, + 0, 1, 0, 3, 0, 0, 1, 21, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 1, 0, 5, 2, 0, + 0, 1, 0, 2, 0, 0, 15, 0, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0, 2, 1, 1, 0, 3, 1, + 0, 6, 5, 1, 1, 0, 1, 0, + 23, 0, 0, 0, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 4, + 0, 0, 0, 0, 1, 0, 6, 0, + 0, 0, 0, 0, 1, 3, 0, 0, + 0, 1, 4, 0, 0, 0, 6, 1, + 7, 3, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 29, + 0, 0, 0, 3, 2, 3, 2, 2, + 2, 3, 2, 2, 3, 3, 1, 2, + 4, 2, 2, 4, 4, 2, 0, 2, + 0, 3, 1, 0, 1, 21, 1, 0, + 4, 0, 0, 0, 1, 2, 0, 1, + 1, 1, 4, 0, 3, 1, 3, 2, + 0, 3, 0, 5, 2, 0, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 4, 0, 0, 0, 3, 1, 0, 4, + 1, 4, 4, 3, 1, 0, 7, 5, + 1, 1, 0, 1, 0, 23, 1, 0, + 1, 1, 1, 1, 0, 2, 1, 3, + 2, 0, 1, 3, 1, 2, 0, 1, + 0, 2, 1, 2, 3, 4, 0, 0, + 0, 1, 0, 6, 2, 0, 0, 0, + 0, 1, 3, 0, 0, 0, 1, 0, + 1, 4, 0, 0, 0, 1, 1, 0, + 1, 0, 0, 0, 1, 1, 0, 1, + 0, 1, 0, 0, 0, 29, 0, 0, + 0, 3, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 26, 0, + 0, 0, 1, 1, 1, 0, 0, 2, + 1, 0, 1, 1, 0, 2, 0, 0, + 2, 0, 2, 1, 0, 1, 0, 3, + 0, 0, 1, 21, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 1, + 0, 5, 2, 6, 0, 1, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 2, 1, 1, 0, 3, 1, 0, 7, + 5, 1, 1, 0, 1, 0, 23, 0, + 0, 0, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 0, 0, 1, 3, 0, 0, 0, 3, + 0, 1, 1, 1, 4, 0, 0, 0, + 6, 2, 3, 2, 2, 2, 3, 2, + 2, 3, 3, 1, 2, 4, 2, 2, + 4, 4, 2, 0, 2, 0, 3, 1, + 0, 1, 21, 1, 0, 4, 0, 0, + 0, 1, 2, 0, 1, 1, 1, 4, + 0, 3, 1, 3, 2, 0, 3, 0, + 5, 2, 0, 0, 1, 0, 2, 0, + 0, 15, 0, 0, 0, 4, 0, 0, + 0, 3, 1, 0, 4, 1, 4, 4, + 3, 1, 0, 7, 5, 1, 1, 0, + 1, 0, 23, 1, 0, 1, 1, 1, + 1, 0, 2, 1, 3, 2, 0, 1, + 3, 1, 2, 0, 1, 0, 2, 1, + 2, 3, 4, 0, 0, 0, 1, 0, + 6, 2, 0, 0, 0, 0, 1, 3, + 0, 0, 0, 1, 0, 1, 4, 0, + 0, 0, 1, 0, 0, 14, 0, 3, + 2, 2, 3, 2, 2, 2, 54, 54, + 29, 1, 0, 0, 0, 0, 2, 1, + 1, 4, 2, 1, 0, 1, 0, 1, + 0, 11, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 26, + 0, 0, 0, 1, 1, 1, 0, 0, + 2, 1, 0, 1, 1, 0, 2, 0, + 0, 2, 0, 2, 1, 0, 1, 0, + 3, 0, 0, 1, 21, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 1, 0, 5, 2, 6, 0, 1, 0, + 1, 0, 2, 0, 0, 15, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 2, 1, 1, 0, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 0, 0, 0, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 0, + 0, 0, 0, 1, 0, 6, 0, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 3, 0, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 29, 0, + 0, 0, 3, 2, 3, 2, 2, 2, + 3, 2, 2, 3, 3, 1, 2, 4, + 2, 2, 4, 4, 2, 0, 2, 0, + 3, 1, 0, 1, 21, 1, 0, 4, + 0, 0, 0, 1, 2, 0, 1, 1, + 1, 4, 0, 3, 1, 3, 2, 0, + 3, 0, 5, 2, 0, 0, 1, 0, + 2, 0, 0, 15, 0, 0, 0, 4, + 0, 0, 0, 3, 1, 0, 4, 1, + 4, 4, 3, 1, 0, 7, 5, 1, + 1, 0, 1, 0, 23, 1, 0, 1, + 1, 1, 1, 0, 2, 1, 3, 2, + 0, 1, 3, 1, 2, 0, 1, 0, + 2, 1, 2, 3, 4, 0, 0, 0, + 1, 0, 6, 2, 0, 0, 0, 0, + 1, 3, 0, 0, 0, 1, 0, 1, + 4, 0, 0, 0, 1, 1, 1, 4, + 0, 0, 0, 6, 7, 1, 0, 1, + 0, 2, 3, 2, 1, 0, 1, 1, + 3, 0, 1, 5, 0, 0, 17, 20, + 20, 20, 14, 20, 20, 20, 23, 21, + 21, 21, 20, 23, 20, 20, 20, 21, + 21, 21, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, +} + +var _graphclust_range_lengths []byte = []byte{ + 0, 0, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 2, 4, + 1, 2, 1, 2, 2, 5, 6, 2, + 2, 5, 1, 3, 2, 3, 5, 2, + 3, 1, 3, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 2, 3, 1, 1, + 2, 2, 1, 2, 1, 1, 2, 1, + 2, 1, 2, 2, 2, 1, 1, 
4, + 2, 0, 0, 0, 1, 0, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 1, 2, 2, 1, 1, 2, 2, 1, + 1, 3, 2, 2, 0, 0, 2, 0, + 0, 0, 0, 1, 4, 1, 0, 2, + 1, 2, 2, 0, 2, 2, 1, 1, + 2, 6, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 2, 0, 1, 1, + 1, 1, 0, 1, 0, 3, 3, 1, + 2, 2, 2, 0, 5, 1, 1, 0, + 1, 1, 1, 1, 1, 2, 1, 1, + 4, 1, 1, 1, 1, 1, 4, 1, + 2, 2, 5, 2, 6, 2, 8, 4, + 2, 5, 0, 3, 2, 4, 1, 6, + 2, 4, 4, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 4, 4, 1, 1, + 2, 2, 2, 2, 1, 1, 6, 2, + 5, 1, 3, 3, 4, 4, 4, 4, + 2, 0, 0, 1, 1, 0, 1, 0, + 1, 1, 0, 2, 1, 1, 2, 4, + 1, 2, 4, 1, 5, 0, 3, 2, + 1, 0, 0, 2, 0, 0, 0, 0, + 1, 4, 1, 0, 2, 1, 4, 2, + 0, 4, 3, 4, 2, 2, 6, 2, + 2, 4, 1, 4, 2, 4, 1, 3, + 3, 2, 2, 0, 1, 1, 1, 0, + 1, 0, 3, 3, 1, 2, 2, 2, + 0, 5, 1, 1, 0, 1, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, + 1, 0, 0, 1, 2, 2, 1, 1, + 1, 1, 2, 1, 1, 4, 1, 1, + 1, 1, 2, 4, 1, 2, 1, 2, + 2, 5, 6, 2, 2, 5, 1, 3, + 2, 3, 5, 2, 3, 1, 3, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 2, 3, 1, 1, 2, 2, 1, 2, + 1, 1, 2, 1, 2, 1, 2, 2, + 2, 1, 1, 4, 2, 0, 0, 0, + 1, 0, 1, 0, 1, 0, 1, 1, + 0, 2, 1, 1, 1, 2, 2, 1, + 1, 2, 2, 1, 1, 3, 2, 2, + 0, 0, 2, 0, 0, 0, 0, 1, + 4, 1, 0, 2, 1, 2, 2, 0, + 2, 2, 1, 1, 2, 6, 1, 1, + 1, 1, 2, 2, 1, 1, 1, 2, + 2, 0, 1, 1, 1, 1, 0, 1, + 0, 3, 3, 1, 2, 2, 2, 0, + 5, 1, 1, 0, 1, 1, 1, 1, + 1, 2, 1, 1, 4, 1, 1, 1, + 1, 1, 4, 1, 2, 2, 5, 2, + 6, 2, 8, 4, 2, 5, 0, 3, + 2, 4, 1, 6, 2, 4, 4, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 4, 4, 1, 1, 2, 2, 2, 2, + 1, 1, 6, 2, 5, 1, 3, 3, + 4, 4, 4, 4, 2, 0, 0, 1, + 1, 0, 1, 0, 1, 1, 0, 2, + 1, 1, 2, 4, 1, 2, 4, 1, + 5, 0, 3, 2, 1, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 4, 2, 0, 4, 3, 4, + 2, 2, 6, 2, 2, 4, 1, 4, + 2, 4, 1, 3, 3, 2, 2, 0, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 0, 1, 1, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 1, + 2, 3, 1, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 1, 0, 1, + 1, 0, 1, 0, 1, 3, 1, 2, + 2, 1, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 1, 1, 2, 2, + 2, 1, 3, 2, 1, 1, 3, 1, + 3, 3, 1, 0, 0, 0, 0, 0, + 1, 1, 1, 2, 2, 4, 1, 1, + 2, 1, 1, 1, 3, 1, 2, 1, + 2, 1, 2, 0, 0, 1, 1, 5, + 9, 2, 1, 3, 5, 3, 1, 6, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 2, + 4, 1, 2, 1, 2, 2, 5, 6, + 2, 2, 5, 1, 3, 2, 3, 5, + 2, 3, 1, 3, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 2, 3, 1, + 1, 2, 2, 1, 2, 1, 1, 2, + 1, 2, 1, 2, 2, 2, 1, 1, + 4, 2, 0, 0, 0, 1, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 1, + 1, 1, 2, 2, 1, 1, 2, 2, + 1, 1, 3, 2, 2, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 2, 2, 0, 2, 2, 1, + 1, 2, 6, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 2, 0, 1, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 1, 4, + 1, 2, 2, 5, 2, 6, 2, 8, + 4, 2, 5, 0, 3, 2, 4, 1, + 6, 2, 4, 4, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 4, 4, 1, + 1, 2, 2, 2, 2, 1, 1, 6, + 2, 5, 1, 3, 3, 4, 4, 4, + 4, 2, 0, 0, 1, 1, 0, 1, + 0, 1, 1, 0, 2, 1, 1, 2, + 4, 1, 2, 4, 1, 5, 0, 3, + 2, 1, 0, 0, 2, 0, 0, 0, + 0, 1, 4, 1, 0, 2, 1, 4, + 2, 0, 4, 3, 4, 2, 2, 6, + 2, 2, 4, 1, 4, 2, 4, 1, + 3, 3, 2, 2, 0, 1, 1, 1, + 0, 1, 0, 3, 3, 1, 2, 2, + 2, 0, 5, 1, 1, 0, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 1, 0, 1, + 1, 0, 1, 1, 0, 1, 0, 1, + 3, 1, 2, 2, 1, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 1, + 1, 2, 2, 1, 1, 5, 1, 1, + 1, 1, 2, 1, 1, 4, 1, 1, + 1, 1, 2, 4, 1, 2, 1, 2, + 2, 5, 6, 2, 2, 5, 1, 3, + 2, 3, 5, 2, 3, 1, 3, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 2, 3, 1, 1, 2, 2, 1, 2, + 1, 1, 2, 1, 2, 1, 2, 2, + 2, 1, 1, 4, 2, 0, 0, 1, + 1, 0, 1, 0, 1, 1, 0, 2, + 1, 1, 1, 2, 2, 1, 1, 2, + 2, 1, 1, 3, 2, 2, 0, 0, + 2, 0, 0, 0, 0, 1, 4, 1, + 0, 2, 1, 2, 2, 0, 2, 2, + 1, 1, 2, 6, 1, 1, 1, 1, + 2, 2, 1, 1, 1, 2, 2, 0, + 1, 1, 1, 1, 
0, 1, 0, 3, + 3, 1, 2, 2, 2, 0, 5, 1, + 1, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 1, + 4, 1, 2, 2, 5, 2, 6, 2, + 8, 4, 2, 5, 0, 3, 2, 4, + 1, 6, 2, 4, 4, 1, 1, 2, + 1, 2, 1, 4, 0, 0, 4, 4, + 1, 1, 2, 2, 2, 2, 1, 1, + 6, 2, 5, 1, 3, 3, 4, 4, + 4, 4, 2, 0, 0, 1, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 2, 4, 1, 2, 4, 1, 5, 0, + 3, 2, 1, 0, 0, 2, 0, 0, + 0, 0, 1, 4, 1, 0, 2, 1, + 4, 2, 0, 4, 3, 4, 2, 2, + 6, 2, 2, 4, 1, 4, 2, 4, + 1, 3, 3, 2, 2, 0, 1, 1, + 1, 0, 1, 0, 3, 3, 1, 2, + 2, 2, 0, 5, 1, 1, 0, 1, + 0, 1, 1, 1, 0, 0, 0, 3, + 1, 1, 1, 1, 1, 2, 1, 1, + 4, 1, 1, 1, 1, 1, 4, 1, + 2, 2, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 2, 4, + 1, 2, 1, 2, 2, 5, 6, 2, + 2, 5, 1, 3, 2, 3, 5, 2, + 3, 1, 3, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 2, 3, 1, 1, + 2, 2, 1, 2, 1, 1, 2, 1, + 2, 1, 2, 2, 2, 1, 1, 4, + 2, 0, 0, 0, 1, 0, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 1, 2, 2, 1, 1, 2, 2, 1, + 1, 3, 2, 2, 0, 0, 2, 0, + 0, 0, 0, 1, 4, 1, 0, 2, + 1, 2, 2, 0, 2, 2, 1, 1, + 2, 6, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 2, 0, 1, 1, + 1, 1, 0, 1, 0, 3, 3, 1, + 2, 2, 2, 0, 5, 1, 1, 0, + 1, 0, 0, 0, 1, 1, 1, 0, + 0, 5, 2, 6, 2, 8, 4, 2, + 5, 0, 3, 2, 4, 1, 6, 2, + 4, 4, 1, 1, 2, 1, 2, 1, + 4, 0, 0, 4, 4, 1, 1, 2, + 2, 2, 2, 1, 1, 6, 2, 5, + 1, 3, 3, 4, 4, 4, 4, 2, + 0, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 2, 1, 1, 2, 4, 1, + 2, 4, 1, 5, 0, 3, 2, 1, + 0, 0, 2, 0, 0, 0, 0, 1, + 4, 1, 0, 2, 1, 4, 2, 0, + 4, 3, 4, 2, 2, 6, 2, 2, + 4, 1, 4, 2, 4, 1, 3, 3, + 2, 2, 0, 1, 1, 1, 0, 1, + 0, 3, 3, 1, 2, 2, 2, 0, + 5, 1, 1, 0, 1, 0, 1, 1, + 1, 0, 1, 3, 1, 3, 3, 1, + 0, 0, 0, 0, 0, 1, 1, 1, + 3, 2, 4, 1, 0, 1, 1, 1, + 3, 1, 1, 1, 3, 1, 3, 1, + 3, 1, 2, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 2, + 4, 1, 2, 1, 2, 2, 5, 6, + 2, 2, 5, 1, 3, 2, 3, 5, + 2, 3, 1, 3, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 2, 3, 1, + 1, 2, 2, 1, 2, 1, 1, 2, + 1, 2, 1, 2, 2, 2, 1, 1, + 4, 2, 0, 0, 0, 1, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 1, + 1, 1, 2, 2, 1, 1, 2, 2, + 1, 1, 3, 2, 2, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 2, 2, 0, 2, 2, 1, + 1, 2, 6, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 2, 0, 1, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 1, 4, + 1, 2, 2, 5, 2, 6, 2, 8, + 4, 2, 5, 0, 3, 2, 4, 1, + 6, 2, 4, 4, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 4, 4, 1, + 1, 2, 2, 2, 2, 1, 1, 6, + 2, 5, 1, 3, 3, 4, 4, 4, + 4, 2, 0, 0, 1, 1, 0, 1, + 0, 1, 1, 0, 2, 1, 1, 2, + 4, 1, 2, 4, 1, 5, 0, 3, + 2, 1, 0, 0, 2, 0, 0, 0, + 0, 1, 4, 1, 0, 2, 1, 4, + 2, 0, 4, 3, 4, 2, 2, 6, + 2, 2, 4, 1, 4, 2, 4, 1, + 3, 3, 2, 2, 0, 1, 1, 1, + 0, 1, 0, 3, 3, 1, 2, 2, + 2, 0, 5, 1, 1, 0, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, + 5, 9, 2, 1, 3, 5, 3, 1, + 6, 1, 1, 2, 2, 2, 6, 0, + 0, 0, 4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_index_offsets []int16 = []int16{ + 0, 0, 2, 4, 6, 8, 11, 15, + 17, 20, 25, 28, 30, 32, 34, 63, + 68, 70, 73, 76, 80, 84, 90, 97, + 102, 106, 112, 115, 120, 123, 129, 135, + 138, 144, 146, 152, 155, 157, 161, 163, + 169, 171, 176, 178, 200, 203, 207, 212, + 214, 217, 220, 222, 225, 227, 230, 233, + 235, 241, 243, 246, 249, 252, 254, 256, + 262, 265, 271, 274, 281, 283, 285, 287, + 289, 291, 294, 296, 298, 314, 317, 319, + 321, 326, 329, 332, 334, 336, 339, 342, + 344, 348, 353, 357, 360, 364, 366, 369, + 377, 383, 385, 387, 389, 395, 397, 421, + 424, 426, 429, 432, 434, 437, 440, 443, + 445, 449, 457, 459, 461, 463, 465, 468, + 471, 473, 475, 477, 480, 483, 488, 490, + 492, 494, 496, 498, 500, 507, 511, 515, + 517, 520, 523, 527, 531, 537, 539, 541, + 545, 547, 549, 
551, 553, 556, 560, 562, + 565, 570, 573, 575, 577, 579, 610, 615, + 617, 620, 626, 634, 640, 649, 654, 665, + 673, 678, 686, 690, 697, 701, 708, 714, + 723, 728, 737, 746, 750, 752, 757, 759, + 765, 768, 773, 775, 797, 803, 808, 814, + 816, 819, 822, 826, 831, 833, 836, 844, + 848, 858, 860, 867, 872, 880, 887, 892, + 900, 903, 909, 912, 914, 916, 918, 920, + 923, 925, 927, 943, 946, 948, 950, 957, + 962, 964, 967, 975, 978, 984, 989, 994, + 1001, 1007, 1011, 1013, 1016, 1024, 1030, 1032, + 1034, 1036, 1042, 1044, 1068, 1072, 1074, 1080, + 1084, 1086, 1092, 1096, 1103, 1107, 1113, 1122, + 1125, 1129, 1137, 1140, 1147, 1150, 1156, 1158, + 1164, 1169, 1174, 1180, 1185, 1187, 1189, 1191, + 1193, 1195, 1202, 1208, 1212, 1214, 1217, 1220, + 1224, 1228, 1234, 1236, 1238, 1240, 1242, 1244, + 1250, 1252, 1254, 1255, 1257, 1259, 1261, 1267, + 1269, 1271, 1272, 1279, 1281, 1285, 1289, 1291, + 1293, 1295, 1298, 1302, 1304, 1307, 1312, 1315, + 1317, 1319, 1321, 1350, 1355, 1357, 1360, 1363, + 1367, 1371, 1377, 1384, 1389, 1393, 1399, 1402, + 1407, 1410, 1416, 1422, 1425, 1431, 1433, 1439, + 1442, 1444, 1448, 1450, 1456, 1458, 1463, 1465, + 1487, 1490, 1494, 1499, 1501, 1504, 1507, 1509, + 1512, 1514, 1517, 1520, 1522, 1528, 1530, 1533, + 1536, 1539, 1541, 1543, 1549, 1552, 1558, 1561, + 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, + 1585, 1601, 1604, 1606, 1608, 1613, 1616, 1619, + 1621, 1623, 1626, 1629, 1631, 1635, 1640, 1644, + 1647, 1651, 1653, 1656, 1664, 1670, 1672, 1674, + 1676, 1682, 1684, 1708, 1711, 1713, 1716, 1719, + 1721, 1724, 1727, 1730, 1732, 1736, 1744, 1746, + 1748, 1750, 1752, 1755, 1758, 1760, 1762, 1764, + 1767, 1770, 1775, 1777, 1779, 1781, 1783, 1785, + 1787, 1794, 1798, 1802, 1804, 1807, 1810, 1814, + 1818, 1824, 1826, 1828, 1832, 1834, 1836, 1838, + 1840, 1843, 1847, 1849, 1852, 1857, 1860, 1862, + 1864, 1866, 1897, 1902, 1904, 1907, 1913, 1921, + 1927, 1936, 1941, 1952, 1960, 1965, 1973, 1977, + 1984, 1988, 1995, 2001, 2010, 2015, 2024, 2033, + 2037, 2039, 2044, 2046, 2052, 2055, 2060, 2062, + 2084, 2090, 2095, 2101, 2103, 2106, 2109, 2113, + 2118, 2120, 2123, 2131, 2135, 2145, 2147, 2154, + 2159, 2167, 2174, 2179, 2187, 2190, 2196, 2199, + 2201, 2203, 2205, 2207, 2210, 2212, 2214, 2230, + 2233, 2235, 2237, 2244, 2249, 2251, 2254, 2262, + 2265, 2271, 2276, 2281, 2288, 2294, 2298, 2300, + 2303, 2311, 2317, 2319, 2321, 2323, 2329, 2331, + 2355, 2359, 2361, 2367, 2371, 2373, 2379, 2383, + 2390, 2394, 2400, 2409, 2412, 2416, 2424, 2427, + 2434, 2437, 2443, 2445, 2451, 2456, 2461, 2467, + 2472, 2474, 2476, 2478, 2480, 2482, 2489, 2495, + 2499, 2501, 2504, 2507, 2511, 2515, 2521, 2523, + 2525, 2527, 2529, 2531, 2537, 2539, 2541, 2542, + 2544, 2546, 2548, 2554, 2556, 2558, 2559, 2566, + 2568, 2571, 2575, 2578, 2581, 2585, 2588, 2591, + 2598, 2600, 2625, 2627, 2652, 2654, 2656, 2680, + 2682, 2684, 2686, 2688, 2691, 2693, 2697, 2699, + 2730, 2733, 2738, 2762, 2765, 2767, 2770, 2773, + 2777, 2780, 2783, 2787, 2788, 2844, 2900, 2930, + 2934, 2937, 2944, 2950, 2953, 2956, 2959, 2963, + 2965, 2983, 2987, 2992, 2995, 2998, 3002, 3005, + 3008, 3012, 3068, 3124, 3154, 3158, 3163, 3167, + 3169, 3173, 3179, 3183, 3186, 3190, 3193, 3196, + 3199, 3202, 3215, 3218, 3226, 3228, 3230, 3233, + 3239, 3251, 3257, 3261, 3266, 3272, 3277, 3280, + 3290, 3292, 3295, 3300, 3302, 3305, 3308, 3312, + 3315, 3318, 3325, 3327, 3329, 3331, 3333, 3336, + 3340, 3342, 3345, 3350, 3353, 3355, 3357, 3359, + 3388, 3393, 3395, 3398, 3401, 3405, 3409, 3415, + 3422, 3427, 3431, 3437, 3440, 3445, 3448, 3454, + 3460, 
3463, 3469, 3471, 3477, 3480, 3482, 3486, + 3488, 3494, 3496, 3501, 3503, 3525, 3528, 3532, + 3537, 3539, 3542, 3545, 3547, 3550, 3552, 3555, + 3558, 3560, 3566, 3568, 3571, 3574, 3577, 3579, + 3581, 3587, 3590, 3596, 3599, 3606, 3608, 3610, + 3612, 3614, 3616, 3619, 3621, 3623, 3639, 3642, + 3644, 3646, 3651, 3654, 3657, 3659, 3661, 3664, + 3667, 3669, 3673, 3678, 3682, 3685, 3689, 3691, + 3694, 3702, 3708, 3710, 3712, 3714, 3720, 3722, + 3746, 3749, 3751, 3754, 3757, 3759, 3762, 3765, + 3768, 3770, 3774, 3782, 3784, 3786, 3788, 3790, + 3793, 3796, 3798, 3800, 3802, 3805, 3808, 3813, + 3815, 3817, 3819, 3821, 3823, 3825, 3832, 3836, + 3840, 3842, 3845, 3848, 3852, 3856, 3862, 3864, + 3866, 3870, 3872, 3874, 3876, 3878, 3881, 3885, + 3887, 3890, 3895, 3898, 3900, 3902, 3904, 3935, + 3940, 3942, 3945, 3951, 3959, 3965, 3974, 3979, + 3990, 3998, 4003, 4011, 4015, 4022, 4026, 4033, + 4039, 4048, 4053, 4062, 4071, 4075, 4077, 4082, + 4084, 4090, 4093, 4098, 4100, 4122, 4128, 4133, + 4139, 4141, 4144, 4147, 4151, 4156, 4158, 4161, + 4169, 4173, 4183, 4185, 4192, 4197, 4205, 4212, + 4217, 4225, 4228, 4234, 4237, 4239, 4241, 4243, + 4245, 4248, 4250, 4252, 4268, 4271, 4273, 4275, + 4282, 4287, 4289, 4292, 4300, 4303, 4309, 4314, + 4319, 4326, 4332, 4336, 4338, 4341, 4349, 4355, + 4357, 4359, 4361, 4367, 4369, 4393, 4397, 4399, + 4405, 4409, 4411, 4417, 4421, 4428, 4432, 4438, + 4447, 4450, 4454, 4462, 4465, 4472, 4475, 4481, + 4483, 4489, 4494, 4499, 4505, 4510, 4512, 4514, + 4516, 4518, 4520, 4527, 4533, 4537, 4539, 4542, + 4545, 4549, 4553, 4559, 4561, 4563, 4565, 4567, + 4569, 4575, 4577, 4579, 4580, 4582, 4584, 4586, + 4592, 4594, 4596, 4597, 4604, 4629, 4631, 4656, + 4658, 4660, 4684, 4686, 4688, 4690, 4692, 4695, + 4697, 4701, 4703, 4734, 4737, 4742, 4766, 4769, + 4771, 4774, 4777, 4781, 4784, 4787, 4791, 4792, + 4848, 4904, 4934, 4938, 4941, 4948, 4956, 4958, + 4960, 4962, 4965, 4969, 4971, 4974, 4979, 4982, + 4984, 4986, 4988, 5017, 5022, 5024, 5027, 5030, + 5034, 5038, 5044, 5051, 5056, 5060, 5066, 5069, + 5074, 5077, 5083, 5089, 5092, 5098, 5100, 5106, + 5109, 5111, 5115, 5117, 5123, 5125, 5130, 5132, + 5154, 5157, 5161, 5166, 5168, 5171, 5174, 5176, + 5179, 5181, 5184, 5187, 5189, 5195, 5197, 5200, + 5203, 5206, 5208, 5210, 5216, 5219, 5225, 5228, + 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5259, + 5262, 5264, 5266, 5271, 5274, 5277, 5279, 5281, + 5284, 5287, 5289, 5293, 5298, 5302, 5305, 5309, + 5311, 5314, 5321, 5327, 5329, 5331, 5333, 5339, + 5341, 5365, 5368, 5370, 5373, 5376, 5378, 5381, + 5384, 5387, 5389, 5393, 5401, 5403, 5405, 5407, + 5409, 5412, 5415, 5417, 5419, 5421, 5424, 5427, + 5432, 5434, 5436, 5438, 5440, 5442, 5444, 5451, + 5455, 5459, 5461, 5464, 5467, 5471, 5475, 5481, + 5483, 5485, 5487, 5493, 5495, 5497, 5498, 5505, + 5507, 5515, 5519, 5521, 5523, 5525, 5527, 5530, + 5534, 5536, 5539, 5544, 5547, 5549, 5551, 5553, + 5584, 5589, 5591, 5594, 5600, 5608, 5614, 5623, + 5628, 5639, 5647, 5652, 5660, 5664, 5671, 5675, + 5682, 5688, 5697, 5702, 5711, 5720, 5724, 5726, + 5731, 5733, 5739, 5742, 5747, 5749, 5771, 5777, + 5782, 5788, 5790, 5793, 5796, 5800, 5805, 5807, + 5810, 5818, 5822, 5832, 5834, 5841, 5846, 5854, + 5861, 5866, 5874, 5877, 5883, 5886, 5888, 5890, + 5892, 5894, 5897, 5899, 5901, 5917, 5920, 5922, + 5924, 5931, 5936, 5938, 5941, 5949, 5952, 5958, + 5963, 5968, 5975, 5981, 5985, 5987, 5990, 5998, + 6004, 6006, 6008, 6010, 6016, 6018, 6042, 6046, + 6048, 6054, 6058, 6060, 6066, 6070, 6077, 6081, + 6087, 6096, 6099, 6103, 6111, 6114, 6121, 6124, + 6130, 
6132, 6138, 6143, 6148, 6154, 6159, 6161, + 6163, 6165, 6167, 6169, 6176, 6182, 6186, 6188, + 6191, 6194, 6198, 6202, 6208, 6210, 6212, 6214, + 6216, 6218, 6224, 6226, 6228, 6229, 6231, 6233, + 6237, 6240, 6242, 6244, 6246, 6249, 6253, 6255, + 6258, 6263, 6266, 6268, 6270, 6272, 6303, 6308, + 6310, 6313, 6319, 6321, 6323, 6325, 6328, 6332, + 6334, 6337, 6342, 6345, 6347, 6349, 6351, 6380, + 6385, 6387, 6390, 6393, 6397, 6401, 6407, 6414, + 6419, 6423, 6429, 6432, 6437, 6440, 6446, 6452, + 6455, 6461, 6463, 6469, 6472, 6474, 6478, 6480, + 6486, 6488, 6493, 6495, 6517, 6520, 6524, 6529, + 6531, 6534, 6537, 6539, 6542, 6544, 6547, 6550, + 6552, 6558, 6560, 6563, 6566, 6569, 6571, 6573, + 6579, 6582, 6588, 6591, 6598, 6600, 6602, 6604, + 6606, 6608, 6611, 6613, 6615, 6631, 6634, 6636, + 6638, 6643, 6646, 6649, 6651, 6653, 6656, 6659, + 6661, 6665, 6670, 6674, 6677, 6681, 6683, 6686, + 6694, 6700, 6702, 6704, 6706, 6712, 6714, 6738, + 6741, 6743, 6746, 6749, 6751, 6754, 6757, 6760, + 6762, 6766, 6774, 6776, 6778, 6780, 6782, 6785, + 6788, 6790, 6792, 6794, 6797, 6800, 6805, 6807, + 6809, 6811, 6813, 6815, 6817, 6824, 6828, 6832, + 6834, 6837, 6840, 6844, 6848, 6854, 6856, 6858, + 6862, 6864, 6866, 6868, 6870, 6876, 6878, 6880, + 6881, 6888, 6896, 6902, 6911, 6916, 6927, 6935, + 6940, 6948, 6952, 6959, 6963, 6970, 6976, 6985, + 6990, 6999, 7008, 7012, 7014, 7019, 7021, 7027, + 7030, 7035, 7037, 7059, 7065, 7070, 7076, 7078, + 7081, 7084, 7088, 7093, 7095, 7098, 7106, 7110, + 7120, 7122, 7129, 7134, 7142, 7149, 7154, 7162, + 7165, 7171, 7174, 7176, 7178, 7180, 7182, 7185, + 7187, 7189, 7205, 7208, 7210, 7212, 7219, 7224, + 7226, 7229, 7237, 7240, 7246, 7251, 7256, 7263, + 7269, 7273, 7275, 7278, 7286, 7292, 7294, 7296, + 7298, 7304, 7306, 7330, 7334, 7336, 7342, 7346, + 7348, 7354, 7358, 7365, 7369, 7375, 7384, 7387, + 7391, 7399, 7402, 7409, 7412, 7418, 7420, 7426, + 7431, 7436, 7442, 7447, 7449, 7451, 7453, 7455, + 7457, 7464, 7470, 7474, 7476, 7479, 7482, 7486, + 7490, 7496, 7498, 7500, 7502, 7504, 7506, 7512, + 7514, 7516, 7517, 7520, 7524, 7526, 7544, 7548, + 7553, 7556, 7559, 7563, 7566, 7569, 7573, 7629, + 7685, 7718, 7722, 7727, 7729, 7730, 7732, 7736, + 7739, 7744, 7750, 7754, 7757, 7761, 7764, 7768, + 7771, 7775, 7788, 7791, 7793, 7795, 7797, 7800, + 7804, 7806, 7809, 7814, 7817, 7819, 7821, 7823, + 7852, 7857, 7859, 7862, 7865, 7869, 7873, 7879, + 7886, 7891, 7895, 7901, 7904, 7909, 7912, 7918, + 7924, 7927, 7933, 7935, 7941, 7944, 7946, 7950, + 7952, 7958, 7960, 7965, 7967, 7989, 7992, 7996, + 8001, 8003, 8006, 8009, 8011, 8014, 8016, 8019, + 8022, 8024, 8030, 8032, 8035, 8038, 8041, 8043, + 8045, 8051, 8054, 8060, 8063, 8070, 8072, 8074, + 8076, 8078, 8080, 8083, 8085, 8087, 8103, 8106, + 8108, 8110, 8115, 8118, 8121, 8123, 8125, 8128, + 8131, 8133, 8137, 8142, 8146, 8149, 8153, 8155, + 8158, 8166, 8172, 8174, 8176, 8178, 8184, 8186, + 8210, 8213, 8215, 8218, 8221, 8223, 8226, 8229, + 8232, 8234, 8238, 8246, 8248, 8250, 8252, 8254, + 8257, 8260, 8262, 8264, 8266, 8269, 8272, 8277, + 8279, 8281, 8283, 8285, 8287, 8289, 8296, 8300, + 8304, 8306, 8309, 8312, 8316, 8320, 8326, 8328, + 8330, 8334, 8336, 8338, 8340, 8342, 8345, 8349, + 8351, 8354, 8359, 8362, 8364, 8366, 8368, 8399, + 8404, 8406, 8409, 8415, 8423, 8429, 8438, 8443, + 8454, 8462, 8467, 8475, 8479, 8486, 8490, 8497, + 8503, 8512, 8517, 8526, 8535, 8539, 8541, 8546, + 8548, 8554, 8557, 8562, 8564, 8586, 8592, 8597, + 8603, 8605, 8608, 8611, 8615, 8620, 8622, 8625, + 8633, 8637, 8647, 8649, 8656, 8661, 8669, 8676, + 8681, 
8689, 8692, 8698, 8701, 8703, 8705, 8707, + 8709, 8712, 8714, 8716, 8732, 8735, 8737, 8739, + 8746, 8751, 8753, 8756, 8764, 8767, 8773, 8778, + 8783, 8790, 8796, 8800, 8802, 8805, 8813, 8819, + 8821, 8823, 8825, 8831, 8833, 8857, 8861, 8863, + 8869, 8873, 8875, 8881, 8885, 8892, 8896, 8902, + 8911, 8914, 8918, 8926, 8929, 8936, 8939, 8945, + 8947, 8953, 8958, 8963, 8969, 8974, 8976, 8978, + 8980, 8982, 8984, 8991, 8997, 9001, 9003, 9006, + 9009, 9013, 9017, 9023, 9025, 9027, 9029, 9031, + 9033, 9039, 9041, 9043, 9044, 9046, 9048, 9050, + 9056, 9058, 9060, 9061, 9068, 9076, 9078, 9080, + 9083, 9089, 9101, 9107, 9111, 9116, 9122, 9127, + 9130, 9140, 9142, 9145, 9153, 9156, 9159, 9183, + 9204, 9225, 9246, 9265, 9286, 9307, 9328, 9352, + 9374, 9396, 9418, 9439, 9463, 9484, 9505, 9526, + 9548, 9570, 9592, 9613, 9634, 9655, 9676, 9697, + 9718, 9739, 9760, 9781, +} + +var _graphclust_indicies []int16 = []int16{ + 0, 1, 3, 2, 2, 3, 3, 2, + 3, 3, 2, 3, 3, 3, 2, 3, + 2, 3, 3, 2, 3, 3, 3, 3, + 2, 3, 3, 2, 2, 3, 3, 2, + 3, 2, 4, 5, 6, 7, 8, 10, + 11, 12, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 9, 13, 2, 3, + 3, 3, 3, 2, 3, 2, 3, 3, + 2, 2, 2, 3, 2, 2, 2, 3, + 3, 3, 3, 2, 2, 2, 2, 2, + 2, 3, 2, 2, 2, 2, 2, 2, + 3, 2, 2, 2, 2, 3, 3, 3, + 3, 2, 3, 3, 3, 3, 3, 2, + 3, 3, 2, 3, 3, 3, 3, 2, + 3, 3, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 2, 3, + 3, 2, 2, 2, 2, 2, 2, 3, + 3, 2, 3, 3, 3, 3, 3, 2, + 3, 3, 2, 3, 2, 3, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 2, + 3, 2, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 2, + 3, 3, 2, 3, 3, 3, 2, 3, + 3, 3, 3, 2, 3, 2, 3, 3, + 2, 3, 3, 2, 3, 2, 2, 2, + 3, 3, 2, 3, 3, 2, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 3, 2, 2, 2, + 3, 3, 3, 2, 3, 2, 3, 2, + 3, 3, 3, 3, 3, 2, 3, 3, + 2, 53, 54, 55, 56, 57, 2, 3, + 58, 2, 53, 54, 59, 55, 56, 57, + 2, 3, 2, 3, 2, 3, 2, 3, + 2, 3, 2, 60, 61, 2, 3, 2, + 3, 2, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, + 76, 2, 3, 3, 2, 3, 2, 3, + 2, 3, 3, 3, 3, 2, 3, 3, + 2, 2, 2, 3, 3, 2, 3, 2, + 3, 3, 2, 2, 2, 3, 3, 2, + 3, 3, 3, 2, 3, 3, 3, 3, + 2, 3, 3, 3, 2, 3, 3, 2, + 77, 78, 63, 2, 3, 2, 3, 3, + 2, 79, 80, 81, 82, 83, 84, 85, + 2, 86, 87, 88, 89, 90, 2, 3, + 2, 3, 2, 3, 2, 3, 3, 3, + 3, 3, 2, 3, 2, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 104, 108, + 109, 110, 111, 112, 2, 3, 3, 2, + 2, 3, 2, 2, 3, 3, 3, 2, + 3, 2, 3, 3, 2, 2, 2, 3, + 3, 3, 2, 3, 2, 3, 3, 3, + 2, 3, 3, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 2, 3, 2, 2, + 3, 3, 3, 2, 2, 2, 3, 2, + 3, 3, 2, 3, 2, 3, 3, 2, + 3, 3, 2, 113, 114, 115, 116, 2, + 3, 2, 3, 2, 3, 2, 3, 2, + 117, 2, 3, 2, 118, 119, 120, 121, + 122, 123, 2, 3, 3, 3, 2, 2, + 2, 2, 3, 3, 2, 3, 3, 2, + 2, 2, 3, 3, 3, 3, 2, 124, + 125, 126, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 2, 127, 128, 129, + 2, 130, 2, 2, 130, 2, 130, 130, + 2, 130, 130, 2, 130, 130, 130, 2, + 130, 2, 130, 130, 2, 130, 130, 130, + 130, 2, 130, 130, 2, 2, 130, 130, + 2, 130, 2, 131, 132, 133, 134, 135, + 136, 137, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 22, 151, + 152, 153, 154, 155, 156, 157, 158, 159, + 138, 2, 130, 130, 130, 130, 2, 130, + 2, 130, 130, 2, 3, 3, 2, 2, + 3, 130, 130, 2, 130, 130, 2, 130, + 2, 3, 130, 130, 130, 3, 3, 2, + 130, 130, 130, 2, 2, 2, 130, 2, + 3, 3, 130, 130, 3, 2, 130, 130, + 130, 2, 130, 2, 130, 2, 130, 2, + 3, 2, 2, 130, 130, 2, 130, 2, + 3, 130, 130, 3, 130, 2, 3, 130, + 130, 3, 3, 130, 130, 2, 130, 130, + 3, 2, 130, 130, 130, 3, 3, 3, + 2, 130, 3, 130, 2, 2, 2, 3, + 
2, 2, 2, 130, 130, 130, 3, 130, + 3, 2, 130, 130, 3, 3, 3, 130, + 130, 130, 2, 130, 130, 3, 3, 2, + 2, 2, 130, 130, 130, 2, 130, 2, + 3, 130, 130, 130, 130, 3, 130, 3, + 3, 2, 130, 3, 130, 2, 130, 2, + 130, 3, 130, 130, 2, 130, 2, 130, + 130, 130, 130, 3, 2, 3, 130, 2, + 130, 130, 130, 130, 2, 130, 2, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 2, 3, 130, 130, + 3, 130, 2, 3, 130, 130, 130, 2, + 130, 3, 130, 130, 130, 2, 130, 2, + 130, 130, 2, 130, 130, 2, 3, 130, + 3, 2, 130, 130, 130, 2, 3, 130, + 2, 130, 130, 2, 130, 130, 3, 130, + 3, 3, 130, 2, 130, 130, 3, 2, + 130, 130, 130, 130, 3, 130, 130, 3, + 130, 2, 130, 2, 3, 3, 3, 130, + 130, 3, 2, 130, 2, 130, 2, 3, + 3, 3, 3, 130, 130, 3, 130, 2, + 3, 130, 130, 3, 130, 3, 2, 3, + 130, 3, 130, 2, 3, 130, 130, 130, + 130, 3, 130, 2, 130, 130, 2, 181, + 182, 183, 184, 185, 2, 130, 58, 2, + 130, 2, 130, 2, 130, 2, 130, 2, + 186, 187, 2, 130, 2, 130, 2, 188, + 189, 190, 191, 66, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 2, 130, + 130, 2, 130, 2, 130, 2, 130, 130, + 130, 3, 3, 130, 2, 130, 2, 130, + 2, 3, 130, 2, 130, 3, 2, 3, + 130, 130, 130, 3, 130, 3, 2, 130, + 2, 3, 130, 3, 130, 3, 130, 2, + 130, 130, 3, 130, 2, 130, 130, 130, + 130, 2, 130, 3, 3, 130, 130, 3, + 2, 130, 130, 3, 130, 3, 2, 202, + 203, 189, 2, 130, 2, 130, 130, 2, + 204, 205, 206, 207, 208, 209, 210, 2, + 211, 212, 213, 214, 215, 2, 130, 2, + 130, 2, 130, 2, 130, 130, 130, 130, + 130, 2, 130, 2, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 2, 130, 3, 130, 2, + 2, 130, 3, 2, 3, 3, 2, 130, + 3, 130, 130, 2, 130, 2, 3, 130, + 3, 130, 3, 2, 2, 130, 2, 3, + 130, 130, 3, 130, 3, 130, 2, 130, + 3, 130, 2, 130, 130, 3, 130, 3, + 2, 130, 130, 3, 3, 3, 3, 130, + 130, 2, 3, 130, 2, 3, 3, 130, + 2, 130, 3, 130, 3, 130, 3, 130, + 2, 3, 2, 130, 130, 3, 3, 130, + 3, 130, 2, 2, 2, 130, 130, 3, + 130, 3, 130, 2, 2, 130, 3, 3, + 130, 3, 130, 2, 3, 130, 3, 130, + 2, 3, 3, 130, 130, 2, 3, 3, + 3, 130, 130, 2, 239, 240, 115, 241, + 2, 130, 2, 130, 2, 130, 2, 242, + 2, 130, 2, 243, 244, 245, 246, 247, + 248, 2, 3, 3, 130, 130, 130, 2, + 2, 2, 2, 130, 130, 2, 130, 130, + 2, 2, 2, 130, 130, 130, 130, 2, + 249, 250, 251, 2, 130, 130, 130, 130, + 130, 2, 130, 2, 130, 2, 252, 2, + 3, 2, 253, 2, 254, 255, 256, 258, + 257, 2, 130, 2, 2, 130, 130, 3, + 2, 3, 2, 259, 2, 260, 261, 262, + 264, 263, 2, 3, 2, 2, 3, 3, + 79, 80, 81, 82, 83, 84, 2, 3, + 1, 265, 265, 3, 1, 265, 266, 3, + 1, 267, 268, 267, 268, 268, 267, 268, + 268, 267, 268, 268, 268, 267, 268, 267, + 268, 268, 267, 268, 268, 268, 268, 267, + 268, 268, 267, 267, 268, 268, 267, 268, + 267, 269, 270, 271, 272, 273, 275, 276, + 277, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, + 294, 295, 296, 274, 278, 267, 268, 268, + 268, 268, 267, 268, 267, 268, 268, 267, + 267, 267, 268, 267, 267, 267, 268, 268, + 268, 268, 267, 267, 267, 267, 267, 267, + 268, 267, 267, 267, 267, 267, 267, 268, + 267, 267, 267, 267, 268, 268, 268, 268, + 267, 268, 268, 268, 268, 268, 267, 268, + 268, 267, 268, 268, 268, 268, 267, 268, + 268, 267, 267, 267, 267, 267, 267, 268, + 268, 268, 268, 268, 268, 267, 268, 268, + 267, 267, 267, 267, 267, 267, 268, 268, + 267, 268, 268, 268, 268, 268, 267, 268, + 268, 267, 268, 267, 268, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 267, 268, + 267, 297, 298, 299, 300, 301, 302, 303, + 304, 305, 306, 307, 308, 309, 
310, 311, + 312, 313, 314, 315, 316, 317, 267, 268, + 268, 267, 268, 268, 268, 267, 268, 268, + 268, 268, 267, 268, 267, 268, 268, 267, + 268, 268, 267, 268, 267, 267, 267, 268, + 268, 267, 268, 268, 267, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 268, 267, 267, 267, 268, + 268, 268, 267, 268, 267, 268, 267, 268, + 268, 268, 268, 268, 267, 268, 268, 267, + 318, 319, 320, 321, 322, 267, 268, 323, + 267, 318, 319, 324, 320, 321, 322, 267, + 268, 267, 268, 267, 268, 267, 268, 267, + 268, 267, 325, 326, 267, 268, 267, 268, + 267, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 267, 268, 268, 267, 268, 267, 268, 267, + 268, 268, 268, 268, 267, 268, 268, 267, + 267, 267, 268, 268, 267, 268, 267, 268, + 268, 267, 267, 267, 268, 268, 267, 268, + 268, 268, 267, 268, 268, 268, 268, 267, + 268, 268, 268, 267, 268, 268, 267, 342, + 343, 328, 267, 268, 267, 268, 268, 267, + 344, 345, 346, 347, 348, 349, 350, 267, + 351, 352, 353, 354, 355, 267, 268, 267, + 268, 267, 268, 267, 268, 268, 268, 268, + 268, 267, 268, 267, 356, 357, 358, 359, + 360, 361, 362, 363, 364, 365, 366, 367, + 368, 369, 370, 371, 372, 369, 373, 374, + 375, 376, 377, 267, 268, 268, 267, 267, + 268, 267, 267, 268, 268, 268, 267, 268, + 267, 268, 268, 267, 267, 267, 268, 268, + 268, 267, 268, 267, 268, 268, 268, 267, + 268, 268, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 267, 268, 267, 267, 268, + 268, 268, 267, 267, 267, 268, 267, 268, + 268, 267, 268, 267, 268, 268, 267, 268, + 268, 267, 378, 379, 380, 381, 267, 268, + 267, 268, 267, 268, 267, 268, 267, 382, + 267, 268, 267, 383, 384, 385, 386, 387, + 388, 267, 268, 268, 268, 267, 267, 267, + 267, 268, 268, 267, 268, 268, 267, 267, + 267, 268, 268, 268, 268, 267, 389, 390, + 391, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 267, 392, 393, 394, 267, + 395, 267, 395, 267, 267, 395, 395, 267, + 395, 395, 267, 395, 395, 395, 267, 395, + 267, 395, 395, 267, 395, 395, 395, 395, + 267, 395, 395, 267, 267, 395, 395, 267, + 395, 267, 396, 397, 398, 399, 400, 401, + 402, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 413, 414, 415, 287, 416, 417, + 418, 419, 420, 421, 422, 423, 424, 403, + 267, 395, 395, 395, 395, 267, 395, 267, + 395, 395, 267, 268, 268, 267, 267, 268, + 395, 395, 267, 395, 395, 267, 395, 267, + 268, 395, 395, 395, 268, 268, 267, 395, + 395, 395, 267, 267, 267, 395, 267, 268, + 268, 395, 395, 268, 267, 395, 395, 395, + 267, 395, 267, 395, 267, 395, 267, 268, + 267, 267, 395, 395, 267, 395, 267, 268, + 395, 395, 268, 395, 267, 268, 395, 395, + 268, 268, 395, 395, 267, 395, 395, 268, + 267, 395, 395, 395, 268, 268, 268, 267, + 395, 268, 395, 267, 267, 267, 268, 267, + 267, 267, 395, 395, 395, 268, 395, 268, + 267, 395, 395, 268, 268, 268, 395, 395, + 395, 267, 395, 395, 268, 268, 267, 267, + 267, 395, 395, 395, 267, 395, 267, 268, + 395, 395, 395, 395, 268, 395, 268, 268, + 267, 395, 268, 395, 267, 395, 267, 395, + 268, 395, 395, 267, 395, 267, 395, 395, + 395, 395, 268, 267, 268, 395, 267, 395, + 395, 395, 395, 267, 395, 267, 425, 426, + 427, 428, 429, 430, 431, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 442, + 443, 444, 445, 267, 268, 395, 395, 268, + 395, 267, 268, 395, 395, 395, 267, 395, + 268, 395, 395, 395, 267, 395, 267, 395, + 395, 267, 395, 395, 267, 268, 395, 268, + 267, 395, 395, 395, 267, 268, 395, 267, + 395, 395, 267, 395, 395, 268, 395, 268, + 268, 395, 267, 395, 395, 268, 267, 395, + 395, 395, 395, 268, 395, 395, 268, 395, + 267, 395, 267, 268, 268, 268, 395, 395, + 268, 267, 395, 
267, 395, 267, 268, 268, + 268, 268, 395, 395, 268, 395, 267, 268, + 395, 395, 268, 395, 268, 267, 268, 395, + 268, 395, 267, 268, 395, 395, 395, 395, + 268, 395, 267, 395, 395, 267, 446, 447, + 448, 449, 450, 267, 395, 323, 267, 395, + 267, 395, 267, 395, 267, 395, 267, 451, + 452, 267, 395, 267, 395, 267, 453, 454, + 455, 456, 331, 457, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 267, 395, 395, + 267, 395, 267, 395, 267, 395, 395, 395, + 268, 268, 395, 267, 395, 267, 395, 267, + 268, 395, 267, 395, 268, 267, 268, 395, + 395, 395, 268, 395, 268, 267, 395, 267, + 268, 395, 268, 395, 268, 395, 267, 395, + 395, 268, 395, 267, 395, 395, 395, 395, + 267, 395, 268, 268, 395, 395, 268, 267, + 395, 395, 268, 395, 268, 267, 467, 468, + 454, 267, 395, 267, 395, 395, 267, 469, + 470, 471, 472, 473, 474, 475, 267, 476, + 477, 478, 479, 480, 267, 395, 267, 395, + 267, 395, 267, 395, 395, 395, 395, 395, + 267, 395, 267, 481, 482, 483, 484, 485, + 486, 487, 488, 489, 490, 491, 492, 493, + 494, 495, 496, 497, 498, 499, 500, 501, + 502, 503, 267, 395, 268, 395, 267, 267, + 395, 268, 267, 268, 268, 267, 395, 268, + 395, 395, 267, 395, 267, 268, 395, 268, + 395, 268, 267, 267, 395, 267, 268, 395, + 395, 268, 395, 268, 395, 267, 395, 268, + 395, 267, 395, 395, 268, 395, 268, 267, + 395, 395, 268, 268, 268, 268, 395, 395, + 267, 268, 395, 267, 268, 268, 395, 267, + 395, 268, 395, 268, 395, 268, 395, 267, + 268, 267, 395, 395, 268, 268, 395, 268, + 395, 267, 267, 267, 395, 395, 268, 395, + 268, 395, 267, 267, 395, 268, 268, 395, + 268, 395, 267, 268, 395, 268, 395, 267, + 268, 268, 395, 395, 267, 268, 268, 268, + 395, 395, 267, 504, 505, 380, 506, 267, + 395, 267, 395, 267, 395, 267, 507, 267, + 395, 267, 508, 509, 510, 511, 512, 513, + 267, 268, 268, 395, 395, 395, 267, 267, + 267, 267, 395, 395, 267, 395, 395, 267, + 267, 267, 395, 395, 395, 395, 267, 514, + 515, 516, 267, 395, 395, 395, 395, 395, + 267, 395, 267, 395, 267, 517, 267, 268, + 267, 518, 267, 519, 520, 521, 523, 522, + 267, 395, 267, 267, 395, 395, 268, 267, + 268, 267, 524, 267, 525, 526, 527, 529, + 528, 267, 268, 267, 267, 268, 268, 344, + 345, 346, 347, 348, 349, 267, 268, 267, + 268, 268, 267, 266, 268, 268, 267, 266, + 268, 267, 266, 268, 267, 531, 532, 530, + 267, 266, 268, 267, 266, 268, 267, 533, + 534, 535, 536, 537, 530, 267, 538, 267, + 297, 298, 299, 533, 534, 539, 300, 301, + 302, 303, 304, 305, 306, 307, 308, 309, + 310, 311, 312, 313, 314, 315, 316, 317, + 267, 540, 538, 297, 298, 299, 541, 535, + 536, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, 317, 267, 540, 267, 542, 540, + 297, 298, 299, 543, 536, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, 317, 267, + 542, 267, 267, 542, 544, 267, 542, 267, + 545, 546, 267, 540, 267, 267, 542, 267, + 540, 267, 540, 327, 328, 329, 330, 331, + 332, 333, 547, 335, 336, 337, 338, 339, + 340, 341, 549, 550, 551, 552, 553, 554, + 549, 550, 551, 552, 553, 554, 549, 548, + 555, 267, 268, 538, 267, 556, 556, 556, + 542, 267, 297, 298, 299, 541, 539, 300, + 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, + 317, 267, 545, 557, 267, 267, 540, 556, + 556, 542, 556, 556, 542, 556, 556, 556, + 542, 556, 556, 542, 556, 556, 542, 556, + 556, 267, 542, 542, 551, 552, 553, 554, + 548, 549, 551, 552, 553, 554, 548, 549, + 551, 552, 553, 554, 548, 549, 551, 552, + 553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 551, 552, 553, 554, 548, 549, + 551, 552, 553, 554, 548, 549, 551, 552, + 
553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 550, 555, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 551, 555, 553, 554, 548, 549, + 550, 551, 553, 554, 548, 549, 550, 551, + 553, 554, 548, 549, 550, 551, 553, 554, + 548, 549, 550, 551, 553, 558, 557, 552, + 267, 555, 556, 267, 540, 542, 268, 268, + 267, 559, 560, 561, 562, 563, 530, 267, + 268, 323, 268, 268, 268, 267, 268, 268, + 267, 395, 268, 267, 395, 268, 267, 268, + 395, 268, 267, 530, 267, 564, 566, 567, + 568, 569, 570, 571, 566, 567, 568, 569, + 570, 571, 566, 530, 565, 555, 267, 268, + 538, 268, 267, 540, 540, 540, 542, 267, + 540, 540, 542, 540, 540, 542, 540, 540, + 540, 542, 540, 540, 542, 540, 540, 542, + 540, 540, 267, 542, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 567, 555, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 568, 555, 570, 571, 565, 566, + 567, 568, 570, 571, 565, 566, 567, 568, + 570, 571, 565, 566, 567, 568, 570, 571, + 565, 566, 567, 568, 570, 572, 573, 569, + 267, 555, 540, 268, 540, 542, 268, 542, + 268, 267, 540, 574, 575, 530, 267, 268, + 267, 268, 268, 268, 267, 577, 578, 579, + 580, 576, 267, 581, 582, 530, 267, 266, + 268, 267, 268, 266, 268, 267, 583, 530, + 267, 268, 268, 267, 584, 530, 267, 268, + 268, 267, 585, 586, 587, 588, 589, 590, + 591, 592, 593, 594, 595, 530, 267, 268, + 596, 267, 344, 345, 346, 347, 348, 349, + 597, 267, 598, 267, 268, 267, 395, 268, + 267, 268, 395, 268, 395, 268, 267, 395, + 395, 268, 395, 268, 395, 268, 395, 268, + 395, 268, 267, 268, 268, 395, 395, 268, + 267, 395, 395, 268, 267, 395, 268, 395, + 268, 267, 268, 395, 268, 395, 268, 267, + 395, 268, 395, 268, 267, 395, 268, 267, + 395, 395, 268, 268, 395, 268, 395, 268, + 395, 267, 576, 267, 599, 576, 267, 322, + 530, 600, 530, 267, 268, 267, 266, 3, + 1, 266, 3, 1, 602, 603, 601, 1, + 266, 3, 1, 266, 3, 1, 604, 605, + 606, 607, 608, 601, 1, 609, 610, 612, + 611, 611, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 611, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 611, 611, 612, 612, 611, 612, 611, 613, + 614, 615, 616, 617, 619, 620, 621, 623, + 624, 625, 626, 627, 628, 629, 630, 631, + 632, 633, 634, 635, 636, 637, 638, 639, + 640, 618, 622, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 611, 611, + 612, 611, 611, 611, 612, 612, 612, 612, + 611, 611, 611, 611, 611, 611, 612, 611, + 611, 611, 611, 611, 611, 612, 611, 611, + 611, 611, 612, 612, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 612, 611, 612, 612, 611, + 611, 611, 611, 611, 611, 612, 612, 612, + 612, 612, 612, 611, 612, 612, 611, 611, + 611, 611, 611, 611, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 611, 612, 611, 641, + 642, 643, 644, 645, 646, 647, 648, 649, + 650, 
651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 612, 612, + 611, 612, 611, 611, 611, 612, 612, 611, + 612, 612, 611, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 611, 611, 611, 612, 612, 612, + 611, 612, 611, 612, 611, 612, 612, 612, + 612, 612, 611, 612, 612, 611, 662, 663, + 664, 665, 666, 611, 612, 667, 611, 662, + 663, 668, 664, 665, 666, 611, 612, 611, + 612, 611, 612, 611, 612, 611, 612, 611, + 669, 670, 611, 612, 611, 612, 611, 671, + 672, 673, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 683, 684, 685, 611, 612, + 612, 611, 612, 611, 612, 611, 612, 612, + 612, 612, 611, 612, 612, 611, 611, 611, + 612, 612, 611, 612, 611, 612, 612, 611, + 611, 611, 612, 612, 611, 612, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 612, 611, 612, 612, 611, 686, 687, 672, + 611, 612, 611, 612, 612, 611, 688, 689, + 690, 691, 692, 693, 694, 611, 695, 696, + 697, 698, 699, 611, 612, 611, 612, 611, + 612, 611, 612, 612, 612, 612, 612, 611, + 612, 611, 700, 701, 702, 703, 704, 705, + 706, 707, 708, 709, 710, 711, 712, 713, + 714, 715, 716, 713, 717, 718, 719, 720, + 721, 611, 612, 612, 611, 611, 612, 611, + 611, 612, 612, 612, 611, 612, 611, 612, + 612, 611, 611, 611, 612, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 612, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 612, 611, 611, 612, 612, 612, + 611, 611, 611, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 611, 612, 612, 611, + 722, 723, 724, 725, 611, 612, 611, 612, + 611, 612, 611, 612, 611, 726, 611, 612, + 611, 727, 728, 729, 730, 731, 732, 611, + 612, 612, 612, 611, 611, 611, 611, 612, + 612, 611, 612, 612, 611, 611, 611, 612, + 612, 612, 612, 611, 733, 734, 735, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 736, 737, 738, 611, 739, 611, + 739, 611, 611, 739, 739, 611, 739, 739, + 611, 739, 739, 739, 611, 739, 611, 739, + 739, 611, 739, 739, 739, 739, 611, 739, + 739, 611, 611, 739, 739, 611, 739, 611, + 740, 741, 742, 743, 744, 745, 746, 748, + 749, 750, 751, 752, 753, 754, 755, 756, + 757, 758, 759, 631, 760, 761, 762, 763, + 764, 765, 766, 767, 768, 747, 611, 739, + 739, 739, 739, 611, 739, 611, 739, 739, + 611, 612, 612, 611, 611, 612, 739, 739, + 611, 739, 739, 611, 739, 611, 612, 739, + 739, 739, 612, 612, 611, 739, 739, 739, + 611, 611, 611, 739, 611, 612, 612, 739, + 739, 612, 611, 739, 739, 739, 611, 739, + 611, 739, 611, 739, 611, 612, 611, 611, + 739, 739, 611, 739, 611, 612, 739, 739, + 612, 739, 611, 612, 739, 739, 612, 612, + 739, 739, 611, 739, 739, 612, 611, 739, + 739, 739, 612, 612, 612, 611, 739, 612, + 739, 611, 611, 611, 612, 611, 611, 611, + 739, 739, 739, 612, 739, 612, 611, 739, + 739, 612, 612, 612, 739, 739, 739, 611, + 739, 739, 612, 612, 611, 611, 611, 739, + 739, 739, 611, 739, 611, 612, 739, 739, + 739, 739, 612, 739, 612, 612, 611, 739, + 612, 739, 611, 739, 611, 739, 612, 739, + 739, 611, 739, 611, 739, 739, 739, 739, + 612, 611, 612, 739, 611, 739, 739, 739, + 739, 611, 739, 611, 769, 770, 771, 772, + 773, 774, 775, 776, 777, 778, 779, 780, + 781, 782, 783, 784, 785, 786, 787, 788, + 789, 611, 612, 739, 739, 612, 739, 611, + 612, 739, 739, 739, 611, 739, 612, 739, + 739, 739, 611, 739, 611, 739, 739, 611, + 739, 739, 611, 612, 739, 612, 611, 739, + 739, 739, 611, 612, 739, 611, 739, 739, + 611, 739, 739, 612, 739, 612, 612, 739, + 611, 739, 739, 612, 611, 739, 739, 739, + 739, 612, 739, 739, 612, 739, 611, 739, + 611, 612, 612, 612, 739, 739, 
612, 611, + 739, 611, 739, 611, 612, 612, 612, 612, + 739, 739, 612, 739, 611, 612, 739, 739, + 612, 739, 612, 611, 612, 739, 612, 739, + 611, 612, 739, 739, 739, 739, 612, 739, + 611, 739, 739, 611, 790, 791, 792, 793, + 794, 611, 739, 667, 611, 739, 611, 739, + 611, 739, 611, 739, 611, 795, 796, 611, + 739, 611, 739, 611, 797, 798, 799, 800, + 675, 801, 802, 803, 804, 805, 806, 807, + 808, 809, 810, 611, 739, 739, 611, 739, + 611, 739, 611, 739, 739, 739, 612, 612, + 739, 611, 739, 611, 739, 611, 612, 739, + 611, 739, 612, 611, 612, 739, 739, 739, + 612, 739, 612, 611, 739, 611, 612, 739, + 612, 739, 612, 739, 611, 739, 739, 612, + 739, 611, 739, 739, 739, 739, 611, 739, + 612, 612, 739, 739, 612, 611, 739, 739, + 612, 739, 612, 611, 811, 812, 798, 611, + 739, 611, 739, 739, 611, 813, 814, 815, + 816, 817, 818, 819, 611, 820, 821, 822, + 823, 824, 611, 739, 611, 739, 611, 739, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 825, 826, 827, 828, 829, 830, 831, + 832, 833, 834, 835, 836, 837, 838, 839, + 840, 841, 842, 843, 844, 845, 846, 847, + 611, 739, 612, 739, 611, 611, 739, 612, + 611, 612, 612, 611, 739, 612, 739, 739, + 611, 739, 611, 612, 739, 612, 739, 612, + 611, 611, 739, 611, 612, 739, 739, 612, + 739, 612, 739, 611, 739, 612, 739, 611, + 739, 739, 612, 739, 612, 611, 739, 739, + 612, 612, 612, 612, 739, 739, 611, 612, + 739, 611, 612, 612, 739, 611, 739, 612, + 739, 612, 739, 612, 739, 611, 612, 611, + 739, 739, 612, 612, 739, 612, 739, 611, + 611, 611, 739, 739, 612, 739, 612, 739, + 611, 611, 739, 612, 612, 739, 612, 739, + 611, 612, 739, 612, 739, 611, 612, 612, + 739, 739, 611, 612, 612, 612, 739, 739, + 611, 848, 849, 724, 850, 611, 739, 611, + 739, 611, 739, 611, 851, 611, 739, 611, + 852, 853, 854, 855, 856, 857, 611, 612, + 612, 739, 739, 739, 611, 611, 611, 611, + 739, 739, 611, 739, 739, 611, 611, 611, + 739, 739, 739, 739, 611, 858, 859, 860, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 739, 611, 861, 611, 612, 611, 862, + 611, 863, 864, 865, 867, 866, 611, 739, + 611, 611, 739, 739, 612, 611, 612, 611, + 868, 611, 869, 870, 871, 873, 872, 611, + 612, 611, 611, 612, 612, 688, 689, 690, + 691, 692, 693, 611, 641, 642, 643, 604, + 605, 874, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 875, 610, 641, + 642, 643, 876, 606, 607, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 611, + 875, 611, 877, 875, 641, 642, 643, 878, + 607, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 611, 877, 611, 609, 877, + 879, 611, 877, 611, 880, 881, 611, 875, + 611, 611, 877, 611, 875, 611, 875, 671, + 672, 673, 674, 675, 676, 677, 882, 679, + 680, 681, 682, 683, 684, 685, 884, 885, + 886, 887, 888, 889, 884, 885, 886, 887, + 888, 889, 884, 883, 890, 611, 612, 610, + 611, 891, 891, 891, 877, 611, 641, 642, + 643, 876, 874, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 611, 880, 892, + 611, 611, 875, 891, 891, 877, 891, 891, + 877, 891, 891, 891, 877, 891, 891, 877, + 891, 891, 877, 891, 891, 611, 877, 877, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 885, 890, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 
888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 886, 890, + 888, 889, 883, 884, 885, 886, 888, 889, + 883, 884, 885, 886, 888, 889, 883, 884, + 885, 886, 888, 889, 883, 884, 885, 886, + 888, 893, 892, 887, 611, 890, 891, 611, + 875, 877, 265, 3, 1, 894, 895, 896, + 897, 898, 601, 1, 265, 899, 3, 265, + 3, 265, 3, 1, 901, 900, 900, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 900, 901, 901, 900, 901, 901, + 901, 901, 900, 901, 901, 900, 900, 901, + 901, 900, 901, 900, 902, 903, 904, 905, + 906, 908, 909, 910, 912, 913, 914, 915, + 916, 917, 918, 919, 920, 921, 922, 923, + 924, 925, 926, 927, 928, 929, 907, 911, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 900, 900, 901, 900, 900, + 900, 901, 901, 901, 901, 900, 900, 900, + 900, 900, 900, 901, 900, 900, 900, 900, + 900, 900, 901, 900, 900, 900, 900, 901, + 901, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 900, 900, 900, 900, + 900, 900, 901, 901, 901, 901, 901, 901, + 900, 901, 901, 900, 900, 900, 900, 900, + 900, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 900, 901, 900, 930, 931, 932, 933, + 934, 935, 936, 937, 938, 939, 940, 941, + 942, 943, 944, 945, 946, 947, 948, 949, + 950, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 901, 901, 900, 901, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 901, 900, + 901, 901, 900, 951, 952, 953, 954, 955, + 900, 901, 899, 900, 901, 900, 901, 900, + 901, 900, 901, 900, 956, 957, 900, 901, + 900, 901, 900, 958, 959, 960, 961, 962, + 963, 964, 965, 966, 967, 968, 969, 970, + 971, 972, 900, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 900, 901, + 901, 900, 900, 900, 901, 901, 900, 901, + 900, 901, 901, 900, 900, 900, 901, 901, + 900, 901, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 901, 900, 901, 901, + 900, 973, 974, 959, 900, 901, 900, 901, + 901, 900, 975, 976, 977, 978, 979, 980, + 900, 981, 982, 983, 984, 985, 900, 901, + 900, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 1001, 1002, 999, 1003, + 1004, 1005, 1006, 1007, 900, 901, 901, 900, + 900, 901, 900, 900, 901, 901, 901, 900, + 901, 900, 901, 901, 900, 900, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 901, 900, 900, + 901, 901, 901, 900, 900, 900, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 900, + 901, 901, 900, 1008, 1009, 1010, 1011, 900, + 901, 900, 901, 900, 901, 900, 901, 900, + 1012, 900, 901, 900, 1013, 1014, 1015, 1016, + 1017, 1018, 900, 901, 901, 901, 900, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 901, 900, 1019, + 1020, 1021, 900, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 1022, 900, 1023, + 1024, 1025, 1027, 1026, 900, 901, 900, 900, + 901, 901, 951, 952, 1028, 953, 954, 955, + 900, 901, 900, 975, 976, 977, 978, 979, + 980, 1029, 900, 1030, 1031, 1032, 900, 1033, + 900, 1033, 900, 900, 1033, 1033, 900, 1033, + 1033, 900, 1033, 1033, 1033, 900, 1033, 900, + 1033, 1033, 
900, 1033, 1033, 1033, 1033, 900, + 1033, 1033, 900, 900, 1033, 1033, 900, 1033, + 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040, + 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, + 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056, + 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900, + 1033, 1033, 1033, 1033, 900, 1033, 900, 1033, + 1033, 900, 901, 901, 900, 900, 901, 1033, + 1033, 900, 1033, 1033, 900, 1033, 900, 901, + 1033, 1033, 1033, 901, 901, 900, 1033, 1033, + 1033, 900, 900, 900, 1033, 900, 901, 901, + 1033, 1033, 901, 900, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 901, 900, + 900, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 901, 1033, 900, 901, 1033, 1033, 901, + 901, 1033, 1033, 900, 1033, 1033, 901, 900, + 1033, 1033, 1033, 901, 901, 901, 900, 1033, + 901, 1033, 900, 900, 900, 901, 900, 900, + 900, 1033, 1033, 1033, 901, 1033, 901, 900, + 1033, 1033, 901, 901, 901, 1033, 1033, 1033, + 900, 1033, 1033, 901, 901, 900, 900, 900, + 1033, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 1033, 1033, 901, 1033, 901, 901, 900, + 1033, 901, 1033, 900, 1033, 900, 1033, 901, + 1033, 1033, 900, 1033, 900, 1033, 1033, 1033, + 1033, 901, 900, 901, 1033, 900, 1033, 1033, + 1033, 1033, 900, 1033, 900, 1063, 1064, 1065, + 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, + 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, + 1082, 1083, 900, 901, 1033, 1033, 901, 1033, + 900, 901, 1033, 1033, 1033, 900, 1033, 901, + 1033, 1033, 1033, 900, 1033, 900, 1033, 1033, + 900, 1033, 1033, 900, 901, 1033, 901, 900, + 1033, 1033, 1033, 900, 901, 1033, 900, 1033, + 1033, 900, 1033, 1033, 901, 1033, 901, 901, + 1033, 900, 1033, 1033, 901, 900, 1033, 1033, + 1033, 1033, 901, 1033, 1033, 901, 1033, 900, + 1033, 900, 901, 901, 901, 1033, 1033, 901, + 900, 1033, 900, 1033, 900, 901, 901, 901, + 901, 1033, 1033, 901, 1033, 900, 901, 1033, + 1033, 901, 1033, 901, 900, 901, 1033, 901, + 1033, 900, 901, 1033, 1033, 1033, 1033, 901, + 1033, 900, 1033, 1033, 900, 1084, 1085, 1086, + 1087, 1088, 900, 1033, 899, 900, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 1089, 1090, + 900, 1033, 900, 1033, 900, 1091, 1092, 1093, + 1094, 962, 1095, 1096, 1097, 1098, 1099, 1100, + 1101, 1102, 1103, 1104, 900, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 1033, 1033, 901, + 901, 1033, 900, 1033, 900, 1033, 900, 901, + 1033, 900, 1033, 901, 900, 901, 1033, 1033, + 1033, 901, 1033, 901, 900, 1033, 900, 901, + 1033, 901, 1033, 901, 1033, 900, 1033, 1033, + 901, 1033, 900, 1033, 1033, 1033, 1033, 900, + 1033, 901, 901, 1033, 1033, 901, 900, 1033, + 1033, 901, 1033, 901, 900, 1105, 1106, 1092, + 900, 1033, 900, 1033, 1033, 900, 1107, 1108, + 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115, + 1116, 1117, 1118, 900, 1033, 900, 1033, 900, + 1033, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124, + 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, + 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, + 1141, 900, 1033, 901, 1033, 900, 900, 1033, + 901, 900, 901, 901, 900, 1033, 901, 1033, + 1033, 900, 1033, 900, 901, 1033, 901, 1033, + 901, 900, 900, 1033, 900, 901, 1033, 1033, + 901, 1033, 901, 1033, 900, 1033, 901, 1033, + 900, 1033, 1033, 901, 1033, 901, 900, 1033, + 1033, 901, 901, 901, 901, 1033, 1033, 900, + 901, 1033, 900, 901, 901, 1033, 900, 1033, + 901, 1033, 901, 1033, 901, 1033, 900, 901, + 900, 1033, 1033, 901, 901, 1033, 901, 1033, + 900, 900, 900, 1033, 1033, 901, 1033, 901, + 1033, 900, 900, 1033, 901, 901, 1033, 901, + 1033, 900, 901, 1033, 901, 1033, 900, 901, + 901, 1033, 1033, 900, 
901, 901, 901, 1033, + 1033, 900, 1142, 1143, 1010, 1144, 900, 1033, + 900, 1033, 900, 1033, 900, 1145, 900, 1033, + 900, 1146, 1147, 1148, 1149, 1150, 1151, 900, + 901, 901, 1033, 1033, 1033, 900, 900, 900, + 900, 1033, 1033, 900, 1033, 1033, 900, 900, + 900, 1033, 1033, 1033, 1033, 900, 1152, 1153, + 1154, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1155, 900, 901, 900, + 1156, 900, 1157, 1158, 1159, 1161, 1160, 900, + 1033, 900, 900, 1033, 1033, 901, 900, 901, + 900, 3, 265, 3, 1, 1162, 3, 1, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163, + 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172, + 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, + 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, + 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195, + 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184, + 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, + 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194, + 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194, + 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163, + 1163, 1163, 1163, 1163, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194, + 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163, + 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244, + 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243, + 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163, + 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252, + 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253, + 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270, + 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277, + 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163, + 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 
1292, 1293, 1294, + 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301, + 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163, + 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194, + 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194, + 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194, + 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163, + 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194, + 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194, + 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163, + 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322, + 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194, + 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163, + 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194, + 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162, + 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194, + 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163, + 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194, + 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163, + 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162, + 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163, + 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162, + 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163, + 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163, + 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194, + 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162, + 1162, 1194, 1163, 1194, 1162, 1163, 1162, 1162, + 1162, 1162, 1163, 1162, 1163, 1326, 1327, 1328, + 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, + 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, + 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162, + 1163, 1194, 1162, 1162, 1162, 1163, 1162, 1194, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163, + 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194, + 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162, + 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163, + 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194, + 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194, + 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194, + 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349, + 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353, + 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356, + 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194, + 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194, + 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162, + 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194, + 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162, + 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163, + 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162, + 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355, + 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378, + 1379, 1380, 1381, 1163, 1162, 
1163, 1162, 1163, + 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387, + 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, + 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, + 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162, + 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162, + 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162, + 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162, + 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162, + 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162, + 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163, + 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162, + 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194, + 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162, + 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194, + 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194, + 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162, + 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162, + 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162, + 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163, + 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163, + 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163, + 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416, + 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163, + 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163, + 1162, 1163, 1163, 1162, 1162, 1162, 3, 1, + 3, 1162, 3, 1, 601, 1, 1425, 1427, + 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429, + 1430, 1431, 1432, 1427, 601, 1426, 890, 1, + 3, 610, 3, 1, 875, 875, 875, 877, + 1, 875, 875, 877, 875, 875, 877, 875, + 875, 875, 877, 875, 875, 877, 875, 875, + 877, 875, 875, 1, 877, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1428, 890, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, + 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, + 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, + 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, + 1435, 1437, 1430, 1436, 1, 890, 875, 3, + 875, 877, 3, 877, 3, 1, 875, 1, + 265, 265, 1, 265, 1438, 1439, 601, 1, + 265, 3, 1, 3, 3, 265, 3, 1, + 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, + 601, 1, 266, 3, 1, 3, 266, 3, + 1, 1447, 601, 1, 3, 265, 3, 1, + 1448, 601, 1, 3, 265, 3, 1, 1449, + 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, + 1458, 1459, 601, 1, 3, 1460, 1, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, + 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, + 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, + 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, + 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, + 1461, 1461, 
1462, 1462, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, + 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, + 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, + 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, + 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, + 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, + 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, + 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, + 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, + 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, + 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, + 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, + 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, + 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, + 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, + 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, + 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570, + 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, + 1461, 1462, 1462, 1462, 1461, 1462, 1461, 1462, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, + 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, + 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, + 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, + 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, + 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, + 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, + 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, + 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, + 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, + 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, + 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, + 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, + 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, + 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, + 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, + 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, + 1589, 1589, 
1461, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, + 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, + 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, + 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, + 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, + 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, + 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, + 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, + 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, + 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, + 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, + 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, + 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, + 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, + 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, + 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, + 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, + 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, + 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, + 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, + 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, + 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, + 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, + 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, + 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, + 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, + 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, + 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, + 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, + 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589, + 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, + 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1462, 1462, 1589, 1589, 1462, 1461, 1589, 1589, + 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, + 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665, + 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, + 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, + 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, + 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, + 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, + 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, + 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, + 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, + 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, + 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, + 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, + 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, + 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, + 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, + 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, + 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, + 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, + 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, + 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, + 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, + 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, + 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 
1461, 1711, 1461, 1462, 1461, 1712, + 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, + 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, + 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, + 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, + 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, + 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, + 1162, 3, 1, 3, 1162, 3, 1162, 3, + 1, 1162, 1162, 3, 1162, 3, 1162, 3, + 1162, 3, 1162, 3, 1, 3, 3, 1162, + 1162, 3, 1, 1162, 1162, 3, 1, 1162, + 3, 1162, 3, 1, 3, 1162, 3, 1162, + 3, 1, 1162, 3, 1162, 3, 1, 1162, + 3, 1, 1162, 1162, 3, 3, 1162, 3, + 1162, 3, 1162, 1, 1440, 1, 1726, 1440, + 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, + 1, 265, 3, 1, 3, 265, 1, 1, + 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, + 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, + 1729, 1, 1732, 1740, 1747, 1, 1731, 262, + 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, + 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, + 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, + 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, + 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, + 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, + 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, + 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, + 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, + 1799, 1800, 1801, 1803, 268, 530, 576, 1802, + 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, + 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, + 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, + 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, + 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, + 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817, + 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, + 1819, 1820, 1821, 1788, 527, 529, 1804, 1805, + 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, + 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, + 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, + 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, + 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, + 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, + 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, + 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, + 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, + 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, + 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, + 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, + 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, + 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, + 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 
1913, + 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, + 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, + 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, + 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, + 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, + 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, + 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, + 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, + 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, + 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, + 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, + 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, + 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, + 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, + 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, + 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, + 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, + 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, + 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, + 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, + 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, + 2021, 1982, +} + +var _graphclust_trans_targs []int16 = []int16{ + 1974, 0, 1974, 1975, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 66, 68, 70, + 71, 72, 1976, 69, 74, 75, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 93, 94, 96, + 102, 125, 130, 132, 139, 143, 97, 98, + 99, 100, 101, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, + 124, 126, 127, 128, 129, 131, 133, 134, + 135, 136, 137, 138, 140, 141, 142, 144, + 291, 292, 1977, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 210, 211, 212, + 213, 214, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 234, 235, 237, 243, 267, 271, + 273, 280, 284, 238, 239, 240, 241, 242, + 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 268, + 269, 270, 272, 274, 275, 276, 277, 278, + 279, 281, 282, 283, 285, 287, 288, 289, + 145, 290, 146, 294, 295, 296, 2, 297, + 3, 1974, 1978, 1974, 1979, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, + 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 342, 344, 345, 346, 347, 348, 349, 350, + 351, 352, 353, 354, 355, 356, 357, 358, + 359, 360, 361, 362, 363, 364, 366, 368, + 370, 371, 372, 1980, 369, 374, 375, 377, + 378, 379, 380, 381, 382, 383, 384, 385, + 386, 387, 388, 389, 390, 391, 393, 394, + 396, 402, 425, 430, 432, 439, 443, 397, + 398, 399, 400, 401, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 420, 421, 422, + 423, 424, 426, 427, 428, 429, 431, 433, + 434, 435, 436, 437, 438, 440, 441, 442, + 444, 591, 592, 1981, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 469, + 470, 471, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 483, 484, 485, + 486, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 504, 505, 506, 507, 508, 510, 511, + 512, 513, 514, 516, 517, 519, 520, 521, + 522, 523, 524, 525, 526, 527, 528, 529, + 530, 531, 532, 
534, 535, 537, 543, 567, + 571, 573, 580, 584, 538, 539, 540, 541, + 542, 544, 545, 546, 547, 548, 549, 550, + 551, 552, 553, 554, 555, 556, 557, 558, + 559, 560, 561, 562, 563, 564, 565, 566, + 568, 569, 570, 572, 574, 575, 576, 577, + 578, 579, 581, 582, 583, 585, 587, 588, + 589, 445, 590, 446, 594, 595, 596, 302, + 597, 303, 599, 605, 606, 608, 610, 613, + 616, 640, 1982, 622, 1983, 612, 1984, 615, + 618, 620, 621, 624, 625, 629, 630, 631, + 632, 633, 634, 635, 1985, 628, 639, 642, + 643, 644, 645, 646, 649, 650, 651, 652, + 653, 654, 655, 656, 660, 661, 663, 664, + 647, 666, 669, 671, 673, 667, 668, 670, + 672, 674, 678, 679, 680, 681, 682, 683, + 684, 685, 686, 687, 1986, 676, 677, 690, + 691, 299, 695, 696, 698, 997, 1000, 1003, + 1027, 1974, 1987, 1974, 1988, 712, 713, 714, + 715, 716, 717, 718, 719, 720, 721, 722, + 723, 724, 725, 726, 727, 728, 729, 730, + 731, 732, 733, 734, 735, 736, 737, 738, + 739, 741, 742, 743, 744, 745, 746, 747, + 748, 749, 750, 751, 752, 753, 754, 755, + 756, 757, 758, 759, 760, 761, 763, 765, + 767, 768, 769, 1989, 766, 771, 772, 774, + 775, 776, 777, 778, 779, 780, 781, 782, + 783, 784, 785, 786, 787, 788, 790, 791, + 793, 799, 822, 827, 829, 836, 840, 794, + 795, 796, 797, 798, 800, 801, 802, 803, + 804, 805, 806, 807, 808, 809, 810, 811, + 812, 813, 814, 815, 816, 817, 818, 819, + 820, 821, 823, 824, 825, 826, 828, 830, + 831, 832, 833, 834, 835, 837, 838, 839, + 841, 988, 989, 1990, 855, 856, 857, 858, + 859, 860, 861, 862, 863, 864, 865, 866, + 867, 868, 869, 870, 871, 872, 873, 874, + 875, 876, 877, 878, 879, 880, 881, 882, + 883, 885, 886, 887, 888, 889, 890, 891, + 892, 893, 894, 895, 896, 897, 898, 899, + 900, 901, 902, 903, 904, 905, 907, 908, + 909, 910, 911, 913, 914, 916, 917, 918, + 919, 920, 921, 922, 923, 924, 925, 926, + 927, 928, 929, 931, 932, 934, 940, 964, + 968, 970, 977, 981, 935, 936, 937, 938, + 939, 941, 942, 943, 944, 945, 946, 947, + 948, 949, 950, 951, 952, 953, 954, 955, + 956, 957, 958, 959, 960, 961, 962, 963, + 965, 966, 967, 969, 971, 972, 973, 974, + 975, 976, 978, 979, 980, 982, 984, 985, + 986, 842, 987, 843, 991, 992, 993, 699, + 994, 700, 1009, 1991, 999, 1992, 1002, 1005, + 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, + 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, + 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, + 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, + 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, + 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, + 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, + 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, + 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, + 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, + 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, + 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, + 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, + 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, + 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, + 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, + 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, + 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, + 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, + 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, + 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, + 1258, 1259, 
1260, 1261, 1262, 1263, 1264, 1265, + 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, + 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, + 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, + 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, + 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, + 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, + 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, + 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, + 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, + 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, + 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, + 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, + 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, + 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, + 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, + 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, + 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, + 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, + 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444, + 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, + 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, + 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, + 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, + 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, + 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, + 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, + 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, + 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, + 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, + 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, + 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, + 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, + 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, + 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, + 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, + 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616, + 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, + 1330, 1630, 1631, 1632, 1633, 1634, 1635, 1636, + 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, + 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, + 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, + 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, + 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, + 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, + 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, + 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, + 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, + 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, + 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, + 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, + 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, + 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, + 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, + 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, + 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, + 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, + 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, + 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, + 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, + 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, + 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, + 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, + 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, + 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, + 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, + 1900, 1924, 
1928, 1930, 1937, 1941, 1895, 1896, + 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, + 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, + 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, + 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, + 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, + 1973, 1974, 1, 1975, 299, 300, 301, 692, + 693, 694, 697, 1028, 1628, 1629, 1638, 1639, + 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, + 14, 43, 65, 73, 76, 92, 298, 293, + 67, 95, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 187, 209, 215, + 218, 233, 236, 286, 1974, 600, 601, 602, + 603, 604, 607, 641, 648, 657, 658, 659, + 662, 665, 688, 689, 304, 305, 306, 307, + 308, 309, 310, 311, 312, 313, 314, 343, + 365, 373, 376, 392, 598, 593, 367, 395, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 487, 509, 515, 518, 533, + 536, 586, 609, 623, 636, 637, 638, 611, + 619, 614, 617, 626, 627, 675, 1974, 701, + 702, 703, 704, 705, 706, 707, 708, 709, + 710, 711, 996, 762, 770, 1010, 1023, 1024, + 1025, 789, 995, 990, 740, 773, 764, 792, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 884, 906, 912, 915, 930, + 933, 983, 998, 1006, 1001, 1004, 1013, 1014, + 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, + 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, + 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, + 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, + 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, + 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, + 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, + 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, + 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, + 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, + 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, + 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956, + 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844, + 1866, 1872, 1875, 1890, 1893, 1943, +} + +var _graphclust_trans_actions []byte = []byte{ + 31, 0, 27, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 34, 40, 25, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 40, 0, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 29, 51, 17, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 51, 0, 51, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 40, 21, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 40, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 19, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 40, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 23, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 43, 1, 47, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 15, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 13, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _graphclust_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 37, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_eof_trans []int16 = []int16{ + 0, 0, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 0, 0, 0, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 
268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 0, 0, 0, 0, + 0, 0, 610, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 610, 612, 612, + 610, 612, 612, 610, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 610, 612, + 612, 612, 612, 0, 0, 0, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 0, + 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 
1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 
1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1750, + 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, + 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, + 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, + 1983, 1983, 1983, 1983, +} + +const graphclust_start int = 1974 +const graphclust_first_final int = 1974 +const graphclust_error int = 0 + +const graphclust_en_main int = 1974 + + +// line 14 "grapheme_clusters.rl" + + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. +func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + +// line 4976 "grapheme_clusters.go" + { + cs = graphclust_start + ts = 0 + te = 0 + act = 0 + } + +// line 4984 "grapheme_clusters.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } +_resume: + _acts = int(_graphclust_from_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts - 1] { + case 4: +// line 1 "NONE" + +ts = p + +// line 5008 "grapheme_clusters.go" + } + } + + _keys = int(_graphclust_key_offsets[cs]) + _trans = int(_graphclust_index_offsets[cs]) + + _klen = int(_graphclust_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _graphclust_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_graphclust_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _graphclust_trans_keys[_mid + 1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + +_match: + _trans = int(_graphclust_indicies[_trans]) +_eof_trans: + cs = int(_graphclust_trans_targs[_trans]) + + if _graphclust_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_graphclust_trans_actions[_trans]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 0: +// line 46 "grapheme_clusters.rl" + + + startPos = p + + case 1: +// line 50 "grapheme_clusters.rl" + + + endPos = p + + case 5: +// line 1 "NONE" + +te = p+1 + + case 6: +// line 54 "grapheme_clusters.rl" + +act = 3; + case 7: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 8: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 9: +// 
line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 10: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 11: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 12: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 13: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 14: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 15: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 16: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 17: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 18: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 19: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 20: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 21: +// line 1 "NONE" + + switch act { + case 0: + {cs = 0 +goto _again +} + case 3: + {p = (te) - 1 + + return endPos+1, data[startPos:endPos+1], nil + } + } + +// line 5218 "grapheme_clusters.go" + } + } + +_again: + _acts = int(_graphclust_to_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 2: +// line 1 "NONE" + +ts = 0 + + case 3: +// line 1 "NONE" + +act = 0 + +// line 5238 "grapheme_clusters.go" + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: {} + if p == eof { + if _graphclust_eof_trans[cs] > 0 { + _trans = int(_graphclust_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: {} + } + +// line 116 "grapheme_clusters.rl" + + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. + _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl new file mode 100644 index 00000000..003ffbf5 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl @@ -0,0 +1,132 @@ +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. DO NOT EDIT +%%{ + # (except you are actually in grapheme_clusters.rl here, so edit away!) + + machine graphclust; + write data; +}%% + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. 
+func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + %%{ + include GraphemeCluster "grapheme_clusters_table.rl"; + + action start { + startPos = p + } + + action end { + endPos = p + } + + action emit { + return endPos+1, data[startPos:endPos+1], nil + } + + ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?; + AnyExtender = Extend | ZWJGlue | SpacingMark; + Extension = AnyExtender*; + ReplacementChar = (0xEF 0xBF 0xBD); + + CRLFSeq = CR LF; + ControlSeq = Control | ReplacementChar; + HangulSeq = ( + L+ (((LV? V+ | LVT) T*)?|LV?) | + LV V* T* | + V+ T* | + LVT T* | + T+ + ) Extension; + EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension; + ZWJSeq = ZWJGlue Extension; + EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont + ); + + # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension + OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension; + + # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break + PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?; + + CRLFTok = CRLFSeq >start @end; + ControlTok = ControlSeq >start @end; + HangulTok = HangulSeq >start @end; + EmojiTok = EmojiSeq >start @end; + ZWJTok = ZWJSeq >start @end; + EmojiFlagTok = EmojiFlagSeq >start @end; + OtherTok = OtherSeq >start @end; + PrependTok = PrependSeq >start @end; + + main := |* + CRLFTok => emit; + ControlTok => emit; + HangulTok => emit; + EmojiTok => emit; + ZWJTok => emit; + EmojiFlagTok => emit; + PrependTok => emit; + OtherTok => emit; + + # any single valid UTF-8 character would also be valid per spec, + # but we'll handle that separately after the loop so we can deal + # with requesting more bytes if we're not at EOF. + *|; + + write init; + write exec; + }%% + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. 
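+	// If the bytes are not valid UTF-8, utf8.DecodeRune returns utf8.RuneError
+	// with a length of 1, so the scanner still advances one byte at a time
+	// instead of stalling on bad input.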
+ _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl new file mode 100644 index 00000000..fb451182 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl @@ -0,0 +1,1583 @@ +# The following Ragel file was autogenerated with unicode2ragel.rb +# from: http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +# +# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "E_Base", "E_Modifier", "ZWJ", "Glue_After_Zwj", "E_Base_GAZ"]. +# +# To use this, make sure that your alphtype is set to byte, +# and that your input is in utf8. + +%%{ + machine GraphemeCluster; + + Prepend = + 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... + | 0xDB 0x9D #Cf ARABIC END OF AYAH + | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK + | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH + | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH + | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN + | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... + ; + + CR = + 0x0D #Cc + ; + + LF = + 0x0A #Cc + ; + + Control = + 0x00..0x09 #Cc [10] .. + | 0x0B..0x0C #Cc [2] .. + | 0x0E..0x1F #Cc [18] .. + | 0x7F #Cc [33] .. + | 0xC2 0x80..0x9F # + | 0xC2 0xAD #Cf SOFT HYPHEN + | 0xD8 0x9C #Cf ARABIC LETTER MARK + | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR + | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE + | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... + | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR + | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR + | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... + | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS + | 0xE2 0x81 0xA5 #Cn + | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... + | 0xED 0xA0 0x80..0xFF #Cs [2048] .... + | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... + | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... + | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... + | 0xF3 0xA0 0x80 0x80 #Cn + | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG + | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. + | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. + | 0xF3 0xA0 0x83 0x00..0xBF # + | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] .. 
0 { + line = line[0:commentStart] + } + pieces := strings.Split(line, sep) + if len(pieces) != 2 { + log.Printf("unexpected %d pieces in %s", len(pieces), line) + return + } + + propertyName := strings.TrimSpace(pieces[1]) + + rangeTable, ok := propertyRanges[propertyName] + if !ok { + rangeTable = &unicode.RangeTable{ + LatinOffset: 0, + } + propertyRanges[propertyName] = rangeTable + } + + codepointRange := strings.TrimSpace(pieces[0]) + rngeIndex := strings.Index(codepointRange, rnge) + + if rngeIndex < 0 { + // single codepoint, not range + codepointInt, err := strconv.ParseUint(codepointRange, 16, 64) + if err != nil { + log.Printf("error parsing int: %v", err) + return + } + if codepointInt < 0x10000 { + r16 := unicode.Range16{ + Lo: uint16(codepointInt), + Hi: uint16(codepointInt), + Stride: 1, + } + addR16ToTable(rangeTable, r16) + } else { + r32 := unicode.Range32{ + Lo: uint32(codepointInt), + Hi: uint32(codepointInt), + Stride: 1, + } + addR32ToTable(rangeTable, r32) + } + } else { + rngeStart := codepointRange[0:rngeIndex] + rngeEnd := codepointRange[rngeIndex+2:] + rngeStartInt, err := strconv.ParseUint(rngeStart, 16, 64) + if err != nil { + log.Printf("error parsing int: %v", err) + return + } + rngeEndInt, err := strconv.ParseUint(rngeEnd, 16, 64) + if err != nil { + log.Printf("error parsing int: %v", err) + return + } + if rngeStartInt < 0x10000 && rngeEndInt < 0x10000 { + r16 := unicode.Range16{ + Lo: uint16(rngeStartInt), + Hi: uint16(rngeEndInt), + Stride: 1, + } + addR16ToTable(rangeTable, r16) + } else if rngeStartInt >= 0x10000 && rngeEndInt >= 0x10000 { + r32 := unicode.Range32{ + Lo: uint32(rngeStartInt), + Hi: uint32(rngeEndInt), + Stride: 1, + } + addR32ToTable(rangeTable, r32) + } else { + log.Printf("unexpected range") + } + } +} + +func addR16ToTable(r *unicode.RangeTable, r16 unicode.Range16) { + if r.R16 == nil { + r.R16 = make([]unicode.Range16, 0, 1) + } + r.R16 = append(r.R16, r16) + if r16.Hi <= unicode.MaxLatin1 { + r.LatinOffset++ + } +} + +func addR32ToTable(r *unicode.RangeTable, r32 unicode.Range32) { + if r.R32 == nil { + r.R32 = make([]unicode.Range32, 0, 1) + } + r.R32 = append(r.R32, r32) +} + +func generateTables(prefix string, propertyRanges map[string]*unicode.RangeTable) { + prNames := make([]string, 0, len(propertyRanges)) + for k := range propertyRanges { + prNames = append(prNames, k) + } + sort.Strings(prNames) + for _, key := range prNames { + rt := propertyRanges[key] + fmt.Fprintf(output, "var _%s%s = %s\n", prefix, key, generateRangeTable(rt)) + } + fmt.Fprintf(output, "type _%sRuneRange unicode.RangeTable\n", prefix) + + fmt.Fprintf(output, "func _%sRuneType(r rune) *_%sRuneRange {\n", prefix, prefix) + fmt.Fprintf(output, "\tswitch {\n") + for _, key := range prNames { + fmt.Fprintf(output, "\tcase unicode.Is(_%s%s, r):\n\t\treturn (*_%sRuneRange)(_%s%s)\n", prefix, key, prefix, prefix, key) + } + fmt.Fprintf(output, "\tdefault:\n\t\treturn nil\n") + fmt.Fprintf(output, "\t}\n") + fmt.Fprintf(output, "}\n") + + fmt.Fprintf(output, "func (rng *_%sRuneRange) String() string {\n", prefix) + fmt.Fprintf(output, "\tswitch (*unicode.RangeTable)(rng) {\n") + for _, key := range prNames { + fmt.Fprintf(output, "\tcase _%s%s:\n\t\treturn %q\n", prefix, key, key) + } + fmt.Fprintf(output, "\tdefault:\n\t\treturn \"Other\"\n") + fmt.Fprintf(output, "\t}\n") + fmt.Fprintf(output, "}\n") +} + +func generateRangeTable(rt *unicode.RangeTable) string { + rv := "&unicode.RangeTable{\n" + if rt.R16 != nil { + rv += "\tR16: []unicode.Range16{\n" + 
for _, r16 := range rt.R16 { + rv += fmt.Sprintf("\t\t%#v,\n", r16) + } + rv += "\t},\n" + } + if rt.R32 != nil { + rv += "\tR32: []unicode.Range32{\n" + for _, r32 := range rt.R32 { + rv += fmt.Sprintf("\t\t%#v,\n", r32) + } + rv += "\t},\n" + } + rv += fmt.Sprintf("\t\tLatinOffset: %d,\n", rt.LatinOffset) + rv += "}\n" + return rv +} + +const fileHeader = `// Generated by running +// maketables --url=%s +// DO NOT EDIT + +package textseg + +import( + "unicode" +) +` + +func setupOutput() { + output = bufio.NewWriter(startGofmt()) +} + +// startGofmt connects output to a gofmt process if -output is set. +func startGofmt() io.Writer { + if *outputFile == "" { + return os.Stdout + } + stdout, err := os.Create(*outputFile) + if err != nil { + log.Fatal(err) + } + // Pipe output to gofmt. + gofmt := exec.Command("gofmt") + fd, err := gofmt.StdinPipe() + if err != nil { + log.Fatal(err) + } + gofmt.Stdout = stdout + gofmt.Stderr = os.Stderr + err = gofmt.Start() + if err != nil { + log.Fatal(err) + } + return fd +} + +func flushOutput() { + err := output.Flush() + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go b/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go new file mode 100644 index 00000000..ac420026 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/make_test_tables.go @@ -0,0 +1,212 @@ +// Copyright (c) 2014 Couchbase, Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file +// except in compliance with the License. You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software distributed under the +// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +// either express or implied. See the License for the specific language governing permissions +// and limitations under the License. 
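+//
+// This file is a code generator (hence the "ignore" build tag below): it
+// reads the Unicode GraphemeBreakTest.txt, WordBreakTest.txt and
+// SentenceBreakTest.txt data files and writes the corresponding Go test
+// tables, piping the output through gofmt.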
+ +// +build ignore + +package main + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "strconv" + "strings" + "unicode" +) + +var url = flag.String("url", + "http://www.unicode.org/Public/"+unicode.Version+"/ucd/auxiliary/", + "URL of Unicode database directory") +var verbose = flag.Bool("verbose", + false, + "write data to stdout as it is parsed") +var localFiles = flag.Bool("local", + false, + "data files have been copied to the current directory; for debugging only") + +var outputFile = flag.String("output", + "", + "output file for generated tables; default stdout") + +var output *bufio.Writer + +func main() { + flag.Parse() + setupOutput() + + graphemeTests := make([]test, 0) + graphemeTests = loadUnicodeData("GraphemeBreakTest.txt", graphemeTests) + wordTests := make([]test, 0) + wordTests = loadUnicodeData("WordBreakTest.txt", wordTests) + sentenceTests := make([]test, 0) + sentenceTests = loadUnicodeData("SentenceBreakTest.txt", sentenceTests) + + fmt.Fprintf(output, fileHeader, *url) + generateTestTables("Grapheme", graphemeTests) + generateTestTables("Word", wordTests) + generateTestTables("Sentence", sentenceTests) + + flushOutput() +} + +// WordBreakProperty.txt has the form: +// 05F0..05F2 ; Hebrew_Letter # Lo [3] HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW LIGATURE YIDDISH DOUBLE YOD +// FB1D ; Hebrew_Letter # Lo HEBREW LETTER YOD WITH HIRIQ +func openReader(file string) (input io.ReadCloser) { + if *localFiles { + f, err := os.Open(file) + if err != nil { + log.Fatal(err) + } + input = f + } else { + path := *url + file + resp, err := http.Get(path) + if err != nil { + log.Fatal(err) + } + if resp.StatusCode != 200 { + log.Fatal("bad GET status for "+file, resp.Status) + } + input = resp.Body + } + return +} + +func loadUnicodeData(filename string, tests []test) []test { + f := openReader(filename) + defer f.Close() + bufioReader := bufio.NewReader(f) + line, err := bufioReader.ReadString('\n') + for err == nil { + tests = parseLine(line, tests) + line, err = bufioReader.ReadString('\n') + } + // if the err was EOF still need to process last value + if err == io.EOF { + tests = parseLine(line, tests) + } + return tests +} + +const comment = "#" +const brk = "÷" +const nbrk = "×" + +type test [][]byte + +func parseLine(line string, tests []test) []test { + if strings.HasPrefix(line, comment) { + return tests + } + line = strings.TrimSpace(line) + if len(line) == 0 { + return tests + } + commentStart := strings.Index(line, comment) + if commentStart > 0 { + line = line[0:commentStart] + } + pieces := strings.Split(line, brk) + t := make(test, 0) + for _, piece := range pieces { + piece = strings.TrimSpace(piece) + if len(piece) > 0 { + codePoints := strings.Split(piece, nbrk) + word := "" + for _, codePoint := range codePoints { + codePoint = strings.TrimSpace(codePoint) + r, err := strconv.ParseInt(codePoint, 16, 64) + if err != nil { + log.Printf("err: %v for '%s'", err, string(r)) + return tests + } + + word += string(r) + } + t = append(t, []byte(word)) + } + } + tests = append(tests, t) + return tests +} + +func generateTestTables(prefix string, tests []test) { + fmt.Fprintf(output, testHeader, prefix) + for _, t := range tests { + fmt.Fprintf(output, "\t\t{\n") + fmt.Fprintf(output, "\t\t\tinput: %#v,\n", bytes.Join(t, []byte{})) + fmt.Fprintf(output, "\t\t\toutput: %s,\n", generateTest(t)) + fmt.Fprintf(output, "\t\t},\n") + } + fmt.Fprintf(output, "}\n") +} + +func generateTest(t test) string { + rv := "[][]byte{" + for _, 
+		rv += fmt.Sprintf("%#v,", te)
+	}
+	rv += "}"
+	return rv
+}
+
+const fileHeader = `// Generated by running
+// maketesttables --url=%s
+// DO NOT EDIT
+
+package textseg
+`
+
+const testHeader = `var unicode%sTests = []struct {
+	input  []byte
+	output [][]byte
+}{
+`
+
+func setupOutput() {
+	output = bufio.NewWriter(startGofmt())
+}
+
+// startGofmt connects output to a gofmt process if -output is set.
+func startGofmt() io.Writer {
+	if *outputFile == "" {
+		return os.Stdout
+	}
+	stdout, err := os.Create(*outputFile)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Pipe output to gofmt.
+	gofmt := exec.Command("gofmt")
+	fd, err := gofmt.StdinPipe()
+	if err != nil {
+		log.Fatal(err)
+	}
+	gofmt.Stdout = stdout
+	gofmt.Stderr = os.Stderr
+	err = gofmt.Start()
+	if err != nil {
+		log.Fatal(err)
+	}
+	return fd
+}
+
+func flushOutput() {
+	err := output.Flush()
+	if err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go b/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go
new file mode 100644
index 00000000..fab7e842
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/tables.go
@@ -0,0 +1,5700 @@
+// Generated by running
+// maketables --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/
+// DO NOT EDIT
+
+package textseg
+
+import (
+	"unicode"
+)
+
+var _GraphemeCR = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1},
+	},
+	LatinOffset: 1,
+}
+
+var _GraphemeControl = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x0, Hi: 0x9, Stride: 0x1},
+		unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1},
+		unicode.Range16{Lo: 0xe, Hi: 0x1f, Stride: 0x1},
+		unicode.Range16{Lo: 0x7f, Hi: 0x9f, Stride: 0x1},
+		unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1},
+		unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1},
+		unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1},
+		unicode.Range16{Lo: 0x200b, Hi: 0x200b, Stride: 0x1},
+		unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1},
+		unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1},
+		unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1},
+		unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1},
+		unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1},
+		unicode.Range16{Lo: 0x2065, Hi: 0x2065, Stride: 0x1},
+		unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1},
+		unicode.Range16{Lo: 0xd800, Hi: 0xdfff, Stride: 0x1},
+		unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1},
+		unicode.Range16{Lo: 0xfff0, Hi: 0xfff8, Stride: 0x1},
+		unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0000, Hi: 0xe0000, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0002, Hi: 0xe001f, Stride: 0x1},
+		unicode.Range32{Lo: 0xe0080, Hi: 0xe00ff, Stride: 0x1},
+		unicode.Range32{Lo: 0xe01f0, Hi: 0xe0fff, Stride: 0x1},
+	},
+	LatinOffset: 5,
+}
+
+var _GraphemeE_Base = &unicode.RangeTable{
+	R16: []unicode.Range16{
+		unicode.Range16{Lo: 0x261d, Hi: 0x261d, Stride: 0x1},
+		unicode.Range16{Lo: 0x26f9, Hi: 0x26f9, Stride: 0x1},
+		unicode.Range16{Lo: 0x270a, Hi: 0x270d, Stride: 0x1},
+	},
+	R32: []unicode.Range32{
+		unicode.Range32{Lo: 0x1f385, Hi: 0x1f385, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f3c3, Hi: 0x1f3c4, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f3ca, Hi: 0x1f3cb, Stride: 0x1},
unicode.Range32{Lo: 0x1f442, Hi: 0x1f443, Stride: 0x1}, + unicode.Range32{Lo: 0x1f446, Hi: 0x1f450, Stride: 0x1}, + unicode.Range32{Lo: 0x1f46e, Hi: 0x1f46e, Stride: 0x1}, + unicode.Range32{Lo: 0x1f470, Hi: 0x1f478, Stride: 0x1}, + unicode.Range32{Lo: 0x1f47c, Hi: 0x1f47c, Stride: 0x1}, + unicode.Range32{Lo: 0x1f481, Hi: 0x1f483, Stride: 0x1}, + unicode.Range32{Lo: 0x1f485, Hi: 0x1f487, Stride: 0x1}, + unicode.Range32{Lo: 0x1f4aa, Hi: 0x1f4aa, Stride: 0x1}, + unicode.Range32{Lo: 0x1f575, Hi: 0x1f575, Stride: 0x1}, + unicode.Range32{Lo: 0x1f57a, Hi: 0x1f57a, Stride: 0x1}, + unicode.Range32{Lo: 0x1f590, Hi: 0x1f590, Stride: 0x1}, + unicode.Range32{Lo: 0x1f595, Hi: 0x1f596, Stride: 0x1}, + unicode.Range32{Lo: 0x1f645, Hi: 0x1f647, Stride: 0x1}, + unicode.Range32{Lo: 0x1f64b, Hi: 0x1f64f, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6a3, Hi: 0x1f6a3, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6b4, Hi: 0x1f6b6, Stride: 0x1}, + unicode.Range32{Lo: 0x1f6c0, Hi: 0x1f6c0, Stride: 0x1}, + unicode.Range32{Lo: 0x1f918, Hi: 0x1f91e, Stride: 0x1}, + unicode.Range32{Lo: 0x1f926, Hi: 0x1f926, Stride: 0x1}, + unicode.Range32{Lo: 0x1f930, Hi: 0x1f930, Stride: 0x1}, + unicode.Range32{Lo: 0x1f933, Hi: 0x1f939, Stride: 0x1}, + unicode.Range32{Lo: 0x1f93c, Hi: 0x1f93e, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeE_Base_GAZ = &unicode.RangeTable{ + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f466, Hi: 0x1f469, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeE_Modifier = &unicode.RangeTable{ + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f3fb, Hi: 0x1f3ff, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeExtend = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1}, + unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1}, + unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1}, + unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1}, + unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1}, + unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1}, + unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1}, + unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1}, + unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1}, + unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1}, + unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1}, + unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1}, + unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1}, + unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1}, + unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1}, + unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1}, + unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1}, + unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1}, + unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1}, + unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1}, + unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1}, + unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1}, + unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1}, + unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1}, + unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1}, + unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1}, + unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1}, + unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1}, + unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1}, + unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1}, + unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1}, + unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1}, + unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1}, + unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1}, + unicode.Range16{Lo: 
0x9be, Hi: 0x9be, Stride: 0x1}, + unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1}, + unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1}, + unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1}, + unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1}, + unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1}, + unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1}, + unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1}, + unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1}, + unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1}, + unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1}, + unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1}, + unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1}, + unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1}, + unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1}, + unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1}, + unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1}, + unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1}, + unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1}, + unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1}, + unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1}, + unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1}, + unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1}, + unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1}, + unicode.Range16{Lo: 0xbbe, Hi: 0xbbe, Stride: 0x1}, + unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1}, + unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1}, + unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1}, + unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1}, + unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1}, + unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1}, + unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1}, + unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1}, + unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1}, + unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1}, + unicode.Range16{Lo: 0xcc2, Hi: 0xcc2, Stride: 0x1}, + unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1}, + unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1}, + unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1}, + unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1}, + unicode.Range16{Lo: 0xd3e, Hi: 0xd3e, Stride: 0x1}, + unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1}, + unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1}, + unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1}, + unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1}, + unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1}, + unicode.Range16{Lo: 0xdcf, Hi: 0xdcf, Stride: 0x1}, + unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1}, + unicode.Range16{Lo: 0xddf, Hi: 0xddf, Stride: 0x1}, + unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1}, + unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1}, + unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1}, + unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1}, + unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1}, + unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1}, + unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1}, + 
unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1}, + unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1}, + unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1}, + unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1}, + unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1}, + unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1}, + unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1}, + unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1}, + unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1}, + unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1}, + unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1}, + unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1}, + unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1}, + unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1}, + unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1}, + unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1}, + unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1}, + unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1}, + unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1}, + unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1}, + unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1}, + unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1}, + unicode.Range16{Lo: 0x1732, Hi: 0x1734, Stride: 0x1}, + unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1}, + unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1}, + unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1}, + unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1}, + unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1}, + unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1}, + unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1}, + unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1}, + unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1}, + unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1}, + unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1}, + unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1}, + unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1}, + unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1}, + unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1}, + unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1}, + unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1}, + unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1}, + unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1}, + unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1}, + unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1}, + unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1}, + unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1}, + unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1}, + unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, 
Stride: 0x1}, + unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1}, + unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1}, + unicode.Range16{Lo: 0x200c, Hi: 0x200c, Stride: 0x1}, + unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1}, + unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1}, + unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1}, + unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1}, + unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1}, + unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1}, + unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1}, + unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1}, + unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1}, + unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1}, + unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1}, + unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1}, + unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1}, + unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1}, + unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1}, + unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1}, + unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1}, + unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1}, + unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1}, + unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1}, + unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1}, + unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1}, + unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1}, + unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1}, + unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1}, + unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1}, + unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1}, + unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1}, + unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1}, + unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1}, + unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1}, + unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1}, + unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1}, + unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1}, + unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1}, + unicode.Range32{Lo: 
0x10a05, Hi: 0x10a06, Stride: 0x1}, + unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1}, + unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1}, + unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1}, + unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1}, + unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1}, + unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1}, + unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1}, + unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1}, + unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1}, + unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1}, + unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1}, + unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1}, + unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1}, + unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1}, + unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1}, + unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1}, + unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1}, + unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1}, + unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1}, + unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1}, + unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1}, + unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1}, + unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1}, + unicode.Range32{Lo: 0x1133e, Hi: 0x1133e, Stride: 0x1}, + unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1}, + unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1}, + unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1}, + unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1}, + unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1}, + unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1}, + unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1}, + unicode.Range32{Lo: 0x114b0, Hi: 0x114b0, Stride: 0x1}, + unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1}, + unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1}, + unicode.Range32{Lo: 0x114bd, Hi: 0x114bd, Stride: 0x1}, + unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1}, + unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115af, Hi: 0x115af, Stride: 0x1}, + unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1}, + unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1}, + unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1}, + unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1}, + unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1}, + unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1}, + unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1}, + unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1}, + unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1}, + unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1}, + unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1}, + unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1}, + unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1}, + unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1}, + unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1}, + unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1}, + unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1}, + unicode.Range32{Lo: 0x16af0, Hi: 
0x16af4, Stride: 0x1}, + unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1}, + unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d165, Hi: 0x1d165, Stride: 0x1}, + unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1}, + unicode.Range32{Lo: 0x1d16e, Hi: 0x1d172, Stride: 0x1}, + unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1}, + unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1}, + unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1}, + unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1}, + unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1}, + unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1}, + unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1}, + unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1}, + unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1}, + unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1}, + unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1}, + unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1}, + unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1}, + unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1}, + unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1}, + unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1}, + unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1}, + unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeGlue_After_Zwj = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x2764, Hi: 0x2764, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x1f48b, Hi: 0x1f48b, Stride: 0x1}, + unicode.Range32{Lo: 0x1f5e8, Hi: 0x1f5e8, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeL = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x1100, Hi: 0x115f, Stride: 0x1}, + unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeLF = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _GraphemeLV = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xac00, Hi: 0xac00, Stride: 0x1}, + unicode.Range16{Lo: 0xac1c, Hi: 0xac1c, Stride: 0x1}, + unicode.Range16{Lo: 0xac38, Hi: 0xac38, Stride: 0x1}, + unicode.Range16{Lo: 0xac54, Hi: 0xac54, Stride: 0x1}, + unicode.Range16{Lo: 0xac70, Hi: 0xac70, Stride: 0x1}, + unicode.Range16{Lo: 0xac8c, Hi: 0xac8c, Stride: 0x1}, + unicode.Range16{Lo: 0xaca8, Hi: 0xaca8, Stride: 0x1}, + unicode.Range16{Lo: 0xacc4, Hi: 0xacc4, Stride: 0x1}, + unicode.Range16{Lo: 0xace0, Hi: 0xace0, Stride: 0x1}, + unicode.Range16{Lo: 0xacfc, Hi: 0xacfc, Stride: 0x1}, + unicode.Range16{Lo: 0xad18, Hi: 0xad18, Stride: 0x1}, + unicode.Range16{Lo: 0xad34, Hi: 0xad34, Stride: 0x1}, + unicode.Range16{Lo: 0xad50, Hi: 0xad50, Stride: 0x1}, + unicode.Range16{Lo: 0xad6c, Hi: 0xad6c, Stride: 0x1}, + unicode.Range16{Lo: 0xad88, Hi: 0xad88, Stride: 0x1}, + unicode.Range16{Lo: 0xada4, Hi: 0xada4, Stride: 0x1}, + unicode.Range16{Lo: 0xadc0, Hi: 0xadc0, Stride: 0x1}, + unicode.Range16{Lo: 0xaddc, Hi: 0xaddc, Stride: 0x1}, + unicode.Range16{Lo: 0xadf8, Hi: 0xadf8, Stride: 0x1}, + unicode.Range16{Lo: 0xae14, Hi: 0xae14, Stride: 0x1}, + unicode.Range16{Lo: 0xae30, Hi: 0xae30, Stride: 0x1}, + unicode.Range16{Lo: 0xae4c, Hi: 0xae4c, Stride: 0x1}, + unicode.Range16{Lo: 0xae68, Hi: 0xae68, Stride: 0x1}, + unicode.Range16{Lo: 0xae84, Hi: 0xae84, 
Stride: 0x1}, + unicode.Range16{Lo: 0xaea0, Hi: 0xaea0, Stride: 0x1}, + unicode.Range16{Lo: 0xaebc, Hi: 0xaebc, Stride: 0x1}, + unicode.Range16{Lo: 0xaed8, Hi: 0xaed8, Stride: 0x1}, + unicode.Range16{Lo: 0xaef4, Hi: 0xaef4, Stride: 0x1}, + unicode.Range16{Lo: 0xaf10, Hi: 0xaf10, Stride: 0x1}, + unicode.Range16{Lo: 0xaf2c, Hi: 0xaf2c, Stride: 0x1}, + unicode.Range16{Lo: 0xaf48, Hi: 0xaf48, Stride: 0x1}, + unicode.Range16{Lo: 0xaf64, Hi: 0xaf64, Stride: 0x1}, + unicode.Range16{Lo: 0xaf80, Hi: 0xaf80, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9c, Hi: 0xaf9c, Stride: 0x1}, + unicode.Range16{Lo: 0xafb8, Hi: 0xafb8, Stride: 0x1}, + unicode.Range16{Lo: 0xafd4, Hi: 0xafd4, Stride: 0x1}, + unicode.Range16{Lo: 0xaff0, Hi: 0xaff0, Stride: 0x1}, + unicode.Range16{Lo: 0xb00c, Hi: 0xb00c, Stride: 0x1}, + unicode.Range16{Lo: 0xb028, Hi: 0xb028, Stride: 0x1}, + unicode.Range16{Lo: 0xb044, Hi: 0xb044, Stride: 0x1}, + unicode.Range16{Lo: 0xb060, Hi: 0xb060, Stride: 0x1}, + unicode.Range16{Lo: 0xb07c, Hi: 0xb07c, Stride: 0x1}, + unicode.Range16{Lo: 0xb098, Hi: 0xb098, Stride: 0x1}, + unicode.Range16{Lo: 0xb0b4, Hi: 0xb0b4, Stride: 0x1}, + unicode.Range16{Lo: 0xb0d0, Hi: 0xb0d0, Stride: 0x1}, + unicode.Range16{Lo: 0xb0ec, Hi: 0xb0ec, Stride: 0x1}, + unicode.Range16{Lo: 0xb108, Hi: 0xb108, Stride: 0x1}, + unicode.Range16{Lo: 0xb124, Hi: 0xb124, Stride: 0x1}, + unicode.Range16{Lo: 0xb140, Hi: 0xb140, Stride: 0x1}, + unicode.Range16{Lo: 0xb15c, Hi: 0xb15c, Stride: 0x1}, + unicode.Range16{Lo: 0xb178, Hi: 0xb178, Stride: 0x1}, + unicode.Range16{Lo: 0xb194, Hi: 0xb194, Stride: 0x1}, + unicode.Range16{Lo: 0xb1b0, Hi: 0xb1b0, Stride: 0x1}, + unicode.Range16{Lo: 0xb1cc, Hi: 0xb1cc, Stride: 0x1}, + unicode.Range16{Lo: 0xb1e8, Hi: 0xb1e8, Stride: 0x1}, + unicode.Range16{Lo: 0xb204, Hi: 0xb204, Stride: 0x1}, + unicode.Range16{Lo: 0xb220, Hi: 0xb220, Stride: 0x1}, + unicode.Range16{Lo: 0xb23c, Hi: 0xb23c, Stride: 0x1}, + unicode.Range16{Lo: 0xb258, Hi: 0xb258, Stride: 0x1}, + unicode.Range16{Lo: 0xb274, Hi: 0xb274, Stride: 0x1}, + unicode.Range16{Lo: 0xb290, Hi: 0xb290, Stride: 0x1}, + unicode.Range16{Lo: 0xb2ac, Hi: 0xb2ac, Stride: 0x1}, + unicode.Range16{Lo: 0xb2c8, Hi: 0xb2c8, Stride: 0x1}, + unicode.Range16{Lo: 0xb2e4, Hi: 0xb2e4, Stride: 0x1}, + unicode.Range16{Lo: 0xb300, Hi: 0xb300, Stride: 0x1}, + unicode.Range16{Lo: 0xb31c, Hi: 0xb31c, Stride: 0x1}, + unicode.Range16{Lo: 0xb338, Hi: 0xb338, Stride: 0x1}, + unicode.Range16{Lo: 0xb354, Hi: 0xb354, Stride: 0x1}, + unicode.Range16{Lo: 0xb370, Hi: 0xb370, Stride: 0x1}, + unicode.Range16{Lo: 0xb38c, Hi: 0xb38c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3a8, Hi: 0xb3a8, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c4, Hi: 0xb3c4, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e0, Hi: 0xb3e0, Stride: 0x1}, + unicode.Range16{Lo: 0xb3fc, Hi: 0xb3fc, Stride: 0x1}, + unicode.Range16{Lo: 0xb418, Hi: 0xb418, Stride: 0x1}, + unicode.Range16{Lo: 0xb434, Hi: 0xb434, Stride: 0x1}, + unicode.Range16{Lo: 0xb450, Hi: 0xb450, Stride: 0x1}, + unicode.Range16{Lo: 0xb46c, Hi: 0xb46c, Stride: 0x1}, + unicode.Range16{Lo: 0xb488, Hi: 0xb488, Stride: 0x1}, + unicode.Range16{Lo: 0xb4a4, Hi: 0xb4a4, Stride: 0x1}, + unicode.Range16{Lo: 0xb4c0, Hi: 0xb4c0, Stride: 0x1}, + unicode.Range16{Lo: 0xb4dc, Hi: 0xb4dc, Stride: 0x1}, + unicode.Range16{Lo: 0xb4f8, Hi: 0xb4f8, Stride: 0x1}, + unicode.Range16{Lo: 0xb514, Hi: 0xb514, Stride: 0x1}, + unicode.Range16{Lo: 0xb530, Hi: 0xb530, Stride: 0x1}, + unicode.Range16{Lo: 0xb54c, Hi: 0xb54c, Stride: 0x1}, + unicode.Range16{Lo: 0xb568, Hi: 0xb568, Stride: 0x1}, + 
unicode.Range16{Lo: 0xb584, Hi: 0xb584, Stride: 0x1}, + unicode.Range16{Lo: 0xb5a0, Hi: 0xb5a0, Stride: 0x1}, + unicode.Range16{Lo: 0xb5bc, Hi: 0xb5bc, Stride: 0x1}, + unicode.Range16{Lo: 0xb5d8, Hi: 0xb5d8, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f4, Hi: 0xb5f4, Stride: 0x1}, + unicode.Range16{Lo: 0xb610, Hi: 0xb610, Stride: 0x1}, + unicode.Range16{Lo: 0xb62c, Hi: 0xb62c, Stride: 0x1}, + unicode.Range16{Lo: 0xb648, Hi: 0xb648, Stride: 0x1}, + unicode.Range16{Lo: 0xb664, Hi: 0xb664, Stride: 0x1}, + unicode.Range16{Lo: 0xb680, Hi: 0xb680, Stride: 0x1}, + unicode.Range16{Lo: 0xb69c, Hi: 0xb69c, Stride: 0x1}, + unicode.Range16{Lo: 0xb6b8, Hi: 0xb6b8, Stride: 0x1}, + unicode.Range16{Lo: 0xb6d4, Hi: 0xb6d4, Stride: 0x1}, + unicode.Range16{Lo: 0xb6f0, Hi: 0xb6f0, Stride: 0x1}, + unicode.Range16{Lo: 0xb70c, Hi: 0xb70c, Stride: 0x1}, + unicode.Range16{Lo: 0xb728, Hi: 0xb728, Stride: 0x1}, + unicode.Range16{Lo: 0xb744, Hi: 0xb744, Stride: 0x1}, + unicode.Range16{Lo: 0xb760, Hi: 0xb760, Stride: 0x1}, + unicode.Range16{Lo: 0xb77c, Hi: 0xb77c, Stride: 0x1}, + unicode.Range16{Lo: 0xb798, Hi: 0xb798, Stride: 0x1}, + unicode.Range16{Lo: 0xb7b4, Hi: 0xb7b4, Stride: 0x1}, + unicode.Range16{Lo: 0xb7d0, Hi: 0xb7d0, Stride: 0x1}, + unicode.Range16{Lo: 0xb7ec, Hi: 0xb7ec, Stride: 0x1}, + unicode.Range16{Lo: 0xb808, Hi: 0xb808, Stride: 0x1}, + unicode.Range16{Lo: 0xb824, Hi: 0xb824, Stride: 0x1}, + unicode.Range16{Lo: 0xb840, Hi: 0xb840, Stride: 0x1}, + unicode.Range16{Lo: 0xb85c, Hi: 0xb85c, Stride: 0x1}, + unicode.Range16{Lo: 0xb878, Hi: 0xb878, Stride: 0x1}, + unicode.Range16{Lo: 0xb894, Hi: 0xb894, Stride: 0x1}, + unicode.Range16{Lo: 0xb8b0, Hi: 0xb8b0, Stride: 0x1}, + unicode.Range16{Lo: 0xb8cc, Hi: 0xb8cc, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e8, Hi: 0xb8e8, Stride: 0x1}, + unicode.Range16{Lo: 0xb904, Hi: 0xb904, Stride: 0x1}, + unicode.Range16{Lo: 0xb920, Hi: 0xb920, Stride: 0x1}, + unicode.Range16{Lo: 0xb93c, Hi: 0xb93c, Stride: 0x1}, + unicode.Range16{Lo: 0xb958, Hi: 0xb958, Stride: 0x1}, + unicode.Range16{Lo: 0xb974, Hi: 0xb974, Stride: 0x1}, + unicode.Range16{Lo: 0xb990, Hi: 0xb990, Stride: 0x1}, + unicode.Range16{Lo: 0xb9ac, Hi: 0xb9ac, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c8, Hi: 0xb9c8, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e4, Hi: 0xb9e4, Stride: 0x1}, + unicode.Range16{Lo: 0xba00, Hi: 0xba00, Stride: 0x1}, + unicode.Range16{Lo: 0xba1c, Hi: 0xba1c, Stride: 0x1}, + unicode.Range16{Lo: 0xba38, Hi: 0xba38, Stride: 0x1}, + unicode.Range16{Lo: 0xba54, Hi: 0xba54, Stride: 0x1}, + unicode.Range16{Lo: 0xba70, Hi: 0xba70, Stride: 0x1}, + unicode.Range16{Lo: 0xba8c, Hi: 0xba8c, Stride: 0x1}, + unicode.Range16{Lo: 0xbaa8, Hi: 0xbaa8, Stride: 0x1}, + unicode.Range16{Lo: 0xbac4, Hi: 0xbac4, Stride: 0x1}, + unicode.Range16{Lo: 0xbae0, Hi: 0xbae0, Stride: 0x1}, + unicode.Range16{Lo: 0xbafc, Hi: 0xbafc, Stride: 0x1}, + unicode.Range16{Lo: 0xbb18, Hi: 0xbb18, Stride: 0x1}, + unicode.Range16{Lo: 0xbb34, Hi: 0xbb34, Stride: 0x1}, + unicode.Range16{Lo: 0xbb50, Hi: 0xbb50, Stride: 0x1}, + unicode.Range16{Lo: 0xbb6c, Hi: 0xbb6c, Stride: 0x1}, + unicode.Range16{Lo: 0xbb88, Hi: 0xbb88, Stride: 0x1}, + unicode.Range16{Lo: 0xbba4, Hi: 0xbba4, Stride: 0x1}, + unicode.Range16{Lo: 0xbbc0, Hi: 0xbbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbbdc, Hi: 0xbbdc, Stride: 0x1}, + unicode.Range16{Lo: 0xbbf8, Hi: 0xbbf8, Stride: 0x1}, + unicode.Range16{Lo: 0xbc14, Hi: 0xbc14, Stride: 0x1}, + unicode.Range16{Lo: 0xbc30, Hi: 0xbc30, Stride: 0x1}, + unicode.Range16{Lo: 0xbc4c, Hi: 0xbc4c, Stride: 0x1}, + unicode.Range16{Lo: 
0xbc68, Hi: 0xbc68, Stride: 0x1}, + unicode.Range16{Lo: 0xbc84, Hi: 0xbc84, Stride: 0x1}, + unicode.Range16{Lo: 0xbca0, Hi: 0xbca0, Stride: 0x1}, + unicode.Range16{Lo: 0xbcbc, Hi: 0xbcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd8, Hi: 0xbcd8, Stride: 0x1}, + unicode.Range16{Lo: 0xbcf4, Hi: 0xbcf4, Stride: 0x1}, + unicode.Range16{Lo: 0xbd10, Hi: 0xbd10, Stride: 0x1}, + unicode.Range16{Lo: 0xbd2c, Hi: 0xbd2c, Stride: 0x1}, + unicode.Range16{Lo: 0xbd48, Hi: 0xbd48, Stride: 0x1}, + unicode.Range16{Lo: 0xbd64, Hi: 0xbd64, Stride: 0x1}, + unicode.Range16{Lo: 0xbd80, Hi: 0xbd80, Stride: 0x1}, + unicode.Range16{Lo: 0xbd9c, Hi: 0xbd9c, Stride: 0x1}, + unicode.Range16{Lo: 0xbdb8, Hi: 0xbdb8, Stride: 0x1}, + unicode.Range16{Lo: 0xbdd4, Hi: 0xbdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xbdf0, Hi: 0xbdf0, Stride: 0x1}, + unicode.Range16{Lo: 0xbe0c, Hi: 0xbe0c, Stride: 0x1}, + unicode.Range16{Lo: 0xbe28, Hi: 0xbe28, Stride: 0x1}, + unicode.Range16{Lo: 0xbe44, Hi: 0xbe44, Stride: 0x1}, + unicode.Range16{Lo: 0xbe60, Hi: 0xbe60, Stride: 0x1}, + unicode.Range16{Lo: 0xbe7c, Hi: 0xbe7c, Stride: 0x1}, + unicode.Range16{Lo: 0xbe98, Hi: 0xbe98, Stride: 0x1}, + unicode.Range16{Lo: 0xbeb4, Hi: 0xbeb4, Stride: 0x1}, + unicode.Range16{Lo: 0xbed0, Hi: 0xbed0, Stride: 0x1}, + unicode.Range16{Lo: 0xbeec, Hi: 0xbeec, Stride: 0x1}, + unicode.Range16{Lo: 0xbf08, Hi: 0xbf08, Stride: 0x1}, + unicode.Range16{Lo: 0xbf24, Hi: 0xbf24, Stride: 0x1}, + unicode.Range16{Lo: 0xbf40, Hi: 0xbf40, Stride: 0x1}, + unicode.Range16{Lo: 0xbf5c, Hi: 0xbf5c, Stride: 0x1}, + unicode.Range16{Lo: 0xbf78, Hi: 0xbf78, Stride: 0x1}, + unicode.Range16{Lo: 0xbf94, Hi: 0xbf94, Stride: 0x1}, + unicode.Range16{Lo: 0xbfb0, Hi: 0xbfb0, Stride: 0x1}, + unicode.Range16{Lo: 0xbfcc, Hi: 0xbfcc, Stride: 0x1}, + unicode.Range16{Lo: 0xbfe8, Hi: 0xbfe8, Stride: 0x1}, + unicode.Range16{Lo: 0xc004, Hi: 0xc004, Stride: 0x1}, + unicode.Range16{Lo: 0xc020, Hi: 0xc020, Stride: 0x1}, + unicode.Range16{Lo: 0xc03c, Hi: 0xc03c, Stride: 0x1}, + unicode.Range16{Lo: 0xc058, Hi: 0xc058, Stride: 0x1}, + unicode.Range16{Lo: 0xc074, Hi: 0xc074, Stride: 0x1}, + unicode.Range16{Lo: 0xc090, Hi: 0xc090, Stride: 0x1}, + unicode.Range16{Lo: 0xc0ac, Hi: 0xc0ac, Stride: 0x1}, + unicode.Range16{Lo: 0xc0c8, Hi: 0xc0c8, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e4, Hi: 0xc0e4, Stride: 0x1}, + unicode.Range16{Lo: 0xc100, Hi: 0xc100, Stride: 0x1}, + unicode.Range16{Lo: 0xc11c, Hi: 0xc11c, Stride: 0x1}, + unicode.Range16{Lo: 0xc138, Hi: 0xc138, Stride: 0x1}, + unicode.Range16{Lo: 0xc154, Hi: 0xc154, Stride: 0x1}, + unicode.Range16{Lo: 0xc170, Hi: 0xc170, Stride: 0x1}, + unicode.Range16{Lo: 0xc18c, Hi: 0xc18c, Stride: 0x1}, + unicode.Range16{Lo: 0xc1a8, Hi: 0xc1a8, Stride: 0x1}, + unicode.Range16{Lo: 0xc1c4, Hi: 0xc1c4, Stride: 0x1}, + unicode.Range16{Lo: 0xc1e0, Hi: 0xc1e0, Stride: 0x1}, + unicode.Range16{Lo: 0xc1fc, Hi: 0xc1fc, Stride: 0x1}, + unicode.Range16{Lo: 0xc218, Hi: 0xc218, Stride: 0x1}, + unicode.Range16{Lo: 0xc234, Hi: 0xc234, Stride: 0x1}, + unicode.Range16{Lo: 0xc250, Hi: 0xc250, Stride: 0x1}, + unicode.Range16{Lo: 0xc26c, Hi: 0xc26c, Stride: 0x1}, + unicode.Range16{Lo: 0xc288, Hi: 0xc288, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a4, Hi: 0xc2a4, Stride: 0x1}, + unicode.Range16{Lo: 0xc2c0, Hi: 0xc2c0, Stride: 0x1}, + unicode.Range16{Lo: 0xc2dc, Hi: 0xc2dc, Stride: 0x1}, + unicode.Range16{Lo: 0xc2f8, Hi: 0xc2f8, Stride: 0x1}, + unicode.Range16{Lo: 0xc314, Hi: 0xc314, Stride: 0x1}, + unicode.Range16{Lo: 0xc330, Hi: 0xc330, Stride: 0x1}, + unicode.Range16{Lo: 0xc34c, Hi: 0xc34c, 
Stride: 0x1}, + unicode.Range16{Lo: 0xc368, Hi: 0xc368, Stride: 0x1}, + unicode.Range16{Lo: 0xc384, Hi: 0xc384, Stride: 0x1}, + unicode.Range16{Lo: 0xc3a0, Hi: 0xc3a0, Stride: 0x1}, + unicode.Range16{Lo: 0xc3bc, Hi: 0xc3bc, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d8, Hi: 0xc3d8, Stride: 0x1}, + unicode.Range16{Lo: 0xc3f4, Hi: 0xc3f4, Stride: 0x1}, + unicode.Range16{Lo: 0xc410, Hi: 0xc410, Stride: 0x1}, + unicode.Range16{Lo: 0xc42c, Hi: 0xc42c, Stride: 0x1}, + unicode.Range16{Lo: 0xc448, Hi: 0xc448, Stride: 0x1}, + unicode.Range16{Lo: 0xc464, Hi: 0xc464, Stride: 0x1}, + unicode.Range16{Lo: 0xc480, Hi: 0xc480, Stride: 0x1}, + unicode.Range16{Lo: 0xc49c, Hi: 0xc49c, Stride: 0x1}, + unicode.Range16{Lo: 0xc4b8, Hi: 0xc4b8, Stride: 0x1}, + unicode.Range16{Lo: 0xc4d4, Hi: 0xc4d4, Stride: 0x1}, + unicode.Range16{Lo: 0xc4f0, Hi: 0xc4f0, Stride: 0x1}, + unicode.Range16{Lo: 0xc50c, Hi: 0xc50c, Stride: 0x1}, + unicode.Range16{Lo: 0xc528, Hi: 0xc528, Stride: 0x1}, + unicode.Range16{Lo: 0xc544, Hi: 0xc544, Stride: 0x1}, + unicode.Range16{Lo: 0xc560, Hi: 0xc560, Stride: 0x1}, + unicode.Range16{Lo: 0xc57c, Hi: 0xc57c, Stride: 0x1}, + unicode.Range16{Lo: 0xc598, Hi: 0xc598, Stride: 0x1}, + unicode.Range16{Lo: 0xc5b4, Hi: 0xc5b4, Stride: 0x1}, + unicode.Range16{Lo: 0xc5d0, Hi: 0xc5d0, Stride: 0x1}, + unicode.Range16{Lo: 0xc5ec, Hi: 0xc5ec, Stride: 0x1}, + unicode.Range16{Lo: 0xc608, Hi: 0xc608, Stride: 0x1}, + unicode.Range16{Lo: 0xc624, Hi: 0xc624, Stride: 0x1}, + unicode.Range16{Lo: 0xc640, Hi: 0xc640, Stride: 0x1}, + unicode.Range16{Lo: 0xc65c, Hi: 0xc65c, Stride: 0x1}, + unicode.Range16{Lo: 0xc678, Hi: 0xc678, Stride: 0x1}, + unicode.Range16{Lo: 0xc694, Hi: 0xc694, Stride: 0x1}, + unicode.Range16{Lo: 0xc6b0, Hi: 0xc6b0, Stride: 0x1}, + unicode.Range16{Lo: 0xc6cc, Hi: 0xc6cc, Stride: 0x1}, + unicode.Range16{Lo: 0xc6e8, Hi: 0xc6e8, Stride: 0x1}, + unicode.Range16{Lo: 0xc704, Hi: 0xc704, Stride: 0x1}, + unicode.Range16{Lo: 0xc720, Hi: 0xc720, Stride: 0x1}, + unicode.Range16{Lo: 0xc73c, Hi: 0xc73c, Stride: 0x1}, + unicode.Range16{Lo: 0xc758, Hi: 0xc758, Stride: 0x1}, + unicode.Range16{Lo: 0xc774, Hi: 0xc774, Stride: 0x1}, + unicode.Range16{Lo: 0xc790, Hi: 0xc790, Stride: 0x1}, + unicode.Range16{Lo: 0xc7ac, Hi: 0xc7ac, Stride: 0x1}, + unicode.Range16{Lo: 0xc7c8, Hi: 0xc7c8, Stride: 0x1}, + unicode.Range16{Lo: 0xc7e4, Hi: 0xc7e4, Stride: 0x1}, + unicode.Range16{Lo: 0xc800, Hi: 0xc800, Stride: 0x1}, + unicode.Range16{Lo: 0xc81c, Hi: 0xc81c, Stride: 0x1}, + unicode.Range16{Lo: 0xc838, Hi: 0xc838, Stride: 0x1}, + unicode.Range16{Lo: 0xc854, Hi: 0xc854, Stride: 0x1}, + unicode.Range16{Lo: 0xc870, Hi: 0xc870, Stride: 0x1}, + unicode.Range16{Lo: 0xc88c, Hi: 0xc88c, Stride: 0x1}, + unicode.Range16{Lo: 0xc8a8, Hi: 0xc8a8, Stride: 0x1}, + unicode.Range16{Lo: 0xc8c4, Hi: 0xc8c4, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e0, Hi: 0xc8e0, Stride: 0x1}, + unicode.Range16{Lo: 0xc8fc, Hi: 0xc8fc, Stride: 0x1}, + unicode.Range16{Lo: 0xc918, Hi: 0xc918, Stride: 0x1}, + unicode.Range16{Lo: 0xc934, Hi: 0xc934, Stride: 0x1}, + unicode.Range16{Lo: 0xc950, Hi: 0xc950, Stride: 0x1}, + unicode.Range16{Lo: 0xc96c, Hi: 0xc96c, Stride: 0x1}, + unicode.Range16{Lo: 0xc988, Hi: 0xc988, Stride: 0x1}, + unicode.Range16{Lo: 0xc9a4, Hi: 0xc9a4, Stride: 0x1}, + unicode.Range16{Lo: 0xc9c0, Hi: 0xc9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xc9dc, Hi: 0xc9dc, Stride: 0x1}, + unicode.Range16{Lo: 0xc9f8, Hi: 0xc9f8, Stride: 0x1}, + unicode.Range16{Lo: 0xca14, Hi: 0xca14, Stride: 0x1}, + unicode.Range16{Lo: 0xca30, Hi: 0xca30, Stride: 0x1}, + 
unicode.Range16{Lo: 0xca4c, Hi: 0xca4c, Stride: 0x1}, + unicode.Range16{Lo: 0xca68, Hi: 0xca68, Stride: 0x1}, + unicode.Range16{Lo: 0xca84, Hi: 0xca84, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa0, Hi: 0xcaa0, Stride: 0x1}, + unicode.Range16{Lo: 0xcabc, Hi: 0xcabc, Stride: 0x1}, + unicode.Range16{Lo: 0xcad8, Hi: 0xcad8, Stride: 0x1}, + unicode.Range16{Lo: 0xcaf4, Hi: 0xcaf4, Stride: 0x1}, + unicode.Range16{Lo: 0xcb10, Hi: 0xcb10, Stride: 0x1}, + unicode.Range16{Lo: 0xcb2c, Hi: 0xcb2c, Stride: 0x1}, + unicode.Range16{Lo: 0xcb48, Hi: 0xcb48, Stride: 0x1}, + unicode.Range16{Lo: 0xcb64, Hi: 0xcb64, Stride: 0x1}, + unicode.Range16{Lo: 0xcb80, Hi: 0xcb80, Stride: 0x1}, + unicode.Range16{Lo: 0xcb9c, Hi: 0xcb9c, Stride: 0x1}, + unicode.Range16{Lo: 0xcbb8, Hi: 0xcbb8, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd4, Hi: 0xcbd4, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf0, Hi: 0xcbf0, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0c, Hi: 0xcc0c, Stride: 0x1}, + unicode.Range16{Lo: 0xcc28, Hi: 0xcc28, Stride: 0x1}, + unicode.Range16{Lo: 0xcc44, Hi: 0xcc44, Stride: 0x1}, + unicode.Range16{Lo: 0xcc60, Hi: 0xcc60, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7c, Hi: 0xcc7c, Stride: 0x1}, + unicode.Range16{Lo: 0xcc98, Hi: 0xcc98, Stride: 0x1}, + unicode.Range16{Lo: 0xccb4, Hi: 0xccb4, Stride: 0x1}, + unicode.Range16{Lo: 0xccd0, Hi: 0xccd0, Stride: 0x1}, + unicode.Range16{Lo: 0xccec, Hi: 0xccec, Stride: 0x1}, + unicode.Range16{Lo: 0xcd08, Hi: 0xcd08, Stride: 0x1}, + unicode.Range16{Lo: 0xcd24, Hi: 0xcd24, Stride: 0x1}, + unicode.Range16{Lo: 0xcd40, Hi: 0xcd40, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5c, Hi: 0xcd5c, Stride: 0x1}, + unicode.Range16{Lo: 0xcd78, Hi: 0xcd78, Stride: 0x1}, + unicode.Range16{Lo: 0xcd94, Hi: 0xcd94, Stride: 0x1}, + unicode.Range16{Lo: 0xcdb0, Hi: 0xcdb0, Stride: 0x1}, + unicode.Range16{Lo: 0xcdcc, Hi: 0xcdcc, Stride: 0x1}, + unicode.Range16{Lo: 0xcde8, Hi: 0xcde8, Stride: 0x1}, + unicode.Range16{Lo: 0xce04, Hi: 0xce04, Stride: 0x1}, + unicode.Range16{Lo: 0xce20, Hi: 0xce20, Stride: 0x1}, + unicode.Range16{Lo: 0xce3c, Hi: 0xce3c, Stride: 0x1}, + unicode.Range16{Lo: 0xce58, Hi: 0xce58, Stride: 0x1}, + unicode.Range16{Lo: 0xce74, Hi: 0xce74, Stride: 0x1}, + unicode.Range16{Lo: 0xce90, Hi: 0xce90, Stride: 0x1}, + unicode.Range16{Lo: 0xceac, Hi: 0xceac, Stride: 0x1}, + unicode.Range16{Lo: 0xcec8, Hi: 0xcec8, Stride: 0x1}, + unicode.Range16{Lo: 0xcee4, Hi: 0xcee4, Stride: 0x1}, + unicode.Range16{Lo: 0xcf00, Hi: 0xcf00, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1c, Hi: 0xcf1c, Stride: 0x1}, + unicode.Range16{Lo: 0xcf38, Hi: 0xcf38, Stride: 0x1}, + unicode.Range16{Lo: 0xcf54, Hi: 0xcf54, Stride: 0x1}, + unicode.Range16{Lo: 0xcf70, Hi: 0xcf70, Stride: 0x1}, + unicode.Range16{Lo: 0xcf8c, Hi: 0xcf8c, Stride: 0x1}, + unicode.Range16{Lo: 0xcfa8, Hi: 0xcfa8, Stride: 0x1}, + unicode.Range16{Lo: 0xcfc4, Hi: 0xcfc4, Stride: 0x1}, + unicode.Range16{Lo: 0xcfe0, Hi: 0xcfe0, Stride: 0x1}, + unicode.Range16{Lo: 0xcffc, Hi: 0xcffc, Stride: 0x1}, + unicode.Range16{Lo: 0xd018, Hi: 0xd018, Stride: 0x1}, + unicode.Range16{Lo: 0xd034, Hi: 0xd034, Stride: 0x1}, + unicode.Range16{Lo: 0xd050, Hi: 0xd050, Stride: 0x1}, + unicode.Range16{Lo: 0xd06c, Hi: 0xd06c, Stride: 0x1}, + unicode.Range16{Lo: 0xd088, Hi: 0xd088, Stride: 0x1}, + unicode.Range16{Lo: 0xd0a4, Hi: 0xd0a4, Stride: 0x1}, + unicode.Range16{Lo: 0xd0c0, Hi: 0xd0c0, Stride: 0x1}, + unicode.Range16{Lo: 0xd0dc, Hi: 0xd0dc, Stride: 0x1}, + unicode.Range16{Lo: 0xd0f8, Hi: 0xd0f8, Stride: 0x1}, + unicode.Range16{Lo: 0xd114, Hi: 0xd114, Stride: 0x1}, + unicode.Range16{Lo: 
0xd130, Hi: 0xd130, Stride: 0x1}, + unicode.Range16{Lo: 0xd14c, Hi: 0xd14c, Stride: 0x1}, + unicode.Range16{Lo: 0xd168, Hi: 0xd168, Stride: 0x1}, + unicode.Range16{Lo: 0xd184, Hi: 0xd184, Stride: 0x1}, + unicode.Range16{Lo: 0xd1a0, Hi: 0xd1a0, Stride: 0x1}, + unicode.Range16{Lo: 0xd1bc, Hi: 0xd1bc, Stride: 0x1}, + unicode.Range16{Lo: 0xd1d8, Hi: 0xd1d8, Stride: 0x1}, + unicode.Range16{Lo: 0xd1f4, Hi: 0xd1f4, Stride: 0x1}, + unicode.Range16{Lo: 0xd210, Hi: 0xd210, Stride: 0x1}, + unicode.Range16{Lo: 0xd22c, Hi: 0xd22c, Stride: 0x1}, + unicode.Range16{Lo: 0xd248, Hi: 0xd248, Stride: 0x1}, + unicode.Range16{Lo: 0xd264, Hi: 0xd264, Stride: 0x1}, + unicode.Range16{Lo: 0xd280, Hi: 0xd280, Stride: 0x1}, + unicode.Range16{Lo: 0xd29c, Hi: 0xd29c, Stride: 0x1}, + unicode.Range16{Lo: 0xd2b8, Hi: 0xd2b8, Stride: 0x1}, + unicode.Range16{Lo: 0xd2d4, Hi: 0xd2d4, Stride: 0x1}, + unicode.Range16{Lo: 0xd2f0, Hi: 0xd2f0, Stride: 0x1}, + unicode.Range16{Lo: 0xd30c, Hi: 0xd30c, Stride: 0x1}, + unicode.Range16{Lo: 0xd328, Hi: 0xd328, Stride: 0x1}, + unicode.Range16{Lo: 0xd344, Hi: 0xd344, Stride: 0x1}, + unicode.Range16{Lo: 0xd360, Hi: 0xd360, Stride: 0x1}, + unicode.Range16{Lo: 0xd37c, Hi: 0xd37c, Stride: 0x1}, + unicode.Range16{Lo: 0xd398, Hi: 0xd398, Stride: 0x1}, + unicode.Range16{Lo: 0xd3b4, Hi: 0xd3b4, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d0, Hi: 0xd3d0, Stride: 0x1}, + unicode.Range16{Lo: 0xd3ec, Hi: 0xd3ec, Stride: 0x1}, + unicode.Range16{Lo: 0xd408, Hi: 0xd408, Stride: 0x1}, + unicode.Range16{Lo: 0xd424, Hi: 0xd424, Stride: 0x1}, + unicode.Range16{Lo: 0xd440, Hi: 0xd440, Stride: 0x1}, + unicode.Range16{Lo: 0xd45c, Hi: 0xd45c, Stride: 0x1}, + unicode.Range16{Lo: 0xd478, Hi: 0xd478, Stride: 0x1}, + unicode.Range16{Lo: 0xd494, Hi: 0xd494, Stride: 0x1}, + unicode.Range16{Lo: 0xd4b0, Hi: 0xd4b0, Stride: 0x1}, + unicode.Range16{Lo: 0xd4cc, Hi: 0xd4cc, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e8, Hi: 0xd4e8, Stride: 0x1}, + unicode.Range16{Lo: 0xd504, Hi: 0xd504, Stride: 0x1}, + unicode.Range16{Lo: 0xd520, Hi: 0xd520, Stride: 0x1}, + unicode.Range16{Lo: 0xd53c, Hi: 0xd53c, Stride: 0x1}, + unicode.Range16{Lo: 0xd558, Hi: 0xd558, Stride: 0x1}, + unicode.Range16{Lo: 0xd574, Hi: 0xd574, Stride: 0x1}, + unicode.Range16{Lo: 0xd590, Hi: 0xd590, Stride: 0x1}, + unicode.Range16{Lo: 0xd5ac, Hi: 0xd5ac, Stride: 0x1}, + unicode.Range16{Lo: 0xd5c8, Hi: 0xd5c8, Stride: 0x1}, + unicode.Range16{Lo: 0xd5e4, Hi: 0xd5e4, Stride: 0x1}, + unicode.Range16{Lo: 0xd600, Hi: 0xd600, Stride: 0x1}, + unicode.Range16{Lo: 0xd61c, Hi: 0xd61c, Stride: 0x1}, + unicode.Range16{Lo: 0xd638, Hi: 0xd638, Stride: 0x1}, + unicode.Range16{Lo: 0xd654, Hi: 0xd654, Stride: 0x1}, + unicode.Range16{Lo: 0xd670, Hi: 0xd670, Stride: 0x1}, + unicode.Range16{Lo: 0xd68c, Hi: 0xd68c, Stride: 0x1}, + unicode.Range16{Lo: 0xd6a8, Hi: 0xd6a8, Stride: 0x1}, + unicode.Range16{Lo: 0xd6c4, Hi: 0xd6c4, Stride: 0x1}, + unicode.Range16{Lo: 0xd6e0, Hi: 0xd6e0, Stride: 0x1}, + unicode.Range16{Lo: 0xd6fc, Hi: 0xd6fc, Stride: 0x1}, + unicode.Range16{Lo: 0xd718, Hi: 0xd718, Stride: 0x1}, + unicode.Range16{Lo: 0xd734, Hi: 0xd734, Stride: 0x1}, + unicode.Range16{Lo: 0xd750, Hi: 0xd750, Stride: 0x1}, + unicode.Range16{Lo: 0xd76c, Hi: 0xd76c, Stride: 0x1}, + unicode.Range16{Lo: 0xd788, Hi: 0xd788, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _GraphemeLVT = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0xac01, Hi: 0xac1b, Stride: 0x1}, + unicode.Range16{Lo: 0xac1d, Hi: 0xac37, Stride: 0x1}, + unicode.Range16{Lo: 0xac39, Hi: 0xac53, Stride: 0x1}, + 
unicode.Range16{Lo: 0xac55, Hi: 0xac6f, Stride: 0x1}, + unicode.Range16{Lo: 0xac71, Hi: 0xac8b, Stride: 0x1}, + unicode.Range16{Lo: 0xac8d, Hi: 0xaca7, Stride: 0x1}, + unicode.Range16{Lo: 0xaca9, Hi: 0xacc3, Stride: 0x1}, + unicode.Range16{Lo: 0xacc5, Hi: 0xacdf, Stride: 0x1}, + unicode.Range16{Lo: 0xace1, Hi: 0xacfb, Stride: 0x1}, + unicode.Range16{Lo: 0xacfd, Hi: 0xad17, Stride: 0x1}, + unicode.Range16{Lo: 0xad19, Hi: 0xad33, Stride: 0x1}, + unicode.Range16{Lo: 0xad35, Hi: 0xad4f, Stride: 0x1}, + unicode.Range16{Lo: 0xad51, Hi: 0xad6b, Stride: 0x1}, + unicode.Range16{Lo: 0xad6d, Hi: 0xad87, Stride: 0x1}, + unicode.Range16{Lo: 0xad89, Hi: 0xada3, Stride: 0x1}, + unicode.Range16{Lo: 0xada5, Hi: 0xadbf, Stride: 0x1}, + unicode.Range16{Lo: 0xadc1, Hi: 0xaddb, Stride: 0x1}, + unicode.Range16{Lo: 0xaddd, Hi: 0xadf7, Stride: 0x1}, + unicode.Range16{Lo: 0xadf9, Hi: 0xae13, Stride: 0x1}, + unicode.Range16{Lo: 0xae15, Hi: 0xae2f, Stride: 0x1}, + unicode.Range16{Lo: 0xae31, Hi: 0xae4b, Stride: 0x1}, + unicode.Range16{Lo: 0xae4d, Hi: 0xae67, Stride: 0x1}, + unicode.Range16{Lo: 0xae69, Hi: 0xae83, Stride: 0x1}, + unicode.Range16{Lo: 0xae85, Hi: 0xae9f, Stride: 0x1}, + unicode.Range16{Lo: 0xaea1, Hi: 0xaebb, Stride: 0x1}, + unicode.Range16{Lo: 0xaebd, Hi: 0xaed7, Stride: 0x1}, + unicode.Range16{Lo: 0xaed9, Hi: 0xaef3, Stride: 0x1}, + unicode.Range16{Lo: 0xaef5, Hi: 0xaf0f, Stride: 0x1}, + unicode.Range16{Lo: 0xaf11, Hi: 0xaf2b, Stride: 0x1}, + unicode.Range16{Lo: 0xaf2d, Hi: 0xaf47, Stride: 0x1}, + unicode.Range16{Lo: 0xaf49, Hi: 0xaf63, Stride: 0x1}, + unicode.Range16{Lo: 0xaf65, Hi: 0xaf7f, Stride: 0x1}, + unicode.Range16{Lo: 0xaf81, Hi: 0xaf9b, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9d, Hi: 0xafb7, Stride: 0x1}, + unicode.Range16{Lo: 0xafb9, Hi: 0xafd3, Stride: 0x1}, + unicode.Range16{Lo: 0xafd5, Hi: 0xafef, Stride: 0x1}, + unicode.Range16{Lo: 0xaff1, Hi: 0xb00b, Stride: 0x1}, + unicode.Range16{Lo: 0xb00d, Hi: 0xb027, Stride: 0x1}, + unicode.Range16{Lo: 0xb029, Hi: 0xb043, Stride: 0x1}, + unicode.Range16{Lo: 0xb045, Hi: 0xb05f, Stride: 0x1}, + unicode.Range16{Lo: 0xb061, Hi: 0xb07b, Stride: 0x1}, + unicode.Range16{Lo: 0xb07d, Hi: 0xb097, Stride: 0x1}, + unicode.Range16{Lo: 0xb099, Hi: 0xb0b3, Stride: 0x1}, + unicode.Range16{Lo: 0xb0b5, Hi: 0xb0cf, Stride: 0x1}, + unicode.Range16{Lo: 0xb0d1, Hi: 0xb0eb, Stride: 0x1}, + unicode.Range16{Lo: 0xb0ed, Hi: 0xb107, Stride: 0x1}, + unicode.Range16{Lo: 0xb109, Hi: 0xb123, Stride: 0x1}, + unicode.Range16{Lo: 0xb125, Hi: 0xb13f, Stride: 0x1}, + unicode.Range16{Lo: 0xb141, Hi: 0xb15b, Stride: 0x1}, + unicode.Range16{Lo: 0xb15d, Hi: 0xb177, Stride: 0x1}, + unicode.Range16{Lo: 0xb179, Hi: 0xb193, Stride: 0x1}, + unicode.Range16{Lo: 0xb195, Hi: 0xb1af, Stride: 0x1}, + unicode.Range16{Lo: 0xb1b1, Hi: 0xb1cb, Stride: 0x1}, + unicode.Range16{Lo: 0xb1cd, Hi: 0xb1e7, Stride: 0x1}, + unicode.Range16{Lo: 0xb1e9, Hi: 0xb203, Stride: 0x1}, + unicode.Range16{Lo: 0xb205, Hi: 0xb21f, Stride: 0x1}, + unicode.Range16{Lo: 0xb221, Hi: 0xb23b, Stride: 0x1}, + unicode.Range16{Lo: 0xb23d, Hi: 0xb257, Stride: 0x1}, + unicode.Range16{Lo: 0xb259, Hi: 0xb273, Stride: 0x1}, + unicode.Range16{Lo: 0xb275, Hi: 0xb28f, Stride: 0x1}, + unicode.Range16{Lo: 0xb291, Hi: 0xb2ab, Stride: 0x1}, + unicode.Range16{Lo: 0xb2ad, Hi: 0xb2c7, Stride: 0x1}, + unicode.Range16{Lo: 0xb2c9, Hi: 0xb2e3, Stride: 0x1}, + unicode.Range16{Lo: 0xb2e5, Hi: 0xb2ff, Stride: 0x1}, + unicode.Range16{Lo: 0xb301, Hi: 0xb31b, Stride: 0x1}, + unicode.Range16{Lo: 0xb31d, Hi: 0xb337, Stride: 0x1}, + unicode.Range16{Lo: 
0xb339, Hi: 0xb353, Stride: 0x1}, + unicode.Range16{Lo: 0xb355, Hi: 0xb36f, Stride: 0x1}, + unicode.Range16{Lo: 0xb371, Hi: 0xb38b, Stride: 0x1}, + unicode.Range16{Lo: 0xb38d, Hi: 0xb3a7, Stride: 0x1}, + unicode.Range16{Lo: 0xb3a9, Hi: 0xb3c3, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c5, Hi: 0xb3df, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e1, Hi: 0xb3fb, Stride: 0x1}, + unicode.Range16{Lo: 0xb3fd, Hi: 0xb417, Stride: 0x1}, + unicode.Range16{Lo: 0xb419, Hi: 0xb433, Stride: 0x1}, + unicode.Range16{Lo: 0xb435, Hi: 0xb44f, Stride: 0x1}, + unicode.Range16{Lo: 0xb451, Hi: 0xb46b, Stride: 0x1}, + unicode.Range16{Lo: 0xb46d, Hi: 0xb487, Stride: 0x1}, + unicode.Range16{Lo: 0xb489, Hi: 0xb4a3, Stride: 0x1}, + unicode.Range16{Lo: 0xb4a5, Hi: 0xb4bf, Stride: 0x1}, + unicode.Range16{Lo: 0xb4c1, Hi: 0xb4db, Stride: 0x1}, + unicode.Range16{Lo: 0xb4dd, Hi: 0xb4f7, Stride: 0x1}, + unicode.Range16{Lo: 0xb4f9, Hi: 0xb513, Stride: 0x1}, + unicode.Range16{Lo: 0xb515, Hi: 0xb52f, Stride: 0x1}, + unicode.Range16{Lo: 0xb531, Hi: 0xb54b, Stride: 0x1}, + unicode.Range16{Lo: 0xb54d, Hi: 0xb567, Stride: 0x1}, + unicode.Range16{Lo: 0xb569, Hi: 0xb583, Stride: 0x1}, + unicode.Range16{Lo: 0xb585, Hi: 0xb59f, Stride: 0x1}, + unicode.Range16{Lo: 0xb5a1, Hi: 0xb5bb, Stride: 0x1}, + unicode.Range16{Lo: 0xb5bd, Hi: 0xb5d7, Stride: 0x1}, + unicode.Range16{Lo: 0xb5d9, Hi: 0xb5f3, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f5, Hi: 0xb60f, Stride: 0x1}, + unicode.Range16{Lo: 0xb611, Hi: 0xb62b, Stride: 0x1}, + unicode.Range16{Lo: 0xb62d, Hi: 0xb647, Stride: 0x1}, + unicode.Range16{Lo: 0xb649, Hi: 0xb663, Stride: 0x1}, + unicode.Range16{Lo: 0xb665, Hi: 0xb67f, Stride: 0x1}, + unicode.Range16{Lo: 0xb681, Hi: 0xb69b, Stride: 0x1}, + unicode.Range16{Lo: 0xb69d, Hi: 0xb6b7, Stride: 0x1}, + unicode.Range16{Lo: 0xb6b9, Hi: 0xb6d3, Stride: 0x1}, + unicode.Range16{Lo: 0xb6d5, Hi: 0xb6ef, Stride: 0x1}, + unicode.Range16{Lo: 0xb6f1, Hi: 0xb70b, Stride: 0x1}, + unicode.Range16{Lo: 0xb70d, Hi: 0xb727, Stride: 0x1}, + unicode.Range16{Lo: 0xb729, Hi: 0xb743, Stride: 0x1}, + unicode.Range16{Lo: 0xb745, Hi: 0xb75f, Stride: 0x1}, + unicode.Range16{Lo: 0xb761, Hi: 0xb77b, Stride: 0x1}, + unicode.Range16{Lo: 0xb77d, Hi: 0xb797, Stride: 0x1}, + unicode.Range16{Lo: 0xb799, Hi: 0xb7b3, Stride: 0x1}, + unicode.Range16{Lo: 0xb7b5, Hi: 0xb7cf, Stride: 0x1}, + unicode.Range16{Lo: 0xb7d1, Hi: 0xb7eb, Stride: 0x1}, + unicode.Range16{Lo: 0xb7ed, Hi: 0xb807, Stride: 0x1}, + unicode.Range16{Lo: 0xb809, Hi: 0xb823, Stride: 0x1}, + unicode.Range16{Lo: 0xb825, Hi: 0xb83f, Stride: 0x1}, + unicode.Range16{Lo: 0xb841, Hi: 0xb85b, Stride: 0x1}, + unicode.Range16{Lo: 0xb85d, Hi: 0xb877, Stride: 0x1}, + unicode.Range16{Lo: 0xb879, Hi: 0xb893, Stride: 0x1}, + unicode.Range16{Lo: 0xb895, Hi: 0xb8af, Stride: 0x1}, + unicode.Range16{Lo: 0xb8b1, Hi: 0xb8cb, Stride: 0x1}, + unicode.Range16{Lo: 0xb8cd, Hi: 0xb8e7, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e9, Hi: 0xb903, Stride: 0x1}, + unicode.Range16{Lo: 0xb905, Hi: 0xb91f, Stride: 0x1}, + unicode.Range16{Lo: 0xb921, Hi: 0xb93b, Stride: 0x1}, + unicode.Range16{Lo: 0xb93d, Hi: 0xb957, Stride: 0x1}, + unicode.Range16{Lo: 0xb959, Hi: 0xb973, Stride: 0x1}, + unicode.Range16{Lo: 0xb975, Hi: 0xb98f, Stride: 0x1}, + unicode.Range16{Lo: 0xb991, Hi: 0xb9ab, Stride: 0x1}, + unicode.Range16{Lo: 0xb9ad, Hi: 0xb9c7, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c9, Hi: 0xb9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e5, Hi: 0xb9ff, Stride: 0x1}, + unicode.Range16{Lo: 0xba01, Hi: 0xba1b, Stride: 0x1}, + unicode.Range16{Lo: 0xba1d, Hi: 0xba37, 
Stride: 0x1}, + unicode.Range16{Lo: 0xba39, Hi: 0xba53, Stride: 0x1}, + unicode.Range16{Lo: 0xba55, Hi: 0xba6f, Stride: 0x1}, + unicode.Range16{Lo: 0xba71, Hi: 0xba8b, Stride: 0x1}, + unicode.Range16{Lo: 0xba8d, Hi: 0xbaa7, Stride: 0x1}, + unicode.Range16{Lo: 0xbaa9, Hi: 0xbac3, Stride: 0x1}, + unicode.Range16{Lo: 0xbac5, Hi: 0xbadf, Stride: 0x1}, + unicode.Range16{Lo: 0xbae1, Hi: 0xbafb, Stride: 0x1}, + unicode.Range16{Lo: 0xbafd, Hi: 0xbb17, Stride: 0x1}, + unicode.Range16{Lo: 0xbb19, Hi: 0xbb33, Stride: 0x1}, + unicode.Range16{Lo: 0xbb35, Hi: 0xbb4f, Stride: 0x1}, + unicode.Range16{Lo: 0xbb51, Hi: 0xbb6b, Stride: 0x1}, + unicode.Range16{Lo: 0xbb6d, Hi: 0xbb87, Stride: 0x1}, + unicode.Range16{Lo: 0xbb89, Hi: 0xbba3, Stride: 0x1}, + unicode.Range16{Lo: 0xbba5, Hi: 0xbbbf, Stride: 0x1}, + unicode.Range16{Lo: 0xbbc1, Hi: 0xbbdb, Stride: 0x1}, + unicode.Range16{Lo: 0xbbdd, Hi: 0xbbf7, Stride: 0x1}, + unicode.Range16{Lo: 0xbbf9, Hi: 0xbc13, Stride: 0x1}, + unicode.Range16{Lo: 0xbc15, Hi: 0xbc2f, Stride: 0x1}, + unicode.Range16{Lo: 0xbc31, Hi: 0xbc4b, Stride: 0x1}, + unicode.Range16{Lo: 0xbc4d, Hi: 0xbc67, Stride: 0x1}, + unicode.Range16{Lo: 0xbc69, Hi: 0xbc83, Stride: 0x1}, + unicode.Range16{Lo: 0xbc85, Hi: 0xbc9f, Stride: 0x1}, + unicode.Range16{Lo: 0xbca1, Hi: 0xbcbb, Stride: 0x1}, + unicode.Range16{Lo: 0xbcbd, Hi: 0xbcd7, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd9, Hi: 0xbcf3, Stride: 0x1}, + unicode.Range16{Lo: 0xbcf5, Hi: 0xbd0f, Stride: 0x1}, + unicode.Range16{Lo: 0xbd11, Hi: 0xbd2b, Stride: 0x1}, + unicode.Range16{Lo: 0xbd2d, Hi: 0xbd47, Stride: 0x1}, + unicode.Range16{Lo: 0xbd49, Hi: 0xbd63, Stride: 0x1}, + unicode.Range16{Lo: 0xbd65, Hi: 0xbd7f, Stride: 0x1}, + unicode.Range16{Lo: 0xbd81, Hi: 0xbd9b, Stride: 0x1}, + unicode.Range16{Lo: 0xbd9d, Hi: 0xbdb7, Stride: 0x1}, + unicode.Range16{Lo: 0xbdb9, Hi: 0xbdd3, Stride: 0x1}, + unicode.Range16{Lo: 0xbdd5, Hi: 0xbdef, Stride: 0x1}, + unicode.Range16{Lo: 0xbdf1, Hi: 0xbe0b, Stride: 0x1}, + unicode.Range16{Lo: 0xbe0d, Hi: 0xbe27, Stride: 0x1}, + unicode.Range16{Lo: 0xbe29, Hi: 0xbe43, Stride: 0x1}, + unicode.Range16{Lo: 0xbe45, Hi: 0xbe5f, Stride: 0x1}, + unicode.Range16{Lo: 0xbe61, Hi: 0xbe7b, Stride: 0x1}, + unicode.Range16{Lo: 0xbe7d, Hi: 0xbe97, Stride: 0x1}, + unicode.Range16{Lo: 0xbe99, Hi: 0xbeb3, Stride: 0x1}, + unicode.Range16{Lo: 0xbeb5, Hi: 0xbecf, Stride: 0x1}, + unicode.Range16{Lo: 0xbed1, Hi: 0xbeeb, Stride: 0x1}, + unicode.Range16{Lo: 0xbeed, Hi: 0xbf07, Stride: 0x1}, + unicode.Range16{Lo: 0xbf09, Hi: 0xbf23, Stride: 0x1}, + unicode.Range16{Lo: 0xbf25, Hi: 0xbf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xbf41, Hi: 0xbf5b, Stride: 0x1}, + unicode.Range16{Lo: 0xbf5d, Hi: 0xbf77, Stride: 0x1}, + unicode.Range16{Lo: 0xbf79, Hi: 0xbf93, Stride: 0x1}, + unicode.Range16{Lo: 0xbf95, Hi: 0xbfaf, Stride: 0x1}, + unicode.Range16{Lo: 0xbfb1, Hi: 0xbfcb, Stride: 0x1}, + unicode.Range16{Lo: 0xbfcd, Hi: 0xbfe7, Stride: 0x1}, + unicode.Range16{Lo: 0xbfe9, Hi: 0xc003, Stride: 0x1}, + unicode.Range16{Lo: 0xc005, Hi: 0xc01f, Stride: 0x1}, + unicode.Range16{Lo: 0xc021, Hi: 0xc03b, Stride: 0x1}, + unicode.Range16{Lo: 0xc03d, Hi: 0xc057, Stride: 0x1}, + unicode.Range16{Lo: 0xc059, Hi: 0xc073, Stride: 0x1}, + unicode.Range16{Lo: 0xc075, Hi: 0xc08f, Stride: 0x1}, + unicode.Range16{Lo: 0xc091, Hi: 0xc0ab, Stride: 0x1}, + unicode.Range16{Lo: 0xc0ad, Hi: 0xc0c7, Stride: 0x1}, + unicode.Range16{Lo: 0xc0c9, Hi: 0xc0e3, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e5, Hi: 0xc0ff, Stride: 0x1}, + unicode.Range16{Lo: 0xc101, Hi: 0xc11b, Stride: 0x1}, + 
unicode.Range16{Lo: 0xc11d, Hi: 0xc137, Stride: 0x1}, + unicode.Range16{Lo: 0xc139, Hi: 0xc153, Stride: 0x1}, + unicode.Range16{Lo: 0xc155, Hi: 0xc16f, Stride: 0x1}, + unicode.Range16{Lo: 0xc171, Hi: 0xc18b, Stride: 0x1}, + unicode.Range16{Lo: 0xc18d, Hi: 0xc1a7, Stride: 0x1}, + unicode.Range16{Lo: 0xc1a9, Hi: 0xc1c3, Stride: 0x1}, + unicode.Range16{Lo: 0xc1c5, Hi: 0xc1df, Stride: 0x1}, + unicode.Range16{Lo: 0xc1e1, Hi: 0xc1fb, Stride: 0x1}, + unicode.Range16{Lo: 0xc1fd, Hi: 0xc217, Stride: 0x1}, + unicode.Range16{Lo: 0xc219, Hi: 0xc233, Stride: 0x1}, + unicode.Range16{Lo: 0xc235, Hi: 0xc24f, Stride: 0x1}, + unicode.Range16{Lo: 0xc251, Hi: 0xc26b, Stride: 0x1}, + unicode.Range16{Lo: 0xc26d, Hi: 0xc287, Stride: 0x1}, + unicode.Range16{Lo: 0xc289, Hi: 0xc2a3, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a5, Hi: 0xc2bf, Stride: 0x1}, + unicode.Range16{Lo: 0xc2c1, Hi: 0xc2db, Stride: 0x1}, + unicode.Range16{Lo: 0xc2dd, Hi: 0xc2f7, Stride: 0x1}, + unicode.Range16{Lo: 0xc2f9, Hi: 0xc313, Stride: 0x1}, + unicode.Range16{Lo: 0xc315, Hi: 0xc32f, Stride: 0x1}, + unicode.Range16{Lo: 0xc331, Hi: 0xc34b, Stride: 0x1}, + unicode.Range16{Lo: 0xc34d, Hi: 0xc367, Stride: 0x1}, + unicode.Range16{Lo: 0xc369, Hi: 0xc383, Stride: 0x1}, + unicode.Range16{Lo: 0xc385, Hi: 0xc39f, Stride: 0x1}, + unicode.Range16{Lo: 0xc3a1, Hi: 0xc3bb, Stride: 0x1}, + unicode.Range16{Lo: 0xc3bd, Hi: 0xc3d7, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d9, Hi: 0xc3f3, Stride: 0x1}, + unicode.Range16{Lo: 0xc3f5, Hi: 0xc40f, Stride: 0x1}, + unicode.Range16{Lo: 0xc411, Hi: 0xc42b, Stride: 0x1}, + unicode.Range16{Lo: 0xc42d, Hi: 0xc447, Stride: 0x1}, + unicode.Range16{Lo: 0xc449, Hi: 0xc463, Stride: 0x1}, + unicode.Range16{Lo: 0xc465, Hi: 0xc47f, Stride: 0x1}, + unicode.Range16{Lo: 0xc481, Hi: 0xc49b, Stride: 0x1}, + unicode.Range16{Lo: 0xc49d, Hi: 0xc4b7, Stride: 0x1}, + unicode.Range16{Lo: 0xc4b9, Hi: 0xc4d3, Stride: 0x1}, + unicode.Range16{Lo: 0xc4d5, Hi: 0xc4ef, Stride: 0x1}, + unicode.Range16{Lo: 0xc4f1, Hi: 0xc50b, Stride: 0x1}, + unicode.Range16{Lo: 0xc50d, Hi: 0xc527, Stride: 0x1}, + unicode.Range16{Lo: 0xc529, Hi: 0xc543, Stride: 0x1}, + unicode.Range16{Lo: 0xc545, Hi: 0xc55f, Stride: 0x1}, + unicode.Range16{Lo: 0xc561, Hi: 0xc57b, Stride: 0x1}, + unicode.Range16{Lo: 0xc57d, Hi: 0xc597, Stride: 0x1}, + unicode.Range16{Lo: 0xc599, Hi: 0xc5b3, Stride: 0x1}, + unicode.Range16{Lo: 0xc5b5, Hi: 0xc5cf, Stride: 0x1}, + unicode.Range16{Lo: 0xc5d1, Hi: 0xc5eb, Stride: 0x1}, + unicode.Range16{Lo: 0xc5ed, Hi: 0xc607, Stride: 0x1}, + unicode.Range16{Lo: 0xc609, Hi: 0xc623, Stride: 0x1}, + unicode.Range16{Lo: 0xc625, Hi: 0xc63f, Stride: 0x1}, + unicode.Range16{Lo: 0xc641, Hi: 0xc65b, Stride: 0x1}, + unicode.Range16{Lo: 0xc65d, Hi: 0xc677, Stride: 0x1}, + unicode.Range16{Lo: 0xc679, Hi: 0xc693, Stride: 0x1}, + unicode.Range16{Lo: 0xc695, Hi: 0xc6af, Stride: 0x1}, + unicode.Range16{Lo: 0xc6b1, Hi: 0xc6cb, Stride: 0x1}, + unicode.Range16{Lo: 0xc6cd, Hi: 0xc6e7, Stride: 0x1}, + unicode.Range16{Lo: 0xc6e9, Hi: 0xc703, Stride: 0x1}, + unicode.Range16{Lo: 0xc705, Hi: 0xc71f, Stride: 0x1}, + unicode.Range16{Lo: 0xc721, Hi: 0xc73b, Stride: 0x1}, + unicode.Range16{Lo: 0xc73d, Hi: 0xc757, Stride: 0x1}, + unicode.Range16{Lo: 0xc759, Hi: 0xc773, Stride: 0x1}, + unicode.Range16{Lo: 0xc775, Hi: 0xc78f, Stride: 0x1}, + unicode.Range16{Lo: 0xc791, Hi: 0xc7ab, Stride: 0x1}, + unicode.Range16{Lo: 0xc7ad, Hi: 0xc7c7, Stride: 0x1}, + unicode.Range16{Lo: 0xc7c9, Hi: 0xc7e3, Stride: 0x1}, + unicode.Range16{Lo: 0xc7e5, Hi: 0xc7ff, Stride: 0x1}, + unicode.Range16{Lo: 
0xc801, Hi: 0xc81b, Stride: 0x1}, + unicode.Range16{Lo: 0xc81d, Hi: 0xc837, Stride: 0x1}, + unicode.Range16{Lo: 0xc839, Hi: 0xc853, Stride: 0x1}, + unicode.Range16{Lo: 0xc855, Hi: 0xc86f, Stride: 0x1}, + unicode.Range16{Lo: 0xc871, Hi: 0xc88b, Stride: 0x1}, + unicode.Range16{Lo: 0xc88d, Hi: 0xc8a7, Stride: 0x1}, + unicode.Range16{Lo: 0xc8a9, Hi: 0xc8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xc8c5, Hi: 0xc8df, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e1, Hi: 0xc8fb, Stride: 0x1}, + unicode.Range16{Lo: 0xc8fd, Hi: 0xc917, Stride: 0x1}, + unicode.Range16{Lo: 0xc919, Hi: 0xc933, Stride: 0x1}, + unicode.Range16{Lo: 0xc935, Hi: 0xc94f, Stride: 0x1}, + unicode.Range16{Lo: 0xc951, Hi: 0xc96b, Stride: 0x1}, + unicode.Range16{Lo: 0xc96d, Hi: 0xc987, Stride: 0x1}, + unicode.Range16{Lo: 0xc989, Hi: 0xc9a3, Stride: 0x1}, + unicode.Range16{Lo: 0xc9a5, Hi: 0xc9bf, Stride: 0x1}, + unicode.Range16{Lo: 0xc9c1, Hi: 0xc9db, Stride: 0x1}, + unicode.Range16{Lo: 0xc9dd, Hi: 0xc9f7, Stride: 0x1}, + unicode.Range16{Lo: 0xc9f9, Hi: 0xca13, Stride: 0x1}, + unicode.Range16{Lo: 0xca15, Hi: 0xca2f, Stride: 0x1}, + unicode.Range16{Lo: 0xca31, Hi: 0xca4b, Stride: 0x1}, + unicode.Range16{Lo: 0xca4d, Hi: 0xca67, Stride: 0x1}, + unicode.Range16{Lo: 0xca69, Hi: 0xca83, Stride: 0x1}, + unicode.Range16{Lo: 0xca85, Hi: 0xca9f, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa1, Hi: 0xcabb, Stride: 0x1}, + unicode.Range16{Lo: 0xcabd, Hi: 0xcad7, Stride: 0x1}, + unicode.Range16{Lo: 0xcad9, Hi: 0xcaf3, Stride: 0x1}, + unicode.Range16{Lo: 0xcaf5, Hi: 0xcb0f, Stride: 0x1}, + unicode.Range16{Lo: 0xcb11, Hi: 0xcb2b, Stride: 0x1}, + unicode.Range16{Lo: 0xcb2d, Hi: 0xcb47, Stride: 0x1}, + unicode.Range16{Lo: 0xcb49, Hi: 0xcb63, Stride: 0x1}, + unicode.Range16{Lo: 0xcb65, Hi: 0xcb7f, Stride: 0x1}, + unicode.Range16{Lo: 0xcb81, Hi: 0xcb9b, Stride: 0x1}, + unicode.Range16{Lo: 0xcb9d, Hi: 0xcbb7, Stride: 0x1}, + unicode.Range16{Lo: 0xcbb9, Hi: 0xcbd3, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd5, Hi: 0xcbef, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf1, Hi: 0xcc0b, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0d, Hi: 0xcc27, Stride: 0x1}, + unicode.Range16{Lo: 0xcc29, Hi: 0xcc43, Stride: 0x1}, + unicode.Range16{Lo: 0xcc45, Hi: 0xcc5f, Stride: 0x1}, + unicode.Range16{Lo: 0xcc61, Hi: 0xcc7b, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7d, Hi: 0xcc97, Stride: 0x1}, + unicode.Range16{Lo: 0xcc99, Hi: 0xccb3, Stride: 0x1}, + unicode.Range16{Lo: 0xccb5, Hi: 0xcccf, Stride: 0x1}, + unicode.Range16{Lo: 0xccd1, Hi: 0xcceb, Stride: 0x1}, + unicode.Range16{Lo: 0xcced, Hi: 0xcd07, Stride: 0x1}, + unicode.Range16{Lo: 0xcd09, Hi: 0xcd23, Stride: 0x1}, + unicode.Range16{Lo: 0xcd25, Hi: 0xcd3f, Stride: 0x1}, + unicode.Range16{Lo: 0xcd41, Hi: 0xcd5b, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5d, Hi: 0xcd77, Stride: 0x1}, + unicode.Range16{Lo: 0xcd79, Hi: 0xcd93, Stride: 0x1}, + unicode.Range16{Lo: 0xcd95, Hi: 0xcdaf, Stride: 0x1}, + unicode.Range16{Lo: 0xcdb1, Hi: 0xcdcb, Stride: 0x1}, + unicode.Range16{Lo: 0xcdcd, Hi: 0xcde7, Stride: 0x1}, + unicode.Range16{Lo: 0xcde9, Hi: 0xce03, Stride: 0x1}, + unicode.Range16{Lo: 0xce05, Hi: 0xce1f, Stride: 0x1}, + unicode.Range16{Lo: 0xce21, Hi: 0xce3b, Stride: 0x1}, + unicode.Range16{Lo: 0xce3d, Hi: 0xce57, Stride: 0x1}, + unicode.Range16{Lo: 0xce59, Hi: 0xce73, Stride: 0x1}, + unicode.Range16{Lo: 0xce75, Hi: 0xce8f, Stride: 0x1}, + unicode.Range16{Lo: 0xce91, Hi: 0xceab, Stride: 0x1}, + unicode.Range16{Lo: 0xcead, Hi: 0xcec7, Stride: 0x1}, + unicode.Range16{Lo: 0xcec9, Hi: 0xcee3, Stride: 0x1}, + unicode.Range16{Lo: 0xcee5, Hi: 0xceff, 
Stride: 0x1}, + unicode.Range16{Lo: 0xcf01, Hi: 0xcf1b, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1d, Hi: 0xcf37, Stride: 0x1}, + unicode.Range16{Lo: 0xcf39, Hi: 0xcf53, Stride: 0x1}, + unicode.Range16{Lo: 0xcf55, Hi: 0xcf6f, Stride: 0x1}, + unicode.Range16{Lo: 0xcf71, Hi: 0xcf8b, Stride: 0x1}, + unicode.Range16{Lo: 0xcf8d, Hi: 0xcfa7, Stride: 0x1}, + unicode.Range16{Lo: 0xcfa9, Hi: 0xcfc3, Stride: 0x1}, + unicode.Range16{Lo: 0xcfc5, Hi: 0xcfdf, Stride: 0x1}, + unicode.Range16{Lo: 0xcfe1, Hi: 0xcffb, Stride: 0x1}, + unicode.Range16{Lo: 0xcffd, Hi: 0xd017, Stride: 0x1}, + unicode.Range16{Lo: 0xd019, Hi: 0xd033, Stride: 0x1}, + unicode.Range16{Lo: 0xd035, Hi: 0xd04f, Stride: 0x1}, + unicode.Range16{Lo: 0xd051, Hi: 0xd06b, Stride: 0x1}, + unicode.Range16{Lo: 0xd06d, Hi: 0xd087, Stride: 0x1}, + unicode.Range16{Lo: 0xd089, Hi: 0xd0a3, Stride: 0x1}, + unicode.Range16{Lo: 0xd0a5, Hi: 0xd0bf, Stride: 0x1}, + unicode.Range16{Lo: 0xd0c1, Hi: 0xd0db, Stride: 0x1}, + unicode.Range16{Lo: 0xd0dd, Hi: 0xd0f7, Stride: 0x1}, + unicode.Range16{Lo: 0xd0f9, Hi: 0xd113, Stride: 0x1}, + unicode.Range16{Lo: 0xd115, Hi: 0xd12f, Stride: 0x1}, + unicode.Range16{Lo: 0xd131, Hi: 0xd14b, Stride: 0x1}, + unicode.Range16{Lo: 0xd14d, Hi: 0xd167, Stride: 0x1}, + unicode.Range16{Lo: 0xd169, Hi: 0xd183, Stride: 0x1}, + unicode.Range16{Lo: 0xd185, Hi: 0xd19f, Stride: 0x1}, + unicode.Range16{Lo: 0xd1a1, Hi: 0xd1bb, Stride: 0x1}, + unicode.Range16{Lo: 0xd1bd, Hi: 0xd1d7, Stride: 0x1}, + unicode.Range16{Lo: 0xd1d9, Hi: 0xd1f3, Stride: 0x1}, + unicode.Range16{Lo: 0xd1f5, Hi: 0xd20f, Stride: 0x1}, + unicode.Range16{Lo: 0xd211, Hi: 0xd22b, Stride: 0x1}, + unicode.Range16{Lo: 0xd22d, Hi: 0xd247, Stride: 0x1}, + unicode.Range16{Lo: 0xd249, Hi: 0xd263, Stride: 0x1}, + unicode.Range16{Lo: 0xd265, Hi: 0xd27f, Stride: 0x1}, + unicode.Range16{Lo: 0xd281, Hi: 0xd29b, Stride: 0x1}, + unicode.Range16{Lo: 0xd29d, Hi: 0xd2b7, Stride: 0x1}, + unicode.Range16{Lo: 0xd2b9, Hi: 0xd2d3, Stride: 0x1}, + unicode.Range16{Lo: 0xd2d5, Hi: 0xd2ef, Stride: 0x1}, + unicode.Range16{Lo: 0xd2f1, Hi: 0xd30b, Stride: 0x1}, + unicode.Range16{Lo: 0xd30d, Hi: 0xd327, Stride: 0x1}, + unicode.Range16{Lo: 0xd329, Hi: 0xd343, Stride: 0x1}, + unicode.Range16{Lo: 0xd345, Hi: 0xd35f, Stride: 0x1}, + unicode.Range16{Lo: 0xd361, Hi: 0xd37b, Stride: 0x1}, + unicode.Range16{Lo: 0xd37d, Hi: 0xd397, Stride: 0x1}, + unicode.Range16{Lo: 0xd399, Hi: 0xd3b3, Stride: 0x1}, + unicode.Range16{Lo: 0xd3b5, Hi: 0xd3cf, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d1, Hi: 0xd3eb, Stride: 0x1}, + unicode.Range16{Lo: 0xd3ed, Hi: 0xd407, Stride: 0x1}, + unicode.Range16{Lo: 0xd409, Hi: 0xd423, Stride: 0x1}, + unicode.Range16{Lo: 0xd425, Hi: 0xd43f, Stride: 0x1}, + unicode.Range16{Lo: 0xd441, Hi: 0xd45b, Stride: 0x1}, + unicode.Range16{Lo: 0xd45d, Hi: 0xd477, Stride: 0x1}, + unicode.Range16{Lo: 0xd479, Hi: 0xd493, Stride: 0x1}, + unicode.Range16{Lo: 0xd495, Hi: 0xd4af, Stride: 0x1}, + unicode.Range16{Lo: 0xd4b1, Hi: 0xd4cb, Stride: 0x1}, + unicode.Range16{Lo: 0xd4cd, Hi: 0xd4e7, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e9, Hi: 0xd503, Stride: 0x1}, + unicode.Range16{Lo: 0xd505, Hi: 0xd51f, Stride: 0x1}, + unicode.Range16{Lo: 0xd521, Hi: 0xd53b, Stride: 0x1}, + unicode.Range16{Lo: 0xd53d, Hi: 0xd557, Stride: 0x1}, + unicode.Range16{Lo: 0xd559, Hi: 0xd573, Stride: 0x1}, + unicode.Range16{Lo: 0xd575, Hi: 0xd58f, Stride: 0x1}, + unicode.Range16{Lo: 0xd591, Hi: 0xd5ab, Stride: 0x1}, + unicode.Range16{Lo: 0xd5ad, Hi: 0xd5c7, Stride: 0x1}, + unicode.Range16{Lo: 0xd5c9, Hi: 0xd5e3, Stride: 0x1}, + 
unicode.Range16{Lo: 0xd5e5, Hi: 0xd5ff, Stride: 0x1},
+ unicode.Range16{Lo: 0xd601, Hi: 0xd61b, Stride: 0x1},
+ unicode.Range16{Lo: 0xd61d, Hi: 0xd637, Stride: 0x1},
+ unicode.Range16{Lo: 0xd639, Hi: 0xd653, Stride: 0x1},
+ unicode.Range16{Lo: 0xd655, Hi: 0xd66f, Stride: 0x1},
+ unicode.Range16{Lo: 0xd671, Hi: 0xd68b, Stride: 0x1},
+ unicode.Range16{Lo: 0xd68d, Hi: 0xd6a7, Stride: 0x1},
+ unicode.Range16{Lo: 0xd6a9, Hi: 0xd6c3, Stride: 0x1},
+ unicode.Range16{Lo: 0xd6c5, Hi: 0xd6df, Stride: 0x1},
+ unicode.Range16{Lo: 0xd6e1, Hi: 0xd6fb, Stride: 0x1},
+ unicode.Range16{Lo: 0xd6fd, Hi: 0xd717, Stride: 0x1},
+ unicode.Range16{Lo: 0xd719, Hi: 0xd733, Stride: 0x1},
+ unicode.Range16{Lo: 0xd735, Hi: 0xd74f, Stride: 0x1},
+ unicode.Range16{Lo: 0xd751, Hi: 0xd76b, Stride: 0x1},
+ unicode.Range16{Lo: 0xd76d, Hi: 0xd787, Stride: 0x1},
+ unicode.Range16{Lo: 0xd789, Hi: 0xd7a3, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemePrepend = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1},
+ unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1},
+ unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1},
+ unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1},
+ unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1},
+ unicode.Range32{Lo: 0x111c2, Hi: 0x111c3, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemeRegional_Indicator = &unicode.RangeTable{
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemeSpacingMark = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1},
+ unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1},
+ unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1},
+ unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1},
+ unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1},
+ unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1},
+ unicode.Range16{Lo: 0x9bf, Hi: 0x9c0, Stride: 0x1},
+ unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1},
+ unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1},
+ unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1},
+ unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1},
+ unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1},
+ unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1},
+ unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1},
+ unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1},
+ unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1},
+ unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1},
+ unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1},
+ unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1},
+ unicode.Range16{Lo: 0xbbf, Hi: 0xbbf, Stride: 0x1},
+ unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1},
+ unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1},
+ unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1},
+ unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1},
+ unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1},
+ unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1},
+ unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1},
+ unicode.Range16{Lo: 0xcc0, Hi: 0xcc1, Stride: 0x1},
+ unicode.Range16{Lo: 0xcc3, Hi: 0xcc4, Stride: 0x1},
+ unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1},
+ unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1},
+ unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1},
+ unicode.Range16{Lo: 0xd3f, Hi: 0xd40, Stride: 0x1},
+ unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1},
+ unicode.Range16{Lo: 0xd4a, Hi:
0xd4c, Stride: 0x1}, + unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1}, + unicode.Range16{Lo: 0xdd0, Hi: 0xdd1, Stride: 0x1}, + unicode.Range16{Lo: 0xdd8, Hi: 0xdde, Stride: 0x1}, + unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1}, + unicode.Range16{Lo: 0xe33, Hi: 0xe33, Stride: 0x1}, + unicode.Range16{Lo: 0xeb3, Hi: 0xeb3, Stride: 0x1}, + unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1}, + unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1}, + unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1}, + unicode.Range16{Lo: 0x1084, Hi: 0x1084, Stride: 0x1}, + unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1}, + unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1}, + unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1}, + unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1}, + unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1}, + unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1}, + unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1}, + unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1}, + unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1}, + unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1}, + unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1}, + unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1}, + unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1}, + unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1}, + unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1}, + unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1}, + unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1}, + unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1}, + unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1}, + unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1}, + unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1}, + unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1}, + unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1}, + unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1}, + unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1}, + unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1}, + unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1}, + unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1}, + unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1}, + unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1}, + unicode.Range32{Lo: 0x110b0, Hi: 
0x110b2, Stride: 0x1},
+ unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1},
+ unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1},
+ unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1},
+ unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1},
+ unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1},
+ unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1},
+ unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1},
+ unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1},
+ unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1},
+ unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1},
+ unicode.Range32{Lo: 0x1133f, Hi: 0x1133f, Stride: 0x1},
+ unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1},
+ unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1},
+ unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1},
+ unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1},
+ unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1},
+ unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1},
+ unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1},
+ unicode.Range32{Lo: 0x114b1, Hi: 0x114b2, Stride: 0x1},
+ unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1},
+ unicode.Range32{Lo: 0x114bb, Hi: 0x114bc, Stride: 0x1},
+ unicode.Range32{Lo: 0x114be, Hi: 0x114be, Stride: 0x1},
+ unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1},
+ unicode.Range32{Lo: 0x115b0, Hi: 0x115b1, Stride: 0x1},
+ unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1},
+ unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1},
+ unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1},
+ unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1},
+ unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1},
+ unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1},
+ unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1},
+ unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1},
+ unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1},
+ unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1},
+ unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1},
+ unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1},
+ unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1},
+ unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1},
+ unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d166, Hi: 0x1d166, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d16d, Hi: 0x1d16d, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemeT = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x11a8, Hi: 0x11ff, Stride: 0x1},
+ unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemeV = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x1160, Hi: 0x11a7, Stride: 0x1},
+ unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _GraphemeZWJ = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x200d, Hi: 0x200d, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+type _GraphemeRuneRange unicode.RangeTable
+
+func _GraphemeRuneType(r rune) *_GraphemeRuneRange {
+ switch {
+ case unicode.Is(_GraphemeCR, r):
+ return (*_GraphemeRuneRange)(_GraphemeCR)
+ case unicode.Is(_GraphemeControl, r):
+ return (*_GraphemeRuneRange)(_GraphemeControl)
+ case unicode.Is(_GraphemeE_Base, r):
+ return (*_GraphemeRuneRange)(_GraphemeE_Base)
+ case unicode.Is(_GraphemeE_Base_GAZ, r):
+ return (*_GraphemeRuneRange)(_GraphemeE_Base_GAZ)
+ case unicode.Is(_GraphemeE_Modifier, r):
+ return (*_GraphemeRuneRange)(_GraphemeE_Modifier)
+ case unicode.Is(_GraphemeExtend, r):
+ return (*_GraphemeRuneRange)(_GraphemeExtend)
+ case unicode.Is(_GraphemeGlue_After_Zwj, r):
+ return (*_GraphemeRuneRange)(_GraphemeGlue_After_Zwj)
+ case unicode.Is(_GraphemeL, r):
+ return (*_GraphemeRuneRange)(_GraphemeL)
+ case unicode.Is(_GraphemeLF, r):
+ return (*_GraphemeRuneRange)(_GraphemeLF)
+ case unicode.Is(_GraphemeLV, r):
+ return (*_GraphemeRuneRange)(_GraphemeLV)
+ case unicode.Is(_GraphemeLVT, r):
+ return (*_GraphemeRuneRange)(_GraphemeLVT)
+ case unicode.Is(_GraphemePrepend, r):
+ return (*_GraphemeRuneRange)(_GraphemePrepend)
+ case unicode.Is(_GraphemeRegional_Indicator, r):
+ return (*_GraphemeRuneRange)(_GraphemeRegional_Indicator)
+ case unicode.Is(_GraphemeSpacingMark, r):
+ return (*_GraphemeRuneRange)(_GraphemeSpacingMark)
+ case unicode.Is(_GraphemeT, r):
+ return (*_GraphemeRuneRange)(_GraphemeT)
+ case unicode.Is(_GraphemeV, r):
+ return (*_GraphemeRuneRange)(_GraphemeV)
+ case unicode.Is(_GraphemeZWJ, r):
+ return (*_GraphemeRuneRange)(_GraphemeZWJ)
+ default:
+ return nil
+ }
+}
+func (rng *_GraphemeRuneRange) String() string {
+ switch (*unicode.RangeTable)(rng) {
+ case _GraphemeCR:
+ return "CR"
+ case _GraphemeControl:
+ return "Control"
+ case _GraphemeE_Base:
+ return "E_Base"
+ case _GraphemeE_Base_GAZ:
+ return "E_Base_GAZ"
+ case _GraphemeE_Modifier:
+ return "E_Modifier"
+ case _GraphemeExtend:
+ return "Extend"
+ case _GraphemeGlue_After_Zwj:
+ return "Glue_After_Zwj"
+ case _GraphemeL:
+ return "L"
+ case _GraphemeLF:
+ return "LF"
+ case _GraphemeLV:
+ return "LV"
+ case _GraphemeLVT:
+ return "LVT"
+ case _GraphemePrepend:
+ return "Prepend"
+ case _GraphemeRegional_Indicator:
+ return "Regional_Indicator"
+ case _GraphemeSpacingMark:
+ return "SpacingMark"
+ case _GraphemeT:
+ return "T"
+ case _GraphemeV:
+ return "V"
+ case _GraphemeZWJ:
+ return "ZWJ"
+ default:
+ return "Other"
+ }
+}
+
+var _WordALetter = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x41, Hi: 0x5a, Stride: 0x1},
+ unicode.Range16{Lo: 0x61, Hi: 0x7a, Stride: 0x1},
+ unicode.Range16{Lo: 0xaa, Hi: 0xaa, Stride: 0x1},
+ unicode.Range16{Lo: 0xb5, Hi: 0xb5, Stride: 0x1},
+ unicode.Range16{Lo: 0xba, Hi: 0xba, Stride: 0x1},
+ unicode.Range16{Lo: 0xc0, Hi: 0xd6, Stride: 0x1},
+ unicode.Range16{Lo: 0xd8, Hi: 0xf6, Stride: 0x1},
+ unicode.Range16{Lo: 0xf8, Hi: 0x1ba, Stride: 0x1},
+ unicode.Range16{Lo: 0x1bb, Hi: 0x1bb, Stride: 0x1},
+ unicode.Range16{Lo: 0x1bc, Hi: 0x1bf, Stride: 0x1},
+ unicode.Range16{Lo: 0x1c0, Hi: 0x1c3, Stride: 0x1},
+ unicode.Range16{Lo: 0x1c4, Hi: 0x293, Stride: 0x1},
+ unicode.Range16{Lo: 0x294, Hi: 0x294, Stride: 0x1},
+ unicode.Range16{Lo: 0x295, Hi: 0x2af, Stride: 0x1},
+ unicode.Range16{Lo: 0x2b0, Hi: 0x2c1, Stride: 0x1},
+ unicode.Range16{Lo: 0x2c6, Hi: 0x2d1, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e0, Hi: 0x2e4, Stride: 0x1},
+ unicode.Range16{Lo: 0x2ec, Hi: 0x2ec, Stride: 0x1},
+ unicode.Range16{Lo: 0x2ee, Hi: 0x2ee, Stride: 0x1},
+ unicode.Range16{Lo: 0x370, Hi: 0x373, Stride: 0x1},
+ unicode.Range16{Lo: 0x374, Hi: 0x374, Stride: 0x1},
+ unicode.Range16{Lo: 0x376, Hi: 0x377, Stride: 0x1},
+ unicode.Range16{Lo: 0x37a, Hi: 0x37a, Stride: 0x1},
+ unicode.Range16{Lo: 0x37b, Hi: 0x37d, Stride: 0x1},
+ unicode.Range16{Lo: 0x37f, Hi: 0x37f, Stride: 0x1},
+ unicode.Range16{Lo: 0x386, Hi: 0x386, Stride: 0x1},
+ unicode.Range16{Lo: 0x388, Hi: 0x38a, Stride: 0x1},
+ unicode.Range16{Lo: 0x38c, Hi: 0x38c, Stride: 0x1},
+ unicode.Range16{Lo: 0x38e,
Hi: 0x3a1, Stride: 0x1}, + unicode.Range16{Lo: 0x3a3, Hi: 0x3f5, Stride: 0x1}, + unicode.Range16{Lo: 0x3f7, Hi: 0x481, Stride: 0x1}, + unicode.Range16{Lo: 0x48a, Hi: 0x52f, Stride: 0x1}, + unicode.Range16{Lo: 0x531, Hi: 0x556, Stride: 0x1}, + unicode.Range16{Lo: 0x559, Hi: 0x559, Stride: 0x1}, + unicode.Range16{Lo: 0x561, Hi: 0x587, Stride: 0x1}, + unicode.Range16{Lo: 0x5f3, Hi: 0x5f3, Stride: 0x1}, + unicode.Range16{Lo: 0x620, Hi: 0x63f, Stride: 0x1}, + unicode.Range16{Lo: 0x640, Hi: 0x640, Stride: 0x1}, + unicode.Range16{Lo: 0x641, Hi: 0x64a, Stride: 0x1}, + unicode.Range16{Lo: 0x66e, Hi: 0x66f, Stride: 0x1}, + unicode.Range16{Lo: 0x671, Hi: 0x6d3, Stride: 0x1}, + unicode.Range16{Lo: 0x6d5, Hi: 0x6d5, Stride: 0x1}, + unicode.Range16{Lo: 0x6e5, Hi: 0x6e6, Stride: 0x1}, + unicode.Range16{Lo: 0x6ee, Hi: 0x6ef, Stride: 0x1}, + unicode.Range16{Lo: 0x6fa, Hi: 0x6fc, Stride: 0x1}, + unicode.Range16{Lo: 0x6ff, Hi: 0x6ff, Stride: 0x1}, + unicode.Range16{Lo: 0x710, Hi: 0x710, Stride: 0x1}, + unicode.Range16{Lo: 0x712, Hi: 0x72f, Stride: 0x1}, + unicode.Range16{Lo: 0x74d, Hi: 0x7a5, Stride: 0x1}, + unicode.Range16{Lo: 0x7b1, Hi: 0x7b1, Stride: 0x1}, + unicode.Range16{Lo: 0x7ca, Hi: 0x7ea, Stride: 0x1}, + unicode.Range16{Lo: 0x7f4, Hi: 0x7f5, Stride: 0x1}, + unicode.Range16{Lo: 0x7fa, Hi: 0x7fa, Stride: 0x1}, + unicode.Range16{Lo: 0x800, Hi: 0x815, Stride: 0x1}, + unicode.Range16{Lo: 0x81a, Hi: 0x81a, Stride: 0x1}, + unicode.Range16{Lo: 0x824, Hi: 0x824, Stride: 0x1}, + unicode.Range16{Lo: 0x828, Hi: 0x828, Stride: 0x1}, + unicode.Range16{Lo: 0x840, Hi: 0x858, Stride: 0x1}, + unicode.Range16{Lo: 0x8a0, Hi: 0x8b4, Stride: 0x1}, + unicode.Range16{Lo: 0x8b6, Hi: 0x8bd, Stride: 0x1}, + unicode.Range16{Lo: 0x904, Hi: 0x939, Stride: 0x1}, + unicode.Range16{Lo: 0x93d, Hi: 0x93d, Stride: 0x1}, + unicode.Range16{Lo: 0x950, Hi: 0x950, Stride: 0x1}, + unicode.Range16{Lo: 0x958, Hi: 0x961, Stride: 0x1}, + unicode.Range16{Lo: 0x971, Hi: 0x971, Stride: 0x1}, + unicode.Range16{Lo: 0x972, Hi: 0x980, Stride: 0x1}, + unicode.Range16{Lo: 0x985, Hi: 0x98c, Stride: 0x1}, + unicode.Range16{Lo: 0x98f, Hi: 0x990, Stride: 0x1}, + unicode.Range16{Lo: 0x993, Hi: 0x9a8, Stride: 0x1}, + unicode.Range16{Lo: 0x9aa, Hi: 0x9b0, Stride: 0x1}, + unicode.Range16{Lo: 0x9b2, Hi: 0x9b2, Stride: 0x1}, + unicode.Range16{Lo: 0x9b6, Hi: 0x9b9, Stride: 0x1}, + unicode.Range16{Lo: 0x9bd, Hi: 0x9bd, Stride: 0x1}, + unicode.Range16{Lo: 0x9ce, Hi: 0x9ce, Stride: 0x1}, + unicode.Range16{Lo: 0x9dc, Hi: 0x9dd, Stride: 0x1}, + unicode.Range16{Lo: 0x9df, Hi: 0x9e1, Stride: 0x1}, + unicode.Range16{Lo: 0x9f0, Hi: 0x9f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa05, Hi: 0xa0a, Stride: 0x1}, + unicode.Range16{Lo: 0xa0f, Hi: 0xa10, Stride: 0x1}, + unicode.Range16{Lo: 0xa13, Hi: 0xa28, Stride: 0x1}, + unicode.Range16{Lo: 0xa2a, Hi: 0xa30, Stride: 0x1}, + unicode.Range16{Lo: 0xa32, Hi: 0xa33, Stride: 0x1}, + unicode.Range16{Lo: 0xa35, Hi: 0xa36, Stride: 0x1}, + unicode.Range16{Lo: 0xa38, Hi: 0xa39, Stride: 0x1}, + unicode.Range16{Lo: 0xa59, Hi: 0xa5c, Stride: 0x1}, + unicode.Range16{Lo: 0xa5e, Hi: 0xa5e, Stride: 0x1}, + unicode.Range16{Lo: 0xa72, Hi: 0xa74, Stride: 0x1}, + unicode.Range16{Lo: 0xa85, Hi: 0xa8d, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f, Hi: 0xa91, Stride: 0x1}, + unicode.Range16{Lo: 0xa93, Hi: 0xaa8, Stride: 0x1}, + unicode.Range16{Lo: 0xaaa, Hi: 0xab0, Stride: 0x1}, + unicode.Range16{Lo: 0xab2, Hi: 0xab3, Stride: 0x1}, + unicode.Range16{Lo: 0xab5, Hi: 0xab9, Stride: 0x1}, + unicode.Range16{Lo: 0xabd, Hi: 0xabd, Stride: 0x1}, + 
unicode.Range16{Lo: 0xad0, Hi: 0xad0, Stride: 0x1}, + unicode.Range16{Lo: 0xae0, Hi: 0xae1, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9, Hi: 0xaf9, Stride: 0x1}, + unicode.Range16{Lo: 0xb05, Hi: 0xb0c, Stride: 0x1}, + unicode.Range16{Lo: 0xb0f, Hi: 0xb10, Stride: 0x1}, + unicode.Range16{Lo: 0xb13, Hi: 0xb28, Stride: 0x1}, + unicode.Range16{Lo: 0xb2a, Hi: 0xb30, Stride: 0x1}, + unicode.Range16{Lo: 0xb32, Hi: 0xb33, Stride: 0x1}, + unicode.Range16{Lo: 0xb35, Hi: 0xb39, Stride: 0x1}, + unicode.Range16{Lo: 0xb3d, Hi: 0xb3d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5c, Hi: 0xb5d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f, Hi: 0xb61, Stride: 0x1}, + unicode.Range16{Lo: 0xb71, Hi: 0xb71, Stride: 0x1}, + unicode.Range16{Lo: 0xb83, Hi: 0xb83, Stride: 0x1}, + unicode.Range16{Lo: 0xb85, Hi: 0xb8a, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e, Hi: 0xb90, Stride: 0x1}, + unicode.Range16{Lo: 0xb92, Hi: 0xb95, Stride: 0x1}, + unicode.Range16{Lo: 0xb99, Hi: 0xb9a, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c, Hi: 0xb9c, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e, Hi: 0xb9f, Stride: 0x1}, + unicode.Range16{Lo: 0xba3, Hi: 0xba4, Stride: 0x1}, + unicode.Range16{Lo: 0xba8, Hi: 0xbaa, Stride: 0x1}, + unicode.Range16{Lo: 0xbae, Hi: 0xbb9, Stride: 0x1}, + unicode.Range16{Lo: 0xbd0, Hi: 0xbd0, Stride: 0x1}, + unicode.Range16{Lo: 0xc05, Hi: 0xc0c, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e, Hi: 0xc10, Stride: 0x1}, + unicode.Range16{Lo: 0xc12, Hi: 0xc28, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a, Hi: 0xc39, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d, Hi: 0xc3d, Stride: 0x1}, + unicode.Range16{Lo: 0xc58, Hi: 0xc5a, Stride: 0x1}, + unicode.Range16{Lo: 0xc60, Hi: 0xc61, Stride: 0x1}, + unicode.Range16{Lo: 0xc80, Hi: 0xc80, Stride: 0x1}, + unicode.Range16{Lo: 0xc85, Hi: 0xc8c, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e, Hi: 0xc90, Stride: 0x1}, + unicode.Range16{Lo: 0xc92, Hi: 0xca8, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa, Hi: 0xcb3, Stride: 0x1}, + unicode.Range16{Lo: 0xcb5, Hi: 0xcb9, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd, Hi: 0xcbd, Stride: 0x1}, + unicode.Range16{Lo: 0xcde, Hi: 0xcde, Stride: 0x1}, + unicode.Range16{Lo: 0xce0, Hi: 0xce1, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1, Hi: 0xcf2, Stride: 0x1}, + unicode.Range16{Lo: 0xd05, Hi: 0xd0c, Stride: 0x1}, + unicode.Range16{Lo: 0xd0e, Hi: 0xd10, Stride: 0x1}, + unicode.Range16{Lo: 0xd12, Hi: 0xd3a, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d, Hi: 0xd3d, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1}, + unicode.Range16{Lo: 0xd54, Hi: 0xd56, Stride: 0x1}, + unicode.Range16{Lo: 0xd5f, Hi: 0xd61, Stride: 0x1}, + unicode.Range16{Lo: 0xd7a, Hi: 0xd7f, Stride: 0x1}, + unicode.Range16{Lo: 0xd85, Hi: 0xd96, Stride: 0x1}, + unicode.Range16{Lo: 0xd9a, Hi: 0xdb1, Stride: 0x1}, + unicode.Range16{Lo: 0xdb3, Hi: 0xdbb, Stride: 0x1}, + unicode.Range16{Lo: 0xdbd, Hi: 0xdbd, Stride: 0x1}, + unicode.Range16{Lo: 0xdc0, Hi: 0xdc6, Stride: 0x1}, + unicode.Range16{Lo: 0xf00, Hi: 0xf00, Stride: 0x1}, + unicode.Range16{Lo: 0xf40, Hi: 0xf47, Stride: 0x1}, + unicode.Range16{Lo: 0xf49, Hi: 0xf6c, Stride: 0x1}, + unicode.Range16{Lo: 0xf88, Hi: 0xf8c, Stride: 0x1}, + unicode.Range16{Lo: 0x10a0, Hi: 0x10c5, Stride: 0x1}, + unicode.Range16{Lo: 0x10c7, Hi: 0x10c7, Stride: 0x1}, + unicode.Range16{Lo: 0x10cd, Hi: 0x10cd, Stride: 0x1}, + unicode.Range16{Lo: 0x10d0, Hi: 0x10fa, Stride: 0x1}, + unicode.Range16{Lo: 0x10fc, Hi: 0x10fc, Stride: 0x1}, + unicode.Range16{Lo: 0x10fd, Hi: 0x1248, Stride: 0x1}, + unicode.Range16{Lo: 0x124a, Hi: 0x124d, Stride: 0x1}, + unicode.Range16{Lo: 0x1250, 
Hi: 0x1256, Stride: 0x1}, + unicode.Range16{Lo: 0x1258, Hi: 0x1258, Stride: 0x1}, + unicode.Range16{Lo: 0x125a, Hi: 0x125d, Stride: 0x1}, + unicode.Range16{Lo: 0x1260, Hi: 0x1288, Stride: 0x1}, + unicode.Range16{Lo: 0x128a, Hi: 0x128d, Stride: 0x1}, + unicode.Range16{Lo: 0x1290, Hi: 0x12b0, Stride: 0x1}, + unicode.Range16{Lo: 0x12b2, Hi: 0x12b5, Stride: 0x1}, + unicode.Range16{Lo: 0x12b8, Hi: 0x12be, Stride: 0x1}, + unicode.Range16{Lo: 0x12c0, Hi: 0x12c0, Stride: 0x1}, + unicode.Range16{Lo: 0x12c2, Hi: 0x12c5, Stride: 0x1}, + unicode.Range16{Lo: 0x12c8, Hi: 0x12d6, Stride: 0x1}, + unicode.Range16{Lo: 0x12d8, Hi: 0x1310, Stride: 0x1}, + unicode.Range16{Lo: 0x1312, Hi: 0x1315, Stride: 0x1}, + unicode.Range16{Lo: 0x1318, Hi: 0x135a, Stride: 0x1}, + unicode.Range16{Lo: 0x1380, Hi: 0x138f, Stride: 0x1}, + unicode.Range16{Lo: 0x13a0, Hi: 0x13f5, Stride: 0x1}, + unicode.Range16{Lo: 0x13f8, Hi: 0x13fd, Stride: 0x1}, + unicode.Range16{Lo: 0x1401, Hi: 0x166c, Stride: 0x1}, + unicode.Range16{Lo: 0x166f, Hi: 0x167f, Stride: 0x1}, + unicode.Range16{Lo: 0x1681, Hi: 0x169a, Stride: 0x1}, + unicode.Range16{Lo: 0x16a0, Hi: 0x16ea, Stride: 0x1}, + unicode.Range16{Lo: 0x16ee, Hi: 0x16f0, Stride: 0x1}, + unicode.Range16{Lo: 0x16f1, Hi: 0x16f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1700, Hi: 0x170c, Stride: 0x1}, + unicode.Range16{Lo: 0x170e, Hi: 0x1711, Stride: 0x1}, + unicode.Range16{Lo: 0x1720, Hi: 0x1731, Stride: 0x1}, + unicode.Range16{Lo: 0x1740, Hi: 0x1751, Stride: 0x1}, + unicode.Range16{Lo: 0x1760, Hi: 0x176c, Stride: 0x1}, + unicode.Range16{Lo: 0x176e, Hi: 0x1770, Stride: 0x1}, + unicode.Range16{Lo: 0x1820, Hi: 0x1842, Stride: 0x1}, + unicode.Range16{Lo: 0x1843, Hi: 0x1843, Stride: 0x1}, + unicode.Range16{Lo: 0x1844, Hi: 0x1877, Stride: 0x1}, + unicode.Range16{Lo: 0x1880, Hi: 0x1884, Stride: 0x1}, + unicode.Range16{Lo: 0x1887, Hi: 0x18a8, Stride: 0x1}, + unicode.Range16{Lo: 0x18aa, Hi: 0x18aa, Stride: 0x1}, + unicode.Range16{Lo: 0x18b0, Hi: 0x18f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1900, Hi: 0x191e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a00, Hi: 0x1a16, Stride: 0x1}, + unicode.Range16{Lo: 0x1b05, Hi: 0x1b33, Stride: 0x1}, + unicode.Range16{Lo: 0x1b45, Hi: 0x1b4b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b83, Hi: 0x1ba0, Stride: 0x1}, + unicode.Range16{Lo: 0x1bae, Hi: 0x1baf, Stride: 0x1}, + unicode.Range16{Lo: 0x1bba, Hi: 0x1be5, Stride: 0x1}, + unicode.Range16{Lo: 0x1c00, Hi: 0x1c23, Stride: 0x1}, + unicode.Range16{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 0x1}, + unicode.Range16{Lo: 0x1c5a, Hi: 0x1c77, Stride: 0x1}, + unicode.Range16{Lo: 0x1c78, Hi: 0x1c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1c80, Hi: 0x1c88, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce9, Hi: 0x1cec, Stride: 0x1}, + unicode.Range16{Lo: 0x1cee, Hi: 0x1cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 0x1}, + unicode.Range16{Lo: 0x1d00, Hi: 0x1d2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1d2c, Hi: 0x1d6a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d6b, Hi: 0x1d77, Stride: 0x1}, + unicode.Range16{Lo: 0x1d78, Hi: 0x1d78, Stride: 0x1}, + unicode.Range16{Lo: 0x1d79, Hi: 0x1d9a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d9b, Hi: 0x1dbf, Stride: 0x1}, + unicode.Range16{Lo: 0x1e00, Hi: 0x1f15, Stride: 0x1}, + unicode.Range16{Lo: 0x1f18, Hi: 0x1f1d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f20, Hi: 0x1f45, Stride: 0x1}, + unicode.Range16{Lo: 0x1f48, Hi: 0x1f4d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f50, Hi: 0x1f57, Stride: 0x1}, + unicode.Range16{Lo: 0x1f59, Hi: 0x1f59, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 0x1}, 
+ unicode.Range16{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f80, Hi: 0x1fb4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe0, Hi: 0x1fec, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 0x1}, + unicode.Range16{Lo: 0x2071, Hi: 0x2071, Stride: 0x1}, + unicode.Range16{Lo: 0x207f, Hi: 0x207f, Stride: 0x1}, + unicode.Range16{Lo: 0x2090, Hi: 0x209c, Stride: 0x1}, + unicode.Range16{Lo: 0x2102, Hi: 0x2102, Stride: 0x1}, + unicode.Range16{Lo: 0x2107, Hi: 0x2107, Stride: 0x1}, + unicode.Range16{Lo: 0x210a, Hi: 0x2113, Stride: 0x1}, + unicode.Range16{Lo: 0x2115, Hi: 0x2115, Stride: 0x1}, + unicode.Range16{Lo: 0x2119, Hi: 0x211d, Stride: 0x1}, + unicode.Range16{Lo: 0x2124, Hi: 0x2124, Stride: 0x1}, + unicode.Range16{Lo: 0x2126, Hi: 0x2126, Stride: 0x1}, + unicode.Range16{Lo: 0x2128, Hi: 0x2128, Stride: 0x1}, + unicode.Range16{Lo: 0x212a, Hi: 0x212d, Stride: 0x1}, + unicode.Range16{Lo: 0x212f, Hi: 0x2134, Stride: 0x1}, + unicode.Range16{Lo: 0x2135, Hi: 0x2138, Stride: 0x1}, + unicode.Range16{Lo: 0x2139, Hi: 0x2139, Stride: 0x1}, + unicode.Range16{Lo: 0x213c, Hi: 0x213f, Stride: 0x1}, + unicode.Range16{Lo: 0x2145, Hi: 0x2149, Stride: 0x1}, + unicode.Range16{Lo: 0x214e, Hi: 0x214e, Stride: 0x1}, + unicode.Range16{Lo: 0x2160, Hi: 0x2182, Stride: 0x1}, + unicode.Range16{Lo: 0x2183, Hi: 0x2184, Stride: 0x1}, + unicode.Range16{Lo: 0x2185, Hi: 0x2188, Stride: 0x1}, + unicode.Range16{Lo: 0x24b6, Hi: 0x24e9, Stride: 0x1}, + unicode.Range16{Lo: 0x2c00, Hi: 0x2c2e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c30, Hi: 0x2c5e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c60, Hi: 0x2c7b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7c, Hi: 0x2c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7e, Hi: 0x2ce4, Stride: 0x1}, + unicode.Range16{Lo: 0x2ceb, Hi: 0x2cee, Stride: 0x1}, + unicode.Range16{Lo: 0x2cf2, Hi: 0x2cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x2d00, Hi: 0x2d25, Stride: 0x1}, + unicode.Range16{Lo: 0x2d27, Hi: 0x2d27, Stride: 0x1}, + unicode.Range16{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 0x1}, + unicode.Range16{Lo: 0x2d30, Hi: 0x2d67, Stride: 0x1}, + unicode.Range16{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 0x1}, + unicode.Range16{Lo: 0x2d80, Hi: 0x2d96, Stride: 0x1}, + unicode.Range16{Lo: 0x2da0, Hi: 0x2da6, Stride: 0x1}, + unicode.Range16{Lo: 0x2da8, Hi: 0x2dae, Stride: 0x1}, + unicode.Range16{Lo: 0x2db0, Hi: 0x2db6, Stride: 0x1}, + unicode.Range16{Lo: 0x2db8, Hi: 0x2dbe, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc8, Hi: 0x2dce, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd8, Hi: 0x2dde, Stride: 0x1}, + unicode.Range16{Lo: 0x2e2f, Hi: 0x2e2f, Stride: 0x1}, + unicode.Range16{Lo: 0x3005, Hi: 0x3005, Stride: 0x1}, + unicode.Range16{Lo: 0x303b, Hi: 0x303b, Stride: 0x1}, + unicode.Range16{Lo: 0x303c, Hi: 0x303c, Stride: 0x1}, + unicode.Range16{Lo: 0x3105, Hi: 0x312d, Stride: 0x1}, + unicode.Range16{Lo: 0x3131, Hi: 0x318e, Stride: 0x1}, + unicode.Range16{Lo: 0x31a0, Hi: 0x31ba, Stride: 0x1}, + unicode.Range16{Lo: 0xa000, Hi: 0xa014, Stride: 0x1}, + unicode.Range16{Lo: 
0xa015, Hi: 0xa015, Stride: 0x1}, + unicode.Range16{Lo: 0xa016, Hi: 0xa48c, Stride: 0x1}, + unicode.Range16{Lo: 0xa4d0, Hi: 0xa4f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa4f8, Hi: 0xa4fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa500, Hi: 0xa60b, Stride: 0x1}, + unicode.Range16{Lo: 0xa60c, Hi: 0xa60c, Stride: 0x1}, + unicode.Range16{Lo: 0xa610, Hi: 0xa61f, Stride: 0x1}, + unicode.Range16{Lo: 0xa62a, Hi: 0xa62b, Stride: 0x1}, + unicode.Range16{Lo: 0xa640, Hi: 0xa66d, Stride: 0x1}, + unicode.Range16{Lo: 0xa66e, Hi: 0xa66e, Stride: 0x1}, + unicode.Range16{Lo: 0xa67f, Hi: 0xa67f, Stride: 0x1}, + unicode.Range16{Lo: 0xa680, Hi: 0xa69b, Stride: 0x1}, + unicode.Range16{Lo: 0xa69c, Hi: 0xa69d, Stride: 0x1}, + unicode.Range16{Lo: 0xa6a0, Hi: 0xa6e5, Stride: 0x1}, + unicode.Range16{Lo: 0xa6e6, Hi: 0xa6ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa717, Hi: 0xa71f, Stride: 0x1}, + unicode.Range16{Lo: 0xa722, Hi: 0xa76f, Stride: 0x1}, + unicode.Range16{Lo: 0xa770, Hi: 0xa770, Stride: 0x1}, + unicode.Range16{Lo: 0xa771, Hi: 0xa787, Stride: 0x1}, + unicode.Range16{Lo: 0xa788, Hi: 0xa788, Stride: 0x1}, + unicode.Range16{Lo: 0xa78b, Hi: 0xa78e, Stride: 0x1}, + unicode.Range16{Lo: 0xa78f, Hi: 0xa78f, Stride: 0x1}, + unicode.Range16{Lo: 0xa790, Hi: 0xa7ae, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b0, Hi: 0xa7b7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f7, Hi: 0xa7f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f8, Hi: 0xa7f9, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fa, Hi: 0xa7fa, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fb, Hi: 0xa801, Stride: 0x1}, + unicode.Range16{Lo: 0xa803, Hi: 0xa805, Stride: 0x1}, + unicode.Range16{Lo: 0xa807, Hi: 0xa80a, Stride: 0x1}, + unicode.Range16{Lo: 0xa80c, Hi: 0xa822, Stride: 0x1}, + unicode.Range16{Lo: 0xa840, Hi: 0xa873, Stride: 0x1}, + unicode.Range16{Lo: 0xa882, Hi: 0xa8b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fd, Hi: 0xa8fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa90a, Hi: 0xa925, Stride: 0x1}, + unicode.Range16{Lo: 0xa930, Hi: 0xa946, Stride: 0x1}, + unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1}, + unicode.Range16{Lo: 0xa984, Hi: 0xa9b2, Stride: 0x1}, + unicode.Range16{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 0x1}, + unicode.Range16{Lo: 0xaa00, Hi: 0xaa28, Stride: 0x1}, + unicode.Range16{Lo: 0xaa40, Hi: 0xaa42, Stride: 0x1}, + unicode.Range16{Lo: 0xaa44, Hi: 0xaa4b, Stride: 0x1}, + unicode.Range16{Lo: 0xaae0, Hi: 0xaaea, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf2, Hi: 0xaaf2, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf3, Hi: 0xaaf4, Stride: 0x1}, + unicode.Range16{Lo: 0xab01, Hi: 0xab06, Stride: 0x1}, + unicode.Range16{Lo: 0xab09, Hi: 0xab0e, Stride: 0x1}, + unicode.Range16{Lo: 0xab11, Hi: 0xab16, Stride: 0x1}, + unicode.Range16{Lo: 0xab20, Hi: 0xab26, Stride: 0x1}, + unicode.Range16{Lo: 0xab28, Hi: 0xab2e, Stride: 0x1}, + unicode.Range16{Lo: 0xab30, Hi: 0xab5a, Stride: 0x1}, + unicode.Range16{Lo: 0xab5c, Hi: 0xab5f, Stride: 0x1}, + unicode.Range16{Lo: 0xab60, Hi: 0xab65, Stride: 0x1}, + unicode.Range16{Lo: 0xab70, Hi: 0xabbf, Stride: 0x1}, + unicode.Range16{Lo: 0xabc0, Hi: 0xabe2, Stride: 0x1}, + unicode.Range16{Lo: 0xac00, Hi: 0xd7a3, Stride: 0x1}, + unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1}, + unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1}, + unicode.Range16{Lo: 0xfb00, Hi: 0xfb06, Stride: 0x1}, + unicode.Range16{Lo: 0xfb13, Hi: 0xfb17, Stride: 0x1}, + unicode.Range16{Lo: 0xfb50, Hi: 0xfbb1, Stride: 0x1}, + unicode.Range16{Lo: 0xfbd3, Hi: 0xfd3d, 
Stride: 0x1}, + unicode.Range16{Lo: 0xfd50, Hi: 0xfd8f, Stride: 0x1}, + unicode.Range16{Lo: 0xfd92, Hi: 0xfdc7, Stride: 0x1}, + unicode.Range16{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 0x1}, + unicode.Range16{Lo: 0xfe70, Hi: 0xfe74, Stride: 0x1}, + unicode.Range16{Lo: 0xfe76, Hi: 0xfefc, Stride: 0x1}, + unicode.Range16{Lo: 0xff21, Hi: 0xff3a, Stride: 0x1}, + unicode.Range16{Lo: 0xff41, Hi: 0xff5a, Stride: 0x1}, + unicode.Range16{Lo: 0xffa0, Hi: 0xffbe, Stride: 0x1}, + unicode.Range16{Lo: 0xffc2, Hi: 0xffc7, Stride: 0x1}, + unicode.Range16{Lo: 0xffca, Hi: 0xffcf, Stride: 0x1}, + unicode.Range16{Lo: 0xffd2, Hi: 0xffd7, Stride: 0x1}, + unicode.Range16{Lo: 0xffda, Hi: 0xffdc, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10000, Hi: 0x1000b, Stride: 0x1}, + unicode.Range32{Lo: 0x1000d, Hi: 0x10026, Stride: 0x1}, + unicode.Range32{Lo: 0x10028, Hi: 0x1003a, Stride: 0x1}, + unicode.Range32{Lo: 0x1003c, Hi: 0x1003d, Stride: 0x1}, + unicode.Range32{Lo: 0x1003f, Hi: 0x1004d, Stride: 0x1}, + unicode.Range32{Lo: 0x10050, Hi: 0x1005d, Stride: 0x1}, + unicode.Range32{Lo: 0x10080, Hi: 0x100fa, Stride: 0x1}, + unicode.Range32{Lo: 0x10140, Hi: 0x10174, Stride: 0x1}, + unicode.Range32{Lo: 0x10280, Hi: 0x1029c, Stride: 0x1}, + unicode.Range32{Lo: 0x102a0, Hi: 0x102d0, Stride: 0x1}, + unicode.Range32{Lo: 0x10300, Hi: 0x1031f, Stride: 0x1}, + unicode.Range32{Lo: 0x10330, Hi: 0x10340, Stride: 0x1}, + unicode.Range32{Lo: 0x10341, Hi: 0x10341, Stride: 0x1}, + unicode.Range32{Lo: 0x10342, Hi: 0x10349, Stride: 0x1}, + unicode.Range32{Lo: 0x1034a, Hi: 0x1034a, Stride: 0x1}, + unicode.Range32{Lo: 0x10350, Hi: 0x10375, Stride: 0x1}, + unicode.Range32{Lo: 0x10380, Hi: 0x1039d, Stride: 0x1}, + unicode.Range32{Lo: 0x103a0, Hi: 0x103c3, Stride: 0x1}, + unicode.Range32{Lo: 0x103c8, Hi: 0x103cf, Stride: 0x1}, + unicode.Range32{Lo: 0x103d1, Hi: 0x103d5, Stride: 0x1}, + unicode.Range32{Lo: 0x10400, Hi: 0x1044f, Stride: 0x1}, + unicode.Range32{Lo: 0x10450, Hi: 0x1049d, Stride: 0x1}, + unicode.Range32{Lo: 0x104b0, Hi: 0x104d3, Stride: 0x1}, + unicode.Range32{Lo: 0x104d8, Hi: 0x104fb, Stride: 0x1}, + unicode.Range32{Lo: 0x10500, Hi: 0x10527, Stride: 0x1}, + unicode.Range32{Lo: 0x10530, Hi: 0x10563, Stride: 0x1}, + unicode.Range32{Lo: 0x10600, Hi: 0x10736, Stride: 0x1}, + unicode.Range32{Lo: 0x10740, Hi: 0x10755, Stride: 0x1}, + unicode.Range32{Lo: 0x10760, Hi: 0x10767, Stride: 0x1}, + unicode.Range32{Lo: 0x10800, Hi: 0x10805, Stride: 0x1}, + unicode.Range32{Lo: 0x10808, Hi: 0x10808, Stride: 0x1}, + unicode.Range32{Lo: 0x1080a, Hi: 0x10835, Stride: 0x1}, + unicode.Range32{Lo: 0x10837, Hi: 0x10838, Stride: 0x1}, + unicode.Range32{Lo: 0x1083c, Hi: 0x1083c, Stride: 0x1}, + unicode.Range32{Lo: 0x1083f, Hi: 0x10855, Stride: 0x1}, + unicode.Range32{Lo: 0x10860, Hi: 0x10876, Stride: 0x1}, + unicode.Range32{Lo: 0x10880, Hi: 0x1089e, Stride: 0x1}, + unicode.Range32{Lo: 0x108e0, Hi: 0x108f2, Stride: 0x1}, + unicode.Range32{Lo: 0x108f4, Hi: 0x108f5, Stride: 0x1}, + unicode.Range32{Lo: 0x10900, Hi: 0x10915, Stride: 0x1}, + unicode.Range32{Lo: 0x10920, Hi: 0x10939, Stride: 0x1}, + unicode.Range32{Lo: 0x10980, Hi: 0x109b7, Stride: 0x1}, + unicode.Range32{Lo: 0x109be, Hi: 0x109bf, Stride: 0x1}, + unicode.Range32{Lo: 0x10a00, Hi: 0x10a00, Stride: 0x1}, + unicode.Range32{Lo: 0x10a10, Hi: 0x10a13, Stride: 0x1}, + unicode.Range32{Lo: 0x10a15, Hi: 0x10a17, Stride: 0x1}, + unicode.Range32{Lo: 0x10a19, Hi: 0x10a33, Stride: 0x1}, + unicode.Range32{Lo: 0x10a60, Hi: 0x10a7c, Stride: 0x1}, + unicode.Range32{Lo: 0x10a80, Hi: 0x10a9c, Stride: 
0x1}, + unicode.Range32{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 0x1}, + unicode.Range32{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 0x1}, + unicode.Range32{Lo: 0x10b00, Hi: 0x10b35, Stride: 0x1}, + unicode.Range32{Lo: 0x10b40, Hi: 0x10b55, Stride: 0x1}, + unicode.Range32{Lo: 0x10b60, Hi: 0x10b72, Stride: 0x1}, + unicode.Range32{Lo: 0x10b80, Hi: 0x10b91, Stride: 0x1}, + unicode.Range32{Lo: 0x10c00, Hi: 0x10c48, Stride: 0x1}, + unicode.Range32{Lo: 0x10c80, Hi: 0x10cb2, Stride: 0x1}, + unicode.Range32{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 0x1}, + unicode.Range32{Lo: 0x11003, Hi: 0x11037, Stride: 0x1}, + unicode.Range32{Lo: 0x11083, Hi: 0x110af, Stride: 0x1}, + unicode.Range32{Lo: 0x110d0, Hi: 0x110e8, Stride: 0x1}, + unicode.Range32{Lo: 0x11103, Hi: 0x11126, Stride: 0x1}, + unicode.Range32{Lo: 0x11150, Hi: 0x11172, Stride: 0x1}, + unicode.Range32{Lo: 0x11176, Hi: 0x11176, Stride: 0x1}, + unicode.Range32{Lo: 0x11183, Hi: 0x111b2, Stride: 0x1}, + unicode.Range32{Lo: 0x111c1, Hi: 0x111c4, Stride: 0x1}, + unicode.Range32{Lo: 0x111da, Hi: 0x111da, Stride: 0x1}, + unicode.Range32{Lo: 0x111dc, Hi: 0x111dc, Stride: 0x1}, + unicode.Range32{Lo: 0x11200, Hi: 0x11211, Stride: 0x1}, + unicode.Range32{Lo: 0x11213, Hi: 0x1122b, Stride: 0x1}, + unicode.Range32{Lo: 0x11280, Hi: 0x11286, Stride: 0x1}, + unicode.Range32{Lo: 0x11288, Hi: 0x11288, Stride: 0x1}, + unicode.Range32{Lo: 0x1128a, Hi: 0x1128d, Stride: 0x1}, + unicode.Range32{Lo: 0x1128f, Hi: 0x1129d, Stride: 0x1}, + unicode.Range32{Lo: 0x1129f, Hi: 0x112a8, Stride: 0x1}, + unicode.Range32{Lo: 0x112b0, Hi: 0x112de, Stride: 0x1}, + unicode.Range32{Lo: 0x11305, Hi: 0x1130c, Stride: 0x1}, + unicode.Range32{Lo: 0x1130f, Hi: 0x11310, Stride: 0x1}, + unicode.Range32{Lo: 0x11313, Hi: 0x11328, Stride: 0x1}, + unicode.Range32{Lo: 0x1132a, Hi: 0x11330, Stride: 0x1}, + unicode.Range32{Lo: 0x11332, Hi: 0x11333, Stride: 0x1}, + unicode.Range32{Lo: 0x11335, Hi: 0x11339, Stride: 0x1}, + unicode.Range32{Lo: 0x1133d, Hi: 0x1133d, Stride: 0x1}, + unicode.Range32{Lo: 0x11350, Hi: 0x11350, Stride: 0x1}, + unicode.Range32{Lo: 0x1135d, Hi: 0x11361, Stride: 0x1}, + unicode.Range32{Lo: 0x11400, Hi: 0x11434, Stride: 0x1}, + unicode.Range32{Lo: 0x11447, Hi: 0x1144a, Stride: 0x1}, + unicode.Range32{Lo: 0x11480, Hi: 0x114af, Stride: 0x1}, + unicode.Range32{Lo: 0x114c4, Hi: 0x114c5, Stride: 0x1}, + unicode.Range32{Lo: 0x114c7, Hi: 0x114c7, Stride: 0x1}, + unicode.Range32{Lo: 0x11580, Hi: 0x115ae, Stride: 0x1}, + unicode.Range32{Lo: 0x115d8, Hi: 0x115db, Stride: 0x1}, + unicode.Range32{Lo: 0x11600, Hi: 0x1162f, Stride: 0x1}, + unicode.Range32{Lo: 0x11644, Hi: 0x11644, Stride: 0x1}, + unicode.Range32{Lo: 0x11680, Hi: 0x116aa, Stride: 0x1}, + unicode.Range32{Lo: 0x118a0, Hi: 0x118df, Stride: 0x1}, + unicode.Range32{Lo: 0x118ff, Hi: 0x118ff, Stride: 0x1}, + unicode.Range32{Lo: 0x11ac0, Hi: 0x11af8, Stride: 0x1}, + unicode.Range32{Lo: 0x11c00, Hi: 0x11c08, Stride: 0x1}, + unicode.Range32{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c40, Hi: 0x11c40, Stride: 0x1}, + unicode.Range32{Lo: 0x11c72, Hi: 0x11c8f, Stride: 0x1}, + unicode.Range32{Lo: 0x12000, Hi: 0x12399, Stride: 0x1}, + unicode.Range32{Lo: 0x12400, Hi: 0x1246e, Stride: 0x1}, + unicode.Range32{Lo: 0x12480, Hi: 0x12543, Stride: 0x1}, + unicode.Range32{Lo: 0x13000, Hi: 0x1342e, Stride: 0x1}, + unicode.Range32{Lo: 0x14400, Hi: 0x14646, Stride: 0x1}, + unicode.Range32{Lo: 0x16800, Hi: 0x16a38, Stride: 0x1}, + unicode.Range32{Lo: 0x16a40, Hi: 0x16a5e, Stride: 0x1}, + unicode.Range32{Lo: 0x16ad0, Hi: 0x16aed, Stride: 0x1}, + 
unicode.Range32{Lo: 0x16b00, Hi: 0x16b2f, Stride: 0x1}, + unicode.Range32{Lo: 0x16b40, Hi: 0x16b43, Stride: 0x1}, + unicode.Range32{Lo: 0x16b63, Hi: 0x16b77, Stride: 0x1}, + unicode.Range32{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 0x1}, + unicode.Range32{Lo: 0x16f00, Hi: 0x16f44, Stride: 0x1}, + unicode.Range32{Lo: 0x16f50, Hi: 0x16f50, Stride: 0x1}, + unicode.Range32{Lo: 0x16f93, Hi: 0x16f9f, Stride: 0x1}, + unicode.Range32{Lo: 0x16fe0, Hi: 0x16fe0, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 0x1}, + unicode.Range32{Lo: 0x1d400, Hi: 0x1d454, Stride: 0x1}, + unicode.Range32{Lo: 0x1d456, Hi: 0x1d49c, Stride: 0x1}, + unicode.Range32{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4ae, Hi: 0x1d4b9, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4c5, Hi: 0x1d505, Stride: 0x1}, + unicode.Range32{Lo: 0x1d507, Hi: 0x1d50a, Stride: 0x1}, + unicode.Range32{Lo: 0x1d50d, Hi: 0x1d514, Stride: 0x1}, + unicode.Range32{Lo: 0x1d516, Hi: 0x1d51c, Stride: 0x1}, + unicode.Range32{Lo: 0x1d51e, Hi: 0x1d539, Stride: 0x1}, + unicode.Range32{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d540, Hi: 0x1d544, Stride: 0x1}, + unicode.Range32{Lo: 0x1d546, Hi: 0x1d546, Stride: 0x1}, + unicode.Range32{Lo: 0x1d54a, Hi: 0x1d550, Stride: 0x1}, + unicode.Range32{Lo: 0x1d552, Hi: 0x1d6a5, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6dc, Hi: 0x1d6fa, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 0x1}, + unicode.Range32{Lo: 0x1d716, Hi: 0x1d734, Stride: 0x1}, + unicode.Range32{Lo: 0x1d736, Hi: 0x1d74e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d750, Hi: 0x1d76e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d770, Hi: 0x1d788, Stride: 0x1}, + unicode.Range32{Lo: 0x1d78a, Hi: 0x1d7a8, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7aa, Hi: 0x1d7c2, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7c4, Hi: 0x1d7cb, Stride: 0x1}, + unicode.Range32{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 0x1}, + unicode.Range32{Lo: 0x1e900, Hi: 0x1e943, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 0x1}, + 
unicode.Range32{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 0x1},
+ unicode.Range32{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 0x1},
+ unicode.Range32{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 0x1},
+ unicode.Range32{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 0x1},
+ unicode.Range32{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f130, Hi: 0x1f149, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f150, Hi: 0x1f169, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f170, Hi: 0x1f189, Stride: 0x1},
+ },
+ LatinOffset: 7,
+}
+
+var _WordCR = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _WordDouble_Quote = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x22, Hi: 0x22, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _WordE_Base = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x261d, Hi: 0x261d, Stride: 0x1},
+ unicode.Range16{Lo: 0x26f9, Hi: 0x26f9, Stride: 0x1},
+ unicode.Range16{Lo: 0x270a, Hi: 0x270d, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f385, Hi: 0x1f385, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f3c3, Hi: 0x1f3c4, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f3ca, Hi: 0x1f3cb, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f442, Hi: 0x1f443, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f446, Hi: 0x1f450, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f46e, Hi: 0x1f46e, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f470, Hi: 0x1f478, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f47c, Hi: 0x1f47c, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f481, Hi: 0x1f483, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f485, Hi: 0x1f487, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f4aa, Hi: 0x1f4aa, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f575, Hi: 0x1f575, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f57a, Hi: 0x1f57a, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f590, Hi: 0x1f590, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f595, Hi: 0x1f596, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f645, Hi: 0x1f647, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f64b, Hi: 0x1f64f, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f6a3, Hi: 0x1f6a3, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f6b4, Hi: 0x1f6b6, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f6c0, Hi: 0x1f6c0, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f918, Hi: 0x1f91e, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f926, Hi: 0x1f926, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f930, Hi: 0x1f930, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f933, Hi: 0x1f939, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f93c, Hi: 0x1f93e, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _WordE_Base_GAZ = &unicode.RangeTable{
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f466, Hi: 0x1f469, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _WordE_Modifier = &unicode.RangeTable{
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f3fb, Hi: 0x1f3ff, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _WordExtend = &unicode.RangeTable{
+ R16: []unicode.Range16{
+
unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1}, + unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1}, + unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1}, + unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1}, + unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1}, + unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1}, + unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1}, + unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1}, + unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1}, + unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1}, + unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1}, + unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1}, + unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1}, + unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1}, + unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1}, + unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1}, + unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1}, + unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1}, + unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1}, + unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1}, + unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1}, + unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1}, + unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1}, + unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1}, + unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1}, + unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1}, + unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1}, + unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1}, + unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1}, + unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1}, + unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1}, + unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1}, + unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1}, + unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1}, + unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1}, + unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1}, + unicode.Range16{Lo: 0x962, Hi: 0x963, Stride: 0x1}, + unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1}, + unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1}, + unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1}, + unicode.Range16{Lo: 0x9be, Hi: 0x9c0, Stride: 0x1}, + unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1}, + unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1}, + unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1}, + unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1}, + unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1}, + unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1}, + unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1}, + unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1}, + unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1}, + unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1}, + unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1}, + unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1}, + unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1}, + unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1}, + unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1}, + unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1}, + unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1}, + unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1}, + unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1}, + unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1}, + unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1}, + unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1}, + unicode.Range16{Lo: 0xacd, Hi: 0xacd, 
Stride: 0x1}, + unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1}, + unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1}, + unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1}, + unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1}, + unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1}, + unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1}, + unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1}, + unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1}, + unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1}, + unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1}, + unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1}, + unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1}, + unicode.Range16{Lo: 0xbbe, Hi: 0xbbf, Stride: 0x1}, + unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1}, + unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1}, + unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1}, + unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1}, + unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1}, + unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1}, + unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1}, + unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1}, + unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1}, + unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1}, + unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1}, + unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1}, + unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1}, + unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1}, + unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0, Hi: 0xcc4, Stride: 0x1}, + unicode.Range16{Lo: 0xcc6, Hi: 0xcc6, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1}, + unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1}, + unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1}, + unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1}, + unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1}, + unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1}, + unicode.Range16{Lo: 0xd3e, Hi: 0xd40, Stride: 0x1}, + unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1}, + unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1}, + unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1}, + unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1}, + unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1}, + unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1}, + unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1}, + unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1}, + unicode.Range16{Lo: 0xdcf, Hi: 0xdd1, Stride: 0x1}, + unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1}, + unicode.Range16{Lo: 0xdd8, Hi: 0xddf, Stride: 0x1}, + unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1}, + unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1}, + unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1}, + unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1}, + unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1}, + unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1}, + unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1}, + unicode.Range16{Lo: 0xf18, 
Hi: 0xf19, Stride: 0x1}, + unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1}, + unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1}, + unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1}, + unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1}, + unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1}, + unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1}, + unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1}, + unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1}, + unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1}, + unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1}, + unicode.Range16{Lo: 0x102b, Hi: 0x102c, Stride: 0x1}, + unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1}, + unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1}, + unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1}, + unicode.Range16{Lo: 0x1038, Hi: 0x1038, Stride: 0x1}, + unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1}, + unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1}, + unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1}, + unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1}, + unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1}, + unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1}, + unicode.Range16{Lo: 0x1062, Hi: 0x1064, Stride: 0x1}, + unicode.Range16{Lo: 0x1067, Hi: 0x106d, Stride: 0x1}, + unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1}, + unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1}, + unicode.Range16{Lo: 0x1083, Hi: 0x1084, Stride: 0x1}, + unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1}, + unicode.Range16{Lo: 0x1087, Hi: 0x108c, Stride: 0x1}, + unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1}, + unicode.Range16{Lo: 0x108f, Hi: 0x108f, Stride: 0x1}, + unicode.Range16{Lo: 0x109a, Hi: 0x109c, Stride: 0x1}, + unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1}, + unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1}, + unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1}, + unicode.Range16{Lo: 0x1732, Hi: 0x1734, Stride: 0x1}, + unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1}, + unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1}, + unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1}, + unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1}, + unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1}, + unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1}, + unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1}, + unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1}, + unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1}, + unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1}, + unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1}, + unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1}, + unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1}, + unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1}, + unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1}, + unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1}, + unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1}, + unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1}, + unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1}, + unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1}, + unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1}, + unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1}, + unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1}, + unicode.Range16{Lo: 
0x1a58, Hi: 0x1a5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1}, + unicode.Range16{Lo: 0x1a61, Hi: 0x1a61, Stride: 0x1}, + unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1}, + unicode.Range16{Lo: 0x1a63, Hi: 0x1a64, Stride: 0x1}, + unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1}, + unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1}, + unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1}, + unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1}, + unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1}, + unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1}, + unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1}, + unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1}, + unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1}, + unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1}, + unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1}, + unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1}, + unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1}, + unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1}, + unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1}, + unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1}, + unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1}, + unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1}, + unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, Stride: 0x1}, + unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1}, + unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1}, + unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1}, + unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1}, + unicode.Range16{Lo: 0x200c, Hi: 0x200c, Stride: 0x1}, + unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1}, + unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1}, + unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1}, + unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1}, + unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1}, + unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1}, + unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1}, + unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1}, + unicode.Range16{Lo: 0x3099, Hi: 0x309a, 
Stride: 0x1}, + unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1}, + unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1}, + unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1}, + unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1}, + unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1}, + unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1}, + unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1}, + unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1}, + unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1}, + unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1}, + unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1}, + unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1}, + unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1}, + unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1}, + unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1}, + unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1}, + unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1}, + unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1}, + unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1}, + unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1}, + unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1}, + unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7b, Hi: 0xaa7b, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7d, Hi: 0xaa7d, Stride: 0x1}, + unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1}, + unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1}, + unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1}, + unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1}, + unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1}, + unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1}, + unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1}, + unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1}, + unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1}, + unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1}, + unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1}, + unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1}, + unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1}, + unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1}, + unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1}, + unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1}, + unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a01, 
Hi: 0x10a03, Stride: 0x1}, + unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1}, + unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1}, + unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1}, + unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1}, + unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1}, + unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1}, + unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1}, + unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1}, + unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1}, + unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1}, + unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1}, + unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1}, + unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1}, + unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1}, + unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1}, + unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1}, + unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1}, + unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1}, + unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1}, + unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1}, + unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1}, + unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1}, + unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1}, + unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1}, + unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1}, + unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1}, + unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1}, + unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1}, + unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1}, + unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1}, + unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1}, + unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1}, + unicode.Range32{Lo: 0x112df, Hi: 0x112df, Stride: 0x1}, + unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1}, + unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1}, + unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1}, + unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1}, + unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1}, + unicode.Range32{Lo: 0x1133e, Hi: 0x1133f, Stride: 0x1}, + unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1}, + unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1}, + unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1}, + unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1}, + unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1}, + unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1}, + unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1}, + unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1}, + unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1}, + unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1}, + unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1}, + unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1}, + unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1}, + unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1}, + unicode.Range32{Lo: 0x114b0, Hi: 0x114b2, Stride: 0x1}, + unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1}, + unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1}, + unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1}, + unicode.Range32{Lo: 0x114bb, Hi: 0x114be, Stride: 0x1}, + unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1}, + unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, 
Stride: 0x1}, + unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115af, Hi: 0x115b1, Stride: 0x1}, + unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1}, + unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1}, + unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1}, + unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1}, + unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1}, + unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1}, + unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1}, + unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1}, + unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1}, + unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1}, + unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1}, + unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1}, + unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1}, + unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1}, + unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1}, + unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1}, + unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1}, + unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1}, + unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1}, + unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1}, + unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1}, + unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1}, + unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1}, + unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1}, + unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1}, + unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1}, + unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1}, + unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1}, + unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1}, + unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1}, + unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1}, + unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1}, + unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d165, Hi: 0x1d166, Stride: 0x1}, + unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1}, + unicode.Range32{Lo: 0x1d16d, Hi: 0x1d172, Stride: 0x1}, + unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1}, + unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1}, + unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1}, + unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1}, + unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1}, + unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1}, + unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1}, + unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1}, + unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1}, + unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1}, + unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1}, + unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1}, + unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1}, + unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1}, + 
unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1},
+ unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1},
+ unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _WordExtendNumLet = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x5f, Hi: 0x5f, Stride: 0x1},
+ unicode.Range16{Lo: 0x202f, Hi: 0x202f, Stride: 0x1},
+ unicode.Range16{Lo: 0x203f, Hi: 0x2040, Stride: 0x1},
+ unicode.Range16{Lo: 0x2054, Hi: 0x2054, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe33, Hi: 0xfe34, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe4d, Hi: 0xfe4f, Stride: 0x1},
+ unicode.Range16{Lo: 0xff3f, Hi: 0xff3f, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _WordFormat = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1},
+ unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1},
+ unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1},
+ unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1},
+ unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1},
+ unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1},
+ unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1},
+ unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1},
+ unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1},
+ unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1},
+ unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1},
+ unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1},
+ unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1},
+ unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1},
+ unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _WordGlue_After_Zwj = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x2764, Hi: 0x2764, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f48b, Hi: 0x1f48b, Stride: 0x1},
+ unicode.Range32{Lo: 0x1f5e8, Hi: 0x1f5e8, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _WordHebrew_Letter = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x5d0, Hi: 0x5ea, Stride: 0x1},
+ unicode.Range16{Lo: 0x5f0, Hi: 0x5f2, Stride: 0x1},
+ unicode.Range16{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 0x1},
+ unicode.Range16{Lo: 0xfb1f, Hi: 0xfb28, Stride: 0x1},
+ unicode.Range16{Lo: 0xfb2a, Hi: 0xfb36, Stride: 0x1},
+ unicode.Range16{Lo: 0xfb38, Hi: 0xfb3c, Stride: 0x1},
+ unicode.Range16{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 0x1},
+ unicode.Range16{Lo: 0xfb40, Hi: 0xfb41, Stride: 0x1},
+ unicode.Range16{Lo: 0xfb43, Hi: 0xfb44, Stride: 0x1},
+ unicode.Range16{Lo: 0xfb46, Hi: 0xfb4f, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _WordKatakana = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x3031, Hi: 0x3035, Stride: 0x1},
+ unicode.Range16{Lo: 0x309b, Hi: 0x309c, Stride: 0x1},
+ unicode.Range16{Lo: 0x30a0, Hi: 0x30a0, Stride: 0x1},
+ unicode.Range16{Lo: 0x30a1, Hi: 0x30fa, Stride: 0x1},
+ unicode.Range16{Lo: 0x30fc, Hi: 0x30fe, Stride: 0x1},
+ unicode.Range16{Lo: 0x30ff, Hi: 0x30ff, Stride: 0x1},
+ unicode.Range16{Lo: 0x31f0, Hi: 0x31ff, Stride: 0x1},
+ unicode.Range16{Lo: 0x32d0, Hi: 0x32fe, Stride: 0x1},
+ unicode.Range16{Lo: 0x3300, Hi: 0x3357, Stride: 0x1},
+ unicode.Range16{Lo: 0xff66, Hi: 0xff6f, Stride: 0x1},
+ unicode.Range16{Lo: 0xff70, Hi: 0xff70, Stride: 0x1},
+ unicode.Range16{Lo: 0xff71, Hi: 0xff9d, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1b000, Hi: 0x1b000, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _WordLF = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _WordMidLetter = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x3a, Hi: 0x3a, Stride: 0x1},
+ unicode.Range16{Lo: 0xb7, Hi: 0xb7, Stride: 0x1},
+ unicode.Range16{Lo: 0x2d7, Hi: 0x2d7, Stride: 0x1},
+ unicode.Range16{Lo: 0x387, Hi: 0x387, Stride: 0x1},
+ unicode.Range16{Lo: 0x5f4, Hi: 0x5f4, Stride: 0x1},
+ unicode.Range16{Lo: 0x2027, Hi: 0x2027, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe13, Hi: 0xfe13, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe55, Hi: 0xfe55, Stride: 0x1},
+ unicode.Range16{Lo: 0xff1a, Hi: 0xff1a, Stride: 0x1},
+ },
+ LatinOffset: 2,
+}
+
+var _WordMidNum = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x2c, Hi: 0x2c, Stride: 0x1},
+ unicode.Range16{Lo: 0x3b, Hi: 0x3b, Stride: 0x1},
+ unicode.Range16{Lo: 0x37e, Hi: 0x37e, Stride: 0x1},
+ unicode.Range16{Lo: 0x589, Hi: 0x589, Stride: 0x1},
+ unicode.Range16{Lo: 0x60c, Hi: 0x60d, Stride: 0x1},
+ unicode.Range16{Lo: 0x66c, Hi: 0x66c, Stride: 0x1},
+ unicode.Range16{Lo: 0x7f8, Hi: 0x7f8, Stride: 0x1},
+ unicode.Range16{Lo: 0x2044, Hi: 0x2044, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe10, Hi: 0xfe10, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe14, Hi: 0xfe14, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe50, Hi: 0xfe50, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe54, Hi: 0xfe54, Stride: 0x1},
+ unicode.Range16{Lo: 0xff0c, Hi: 0xff0c, Stride: 0x1},
+ unicode.Range16{Lo: 0xff1b, Hi: 0xff1b, Stride: 0x1},
+ },
+ LatinOffset: 2,
+}
+
+var _WordMidNumLet = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x2e, Hi: 0x2e, Stride: 0x1},
+ unicode.Range16{Lo: 0x2018, Hi: 0x2018, Stride: 0x1},
+ unicode.Range16{Lo: 0x2019, Hi: 0x2019, Stride: 0x1},
+ unicode.Range16{Lo: 0x2024, Hi: 0x2024, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe52, Hi: 0xfe52, Stride: 0x1},
+ unicode.Range16{Lo: 0xff07, Hi: 0xff07, Stride: 0x1},
+ unicode.Range16{Lo: 0xff0e, Hi: 0xff0e, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _WordNewline = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1},
+ unicode.Range16{Lo: 0x85, Hi: 0x85, Stride: 0x1},
+ unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1},
+ unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1},
+ },
+ LatinOffset: 2,
+}
+
+var _WordNumeric = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x30, Hi: 0x39, Stride: 0x1},
+ unicode.Range16{Lo: 0x660, Hi: 0x669, Stride: 0x1},
+ unicode.Range16{Lo: 0x66b, Hi: 0x66b, Stride: 0x1},
+ unicode.Range16{Lo: 0x6f0, Hi: 0x6f9, Stride: 0x1},
+ unicode.Range16{Lo: 0x7c0, Hi: 0x7c9, Stride: 0x1},
+ unicode.Range16{Lo: 0x966, Hi: 0x96f, Stride: 0x1},
+ unicode.Range16{Lo: 0x9e6, Hi: 0x9ef, Stride: 0x1},
+ unicode.Range16{Lo: 0xa66, Hi: 0xa6f, Stride: 0x1},
+ unicode.Range16{Lo: 0xae6, Hi: 0xaef, Stride: 0x1},
+ unicode.Range16{Lo: 0xb66, Hi: 0xb6f, Stride: 0x1},
+ unicode.Range16{Lo: 0xbe6, Hi: 0xbef, Stride: 0x1},
+ unicode.Range16{Lo: 0xc66, Hi: 0xc6f, Stride: 0x1},
+ unicode.Range16{Lo: 0xce6, Hi: 0xcef, Stride: 0x1},
+ unicode.Range16{Lo: 0xd66, Hi: 0xd6f, Stride: 0x1},
+ unicode.Range16{Lo: 0xde6, Hi: 0xdef, Stride: 0x1},
+ unicode.Range16{Lo: 0xe50, Hi: 0xe59, Stride: 0x1},
+ unicode.Range16{Lo: 0xed0, Hi: 0xed9, Stride: 0x1},
+ unicode.Range16{Lo: 0xf20, Hi: 0xf29, Stride: 0x1},
+ unicode.Range16{Lo: 0x1040, Hi: 0x1049, Stride: 0x1},
+ unicode.Range16{Lo: 0x1090, Hi: 0x1099, Stride: 0x1},
+ unicode.Range16{Lo: 0x17e0, Hi: 0x17e9, Stride: 0x1},
+ unicode.Range16{Lo: 0x1810, Hi: 0x1819, Stride: 0x1},
+ unicode.Range16{Lo: 0x1946, Hi: 0x194f, Stride: 0x1},
+ unicode.Range16{Lo: 0x19d0, Hi: 0x19d9, Stride: 0x1},
+ unicode.Range16{Lo: 0x1a80, Hi: 0x1a89, Stride: 0x1},
+ unicode.Range16{Lo: 0x1a90, Hi: 0x1a99, Stride: 0x1},
+ unicode.Range16{Lo: 0x1b50, Hi: 0x1b59, Stride: 0x1},
+ unicode.Range16{Lo: 0x1bb0, Hi: 0x1bb9, Stride: 0x1},
+ unicode.Range16{Lo: 0x1c40, Hi: 0x1c49, Stride: 0x1},
+ unicode.Range16{Lo: 0x1c50, Hi: 0x1c59, Stride: 0x1},
+ unicode.Range16{Lo: 0xa620, Hi: 0xa629, Stride: 0x1},
+ unicode.Range16{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 0x1},
+ unicode.Range16{Lo: 0xa900, Hi: 0xa909, Stride: 0x1},
+ unicode.Range16{Lo: 0xa9d0, Hi: 0xa9d9, Stride: 0x1},
+ unicode.Range16{Lo: 0xa9f0, Hi: 0xa9f9, Stride: 0x1},
+ unicode.Range16{Lo: 0xaa50, Hi: 0xaa59, Stride: 0x1},
+ unicode.Range16{Lo: 0xabf0, Hi: 0xabf9, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x104a0, Hi: 0x104a9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11066, Hi: 0x1106f, Stride: 0x1},
+ unicode.Range32{Lo: 0x110f0, Hi: 0x110f9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11136, Hi: 0x1113f, Stride: 0x1},
+ unicode.Range32{Lo: 0x111d0, Hi: 0x111d9, Stride: 0x1},
+ unicode.Range32{Lo: 0x112f0, Hi: 0x112f9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11450, Hi: 0x11459, Stride: 0x1},
+ unicode.Range32{Lo: 0x114d0, Hi: 0x114d9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11650, Hi: 0x11659, Stride: 0x1},
+ unicode.Range32{Lo: 0x116c0, Hi: 0x116c9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11730, Hi: 0x11739, Stride: 0x1},
+ unicode.Range32{Lo: 0x118e0, Hi: 0x118e9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11c50, Hi: 0x11c59, Stride: 0x1},
+ unicode.Range32{Lo: 0x16a60, Hi: 0x16a69, Stride: 0x1},
+ unicode.Range32{Lo: 0x16b50, Hi: 0x16b59, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e950, Hi: 0x1e959, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _WordRegional_Indicator = &unicode.RangeTable{
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f1e6, Hi: 0x1f1ff, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _WordSingle_Quote = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x27, Hi: 0x27, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _WordZWJ = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x200d, Hi: 0x200d, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+type _WordRuneRange unicode.RangeTable
+
+func _WordRuneType(r rune) *_WordRuneRange {
+ switch {
+ case unicode.Is(_WordALetter, r):
+ return (*_WordRuneRange)(_WordALetter)
+ case unicode.Is(_WordCR, r):
+ return (*_WordRuneRange)(_WordCR)
+ case unicode.Is(_WordDouble_Quote, r):
+ return (*_WordRuneRange)(_WordDouble_Quote)
+ case unicode.Is(_WordE_Base, r):
+ return (*_WordRuneRange)(_WordE_Base)
+ case unicode.Is(_WordE_Base_GAZ, r):
+ return (*_WordRuneRange)(_WordE_Base_GAZ)
+ case unicode.Is(_WordE_Modifier, r):
+ return (*_WordRuneRange)(_WordE_Modifier)
+ case unicode.Is(_WordExtend, r):
+ return (*_WordRuneRange)(_WordExtend)
+ case unicode.Is(_WordExtendNumLet, r):
+ return (*_WordRuneRange)(_WordExtendNumLet)
+ case unicode.Is(_WordFormat, r):
+ return (*_WordRuneRange)(_WordFormat)
+ case unicode.Is(_WordGlue_After_Zwj, r):
+ return (*_WordRuneRange)(_WordGlue_After_Zwj)
+ case unicode.Is(_WordHebrew_Letter, r):
+ return (*_WordRuneRange)(_WordHebrew_Letter)
+ case unicode.Is(_WordKatakana, r):
+ return (*_WordRuneRange)(_WordKatakana)
+ case unicode.Is(_WordLF, r):
+ return (*_WordRuneRange)(_WordLF)
+ case unicode.Is(_WordMidLetter, r):
+ return (*_WordRuneRange)(_WordMidLetter)
+ case unicode.Is(_WordMidNum, r):
+ return (*_WordRuneRange)(_WordMidNum)
+ case unicode.Is(_WordMidNumLet, r):
+ return (*_WordRuneRange)(_WordMidNumLet)
+ case unicode.Is(_WordNewline, r):
+ return (*_WordRuneRange)(_WordNewline)
+ case unicode.Is(_WordNumeric, r):
+ return (*_WordRuneRange)(_WordNumeric)
+ case unicode.Is(_WordRegional_Indicator, r):
+ return (*_WordRuneRange)(_WordRegional_Indicator)
+ case unicode.Is(_WordSingle_Quote, r):
+ return (*_WordRuneRange)(_WordSingle_Quote)
+ case unicode.Is(_WordZWJ, r):
+ return (*_WordRuneRange)(_WordZWJ)
+ default:
+ return nil
+ }
+}
+func (rng *_WordRuneRange) String() string {
+ switch (*unicode.RangeTable)(rng) {
+ case _WordALetter:
+ return "ALetter"
+ case _WordCR:
+ return "CR"
+ case _WordDouble_Quote:
+ return "Double_Quote"
+ case _WordE_Base:
+ return "E_Base"
+ case _WordE_Base_GAZ:
+ return "E_Base_GAZ"
+ case _WordE_Modifier:
+ return "E_Modifier"
+ case _WordExtend:
+ return "Extend"
+ case _WordExtendNumLet:
+ return "ExtendNumLet"
+ case _WordFormat:
+ return "Format"
+ case _WordGlue_After_Zwj:
+ return "Glue_After_Zwj"
+ case _WordHebrew_Letter:
+ return "Hebrew_Letter"
+ case _WordKatakana:
+ return "Katakana"
+ case _WordLF:
+ return "LF"
+ case _WordMidLetter:
+ return "MidLetter"
+ case _WordMidNum:
+ return "MidNum"
+ case _WordMidNumLet:
+ return "MidNumLet"
+ case _WordNewline:
+ return "Newline"
+ case _WordNumeric:
+ return "Numeric"
+ case _WordRegional_Indicator:
+ return "Regional_Indicator"
+ case _WordSingle_Quote:
+ return "Single_Quote"
+ case _WordZWJ:
+ return "ZWJ"
+ default:
+ return "Other"
+ }
+}
+
+var _SentenceATerm = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x2e, Hi: 0x2e, Stride: 0x1},
+ unicode.Range16{Lo: 0x2024, Hi: 0x2024, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe52, Hi: 0xfe52, Stride: 0x1},
+ unicode.Range16{Lo: 0xff0e, Hi: 0xff0e, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _SentenceCR = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0xd, Hi: 0xd, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _SentenceClose = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x22, Hi: 0x22, Stride: 0x1},
+ unicode.Range16{Lo: 0x27, Hi: 0x27, Stride: 0x1},
+ unicode.Range16{Lo: 0x28, Hi: 0x28, Stride: 0x1},
+ unicode.Range16{Lo: 0x29, Hi: 0x29, Stride: 0x1},
+ unicode.Range16{Lo: 0x5b, Hi: 0x5b, Stride: 0x1},
+ unicode.Range16{Lo: 0x5d, Hi: 0x5d, Stride: 0x1},
+ unicode.Range16{Lo: 0x7b, Hi: 0x7b, Stride: 0x1},
+ unicode.Range16{Lo: 0x7d, Hi: 0x7d, Stride: 0x1},
+ unicode.Range16{Lo: 0xab, Hi: 0xab, Stride: 0x1},
+ unicode.Range16{Lo: 0xbb, Hi: 0xbb, Stride: 0x1},
+ unicode.Range16{Lo: 0xf3a, Hi: 0xf3a, Stride: 0x1},
+ unicode.Range16{Lo: 0xf3b, Hi: 0xf3b, Stride: 0x1},
+ unicode.Range16{Lo: 0xf3c, Hi: 0xf3c, Stride: 0x1},
+ unicode.Range16{Lo: 0xf3d, Hi: 0xf3d, Stride: 0x1},
+ unicode.Range16{Lo: 0x169b, Hi: 0x169b, Stride: 0x1},
+ unicode.Range16{Lo: 0x169c, Hi: 0x169c, Stride: 0x1},
+ unicode.Range16{Lo: 0x2018, Hi: 0x2018, Stride: 0x1},
+ unicode.Range16{Lo: 0x2019, Hi: 0x2019, Stride: 0x1},
+ unicode.Range16{Lo: 0x201a, Hi: 0x201a, Stride: 0x1},
+ unicode.Range16{Lo: 0x201b, Hi: 0x201c, Stride: 0x1},
+ unicode.Range16{Lo: 0x201d, Hi: 0x201d, Stride: 0x1},
+ unicode.Range16{Lo: 0x201e, Hi: 0x201e, Stride: 0x1},
+ unicode.Range16{Lo: 0x201f, Hi: 0x201f, Stride: 0x1},
+ unicode.Range16{Lo: 0x2039, Hi: 0x2039, Stride: 0x1},
+ unicode.Range16{Lo: 0x203a, Hi: 0x203a, Stride: 0x1},
+ unicode.Range16{Lo: 0x2045, Hi: 0x2045, Stride: 0x1},
+ unicode.Range16{Lo: 0x2046, Hi: 0x2046, Stride: 0x1},
+ unicode.Range16{Lo: 0x207d, Hi: 0x207d, Stride: 0x1},
+ unicode.Range16{Lo: 0x207e, Hi: 0x207e, Stride: 0x1},
+ unicode.Range16{Lo: 0x208d, Hi: 0x208d, Stride: 0x1},
+ unicode.Range16{Lo: 0x208e, Hi: 0x208e, Stride: 0x1},
+ unicode.Range16{Lo: 0x2308, Hi: 0x2308, Stride: 0x1},
+ unicode.Range16{Lo: 0x2309, Hi: 0x2309, Stride: 0x1},
+ unicode.Range16{Lo: 0x230a, Hi: 0x230a, Stride: 0x1},
+ unicode.Range16{Lo: 0x230b, Hi: 0x230b, Stride: 0x1},
+ unicode.Range16{Lo: 0x2329, Hi: 0x2329, Stride: 0x1},
+ unicode.Range16{Lo: 0x232a, Hi: 0x232a, Stride: 0x1},
+ unicode.Range16{Lo: 0x275b, Hi: 0x2760, Stride: 0x1},
+ unicode.Range16{Lo: 0x2768, Hi: 0x2768, Stride: 0x1},
+ unicode.Range16{Lo: 0x2769, Hi: 0x2769, Stride: 0x1},
+ unicode.Range16{Lo: 0x276a, Hi: 0x276a, Stride: 0x1},
+ unicode.Range16{Lo: 0x276b, Hi: 0x276b, Stride: 0x1},
+ unicode.Range16{Lo: 0x276c, Hi: 0x276c, Stride: 0x1},
+ unicode.Range16{Lo: 0x276d, Hi: 0x276d, Stride: 0x1},
+ unicode.Range16{Lo: 0x276e, Hi: 0x276e, Stride: 0x1},
+ unicode.Range16{Lo: 0x276f, Hi: 0x276f, Stride: 0x1},
+ unicode.Range16{Lo: 0x2770, Hi: 0x2770, Stride: 0x1},
+ unicode.Range16{Lo: 0x2771, Hi: 0x2771, Stride: 0x1},
+ unicode.Range16{Lo: 0x2772, Hi: 0x2772, Stride: 0x1},
+ unicode.Range16{Lo: 0x2773, Hi: 0x2773, Stride: 0x1},
+ unicode.Range16{Lo: 0x2774, Hi: 0x2774, Stride: 0x1},
+ unicode.Range16{Lo: 0x2775, Hi: 0x2775, Stride: 0x1},
+ unicode.Range16{Lo: 0x27c5, Hi: 0x27c5, Stride: 0x1},
+ unicode.Range16{Lo: 0x27c6, Hi: 0x27c6, Stride: 0x1},
+ unicode.Range16{Lo: 0x27e6, Hi: 0x27e6, Stride: 0x1},
+ unicode.Range16{Lo: 0x27e7, Hi: 0x27e7, Stride: 0x1},
+ unicode.Range16{Lo: 0x27e8, Hi: 0x27e8, Stride: 0x1},
+ unicode.Range16{Lo: 0x27e9, Hi: 0x27e9, Stride: 0x1},
+ unicode.Range16{Lo: 0x27ea, Hi: 0x27ea, Stride: 0x1},
+ unicode.Range16{Lo: 0x27eb, Hi: 0x27eb, Stride: 0x1},
+ unicode.Range16{Lo: 0x27ec, Hi: 0x27ec, Stride: 0x1},
+ unicode.Range16{Lo: 0x27ed, Hi: 0x27ed, Stride: 0x1},
+ unicode.Range16{Lo: 0x27ee, Hi: 0x27ee, Stride: 0x1},
+ unicode.Range16{Lo: 0x27ef, Hi: 0x27ef, Stride: 0x1},
+ unicode.Range16{Lo: 0x2983, Hi: 0x2983, Stride: 0x1},
+ unicode.Range16{Lo: 0x2984, Hi: 0x2984, Stride: 0x1},
+ unicode.Range16{Lo: 0x2985, Hi: 0x2985, Stride: 0x1},
+ unicode.Range16{Lo: 0x2986, Hi: 0x2986, Stride: 0x1},
+ unicode.Range16{Lo: 0x2987, Hi: 0x2987, Stride: 0x1},
+ unicode.Range16{Lo: 0x2988, Hi: 0x2988, Stride: 0x1},
+ unicode.Range16{Lo: 0x2989, Hi: 0x2989, Stride: 0x1},
+ unicode.Range16{Lo: 0x298a, Hi: 0x298a, Stride: 0x1},
+ unicode.Range16{Lo: 0x298b, Hi: 0x298b, Stride: 0x1},
+ unicode.Range16{Lo: 0x298c, Hi: 0x298c, Stride: 0x1},
+ unicode.Range16{Lo: 0x298d, Hi: 0x298d, Stride: 0x1},
+ unicode.Range16{Lo: 0x298e, Hi: 0x298e, Stride: 0x1},
+ unicode.Range16{Lo: 0x298f, Hi: 0x298f, Stride: 0x1},
+ unicode.Range16{Lo: 0x2990, Hi: 0x2990, Stride: 0x1},
+ unicode.Range16{Lo: 0x2991, Hi: 0x2991, Stride: 0x1},
+ unicode.Range16{Lo: 0x2992, Hi: 0x2992, Stride: 0x1},
+ unicode.Range16{Lo: 0x2993, Hi: 0x2993, Stride: 0x1},
+ unicode.Range16{Lo: 0x2994, Hi: 0x2994, Stride: 0x1},
+ unicode.Range16{Lo: 0x2995, Hi: 0x2995, Stride: 0x1},
+ unicode.Range16{Lo: 0x2996, Hi: 0x2996, Stride: 0x1},
+ unicode.Range16{Lo: 0x2997, Hi: 0x2997, Stride: 0x1},
+ unicode.Range16{Lo: 0x2998, Hi: 0x2998, Stride: 0x1},
+ unicode.Range16{Lo: 0x29d8, Hi: 0x29d8, Stride: 0x1},
+ unicode.Range16{Lo: 0x29d9, Hi: 0x29d9, Stride: 0x1},
+ unicode.Range16{Lo: 0x29da, Hi: 0x29da, Stride: 0x1},
+ unicode.Range16{Lo: 0x29db, Hi: 0x29db, Stride: 0x1},
+ unicode.Range16{Lo: 0x29fc, Hi: 0x29fc, Stride: 0x1},
+ unicode.Range16{Lo: 0x29fd, Hi: 0x29fd, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e00, Hi: 0x2e01, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e02, Hi: 0x2e02, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e03, Hi: 0x2e03, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e04, Hi: 0x2e04, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e05, Hi: 0x2e05, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e06, Hi: 0x2e08, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e09, Hi: 0x2e09, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e0a, Hi: 0x2e0a, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e0b, Hi: 0x2e0b, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e0c, Hi: 0x2e0c, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e0d, Hi: 0x2e0d, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e1c, Hi: 0x2e1c, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e1d, Hi: 0x2e1d, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e20, Hi: 0x2e20, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e21, Hi: 0x2e21, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e22, Hi: 0x2e22, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e23, Hi: 0x2e23, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e24, Hi: 0x2e24, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e25, Hi: 0x2e25, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e26, Hi: 0x2e26, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e27, Hi: 0x2e27, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e28, Hi: 0x2e28, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e29, Hi: 0x2e29, Stride: 0x1},
+ unicode.Range16{Lo: 0x2e42, Hi: 0x2e42, Stride: 0x1},
+ unicode.Range16{Lo: 0x3008, Hi: 0x3008, Stride: 0x1},
+ unicode.Range16{Lo: 0x3009, Hi: 0x3009, Stride: 0x1},
+ unicode.Range16{Lo: 0x300a, Hi: 0x300a, Stride: 0x1},
+ unicode.Range16{Lo: 0x300b, Hi: 0x300b, Stride: 0x1},
+ unicode.Range16{Lo: 0x300c, Hi: 0x300c, Stride: 0x1},
+ unicode.Range16{Lo: 0x300d, Hi: 0x300d, Stride: 0x1},
+ unicode.Range16{Lo: 0x300e, Hi: 0x300e, Stride: 0x1},
+ unicode.Range16{Lo: 0x300f, Hi: 0x300f, Stride: 0x1},
+ unicode.Range16{Lo: 0x3010, Hi: 0x3010, Stride: 0x1},
+ unicode.Range16{Lo: 0x3011, Hi: 0x3011, Stride: 0x1},
+ unicode.Range16{Lo: 0x3014, Hi: 0x3014, Stride: 0x1},
+ unicode.Range16{Lo: 0x3015, Hi: 0x3015, Stride: 0x1},
+ unicode.Range16{Lo: 0x3016, Hi: 0x3016, Stride: 0x1},
+ unicode.Range16{Lo: 0x3017, Hi: 0x3017, Stride: 0x1},
+ unicode.Range16{Lo: 0x3018, Hi: 0x3018, Stride: 0x1},
+ unicode.Range16{Lo: 0x3019, Hi: 0x3019, Stride: 0x1},
+ unicode.Range16{Lo: 0x301a, Hi: 0x301a, Stride: 0x1},
+ unicode.Range16{Lo: 0x301b, Hi: 0x301b, Stride: 0x1},
+ unicode.Range16{Lo: 0x301d, Hi: 0x301d, Stride: 0x1},
+ unicode.Range16{Lo: 0x301e, Hi: 0x301f, Stride: 0x1},
+ unicode.Range16{Lo: 0xfd3e, Hi: 0xfd3e, Stride: 0x1},
+ unicode.Range16{Lo: 0xfd3f, Hi: 0xfd3f, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe17, Hi: 0xfe17, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe18, Hi: 0xfe18, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe35, Hi: 0xfe35, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe36, Hi: 0xfe36, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe37, Hi: 0xfe37, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe38, Hi: 0xfe38, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe39, Hi: 0xfe39, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe3a, Hi: 0xfe3a, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe3b, Hi: 0xfe3b, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe3c, Hi: 0xfe3c, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe3d, Hi: 0xfe3d, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe3e, Hi: 0xfe3e, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe3f, Hi: 0xfe3f, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe40, Hi: 0xfe40, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe41, Hi: 0xfe41, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe42, Hi: 0xfe42, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe43, Hi: 0xfe43, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe44, Hi: 0xfe44, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe47, Hi: 0xfe47, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe48, Hi: 0xfe48, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe59, Hi: 0xfe59, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe5a, Hi: 0xfe5a, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe5b, Hi: 0xfe5b, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe5c, Hi: 0xfe5c, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe5d, Hi: 0xfe5d, Stride: 0x1},
+ unicode.Range16{Lo: 0xfe5e, Hi: 0xfe5e, Stride: 0x1},
+ unicode.Range16{Lo: 0xff08, Hi: 0xff08, Stride: 0x1},
+ unicode.Range16{Lo: 0xff09, Hi: 0xff09, Stride: 0x1},
+ unicode.Range16{Lo: 0xff3b, Hi: 0xff3b, Stride: 0x1},
+ unicode.Range16{Lo: 0xff3d, Hi: 0xff3d, Stride: 0x1},
+ unicode.Range16{Lo: 0xff5b, Hi: 0xff5b, Stride: 0x1},
+ unicode.Range16{Lo: 0xff5d, Hi: 0xff5d, Stride: 0x1},
+ unicode.Range16{Lo: 0xff5f, Hi: 0xff5f, Stride: 0x1},
+ unicode.Range16{Lo: 0xff60, Hi: 0xff60, Stride: 0x1},
+ unicode.Range16{Lo: 0xff62, Hi: 0xff62, Stride: 0x1},
+ unicode.Range16{Lo: 0xff63, Hi: 0xff63, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x1f676, Hi: 0x1f678, Stride: 0x1},
+ },
+ LatinOffset: 10,
+}
+
+var _SentenceExtend = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x300, Hi: 0x36f, Stride: 0x1},
+ unicode.Range16{Lo: 0x483, Hi: 0x487, Stride: 0x1},
+ unicode.Range16{Lo: 0x488, Hi: 0x489, Stride: 0x1},
+ unicode.Range16{Lo: 0x591, Hi: 0x5bd, Stride: 0x1},
+ unicode.Range16{Lo: 0x5bf, Hi: 0x5bf, Stride: 0x1},
+ unicode.Range16{Lo: 0x5c1, Hi: 0x5c2, Stride: 0x1},
+ unicode.Range16{Lo: 0x5c4, Hi: 0x5c5, Stride: 0x1},
+ unicode.Range16{Lo: 0x5c7, Hi: 0x5c7, Stride: 0x1},
+ unicode.Range16{Lo: 0x610, Hi: 0x61a, Stride: 0x1},
+ unicode.Range16{Lo: 0x64b, Hi: 0x65f, Stride: 0x1},
+ unicode.Range16{Lo: 0x670, Hi: 0x670, Stride: 0x1},
+ unicode.Range16{Lo: 0x6d6, Hi: 0x6dc, Stride: 0x1},
+ unicode.Range16{Lo: 0x6df, Hi: 0x6e4, Stride: 0x1},
+ unicode.Range16{Lo: 0x6e7, Hi: 0x6e8, Stride: 0x1},
+ unicode.Range16{Lo: 0x6ea, Hi: 0x6ed, Stride: 0x1},
+ unicode.Range16{Lo: 0x711, Hi: 0x711, Stride: 0x1},
+ unicode.Range16{Lo: 0x730, Hi: 0x74a, Stride: 0x1},
+ unicode.Range16{Lo: 0x7a6, Hi: 0x7b0, Stride: 0x1},
+ unicode.Range16{Lo: 0x7eb, Hi: 0x7f3, Stride: 0x1},
+ unicode.Range16{Lo: 0x816, Hi: 0x819, Stride: 0x1},
+ unicode.Range16{Lo: 0x81b, Hi: 0x823, Stride: 0x1},
+ unicode.Range16{Lo: 0x825, Hi: 0x827, Stride: 0x1},
+ unicode.Range16{Lo: 0x829, Hi: 0x82d, Stride: 0x1},
+ unicode.Range16{Lo: 0x859, Hi: 0x85b, Stride: 0x1},
+ unicode.Range16{Lo: 0x8d4, Hi: 0x8e1, Stride: 0x1},
+ unicode.Range16{Lo: 0x8e3, Hi: 0x902, Stride: 0x1},
+ unicode.Range16{Lo: 0x903, Hi: 0x903, Stride: 0x1},
+ unicode.Range16{Lo: 0x93a, Hi: 0x93a, Stride: 0x1},
+ unicode.Range16{Lo: 0x93b, Hi: 0x93b, Stride: 0x1},
+ unicode.Range16{Lo: 0x93c, Hi: 0x93c, Stride: 0x1},
+ unicode.Range16{Lo: 0x93e, Hi: 0x940, Stride: 0x1},
+ unicode.Range16{Lo: 0x941, Hi: 0x948, Stride: 0x1},
+ unicode.Range16{Lo: 0x949, Hi: 0x94c, Stride: 0x1},
+ unicode.Range16{Lo: 0x94d, Hi: 0x94d, Stride: 0x1},
+ unicode.Range16{Lo: 0x94e, Hi: 0x94f, Stride: 0x1},
+ unicode.Range16{Lo: 0x951, Hi: 0x957, Stride: 0x1},
+ unicode.Range16{Lo: 0x962, Hi: 0x963,
Stride: 0x1}, + unicode.Range16{Lo: 0x981, Hi: 0x981, Stride: 0x1}, + unicode.Range16{Lo: 0x982, Hi: 0x983, Stride: 0x1}, + unicode.Range16{Lo: 0x9bc, Hi: 0x9bc, Stride: 0x1}, + unicode.Range16{Lo: 0x9be, Hi: 0x9c0, Stride: 0x1}, + unicode.Range16{Lo: 0x9c1, Hi: 0x9c4, Stride: 0x1}, + unicode.Range16{Lo: 0x9c7, Hi: 0x9c8, Stride: 0x1}, + unicode.Range16{Lo: 0x9cb, Hi: 0x9cc, Stride: 0x1}, + unicode.Range16{Lo: 0x9cd, Hi: 0x9cd, Stride: 0x1}, + unicode.Range16{Lo: 0x9d7, Hi: 0x9d7, Stride: 0x1}, + unicode.Range16{Lo: 0x9e2, Hi: 0x9e3, Stride: 0x1}, + unicode.Range16{Lo: 0xa01, Hi: 0xa02, Stride: 0x1}, + unicode.Range16{Lo: 0xa03, Hi: 0xa03, Stride: 0x1}, + unicode.Range16{Lo: 0xa3c, Hi: 0xa3c, Stride: 0x1}, + unicode.Range16{Lo: 0xa3e, Hi: 0xa40, Stride: 0x1}, + unicode.Range16{Lo: 0xa41, Hi: 0xa42, Stride: 0x1}, + unicode.Range16{Lo: 0xa47, Hi: 0xa48, Stride: 0x1}, + unicode.Range16{Lo: 0xa4b, Hi: 0xa4d, Stride: 0x1}, + unicode.Range16{Lo: 0xa51, Hi: 0xa51, Stride: 0x1}, + unicode.Range16{Lo: 0xa70, Hi: 0xa71, Stride: 0x1}, + unicode.Range16{Lo: 0xa75, Hi: 0xa75, Stride: 0x1}, + unicode.Range16{Lo: 0xa81, Hi: 0xa82, Stride: 0x1}, + unicode.Range16{Lo: 0xa83, Hi: 0xa83, Stride: 0x1}, + unicode.Range16{Lo: 0xabc, Hi: 0xabc, Stride: 0x1}, + unicode.Range16{Lo: 0xabe, Hi: 0xac0, Stride: 0x1}, + unicode.Range16{Lo: 0xac1, Hi: 0xac5, Stride: 0x1}, + unicode.Range16{Lo: 0xac7, Hi: 0xac8, Stride: 0x1}, + unicode.Range16{Lo: 0xac9, Hi: 0xac9, Stride: 0x1}, + unicode.Range16{Lo: 0xacb, Hi: 0xacc, Stride: 0x1}, + unicode.Range16{Lo: 0xacd, Hi: 0xacd, Stride: 0x1}, + unicode.Range16{Lo: 0xae2, Hi: 0xae3, Stride: 0x1}, + unicode.Range16{Lo: 0xb01, Hi: 0xb01, Stride: 0x1}, + unicode.Range16{Lo: 0xb02, Hi: 0xb03, Stride: 0x1}, + unicode.Range16{Lo: 0xb3c, Hi: 0xb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xb3e, Hi: 0xb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xb3f, Hi: 0xb3f, Stride: 0x1}, + unicode.Range16{Lo: 0xb40, Hi: 0xb40, Stride: 0x1}, + unicode.Range16{Lo: 0xb41, Hi: 0xb44, Stride: 0x1}, + unicode.Range16{Lo: 0xb47, Hi: 0xb48, Stride: 0x1}, + unicode.Range16{Lo: 0xb4b, Hi: 0xb4c, Stride: 0x1}, + unicode.Range16{Lo: 0xb4d, Hi: 0xb4d, Stride: 0x1}, + unicode.Range16{Lo: 0xb56, Hi: 0xb56, Stride: 0x1}, + unicode.Range16{Lo: 0xb57, Hi: 0xb57, Stride: 0x1}, + unicode.Range16{Lo: 0xb62, Hi: 0xb63, Stride: 0x1}, + unicode.Range16{Lo: 0xb82, Hi: 0xb82, Stride: 0x1}, + unicode.Range16{Lo: 0xbbe, Hi: 0xbbf, Stride: 0x1}, + unicode.Range16{Lo: 0xbc0, Hi: 0xbc0, Stride: 0x1}, + unicode.Range16{Lo: 0xbc1, Hi: 0xbc2, Stride: 0x1}, + unicode.Range16{Lo: 0xbc6, Hi: 0xbc8, Stride: 0x1}, + unicode.Range16{Lo: 0xbca, Hi: 0xbcc, Stride: 0x1}, + unicode.Range16{Lo: 0xbcd, Hi: 0xbcd, Stride: 0x1}, + unicode.Range16{Lo: 0xbd7, Hi: 0xbd7, Stride: 0x1}, + unicode.Range16{Lo: 0xc00, Hi: 0xc00, Stride: 0x1}, + unicode.Range16{Lo: 0xc01, Hi: 0xc03, Stride: 0x1}, + unicode.Range16{Lo: 0xc3e, Hi: 0xc40, Stride: 0x1}, + unicode.Range16{Lo: 0xc41, Hi: 0xc44, Stride: 0x1}, + unicode.Range16{Lo: 0xc46, Hi: 0xc48, Stride: 0x1}, + unicode.Range16{Lo: 0xc4a, Hi: 0xc4d, Stride: 0x1}, + unicode.Range16{Lo: 0xc55, Hi: 0xc56, Stride: 0x1}, + unicode.Range16{Lo: 0xc62, Hi: 0xc63, Stride: 0x1}, + unicode.Range16{Lo: 0xc81, Hi: 0xc81, Stride: 0x1}, + unicode.Range16{Lo: 0xc82, Hi: 0xc83, Stride: 0x1}, + unicode.Range16{Lo: 0xcbc, Hi: 0xcbc, Stride: 0x1}, + unicode.Range16{Lo: 0xcbe, Hi: 0xcbe, Stride: 0x1}, + unicode.Range16{Lo: 0xcbf, Hi: 0xcbf, Stride: 0x1}, + unicode.Range16{Lo: 0xcc0, Hi: 0xcc4, Stride: 0x1}, + unicode.Range16{Lo: 0xcc6, 
Hi: 0xcc6, Stride: 0x1}, + unicode.Range16{Lo: 0xcc7, Hi: 0xcc8, Stride: 0x1}, + unicode.Range16{Lo: 0xcca, Hi: 0xccb, Stride: 0x1}, + unicode.Range16{Lo: 0xccc, Hi: 0xccd, Stride: 0x1}, + unicode.Range16{Lo: 0xcd5, Hi: 0xcd6, Stride: 0x1}, + unicode.Range16{Lo: 0xce2, Hi: 0xce3, Stride: 0x1}, + unicode.Range16{Lo: 0xd01, Hi: 0xd01, Stride: 0x1}, + unicode.Range16{Lo: 0xd02, Hi: 0xd03, Stride: 0x1}, + unicode.Range16{Lo: 0xd3e, Hi: 0xd40, Stride: 0x1}, + unicode.Range16{Lo: 0xd41, Hi: 0xd44, Stride: 0x1}, + unicode.Range16{Lo: 0xd46, Hi: 0xd48, Stride: 0x1}, + unicode.Range16{Lo: 0xd4a, Hi: 0xd4c, Stride: 0x1}, + unicode.Range16{Lo: 0xd4d, Hi: 0xd4d, Stride: 0x1}, + unicode.Range16{Lo: 0xd57, Hi: 0xd57, Stride: 0x1}, + unicode.Range16{Lo: 0xd62, Hi: 0xd63, Stride: 0x1}, + unicode.Range16{Lo: 0xd82, Hi: 0xd83, Stride: 0x1}, + unicode.Range16{Lo: 0xdca, Hi: 0xdca, Stride: 0x1}, + unicode.Range16{Lo: 0xdcf, Hi: 0xdd1, Stride: 0x1}, + unicode.Range16{Lo: 0xdd2, Hi: 0xdd4, Stride: 0x1}, + unicode.Range16{Lo: 0xdd6, Hi: 0xdd6, Stride: 0x1}, + unicode.Range16{Lo: 0xdd8, Hi: 0xddf, Stride: 0x1}, + unicode.Range16{Lo: 0xdf2, Hi: 0xdf3, Stride: 0x1}, + unicode.Range16{Lo: 0xe31, Hi: 0xe31, Stride: 0x1}, + unicode.Range16{Lo: 0xe34, Hi: 0xe3a, Stride: 0x1}, + unicode.Range16{Lo: 0xe47, Hi: 0xe4e, Stride: 0x1}, + unicode.Range16{Lo: 0xeb1, Hi: 0xeb1, Stride: 0x1}, + unicode.Range16{Lo: 0xeb4, Hi: 0xeb9, Stride: 0x1}, + unicode.Range16{Lo: 0xebb, Hi: 0xebc, Stride: 0x1}, + unicode.Range16{Lo: 0xec8, Hi: 0xecd, Stride: 0x1}, + unicode.Range16{Lo: 0xf18, Hi: 0xf19, Stride: 0x1}, + unicode.Range16{Lo: 0xf35, Hi: 0xf35, Stride: 0x1}, + unicode.Range16{Lo: 0xf37, Hi: 0xf37, Stride: 0x1}, + unicode.Range16{Lo: 0xf39, Hi: 0xf39, Stride: 0x1}, + unicode.Range16{Lo: 0xf3e, Hi: 0xf3f, Stride: 0x1}, + unicode.Range16{Lo: 0xf71, Hi: 0xf7e, Stride: 0x1}, + unicode.Range16{Lo: 0xf7f, Hi: 0xf7f, Stride: 0x1}, + unicode.Range16{Lo: 0xf80, Hi: 0xf84, Stride: 0x1}, + unicode.Range16{Lo: 0xf86, Hi: 0xf87, Stride: 0x1}, + unicode.Range16{Lo: 0xf8d, Hi: 0xf97, Stride: 0x1}, + unicode.Range16{Lo: 0xf99, Hi: 0xfbc, Stride: 0x1}, + unicode.Range16{Lo: 0xfc6, Hi: 0xfc6, Stride: 0x1}, + unicode.Range16{Lo: 0x102b, Hi: 0x102c, Stride: 0x1}, + unicode.Range16{Lo: 0x102d, Hi: 0x1030, Stride: 0x1}, + unicode.Range16{Lo: 0x1031, Hi: 0x1031, Stride: 0x1}, + unicode.Range16{Lo: 0x1032, Hi: 0x1037, Stride: 0x1}, + unicode.Range16{Lo: 0x1038, Hi: 0x1038, Stride: 0x1}, + unicode.Range16{Lo: 0x1039, Hi: 0x103a, Stride: 0x1}, + unicode.Range16{Lo: 0x103b, Hi: 0x103c, Stride: 0x1}, + unicode.Range16{Lo: 0x103d, Hi: 0x103e, Stride: 0x1}, + unicode.Range16{Lo: 0x1056, Hi: 0x1057, Stride: 0x1}, + unicode.Range16{Lo: 0x1058, Hi: 0x1059, Stride: 0x1}, + unicode.Range16{Lo: 0x105e, Hi: 0x1060, Stride: 0x1}, + unicode.Range16{Lo: 0x1062, Hi: 0x1064, Stride: 0x1}, + unicode.Range16{Lo: 0x1067, Hi: 0x106d, Stride: 0x1}, + unicode.Range16{Lo: 0x1071, Hi: 0x1074, Stride: 0x1}, + unicode.Range16{Lo: 0x1082, Hi: 0x1082, Stride: 0x1}, + unicode.Range16{Lo: 0x1083, Hi: 0x1084, Stride: 0x1}, + unicode.Range16{Lo: 0x1085, Hi: 0x1086, Stride: 0x1}, + unicode.Range16{Lo: 0x1087, Hi: 0x108c, Stride: 0x1}, + unicode.Range16{Lo: 0x108d, Hi: 0x108d, Stride: 0x1}, + unicode.Range16{Lo: 0x108f, Hi: 0x108f, Stride: 0x1}, + unicode.Range16{Lo: 0x109a, Hi: 0x109c, Stride: 0x1}, + unicode.Range16{Lo: 0x109d, Hi: 0x109d, Stride: 0x1}, + unicode.Range16{Lo: 0x135d, Hi: 0x135f, Stride: 0x1}, + unicode.Range16{Lo: 0x1712, Hi: 0x1714, Stride: 0x1}, + unicode.Range16{Lo: 
0x1732, Hi: 0x1734, Stride: 0x1}, + unicode.Range16{Lo: 0x1752, Hi: 0x1753, Stride: 0x1}, + unicode.Range16{Lo: 0x1772, Hi: 0x1773, Stride: 0x1}, + unicode.Range16{Lo: 0x17b4, Hi: 0x17b5, Stride: 0x1}, + unicode.Range16{Lo: 0x17b6, Hi: 0x17b6, Stride: 0x1}, + unicode.Range16{Lo: 0x17b7, Hi: 0x17bd, Stride: 0x1}, + unicode.Range16{Lo: 0x17be, Hi: 0x17c5, Stride: 0x1}, + unicode.Range16{Lo: 0x17c6, Hi: 0x17c6, Stride: 0x1}, + unicode.Range16{Lo: 0x17c7, Hi: 0x17c8, Stride: 0x1}, + unicode.Range16{Lo: 0x17c9, Hi: 0x17d3, Stride: 0x1}, + unicode.Range16{Lo: 0x17dd, Hi: 0x17dd, Stride: 0x1}, + unicode.Range16{Lo: 0x180b, Hi: 0x180d, Stride: 0x1}, + unicode.Range16{Lo: 0x1885, Hi: 0x1886, Stride: 0x1}, + unicode.Range16{Lo: 0x18a9, Hi: 0x18a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1920, Hi: 0x1922, Stride: 0x1}, + unicode.Range16{Lo: 0x1923, Hi: 0x1926, Stride: 0x1}, + unicode.Range16{Lo: 0x1927, Hi: 0x1928, Stride: 0x1}, + unicode.Range16{Lo: 0x1929, Hi: 0x192b, Stride: 0x1}, + unicode.Range16{Lo: 0x1930, Hi: 0x1931, Stride: 0x1}, + unicode.Range16{Lo: 0x1932, Hi: 0x1932, Stride: 0x1}, + unicode.Range16{Lo: 0x1933, Hi: 0x1938, Stride: 0x1}, + unicode.Range16{Lo: 0x1939, Hi: 0x193b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a17, Hi: 0x1a18, Stride: 0x1}, + unicode.Range16{Lo: 0x1a19, Hi: 0x1a1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1b, Hi: 0x1a1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1a55, Hi: 0x1a55, Stride: 0x1}, + unicode.Range16{Lo: 0x1a56, Hi: 0x1a56, Stride: 0x1}, + unicode.Range16{Lo: 0x1a57, Hi: 0x1a57, Stride: 0x1}, + unicode.Range16{Lo: 0x1a58, Hi: 0x1a5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a60, Hi: 0x1a60, Stride: 0x1}, + unicode.Range16{Lo: 0x1a61, Hi: 0x1a61, Stride: 0x1}, + unicode.Range16{Lo: 0x1a62, Hi: 0x1a62, Stride: 0x1}, + unicode.Range16{Lo: 0x1a63, Hi: 0x1a64, Stride: 0x1}, + unicode.Range16{Lo: 0x1a65, Hi: 0x1a6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6d, Hi: 0x1a72, Stride: 0x1}, + unicode.Range16{Lo: 0x1a73, Hi: 0x1a7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1a7f, Hi: 0x1a7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ab0, Hi: 0x1abd, Stride: 0x1}, + unicode.Range16{Lo: 0x1abe, Hi: 0x1abe, Stride: 0x1}, + unicode.Range16{Lo: 0x1b00, Hi: 0x1b03, Stride: 0x1}, + unicode.Range16{Lo: 0x1b04, Hi: 0x1b04, Stride: 0x1}, + unicode.Range16{Lo: 0x1b34, Hi: 0x1b34, Stride: 0x1}, + unicode.Range16{Lo: 0x1b35, Hi: 0x1b35, Stride: 0x1}, + unicode.Range16{Lo: 0x1b36, Hi: 0x1b3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3b, Hi: 0x1b3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3c, Hi: 0x1b3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1b3d, Hi: 0x1b41, Stride: 0x1}, + unicode.Range16{Lo: 0x1b42, Hi: 0x1b42, Stride: 0x1}, + unicode.Range16{Lo: 0x1b43, Hi: 0x1b44, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6b, Hi: 0x1b73, Stride: 0x1}, + unicode.Range16{Lo: 0x1b80, Hi: 0x1b81, Stride: 0x1}, + unicode.Range16{Lo: 0x1b82, Hi: 0x1b82, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba1, Hi: 0x1ba1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba2, Hi: 0x1ba5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba6, Hi: 0x1ba7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ba8, Hi: 0x1ba9, Stride: 0x1}, + unicode.Range16{Lo: 0x1baa, Hi: 0x1baa, Stride: 0x1}, + unicode.Range16{Lo: 0x1bab, Hi: 0x1bad, Stride: 0x1}, + unicode.Range16{Lo: 0x1be6, Hi: 0x1be6, Stride: 0x1}, + unicode.Range16{Lo: 0x1be7, Hi: 0x1be7, Stride: 0x1}, + unicode.Range16{Lo: 0x1be8, Hi: 0x1be9, Stride: 0x1}, + unicode.Range16{Lo: 0x1bea, Hi: 0x1bec, Stride: 0x1}, + unicode.Range16{Lo: 0x1bed, Hi: 0x1bed, Stride: 0x1}, + unicode.Range16{Lo: 0x1bee, Hi: 0x1bee, 
Stride: 0x1}, + unicode.Range16{Lo: 0x1bef, Hi: 0x1bf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1bf2, Hi: 0x1bf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1c24, Hi: 0x1c2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1c2c, Hi: 0x1c33, Stride: 0x1}, + unicode.Range16{Lo: 0x1c34, Hi: 0x1c35, Stride: 0x1}, + unicode.Range16{Lo: 0x1c36, Hi: 0x1c37, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd4, Hi: 0x1ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce1, Hi: 0x1ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce2, Hi: 0x1ce8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ced, Hi: 0x1ced, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf2, Hi: 0x1cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf4, Hi: 0x1cf4, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf8, Hi: 0x1cf9, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc0, Hi: 0x1df5, Stride: 0x1}, + unicode.Range16{Lo: 0x1dfb, Hi: 0x1dff, Stride: 0x1}, + unicode.Range16{Lo: 0x200c, Hi: 0x200d, Stride: 0x1}, + unicode.Range16{Lo: 0x20d0, Hi: 0x20dc, Stride: 0x1}, + unicode.Range16{Lo: 0x20dd, Hi: 0x20e0, Stride: 0x1}, + unicode.Range16{Lo: 0x20e1, Hi: 0x20e1, Stride: 0x1}, + unicode.Range16{Lo: 0x20e2, Hi: 0x20e4, Stride: 0x1}, + unicode.Range16{Lo: 0x20e5, Hi: 0x20f0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cef, Hi: 0x2cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x2d7f, Hi: 0x2d7f, Stride: 0x1}, + unicode.Range16{Lo: 0x2de0, Hi: 0x2dff, Stride: 0x1}, + unicode.Range16{Lo: 0x302a, Hi: 0x302d, Stride: 0x1}, + unicode.Range16{Lo: 0x302e, Hi: 0x302f, Stride: 0x1}, + unicode.Range16{Lo: 0x3099, Hi: 0x309a, Stride: 0x1}, + unicode.Range16{Lo: 0xa66f, Hi: 0xa66f, Stride: 0x1}, + unicode.Range16{Lo: 0xa670, Hi: 0xa672, Stride: 0x1}, + unicode.Range16{Lo: 0xa674, Hi: 0xa67d, Stride: 0x1}, + unicode.Range16{Lo: 0xa69e, Hi: 0xa69f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f0, Hi: 0xa6f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa802, Hi: 0xa802, Stride: 0x1}, + unicode.Range16{Lo: 0xa806, Hi: 0xa806, Stride: 0x1}, + unicode.Range16{Lo: 0xa80b, Hi: 0xa80b, Stride: 0x1}, + unicode.Range16{Lo: 0xa823, Hi: 0xa824, Stride: 0x1}, + unicode.Range16{Lo: 0xa825, Hi: 0xa826, Stride: 0x1}, + unicode.Range16{Lo: 0xa827, Hi: 0xa827, Stride: 0x1}, + unicode.Range16{Lo: 0xa880, Hi: 0xa881, Stride: 0x1}, + unicode.Range16{Lo: 0xa8b4, Hi: 0xa8c3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8c4, Hi: 0xa8c5, Stride: 0x1}, + unicode.Range16{Lo: 0xa8e0, Hi: 0xa8f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa926, Hi: 0xa92d, Stride: 0x1}, + unicode.Range16{Lo: 0xa947, Hi: 0xa951, Stride: 0x1}, + unicode.Range16{Lo: 0xa952, Hi: 0xa953, Stride: 0x1}, + unicode.Range16{Lo: 0xa980, Hi: 0xa982, Stride: 0x1}, + unicode.Range16{Lo: 0xa983, Hi: 0xa983, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b3, Hi: 0xa9b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b4, Hi: 0xa9b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa9b6, Hi: 0xa9b9, Stride: 0x1}, + unicode.Range16{Lo: 0xa9ba, Hi: 0xa9bb, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bc, Hi: 0xa9bc, Stride: 0x1}, + unicode.Range16{Lo: 0xa9bd, Hi: 0xa9c0, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e5, Hi: 0xa9e5, Stride: 0x1}, + unicode.Range16{Lo: 0xaa29, Hi: 0xaa2e, Stride: 0x1}, + unicode.Range16{Lo: 0xaa2f, Hi: 0xaa30, Stride: 0x1}, + unicode.Range16{Lo: 0xaa31, Hi: 0xaa32, Stride: 0x1}, + unicode.Range16{Lo: 0xaa33, Hi: 0xaa34, Stride: 0x1}, + unicode.Range16{Lo: 0xaa35, Hi: 0xaa36, Stride: 0x1}, + unicode.Range16{Lo: 0xaa43, Hi: 0xaa43, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4c, Hi: 0xaa4c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa4d, Hi: 0xaa4d, Stride: 0x1}, + 
unicode.Range16{Lo: 0xaa7b, Hi: 0xaa7b, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7c, Hi: 0xaa7c, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7d, Hi: 0xaa7d, Stride: 0x1}, + unicode.Range16{Lo: 0xaab0, Hi: 0xaab0, Stride: 0x1}, + unicode.Range16{Lo: 0xaab2, Hi: 0xaab4, Stride: 0x1}, + unicode.Range16{Lo: 0xaab7, Hi: 0xaab8, Stride: 0x1}, + unicode.Range16{Lo: 0xaabe, Hi: 0xaabf, Stride: 0x1}, + unicode.Range16{Lo: 0xaac1, Hi: 0xaac1, Stride: 0x1}, + unicode.Range16{Lo: 0xaaeb, Hi: 0xaaeb, Stride: 0x1}, + unicode.Range16{Lo: 0xaaec, Hi: 0xaaed, Stride: 0x1}, + unicode.Range16{Lo: 0xaaee, Hi: 0xaaef, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf5, Hi: 0xaaf5, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf6, Hi: 0xaaf6, Stride: 0x1}, + unicode.Range16{Lo: 0xabe3, Hi: 0xabe4, Stride: 0x1}, + unicode.Range16{Lo: 0xabe5, Hi: 0xabe5, Stride: 0x1}, + unicode.Range16{Lo: 0xabe6, Hi: 0xabe7, Stride: 0x1}, + unicode.Range16{Lo: 0xabe8, Hi: 0xabe8, Stride: 0x1}, + unicode.Range16{Lo: 0xabe9, Hi: 0xabea, Stride: 0x1}, + unicode.Range16{Lo: 0xabec, Hi: 0xabec, Stride: 0x1}, + unicode.Range16{Lo: 0xabed, Hi: 0xabed, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1e, Hi: 0xfb1e, Stride: 0x1}, + unicode.Range16{Lo: 0xfe00, Hi: 0xfe0f, Stride: 0x1}, + unicode.Range16{Lo: 0xfe20, Hi: 0xfe2f, Stride: 0x1}, + unicode.Range16{Lo: 0xff9e, Hi: 0xff9f, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x101fd, Hi: 0x101fd, Stride: 0x1}, + unicode.Range32{Lo: 0x102e0, Hi: 0x102e0, Stride: 0x1}, + unicode.Range32{Lo: 0x10376, Hi: 0x1037a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a01, Hi: 0x10a03, Stride: 0x1}, + unicode.Range32{Lo: 0x10a05, Hi: 0x10a06, Stride: 0x1}, + unicode.Range32{Lo: 0x10a0c, Hi: 0x10a0f, Stride: 0x1}, + unicode.Range32{Lo: 0x10a38, Hi: 0x10a3a, Stride: 0x1}, + unicode.Range32{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 0x1}, + unicode.Range32{Lo: 0x10ae5, Hi: 0x10ae6, Stride: 0x1}, + unicode.Range32{Lo: 0x11000, Hi: 0x11000, Stride: 0x1}, + unicode.Range32{Lo: 0x11001, Hi: 0x11001, Stride: 0x1}, + unicode.Range32{Lo: 0x11002, Hi: 0x11002, Stride: 0x1}, + unicode.Range32{Lo: 0x11038, Hi: 0x11046, Stride: 0x1}, + unicode.Range32{Lo: 0x1107f, Hi: 0x11081, Stride: 0x1}, + unicode.Range32{Lo: 0x11082, Hi: 0x11082, Stride: 0x1}, + unicode.Range32{Lo: 0x110b0, Hi: 0x110b2, Stride: 0x1}, + unicode.Range32{Lo: 0x110b3, Hi: 0x110b6, Stride: 0x1}, + unicode.Range32{Lo: 0x110b7, Hi: 0x110b8, Stride: 0x1}, + unicode.Range32{Lo: 0x110b9, Hi: 0x110ba, Stride: 0x1}, + unicode.Range32{Lo: 0x11100, Hi: 0x11102, Stride: 0x1}, + unicode.Range32{Lo: 0x11127, Hi: 0x1112b, Stride: 0x1}, + unicode.Range32{Lo: 0x1112c, Hi: 0x1112c, Stride: 0x1}, + unicode.Range32{Lo: 0x1112d, Hi: 0x11134, Stride: 0x1}, + unicode.Range32{Lo: 0x11173, Hi: 0x11173, Stride: 0x1}, + unicode.Range32{Lo: 0x11180, Hi: 0x11181, Stride: 0x1}, + unicode.Range32{Lo: 0x11182, Hi: 0x11182, Stride: 0x1}, + unicode.Range32{Lo: 0x111b3, Hi: 0x111b5, Stride: 0x1}, + unicode.Range32{Lo: 0x111b6, Hi: 0x111be, Stride: 0x1}, + unicode.Range32{Lo: 0x111bf, Hi: 0x111c0, Stride: 0x1}, + unicode.Range32{Lo: 0x111ca, Hi: 0x111cc, Stride: 0x1}, + unicode.Range32{Lo: 0x1122c, Hi: 0x1122e, Stride: 0x1}, + unicode.Range32{Lo: 0x1122f, Hi: 0x11231, Stride: 0x1}, + unicode.Range32{Lo: 0x11232, Hi: 0x11233, Stride: 0x1}, + unicode.Range32{Lo: 0x11234, Hi: 0x11234, Stride: 0x1}, + unicode.Range32{Lo: 0x11235, Hi: 0x11235, Stride: 0x1}, + unicode.Range32{Lo: 0x11236, Hi: 0x11237, Stride: 0x1}, + unicode.Range32{Lo: 0x1123e, Hi: 0x1123e, Stride: 0x1}, + unicode.Range32{Lo: 0x112df, Hi: 
0x112df, Stride: 0x1}, + unicode.Range32{Lo: 0x112e0, Hi: 0x112e2, Stride: 0x1}, + unicode.Range32{Lo: 0x112e3, Hi: 0x112ea, Stride: 0x1}, + unicode.Range32{Lo: 0x11300, Hi: 0x11301, Stride: 0x1}, + unicode.Range32{Lo: 0x11302, Hi: 0x11303, Stride: 0x1}, + unicode.Range32{Lo: 0x1133c, Hi: 0x1133c, Stride: 0x1}, + unicode.Range32{Lo: 0x1133e, Hi: 0x1133f, Stride: 0x1}, + unicode.Range32{Lo: 0x11340, Hi: 0x11340, Stride: 0x1}, + unicode.Range32{Lo: 0x11341, Hi: 0x11344, Stride: 0x1}, + unicode.Range32{Lo: 0x11347, Hi: 0x11348, Stride: 0x1}, + unicode.Range32{Lo: 0x1134b, Hi: 0x1134d, Stride: 0x1}, + unicode.Range32{Lo: 0x11357, Hi: 0x11357, Stride: 0x1}, + unicode.Range32{Lo: 0x11362, Hi: 0x11363, Stride: 0x1}, + unicode.Range32{Lo: 0x11366, Hi: 0x1136c, Stride: 0x1}, + unicode.Range32{Lo: 0x11370, Hi: 0x11374, Stride: 0x1}, + unicode.Range32{Lo: 0x11435, Hi: 0x11437, Stride: 0x1}, + unicode.Range32{Lo: 0x11438, Hi: 0x1143f, Stride: 0x1}, + unicode.Range32{Lo: 0x11440, Hi: 0x11441, Stride: 0x1}, + unicode.Range32{Lo: 0x11442, Hi: 0x11444, Stride: 0x1}, + unicode.Range32{Lo: 0x11445, Hi: 0x11445, Stride: 0x1}, + unicode.Range32{Lo: 0x11446, Hi: 0x11446, Stride: 0x1}, + unicode.Range32{Lo: 0x114b0, Hi: 0x114b2, Stride: 0x1}, + unicode.Range32{Lo: 0x114b3, Hi: 0x114b8, Stride: 0x1}, + unicode.Range32{Lo: 0x114b9, Hi: 0x114b9, Stride: 0x1}, + unicode.Range32{Lo: 0x114ba, Hi: 0x114ba, Stride: 0x1}, + unicode.Range32{Lo: 0x114bb, Hi: 0x114be, Stride: 0x1}, + unicode.Range32{Lo: 0x114bf, Hi: 0x114c0, Stride: 0x1}, + unicode.Range32{Lo: 0x114c1, Hi: 0x114c1, Stride: 0x1}, + unicode.Range32{Lo: 0x114c2, Hi: 0x114c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115af, Hi: 0x115b1, Stride: 0x1}, + unicode.Range32{Lo: 0x115b2, Hi: 0x115b5, Stride: 0x1}, + unicode.Range32{Lo: 0x115b8, Hi: 0x115bb, Stride: 0x1}, + unicode.Range32{Lo: 0x115bc, Hi: 0x115bd, Stride: 0x1}, + unicode.Range32{Lo: 0x115be, Hi: 0x115be, Stride: 0x1}, + unicode.Range32{Lo: 0x115bf, Hi: 0x115c0, Stride: 0x1}, + unicode.Range32{Lo: 0x115dc, Hi: 0x115dd, Stride: 0x1}, + unicode.Range32{Lo: 0x11630, Hi: 0x11632, Stride: 0x1}, + unicode.Range32{Lo: 0x11633, Hi: 0x1163a, Stride: 0x1}, + unicode.Range32{Lo: 0x1163b, Hi: 0x1163c, Stride: 0x1}, + unicode.Range32{Lo: 0x1163d, Hi: 0x1163d, Stride: 0x1}, + unicode.Range32{Lo: 0x1163e, Hi: 0x1163e, Stride: 0x1}, + unicode.Range32{Lo: 0x1163f, Hi: 0x11640, Stride: 0x1}, + unicode.Range32{Lo: 0x116ab, Hi: 0x116ab, Stride: 0x1}, + unicode.Range32{Lo: 0x116ac, Hi: 0x116ac, Stride: 0x1}, + unicode.Range32{Lo: 0x116ad, Hi: 0x116ad, Stride: 0x1}, + unicode.Range32{Lo: 0x116ae, Hi: 0x116af, Stride: 0x1}, + unicode.Range32{Lo: 0x116b0, Hi: 0x116b5, Stride: 0x1}, + unicode.Range32{Lo: 0x116b6, Hi: 0x116b6, Stride: 0x1}, + unicode.Range32{Lo: 0x116b7, Hi: 0x116b7, Stride: 0x1}, + unicode.Range32{Lo: 0x1171d, Hi: 0x1171f, Stride: 0x1}, + unicode.Range32{Lo: 0x11720, Hi: 0x11721, Stride: 0x1}, + unicode.Range32{Lo: 0x11722, Hi: 0x11725, Stride: 0x1}, + unicode.Range32{Lo: 0x11726, Hi: 0x11726, Stride: 0x1}, + unicode.Range32{Lo: 0x11727, Hi: 0x1172b, Stride: 0x1}, + unicode.Range32{Lo: 0x11c2f, Hi: 0x11c2f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c30, Hi: 0x11c36, Stride: 0x1}, + unicode.Range32{Lo: 0x11c38, Hi: 0x11c3d, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3e, Hi: 0x11c3e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c3f, Hi: 0x11c3f, Stride: 0x1}, + unicode.Range32{Lo: 0x11c92, Hi: 0x11ca7, Stride: 0x1}, + unicode.Range32{Lo: 0x11ca9, Hi: 0x11ca9, Stride: 0x1}, + unicode.Range32{Lo: 0x11caa, Hi: 0x11cb0, Stride: 
0x1},
+ unicode.Range32{Lo: 0x11cb1, Hi: 0x11cb1, Stride: 0x1},
+ unicode.Range32{Lo: 0x11cb2, Hi: 0x11cb3, Stride: 0x1},
+ unicode.Range32{Lo: 0x11cb4, Hi: 0x11cb4, Stride: 0x1},
+ unicode.Range32{Lo: 0x11cb5, Hi: 0x11cb6, Stride: 0x1},
+ unicode.Range32{Lo: 0x16af0, Hi: 0x16af4, Stride: 0x1},
+ unicode.Range32{Lo: 0x16b30, Hi: 0x16b36, Stride: 0x1},
+ unicode.Range32{Lo: 0x16f51, Hi: 0x16f7e, Stride: 0x1},
+ unicode.Range32{Lo: 0x16f8f, Hi: 0x16f92, Stride: 0x1},
+ unicode.Range32{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d165, Hi: 0x1d166, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d167, Hi: 0x1d169, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d16d, Hi: 0x1d172, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d17b, Hi: 0x1d182, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d185, Hi: 0x1d18b, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d242, Hi: 0x1d244, Stride: 0x1},
+ unicode.Range32{Lo: 0x1da00, Hi: 0x1da36, Stride: 0x1},
+ unicode.Range32{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 0x1},
+ unicode.Range32{Lo: 0x1da75, Hi: 0x1da75, Stride: 0x1},
+ unicode.Range32{Lo: 0x1da84, Hi: 0x1da84, Stride: 0x1},
+ unicode.Range32{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 0x1},
+ unicode.Range32{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e000, Hi: 0x1e006, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e008, Hi: 0x1e018, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e01b, Hi: 0x1e021, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e023, Hi: 0x1e024, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e026, Hi: 0x1e02a, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e944, Hi: 0x1e94a, Stride: 0x1},
+ unicode.Range32{Lo: 0xe0020, Hi: 0xe007f, Stride: 0x1},
+ unicode.Range32{Lo: 0xe0100, Hi: 0xe01ef, Stride: 0x1},
+ },
+ LatinOffset: 0,
+}
+
+var _SentenceFormat = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0xad, Hi: 0xad, Stride: 0x1},
+ unicode.Range16{Lo: 0x600, Hi: 0x605, Stride: 0x1},
+ unicode.Range16{Lo: 0x61c, Hi: 0x61c, Stride: 0x1},
+ unicode.Range16{Lo: 0x6dd, Hi: 0x6dd, Stride: 0x1},
+ unicode.Range16{Lo: 0x70f, Hi: 0x70f, Stride: 0x1},
+ unicode.Range16{Lo: 0x8e2, Hi: 0x8e2, Stride: 0x1},
+ unicode.Range16{Lo: 0x180e, Hi: 0x180e, Stride: 0x1},
+ unicode.Range16{Lo: 0x200b, Hi: 0x200b, Stride: 0x1},
+ unicode.Range16{Lo: 0x200e, Hi: 0x200f, Stride: 0x1},
+ unicode.Range16{Lo: 0x202a, Hi: 0x202e, Stride: 0x1},
+ unicode.Range16{Lo: 0x2060, Hi: 0x2064, Stride: 0x1},
+ unicode.Range16{Lo: 0x2066, Hi: 0x206f, Stride: 0x1},
+ unicode.Range16{Lo: 0xfeff, Hi: 0xfeff, Stride: 0x1},
+ unicode.Range16{Lo: 0xfff9, Hi: 0xfffb, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x110bd, Hi: 0x110bd, Stride: 0x1},
+ unicode.Range32{Lo: 0x1bca0, Hi: 0x1bca3, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d173, Hi: 0x1d17a, Stride: 0x1},
+ unicode.Range32{Lo: 0xe0001, Hi: 0xe0001, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _SentenceLF = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0xa, Hi: 0xa, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _SentenceLower = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x61, Hi: 0x7a, Stride: 0x1},
+ unicode.Range16{Lo: 0xaa, Hi: 0xaa, Stride: 0x1},
+ unicode.Range16{Lo: 0xb5, Hi: 0xb5, Stride: 0x1},
+ unicode.Range16{Lo: 0xba, Hi: 0xba, Stride: 0x1},
+ unicode.Range16{Lo: 0xdf, Hi: 0xf6, Stride: 0x1},
+ unicode.Range16{Lo: 0xf8, Hi: 0xff, Stride: 0x1},
+ unicode.Range16{Lo: 0x101, Hi: 0x101, Stride: 0x1}, +
unicode.Range16{Lo: 0x103, Hi: 0x103, Stride: 0x1}, + unicode.Range16{Lo: 0x105, Hi: 0x105, Stride: 0x1}, + unicode.Range16{Lo: 0x107, Hi: 0x107, Stride: 0x1}, + unicode.Range16{Lo: 0x109, Hi: 0x109, Stride: 0x1}, + unicode.Range16{Lo: 0x10b, Hi: 0x10b, Stride: 0x1}, + unicode.Range16{Lo: 0x10d, Hi: 0x10d, Stride: 0x1}, + unicode.Range16{Lo: 0x10f, Hi: 0x10f, Stride: 0x1}, + unicode.Range16{Lo: 0x111, Hi: 0x111, Stride: 0x1}, + unicode.Range16{Lo: 0x113, Hi: 0x113, Stride: 0x1}, + unicode.Range16{Lo: 0x115, Hi: 0x115, Stride: 0x1}, + unicode.Range16{Lo: 0x117, Hi: 0x117, Stride: 0x1}, + unicode.Range16{Lo: 0x119, Hi: 0x119, Stride: 0x1}, + unicode.Range16{Lo: 0x11b, Hi: 0x11b, Stride: 0x1}, + unicode.Range16{Lo: 0x11d, Hi: 0x11d, Stride: 0x1}, + unicode.Range16{Lo: 0x11f, Hi: 0x11f, Stride: 0x1}, + unicode.Range16{Lo: 0x121, Hi: 0x121, Stride: 0x1}, + unicode.Range16{Lo: 0x123, Hi: 0x123, Stride: 0x1}, + unicode.Range16{Lo: 0x125, Hi: 0x125, Stride: 0x1}, + unicode.Range16{Lo: 0x127, Hi: 0x127, Stride: 0x1}, + unicode.Range16{Lo: 0x129, Hi: 0x129, Stride: 0x1}, + unicode.Range16{Lo: 0x12b, Hi: 0x12b, Stride: 0x1}, + unicode.Range16{Lo: 0x12d, Hi: 0x12d, Stride: 0x1}, + unicode.Range16{Lo: 0x12f, Hi: 0x12f, Stride: 0x1}, + unicode.Range16{Lo: 0x131, Hi: 0x131, Stride: 0x1}, + unicode.Range16{Lo: 0x133, Hi: 0x133, Stride: 0x1}, + unicode.Range16{Lo: 0x135, Hi: 0x135, Stride: 0x1}, + unicode.Range16{Lo: 0x137, Hi: 0x138, Stride: 0x1}, + unicode.Range16{Lo: 0x13a, Hi: 0x13a, Stride: 0x1}, + unicode.Range16{Lo: 0x13c, Hi: 0x13c, Stride: 0x1}, + unicode.Range16{Lo: 0x13e, Hi: 0x13e, Stride: 0x1}, + unicode.Range16{Lo: 0x140, Hi: 0x140, Stride: 0x1}, + unicode.Range16{Lo: 0x142, Hi: 0x142, Stride: 0x1}, + unicode.Range16{Lo: 0x144, Hi: 0x144, Stride: 0x1}, + unicode.Range16{Lo: 0x146, Hi: 0x146, Stride: 0x1}, + unicode.Range16{Lo: 0x148, Hi: 0x149, Stride: 0x1}, + unicode.Range16{Lo: 0x14b, Hi: 0x14b, Stride: 0x1}, + unicode.Range16{Lo: 0x14d, Hi: 0x14d, Stride: 0x1}, + unicode.Range16{Lo: 0x14f, Hi: 0x14f, Stride: 0x1}, + unicode.Range16{Lo: 0x151, Hi: 0x151, Stride: 0x1}, + unicode.Range16{Lo: 0x153, Hi: 0x153, Stride: 0x1}, + unicode.Range16{Lo: 0x155, Hi: 0x155, Stride: 0x1}, + unicode.Range16{Lo: 0x157, Hi: 0x157, Stride: 0x1}, + unicode.Range16{Lo: 0x159, Hi: 0x159, Stride: 0x1}, + unicode.Range16{Lo: 0x15b, Hi: 0x15b, Stride: 0x1}, + unicode.Range16{Lo: 0x15d, Hi: 0x15d, Stride: 0x1}, + unicode.Range16{Lo: 0x15f, Hi: 0x15f, Stride: 0x1}, + unicode.Range16{Lo: 0x161, Hi: 0x161, Stride: 0x1}, + unicode.Range16{Lo: 0x163, Hi: 0x163, Stride: 0x1}, + unicode.Range16{Lo: 0x165, Hi: 0x165, Stride: 0x1}, + unicode.Range16{Lo: 0x167, Hi: 0x167, Stride: 0x1}, + unicode.Range16{Lo: 0x169, Hi: 0x169, Stride: 0x1}, + unicode.Range16{Lo: 0x16b, Hi: 0x16b, Stride: 0x1}, + unicode.Range16{Lo: 0x16d, Hi: 0x16d, Stride: 0x1}, + unicode.Range16{Lo: 0x16f, Hi: 0x16f, Stride: 0x1}, + unicode.Range16{Lo: 0x171, Hi: 0x171, Stride: 0x1}, + unicode.Range16{Lo: 0x173, Hi: 0x173, Stride: 0x1}, + unicode.Range16{Lo: 0x175, Hi: 0x175, Stride: 0x1}, + unicode.Range16{Lo: 0x177, Hi: 0x177, Stride: 0x1}, + unicode.Range16{Lo: 0x17a, Hi: 0x17a, Stride: 0x1}, + unicode.Range16{Lo: 0x17c, Hi: 0x17c, Stride: 0x1}, + unicode.Range16{Lo: 0x17e, Hi: 0x180, Stride: 0x1}, + unicode.Range16{Lo: 0x183, Hi: 0x183, Stride: 0x1}, + unicode.Range16{Lo: 0x185, Hi: 0x185, Stride: 0x1}, + unicode.Range16{Lo: 0x188, Hi: 0x188, Stride: 0x1}, + unicode.Range16{Lo: 0x18c, Hi: 0x18d, Stride: 0x1}, + unicode.Range16{Lo: 0x192, Hi: 0x192, 
Stride: 0x1}, + unicode.Range16{Lo: 0x195, Hi: 0x195, Stride: 0x1}, + unicode.Range16{Lo: 0x199, Hi: 0x19b, Stride: 0x1}, + unicode.Range16{Lo: 0x19e, Hi: 0x19e, Stride: 0x1}, + unicode.Range16{Lo: 0x1a1, Hi: 0x1a1, Stride: 0x1}, + unicode.Range16{Lo: 0x1a3, Hi: 0x1a3, Stride: 0x1}, + unicode.Range16{Lo: 0x1a5, Hi: 0x1a5, Stride: 0x1}, + unicode.Range16{Lo: 0x1a8, Hi: 0x1a8, Stride: 0x1}, + unicode.Range16{Lo: 0x1aa, Hi: 0x1ab, Stride: 0x1}, + unicode.Range16{Lo: 0x1ad, Hi: 0x1ad, Stride: 0x1}, + unicode.Range16{Lo: 0x1b0, Hi: 0x1b0, Stride: 0x1}, + unicode.Range16{Lo: 0x1b4, Hi: 0x1b4, Stride: 0x1}, + unicode.Range16{Lo: 0x1b6, Hi: 0x1b6, Stride: 0x1}, + unicode.Range16{Lo: 0x1b9, Hi: 0x1ba, Stride: 0x1}, + unicode.Range16{Lo: 0x1bd, Hi: 0x1bf, Stride: 0x1}, + unicode.Range16{Lo: 0x1c6, Hi: 0x1c6, Stride: 0x1}, + unicode.Range16{Lo: 0x1c9, Hi: 0x1c9, Stride: 0x1}, + unicode.Range16{Lo: 0x1cc, Hi: 0x1cc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce, Hi: 0x1ce, Stride: 0x1}, + unicode.Range16{Lo: 0x1d0, Hi: 0x1d0, Stride: 0x1}, + unicode.Range16{Lo: 0x1d2, Hi: 0x1d2, Stride: 0x1}, + unicode.Range16{Lo: 0x1d4, Hi: 0x1d4, Stride: 0x1}, + unicode.Range16{Lo: 0x1d6, Hi: 0x1d6, Stride: 0x1}, + unicode.Range16{Lo: 0x1d8, Hi: 0x1d8, Stride: 0x1}, + unicode.Range16{Lo: 0x1da, Hi: 0x1da, Stride: 0x1}, + unicode.Range16{Lo: 0x1dc, Hi: 0x1dd, Stride: 0x1}, + unicode.Range16{Lo: 0x1df, Hi: 0x1df, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1, Hi: 0x1e1, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3, Hi: 0x1e3, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5, Hi: 0x1e5, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7, Hi: 0x1e7, Stride: 0x1}, + unicode.Range16{Lo: 0x1e9, Hi: 0x1e9, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb, Hi: 0x1eb, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed, Hi: 0x1ed, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef, Hi: 0x1f0, Stride: 0x1}, + unicode.Range16{Lo: 0x1f3, Hi: 0x1f3, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5, Hi: 0x1f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1f9, Hi: 0x1f9, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb, Hi: 0x1fb, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd, Hi: 0x1fd, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff, Hi: 0x1ff, Stride: 0x1}, + unicode.Range16{Lo: 0x201, Hi: 0x201, Stride: 0x1}, + unicode.Range16{Lo: 0x203, Hi: 0x203, Stride: 0x1}, + unicode.Range16{Lo: 0x205, Hi: 0x205, Stride: 0x1}, + unicode.Range16{Lo: 0x207, Hi: 0x207, Stride: 0x1}, + unicode.Range16{Lo: 0x209, Hi: 0x209, Stride: 0x1}, + unicode.Range16{Lo: 0x20b, Hi: 0x20b, Stride: 0x1}, + unicode.Range16{Lo: 0x20d, Hi: 0x20d, Stride: 0x1}, + unicode.Range16{Lo: 0x20f, Hi: 0x20f, Stride: 0x1}, + unicode.Range16{Lo: 0x211, Hi: 0x211, Stride: 0x1}, + unicode.Range16{Lo: 0x213, Hi: 0x213, Stride: 0x1}, + unicode.Range16{Lo: 0x215, Hi: 0x215, Stride: 0x1}, + unicode.Range16{Lo: 0x217, Hi: 0x217, Stride: 0x1}, + unicode.Range16{Lo: 0x219, Hi: 0x219, Stride: 0x1}, + unicode.Range16{Lo: 0x21b, Hi: 0x21b, Stride: 0x1}, + unicode.Range16{Lo: 0x21d, Hi: 0x21d, Stride: 0x1}, + unicode.Range16{Lo: 0x21f, Hi: 0x21f, Stride: 0x1}, + unicode.Range16{Lo: 0x221, Hi: 0x221, Stride: 0x1}, + unicode.Range16{Lo: 0x223, Hi: 0x223, Stride: 0x1}, + unicode.Range16{Lo: 0x225, Hi: 0x225, Stride: 0x1}, + unicode.Range16{Lo: 0x227, Hi: 0x227, Stride: 0x1}, + unicode.Range16{Lo: 0x229, Hi: 0x229, Stride: 0x1}, + unicode.Range16{Lo: 0x22b, Hi: 0x22b, Stride: 0x1}, + unicode.Range16{Lo: 0x22d, Hi: 0x22d, Stride: 0x1}, + unicode.Range16{Lo: 0x22f, Hi: 0x22f, Stride: 0x1}, + unicode.Range16{Lo: 0x231, Hi: 0x231, Stride: 0x1}, + unicode.Range16{Lo: 0x233, 
Hi: 0x239, Stride: 0x1}, + unicode.Range16{Lo: 0x23c, Hi: 0x23c, Stride: 0x1}, + unicode.Range16{Lo: 0x23f, Hi: 0x240, Stride: 0x1}, + unicode.Range16{Lo: 0x242, Hi: 0x242, Stride: 0x1}, + unicode.Range16{Lo: 0x247, Hi: 0x247, Stride: 0x1}, + unicode.Range16{Lo: 0x249, Hi: 0x249, Stride: 0x1}, + unicode.Range16{Lo: 0x24b, Hi: 0x24b, Stride: 0x1}, + unicode.Range16{Lo: 0x24d, Hi: 0x24d, Stride: 0x1}, + unicode.Range16{Lo: 0x24f, Hi: 0x293, Stride: 0x1}, + unicode.Range16{Lo: 0x295, Hi: 0x2af, Stride: 0x1}, + unicode.Range16{Lo: 0x2b0, Hi: 0x2b8, Stride: 0x1}, + unicode.Range16{Lo: 0x2c0, Hi: 0x2c1, Stride: 0x1}, + unicode.Range16{Lo: 0x2e0, Hi: 0x2e4, Stride: 0x1}, + unicode.Range16{Lo: 0x371, Hi: 0x371, Stride: 0x1}, + unicode.Range16{Lo: 0x373, Hi: 0x373, Stride: 0x1}, + unicode.Range16{Lo: 0x377, Hi: 0x377, Stride: 0x1}, + unicode.Range16{Lo: 0x37a, Hi: 0x37a, Stride: 0x1}, + unicode.Range16{Lo: 0x37b, Hi: 0x37d, Stride: 0x1}, + unicode.Range16{Lo: 0x390, Hi: 0x390, Stride: 0x1}, + unicode.Range16{Lo: 0x3ac, Hi: 0x3ce, Stride: 0x1}, + unicode.Range16{Lo: 0x3d0, Hi: 0x3d1, Stride: 0x1}, + unicode.Range16{Lo: 0x3d5, Hi: 0x3d7, Stride: 0x1}, + unicode.Range16{Lo: 0x3d9, Hi: 0x3d9, Stride: 0x1}, + unicode.Range16{Lo: 0x3db, Hi: 0x3db, Stride: 0x1}, + unicode.Range16{Lo: 0x3dd, Hi: 0x3dd, Stride: 0x1}, + unicode.Range16{Lo: 0x3df, Hi: 0x3df, Stride: 0x1}, + unicode.Range16{Lo: 0x3e1, Hi: 0x3e1, Stride: 0x1}, + unicode.Range16{Lo: 0x3e3, Hi: 0x3e3, Stride: 0x1}, + unicode.Range16{Lo: 0x3e5, Hi: 0x3e5, Stride: 0x1}, + unicode.Range16{Lo: 0x3e7, Hi: 0x3e7, Stride: 0x1}, + unicode.Range16{Lo: 0x3e9, Hi: 0x3e9, Stride: 0x1}, + unicode.Range16{Lo: 0x3eb, Hi: 0x3eb, Stride: 0x1}, + unicode.Range16{Lo: 0x3ed, Hi: 0x3ed, Stride: 0x1}, + unicode.Range16{Lo: 0x3ef, Hi: 0x3f3, Stride: 0x1}, + unicode.Range16{Lo: 0x3f5, Hi: 0x3f5, Stride: 0x1}, + unicode.Range16{Lo: 0x3f8, Hi: 0x3f8, Stride: 0x1}, + unicode.Range16{Lo: 0x3fb, Hi: 0x3fc, Stride: 0x1}, + unicode.Range16{Lo: 0x430, Hi: 0x45f, Stride: 0x1}, + unicode.Range16{Lo: 0x461, Hi: 0x461, Stride: 0x1}, + unicode.Range16{Lo: 0x463, Hi: 0x463, Stride: 0x1}, + unicode.Range16{Lo: 0x465, Hi: 0x465, Stride: 0x1}, + unicode.Range16{Lo: 0x467, Hi: 0x467, Stride: 0x1}, + unicode.Range16{Lo: 0x469, Hi: 0x469, Stride: 0x1}, + unicode.Range16{Lo: 0x46b, Hi: 0x46b, Stride: 0x1}, + unicode.Range16{Lo: 0x46d, Hi: 0x46d, Stride: 0x1}, + unicode.Range16{Lo: 0x46f, Hi: 0x46f, Stride: 0x1}, + unicode.Range16{Lo: 0x471, Hi: 0x471, Stride: 0x1}, + unicode.Range16{Lo: 0x473, Hi: 0x473, Stride: 0x1}, + unicode.Range16{Lo: 0x475, Hi: 0x475, Stride: 0x1}, + unicode.Range16{Lo: 0x477, Hi: 0x477, Stride: 0x1}, + unicode.Range16{Lo: 0x479, Hi: 0x479, Stride: 0x1}, + unicode.Range16{Lo: 0x47b, Hi: 0x47b, Stride: 0x1}, + unicode.Range16{Lo: 0x47d, Hi: 0x47d, Stride: 0x1}, + unicode.Range16{Lo: 0x47f, Hi: 0x47f, Stride: 0x1}, + unicode.Range16{Lo: 0x481, Hi: 0x481, Stride: 0x1}, + unicode.Range16{Lo: 0x48b, Hi: 0x48b, Stride: 0x1}, + unicode.Range16{Lo: 0x48d, Hi: 0x48d, Stride: 0x1}, + unicode.Range16{Lo: 0x48f, Hi: 0x48f, Stride: 0x1}, + unicode.Range16{Lo: 0x491, Hi: 0x491, Stride: 0x1}, + unicode.Range16{Lo: 0x493, Hi: 0x493, Stride: 0x1}, + unicode.Range16{Lo: 0x495, Hi: 0x495, Stride: 0x1}, + unicode.Range16{Lo: 0x497, Hi: 0x497, Stride: 0x1}, + unicode.Range16{Lo: 0x499, Hi: 0x499, Stride: 0x1}, + unicode.Range16{Lo: 0x49b, Hi: 0x49b, Stride: 0x1}, + unicode.Range16{Lo: 0x49d, Hi: 0x49d, Stride: 0x1}, + unicode.Range16{Lo: 0x49f, Hi: 0x49f, Stride: 0x1}, + 
unicode.Range16{Lo: 0x4a1, Hi: 0x4a1, Stride: 0x1}, + unicode.Range16{Lo: 0x4a3, Hi: 0x4a3, Stride: 0x1}, + unicode.Range16{Lo: 0x4a5, Hi: 0x4a5, Stride: 0x1}, + unicode.Range16{Lo: 0x4a7, Hi: 0x4a7, Stride: 0x1}, + unicode.Range16{Lo: 0x4a9, Hi: 0x4a9, Stride: 0x1}, + unicode.Range16{Lo: 0x4ab, Hi: 0x4ab, Stride: 0x1}, + unicode.Range16{Lo: 0x4ad, Hi: 0x4ad, Stride: 0x1}, + unicode.Range16{Lo: 0x4af, Hi: 0x4af, Stride: 0x1}, + unicode.Range16{Lo: 0x4b1, Hi: 0x4b1, Stride: 0x1}, + unicode.Range16{Lo: 0x4b3, Hi: 0x4b3, Stride: 0x1}, + unicode.Range16{Lo: 0x4b5, Hi: 0x4b5, Stride: 0x1}, + unicode.Range16{Lo: 0x4b7, Hi: 0x4b7, Stride: 0x1}, + unicode.Range16{Lo: 0x4b9, Hi: 0x4b9, Stride: 0x1}, + unicode.Range16{Lo: 0x4bb, Hi: 0x4bb, Stride: 0x1}, + unicode.Range16{Lo: 0x4bd, Hi: 0x4bd, Stride: 0x1}, + unicode.Range16{Lo: 0x4bf, Hi: 0x4bf, Stride: 0x1}, + unicode.Range16{Lo: 0x4c2, Hi: 0x4c2, Stride: 0x1}, + unicode.Range16{Lo: 0x4c4, Hi: 0x4c4, Stride: 0x1}, + unicode.Range16{Lo: 0x4c6, Hi: 0x4c6, Stride: 0x1}, + unicode.Range16{Lo: 0x4c8, Hi: 0x4c8, Stride: 0x1}, + unicode.Range16{Lo: 0x4ca, Hi: 0x4ca, Stride: 0x1}, + unicode.Range16{Lo: 0x4cc, Hi: 0x4cc, Stride: 0x1}, + unicode.Range16{Lo: 0x4ce, Hi: 0x4cf, Stride: 0x1}, + unicode.Range16{Lo: 0x4d1, Hi: 0x4d1, Stride: 0x1}, + unicode.Range16{Lo: 0x4d3, Hi: 0x4d3, Stride: 0x1}, + unicode.Range16{Lo: 0x4d5, Hi: 0x4d5, Stride: 0x1}, + unicode.Range16{Lo: 0x4d7, Hi: 0x4d7, Stride: 0x1}, + unicode.Range16{Lo: 0x4d9, Hi: 0x4d9, Stride: 0x1}, + unicode.Range16{Lo: 0x4db, Hi: 0x4db, Stride: 0x1}, + unicode.Range16{Lo: 0x4dd, Hi: 0x4dd, Stride: 0x1}, + unicode.Range16{Lo: 0x4df, Hi: 0x4df, Stride: 0x1}, + unicode.Range16{Lo: 0x4e1, Hi: 0x4e1, Stride: 0x1}, + unicode.Range16{Lo: 0x4e3, Hi: 0x4e3, Stride: 0x1}, + unicode.Range16{Lo: 0x4e5, Hi: 0x4e5, Stride: 0x1}, + unicode.Range16{Lo: 0x4e7, Hi: 0x4e7, Stride: 0x1}, + unicode.Range16{Lo: 0x4e9, Hi: 0x4e9, Stride: 0x1}, + unicode.Range16{Lo: 0x4eb, Hi: 0x4eb, Stride: 0x1}, + unicode.Range16{Lo: 0x4ed, Hi: 0x4ed, Stride: 0x1}, + unicode.Range16{Lo: 0x4ef, Hi: 0x4ef, Stride: 0x1}, + unicode.Range16{Lo: 0x4f1, Hi: 0x4f1, Stride: 0x1}, + unicode.Range16{Lo: 0x4f3, Hi: 0x4f3, Stride: 0x1}, + unicode.Range16{Lo: 0x4f5, Hi: 0x4f5, Stride: 0x1}, + unicode.Range16{Lo: 0x4f7, Hi: 0x4f7, Stride: 0x1}, + unicode.Range16{Lo: 0x4f9, Hi: 0x4f9, Stride: 0x1}, + unicode.Range16{Lo: 0x4fb, Hi: 0x4fb, Stride: 0x1}, + unicode.Range16{Lo: 0x4fd, Hi: 0x4fd, Stride: 0x1}, + unicode.Range16{Lo: 0x4ff, Hi: 0x4ff, Stride: 0x1}, + unicode.Range16{Lo: 0x501, Hi: 0x501, Stride: 0x1}, + unicode.Range16{Lo: 0x503, Hi: 0x503, Stride: 0x1}, + unicode.Range16{Lo: 0x505, Hi: 0x505, Stride: 0x1}, + unicode.Range16{Lo: 0x507, Hi: 0x507, Stride: 0x1}, + unicode.Range16{Lo: 0x509, Hi: 0x509, Stride: 0x1}, + unicode.Range16{Lo: 0x50b, Hi: 0x50b, Stride: 0x1}, + unicode.Range16{Lo: 0x50d, Hi: 0x50d, Stride: 0x1}, + unicode.Range16{Lo: 0x50f, Hi: 0x50f, Stride: 0x1}, + unicode.Range16{Lo: 0x511, Hi: 0x511, Stride: 0x1}, + unicode.Range16{Lo: 0x513, Hi: 0x513, Stride: 0x1}, + unicode.Range16{Lo: 0x515, Hi: 0x515, Stride: 0x1}, + unicode.Range16{Lo: 0x517, Hi: 0x517, Stride: 0x1}, + unicode.Range16{Lo: 0x519, Hi: 0x519, Stride: 0x1}, + unicode.Range16{Lo: 0x51b, Hi: 0x51b, Stride: 0x1}, + unicode.Range16{Lo: 0x51d, Hi: 0x51d, Stride: 0x1}, + unicode.Range16{Lo: 0x51f, Hi: 0x51f, Stride: 0x1}, + unicode.Range16{Lo: 0x521, Hi: 0x521, Stride: 0x1}, + unicode.Range16{Lo: 0x523, Hi: 0x523, Stride: 0x1}, + unicode.Range16{Lo: 0x525, Hi: 0x525, 
Stride: 0x1}, + unicode.Range16{Lo: 0x527, Hi: 0x527, Stride: 0x1}, + unicode.Range16{Lo: 0x529, Hi: 0x529, Stride: 0x1}, + unicode.Range16{Lo: 0x52b, Hi: 0x52b, Stride: 0x1}, + unicode.Range16{Lo: 0x52d, Hi: 0x52d, Stride: 0x1}, + unicode.Range16{Lo: 0x52f, Hi: 0x52f, Stride: 0x1}, + unicode.Range16{Lo: 0x561, Hi: 0x587, Stride: 0x1}, + unicode.Range16{Lo: 0x13f8, Hi: 0x13fd, Stride: 0x1}, + unicode.Range16{Lo: 0x1c80, Hi: 0x1c88, Stride: 0x1}, + unicode.Range16{Lo: 0x1d00, Hi: 0x1d2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1d2c, Hi: 0x1d6a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d6b, Hi: 0x1d77, Stride: 0x1}, + unicode.Range16{Lo: 0x1d78, Hi: 0x1d78, Stride: 0x1}, + unicode.Range16{Lo: 0x1d79, Hi: 0x1d9a, Stride: 0x1}, + unicode.Range16{Lo: 0x1d9b, Hi: 0x1dbf, Stride: 0x1}, + unicode.Range16{Lo: 0x1e01, Hi: 0x1e01, Stride: 0x1}, + unicode.Range16{Lo: 0x1e03, Hi: 0x1e03, Stride: 0x1}, + unicode.Range16{Lo: 0x1e05, Hi: 0x1e05, Stride: 0x1}, + unicode.Range16{Lo: 0x1e07, Hi: 0x1e07, Stride: 0x1}, + unicode.Range16{Lo: 0x1e09, Hi: 0x1e09, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0b, Hi: 0x1e0b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0d, Hi: 0x1e0d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0f, Hi: 0x1e0f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e11, Hi: 0x1e11, Stride: 0x1}, + unicode.Range16{Lo: 0x1e13, Hi: 0x1e13, Stride: 0x1}, + unicode.Range16{Lo: 0x1e15, Hi: 0x1e15, Stride: 0x1}, + unicode.Range16{Lo: 0x1e17, Hi: 0x1e17, Stride: 0x1}, + unicode.Range16{Lo: 0x1e19, Hi: 0x1e19, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1b, Hi: 0x1e1b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1d, Hi: 0x1e1d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1f, Hi: 0x1e1f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e21, Hi: 0x1e21, Stride: 0x1}, + unicode.Range16{Lo: 0x1e23, Hi: 0x1e23, Stride: 0x1}, + unicode.Range16{Lo: 0x1e25, Hi: 0x1e25, Stride: 0x1}, + unicode.Range16{Lo: 0x1e27, Hi: 0x1e27, Stride: 0x1}, + unicode.Range16{Lo: 0x1e29, Hi: 0x1e29, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2b, Hi: 0x1e2b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2d, Hi: 0x1e2d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2f, Hi: 0x1e2f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e31, Hi: 0x1e31, Stride: 0x1}, + unicode.Range16{Lo: 0x1e33, Hi: 0x1e33, Stride: 0x1}, + unicode.Range16{Lo: 0x1e35, Hi: 0x1e35, Stride: 0x1}, + unicode.Range16{Lo: 0x1e37, Hi: 0x1e37, Stride: 0x1}, + unicode.Range16{Lo: 0x1e39, Hi: 0x1e39, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3b, Hi: 0x1e3b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3d, Hi: 0x1e3d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3f, Hi: 0x1e3f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e41, Hi: 0x1e41, Stride: 0x1}, + unicode.Range16{Lo: 0x1e43, Hi: 0x1e43, Stride: 0x1}, + unicode.Range16{Lo: 0x1e45, Hi: 0x1e45, Stride: 0x1}, + unicode.Range16{Lo: 0x1e47, Hi: 0x1e47, Stride: 0x1}, + unicode.Range16{Lo: 0x1e49, Hi: 0x1e49, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4b, Hi: 0x1e4b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4d, Hi: 0x1e4d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4f, Hi: 0x1e4f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e51, Hi: 0x1e51, Stride: 0x1}, + unicode.Range16{Lo: 0x1e53, Hi: 0x1e53, Stride: 0x1}, + unicode.Range16{Lo: 0x1e55, Hi: 0x1e55, Stride: 0x1}, + unicode.Range16{Lo: 0x1e57, Hi: 0x1e57, Stride: 0x1}, + unicode.Range16{Lo: 0x1e59, Hi: 0x1e59, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5b, Hi: 0x1e5b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5d, Hi: 0x1e5d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5f, Hi: 0x1e5f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e61, Hi: 0x1e61, Stride: 0x1}, + unicode.Range16{Lo: 
0x1e63, Hi: 0x1e63, Stride: 0x1}, + unicode.Range16{Lo: 0x1e65, Hi: 0x1e65, Stride: 0x1}, + unicode.Range16{Lo: 0x1e67, Hi: 0x1e67, Stride: 0x1}, + unicode.Range16{Lo: 0x1e69, Hi: 0x1e69, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6b, Hi: 0x1e6b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6d, Hi: 0x1e6d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6f, Hi: 0x1e6f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e71, Hi: 0x1e71, Stride: 0x1}, + unicode.Range16{Lo: 0x1e73, Hi: 0x1e73, Stride: 0x1}, + unicode.Range16{Lo: 0x1e75, Hi: 0x1e75, Stride: 0x1}, + unicode.Range16{Lo: 0x1e77, Hi: 0x1e77, Stride: 0x1}, + unicode.Range16{Lo: 0x1e79, Hi: 0x1e79, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7b, Hi: 0x1e7b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7d, Hi: 0x1e7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7f, Hi: 0x1e7f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e81, Hi: 0x1e81, Stride: 0x1}, + unicode.Range16{Lo: 0x1e83, Hi: 0x1e83, Stride: 0x1}, + unicode.Range16{Lo: 0x1e85, Hi: 0x1e85, Stride: 0x1}, + unicode.Range16{Lo: 0x1e87, Hi: 0x1e87, Stride: 0x1}, + unicode.Range16{Lo: 0x1e89, Hi: 0x1e89, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8b, Hi: 0x1e8b, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8d, Hi: 0x1e8d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8f, Hi: 0x1e8f, Stride: 0x1}, + unicode.Range16{Lo: 0x1e91, Hi: 0x1e91, Stride: 0x1}, + unicode.Range16{Lo: 0x1e93, Hi: 0x1e93, Stride: 0x1}, + unicode.Range16{Lo: 0x1e95, Hi: 0x1e9d, Stride: 0x1}, + unicode.Range16{Lo: 0x1e9f, Hi: 0x1e9f, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea1, Hi: 0x1ea1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea3, Hi: 0x1ea3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea5, Hi: 0x1ea5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea7, Hi: 0x1ea7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea9, Hi: 0x1ea9, Stride: 0x1}, + unicode.Range16{Lo: 0x1eab, Hi: 0x1eab, Stride: 0x1}, + unicode.Range16{Lo: 0x1ead, Hi: 0x1ead, Stride: 0x1}, + unicode.Range16{Lo: 0x1eaf, Hi: 0x1eaf, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb1, Hi: 0x1eb1, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb3, Hi: 0x1eb3, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb5, Hi: 0x1eb5, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb7, Hi: 0x1eb7, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb9, Hi: 0x1eb9, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebb, Hi: 0x1ebb, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebd, Hi: 0x1ebd, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebf, Hi: 0x1ebf, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec1, Hi: 0x1ec1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec3, Hi: 0x1ec3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec5, Hi: 0x1ec5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec7, Hi: 0x1ec7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec9, Hi: 0x1ec9, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecb, Hi: 0x1ecb, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecd, Hi: 0x1ecd, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecf, Hi: 0x1ecf, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed1, Hi: 0x1ed1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed3, Hi: 0x1ed3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed5, Hi: 0x1ed5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed7, Hi: 0x1ed7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed9, Hi: 0x1ed9, Stride: 0x1}, + unicode.Range16{Lo: 0x1edb, Hi: 0x1edb, Stride: 0x1}, + unicode.Range16{Lo: 0x1edd, Hi: 0x1edd, Stride: 0x1}, + unicode.Range16{Lo: 0x1edf, Hi: 0x1edf, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee1, Hi: 0x1ee1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee3, Hi: 0x1ee3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee5, Hi: 0x1ee5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee7, Hi: 0x1ee7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee9, Hi: 0x1ee9, 
Stride: 0x1}, + unicode.Range16{Lo: 0x1eeb, Hi: 0x1eeb, Stride: 0x1}, + unicode.Range16{Lo: 0x1eed, Hi: 0x1eed, Stride: 0x1}, + unicode.Range16{Lo: 0x1eef, Hi: 0x1eef, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef1, Hi: 0x1ef1, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef3, Hi: 0x1ef3, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef5, Hi: 0x1ef5, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef7, Hi: 0x1ef7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef9, Hi: 0x1ef9, Stride: 0x1}, + unicode.Range16{Lo: 0x1efb, Hi: 0x1efb, Stride: 0x1}, + unicode.Range16{Lo: 0x1efd, Hi: 0x1efd, Stride: 0x1}, + unicode.Range16{Lo: 0x1eff, Hi: 0x1f07, Stride: 0x1}, + unicode.Range16{Lo: 0x1f10, Hi: 0x1f15, Stride: 0x1}, + unicode.Range16{Lo: 0x1f20, Hi: 0x1f27, Stride: 0x1}, + unicode.Range16{Lo: 0x1f30, Hi: 0x1f37, Stride: 0x1}, + unicode.Range16{Lo: 0x1f40, Hi: 0x1f45, Stride: 0x1}, + unicode.Range16{Lo: 0x1f50, Hi: 0x1f57, Stride: 0x1}, + unicode.Range16{Lo: 0x1f60, Hi: 0x1f67, Stride: 0x1}, + unicode.Range16{Lo: 0x1f70, Hi: 0x1f7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f80, Hi: 0x1f87, Stride: 0x1}, + unicode.Range16{Lo: 0x1f90, Hi: 0x1f97, Stride: 0x1}, + unicode.Range16{Lo: 0x1fa0, Hi: 0x1fa7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb0, Hi: 0x1fb4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb6, Hi: 0x1fb7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc6, Hi: 0x1fc7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd6, Hi: 0x1fd7, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe0, Hi: 0x1fe7, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff6, Hi: 0x1ff7, Stride: 0x1}, + unicode.Range16{Lo: 0x2071, Hi: 0x2071, Stride: 0x1}, + unicode.Range16{Lo: 0x207f, Hi: 0x207f, Stride: 0x1}, + unicode.Range16{Lo: 0x2090, Hi: 0x209c, Stride: 0x1}, + unicode.Range16{Lo: 0x210a, Hi: 0x210a, Stride: 0x1}, + unicode.Range16{Lo: 0x210e, Hi: 0x210f, Stride: 0x1}, + unicode.Range16{Lo: 0x2113, Hi: 0x2113, Stride: 0x1}, + unicode.Range16{Lo: 0x212f, Hi: 0x212f, Stride: 0x1}, + unicode.Range16{Lo: 0x2134, Hi: 0x2134, Stride: 0x1}, + unicode.Range16{Lo: 0x2139, Hi: 0x2139, Stride: 0x1}, + unicode.Range16{Lo: 0x213c, Hi: 0x213d, Stride: 0x1}, + unicode.Range16{Lo: 0x2146, Hi: 0x2149, Stride: 0x1}, + unicode.Range16{Lo: 0x214e, Hi: 0x214e, Stride: 0x1}, + unicode.Range16{Lo: 0x2170, Hi: 0x217f, Stride: 0x1}, + unicode.Range16{Lo: 0x2184, Hi: 0x2184, Stride: 0x1}, + unicode.Range16{Lo: 0x24d0, Hi: 0x24e9, Stride: 0x1}, + unicode.Range16{Lo: 0x2c30, Hi: 0x2c5e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c61, Hi: 0x2c61, Stride: 0x1}, + unicode.Range16{Lo: 0x2c65, Hi: 0x2c66, Stride: 0x1}, + unicode.Range16{Lo: 0x2c68, Hi: 0x2c68, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6a, Hi: 0x2c6a, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6c, Hi: 0x2c6c, Stride: 0x1}, + unicode.Range16{Lo: 0x2c71, Hi: 0x2c71, Stride: 0x1}, + unicode.Range16{Lo: 0x2c73, Hi: 0x2c74, Stride: 0x1}, + unicode.Range16{Lo: 0x2c76, Hi: 0x2c7b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7c, Hi: 0x2c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x2c81, Hi: 0x2c81, Stride: 0x1}, + unicode.Range16{Lo: 0x2c83, Hi: 0x2c83, Stride: 0x1}, + unicode.Range16{Lo: 0x2c85, Hi: 0x2c85, Stride: 0x1}, + unicode.Range16{Lo: 0x2c87, Hi: 0x2c87, Stride: 0x1}, + unicode.Range16{Lo: 0x2c89, Hi: 0x2c89, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8b, Hi: 0x2c8b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8d, Hi: 0x2c8d, Stride: 0x1}, + 
unicode.Range16{Lo: 0x2c8f, Hi: 0x2c8f, Stride: 0x1}, + unicode.Range16{Lo: 0x2c91, Hi: 0x2c91, Stride: 0x1}, + unicode.Range16{Lo: 0x2c93, Hi: 0x2c93, Stride: 0x1}, + unicode.Range16{Lo: 0x2c95, Hi: 0x2c95, Stride: 0x1}, + unicode.Range16{Lo: 0x2c97, Hi: 0x2c97, Stride: 0x1}, + unicode.Range16{Lo: 0x2c99, Hi: 0x2c99, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9b, Hi: 0x2c9b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9d, Hi: 0x2c9d, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9f, Hi: 0x2c9f, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca1, Hi: 0x2ca1, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca3, Hi: 0x2ca3, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca5, Hi: 0x2ca5, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca7, Hi: 0x2ca7, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca9, Hi: 0x2ca9, Stride: 0x1}, + unicode.Range16{Lo: 0x2cab, Hi: 0x2cab, Stride: 0x1}, + unicode.Range16{Lo: 0x2cad, Hi: 0x2cad, Stride: 0x1}, + unicode.Range16{Lo: 0x2caf, Hi: 0x2caf, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb1, Hi: 0x2cb1, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb3, Hi: 0x2cb3, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb5, Hi: 0x2cb5, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb7, Hi: 0x2cb7, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb9, Hi: 0x2cb9, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbb, Hi: 0x2cbb, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbd, Hi: 0x2cbd, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbf, Hi: 0x2cbf, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc1, Hi: 0x2cc1, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc3, Hi: 0x2cc3, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc5, Hi: 0x2cc5, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc7, Hi: 0x2cc7, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc9, Hi: 0x2cc9, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccb, Hi: 0x2ccb, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccd, Hi: 0x2ccd, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccf, Hi: 0x2ccf, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd1, Hi: 0x2cd1, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd3, Hi: 0x2cd3, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd5, Hi: 0x2cd5, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd7, Hi: 0x2cd7, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd9, Hi: 0x2cd9, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdb, Hi: 0x2cdb, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdd, Hi: 0x2cdd, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdf, Hi: 0x2cdf, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce1, Hi: 0x2ce1, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce3, Hi: 0x2ce4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cec, Hi: 0x2cec, Stride: 0x1}, + unicode.Range16{Lo: 0x2cee, Hi: 0x2cee, Stride: 0x1}, + unicode.Range16{Lo: 0x2cf3, Hi: 0x2cf3, Stride: 0x1}, + unicode.Range16{Lo: 0x2d00, Hi: 0x2d25, Stride: 0x1}, + unicode.Range16{Lo: 0x2d27, Hi: 0x2d27, Stride: 0x1}, + unicode.Range16{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 0x1}, + unicode.Range16{Lo: 0xa641, Hi: 0xa641, Stride: 0x1}, + unicode.Range16{Lo: 0xa643, Hi: 0xa643, Stride: 0x1}, + unicode.Range16{Lo: 0xa645, Hi: 0xa645, Stride: 0x1}, + unicode.Range16{Lo: 0xa647, Hi: 0xa647, Stride: 0x1}, + unicode.Range16{Lo: 0xa649, Hi: 0xa649, Stride: 0x1}, + unicode.Range16{Lo: 0xa64b, Hi: 0xa64b, Stride: 0x1}, + unicode.Range16{Lo: 0xa64d, Hi: 0xa64d, Stride: 0x1}, + unicode.Range16{Lo: 0xa64f, Hi: 0xa64f, Stride: 0x1}, + unicode.Range16{Lo: 0xa651, Hi: 0xa651, Stride: 0x1}, + unicode.Range16{Lo: 0xa653, Hi: 0xa653, Stride: 0x1}, + unicode.Range16{Lo: 0xa655, Hi: 0xa655, Stride: 0x1}, + unicode.Range16{Lo: 0xa657, Hi: 0xa657, Stride: 0x1}, + unicode.Range16{Lo: 0xa659, Hi: 0xa659, Stride: 0x1}, + unicode.Range16{Lo: 0xa65b, Hi: 0xa65b, Stride: 0x1}, + unicode.Range16{Lo: 
0xa65d, Hi: 0xa65d, Stride: 0x1}, + unicode.Range16{Lo: 0xa65f, Hi: 0xa65f, Stride: 0x1}, + unicode.Range16{Lo: 0xa661, Hi: 0xa661, Stride: 0x1}, + unicode.Range16{Lo: 0xa663, Hi: 0xa663, Stride: 0x1}, + unicode.Range16{Lo: 0xa665, Hi: 0xa665, Stride: 0x1}, + unicode.Range16{Lo: 0xa667, Hi: 0xa667, Stride: 0x1}, + unicode.Range16{Lo: 0xa669, Hi: 0xa669, Stride: 0x1}, + unicode.Range16{Lo: 0xa66b, Hi: 0xa66b, Stride: 0x1}, + unicode.Range16{Lo: 0xa66d, Hi: 0xa66d, Stride: 0x1}, + unicode.Range16{Lo: 0xa681, Hi: 0xa681, Stride: 0x1}, + unicode.Range16{Lo: 0xa683, Hi: 0xa683, Stride: 0x1}, + unicode.Range16{Lo: 0xa685, Hi: 0xa685, Stride: 0x1}, + unicode.Range16{Lo: 0xa687, Hi: 0xa687, Stride: 0x1}, + unicode.Range16{Lo: 0xa689, Hi: 0xa689, Stride: 0x1}, + unicode.Range16{Lo: 0xa68b, Hi: 0xa68b, Stride: 0x1}, + unicode.Range16{Lo: 0xa68d, Hi: 0xa68d, Stride: 0x1}, + unicode.Range16{Lo: 0xa68f, Hi: 0xa68f, Stride: 0x1}, + unicode.Range16{Lo: 0xa691, Hi: 0xa691, Stride: 0x1}, + unicode.Range16{Lo: 0xa693, Hi: 0xa693, Stride: 0x1}, + unicode.Range16{Lo: 0xa695, Hi: 0xa695, Stride: 0x1}, + unicode.Range16{Lo: 0xa697, Hi: 0xa697, Stride: 0x1}, + unicode.Range16{Lo: 0xa699, Hi: 0xa699, Stride: 0x1}, + unicode.Range16{Lo: 0xa69b, Hi: 0xa69b, Stride: 0x1}, + unicode.Range16{Lo: 0xa69c, Hi: 0xa69d, Stride: 0x1}, + unicode.Range16{Lo: 0xa723, Hi: 0xa723, Stride: 0x1}, + unicode.Range16{Lo: 0xa725, Hi: 0xa725, Stride: 0x1}, + unicode.Range16{Lo: 0xa727, Hi: 0xa727, Stride: 0x1}, + unicode.Range16{Lo: 0xa729, Hi: 0xa729, Stride: 0x1}, + unicode.Range16{Lo: 0xa72b, Hi: 0xa72b, Stride: 0x1}, + unicode.Range16{Lo: 0xa72d, Hi: 0xa72d, Stride: 0x1}, + unicode.Range16{Lo: 0xa72f, Hi: 0xa731, Stride: 0x1}, + unicode.Range16{Lo: 0xa733, Hi: 0xa733, Stride: 0x1}, + unicode.Range16{Lo: 0xa735, Hi: 0xa735, Stride: 0x1}, + unicode.Range16{Lo: 0xa737, Hi: 0xa737, Stride: 0x1}, + unicode.Range16{Lo: 0xa739, Hi: 0xa739, Stride: 0x1}, + unicode.Range16{Lo: 0xa73b, Hi: 0xa73b, Stride: 0x1}, + unicode.Range16{Lo: 0xa73d, Hi: 0xa73d, Stride: 0x1}, + unicode.Range16{Lo: 0xa73f, Hi: 0xa73f, Stride: 0x1}, + unicode.Range16{Lo: 0xa741, Hi: 0xa741, Stride: 0x1}, + unicode.Range16{Lo: 0xa743, Hi: 0xa743, Stride: 0x1}, + unicode.Range16{Lo: 0xa745, Hi: 0xa745, Stride: 0x1}, + unicode.Range16{Lo: 0xa747, Hi: 0xa747, Stride: 0x1}, + unicode.Range16{Lo: 0xa749, Hi: 0xa749, Stride: 0x1}, + unicode.Range16{Lo: 0xa74b, Hi: 0xa74b, Stride: 0x1}, + unicode.Range16{Lo: 0xa74d, Hi: 0xa74d, Stride: 0x1}, + unicode.Range16{Lo: 0xa74f, Hi: 0xa74f, Stride: 0x1}, + unicode.Range16{Lo: 0xa751, Hi: 0xa751, Stride: 0x1}, + unicode.Range16{Lo: 0xa753, Hi: 0xa753, Stride: 0x1}, + unicode.Range16{Lo: 0xa755, Hi: 0xa755, Stride: 0x1}, + unicode.Range16{Lo: 0xa757, Hi: 0xa757, Stride: 0x1}, + unicode.Range16{Lo: 0xa759, Hi: 0xa759, Stride: 0x1}, + unicode.Range16{Lo: 0xa75b, Hi: 0xa75b, Stride: 0x1}, + unicode.Range16{Lo: 0xa75d, Hi: 0xa75d, Stride: 0x1}, + unicode.Range16{Lo: 0xa75f, Hi: 0xa75f, Stride: 0x1}, + unicode.Range16{Lo: 0xa761, Hi: 0xa761, Stride: 0x1}, + unicode.Range16{Lo: 0xa763, Hi: 0xa763, Stride: 0x1}, + unicode.Range16{Lo: 0xa765, Hi: 0xa765, Stride: 0x1}, + unicode.Range16{Lo: 0xa767, Hi: 0xa767, Stride: 0x1}, + unicode.Range16{Lo: 0xa769, Hi: 0xa769, Stride: 0x1}, + unicode.Range16{Lo: 0xa76b, Hi: 0xa76b, Stride: 0x1}, + unicode.Range16{Lo: 0xa76d, Hi: 0xa76d, Stride: 0x1}, + unicode.Range16{Lo: 0xa76f, Hi: 0xa76f, Stride: 0x1}, + unicode.Range16{Lo: 0xa770, Hi: 0xa770, Stride: 0x1}, + unicode.Range16{Lo: 0xa771, Hi: 0xa778, 
Stride: 0x1}, + unicode.Range16{Lo: 0xa77a, Hi: 0xa77a, Stride: 0x1}, + unicode.Range16{Lo: 0xa77c, Hi: 0xa77c, Stride: 0x1}, + unicode.Range16{Lo: 0xa77f, Hi: 0xa77f, Stride: 0x1}, + unicode.Range16{Lo: 0xa781, Hi: 0xa781, Stride: 0x1}, + unicode.Range16{Lo: 0xa783, Hi: 0xa783, Stride: 0x1}, + unicode.Range16{Lo: 0xa785, Hi: 0xa785, Stride: 0x1}, + unicode.Range16{Lo: 0xa787, Hi: 0xa787, Stride: 0x1}, + unicode.Range16{Lo: 0xa78c, Hi: 0xa78c, Stride: 0x1}, + unicode.Range16{Lo: 0xa78e, Hi: 0xa78e, Stride: 0x1}, + unicode.Range16{Lo: 0xa791, Hi: 0xa791, Stride: 0x1}, + unicode.Range16{Lo: 0xa793, Hi: 0xa795, Stride: 0x1}, + unicode.Range16{Lo: 0xa797, Hi: 0xa797, Stride: 0x1}, + unicode.Range16{Lo: 0xa799, Hi: 0xa799, Stride: 0x1}, + unicode.Range16{Lo: 0xa79b, Hi: 0xa79b, Stride: 0x1}, + unicode.Range16{Lo: 0xa79d, Hi: 0xa79d, Stride: 0x1}, + unicode.Range16{Lo: 0xa79f, Hi: 0xa79f, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a1, Hi: 0xa7a1, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a3, Hi: 0xa7a3, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a5, Hi: 0xa7a5, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a7, Hi: 0xa7a7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a9, Hi: 0xa7a9, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b5, Hi: 0xa7b5, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b7, Hi: 0xa7b7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f8, Hi: 0xa7f9, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fa, Hi: 0xa7fa, Stride: 0x1}, + unicode.Range16{Lo: 0xab30, Hi: 0xab5a, Stride: 0x1}, + unicode.Range16{Lo: 0xab5c, Hi: 0xab5f, Stride: 0x1}, + unicode.Range16{Lo: 0xab60, Hi: 0xab65, Stride: 0x1}, + unicode.Range16{Lo: 0xab70, Hi: 0xabbf, Stride: 0x1}, + unicode.Range16{Lo: 0xfb00, Hi: 0xfb06, Stride: 0x1}, + unicode.Range16{Lo: 0xfb13, Hi: 0xfb17, Stride: 0x1}, + unicode.Range16{Lo: 0xff41, Hi: 0xff5a, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10428, Hi: 0x1044f, Stride: 0x1}, + unicode.Range32{Lo: 0x104d8, Hi: 0x104fb, Stride: 0x1}, + unicode.Range32{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 0x1}, + unicode.Range32{Lo: 0x118c0, Hi: 0x118df, Stride: 0x1}, + unicode.Range32{Lo: 0x1d41a, Hi: 0x1d433, Stride: 0x1}, + unicode.Range32{Lo: 0x1d44e, Hi: 0x1d454, Stride: 0x1}, + unicode.Range32{Lo: 0x1d456, Hi: 0x1d467, Stride: 0x1}, + unicode.Range32{Lo: 0x1d482, Hi: 0x1d49b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4b6, Hi: 0x1d4b9, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4c5, Hi: 0x1d4cf, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4ea, Hi: 0x1d503, Stride: 0x1}, + unicode.Range32{Lo: 0x1d51e, Hi: 0x1d537, Stride: 0x1}, + unicode.Range32{Lo: 0x1d552, Hi: 0x1d56b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d586, Hi: 0x1d59f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d5ba, Hi: 0x1d5d3, Stride: 0x1}, + unicode.Range32{Lo: 0x1d5ee, Hi: 0x1d607, Stride: 0x1}, + unicode.Range32{Lo: 0x1d622, Hi: 0x1d63b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d656, Hi: 0x1d66f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d68a, Hi: 0x1d6a5, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6dc, Hi: 0x1d6e1, Stride: 0x1}, + unicode.Range32{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 0x1}, + unicode.Range32{Lo: 0x1d716, Hi: 0x1d71b, Stride: 0x1}, + unicode.Range32{Lo: 0x1d736, Hi: 0x1d74e, Stride: 0x1}, + unicode.Range32{Lo: 0x1d750, Hi: 0x1d755, Stride: 0x1}, + unicode.Range32{Lo: 0x1d770, Hi: 0x1d788, Stride: 0x1}, + unicode.Range32{Lo: 0x1d78a, Hi: 0x1d78f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d7aa, Hi: 
0x1d7c2, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d7c4, Hi: 0x1d7c9, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d7cb, Hi: 0x1d7cb, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e922, Hi: 0x1e943, Stride: 0x1},
+ },
+ LatinOffset: 6,
+}
+
+var _SentenceNumeric = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x30, Hi: 0x39, Stride: 0x1},
+ unicode.Range16{Lo: 0x660, Hi: 0x669, Stride: 0x1},
+ unicode.Range16{Lo: 0x66b, Hi: 0x66c, Stride: 0x1},
+ unicode.Range16{Lo: 0x6f0, Hi: 0x6f9, Stride: 0x1},
+ unicode.Range16{Lo: 0x7c0, Hi: 0x7c9, Stride: 0x1},
+ unicode.Range16{Lo: 0x966, Hi: 0x96f, Stride: 0x1},
+ unicode.Range16{Lo: 0x9e6, Hi: 0x9ef, Stride: 0x1},
+ unicode.Range16{Lo: 0xa66, Hi: 0xa6f, Stride: 0x1},
+ unicode.Range16{Lo: 0xae6, Hi: 0xaef, Stride: 0x1},
+ unicode.Range16{Lo: 0xb66, Hi: 0xb6f, Stride: 0x1},
+ unicode.Range16{Lo: 0xbe6, Hi: 0xbef, Stride: 0x1},
+ unicode.Range16{Lo: 0xc66, Hi: 0xc6f, Stride: 0x1},
+ unicode.Range16{Lo: 0xce6, Hi: 0xcef, Stride: 0x1},
+ unicode.Range16{Lo: 0xd66, Hi: 0xd6f, Stride: 0x1},
+ unicode.Range16{Lo: 0xde6, Hi: 0xdef, Stride: 0x1},
+ unicode.Range16{Lo: 0xe50, Hi: 0xe59, Stride: 0x1},
+ unicode.Range16{Lo: 0xed0, Hi: 0xed9, Stride: 0x1},
+ unicode.Range16{Lo: 0xf20, Hi: 0xf29, Stride: 0x1},
+ unicode.Range16{Lo: 0x1040, Hi: 0x1049, Stride: 0x1},
+ unicode.Range16{Lo: 0x1090, Hi: 0x1099, Stride: 0x1},
+ unicode.Range16{Lo: 0x17e0, Hi: 0x17e9, Stride: 0x1},
+ unicode.Range16{Lo: 0x1810, Hi: 0x1819, Stride: 0x1},
+ unicode.Range16{Lo: 0x1946, Hi: 0x194f, Stride: 0x1},
+ unicode.Range16{Lo: 0x19d0, Hi: 0x19d9, Stride: 0x1},
+ unicode.Range16{Lo: 0x1a80, Hi: 0x1a89, Stride: 0x1},
+ unicode.Range16{Lo: 0x1a90, Hi: 0x1a99, Stride: 0x1},
+ unicode.Range16{Lo: 0x1b50, Hi: 0x1b59, Stride: 0x1},
+ unicode.Range16{Lo: 0x1bb0, Hi: 0x1bb9, Stride: 0x1},
+ unicode.Range16{Lo: 0x1c40, Hi: 0x1c49, Stride: 0x1},
+ unicode.Range16{Lo: 0x1c50, Hi: 0x1c59, Stride: 0x1},
+ unicode.Range16{Lo: 0xa620, Hi: 0xa629, Stride: 0x1},
+ unicode.Range16{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 0x1},
+ unicode.Range16{Lo: 0xa900, Hi: 0xa909, Stride: 0x1},
+ unicode.Range16{Lo: 0xa9d0, Hi: 0xa9d9, Stride: 0x1},
+ unicode.Range16{Lo: 0xa9f0, Hi: 0xa9f9, Stride: 0x1},
+ unicode.Range16{Lo: 0xaa50, Hi: 0xaa59, Stride: 0x1},
+ unicode.Range16{Lo: 0xabf0, Hi: 0xabf9, Stride: 0x1},
+ },
+ R32: []unicode.Range32{
+ unicode.Range32{Lo: 0x104a0, Hi: 0x104a9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11066, Hi: 0x1106f, Stride: 0x1},
+ unicode.Range32{Lo: 0x110f0, Hi: 0x110f9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11136, Hi: 0x1113f, Stride: 0x1},
+ unicode.Range32{Lo: 0x111d0, Hi: 0x111d9, Stride: 0x1},
+ unicode.Range32{Lo: 0x112f0, Hi: 0x112f9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11450, Hi: 0x11459, Stride: 0x1},
+ unicode.Range32{Lo: 0x114d0, Hi: 0x114d9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11650, Hi: 0x11659, Stride: 0x1},
+ unicode.Range32{Lo: 0x116c0, Hi: 0x116c9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11730, Hi: 0x11739, Stride: 0x1},
+ unicode.Range32{Lo: 0x118e0, Hi: 0x118e9, Stride: 0x1},
+ unicode.Range32{Lo: 0x11c50, Hi: 0x11c59, Stride: 0x1},
+ unicode.Range32{Lo: 0x16a60, Hi: 0x16a69, Stride: 0x1},
+ unicode.Range32{Lo: 0x16b50, Hi: 0x16b59, Stride: 0x1},
+ unicode.Range32{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 0x1},
+ unicode.Range32{Lo: 0x1e950, Hi: 0x1e959, Stride: 0x1},
+ },
+ LatinOffset: 1,
+}
+
+var _SentenceOLetter = &unicode.RangeTable{
+ R16: []unicode.Range16{
+ unicode.Range16{Lo: 0x1bb, Hi: 0x1bb, Stride: 0x1},
+ unicode.Range16{Lo: 0x1c0, Hi: 0x1c3, Stride: 0x1},
+ unicode.Range16{Lo: 0x294, Hi: 0x294, Stride: 0x1}, + unicode.Range16{Lo: 0x2b9, Hi: 0x2bf, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6, Hi: 0x2d1, Stride: 0x1}, + unicode.Range16{Lo: 0x2ec, Hi: 0x2ec, Stride: 0x1}, + unicode.Range16{Lo: 0x2ee, Hi: 0x2ee, Stride: 0x1}, + unicode.Range16{Lo: 0x374, Hi: 0x374, Stride: 0x1}, + unicode.Range16{Lo: 0x559, Hi: 0x559, Stride: 0x1}, + unicode.Range16{Lo: 0x5d0, Hi: 0x5ea, Stride: 0x1}, + unicode.Range16{Lo: 0x5f0, Hi: 0x5f2, Stride: 0x1}, + unicode.Range16{Lo: 0x5f3, Hi: 0x5f3, Stride: 0x1}, + unicode.Range16{Lo: 0x620, Hi: 0x63f, Stride: 0x1}, + unicode.Range16{Lo: 0x640, Hi: 0x640, Stride: 0x1}, + unicode.Range16{Lo: 0x641, Hi: 0x64a, Stride: 0x1}, + unicode.Range16{Lo: 0x66e, Hi: 0x66f, Stride: 0x1}, + unicode.Range16{Lo: 0x671, Hi: 0x6d3, Stride: 0x1}, + unicode.Range16{Lo: 0x6d5, Hi: 0x6d5, Stride: 0x1}, + unicode.Range16{Lo: 0x6e5, Hi: 0x6e6, Stride: 0x1}, + unicode.Range16{Lo: 0x6ee, Hi: 0x6ef, Stride: 0x1}, + unicode.Range16{Lo: 0x6fa, Hi: 0x6fc, Stride: 0x1}, + unicode.Range16{Lo: 0x6ff, Hi: 0x6ff, Stride: 0x1}, + unicode.Range16{Lo: 0x710, Hi: 0x710, Stride: 0x1}, + unicode.Range16{Lo: 0x712, Hi: 0x72f, Stride: 0x1}, + unicode.Range16{Lo: 0x74d, Hi: 0x7a5, Stride: 0x1}, + unicode.Range16{Lo: 0x7b1, Hi: 0x7b1, Stride: 0x1}, + unicode.Range16{Lo: 0x7ca, Hi: 0x7ea, Stride: 0x1}, + unicode.Range16{Lo: 0x7f4, Hi: 0x7f5, Stride: 0x1}, + unicode.Range16{Lo: 0x7fa, Hi: 0x7fa, Stride: 0x1}, + unicode.Range16{Lo: 0x800, Hi: 0x815, Stride: 0x1}, + unicode.Range16{Lo: 0x81a, Hi: 0x81a, Stride: 0x1}, + unicode.Range16{Lo: 0x824, Hi: 0x824, Stride: 0x1}, + unicode.Range16{Lo: 0x828, Hi: 0x828, Stride: 0x1}, + unicode.Range16{Lo: 0x840, Hi: 0x858, Stride: 0x1}, + unicode.Range16{Lo: 0x8a0, Hi: 0x8b4, Stride: 0x1}, + unicode.Range16{Lo: 0x8b6, Hi: 0x8bd, Stride: 0x1}, + unicode.Range16{Lo: 0x904, Hi: 0x939, Stride: 0x1}, + unicode.Range16{Lo: 0x93d, Hi: 0x93d, Stride: 0x1}, + unicode.Range16{Lo: 0x950, Hi: 0x950, Stride: 0x1}, + unicode.Range16{Lo: 0x958, Hi: 0x961, Stride: 0x1}, + unicode.Range16{Lo: 0x971, Hi: 0x971, Stride: 0x1}, + unicode.Range16{Lo: 0x972, Hi: 0x980, Stride: 0x1}, + unicode.Range16{Lo: 0x985, Hi: 0x98c, Stride: 0x1}, + unicode.Range16{Lo: 0x98f, Hi: 0x990, Stride: 0x1}, + unicode.Range16{Lo: 0x993, Hi: 0x9a8, Stride: 0x1}, + unicode.Range16{Lo: 0x9aa, Hi: 0x9b0, Stride: 0x1}, + unicode.Range16{Lo: 0x9b2, Hi: 0x9b2, Stride: 0x1}, + unicode.Range16{Lo: 0x9b6, Hi: 0x9b9, Stride: 0x1}, + unicode.Range16{Lo: 0x9bd, Hi: 0x9bd, Stride: 0x1}, + unicode.Range16{Lo: 0x9ce, Hi: 0x9ce, Stride: 0x1}, + unicode.Range16{Lo: 0x9dc, Hi: 0x9dd, Stride: 0x1}, + unicode.Range16{Lo: 0x9df, Hi: 0x9e1, Stride: 0x1}, + unicode.Range16{Lo: 0x9f0, Hi: 0x9f1, Stride: 0x1}, + unicode.Range16{Lo: 0xa05, Hi: 0xa0a, Stride: 0x1}, + unicode.Range16{Lo: 0xa0f, Hi: 0xa10, Stride: 0x1}, + unicode.Range16{Lo: 0xa13, Hi: 0xa28, Stride: 0x1}, + unicode.Range16{Lo: 0xa2a, Hi: 0xa30, Stride: 0x1}, + unicode.Range16{Lo: 0xa32, Hi: 0xa33, Stride: 0x1}, + unicode.Range16{Lo: 0xa35, Hi: 0xa36, Stride: 0x1}, + unicode.Range16{Lo: 0xa38, Hi: 0xa39, Stride: 0x1}, + unicode.Range16{Lo: 0xa59, Hi: 0xa5c, Stride: 0x1}, + unicode.Range16{Lo: 0xa5e, Hi: 0xa5e, Stride: 0x1}, + unicode.Range16{Lo: 0xa72, Hi: 0xa74, Stride: 0x1}, + unicode.Range16{Lo: 0xa85, Hi: 0xa8d, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f, Hi: 0xa91, Stride: 0x1}, + unicode.Range16{Lo: 0xa93, Hi: 0xaa8, Stride: 0x1}, + unicode.Range16{Lo: 0xaaa, Hi: 0xab0, Stride: 0x1}, + unicode.Range16{Lo: 0xab2, Hi: 0xab3, 
Stride: 0x1}, + unicode.Range16{Lo: 0xab5, Hi: 0xab9, Stride: 0x1}, + unicode.Range16{Lo: 0xabd, Hi: 0xabd, Stride: 0x1}, + unicode.Range16{Lo: 0xad0, Hi: 0xad0, Stride: 0x1}, + unicode.Range16{Lo: 0xae0, Hi: 0xae1, Stride: 0x1}, + unicode.Range16{Lo: 0xaf9, Hi: 0xaf9, Stride: 0x1}, + unicode.Range16{Lo: 0xb05, Hi: 0xb0c, Stride: 0x1}, + unicode.Range16{Lo: 0xb0f, Hi: 0xb10, Stride: 0x1}, + unicode.Range16{Lo: 0xb13, Hi: 0xb28, Stride: 0x1}, + unicode.Range16{Lo: 0xb2a, Hi: 0xb30, Stride: 0x1}, + unicode.Range16{Lo: 0xb32, Hi: 0xb33, Stride: 0x1}, + unicode.Range16{Lo: 0xb35, Hi: 0xb39, Stride: 0x1}, + unicode.Range16{Lo: 0xb3d, Hi: 0xb3d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5c, Hi: 0xb5d, Stride: 0x1}, + unicode.Range16{Lo: 0xb5f, Hi: 0xb61, Stride: 0x1}, + unicode.Range16{Lo: 0xb71, Hi: 0xb71, Stride: 0x1}, + unicode.Range16{Lo: 0xb83, Hi: 0xb83, Stride: 0x1}, + unicode.Range16{Lo: 0xb85, Hi: 0xb8a, Stride: 0x1}, + unicode.Range16{Lo: 0xb8e, Hi: 0xb90, Stride: 0x1}, + unicode.Range16{Lo: 0xb92, Hi: 0xb95, Stride: 0x1}, + unicode.Range16{Lo: 0xb99, Hi: 0xb9a, Stride: 0x1}, + unicode.Range16{Lo: 0xb9c, Hi: 0xb9c, Stride: 0x1}, + unicode.Range16{Lo: 0xb9e, Hi: 0xb9f, Stride: 0x1}, + unicode.Range16{Lo: 0xba3, Hi: 0xba4, Stride: 0x1}, + unicode.Range16{Lo: 0xba8, Hi: 0xbaa, Stride: 0x1}, + unicode.Range16{Lo: 0xbae, Hi: 0xbb9, Stride: 0x1}, + unicode.Range16{Lo: 0xbd0, Hi: 0xbd0, Stride: 0x1}, + unicode.Range16{Lo: 0xc05, Hi: 0xc0c, Stride: 0x1}, + unicode.Range16{Lo: 0xc0e, Hi: 0xc10, Stride: 0x1}, + unicode.Range16{Lo: 0xc12, Hi: 0xc28, Stride: 0x1}, + unicode.Range16{Lo: 0xc2a, Hi: 0xc39, Stride: 0x1}, + unicode.Range16{Lo: 0xc3d, Hi: 0xc3d, Stride: 0x1}, + unicode.Range16{Lo: 0xc58, Hi: 0xc5a, Stride: 0x1}, + unicode.Range16{Lo: 0xc60, Hi: 0xc61, Stride: 0x1}, + unicode.Range16{Lo: 0xc80, Hi: 0xc80, Stride: 0x1}, + unicode.Range16{Lo: 0xc85, Hi: 0xc8c, Stride: 0x1}, + unicode.Range16{Lo: 0xc8e, Hi: 0xc90, Stride: 0x1}, + unicode.Range16{Lo: 0xc92, Hi: 0xca8, Stride: 0x1}, + unicode.Range16{Lo: 0xcaa, Hi: 0xcb3, Stride: 0x1}, + unicode.Range16{Lo: 0xcb5, Hi: 0xcb9, Stride: 0x1}, + unicode.Range16{Lo: 0xcbd, Hi: 0xcbd, Stride: 0x1}, + unicode.Range16{Lo: 0xcde, Hi: 0xcde, Stride: 0x1}, + unicode.Range16{Lo: 0xce0, Hi: 0xce1, Stride: 0x1}, + unicode.Range16{Lo: 0xcf1, Hi: 0xcf2, Stride: 0x1}, + unicode.Range16{Lo: 0xd05, Hi: 0xd0c, Stride: 0x1}, + unicode.Range16{Lo: 0xd0e, Hi: 0xd10, Stride: 0x1}, + unicode.Range16{Lo: 0xd12, Hi: 0xd3a, Stride: 0x1}, + unicode.Range16{Lo: 0xd3d, Hi: 0xd3d, Stride: 0x1}, + unicode.Range16{Lo: 0xd4e, Hi: 0xd4e, Stride: 0x1}, + unicode.Range16{Lo: 0xd54, Hi: 0xd56, Stride: 0x1}, + unicode.Range16{Lo: 0xd5f, Hi: 0xd61, Stride: 0x1}, + unicode.Range16{Lo: 0xd7a, Hi: 0xd7f, Stride: 0x1}, + unicode.Range16{Lo: 0xd85, Hi: 0xd96, Stride: 0x1}, + unicode.Range16{Lo: 0xd9a, Hi: 0xdb1, Stride: 0x1}, + unicode.Range16{Lo: 0xdb3, Hi: 0xdbb, Stride: 0x1}, + unicode.Range16{Lo: 0xdbd, Hi: 0xdbd, Stride: 0x1}, + unicode.Range16{Lo: 0xdc0, Hi: 0xdc6, Stride: 0x1}, + unicode.Range16{Lo: 0xe01, Hi: 0xe30, Stride: 0x1}, + unicode.Range16{Lo: 0xe32, Hi: 0xe33, Stride: 0x1}, + unicode.Range16{Lo: 0xe40, Hi: 0xe45, Stride: 0x1}, + unicode.Range16{Lo: 0xe46, Hi: 0xe46, Stride: 0x1}, + unicode.Range16{Lo: 0xe81, Hi: 0xe82, Stride: 0x1}, + unicode.Range16{Lo: 0xe84, Hi: 0xe84, Stride: 0x1}, + unicode.Range16{Lo: 0xe87, Hi: 0xe88, Stride: 0x1}, + unicode.Range16{Lo: 0xe8a, Hi: 0xe8a, Stride: 0x1}, + unicode.Range16{Lo: 0xe8d, Hi: 0xe8d, Stride: 0x1}, + unicode.Range16{Lo: 0xe94, 
Hi: 0xe97, Stride: 0x1}, + unicode.Range16{Lo: 0xe99, Hi: 0xe9f, Stride: 0x1}, + unicode.Range16{Lo: 0xea1, Hi: 0xea3, Stride: 0x1}, + unicode.Range16{Lo: 0xea5, Hi: 0xea5, Stride: 0x1}, + unicode.Range16{Lo: 0xea7, Hi: 0xea7, Stride: 0x1}, + unicode.Range16{Lo: 0xeaa, Hi: 0xeab, Stride: 0x1}, + unicode.Range16{Lo: 0xead, Hi: 0xeb0, Stride: 0x1}, + unicode.Range16{Lo: 0xeb2, Hi: 0xeb3, Stride: 0x1}, + unicode.Range16{Lo: 0xebd, Hi: 0xebd, Stride: 0x1}, + unicode.Range16{Lo: 0xec0, Hi: 0xec4, Stride: 0x1}, + unicode.Range16{Lo: 0xec6, Hi: 0xec6, Stride: 0x1}, + unicode.Range16{Lo: 0xedc, Hi: 0xedf, Stride: 0x1}, + unicode.Range16{Lo: 0xf00, Hi: 0xf00, Stride: 0x1}, + unicode.Range16{Lo: 0xf40, Hi: 0xf47, Stride: 0x1}, + unicode.Range16{Lo: 0xf49, Hi: 0xf6c, Stride: 0x1}, + unicode.Range16{Lo: 0xf88, Hi: 0xf8c, Stride: 0x1}, + unicode.Range16{Lo: 0x1000, Hi: 0x102a, Stride: 0x1}, + unicode.Range16{Lo: 0x103f, Hi: 0x103f, Stride: 0x1}, + unicode.Range16{Lo: 0x1050, Hi: 0x1055, Stride: 0x1}, + unicode.Range16{Lo: 0x105a, Hi: 0x105d, Stride: 0x1}, + unicode.Range16{Lo: 0x1061, Hi: 0x1061, Stride: 0x1}, + unicode.Range16{Lo: 0x1065, Hi: 0x1066, Stride: 0x1}, + unicode.Range16{Lo: 0x106e, Hi: 0x1070, Stride: 0x1}, + unicode.Range16{Lo: 0x1075, Hi: 0x1081, Stride: 0x1}, + unicode.Range16{Lo: 0x108e, Hi: 0x108e, Stride: 0x1}, + unicode.Range16{Lo: 0x10d0, Hi: 0x10fa, Stride: 0x1}, + unicode.Range16{Lo: 0x10fc, Hi: 0x10fc, Stride: 0x1}, + unicode.Range16{Lo: 0x10fd, Hi: 0x1248, Stride: 0x1}, + unicode.Range16{Lo: 0x124a, Hi: 0x124d, Stride: 0x1}, + unicode.Range16{Lo: 0x1250, Hi: 0x1256, Stride: 0x1}, + unicode.Range16{Lo: 0x1258, Hi: 0x1258, Stride: 0x1}, + unicode.Range16{Lo: 0x125a, Hi: 0x125d, Stride: 0x1}, + unicode.Range16{Lo: 0x1260, Hi: 0x1288, Stride: 0x1}, + unicode.Range16{Lo: 0x128a, Hi: 0x128d, Stride: 0x1}, + unicode.Range16{Lo: 0x1290, Hi: 0x12b0, Stride: 0x1}, + unicode.Range16{Lo: 0x12b2, Hi: 0x12b5, Stride: 0x1}, + unicode.Range16{Lo: 0x12b8, Hi: 0x12be, Stride: 0x1}, + unicode.Range16{Lo: 0x12c0, Hi: 0x12c0, Stride: 0x1}, + unicode.Range16{Lo: 0x12c2, Hi: 0x12c5, Stride: 0x1}, + unicode.Range16{Lo: 0x12c8, Hi: 0x12d6, Stride: 0x1}, + unicode.Range16{Lo: 0x12d8, Hi: 0x1310, Stride: 0x1}, + unicode.Range16{Lo: 0x1312, Hi: 0x1315, Stride: 0x1}, + unicode.Range16{Lo: 0x1318, Hi: 0x135a, Stride: 0x1}, + unicode.Range16{Lo: 0x1380, Hi: 0x138f, Stride: 0x1}, + unicode.Range16{Lo: 0x1401, Hi: 0x166c, Stride: 0x1}, + unicode.Range16{Lo: 0x166f, Hi: 0x167f, Stride: 0x1}, + unicode.Range16{Lo: 0x1681, Hi: 0x169a, Stride: 0x1}, + unicode.Range16{Lo: 0x16a0, Hi: 0x16ea, Stride: 0x1}, + unicode.Range16{Lo: 0x16ee, Hi: 0x16f0, Stride: 0x1}, + unicode.Range16{Lo: 0x16f1, Hi: 0x16f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1700, Hi: 0x170c, Stride: 0x1}, + unicode.Range16{Lo: 0x170e, Hi: 0x1711, Stride: 0x1}, + unicode.Range16{Lo: 0x1720, Hi: 0x1731, Stride: 0x1}, + unicode.Range16{Lo: 0x1740, Hi: 0x1751, Stride: 0x1}, + unicode.Range16{Lo: 0x1760, Hi: 0x176c, Stride: 0x1}, + unicode.Range16{Lo: 0x176e, Hi: 0x1770, Stride: 0x1}, + unicode.Range16{Lo: 0x1780, Hi: 0x17b3, Stride: 0x1}, + unicode.Range16{Lo: 0x17d7, Hi: 0x17d7, Stride: 0x1}, + unicode.Range16{Lo: 0x17dc, Hi: 0x17dc, Stride: 0x1}, + unicode.Range16{Lo: 0x1820, Hi: 0x1842, Stride: 0x1}, + unicode.Range16{Lo: 0x1843, Hi: 0x1843, Stride: 0x1}, + unicode.Range16{Lo: 0x1844, Hi: 0x1877, Stride: 0x1}, + unicode.Range16{Lo: 0x1880, Hi: 0x1884, Stride: 0x1}, + unicode.Range16{Lo: 0x1887, Hi: 0x18a8, Stride: 0x1}, + unicode.Range16{Lo: 0x18aa, 
Hi: 0x18aa, Stride: 0x1}, + unicode.Range16{Lo: 0x18b0, Hi: 0x18f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1900, Hi: 0x191e, Stride: 0x1}, + unicode.Range16{Lo: 0x1950, Hi: 0x196d, Stride: 0x1}, + unicode.Range16{Lo: 0x1970, Hi: 0x1974, Stride: 0x1}, + unicode.Range16{Lo: 0x1980, Hi: 0x19ab, Stride: 0x1}, + unicode.Range16{Lo: 0x19b0, Hi: 0x19c9, Stride: 0x1}, + unicode.Range16{Lo: 0x1a00, Hi: 0x1a16, Stride: 0x1}, + unicode.Range16{Lo: 0x1a20, Hi: 0x1a54, Stride: 0x1}, + unicode.Range16{Lo: 0x1aa7, Hi: 0x1aa7, Stride: 0x1}, + unicode.Range16{Lo: 0x1b05, Hi: 0x1b33, Stride: 0x1}, + unicode.Range16{Lo: 0x1b45, Hi: 0x1b4b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b83, Hi: 0x1ba0, Stride: 0x1}, + unicode.Range16{Lo: 0x1bae, Hi: 0x1baf, Stride: 0x1}, + unicode.Range16{Lo: 0x1bba, Hi: 0x1be5, Stride: 0x1}, + unicode.Range16{Lo: 0x1c00, Hi: 0x1c23, Stride: 0x1}, + unicode.Range16{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 0x1}, + unicode.Range16{Lo: 0x1c5a, Hi: 0x1c77, Stride: 0x1}, + unicode.Range16{Lo: 0x1c78, Hi: 0x1c7d, Stride: 0x1}, + unicode.Range16{Lo: 0x1ce9, Hi: 0x1cec, Stride: 0x1}, + unicode.Range16{Lo: 0x1cee, Hi: 0x1cf1, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 0x1}, + unicode.Range16{Lo: 0x2135, Hi: 0x2138, Stride: 0x1}, + unicode.Range16{Lo: 0x2180, Hi: 0x2182, Stride: 0x1}, + unicode.Range16{Lo: 0x2185, Hi: 0x2188, Stride: 0x1}, + unicode.Range16{Lo: 0x2d30, Hi: 0x2d67, Stride: 0x1}, + unicode.Range16{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 0x1}, + unicode.Range16{Lo: 0x2d80, Hi: 0x2d96, Stride: 0x1}, + unicode.Range16{Lo: 0x2da0, Hi: 0x2da6, Stride: 0x1}, + unicode.Range16{Lo: 0x2da8, Hi: 0x2dae, Stride: 0x1}, + unicode.Range16{Lo: 0x2db0, Hi: 0x2db6, Stride: 0x1}, + unicode.Range16{Lo: 0x2db8, Hi: 0x2dbe, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dc8, Hi: 0x2dce, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 0x1}, + unicode.Range16{Lo: 0x2dd8, Hi: 0x2dde, Stride: 0x1}, + unicode.Range16{Lo: 0x2e2f, Hi: 0x2e2f, Stride: 0x1}, + unicode.Range16{Lo: 0x3005, Hi: 0x3005, Stride: 0x1}, + unicode.Range16{Lo: 0x3006, Hi: 0x3006, Stride: 0x1}, + unicode.Range16{Lo: 0x3007, Hi: 0x3007, Stride: 0x1}, + unicode.Range16{Lo: 0x3021, Hi: 0x3029, Stride: 0x1}, + unicode.Range16{Lo: 0x3031, Hi: 0x3035, Stride: 0x1}, + unicode.Range16{Lo: 0x3038, Hi: 0x303a, Stride: 0x1}, + unicode.Range16{Lo: 0x303b, Hi: 0x303b, Stride: 0x1}, + unicode.Range16{Lo: 0x303c, Hi: 0x303c, Stride: 0x1}, + unicode.Range16{Lo: 0x3041, Hi: 0x3096, Stride: 0x1}, + unicode.Range16{Lo: 0x309d, Hi: 0x309e, Stride: 0x1}, + unicode.Range16{Lo: 0x309f, Hi: 0x309f, Stride: 0x1}, + unicode.Range16{Lo: 0x30a1, Hi: 0x30fa, Stride: 0x1}, + unicode.Range16{Lo: 0x30fc, Hi: 0x30fe, Stride: 0x1}, + unicode.Range16{Lo: 0x30ff, Hi: 0x30ff, Stride: 0x1}, + unicode.Range16{Lo: 0x3105, Hi: 0x312d, Stride: 0x1}, + unicode.Range16{Lo: 0x3131, Hi: 0x318e, Stride: 0x1}, + unicode.Range16{Lo: 0x31a0, Hi: 0x31ba, Stride: 0x1}, + unicode.Range16{Lo: 0x31f0, Hi: 0x31ff, Stride: 0x1}, + unicode.Range16{Lo: 0x3400, Hi: 0x4db5, Stride: 0x1}, + unicode.Range16{Lo: 0x4e00, Hi: 0x9fd5, Stride: 0x1}, + unicode.Range16{Lo: 0xa000, Hi: 0xa014, Stride: 0x1}, + unicode.Range16{Lo: 0xa015, Hi: 0xa015, Stride: 0x1}, + unicode.Range16{Lo: 0xa016, Hi: 0xa48c, Stride: 0x1}, + unicode.Range16{Lo: 0xa4d0, Hi: 0xa4f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa4f8, Hi: 0xa4fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa500, Hi: 0xa60b, Stride: 0x1}, + unicode.Range16{Lo: 0xa60c, Hi: 0xa60c, Stride: 0x1}, 
+ unicode.Range16{Lo: 0xa610, Hi: 0xa61f, Stride: 0x1}, + unicode.Range16{Lo: 0xa62a, Hi: 0xa62b, Stride: 0x1}, + unicode.Range16{Lo: 0xa66e, Hi: 0xa66e, Stride: 0x1}, + unicode.Range16{Lo: 0xa67f, Hi: 0xa67f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6a0, Hi: 0xa6e5, Stride: 0x1}, + unicode.Range16{Lo: 0xa6e6, Hi: 0xa6ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa717, Hi: 0xa71f, Stride: 0x1}, + unicode.Range16{Lo: 0xa788, Hi: 0xa788, Stride: 0x1}, + unicode.Range16{Lo: 0xa78f, Hi: 0xa78f, Stride: 0x1}, + unicode.Range16{Lo: 0xa7f7, Hi: 0xa7f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa7fb, Hi: 0xa801, Stride: 0x1}, + unicode.Range16{Lo: 0xa803, Hi: 0xa805, Stride: 0x1}, + unicode.Range16{Lo: 0xa807, Hi: 0xa80a, Stride: 0x1}, + unicode.Range16{Lo: 0xa80c, Hi: 0xa822, Stride: 0x1}, + unicode.Range16{Lo: 0xa840, Hi: 0xa873, Stride: 0x1}, + unicode.Range16{Lo: 0xa882, Hi: 0xa8b3, Stride: 0x1}, + unicode.Range16{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 0x1}, + unicode.Range16{Lo: 0xa8fd, Hi: 0xa8fd, Stride: 0x1}, + unicode.Range16{Lo: 0xa90a, Hi: 0xa925, Stride: 0x1}, + unicode.Range16{Lo: 0xa930, Hi: 0xa946, Stride: 0x1}, + unicode.Range16{Lo: 0xa960, Hi: 0xa97c, Stride: 0x1}, + unicode.Range16{Lo: 0xa984, Hi: 0xa9b2, Stride: 0x1}, + unicode.Range16{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e0, Hi: 0xa9e4, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e6, Hi: 0xa9e6, Stride: 0x1}, + unicode.Range16{Lo: 0xa9e7, Hi: 0xa9ef, Stride: 0x1}, + unicode.Range16{Lo: 0xa9fa, Hi: 0xa9fe, Stride: 0x1}, + unicode.Range16{Lo: 0xaa00, Hi: 0xaa28, Stride: 0x1}, + unicode.Range16{Lo: 0xaa40, Hi: 0xaa42, Stride: 0x1}, + unicode.Range16{Lo: 0xaa44, Hi: 0xaa4b, Stride: 0x1}, + unicode.Range16{Lo: 0xaa60, Hi: 0xaa6f, Stride: 0x1}, + unicode.Range16{Lo: 0xaa70, Hi: 0xaa70, Stride: 0x1}, + unicode.Range16{Lo: 0xaa71, Hi: 0xaa76, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7a, Hi: 0xaa7a, Stride: 0x1}, + unicode.Range16{Lo: 0xaa7e, Hi: 0xaaaf, Stride: 0x1}, + unicode.Range16{Lo: 0xaab1, Hi: 0xaab1, Stride: 0x1}, + unicode.Range16{Lo: 0xaab5, Hi: 0xaab6, Stride: 0x1}, + unicode.Range16{Lo: 0xaab9, Hi: 0xaabd, Stride: 0x1}, + unicode.Range16{Lo: 0xaac0, Hi: 0xaac0, Stride: 0x1}, + unicode.Range16{Lo: 0xaac2, Hi: 0xaac2, Stride: 0x1}, + unicode.Range16{Lo: 0xaadb, Hi: 0xaadc, Stride: 0x1}, + unicode.Range16{Lo: 0xaadd, Hi: 0xaadd, Stride: 0x1}, + unicode.Range16{Lo: 0xaae0, Hi: 0xaaea, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf2, Hi: 0xaaf2, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf3, Hi: 0xaaf4, Stride: 0x1}, + unicode.Range16{Lo: 0xab01, Hi: 0xab06, Stride: 0x1}, + unicode.Range16{Lo: 0xab09, Hi: 0xab0e, Stride: 0x1}, + unicode.Range16{Lo: 0xab11, Hi: 0xab16, Stride: 0x1}, + unicode.Range16{Lo: 0xab20, Hi: 0xab26, Stride: 0x1}, + unicode.Range16{Lo: 0xab28, Hi: 0xab2e, Stride: 0x1}, + unicode.Range16{Lo: 0xabc0, Hi: 0xabe2, Stride: 0x1}, + unicode.Range16{Lo: 0xac00, Hi: 0xd7a3, Stride: 0x1}, + unicode.Range16{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 0x1}, + unicode.Range16{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 0x1}, + unicode.Range16{Lo: 0xf900, Hi: 0xfa6d, Stride: 0x1}, + unicode.Range16{Lo: 0xfa70, Hi: 0xfad9, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 0x1}, + unicode.Range16{Lo: 0xfb1f, Hi: 0xfb28, Stride: 0x1}, + unicode.Range16{Lo: 0xfb2a, Hi: 0xfb36, Stride: 0x1}, + unicode.Range16{Lo: 0xfb38, Hi: 0xfb3c, Stride: 0x1}, + unicode.Range16{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 0x1}, + unicode.Range16{Lo: 0xfb40, Hi: 0xfb41, Stride: 0x1}, + unicode.Range16{Lo: 
0xfb43, Hi: 0xfb44, Stride: 0x1}, + unicode.Range16{Lo: 0xfb46, Hi: 0xfbb1, Stride: 0x1}, + unicode.Range16{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 0x1}, + unicode.Range16{Lo: 0xfd50, Hi: 0xfd8f, Stride: 0x1}, + unicode.Range16{Lo: 0xfd92, Hi: 0xfdc7, Stride: 0x1}, + unicode.Range16{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 0x1}, + unicode.Range16{Lo: 0xfe70, Hi: 0xfe74, Stride: 0x1}, + unicode.Range16{Lo: 0xfe76, Hi: 0xfefc, Stride: 0x1}, + unicode.Range16{Lo: 0xff66, Hi: 0xff6f, Stride: 0x1}, + unicode.Range16{Lo: 0xff70, Hi: 0xff70, Stride: 0x1}, + unicode.Range16{Lo: 0xff71, Hi: 0xff9d, Stride: 0x1}, + unicode.Range16{Lo: 0xffa0, Hi: 0xffbe, Stride: 0x1}, + unicode.Range16{Lo: 0xffc2, Hi: 0xffc7, Stride: 0x1}, + unicode.Range16{Lo: 0xffca, Hi: 0xffcf, Stride: 0x1}, + unicode.Range16{Lo: 0xffd2, Hi: 0xffd7, Stride: 0x1}, + unicode.Range16{Lo: 0xffda, Hi: 0xffdc, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10000, Hi: 0x1000b, Stride: 0x1}, + unicode.Range32{Lo: 0x1000d, Hi: 0x10026, Stride: 0x1}, + unicode.Range32{Lo: 0x10028, Hi: 0x1003a, Stride: 0x1}, + unicode.Range32{Lo: 0x1003c, Hi: 0x1003d, Stride: 0x1}, + unicode.Range32{Lo: 0x1003f, Hi: 0x1004d, Stride: 0x1}, + unicode.Range32{Lo: 0x10050, Hi: 0x1005d, Stride: 0x1}, + unicode.Range32{Lo: 0x10080, Hi: 0x100fa, Stride: 0x1}, + unicode.Range32{Lo: 0x10140, Hi: 0x10174, Stride: 0x1}, + unicode.Range32{Lo: 0x10280, Hi: 0x1029c, Stride: 0x1}, + unicode.Range32{Lo: 0x102a0, Hi: 0x102d0, Stride: 0x1}, + unicode.Range32{Lo: 0x10300, Hi: 0x1031f, Stride: 0x1}, + unicode.Range32{Lo: 0x10330, Hi: 0x10340, Stride: 0x1}, + unicode.Range32{Lo: 0x10341, Hi: 0x10341, Stride: 0x1}, + unicode.Range32{Lo: 0x10342, Hi: 0x10349, Stride: 0x1}, + unicode.Range32{Lo: 0x1034a, Hi: 0x1034a, Stride: 0x1}, + unicode.Range32{Lo: 0x10350, Hi: 0x10375, Stride: 0x1}, + unicode.Range32{Lo: 0x10380, Hi: 0x1039d, Stride: 0x1}, + unicode.Range32{Lo: 0x103a0, Hi: 0x103c3, Stride: 0x1}, + unicode.Range32{Lo: 0x103c8, Hi: 0x103cf, Stride: 0x1}, + unicode.Range32{Lo: 0x103d1, Hi: 0x103d5, Stride: 0x1}, + unicode.Range32{Lo: 0x10450, Hi: 0x1049d, Stride: 0x1}, + unicode.Range32{Lo: 0x10500, Hi: 0x10527, Stride: 0x1}, + unicode.Range32{Lo: 0x10530, Hi: 0x10563, Stride: 0x1}, + unicode.Range32{Lo: 0x10600, Hi: 0x10736, Stride: 0x1}, + unicode.Range32{Lo: 0x10740, Hi: 0x10755, Stride: 0x1}, + unicode.Range32{Lo: 0x10760, Hi: 0x10767, Stride: 0x1}, + unicode.Range32{Lo: 0x10800, Hi: 0x10805, Stride: 0x1}, + unicode.Range32{Lo: 0x10808, Hi: 0x10808, Stride: 0x1}, + unicode.Range32{Lo: 0x1080a, Hi: 0x10835, Stride: 0x1}, + unicode.Range32{Lo: 0x10837, Hi: 0x10838, Stride: 0x1}, + unicode.Range32{Lo: 0x1083c, Hi: 0x1083c, Stride: 0x1}, + unicode.Range32{Lo: 0x1083f, Hi: 0x10855, Stride: 0x1}, + unicode.Range32{Lo: 0x10860, Hi: 0x10876, Stride: 0x1}, + unicode.Range32{Lo: 0x10880, Hi: 0x1089e, Stride: 0x1}, + unicode.Range32{Lo: 0x108e0, Hi: 0x108f2, Stride: 0x1}, + unicode.Range32{Lo: 0x108f4, Hi: 0x108f5, Stride: 0x1}, + unicode.Range32{Lo: 0x10900, Hi: 0x10915, Stride: 0x1}, + unicode.Range32{Lo: 0x10920, Hi: 0x10939, Stride: 0x1}, + unicode.Range32{Lo: 0x10980, Hi: 0x109b7, Stride: 0x1}, + unicode.Range32{Lo: 0x109be, Hi: 0x109bf, Stride: 0x1}, + unicode.Range32{Lo: 0x10a00, Hi: 0x10a00, Stride: 0x1}, + unicode.Range32{Lo: 0x10a10, Hi: 0x10a13, Stride: 0x1}, + unicode.Range32{Lo: 0x10a15, Hi: 0x10a17, Stride: 0x1}, + unicode.Range32{Lo: 0x10a19, Hi: 0x10a33, Stride: 0x1}, + unicode.Range32{Lo: 0x10a60, Hi: 0x10a7c, Stride: 0x1}, + unicode.Range32{Lo: 0x10a80, Hi: 
0x10a9c, Stride: 0x1}, + unicode.Range32{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 0x1}, + unicode.Range32{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 0x1}, + unicode.Range32{Lo: 0x10b00, Hi: 0x10b35, Stride: 0x1}, + unicode.Range32{Lo: 0x10b40, Hi: 0x10b55, Stride: 0x1}, + unicode.Range32{Lo: 0x10b60, Hi: 0x10b72, Stride: 0x1}, + unicode.Range32{Lo: 0x10b80, Hi: 0x10b91, Stride: 0x1}, + unicode.Range32{Lo: 0x10c00, Hi: 0x10c48, Stride: 0x1}, + unicode.Range32{Lo: 0x11003, Hi: 0x11037, Stride: 0x1}, + unicode.Range32{Lo: 0x11083, Hi: 0x110af, Stride: 0x1}, + unicode.Range32{Lo: 0x110d0, Hi: 0x110e8, Stride: 0x1}, + unicode.Range32{Lo: 0x11103, Hi: 0x11126, Stride: 0x1}, + unicode.Range32{Lo: 0x11150, Hi: 0x11172, Stride: 0x1}, + unicode.Range32{Lo: 0x11176, Hi: 0x11176, Stride: 0x1}, + unicode.Range32{Lo: 0x11183, Hi: 0x111b2, Stride: 0x1}, + unicode.Range32{Lo: 0x111c1, Hi: 0x111c4, Stride: 0x1}, + unicode.Range32{Lo: 0x111da, Hi: 0x111da, Stride: 0x1}, + unicode.Range32{Lo: 0x111dc, Hi: 0x111dc, Stride: 0x1}, + unicode.Range32{Lo: 0x11200, Hi: 0x11211, Stride: 0x1}, + unicode.Range32{Lo: 0x11213, Hi: 0x1122b, Stride: 0x1}, + unicode.Range32{Lo: 0x11280, Hi: 0x11286, Stride: 0x1}, + unicode.Range32{Lo: 0x11288, Hi: 0x11288, Stride: 0x1}, + unicode.Range32{Lo: 0x1128a, Hi: 0x1128d, Stride: 0x1}, + unicode.Range32{Lo: 0x1128f, Hi: 0x1129d, Stride: 0x1}, + unicode.Range32{Lo: 0x1129f, Hi: 0x112a8, Stride: 0x1}, + unicode.Range32{Lo: 0x112b0, Hi: 0x112de, Stride: 0x1}, + unicode.Range32{Lo: 0x11305, Hi: 0x1130c, Stride: 0x1}, + unicode.Range32{Lo: 0x1130f, Hi: 0x11310, Stride: 0x1}, + unicode.Range32{Lo: 0x11313, Hi: 0x11328, Stride: 0x1}, + unicode.Range32{Lo: 0x1132a, Hi: 0x11330, Stride: 0x1}, + unicode.Range32{Lo: 0x11332, Hi: 0x11333, Stride: 0x1}, + unicode.Range32{Lo: 0x11335, Hi: 0x11339, Stride: 0x1}, + unicode.Range32{Lo: 0x1133d, Hi: 0x1133d, Stride: 0x1}, + unicode.Range32{Lo: 0x11350, Hi: 0x11350, Stride: 0x1}, + unicode.Range32{Lo: 0x1135d, Hi: 0x11361, Stride: 0x1}, + unicode.Range32{Lo: 0x11400, Hi: 0x11434, Stride: 0x1}, + unicode.Range32{Lo: 0x11447, Hi: 0x1144a, Stride: 0x1}, + unicode.Range32{Lo: 0x11480, Hi: 0x114af, Stride: 0x1}, + unicode.Range32{Lo: 0x114c4, Hi: 0x114c5, Stride: 0x1}, + unicode.Range32{Lo: 0x114c7, Hi: 0x114c7, Stride: 0x1}, + unicode.Range32{Lo: 0x11580, Hi: 0x115ae, Stride: 0x1}, + unicode.Range32{Lo: 0x115d8, Hi: 0x115db, Stride: 0x1}, + unicode.Range32{Lo: 0x11600, Hi: 0x1162f, Stride: 0x1}, + unicode.Range32{Lo: 0x11644, Hi: 0x11644, Stride: 0x1}, + unicode.Range32{Lo: 0x11680, Hi: 0x116aa, Stride: 0x1}, + unicode.Range32{Lo: 0x11700, Hi: 0x11719, Stride: 0x1}, + unicode.Range32{Lo: 0x118ff, Hi: 0x118ff, Stride: 0x1}, + unicode.Range32{Lo: 0x11ac0, Hi: 0x11af8, Stride: 0x1}, + unicode.Range32{Lo: 0x11c00, Hi: 0x11c08, Stride: 0x1}, + unicode.Range32{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c40, Hi: 0x11c40, Stride: 0x1}, + unicode.Range32{Lo: 0x11c72, Hi: 0x11c8f, Stride: 0x1}, + unicode.Range32{Lo: 0x12000, Hi: 0x12399, Stride: 0x1}, + unicode.Range32{Lo: 0x12400, Hi: 0x1246e, Stride: 0x1}, + unicode.Range32{Lo: 0x12480, Hi: 0x12543, Stride: 0x1}, + unicode.Range32{Lo: 0x13000, Hi: 0x1342e, Stride: 0x1}, + unicode.Range32{Lo: 0x14400, Hi: 0x14646, Stride: 0x1}, + unicode.Range32{Lo: 0x16800, Hi: 0x16a38, Stride: 0x1}, + unicode.Range32{Lo: 0x16a40, Hi: 0x16a5e, Stride: 0x1}, + unicode.Range32{Lo: 0x16ad0, Hi: 0x16aed, Stride: 0x1}, + unicode.Range32{Lo: 0x16b00, Hi: 0x16b2f, Stride: 0x1}, + unicode.Range32{Lo: 0x16b40, Hi: 0x16b43, Stride: 
0x1}, + unicode.Range32{Lo: 0x16b63, Hi: 0x16b77, Stride: 0x1}, + unicode.Range32{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 0x1}, + unicode.Range32{Lo: 0x16f00, Hi: 0x16f44, Stride: 0x1}, + unicode.Range32{Lo: 0x16f50, Hi: 0x16f50, Stride: 0x1}, + unicode.Range32{Lo: 0x16f93, Hi: 0x16f9f, Stride: 0x1}, + unicode.Range32{Lo: 0x16fe0, Hi: 0x16fe0, Stride: 0x1}, + unicode.Range32{Lo: 0x17000, Hi: 0x187ec, Stride: 0x1}, + unicode.Range32{Lo: 0x18800, Hi: 0x18af2, Stride: 0x1}, + unicode.Range32{Lo: 0x1b000, Hi: 0x1b001, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 0x1}, + unicode.Range32{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 0x1}, + unicode.Range32{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 0x1}, + unicode.Range32{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 0x1}, + unicode.Range32{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 0x1}, + unicode.Range32{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 0x1}, + unicode.Range32{Lo: 0x20000, Hi: 0x2a6d6, Stride: 0x1}, + unicode.Range32{Lo: 0x2a700, Hi: 0x2b734, Stride: 0x1}, + unicode.Range32{Lo: 0x2b740, Hi: 0x2b81d, Stride: 0x1}, + unicode.Range32{Lo: 0x2b820, Hi: 0x2cea1, Stride: 0x1}, + unicode.Range32{Lo: 0x2f800, Hi: 0x2fa1d, Stride: 0x1}, + }, + LatinOffset: 0, +} + +var _SentenceSContinue = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x2c, Hi: 0x2c, Stride: 0x1}, + unicode.Range16{Lo: 0x2d, Hi: 0x2d, Stride: 0x1}, + unicode.Range16{Lo: 0x3a, Hi: 0x3a, Stride: 0x1}, + unicode.Range16{Lo: 0x55d, Hi: 0x55d, Stride: 0x1}, + unicode.Range16{Lo: 0x60c, Hi: 0x60d, Stride: 0x1}, + unicode.Range16{Lo: 0x7f8, Hi: 0x7f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1802, Hi: 0x1802, Stride: 0x1}, + unicode.Range16{Lo: 0x1808, Hi: 0x1808, Stride: 0x1}, + 
unicode.Range16{Lo: 0x2013, Hi: 0x2014, Stride: 0x1}, + unicode.Range16{Lo: 0x3001, Hi: 0x3001, Stride: 0x1}, + unicode.Range16{Lo: 0xfe10, Hi: 0xfe11, Stride: 0x1}, + unicode.Range16{Lo: 0xfe13, Hi: 0xfe13, Stride: 0x1}, + unicode.Range16{Lo: 0xfe31, Hi: 0xfe32, Stride: 0x1}, + unicode.Range16{Lo: 0xfe50, Hi: 0xfe51, Stride: 0x1}, + unicode.Range16{Lo: 0xfe55, Hi: 0xfe55, Stride: 0x1}, + unicode.Range16{Lo: 0xfe58, Hi: 0xfe58, Stride: 0x1}, + unicode.Range16{Lo: 0xfe63, Hi: 0xfe63, Stride: 0x1}, + unicode.Range16{Lo: 0xff0c, Hi: 0xff0c, Stride: 0x1}, + unicode.Range16{Lo: 0xff0d, Hi: 0xff0d, Stride: 0x1}, + unicode.Range16{Lo: 0xff1a, Hi: 0xff1a, Stride: 0x1}, + unicode.Range16{Lo: 0xff64, Hi: 0xff64, Stride: 0x1}, + }, + LatinOffset: 3, +} + +var _SentenceSTerm = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x21, Hi: 0x21, Stride: 0x1}, + unicode.Range16{Lo: 0x3f, Hi: 0x3f, Stride: 0x1}, + unicode.Range16{Lo: 0x589, Hi: 0x589, Stride: 0x1}, + unicode.Range16{Lo: 0x61f, Hi: 0x61f, Stride: 0x1}, + unicode.Range16{Lo: 0x6d4, Hi: 0x6d4, Stride: 0x1}, + unicode.Range16{Lo: 0x700, Hi: 0x702, Stride: 0x1}, + unicode.Range16{Lo: 0x7f9, Hi: 0x7f9, Stride: 0x1}, + unicode.Range16{Lo: 0x964, Hi: 0x965, Stride: 0x1}, + unicode.Range16{Lo: 0x104a, Hi: 0x104b, Stride: 0x1}, + unicode.Range16{Lo: 0x1362, Hi: 0x1362, Stride: 0x1}, + unicode.Range16{Lo: 0x1367, Hi: 0x1368, Stride: 0x1}, + unicode.Range16{Lo: 0x166e, Hi: 0x166e, Stride: 0x1}, + unicode.Range16{Lo: 0x1735, Hi: 0x1736, Stride: 0x1}, + unicode.Range16{Lo: 0x1803, Hi: 0x1803, Stride: 0x1}, + unicode.Range16{Lo: 0x1809, Hi: 0x1809, Stride: 0x1}, + unicode.Range16{Lo: 0x1944, Hi: 0x1945, Stride: 0x1}, + unicode.Range16{Lo: 0x1aa8, Hi: 0x1aab, Stride: 0x1}, + unicode.Range16{Lo: 0x1b5a, Hi: 0x1b5b, Stride: 0x1}, + unicode.Range16{Lo: 0x1b5e, Hi: 0x1b5f, Stride: 0x1}, + unicode.Range16{Lo: 0x1c3b, Hi: 0x1c3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1c7e, Hi: 0x1c7f, Stride: 0x1}, + unicode.Range16{Lo: 0x203c, Hi: 0x203d, Stride: 0x1}, + unicode.Range16{Lo: 0x2047, Hi: 0x2049, Stride: 0x1}, + unicode.Range16{Lo: 0x2e2e, Hi: 0x2e2e, Stride: 0x1}, + unicode.Range16{Lo: 0x2e3c, Hi: 0x2e3c, Stride: 0x1}, + unicode.Range16{Lo: 0x3002, Hi: 0x3002, Stride: 0x1}, + unicode.Range16{Lo: 0xa4ff, Hi: 0xa4ff, Stride: 0x1}, + unicode.Range16{Lo: 0xa60e, Hi: 0xa60f, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f3, Hi: 0xa6f3, Stride: 0x1}, + unicode.Range16{Lo: 0xa6f7, Hi: 0xa6f7, Stride: 0x1}, + unicode.Range16{Lo: 0xa876, Hi: 0xa877, Stride: 0x1}, + unicode.Range16{Lo: 0xa8ce, Hi: 0xa8cf, Stride: 0x1}, + unicode.Range16{Lo: 0xa92f, Hi: 0xa92f, Stride: 0x1}, + unicode.Range16{Lo: 0xa9c8, Hi: 0xa9c9, Stride: 0x1}, + unicode.Range16{Lo: 0xaa5d, Hi: 0xaa5f, Stride: 0x1}, + unicode.Range16{Lo: 0xaaf0, Hi: 0xaaf1, Stride: 0x1}, + unicode.Range16{Lo: 0xabeb, Hi: 0xabeb, Stride: 0x1}, + unicode.Range16{Lo: 0xfe56, Hi: 0xfe57, Stride: 0x1}, + unicode.Range16{Lo: 0xff01, Hi: 0xff01, Stride: 0x1}, + unicode.Range16{Lo: 0xff1f, Hi: 0xff1f, Stride: 0x1}, + unicode.Range16{Lo: 0xff61, Hi: 0xff61, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10a56, Hi: 0x10a57, Stride: 0x1}, + unicode.Range32{Lo: 0x11047, Hi: 0x11048, Stride: 0x1}, + unicode.Range32{Lo: 0x110be, Hi: 0x110c1, Stride: 0x1}, + unicode.Range32{Lo: 0x11141, Hi: 0x11143, Stride: 0x1}, + unicode.Range32{Lo: 0x111c5, Hi: 0x111c6, Stride: 0x1}, + unicode.Range32{Lo: 0x111cd, Hi: 0x111cd, Stride: 0x1}, + unicode.Range32{Lo: 0x111de, Hi: 0x111df, Stride: 0x1}, + 
unicode.Range32{Lo: 0x11238, Hi: 0x11239, Stride: 0x1}, + unicode.Range32{Lo: 0x1123b, Hi: 0x1123c, Stride: 0x1}, + unicode.Range32{Lo: 0x112a9, Hi: 0x112a9, Stride: 0x1}, + unicode.Range32{Lo: 0x1144b, Hi: 0x1144c, Stride: 0x1}, + unicode.Range32{Lo: 0x115c2, Hi: 0x115c3, Stride: 0x1}, + unicode.Range32{Lo: 0x115c9, Hi: 0x115d7, Stride: 0x1}, + unicode.Range32{Lo: 0x11641, Hi: 0x11642, Stride: 0x1}, + unicode.Range32{Lo: 0x1173c, Hi: 0x1173e, Stride: 0x1}, + unicode.Range32{Lo: 0x11c41, Hi: 0x11c42, Stride: 0x1}, + unicode.Range32{Lo: 0x16a6e, Hi: 0x16a6f, Stride: 0x1}, + unicode.Range32{Lo: 0x16af5, Hi: 0x16af5, Stride: 0x1}, + unicode.Range32{Lo: 0x16b37, Hi: 0x16b38, Stride: 0x1}, + unicode.Range32{Lo: 0x16b44, Hi: 0x16b44, Stride: 0x1}, + unicode.Range32{Lo: 0x1bc9f, Hi: 0x1bc9f, Stride: 0x1}, + unicode.Range32{Lo: 0x1da88, Hi: 0x1da88, Stride: 0x1}, + }, + LatinOffset: 2, +} + +var _SentenceSep = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x85, Hi: 0x85, Stride: 0x1}, + unicode.Range16{Lo: 0x2028, Hi: 0x2028, Stride: 0x1}, + unicode.Range16{Lo: 0x2029, Hi: 0x2029, Stride: 0x1}, + }, + LatinOffset: 1, +} + +var _SentenceSp = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x9, Hi: 0x9, Stride: 0x1}, + unicode.Range16{Lo: 0xb, Hi: 0xc, Stride: 0x1}, + unicode.Range16{Lo: 0x20, Hi: 0x20, Stride: 0x1}, + unicode.Range16{Lo: 0xa0, Hi: 0xa0, Stride: 0x1}, + unicode.Range16{Lo: 0x1680, Hi: 0x1680, Stride: 0x1}, + unicode.Range16{Lo: 0x2000, Hi: 0x200a, Stride: 0x1}, + unicode.Range16{Lo: 0x202f, Hi: 0x202f, Stride: 0x1}, + unicode.Range16{Lo: 0x205f, Hi: 0x205f, Stride: 0x1}, + unicode.Range16{Lo: 0x3000, Hi: 0x3000, Stride: 0x1}, + }, + LatinOffset: 4, +} + +var _SentenceUpper = &unicode.RangeTable{ + R16: []unicode.Range16{ + unicode.Range16{Lo: 0x41, Hi: 0x5a, Stride: 0x1}, + unicode.Range16{Lo: 0xc0, Hi: 0xd6, Stride: 0x1}, + unicode.Range16{Lo: 0xd8, Hi: 0xde, Stride: 0x1}, + unicode.Range16{Lo: 0x100, Hi: 0x100, Stride: 0x1}, + unicode.Range16{Lo: 0x102, Hi: 0x102, Stride: 0x1}, + unicode.Range16{Lo: 0x104, Hi: 0x104, Stride: 0x1}, + unicode.Range16{Lo: 0x106, Hi: 0x106, Stride: 0x1}, + unicode.Range16{Lo: 0x108, Hi: 0x108, Stride: 0x1}, + unicode.Range16{Lo: 0x10a, Hi: 0x10a, Stride: 0x1}, + unicode.Range16{Lo: 0x10c, Hi: 0x10c, Stride: 0x1}, + unicode.Range16{Lo: 0x10e, Hi: 0x10e, Stride: 0x1}, + unicode.Range16{Lo: 0x110, Hi: 0x110, Stride: 0x1}, + unicode.Range16{Lo: 0x112, Hi: 0x112, Stride: 0x1}, + unicode.Range16{Lo: 0x114, Hi: 0x114, Stride: 0x1}, + unicode.Range16{Lo: 0x116, Hi: 0x116, Stride: 0x1}, + unicode.Range16{Lo: 0x118, Hi: 0x118, Stride: 0x1}, + unicode.Range16{Lo: 0x11a, Hi: 0x11a, Stride: 0x1}, + unicode.Range16{Lo: 0x11c, Hi: 0x11c, Stride: 0x1}, + unicode.Range16{Lo: 0x11e, Hi: 0x11e, Stride: 0x1}, + unicode.Range16{Lo: 0x120, Hi: 0x120, Stride: 0x1}, + unicode.Range16{Lo: 0x122, Hi: 0x122, Stride: 0x1}, + unicode.Range16{Lo: 0x124, Hi: 0x124, Stride: 0x1}, + unicode.Range16{Lo: 0x126, Hi: 0x126, Stride: 0x1}, + unicode.Range16{Lo: 0x128, Hi: 0x128, Stride: 0x1}, + unicode.Range16{Lo: 0x12a, Hi: 0x12a, Stride: 0x1}, + unicode.Range16{Lo: 0x12c, Hi: 0x12c, Stride: 0x1}, + unicode.Range16{Lo: 0x12e, Hi: 0x12e, Stride: 0x1}, + unicode.Range16{Lo: 0x130, Hi: 0x130, Stride: 0x1}, + unicode.Range16{Lo: 0x132, Hi: 0x132, Stride: 0x1}, + unicode.Range16{Lo: 0x134, Hi: 0x134, Stride: 0x1}, + unicode.Range16{Lo: 0x136, Hi: 0x136, Stride: 0x1}, + unicode.Range16{Lo: 0x139, Hi: 0x139, Stride: 0x1}, + unicode.Range16{Lo: 0x13b, 
Hi: 0x13b, Stride: 0x1}, + unicode.Range16{Lo: 0x13d, Hi: 0x13d, Stride: 0x1}, + unicode.Range16{Lo: 0x13f, Hi: 0x13f, Stride: 0x1}, + unicode.Range16{Lo: 0x141, Hi: 0x141, Stride: 0x1}, + unicode.Range16{Lo: 0x143, Hi: 0x143, Stride: 0x1}, + unicode.Range16{Lo: 0x145, Hi: 0x145, Stride: 0x1}, + unicode.Range16{Lo: 0x147, Hi: 0x147, Stride: 0x1}, + unicode.Range16{Lo: 0x14a, Hi: 0x14a, Stride: 0x1}, + unicode.Range16{Lo: 0x14c, Hi: 0x14c, Stride: 0x1}, + unicode.Range16{Lo: 0x14e, Hi: 0x14e, Stride: 0x1}, + unicode.Range16{Lo: 0x150, Hi: 0x150, Stride: 0x1}, + unicode.Range16{Lo: 0x152, Hi: 0x152, Stride: 0x1}, + unicode.Range16{Lo: 0x154, Hi: 0x154, Stride: 0x1}, + unicode.Range16{Lo: 0x156, Hi: 0x156, Stride: 0x1}, + unicode.Range16{Lo: 0x158, Hi: 0x158, Stride: 0x1}, + unicode.Range16{Lo: 0x15a, Hi: 0x15a, Stride: 0x1}, + unicode.Range16{Lo: 0x15c, Hi: 0x15c, Stride: 0x1}, + unicode.Range16{Lo: 0x15e, Hi: 0x15e, Stride: 0x1}, + unicode.Range16{Lo: 0x160, Hi: 0x160, Stride: 0x1}, + unicode.Range16{Lo: 0x162, Hi: 0x162, Stride: 0x1}, + unicode.Range16{Lo: 0x164, Hi: 0x164, Stride: 0x1}, + unicode.Range16{Lo: 0x166, Hi: 0x166, Stride: 0x1}, + unicode.Range16{Lo: 0x168, Hi: 0x168, Stride: 0x1}, + unicode.Range16{Lo: 0x16a, Hi: 0x16a, Stride: 0x1}, + unicode.Range16{Lo: 0x16c, Hi: 0x16c, Stride: 0x1}, + unicode.Range16{Lo: 0x16e, Hi: 0x16e, Stride: 0x1}, + unicode.Range16{Lo: 0x170, Hi: 0x170, Stride: 0x1}, + unicode.Range16{Lo: 0x172, Hi: 0x172, Stride: 0x1}, + unicode.Range16{Lo: 0x174, Hi: 0x174, Stride: 0x1}, + unicode.Range16{Lo: 0x176, Hi: 0x176, Stride: 0x1}, + unicode.Range16{Lo: 0x178, Hi: 0x179, Stride: 0x1}, + unicode.Range16{Lo: 0x17b, Hi: 0x17b, Stride: 0x1}, + unicode.Range16{Lo: 0x17d, Hi: 0x17d, Stride: 0x1}, + unicode.Range16{Lo: 0x181, Hi: 0x182, Stride: 0x1}, + unicode.Range16{Lo: 0x184, Hi: 0x184, Stride: 0x1}, + unicode.Range16{Lo: 0x186, Hi: 0x187, Stride: 0x1}, + unicode.Range16{Lo: 0x189, Hi: 0x18b, Stride: 0x1}, + unicode.Range16{Lo: 0x18e, Hi: 0x191, Stride: 0x1}, + unicode.Range16{Lo: 0x193, Hi: 0x194, Stride: 0x1}, + unicode.Range16{Lo: 0x196, Hi: 0x198, Stride: 0x1}, + unicode.Range16{Lo: 0x19c, Hi: 0x19d, Stride: 0x1}, + unicode.Range16{Lo: 0x19f, Hi: 0x1a0, Stride: 0x1}, + unicode.Range16{Lo: 0x1a2, Hi: 0x1a2, Stride: 0x1}, + unicode.Range16{Lo: 0x1a4, Hi: 0x1a4, Stride: 0x1}, + unicode.Range16{Lo: 0x1a6, Hi: 0x1a7, Stride: 0x1}, + unicode.Range16{Lo: 0x1a9, Hi: 0x1a9, Stride: 0x1}, + unicode.Range16{Lo: 0x1ac, Hi: 0x1ac, Stride: 0x1}, + unicode.Range16{Lo: 0x1ae, Hi: 0x1af, Stride: 0x1}, + unicode.Range16{Lo: 0x1b1, Hi: 0x1b3, Stride: 0x1}, + unicode.Range16{Lo: 0x1b5, Hi: 0x1b5, Stride: 0x1}, + unicode.Range16{Lo: 0x1b7, Hi: 0x1b8, Stride: 0x1}, + unicode.Range16{Lo: 0x1bc, Hi: 0x1bc, Stride: 0x1}, + unicode.Range16{Lo: 0x1c4, Hi: 0x1c5, Stride: 0x1}, + unicode.Range16{Lo: 0x1c7, Hi: 0x1c8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ca, Hi: 0x1cb, Stride: 0x1}, + unicode.Range16{Lo: 0x1cd, Hi: 0x1cd, Stride: 0x1}, + unicode.Range16{Lo: 0x1cf, Hi: 0x1cf, Stride: 0x1}, + unicode.Range16{Lo: 0x1d1, Hi: 0x1d1, Stride: 0x1}, + unicode.Range16{Lo: 0x1d3, Hi: 0x1d3, Stride: 0x1}, + unicode.Range16{Lo: 0x1d5, Hi: 0x1d5, Stride: 0x1}, + unicode.Range16{Lo: 0x1d7, Hi: 0x1d7, Stride: 0x1}, + unicode.Range16{Lo: 0x1d9, Hi: 0x1d9, Stride: 0x1}, + unicode.Range16{Lo: 0x1db, Hi: 0x1db, Stride: 0x1}, + unicode.Range16{Lo: 0x1de, Hi: 0x1de, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0, Hi: 0x1e0, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2, Hi: 0x1e2, Stride: 0x1}, + 
unicode.Range16{Lo: 0x1e4, Hi: 0x1e4, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6, Hi: 0x1e6, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8, Hi: 0x1e8, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea, Hi: 0x1ea, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec, Hi: 0x1ec, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee, Hi: 0x1ee, Stride: 0x1}, + unicode.Range16{Lo: 0x1f1, Hi: 0x1f2, Stride: 0x1}, + unicode.Range16{Lo: 0x1f4, Hi: 0x1f4, Stride: 0x1}, + unicode.Range16{Lo: 0x1f6, Hi: 0x1f8, Stride: 0x1}, + unicode.Range16{Lo: 0x1fa, Hi: 0x1fa, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc, Hi: 0x1fc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe, Hi: 0x1fe, Stride: 0x1}, + unicode.Range16{Lo: 0x200, Hi: 0x200, Stride: 0x1}, + unicode.Range16{Lo: 0x202, Hi: 0x202, Stride: 0x1}, + unicode.Range16{Lo: 0x204, Hi: 0x204, Stride: 0x1}, + unicode.Range16{Lo: 0x206, Hi: 0x206, Stride: 0x1}, + unicode.Range16{Lo: 0x208, Hi: 0x208, Stride: 0x1}, + unicode.Range16{Lo: 0x20a, Hi: 0x20a, Stride: 0x1}, + unicode.Range16{Lo: 0x20c, Hi: 0x20c, Stride: 0x1}, + unicode.Range16{Lo: 0x20e, Hi: 0x20e, Stride: 0x1}, + unicode.Range16{Lo: 0x210, Hi: 0x210, Stride: 0x1}, + unicode.Range16{Lo: 0x212, Hi: 0x212, Stride: 0x1}, + unicode.Range16{Lo: 0x214, Hi: 0x214, Stride: 0x1}, + unicode.Range16{Lo: 0x216, Hi: 0x216, Stride: 0x1}, + unicode.Range16{Lo: 0x218, Hi: 0x218, Stride: 0x1}, + unicode.Range16{Lo: 0x21a, Hi: 0x21a, Stride: 0x1}, + unicode.Range16{Lo: 0x21c, Hi: 0x21c, Stride: 0x1}, + unicode.Range16{Lo: 0x21e, Hi: 0x21e, Stride: 0x1}, + unicode.Range16{Lo: 0x220, Hi: 0x220, Stride: 0x1}, + unicode.Range16{Lo: 0x222, Hi: 0x222, Stride: 0x1}, + unicode.Range16{Lo: 0x224, Hi: 0x224, Stride: 0x1}, + unicode.Range16{Lo: 0x226, Hi: 0x226, Stride: 0x1}, + unicode.Range16{Lo: 0x228, Hi: 0x228, Stride: 0x1}, + unicode.Range16{Lo: 0x22a, Hi: 0x22a, Stride: 0x1}, + unicode.Range16{Lo: 0x22c, Hi: 0x22c, Stride: 0x1}, + unicode.Range16{Lo: 0x22e, Hi: 0x22e, Stride: 0x1}, + unicode.Range16{Lo: 0x230, Hi: 0x230, Stride: 0x1}, + unicode.Range16{Lo: 0x232, Hi: 0x232, Stride: 0x1}, + unicode.Range16{Lo: 0x23a, Hi: 0x23b, Stride: 0x1}, + unicode.Range16{Lo: 0x23d, Hi: 0x23e, Stride: 0x1}, + unicode.Range16{Lo: 0x241, Hi: 0x241, Stride: 0x1}, + unicode.Range16{Lo: 0x243, Hi: 0x246, Stride: 0x1}, + unicode.Range16{Lo: 0x248, Hi: 0x248, Stride: 0x1}, + unicode.Range16{Lo: 0x24a, Hi: 0x24a, Stride: 0x1}, + unicode.Range16{Lo: 0x24c, Hi: 0x24c, Stride: 0x1}, + unicode.Range16{Lo: 0x24e, Hi: 0x24e, Stride: 0x1}, + unicode.Range16{Lo: 0x370, Hi: 0x370, Stride: 0x1}, + unicode.Range16{Lo: 0x372, Hi: 0x372, Stride: 0x1}, + unicode.Range16{Lo: 0x376, Hi: 0x376, Stride: 0x1}, + unicode.Range16{Lo: 0x37f, Hi: 0x37f, Stride: 0x1}, + unicode.Range16{Lo: 0x386, Hi: 0x386, Stride: 0x1}, + unicode.Range16{Lo: 0x388, Hi: 0x38a, Stride: 0x1}, + unicode.Range16{Lo: 0x38c, Hi: 0x38c, Stride: 0x1}, + unicode.Range16{Lo: 0x38e, Hi: 0x38f, Stride: 0x1}, + unicode.Range16{Lo: 0x391, Hi: 0x3a1, Stride: 0x1}, + unicode.Range16{Lo: 0x3a3, Hi: 0x3ab, Stride: 0x1}, + unicode.Range16{Lo: 0x3cf, Hi: 0x3cf, Stride: 0x1}, + unicode.Range16{Lo: 0x3d2, Hi: 0x3d4, Stride: 0x1}, + unicode.Range16{Lo: 0x3d8, Hi: 0x3d8, Stride: 0x1}, + unicode.Range16{Lo: 0x3da, Hi: 0x3da, Stride: 0x1}, + unicode.Range16{Lo: 0x3dc, Hi: 0x3dc, Stride: 0x1}, + unicode.Range16{Lo: 0x3de, Hi: 0x3de, Stride: 0x1}, + unicode.Range16{Lo: 0x3e0, Hi: 0x3e0, Stride: 0x1}, + unicode.Range16{Lo: 0x3e2, Hi: 0x3e2, Stride: 0x1}, + unicode.Range16{Lo: 0x3e4, Hi: 0x3e4, Stride: 0x1}, + unicode.Range16{Lo: 0x3e6, Hi: 0x3e6, 
Stride: 0x1}, + unicode.Range16{Lo: 0x3e8, Hi: 0x3e8, Stride: 0x1}, + unicode.Range16{Lo: 0x3ea, Hi: 0x3ea, Stride: 0x1}, + unicode.Range16{Lo: 0x3ec, Hi: 0x3ec, Stride: 0x1}, + unicode.Range16{Lo: 0x3ee, Hi: 0x3ee, Stride: 0x1}, + unicode.Range16{Lo: 0x3f4, Hi: 0x3f4, Stride: 0x1}, + unicode.Range16{Lo: 0x3f7, Hi: 0x3f7, Stride: 0x1}, + unicode.Range16{Lo: 0x3f9, Hi: 0x3fa, Stride: 0x1}, + unicode.Range16{Lo: 0x3fd, Hi: 0x42f, Stride: 0x1}, + unicode.Range16{Lo: 0x460, Hi: 0x460, Stride: 0x1}, + unicode.Range16{Lo: 0x462, Hi: 0x462, Stride: 0x1}, + unicode.Range16{Lo: 0x464, Hi: 0x464, Stride: 0x1}, + unicode.Range16{Lo: 0x466, Hi: 0x466, Stride: 0x1}, + unicode.Range16{Lo: 0x468, Hi: 0x468, Stride: 0x1}, + unicode.Range16{Lo: 0x46a, Hi: 0x46a, Stride: 0x1}, + unicode.Range16{Lo: 0x46c, Hi: 0x46c, Stride: 0x1}, + unicode.Range16{Lo: 0x46e, Hi: 0x46e, Stride: 0x1}, + unicode.Range16{Lo: 0x470, Hi: 0x470, Stride: 0x1}, + unicode.Range16{Lo: 0x472, Hi: 0x472, Stride: 0x1}, + unicode.Range16{Lo: 0x474, Hi: 0x474, Stride: 0x1}, + unicode.Range16{Lo: 0x476, Hi: 0x476, Stride: 0x1}, + unicode.Range16{Lo: 0x478, Hi: 0x478, Stride: 0x1}, + unicode.Range16{Lo: 0x47a, Hi: 0x47a, Stride: 0x1}, + unicode.Range16{Lo: 0x47c, Hi: 0x47c, Stride: 0x1}, + unicode.Range16{Lo: 0x47e, Hi: 0x47e, Stride: 0x1}, + unicode.Range16{Lo: 0x480, Hi: 0x480, Stride: 0x1}, + unicode.Range16{Lo: 0x48a, Hi: 0x48a, Stride: 0x1}, + unicode.Range16{Lo: 0x48c, Hi: 0x48c, Stride: 0x1}, + unicode.Range16{Lo: 0x48e, Hi: 0x48e, Stride: 0x1}, + unicode.Range16{Lo: 0x490, Hi: 0x490, Stride: 0x1}, + unicode.Range16{Lo: 0x492, Hi: 0x492, Stride: 0x1}, + unicode.Range16{Lo: 0x494, Hi: 0x494, Stride: 0x1}, + unicode.Range16{Lo: 0x496, Hi: 0x496, Stride: 0x1}, + unicode.Range16{Lo: 0x498, Hi: 0x498, Stride: 0x1}, + unicode.Range16{Lo: 0x49a, Hi: 0x49a, Stride: 0x1}, + unicode.Range16{Lo: 0x49c, Hi: 0x49c, Stride: 0x1}, + unicode.Range16{Lo: 0x49e, Hi: 0x49e, Stride: 0x1}, + unicode.Range16{Lo: 0x4a0, Hi: 0x4a0, Stride: 0x1}, + unicode.Range16{Lo: 0x4a2, Hi: 0x4a2, Stride: 0x1}, + unicode.Range16{Lo: 0x4a4, Hi: 0x4a4, Stride: 0x1}, + unicode.Range16{Lo: 0x4a6, Hi: 0x4a6, Stride: 0x1}, + unicode.Range16{Lo: 0x4a8, Hi: 0x4a8, Stride: 0x1}, + unicode.Range16{Lo: 0x4aa, Hi: 0x4aa, Stride: 0x1}, + unicode.Range16{Lo: 0x4ac, Hi: 0x4ac, Stride: 0x1}, + unicode.Range16{Lo: 0x4ae, Hi: 0x4ae, Stride: 0x1}, + unicode.Range16{Lo: 0x4b0, Hi: 0x4b0, Stride: 0x1}, + unicode.Range16{Lo: 0x4b2, Hi: 0x4b2, Stride: 0x1}, + unicode.Range16{Lo: 0x4b4, Hi: 0x4b4, Stride: 0x1}, + unicode.Range16{Lo: 0x4b6, Hi: 0x4b6, Stride: 0x1}, + unicode.Range16{Lo: 0x4b8, Hi: 0x4b8, Stride: 0x1}, + unicode.Range16{Lo: 0x4ba, Hi: 0x4ba, Stride: 0x1}, + unicode.Range16{Lo: 0x4bc, Hi: 0x4bc, Stride: 0x1}, + unicode.Range16{Lo: 0x4be, Hi: 0x4be, Stride: 0x1}, + unicode.Range16{Lo: 0x4c0, Hi: 0x4c1, Stride: 0x1}, + unicode.Range16{Lo: 0x4c3, Hi: 0x4c3, Stride: 0x1}, + unicode.Range16{Lo: 0x4c5, Hi: 0x4c5, Stride: 0x1}, + unicode.Range16{Lo: 0x4c7, Hi: 0x4c7, Stride: 0x1}, + unicode.Range16{Lo: 0x4c9, Hi: 0x4c9, Stride: 0x1}, + unicode.Range16{Lo: 0x4cb, Hi: 0x4cb, Stride: 0x1}, + unicode.Range16{Lo: 0x4cd, Hi: 0x4cd, Stride: 0x1}, + unicode.Range16{Lo: 0x4d0, Hi: 0x4d0, Stride: 0x1}, + unicode.Range16{Lo: 0x4d2, Hi: 0x4d2, Stride: 0x1}, + unicode.Range16{Lo: 0x4d4, Hi: 0x4d4, Stride: 0x1}, + unicode.Range16{Lo: 0x4d6, Hi: 0x4d6, Stride: 0x1}, + unicode.Range16{Lo: 0x4d8, Hi: 0x4d8, Stride: 0x1}, + unicode.Range16{Lo: 0x4da, Hi: 0x4da, Stride: 0x1}, + unicode.Range16{Lo: 0x4dc, 
Hi: 0x4dc, Stride: 0x1}, + unicode.Range16{Lo: 0x4de, Hi: 0x4de, Stride: 0x1}, + unicode.Range16{Lo: 0x4e0, Hi: 0x4e0, Stride: 0x1}, + unicode.Range16{Lo: 0x4e2, Hi: 0x4e2, Stride: 0x1}, + unicode.Range16{Lo: 0x4e4, Hi: 0x4e4, Stride: 0x1}, + unicode.Range16{Lo: 0x4e6, Hi: 0x4e6, Stride: 0x1}, + unicode.Range16{Lo: 0x4e8, Hi: 0x4e8, Stride: 0x1}, + unicode.Range16{Lo: 0x4ea, Hi: 0x4ea, Stride: 0x1}, + unicode.Range16{Lo: 0x4ec, Hi: 0x4ec, Stride: 0x1}, + unicode.Range16{Lo: 0x4ee, Hi: 0x4ee, Stride: 0x1}, + unicode.Range16{Lo: 0x4f0, Hi: 0x4f0, Stride: 0x1}, + unicode.Range16{Lo: 0x4f2, Hi: 0x4f2, Stride: 0x1}, + unicode.Range16{Lo: 0x4f4, Hi: 0x4f4, Stride: 0x1}, + unicode.Range16{Lo: 0x4f6, Hi: 0x4f6, Stride: 0x1}, + unicode.Range16{Lo: 0x4f8, Hi: 0x4f8, Stride: 0x1}, + unicode.Range16{Lo: 0x4fa, Hi: 0x4fa, Stride: 0x1}, + unicode.Range16{Lo: 0x4fc, Hi: 0x4fc, Stride: 0x1}, + unicode.Range16{Lo: 0x4fe, Hi: 0x4fe, Stride: 0x1}, + unicode.Range16{Lo: 0x500, Hi: 0x500, Stride: 0x1}, + unicode.Range16{Lo: 0x502, Hi: 0x502, Stride: 0x1}, + unicode.Range16{Lo: 0x504, Hi: 0x504, Stride: 0x1}, + unicode.Range16{Lo: 0x506, Hi: 0x506, Stride: 0x1}, + unicode.Range16{Lo: 0x508, Hi: 0x508, Stride: 0x1}, + unicode.Range16{Lo: 0x50a, Hi: 0x50a, Stride: 0x1}, + unicode.Range16{Lo: 0x50c, Hi: 0x50c, Stride: 0x1}, + unicode.Range16{Lo: 0x50e, Hi: 0x50e, Stride: 0x1}, + unicode.Range16{Lo: 0x510, Hi: 0x510, Stride: 0x1}, + unicode.Range16{Lo: 0x512, Hi: 0x512, Stride: 0x1}, + unicode.Range16{Lo: 0x514, Hi: 0x514, Stride: 0x1}, + unicode.Range16{Lo: 0x516, Hi: 0x516, Stride: 0x1}, + unicode.Range16{Lo: 0x518, Hi: 0x518, Stride: 0x1}, + unicode.Range16{Lo: 0x51a, Hi: 0x51a, Stride: 0x1}, + unicode.Range16{Lo: 0x51c, Hi: 0x51c, Stride: 0x1}, + unicode.Range16{Lo: 0x51e, Hi: 0x51e, Stride: 0x1}, + unicode.Range16{Lo: 0x520, Hi: 0x520, Stride: 0x1}, + unicode.Range16{Lo: 0x522, Hi: 0x522, Stride: 0x1}, + unicode.Range16{Lo: 0x524, Hi: 0x524, Stride: 0x1}, + unicode.Range16{Lo: 0x526, Hi: 0x526, Stride: 0x1}, + unicode.Range16{Lo: 0x528, Hi: 0x528, Stride: 0x1}, + unicode.Range16{Lo: 0x52a, Hi: 0x52a, Stride: 0x1}, + unicode.Range16{Lo: 0x52c, Hi: 0x52c, Stride: 0x1}, + unicode.Range16{Lo: 0x52e, Hi: 0x52e, Stride: 0x1}, + unicode.Range16{Lo: 0x531, Hi: 0x556, Stride: 0x1}, + unicode.Range16{Lo: 0x10a0, Hi: 0x10c5, Stride: 0x1}, + unicode.Range16{Lo: 0x10c7, Hi: 0x10c7, Stride: 0x1}, + unicode.Range16{Lo: 0x10cd, Hi: 0x10cd, Stride: 0x1}, + unicode.Range16{Lo: 0x13a0, Hi: 0x13f5, Stride: 0x1}, + unicode.Range16{Lo: 0x1e00, Hi: 0x1e00, Stride: 0x1}, + unicode.Range16{Lo: 0x1e02, Hi: 0x1e02, Stride: 0x1}, + unicode.Range16{Lo: 0x1e04, Hi: 0x1e04, Stride: 0x1}, + unicode.Range16{Lo: 0x1e06, Hi: 0x1e06, Stride: 0x1}, + unicode.Range16{Lo: 0x1e08, Hi: 0x1e08, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0a, Hi: 0x1e0a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0c, Hi: 0x1e0c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e0e, Hi: 0x1e0e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e10, Hi: 0x1e10, Stride: 0x1}, + unicode.Range16{Lo: 0x1e12, Hi: 0x1e12, Stride: 0x1}, + unicode.Range16{Lo: 0x1e14, Hi: 0x1e14, Stride: 0x1}, + unicode.Range16{Lo: 0x1e16, Hi: 0x1e16, Stride: 0x1}, + unicode.Range16{Lo: 0x1e18, Hi: 0x1e18, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1a, Hi: 0x1e1a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1c, Hi: 0x1e1c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e1e, Hi: 0x1e1e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e20, Hi: 0x1e20, Stride: 0x1}, + unicode.Range16{Lo: 0x1e22, Hi: 0x1e22, Stride: 0x1}, + unicode.Range16{Lo: 0x1e24, 
Hi: 0x1e24, Stride: 0x1}, + unicode.Range16{Lo: 0x1e26, Hi: 0x1e26, Stride: 0x1}, + unicode.Range16{Lo: 0x1e28, Hi: 0x1e28, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2a, Hi: 0x1e2a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2c, Hi: 0x1e2c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e2e, Hi: 0x1e2e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e30, Hi: 0x1e30, Stride: 0x1}, + unicode.Range16{Lo: 0x1e32, Hi: 0x1e32, Stride: 0x1}, + unicode.Range16{Lo: 0x1e34, Hi: 0x1e34, Stride: 0x1}, + unicode.Range16{Lo: 0x1e36, Hi: 0x1e36, Stride: 0x1}, + unicode.Range16{Lo: 0x1e38, Hi: 0x1e38, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3a, Hi: 0x1e3a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3c, Hi: 0x1e3c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e3e, Hi: 0x1e3e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e40, Hi: 0x1e40, Stride: 0x1}, + unicode.Range16{Lo: 0x1e42, Hi: 0x1e42, Stride: 0x1}, + unicode.Range16{Lo: 0x1e44, Hi: 0x1e44, Stride: 0x1}, + unicode.Range16{Lo: 0x1e46, Hi: 0x1e46, Stride: 0x1}, + unicode.Range16{Lo: 0x1e48, Hi: 0x1e48, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4a, Hi: 0x1e4a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4c, Hi: 0x1e4c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e4e, Hi: 0x1e4e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e50, Hi: 0x1e50, Stride: 0x1}, + unicode.Range16{Lo: 0x1e52, Hi: 0x1e52, Stride: 0x1}, + unicode.Range16{Lo: 0x1e54, Hi: 0x1e54, Stride: 0x1}, + unicode.Range16{Lo: 0x1e56, Hi: 0x1e56, Stride: 0x1}, + unicode.Range16{Lo: 0x1e58, Hi: 0x1e58, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5a, Hi: 0x1e5a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5c, Hi: 0x1e5c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e5e, Hi: 0x1e5e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e60, Hi: 0x1e60, Stride: 0x1}, + unicode.Range16{Lo: 0x1e62, Hi: 0x1e62, Stride: 0x1}, + unicode.Range16{Lo: 0x1e64, Hi: 0x1e64, Stride: 0x1}, + unicode.Range16{Lo: 0x1e66, Hi: 0x1e66, Stride: 0x1}, + unicode.Range16{Lo: 0x1e68, Hi: 0x1e68, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6a, Hi: 0x1e6a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6c, Hi: 0x1e6c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e6e, Hi: 0x1e6e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e70, Hi: 0x1e70, Stride: 0x1}, + unicode.Range16{Lo: 0x1e72, Hi: 0x1e72, Stride: 0x1}, + unicode.Range16{Lo: 0x1e74, Hi: 0x1e74, Stride: 0x1}, + unicode.Range16{Lo: 0x1e76, Hi: 0x1e76, Stride: 0x1}, + unicode.Range16{Lo: 0x1e78, Hi: 0x1e78, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7a, Hi: 0x1e7a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7c, Hi: 0x1e7c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e7e, Hi: 0x1e7e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e80, Hi: 0x1e80, Stride: 0x1}, + unicode.Range16{Lo: 0x1e82, Hi: 0x1e82, Stride: 0x1}, + unicode.Range16{Lo: 0x1e84, Hi: 0x1e84, Stride: 0x1}, + unicode.Range16{Lo: 0x1e86, Hi: 0x1e86, Stride: 0x1}, + unicode.Range16{Lo: 0x1e88, Hi: 0x1e88, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8a, Hi: 0x1e8a, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8c, Hi: 0x1e8c, Stride: 0x1}, + unicode.Range16{Lo: 0x1e8e, Hi: 0x1e8e, Stride: 0x1}, + unicode.Range16{Lo: 0x1e90, Hi: 0x1e90, Stride: 0x1}, + unicode.Range16{Lo: 0x1e92, Hi: 0x1e92, Stride: 0x1}, + unicode.Range16{Lo: 0x1e94, Hi: 0x1e94, Stride: 0x1}, + unicode.Range16{Lo: 0x1e9e, Hi: 0x1e9e, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea0, Hi: 0x1ea0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea2, Hi: 0x1ea2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea4, Hi: 0x1ea4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea6, Hi: 0x1ea6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ea8, Hi: 0x1ea8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eaa, Hi: 0x1eaa, Stride: 0x1}, 
+ unicode.Range16{Lo: 0x1eac, Hi: 0x1eac, Stride: 0x1}, + unicode.Range16{Lo: 0x1eae, Hi: 0x1eae, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb0, Hi: 0x1eb0, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb2, Hi: 0x1eb2, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb4, Hi: 0x1eb4, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb6, Hi: 0x1eb6, Stride: 0x1}, + unicode.Range16{Lo: 0x1eb8, Hi: 0x1eb8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eba, Hi: 0x1eba, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebc, Hi: 0x1ebc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ebe, Hi: 0x1ebe, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec0, Hi: 0x1ec0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec2, Hi: 0x1ec2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec4, Hi: 0x1ec4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec6, Hi: 0x1ec6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ec8, Hi: 0x1ec8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eca, Hi: 0x1eca, Stride: 0x1}, + unicode.Range16{Lo: 0x1ecc, Hi: 0x1ecc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ece, Hi: 0x1ece, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed0, Hi: 0x1ed0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed2, Hi: 0x1ed2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed4, Hi: 0x1ed4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed6, Hi: 0x1ed6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ed8, Hi: 0x1ed8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eda, Hi: 0x1eda, Stride: 0x1}, + unicode.Range16{Lo: 0x1edc, Hi: 0x1edc, Stride: 0x1}, + unicode.Range16{Lo: 0x1ede, Hi: 0x1ede, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee0, Hi: 0x1ee0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee2, Hi: 0x1ee2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee4, Hi: 0x1ee4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee6, Hi: 0x1ee6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ee8, Hi: 0x1ee8, Stride: 0x1}, + unicode.Range16{Lo: 0x1eea, Hi: 0x1eea, Stride: 0x1}, + unicode.Range16{Lo: 0x1eec, Hi: 0x1eec, Stride: 0x1}, + unicode.Range16{Lo: 0x1eee, Hi: 0x1eee, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef0, Hi: 0x1ef0, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef2, Hi: 0x1ef2, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef4, Hi: 0x1ef4, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef6, Hi: 0x1ef6, Stride: 0x1}, + unicode.Range16{Lo: 0x1ef8, Hi: 0x1ef8, Stride: 0x1}, + unicode.Range16{Lo: 0x1efa, Hi: 0x1efa, Stride: 0x1}, + unicode.Range16{Lo: 0x1efc, Hi: 0x1efc, Stride: 0x1}, + unicode.Range16{Lo: 0x1efe, Hi: 0x1efe, Stride: 0x1}, + unicode.Range16{Lo: 0x1f08, Hi: 0x1f0f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f18, Hi: 0x1f1d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f28, Hi: 0x1f2f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f38, Hi: 0x1f3f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f48, Hi: 0x1f4d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f59, Hi: 0x1f59, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 0x1}, + unicode.Range16{Lo: 0x1f5f, Hi: 0x1f5f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f68, Hi: 0x1f6f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f88, Hi: 0x1f8f, Stride: 0x1}, + unicode.Range16{Lo: 0x1f98, Hi: 0x1f9f, Stride: 0x1}, + unicode.Range16{Lo: 0x1fa8, Hi: 0x1faf, Stride: 0x1}, + unicode.Range16{Lo: 0x1fb8, Hi: 0x1fbc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fc8, Hi: 0x1fcc, Stride: 0x1}, + unicode.Range16{Lo: 0x1fd8, Hi: 0x1fdb, Stride: 0x1}, + unicode.Range16{Lo: 0x1fe8, Hi: 0x1fec, Stride: 0x1}, + unicode.Range16{Lo: 0x1ff8, Hi: 0x1ffc, Stride: 0x1}, + unicode.Range16{Lo: 0x2102, Hi: 0x2102, Stride: 0x1}, + unicode.Range16{Lo: 0x2107, Hi: 0x2107, Stride: 0x1}, + unicode.Range16{Lo: 0x210b, Hi: 0x210d, Stride: 0x1}, + unicode.Range16{Lo: 
0x2110, Hi: 0x2112, Stride: 0x1}, + unicode.Range16{Lo: 0x2115, Hi: 0x2115, Stride: 0x1}, + unicode.Range16{Lo: 0x2119, Hi: 0x211d, Stride: 0x1}, + unicode.Range16{Lo: 0x2124, Hi: 0x2124, Stride: 0x1}, + unicode.Range16{Lo: 0x2126, Hi: 0x2126, Stride: 0x1}, + unicode.Range16{Lo: 0x2128, Hi: 0x2128, Stride: 0x1}, + unicode.Range16{Lo: 0x212a, Hi: 0x212d, Stride: 0x1}, + unicode.Range16{Lo: 0x2130, Hi: 0x2133, Stride: 0x1}, + unicode.Range16{Lo: 0x213e, Hi: 0x213f, Stride: 0x1}, + unicode.Range16{Lo: 0x2145, Hi: 0x2145, Stride: 0x1}, + unicode.Range16{Lo: 0x2160, Hi: 0x216f, Stride: 0x1}, + unicode.Range16{Lo: 0x2183, Hi: 0x2183, Stride: 0x1}, + unicode.Range16{Lo: 0x24b6, Hi: 0x24cf, Stride: 0x1}, + unicode.Range16{Lo: 0x2c00, Hi: 0x2c2e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c60, Hi: 0x2c60, Stride: 0x1}, + unicode.Range16{Lo: 0x2c62, Hi: 0x2c64, Stride: 0x1}, + unicode.Range16{Lo: 0x2c67, Hi: 0x2c67, Stride: 0x1}, + unicode.Range16{Lo: 0x2c69, Hi: 0x2c69, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6b, Hi: 0x2c6b, Stride: 0x1}, + unicode.Range16{Lo: 0x2c6d, Hi: 0x2c70, Stride: 0x1}, + unicode.Range16{Lo: 0x2c72, Hi: 0x2c72, Stride: 0x1}, + unicode.Range16{Lo: 0x2c75, Hi: 0x2c75, Stride: 0x1}, + unicode.Range16{Lo: 0x2c7e, Hi: 0x2c80, Stride: 0x1}, + unicode.Range16{Lo: 0x2c82, Hi: 0x2c82, Stride: 0x1}, + unicode.Range16{Lo: 0x2c84, Hi: 0x2c84, Stride: 0x1}, + unicode.Range16{Lo: 0x2c86, Hi: 0x2c86, Stride: 0x1}, + unicode.Range16{Lo: 0x2c88, Hi: 0x2c88, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8a, Hi: 0x2c8a, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8c, Hi: 0x2c8c, Stride: 0x1}, + unicode.Range16{Lo: 0x2c8e, Hi: 0x2c8e, Stride: 0x1}, + unicode.Range16{Lo: 0x2c90, Hi: 0x2c90, Stride: 0x1}, + unicode.Range16{Lo: 0x2c92, Hi: 0x2c92, Stride: 0x1}, + unicode.Range16{Lo: 0x2c94, Hi: 0x2c94, Stride: 0x1}, + unicode.Range16{Lo: 0x2c96, Hi: 0x2c96, Stride: 0x1}, + unicode.Range16{Lo: 0x2c98, Hi: 0x2c98, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9a, Hi: 0x2c9a, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9c, Hi: 0x2c9c, Stride: 0x1}, + unicode.Range16{Lo: 0x2c9e, Hi: 0x2c9e, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca0, Hi: 0x2ca0, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca2, Hi: 0x2ca2, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca4, Hi: 0x2ca4, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca6, Hi: 0x2ca6, Stride: 0x1}, + unicode.Range16{Lo: 0x2ca8, Hi: 0x2ca8, Stride: 0x1}, + unicode.Range16{Lo: 0x2caa, Hi: 0x2caa, Stride: 0x1}, + unicode.Range16{Lo: 0x2cac, Hi: 0x2cac, Stride: 0x1}, + unicode.Range16{Lo: 0x2cae, Hi: 0x2cae, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb0, Hi: 0x2cb0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb2, Hi: 0x2cb2, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb4, Hi: 0x2cb4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb6, Hi: 0x2cb6, Stride: 0x1}, + unicode.Range16{Lo: 0x2cb8, Hi: 0x2cb8, Stride: 0x1}, + unicode.Range16{Lo: 0x2cba, Hi: 0x2cba, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbc, Hi: 0x2cbc, Stride: 0x1}, + unicode.Range16{Lo: 0x2cbe, Hi: 0x2cbe, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc0, Hi: 0x2cc0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc2, Hi: 0x2cc2, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc4, Hi: 0x2cc4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc6, Hi: 0x2cc6, Stride: 0x1}, + unicode.Range16{Lo: 0x2cc8, Hi: 0x2cc8, Stride: 0x1}, + unicode.Range16{Lo: 0x2cca, Hi: 0x2cca, Stride: 0x1}, + unicode.Range16{Lo: 0x2ccc, Hi: 0x2ccc, Stride: 0x1}, + unicode.Range16{Lo: 0x2cce, Hi: 0x2cce, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd0, Hi: 0x2cd0, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd2, Hi: 0x2cd2, 
Stride: 0x1}, + unicode.Range16{Lo: 0x2cd4, Hi: 0x2cd4, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd6, Hi: 0x2cd6, Stride: 0x1}, + unicode.Range16{Lo: 0x2cd8, Hi: 0x2cd8, Stride: 0x1}, + unicode.Range16{Lo: 0x2cda, Hi: 0x2cda, Stride: 0x1}, + unicode.Range16{Lo: 0x2cdc, Hi: 0x2cdc, Stride: 0x1}, + unicode.Range16{Lo: 0x2cde, Hi: 0x2cde, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce0, Hi: 0x2ce0, Stride: 0x1}, + unicode.Range16{Lo: 0x2ce2, Hi: 0x2ce2, Stride: 0x1}, + unicode.Range16{Lo: 0x2ceb, Hi: 0x2ceb, Stride: 0x1}, + unicode.Range16{Lo: 0x2ced, Hi: 0x2ced, Stride: 0x1}, + unicode.Range16{Lo: 0x2cf2, Hi: 0x2cf2, Stride: 0x1}, + unicode.Range16{Lo: 0xa640, Hi: 0xa640, Stride: 0x1}, + unicode.Range16{Lo: 0xa642, Hi: 0xa642, Stride: 0x1}, + unicode.Range16{Lo: 0xa644, Hi: 0xa644, Stride: 0x1}, + unicode.Range16{Lo: 0xa646, Hi: 0xa646, Stride: 0x1}, + unicode.Range16{Lo: 0xa648, Hi: 0xa648, Stride: 0x1}, + unicode.Range16{Lo: 0xa64a, Hi: 0xa64a, Stride: 0x1}, + unicode.Range16{Lo: 0xa64c, Hi: 0xa64c, Stride: 0x1}, + unicode.Range16{Lo: 0xa64e, Hi: 0xa64e, Stride: 0x1}, + unicode.Range16{Lo: 0xa650, Hi: 0xa650, Stride: 0x1}, + unicode.Range16{Lo: 0xa652, Hi: 0xa652, Stride: 0x1}, + unicode.Range16{Lo: 0xa654, Hi: 0xa654, Stride: 0x1}, + unicode.Range16{Lo: 0xa656, Hi: 0xa656, Stride: 0x1}, + unicode.Range16{Lo: 0xa658, Hi: 0xa658, Stride: 0x1}, + unicode.Range16{Lo: 0xa65a, Hi: 0xa65a, Stride: 0x1}, + unicode.Range16{Lo: 0xa65c, Hi: 0xa65c, Stride: 0x1}, + unicode.Range16{Lo: 0xa65e, Hi: 0xa65e, Stride: 0x1}, + unicode.Range16{Lo: 0xa660, Hi: 0xa660, Stride: 0x1}, + unicode.Range16{Lo: 0xa662, Hi: 0xa662, Stride: 0x1}, + unicode.Range16{Lo: 0xa664, Hi: 0xa664, Stride: 0x1}, + unicode.Range16{Lo: 0xa666, Hi: 0xa666, Stride: 0x1}, + unicode.Range16{Lo: 0xa668, Hi: 0xa668, Stride: 0x1}, + unicode.Range16{Lo: 0xa66a, Hi: 0xa66a, Stride: 0x1}, + unicode.Range16{Lo: 0xa66c, Hi: 0xa66c, Stride: 0x1}, + unicode.Range16{Lo: 0xa680, Hi: 0xa680, Stride: 0x1}, + unicode.Range16{Lo: 0xa682, Hi: 0xa682, Stride: 0x1}, + unicode.Range16{Lo: 0xa684, Hi: 0xa684, Stride: 0x1}, + unicode.Range16{Lo: 0xa686, Hi: 0xa686, Stride: 0x1}, + unicode.Range16{Lo: 0xa688, Hi: 0xa688, Stride: 0x1}, + unicode.Range16{Lo: 0xa68a, Hi: 0xa68a, Stride: 0x1}, + unicode.Range16{Lo: 0xa68c, Hi: 0xa68c, Stride: 0x1}, + unicode.Range16{Lo: 0xa68e, Hi: 0xa68e, Stride: 0x1}, + unicode.Range16{Lo: 0xa690, Hi: 0xa690, Stride: 0x1}, + unicode.Range16{Lo: 0xa692, Hi: 0xa692, Stride: 0x1}, + unicode.Range16{Lo: 0xa694, Hi: 0xa694, Stride: 0x1}, + unicode.Range16{Lo: 0xa696, Hi: 0xa696, Stride: 0x1}, + unicode.Range16{Lo: 0xa698, Hi: 0xa698, Stride: 0x1}, + unicode.Range16{Lo: 0xa69a, Hi: 0xa69a, Stride: 0x1}, + unicode.Range16{Lo: 0xa722, Hi: 0xa722, Stride: 0x1}, + unicode.Range16{Lo: 0xa724, Hi: 0xa724, Stride: 0x1}, + unicode.Range16{Lo: 0xa726, Hi: 0xa726, Stride: 0x1}, + unicode.Range16{Lo: 0xa728, Hi: 0xa728, Stride: 0x1}, + unicode.Range16{Lo: 0xa72a, Hi: 0xa72a, Stride: 0x1}, + unicode.Range16{Lo: 0xa72c, Hi: 0xa72c, Stride: 0x1}, + unicode.Range16{Lo: 0xa72e, Hi: 0xa72e, Stride: 0x1}, + unicode.Range16{Lo: 0xa732, Hi: 0xa732, Stride: 0x1}, + unicode.Range16{Lo: 0xa734, Hi: 0xa734, Stride: 0x1}, + unicode.Range16{Lo: 0xa736, Hi: 0xa736, Stride: 0x1}, + unicode.Range16{Lo: 0xa738, Hi: 0xa738, Stride: 0x1}, + unicode.Range16{Lo: 0xa73a, Hi: 0xa73a, Stride: 0x1}, + unicode.Range16{Lo: 0xa73c, Hi: 0xa73c, Stride: 0x1}, + unicode.Range16{Lo: 0xa73e, Hi: 0xa73e, Stride: 0x1}, + unicode.Range16{Lo: 0xa740, Hi: 0xa740, Stride: 0x1}, + 
unicode.Range16{Lo: 0xa742, Hi: 0xa742, Stride: 0x1}, + unicode.Range16{Lo: 0xa744, Hi: 0xa744, Stride: 0x1}, + unicode.Range16{Lo: 0xa746, Hi: 0xa746, Stride: 0x1}, + unicode.Range16{Lo: 0xa748, Hi: 0xa748, Stride: 0x1}, + unicode.Range16{Lo: 0xa74a, Hi: 0xa74a, Stride: 0x1}, + unicode.Range16{Lo: 0xa74c, Hi: 0xa74c, Stride: 0x1}, + unicode.Range16{Lo: 0xa74e, Hi: 0xa74e, Stride: 0x1}, + unicode.Range16{Lo: 0xa750, Hi: 0xa750, Stride: 0x1}, + unicode.Range16{Lo: 0xa752, Hi: 0xa752, Stride: 0x1}, + unicode.Range16{Lo: 0xa754, Hi: 0xa754, Stride: 0x1}, + unicode.Range16{Lo: 0xa756, Hi: 0xa756, Stride: 0x1}, + unicode.Range16{Lo: 0xa758, Hi: 0xa758, Stride: 0x1}, + unicode.Range16{Lo: 0xa75a, Hi: 0xa75a, Stride: 0x1}, + unicode.Range16{Lo: 0xa75c, Hi: 0xa75c, Stride: 0x1}, + unicode.Range16{Lo: 0xa75e, Hi: 0xa75e, Stride: 0x1}, + unicode.Range16{Lo: 0xa760, Hi: 0xa760, Stride: 0x1}, + unicode.Range16{Lo: 0xa762, Hi: 0xa762, Stride: 0x1}, + unicode.Range16{Lo: 0xa764, Hi: 0xa764, Stride: 0x1}, + unicode.Range16{Lo: 0xa766, Hi: 0xa766, Stride: 0x1}, + unicode.Range16{Lo: 0xa768, Hi: 0xa768, Stride: 0x1}, + unicode.Range16{Lo: 0xa76a, Hi: 0xa76a, Stride: 0x1}, + unicode.Range16{Lo: 0xa76c, Hi: 0xa76c, Stride: 0x1}, + unicode.Range16{Lo: 0xa76e, Hi: 0xa76e, Stride: 0x1}, + unicode.Range16{Lo: 0xa779, Hi: 0xa779, Stride: 0x1}, + unicode.Range16{Lo: 0xa77b, Hi: 0xa77b, Stride: 0x1}, + unicode.Range16{Lo: 0xa77d, Hi: 0xa77e, Stride: 0x1}, + unicode.Range16{Lo: 0xa780, Hi: 0xa780, Stride: 0x1}, + unicode.Range16{Lo: 0xa782, Hi: 0xa782, Stride: 0x1}, + unicode.Range16{Lo: 0xa784, Hi: 0xa784, Stride: 0x1}, + unicode.Range16{Lo: 0xa786, Hi: 0xa786, Stride: 0x1}, + unicode.Range16{Lo: 0xa78b, Hi: 0xa78b, Stride: 0x1}, + unicode.Range16{Lo: 0xa78d, Hi: 0xa78d, Stride: 0x1}, + unicode.Range16{Lo: 0xa790, Hi: 0xa790, Stride: 0x1}, + unicode.Range16{Lo: 0xa792, Hi: 0xa792, Stride: 0x1}, + unicode.Range16{Lo: 0xa796, Hi: 0xa796, Stride: 0x1}, + unicode.Range16{Lo: 0xa798, Hi: 0xa798, Stride: 0x1}, + unicode.Range16{Lo: 0xa79a, Hi: 0xa79a, Stride: 0x1}, + unicode.Range16{Lo: 0xa79c, Hi: 0xa79c, Stride: 0x1}, + unicode.Range16{Lo: 0xa79e, Hi: 0xa79e, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a0, Hi: 0xa7a0, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a2, Hi: 0xa7a2, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a4, Hi: 0xa7a4, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a6, Hi: 0xa7a6, Stride: 0x1}, + unicode.Range16{Lo: 0xa7a8, Hi: 0xa7a8, Stride: 0x1}, + unicode.Range16{Lo: 0xa7aa, Hi: 0xa7ae, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b0, Hi: 0xa7b4, Stride: 0x1}, + unicode.Range16{Lo: 0xa7b6, Hi: 0xa7b6, Stride: 0x1}, + unicode.Range16{Lo: 0xff21, Hi: 0xff3a, Stride: 0x1}, + }, + R32: []unicode.Range32{ + unicode.Range32{Lo: 0x10400, Hi: 0x10427, Stride: 0x1}, + unicode.Range32{Lo: 0x104b0, Hi: 0x104d3, Stride: 0x1}, + unicode.Range32{Lo: 0x10c80, Hi: 0x10cb2, Stride: 0x1}, + unicode.Range32{Lo: 0x118a0, Hi: 0x118bf, Stride: 0x1}, + unicode.Range32{Lo: 0x1d400, Hi: 0x1d419, Stride: 0x1}, + unicode.Range32{Lo: 0x1d434, Hi: 0x1d44d, Stride: 0x1}, + unicode.Range32{Lo: 0x1d468, Hi: 0x1d481, Stride: 0x1}, + unicode.Range32{Lo: 0x1d49c, Hi: 0x1d49c, Stride: 0x1}, + unicode.Range32{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4ae, Hi: 0x1d4b5, Stride: 0x1}, + unicode.Range32{Lo: 0x1d4d0, Hi: 0x1d4e9, Stride: 0x1}, + unicode.Range32{Lo: 
0x1d504, Hi: 0x1d505, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d507, Hi: 0x1d50a, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d50d, Hi: 0x1d514, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d516, Hi: 0x1d51c, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d538, Hi: 0x1d539, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d540, Hi: 0x1d544, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d546, Hi: 0x1d546, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d54a, Hi: 0x1d550, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d56c, Hi: 0x1d585, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d5a0, Hi: 0x1d5b9, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d5d4, Hi: 0x1d5ed, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d608, Hi: 0x1d621, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d63c, Hi: 0x1d655, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d670, Hi: 0x1d689, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d6e2, Hi: 0x1d6fa, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d71c, Hi: 0x1d734, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d756, Hi: 0x1d76e, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d790, Hi: 0x1d7a8, Stride: 0x1},
+		unicode.Range32{Lo: 0x1d7ca, Hi: 0x1d7ca, Stride: 0x1},
+		unicode.Range32{Lo: 0x1e900, Hi: 0x1e921, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f130, Hi: 0x1f149, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f150, Hi: 0x1f169, Stride: 0x1},
+		unicode.Range32{Lo: 0x1f170, Hi: 0x1f189, Stride: 0x1},
+	},
+	LatinOffset: 3,
+}
+
+type _SentenceRuneRange unicode.RangeTable
+
+func _SentenceRuneType(r rune) *_SentenceRuneRange {
+	switch {
+	case unicode.Is(_SentenceATerm, r):
+		return (*_SentenceRuneRange)(_SentenceATerm)
+	case unicode.Is(_SentenceCR, r):
+		return (*_SentenceRuneRange)(_SentenceCR)
+	case unicode.Is(_SentenceClose, r):
+		return (*_SentenceRuneRange)(_SentenceClose)
+	case unicode.Is(_SentenceExtend, r):
+		return (*_SentenceRuneRange)(_SentenceExtend)
+	case unicode.Is(_SentenceFormat, r):
+		return (*_SentenceRuneRange)(_SentenceFormat)
+	case unicode.Is(_SentenceLF, r):
+		return (*_SentenceRuneRange)(_SentenceLF)
+	case unicode.Is(_SentenceLower, r):
+		return (*_SentenceRuneRange)(_SentenceLower)
+	case unicode.Is(_SentenceNumeric, r):
+		return (*_SentenceRuneRange)(_SentenceNumeric)
+	case unicode.Is(_SentenceOLetter, r):
+		return (*_SentenceRuneRange)(_SentenceOLetter)
+	case unicode.Is(_SentenceSContinue, r):
+		return (*_SentenceRuneRange)(_SentenceSContinue)
+	case unicode.Is(_SentenceSTerm, r):
+		return (*_SentenceRuneRange)(_SentenceSTerm)
+	case unicode.Is(_SentenceSep, r):
+		return (*_SentenceRuneRange)(_SentenceSep)
+	case unicode.Is(_SentenceSp, r):
+		return (*_SentenceRuneRange)(_SentenceSp)
+	case unicode.Is(_SentenceUpper, r):
+		return (*_SentenceRuneRange)(_SentenceUpper)
+	default:
+		return nil
+	}
+}
+func (rng *_SentenceRuneRange) String() string {
+	switch (*unicode.RangeTable)(rng) {
+	case _SentenceATerm:
+		return "ATerm"
+	case _SentenceCR:
+		return "CR"
+	case _SentenceClose:
+		return "Close"
+	case _SentenceExtend:
+		return "Extend"
+	case _SentenceFormat:
+		return "Format"
+	case _SentenceLF:
+		return "LF"
+	case _SentenceLower:
+		return "Lower"
+	case _SentenceNumeric:
+		return "Numeric"
+	case _SentenceOLetter:
+		return "OLetter"
+	case _SentenceSContinue:
+		return "SContinue"
+	case _SentenceSTerm:
+		return "STerm"
+	case _SentenceSep:
+		return "Sep"
+	case _SentenceSp:
+		return "Sp"
+	case _SentenceUpper:
+		return "Upper"
+	default:
+		return "Other"
+	}
+}
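These generated tables are plain unicode.RangeTable values, so they plug straight into the standard library's lookup functions. A minimal sketch of how such a table is consumed (the terminators table here is hand-written for illustration; it is not part of the vendored file):

package main

import (
	"fmt"
	"unicode"
)

// A tiny table in the same shape as the generated ones above,
// covering only the ASCII sentence terminators '!', '.', '?'.
var terminators = &unicode.RangeTable{
	R16: []unicode.Range16{
		{Lo: 0x21, Hi: 0x21, Stride: 1}, // '!'
		{Lo: 0x2e, Hi: 0x2e, Stride: 1}, // '.'
		{Lo: 0x3f, Hi: 0x3f, Stride: 1}, // '?'
	},
	LatinOffset: 3, // all three entries are below MaxLatin1
}

func main() {
	for _, r := range "Hi! Bye." {
		// unicode.Is searches R16/R32 for r, which is how
		// _SentenceRuneType dispatches across the tables above.
		fmt.Printf("%q -> %v\n", r, unicode.Is(terminators, r))
	}
}

Keeping the ranges sorted, non-overlapping, and in code-point order is what lets unicode.Is resolve each rune efficiently, which is why the generator emits them that way.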
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb b/vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb
new file mode 100644
index 00000000..422e4e5c
--- /dev/null
+++ b/vendor/github.com/apparentlymart/go-textseg/textseg/unicode2ragel.rb
@@ -0,0 +1,335 @@
+#!/usr/bin/env ruby
+#
+# This script has been updated to accept more command-line arguments:
+#
+#    -u, --url        URL to process
+#    -m, --machine    Machine name
+#    -p, --properties Properties to add to the machine
+#    -o, --output     Write output to file
+#
+# Updated by: Marty Schoch
+#
+# This script uses the unicode spec to generate a Ragel state machine
+# that recognizes unicode alphanumeric characters. It generates 5
+# character classes: uupper, ulower, ualpha, udigit, and ualnum.
+# Currently supported encodings are UTF-8 [default] and UCS-4.
+#
+# Usage: unicode2ragel.rb [options]
+#    -e, --encoding [ucs4 | utf8]     Data encoding
+#    -h, --help                       Show this message
+#
+# This script was originally written as part of the Ferret search
+# engine library.
+#
+# Author: Rakan El-Khalil
+
+require 'optparse'
+require 'open-uri'
+
+ENCODINGS = [ :utf8, :ucs4 ]
+ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
+DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
+DEFAULT_MACHINE_NAME= "WChar"
+
+###
+# Display vars & default option
+
+TOTAL_WIDTH = 80
+RANGE_WIDTH = 23
+@encoding = :utf8
+@chart_url = DEFAULT_CHART_URL
+machine_name = DEFAULT_MACHINE_NAME
+properties = []
+@output = $stdout
+
+###
+# Option parsing
+
+cli_opts = OptionParser.new do |opts|
+  opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
+    @encoding = o.downcase.to_sym
+  end
+  opts.on("-h", "--help", "Show this message") do
+    puts opts
+    exit
+  end
+  opts.on("-u", "--url URL", "URL to process") do |o|
+    @chart_url = o
+  end
+  opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
+    machine_name = o
+  end
+  opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
+    properties = o
+  end
+  opts.on("-o", "--output FILE", "output file") do |o|
+    @output = File.new(o, "w+")
+  end
+end
+
+cli_opts.parse(ARGV)
+unless ENCODINGS.member? @encoding
+  puts "Invalid encoding: #{@encoding}"
+  puts cli_opts
+  exit
+end
+
+##
+# Downloads the document at url and yields every alpha line's hex
+# range and description.
+
+def each_alpha( url, property )
+  open( url ) do |file|
+    file.each_line do |line|
+      next if line =~ /^#/;
+      next if line !~ /; #{property} #/;
+
+      range, description = line.split(/;/)
+      range.strip!
+      description.gsub!(/.*#/, '').strip!
+
+      if range =~ /\.\./
+           start, stop = range.split '..'
+      else start = stop = range
+      end
+
+      yield start.hex .. stop.hex, description
+    end
+  end
+end
+
+###
+# Formats to hex at minimum width
+
+def to_hex( n )
+  r = "%0X" % n
+  r = "0#{r}" unless (r.length % 2).zero?
+  r
+end
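One detail worth calling out: to_hex pads to an even number of digits because build_range, further down, peels off the encoded value two hex digits (one byte) at a time, so every byte must occupy exactly two characters. A Go transliteration of the helper, for illustration only:

package main

import "fmt"

// padHex mirrors the script's to_hex: uppercase hex, left-padded to an
// even number of digits so it splits cleanly into two-digit bytes.
func padHex(n int) string {
	s := fmt.Sprintf("%X", n)
	if len(s)%2 != 0 {
		s = "0" + s
	}
	return s
}

func main() {
	fmt.Println(padHex(0x61))     // "61"
	fmt.Println(padHex(0x7ff))    // "07FF"
	fmt.Println(padHex(0x10ffff)) // "10FFFF"
}

+###
+# UCS4 is just a straight hex conversion of the unicode codepoint.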
+ +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. + +def utf8_ranges( range ) + ranges = [] + UTF8_BOUNDARIES.each do |max| + if range.begin <= max + if range.end <= max + ranges << range + return ranges + end + + ranges << (range.begin .. max) + range = (max + 1) .. range.end + end + end + ranges +end + +def build_range( start, stop ) + size = start.size/2 + left = size - 1 + return [""] if size < 1 + + a = start[0..1] + b = stop[0..1] + + ### + # Shared prefix + + if a == b + return build_range(start[2..-1], stop[2..-1]).map do |elt| + "0x#{a} " + elt + end + end + + ### + # Unshared prefix, end of run + + return ["0x#{a}..0x#{b} "] if left.zero? + + ### + # Unshared prefix, not end of run + # Range can be 0x123456..0x56789A + # Which is equivalent to: + # 0x123456 .. 0x12FFFF + # 0x130000 .. 0x55FFFF + # 0x560000 .. 0x56789A + + ret = [] + ret << build_range(start, a + "FF" * left) + + ### + # Only generate middle range if need be. + + if a.hex+1 != b.hex + max = to_hex(b.hex - 1) + max = "FF" if b == "FF" + ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left + end + + ### + # Don't generate last range if it is covered by first range + + ret << build_range(b + "00" * left, stop) unless b == "FF" + ret.flatten! +end + +def to_utf8( range ) + utf8_ranges( range ).map do |r| + begin_enc = to_utf8_enc(r.begin) + end_enc = to_utf8_enc(r.end) + build_range begin_enc, end_enc + end.flatten! +end + +## +# Perform a 3-way comparison of the number of codepoints advertised by +# the unicode spec for the given range, the originally parsed range, +# and the resulting utf8 encoded range. 
+ +def count_codepoints( code ) + code.split(' ').inject(1) do |acc, elt| + if elt =~ /0x(.+)\.\.0x(.+)/ + if @encoding == :utf8 + acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) + else + acc * ($2.hex - $1.hex + 1) + end + else + acc + end + end +end + +def is_valid?( range, desc, codes ) + spec_count = 1 + spec_count = $1.to_i if desc =~ /\[(\d+)\]/ + range_count = range.end - range.begin + 1 + + sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } + sum == spec_count and sum == range_count +end + +## +# Generate the state machine to stdout + +def generate_machine( name, property ) + pipe = " " + @output.puts "    #{name} = " + each_alpha( @chart_url, property ) do |range, desc| + + codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) + + #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless + # is_valid? range, desc, codes + + range_width = codes.map { |a| a.size }.max + range_width = RANGE_WIDTH if range_width < RANGE_WIDTH + + desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 + desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH + + if desc.size > desc_width + desc = desc[0..desc_width - 4] + "..." + end + + codes.each_with_index do |r, idx| + desc = "" unless idx.zero? + code = "%-#{range_width}s" % r + @output.puts "      #{pipe} #{code} ##{desc}" + pipe = "|" + end + end + @output.puts "      ;" + @output.puts "" +end + +@output.puts <= label + }) + if idx < num && n.edges[idx].label == label { + n.edges[idx].node = node + return + } + panic("replacing missing edge") +} + +func (n *node) getEdge(label byte) *node { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return n.edges[idx].node + } + return nil +} + +func (n *node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} + +// Tree implements a radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over +// a standard hash map is prefix-based lookups and +// ordered iteration. +type Tree struct { + root *node + size int +} + +// New returns an empty Tree +func New() *Tree { + return NewFromMap(nil) +} + +// NewFromMap returns a new tree containing the keys +// from an existing map +func NewFromMap(m map[string]interface{}) *Tree { + t := &Tree{root: &node{}} + for k, v := range m { + t.Insert(k, v) + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// Insert is used to add a new entry or update +// an existing entry. Returns if updated.
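A minimal usage sketch of the Insert/Get contract documented above (illustrative only, not part of the vendored file; it assumes the vendored import path github.com/armon/go-radix):

package main

import (
	"fmt"

	radix "github.com/armon/go-radix"
)

func main() {
	t := radix.New()

	// First insert of a key creates a new entry; updated is false.
	old, updated := t.Insert("foo", 1)
	fmt.Println(old, updated) // <nil> false

	// Re-inserting the same key returns the previous value; updated is true.
	old, updated = t.Insert("foo", 2)
	fmt.Println(old, updated) // 1 true

	if v, ok := t.Get("foo"); ok {
		fmt.Println(v) // 2
	}
}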
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { + var parent *node + n := t.root + search := s + for { + // Handle key exhaustion + if len(search) == 0 { + if n.isLeaf() { + old := n.leaf.val + n.leaf.val = v + return old, true + } + + n.leaf = &leafNode{ + key: s, + val: v, + } + t.size++ + return nil, false + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + e := edge{ + label: search[0], + node: &node{ + leaf: &leafNode{ + key: s, + val: v, + }, + prefix: search, + }, + } + parent.addEdge(e) + t.size++ + return nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + search = search[commonPrefix:] + continue + } + + // Split the node + t.size++ + child := &node{ + prefix: search[:commonPrefix], + } + parent.updateEdge(search[0], child) + + // Restore the existing node + child.addEdge(edge{ + label: n.prefix[commonPrefix], + node: n, + }) + n.prefix = n.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + key: s, + val: v, + } + + // If the new key is a subset, add it to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.leaf = leaf + return nil, false + } + + // Create a new edge for the node + child.addEdge(edge{ + label: search[0], + node: &node{ + leaf: leaf, + prefix: search, + }, + }) + return nil, false + } +} + +// Delete is used to delete a key, returning the previous +// value and if it was deleted +func (t *Tree) Delete(s string) (interface{}, bool) { + var parent *node + var label byte + n := t.root + search := s + for { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + break + } + goto DELETE + } + + // Look for an edge + parent = n + label = search[0] + n = n.getEdge(label) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false + +DELETE: + // Delete the leaf + leaf := n.leaf + n.leaf = nil + t.size-- + + // Check if we should delete this node from the parent + if parent != nil && len(n.edges) == 0 { + parent.delEdge(label) + } + + // Check if we should merge this node + if n != t.root && len(n.edges) == 1 { + n.mergeChild() + } + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + + return leaf.val, true +} + +// DeletePrefix is used to delete the subtree under a prefix +// Returns how many nodes were deleted +// Use this to delete large subtrees efficiently +func (t *Tree) DeletePrefix(s string) int { + return t.deletePrefix(nil, t.root, s) +} + +// deletePrefix does a recursive deletion +func (t *Tree) deletePrefix(parent, n *node, prefix string) int { + // Check for key exhaustion + if len(prefix) == 0 { + // Remove the leaf node + subTreeSize := 0 + // recursively walk from all edges of the node to be deleted + recursiveWalk(n, func(s string, v interface{}) bool { + subTreeSize++ + return false + }) + if n.isLeaf() { + n.leaf = nil + } + n.edges = nil // deletes the entire subtree + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + t.size -= subTreeSize + return subTreeSize + } + + // Look for an edge + label := prefix[0] + child := n.getEdge(label) + if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) { + return 0 + } + + // Consume the search prefix + if len(child.prefix) > len(prefix) { + prefix = prefix[len(prefix):] + } else { + prefix = prefix[len(child.prefix):] + } + return t.deletePrefix(n, child, prefix) +} + +func (n *node) mergeChild() { + e := n.edges[0] + child := e.node + n.prefix = n.prefix + child.prefix + n.leaf = child.leaf + n.edges = child.edges +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(s string) (interface{}, bool) { + n := t.root + search := s + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.val, true + } + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. +func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { + var last *leafNode + n := t.root + search := s + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaustion + if len(search) == 0 { + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return "", nil, false +} + +// Minimum is used to return the minimum value in the tree +func (t *Tree) Minimum() (string, interface{}, bool) { + n := t.root + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return "", nil, false +} + +// Maximum is used to return the maximum value in the tree +func (t *Tree) Maximum() (string, interface{}, bool) { + n := t.root + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + break + } + return "", nil, false +} + +// Walk is used to walk the tree +func (t *Tree) Walk(fn WalkFn) { + recursiveWalk(t.root, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { + n := t.root + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if strings.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } + +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix.
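To make the WalkPrefix/WalkPath contrast concrete, a short illustrative sketch (same assumed import path as above; the callbacks match the WalkFn signature):

t := radix.New()
for _, k := range []string{"a", "ab", "abc", "abd"} {
	t.Insert(k, struct{}{})
}

// Entries *under* the prefix "ab": ab, abc, abd.
t.WalkPrefix("ab", func(k string, v interface{}) bool {
	fmt.Println("under:", k)
	return false // returning true aborts the walk
})

// Entries *above* (on the path to) "abc": a, ab, abc.
t.WalkPath("abc", func(k string, v interface{}) bool {
	fmt.Println("above:", k)
	return false
})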
+func (t *Tree) WalkPath(path string, fn WalkFn) { + n := t.root + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaustion + if len(search) == 0 { + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. Returns true if the walk should be aborted +func recursiveWalk(n *node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// ToMap is used to walk the tree and convert it into a map +func (t *Tree) ToMap() map[string]interface{} { + out := make(map[string]interface{}, t.size) + t.Walk(func(k string, v interface{}) bool { + out[k] = v + return false + }) + return out +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go index fc38172f..710eb432 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -61,6 +61,12 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") case reflect.Slice: + strtype := v.Type().String() + if strtype == "[]uint8" { + fmt.Fprintf(buf, " len %d", v.Len()) + break + } + nl, id, id2 := "", "", "" if v.Len() > 3 { nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 4003c04b..212fe25e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -2,8 +2,6 @@ package client import ( "fmt" - "io/ioutil" - "net/http/httputil" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client/metadata" @@ -12,9 +10,17 @@ import ( // A Config provides configuration to a service client instance. type Config struct { - Config *aws.Config - Handlers request.Handlers - Endpoint, SigningRegion string + Config *aws.Config + Handlers request.Handlers + Endpoint string + SigningRegion string + SigningName string + + // States that the signing name did not come from a modeled source but + // was derived based on other data. Used by service client constructors + // to determine if the signing name can be overridden based on metadata the + // service has. + SigningNameDerived bool } // ConfigProvider provides a generic way for a service client to receive @@ -23,6 +29,13 @@ type ConfigProvider interface { ClientConfig(serviceName string, cfgs ...*aws.Config) Config } +// ConfigNoResolveEndpointProvider is the same as ConfigProvider except it will not +// resolve the endpoint automatically. The service client's endpoint must be +// provided via the aws.Config.Endpoint field. +type ConfigNoResolveEndpointProvider interface { + ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config +} + // A Client implements the base client request and response handling // used by all service clients.
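A hedged sketch of how this client.Config is usually produced: session.Session is the common ConfigProvider implementation, and its ClientConfig method resolves the endpoint and signing region that service constructors consume (the "s3" service name and region below are only illustrative):

sess := session.Must(session.NewSession())

// ClientConfig resolves the endpoint and signing region for a service
// from the session's aws.Config and endpoint resolver.
cfg := sess.ClientConfig("s3", aws.NewConfig().WithRegion("us-west-2"))
fmt.Println(cfg.Endpoint, cfg.SigningRegion)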
type Client struct { @@ -38,7 +51,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op svc := &Client{ Config: cfg, ClientInfo: info, - Handlers: handlers, + Handlers: handlers.Copy(), } switch retryer, ok := cfg.Retryer.(request.Retryer); { @@ -78,62 +91,6 @@ func (c *Client) AddDebugHandlers() { return } - c.Handlers.Send.PushFront(logRequest) - c.Handlers.Send.PushBack(logResponse) -} - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.Body.Seek(r.BodyStart, 0) - r.HTTPRequest.Body = ioutil.NopCloser(r.Body) - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -func logResponse(r *request.Request) { - var msg = "no response data" - if r.HTTPResponse != nil { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - msg = string(dumpedBody) - } else if r.Error != nil { - msg = r.Error.Error() - } - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 43a3676b..a397b0d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,11 +1,11 @@ package client import ( - "math/rand" - "sync" + "strconv" "time" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" ) // DefaultRetryer implements basic retry logic using exponential backoff for @@ -15,11 +15,11 @@ import ( // the MaxRetries method: // // type retryer struct { -// service.DefaultRetryer +// client.DefaultRetryer // } // // // This implementation always has 100 max retries -// func (d retryer) MaxRetries() uint { return 100 } +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } @@ -30,31 +30,39 @@ func (d DefaultRetryer) MaxRetries() int { return d.NumMaxRetries } -var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) - // 
RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // Set the upper limit of delay in retrying at ~five minutes minTime := 30 throttle := d.shouldThrottle(r) if throttle { + if delay, ok := getRetryDelay(r); ok { + return delay + } + minTime = 500 } retryCount := r.RetryCount - if retryCount > 13 { - retryCount = 13 - } else if throttle && retryCount > 8 { + if throttle && retryCount > 8 { retryCount = 8 + } else if retryCount > 13 { + retryCount = 13 } - delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) + delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) return time.Duration(delay) * time.Millisecond } // ShouldRetry returns true if the request should be retried. func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - if r.HTTPResponse.StatusCode >= 500 { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + + if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { return true } return r.IsErrorRetryable() || d.shouldThrottle(r) @@ -62,29 +70,47 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { // ShouldThrottle returns true if the request should be throttled. func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - if r.HTTPResponse.StatusCode == 502 || - r.HTTPResponse.StatusCode == 503 || - r.HTTPResponse.StatusCode == 504 { - return true + switch r.HTTPResponse.StatusCode { + case 429: + case 502: + case 503: + case 504: + default: + return r.IsErrorThrottle() } - return r.IsErrorThrottle() -} -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source + return true } -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return +// getRetryDelay looks in the Retry-After header, RFC 7231, for how long +// to wait before attempting another request +func getRetryDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true } -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() +// canUseRetryAfterHeader looks at the status code to see if the Retry-After +// header pertains to the status code.
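The retryer-embedding pattern from the doc comment above, expanded into a compilable sketch (type name is illustrative; client is aws/client, session is aws/session):

// retryer reuses DefaultRetryer's RetryRules and ShouldRetry but
// always allows 100 attempts.
type retryer struct {
	client.DefaultRetryer
}

func (d retryer) MaxRetries() int { return 100 }

// Installed through aws.Config.Retryer, which accepts any request.Retryer.
sess := session.Must(session.NewSession(&aws.Config{
	Retryer: retryer{},
}))
_ = sess // sess is then handed to service client constructors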
+func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 00000000..ce9fb896 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,184 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.ResetBody() + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. 
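These logging handlers are installed by AddDebugHandlers and only fire when the request's log level enables them; a brief hedged sketch of turning on body-level HTTP logging (LogDebug alone logs without bodies):

sess := session.Must(session.NewSession(&aws.Config{
	Logger:   aws.NewDefaultLogger(), // stdout logger; also the SDK default when unset
	LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
}))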
+var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. +var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 4778056d..920e9fdd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -3,6 +3,7 @@ package metadata // ClientInfo wraps immutable data from the client.Client structure. 
type ClientInfo struct { ServiceName string + ServiceID string APIVersion string Endpoint string SigningName string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index fca92258..e9695ef2 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -5,6 +5,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" ) // UseServiceDefaultRetries instructs the config to use the service's own @@ -17,13 +18,13 @@ const UseServiceDefaultRetries = -1 type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, -// all clients will use the defaults.DefaultConfig tructure. +// all clients will use the defaults.DefaultConfig structure. // // // Create Session with MaxRetry configuration to be shared by multiple // // service clients. -// sess, err := session.NewSession(&aws.Config{ +// sess := session.Must(session.NewSession(&aws.Config{ // MaxRetries: aws.Int(3), -// }) +// })) // // // Create S3 service client with a specific Region. // svc := s3.New(sess, &aws.Config{ @@ -44,17 +45,28 @@ type Config struct { // that overrides the default generated endpoint for a client. Set this // to `""` to use the default generated endpoint. // - // @note You must still provide a `Region` value when specifying an - // endpoint for a client. + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. Endpoint *string + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether request.Retryable is set. + // This will utilize the ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + // The region to send requests to. This parameter is required and must // be configured globally or on a per-client basis unless otherwise // noted. A full list of regions is found in the "Regions and Endpoints" // document. // - // @see http://docs.aws.amazon.com/general/latest/gr/rande.html - // AWS Regions and Endpoints + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. Region *string // Set this to `true` to disable SSL when sending requests. Defaults @@ -83,7 +95,7 @@ type Config struct { // recoverable failures. // // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. + // the client.DefaultRetryer will be used. // // When both Retryer and MaxRetries are non-nil, the former is used and // the latter ignored. @@ -108,9 +120,10 @@ type Config struct { // will use virtual hosted bucket addressing when possible // (`http://BUCKET.s3.amazonaws.com/KEY`). // - // @note This configuration option is specific to the Amazon S3 service. - // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // Amazon S3: Virtual Hosting of Buckets + // Note: This configuration option is specific to the Amazon S3 service.
+ // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets S3ForcePathStyle *bool // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` @@ -137,11 +150,17 @@ type Config struct { // accelerate enabled. If the bucket is not enabled for accelerate an error // will be returned. The bucket name must be DNS compatible to also work // with accelerate. - // - // Not compatible with UseDualStack requests will fail if both flags are - // specified. S3UseAccelerate *bool + // S3DisableContentMD5Validation config option is temporarily disabled, + // for S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the // EC2Metadata client to create a new http.Client. This option is only @@ -152,13 +171,14 @@ type Config struct { // the EC2Metadata overriding the timeout for default credentials chain. // // Example: - // sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDiableTimeoutOverride(true))) // // svc := s3.New(sess) // EC2MetadataDisableTimeoutOverride *bool - // Instructs the endpiont to be generated for a service client to + // Instructs the endpoint to be generated for a service client to // be the dual stack endpoint. The dual stack endpoint will support // both IPv4 and IPv6 addressing. // @@ -172,7 +192,7 @@ type Config struct { // // Only supported with. // - // sess, err := session.NewSession() + // sess := session.Must(session.NewSession()) // // svc := s3.New(sess, &aws.Config{ // UseDualStack: aws.Bool(true), @@ -184,7 +204,41 @@ type Config struct { // request delays. This value should only be used for testing. To adjust // the delay of a request see the aws/client.DefaultRetryer and // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off.
+ // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool } // NewConfig returns a new Config pointer that can be chained with builder @@ -192,9 +246,9 @@ type Config struct { // // // Create Session with MaxRetry configuration to be shared by multiple // // service clients. -// sess, err := session.NewSession(aws.NewConfig(). +// sess := session.Must(session.NewSession(aws.NewConfig(). // WithMaxRetries(3), -// ) +// )) // // // Create S3 service client with a specific Region. // svc := s3.New(sess, aws.NewConfig(). @@ -225,6 +279,13 @@ func (c *Config) WithEndpoint(endpoint string) *Config { return c } +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + // WithRegion sets a config Region value returning a Config pointer for // chaining. func (c *Config) WithRegion(region string) *Config { @@ -300,6 +361,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config { func (c *Config) WithS3UseAccelerate(enable bool) *Config { c.S3UseAccelerate = &enable return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + } // WithUseDualStack sets a config UseDualStack value returning a Config @@ -323,6 +393,12 @@ func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { return c } +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + // MergeIn merges the passed in configs into the existing config object. func (c *Config) MergeIn(cfgs ...*Config) { for _, other := range cfgs { @@ -347,6 +423,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.Endpoint = other.Endpoint } + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + if other.Region != nil { dst.Region = other.Region } @@ -395,6 +475,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3UseAccelerate = other.S3UseAccelerate } + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + if other.UseDualStack != nil { dst.UseDualStack = other.UseDualStack } @@ -406,6 +490,18 @@ func mergeInConfig(dst *Config, other *Config) { if other.SleepDelay != nil { dst.SleepDelay = other.SleepDelay } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } } // Copy will return a shallow copy of the Config object. 
If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go new file mode 100644 index 00000000..79f42685 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context.go @@ -0,0 +1,71 @@ +package aws + +import ( + "time" +) + +// Context is a copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return backgroundCtx +} + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Whichever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go new file mode 100644 index 00000000..8fdda530 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go @@ -0,0 +1,41 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses.
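A short sketch of SleepWithContext in practice; on Go 1.7 and later the stdlib's context.Context satisfies aws.Context directly (the durations below are arbitrary):

ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()

// The sleep is cut short when the context's deadline expires.
if err := aws.SleepWithContext(ctx, 10*time.Second); err != nil {
	fmt.Println("sleep aborted:", err) // context deadline exceeded
}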
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go new file mode 100644 index 00000000..064f75c9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go @@ -0,0 +1,9 @@ +// +build go1.7 + +package aws + +import "context" + +var ( + backgroundCtx = context.Background() +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index 3b73a7da..ff5d58e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -311,6 +311,24 @@ func TimeValue(v *time.Time) time.Time { return time.Time{} } +// SecondsTimeValue converts an int64 pointer to a time.Time value +// representing seconds since Epoch or time.Time{} if the pointer is nil. +func SecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix((*v / 1000), 0) + } + return time.Time{} +} + +// MillisecondsTimeValue converts an int64 pointer to a time.Time value +// representing milliseconds since Epoch or time.Time{} if the pointer is nil. +func MillisecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix(0, (*v * 1000000)) + } + return time.Time{} +} + // TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". // The result is undefined if the Unix time cannot be represented by an int64, // which includes calling TimeUnixMilli on a zero Time. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 8456e29b..cfcddf3d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -3,16 +3,16 @@ package corehandlers import ( "bytes" "fmt" - "io" "io/ioutil" "net/http" "net/url" "regexp" - "runtime" "strconv" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" ) @@ -25,7 +25,7 @@ type lener interface { // or will use the HTTPRequest.Header's "Content-Length" if defined. If unable // to determine request body length and no "Content-Length" was specified it will panic. // -// The Content-Length will only be aded to the request if the length of the body +// The Content-Length will only be added to the request if the length of the body // is greater than 0. If the body is empty or the current `Content-Length` // header is <= 0, the header will also be stripped.
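Stepping back to the convert_types.go helpers above, a quick hedged example; note that SecondsTimeValue, as written, divides its input by 1000, so both helpers effectively take a millisecond-epoch value:

ms := aws.Int64(1541987896000) // a Unix-epoch timestamp in milliseconds

fmt.Println(aws.MillisecondsTimeValue(ms).UTC()) // millisecond precision kept
fmt.Println(aws.SecondsTimeValue(ms).UTC())      // truncated to whole seconds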
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { @@ -34,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { length, _ = strconv.ParseInt(slength, 10, 64) } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } } } @@ -58,54 +53,126 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen } }} -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. -var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - var reStatusCode = regexp.MustCompile(`^(\d{3})`) +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. +var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 10 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(10 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + // SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { - var err error - r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) - if err != nil { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other url redirect errors will - // comeback in a similar method. 
- if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), + StatusCode: int(code), + Status: http.StatusText(int(code)), Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } + return } - // Catch all other request errors. - r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable } -}} + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} // ValidateResponseHandler is a request handler to validate service response. 
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { @@ -120,13 +187,22 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { // If one of the other handlers already set the retry state // we don't want to override it based on the service's state - if r.Retryable == nil { + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { r.Retryable = aws.Bool(r.ShouldRetry(r)) } if r.WillRetry() { r.RetryDelay = r.RetryRules(r) - r.Config.SleepDelay(r.RetryDelay) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } // when the expired token exception occurs the credentials // need to be expired locally so that the next request to diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 00000000..a15f496b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec_env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go index 857311f6..3ad1e798 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -9,11 +9,9 @@ var ( // providers in the ChainProvider. // // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true - // - // @readonly + // aws.Config.CredentialsChainVerboseErrors to true. ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. + `no valid providers in chain. Deprecated. For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, nil) ) @@ -34,21 +32,23 @@ var ( // // Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. // In this example EnvProvider will first check if any credentials are available -// vai the environment variables. 
If there are none ChainProvider will check +// via the environment variables. If there are none ChainProvider will check // the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider // does not return any credentials ChainProvider will return the error // ErrNoValidProvidersFoundInChain // -// creds := NewChainCredentials( -// []Provider{ -// &EnvProvider{}, -// &EC2RoleProvider{ +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ // Client: ec2metadata.New(sess), // }, // }) // // // Usage of ChainCredentials with aws.Config -// svc := ec2.New(&aws.Config{Credentials: creds}) +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) // type ChainProvider struct { Providers []Provider diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 7b8ebf5f..dc82f4c3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -14,7 +14,7 @@ // // Example of using the environment variable credentials. // -// creds := NewEnvCredentials() +// creds := credentials.NewEnvCredentials() // // // Retrieve the credentials value // credValue, err := creds.Get() @@ -26,7 +26,7 @@ // This may be helpful to proactively expire credentials and refresh them sooner // than they would naturally expire on their own. // -// creds := NewCredentials(&EC2RoleProvider{}) +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) // creds.Expire() // credsValue, err := creds.Get() // // New credentials will be retrieved instead of from cache. @@ -43,7 +43,7 @@ // func (m *MyProvider) Retrieve() (Value, error) {...} // func (m *MyProvider) IsExpired() bool {...} // -// creds := NewCredentials(&MyProvider{}) +// creds := credentials.NewCredentials(&MyProvider{}) // credValue, err := creds.Get() // package credentials @@ -60,10 +60,10 @@ import ( // when making service API calls. For example, when accessing public // s3 buckets. // -// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) // // Access public S3 buckets. -// -// @readonly var AnonymousCredentials = NewStaticCredentials("", "", "") // A Value is the AWS credentials value for individual credential fields. @@ -88,7 +88,7 @@ type Value struct { // The Provider should not need to implement its own mutexes, because // that will be managed by Credentials. type Provider interface { - // Refresh returns nil if it successfully retrieved the value. + // Retrieve returns nil if it successfully retrieved the value. // Error is returned if the value was not obtainable, or empty. Retrieve() (Value, error) @@ -97,6 +97,27 @@ type Provider interface { IsExpired() bool } +// An ErrorProvider is a stub credentials provider that always returns an error. +// It is used by the SDK when constructing a known provider is not possible +// due to an error. +type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the retrieved Value + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with.
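A minimal sketch of where ErrorProvider fits: a constructor that fails can still hand back a usable Credentials object whose Get reports the original error (the error code and messages below are made up for illustration):

creds := credentials.NewCredentials(credentials.ErrorProvider{
	Err:          awserr.New("SharedConfigErr", "could not load shared config", nil),
	ProviderName: "SharedConfigProvider",
})

// Every Get returns the wrapped error instead of failing later.
if _, err := creds.Get(); err != nil {
	fmt.Println(err)
}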
+// Retrieve will always return the error that the ErrorProvider was created with. +func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + // An Expiry provides shared expiration logic to be used by credentials // providers to implement expiry functionality. // @@ -135,13 +156,14 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { // IsExpired returns if the credentials are expired. func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now } - return e.expiration.Before(e.CurrentTime()) + return e.expiration.Before(curTime()) } -// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// A Credentials provides concurrency safe retrieval of AWS credentials Value. // Credentials will cache the credentials value until they expire. Once the value // expires the next Get will attempt to retrieve valid credentials. // @@ -155,7 +177,8 @@ func (e *Expiry) IsExpired() bool { type Credentials struct { creds Value forceRefresh bool - m sync.Mutex + + m sync.RWMutex provider Provider } @@ -178,6 +201,17 @@ func NewCredentials(provider Provider) *Credentials { // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. func (c *Credentials) Get() (Value, error) { + // Check the cached credentials first with just the read lock. + c.m.RLock() + if !c.isExpired() { + creds := c.creds + c.m.RUnlock() + return creds, nil + } + c.m.RUnlock() + + // Credentials are expired, need to retrieve the credentials taking the full + // lock. c.m.Lock() defer c.m.Unlock() @@ -211,8 +245,8 @@ func (c *Credentials) Expire() { // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { - c.m.Lock() - defer c.m.Unlock() + c.m.RLock() + defer c.m.RUnlock() return c.isExpired() } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index aa9d689a..0ed791be 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -4,7 +4,6 @@ import ( "bufio" "encoding/json" "fmt" - "path" "strings" "time" @@ -12,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/internal/sdkuri" ) // ProviderName provides a name of EC2Role provider @@ -111,7 +111,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { }, nil } -// A ec2RoleCredRespBody provides the shape for unmarshalling credential +// An ec2RoleCredRespBody provides the shape for unmarshaling credential // request responses. type ec2RoleCredRespBody struct { // Success State @@ -125,7 +125,7 @@ type ec2RoleCredRespBody struct { Message string } -const iamSecurityCredsPath = "/iam/security-credentials" +const iamSecurityCredsPath = "iam/security-credentials/" // requestCredList requests a list of credentials from the EC2 service.
// If there are no credentials, or there is an error making or receiving the request @@ -153,7 +153,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { // If the credentials cannot be found, or there is an error reading the response // an error will be returned. func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index a4cec5c5..ace51313 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -65,6 +65,10 @@ type Provider struct { // // If ExpiryWindow is 0 or less it will be ignored. ExpiryWindow time.Duration + + // Optional authorization token value. If set, it will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string } // NewProviderClient returns a credentials Provider for retrieving AWS credentials @@ -152,6 +156,9 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) { out := &getCredentialsOutput{} req := p.Client.NewRequest(op, nil, out) req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } return out, req.Send() } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go index 96655bc4..54c5cf73 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -12,14 +12,10 @@ const EnvProviderName = "EnvProvider" var ( // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be // found in the process's environment. - // - // @readonly ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key // can't be found in the process's environment.
- // - // @readonly ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) ) @@ -29,6 +25,7 @@ var ( // Environment variables used: // // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY type EnvProvider struct { retrieved bool diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index 7fb7cbf0..e1551495 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -3,11 +3,10 @@ package credentials import ( "fmt" "os" - "path/filepath" - - "github.com/go-ini/ini" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" ) // SharedCredsProviderName provides a name of SharedCreds provider @@ -15,8 +14,6 @@ const SharedCredsProviderName = "SharedCredentialsProvider" var ( // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - // - // @readonly ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) ) @@ -79,36 +76,37 @@ func (p *SharedCredentialsProvider) IsExpired() bool { // The credentials retrieved from the profile will be returned or error. Error will be // returned if it fails to read from the file, or the data is invalid. func loadProfile(filename, profile string) (Value, error) { - config, err := ini.Load(filename) + config, err := ini.OpenFile(filename) if err != nil { return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) } - iniProfile, err := config.GetSection(profile) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + + iniProfile, ok := config.GetSection(profile) + if !ok { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil) } - id, err := iniProfile.GetKey("aws_access_key_id") - if err != nil { + id := iniProfile.String("aws_access_key_id") + if len(id) == 0 { return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - err) + nil) } - secret, err := iniProfile.GetKey("aws_secret_access_key") - if err != nil { + secret := iniProfile.String("aws_secret_access_key") + if len(secret) == 0 { return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), nil) } // Default to empty string if not found - token := iniProfile.Key("aws_session_token") + token := iniProfile.String("aws_session_token") return Value{ - AccessKeyID: id.String(), - SecretAccessKey: secret.String(), - SessionToken: token.String(), + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, ProviderName: SharedCredsProviderName, }, nil } @@ -117,22 +115,23 @@ func loadProfile(filename, profile string) (Value, error) { // // Will return an error if the user's home directory path cannot be found. 
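From the caller's side, the lookup order implemented by filename() below is: explicit Filename field, then the AWS_SHARED_CREDENTIALS_FILE environment variable, then the platform default. A minimal sketch, with a hypothetical path and profile name:

    creds := credentials.NewCredentials(&credentials.SharedCredentialsProvider{
        Filename: "/opt/app/aws-credentials", // hypothetical; leave empty to fall through
        Profile:  "deploy",                   // hypothetical; leave empty for "default"
    })
    v, err := creds.Get() // on success, v.ProviderName == credentials.SharedCredsProviderName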
func (p *SharedCredentialsProvider) filename() (string, error) { - if p.Filename == "" { - if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { - return p.Filename, nil - } - - homeDir := os.Getenv("HOME") // *nix - if homeDir == "" { // Windows - homeDir = os.Getenv("USERPROFILE") - } - if homeDir == "" { - return "", ErrSharedCredentialsHomeNotFound - } - - p.Filename = filepath.Join(homeDir, ".aws", "credentials") + if len(p.Filename) != 0 { + return p.Filename, nil + } + + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 { + return p.Filename, nil } + if home := shareddefaults.UserHomeDir(); len(home) == 0 { + // Backwards compatibility of the home directory not found error being returned. + // This error is too verbose; failure when opening the file would have been + // a better error to return. + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = shareddefaults.SharedCredentialsFilename() + return p.Filename, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go index 4f5dab3f..531139e3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -9,8 +9,6 @@ const StaticProviderName = "StaticProvider" var ( // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - // - // @readonly ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) ) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 30c847ae..4108e433 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -1,7 +1,81 @@ -// Package stscreds are credential Providers to retrieve STS AWS credentials. -// -// STS provides multiple ways to retrieve credentials which can be used when making -// future AWS service API operation calls. +/* +Package stscreds are credential Providers to retrieve STS AWS credentials. + +STS provides multiple ways to retrieve credentials which can be used when making +future AWS service API operation calls. + +The SDK will ensure that per instance of credentials.Credentials all requests +to refresh the credentials will be synchronized. But, the SDK is unable to +ensure synchronous usage of the AssumeRoleProvider if the value is shared +between multiple Credentials, Sessions or service clients. + +Assume Role + +To assume an IAM role using STS with the SDK you can create a new Credentials +with the SDK's stscreds package. + + // Initial credentials loaded from SDK's default credential chain. Such as + // the environment, shared credentials (~/.aws/credentials), or EC2 Instance + // Role. These credentials will be used to make the STS Assume Role API. + sess := session.Must(session.NewSession()) + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. + creds := stscreds.NewCredentials(sess, "myRoleArn") + + // Create service client value configured for credentials + // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with static MFA Token + +To assume an IAM role with an MFA token you can either specify an MFA token code +directly or provide a function to prompt the user each time the role's +credentials need to be refreshed. Specifying the TokenCode should be used +for short lived operations that will not need to be refreshed, and when you do +not want to have direct control over how the user provides their MFA token. + +With TokenCode the AssumeRoleProvider will not be able to refresh the role's +credentials. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN using the MFA token code provided. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenCode = aws.String("00000000") + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with MFA Token Provider + +To assume an IAM role with MFA for longer running tasks where the credentials +may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider +will allow the credential provider to prompt for a new MFA token code when the +role's credentials need to be refreshed. + +The StdinTokenProvider function is available to prompt on stdin to retrieve +the MFA token code from the user. You can also implement custom prompts by +satisfying the TokenProvider function signature. + +Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +have undesirable results as the StdinTokenProvider will not be synchronized. A +single Credentials with an AssumeRoleProvider can be shared safely. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenProvider = stscreds.StdinTokenProvider + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +*/ package stscreds import ( @@ -9,11 +83,31 @@ "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/service/sts" )
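For contrast with StdinTokenProvider below, a sketch of a non-interactive TokenProvider satisfying the same func() (string, error) signature; the environment variable name is made up for illustration, and sess is a *session.Session as in the examples above:

    creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
        p.SerialNumber = aws.String("myTokenSerialNumber")
        p.TokenProvider = func() (string, error) {
            // Read the current MFA code from the environment on each refresh.
            if code := os.Getenv("APP_MFA_TOKEN_CODE"); code != "" {
                return code, nil
            }
            return "", errors.New("APP_MFA_TOKEN_CODE not set")
        }
    })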
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value. +// An error is returned if reading from stdin fails. +// +// Use this function to read MFA tokens from stdin. The function makes no attempt +// to make atomic prompts from stdin across multiple goroutines. +// +// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +// have undesirable results as the StdinTokenProvider will not be synchronized. A +// single Credentials with an AssumeRoleProvider can be shared safely. +// +// Will wait forever until something is provided on the stdin. +func StdinTokenProvider() (string, error) { + var v string + fmt.Printf("Assume Role MFA token code: ") + _, err := fmt.Scanln(&v) + + return v, err +} + // ProviderName provides a name of AssumeRole provider const ProviderName = "AssumeRoleProvider" @@ -27,8 +121,15 @@ type AssumeRoler interface { var DefaultDuration = time.Duration(15) * time.Minute // AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. This provider must be used explicitly, -// as it is not included in the credentials chain. +// keeps track of their expiration time. +// +// This credential provider will be used by the SDK's default credential chain +// when shared configuration is enabled, and the shared config or shared credentials +// file configures assume role. See Session docs for how to do this. +// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. type AssumeRoleProvider struct { credentials.Expiry @@ -65,8 +166,23 @@ type AssumeRoleProvider struct { // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). If the role being assumed requires MFA and if the TokenCode value // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider are also + // set, an error will be returned. TokenCode *string + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called whenever the assumed role's + // credentials need to be refreshed when SerialNumber is also set and + // TokenCode is not set. + // + // If both TokenCode and TokenProvider are set, TokenProvider will be used and + // TokenCode is ignored. + TokenProvider func() (string, error) + // ExpiryWindow will allow the credentials to trigger refreshing prior to // the credentials actually expiring. This is beneficial so race conditions // with expiring credentials do not cause requests to fail unexpectedly @@ -85,6 +201,10 @@ type AssumeRoleProvider struct { // // Takes a Config provider to create the STS client. The ConfigProvider is // satisfied by the session.Session type. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { p := &AssumeRoleProvider{ Client: sts.New(c), @@ -103,7 +223,11 @@ func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*As // AssumeRoleProvider. The credentials will expire every 15 minutes and the // role will be named after a nanosecond timestamp of this operation. // -// Takes an AssumeRoler which can be satisfiede by the STS client. +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized.
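A sketch of how NewCredentialsWithClient differs from NewCredentials: the caller supplies a preconfigured STS client instead of a ConfigProvider, so client-level settings such as the region can be controlled directly (the region value here is illustrative):

    sess := session.Must(session.NewSession())
    stsSvc := sts.New(sess, aws.NewConfig().WithRegion("us-west-2"))
    creds := stscreds.NewCredentialsWithClient(stsSvc, "myRoleArn")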
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { p := &AssumeRoleProvider{ Client: svc, @@ -139,12 +263,25 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { if p.Policy != nil { input.Policy = p.Policy } - if p.SerialNumber != nil && p.TokenCode != nil { - input.SerialNumber = p.SerialNumber - input.TokenCode = p.TokenCode + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } } - roleOutput, err := p.Client.AssumeRole(input) + roleOutput, err := p.Client.AssumeRole(input) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 00000000..152d785b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,46 @@ +// Package csm provides Client Side Monitoring (CSM) which enables sending metrics +// via a UDP connection. Using the Start function will enable the reporting of +// metrics on a given port. If Start is called again with different parameters, +// a panic will occur. +// +// Pause can be called to pause any metrics publishing on a given port. Sessions +// that have had their handlers modified via InjectHandlers may still be used. +// However, the handlers will act as a no-op, meaning no metrics will be published. +// +// Example: +// r, err := csm.Start("clientID", ":31000") +// if err != nil { +// panic(fmt.Errorf("failed starting CSM: %v", err)) +// } +// +// sess, err := session.NewSession(&aws.Config{}) +// if err != nil { +// panic(fmt.Errorf("failed loading session: %v", err)) +// } +// +// r.InjectHandlers(&sess.Handlers) +// +// client := s3.New(sess) +// resp, err := client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Will pause monitoring +// r.Pause() +// resp, err = client.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +// +// // Resume monitoring +// r.Continue() +// +// Start returns a Reporter that is used to enable or disable monitoring. If +// access to the Reporter is required later, calling Get will return the Reporter +// singleton. +// +// Example: +// r := csm.Get() +// r.Continue() +package csm diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go new file mode 100644 index 00000000..2f0c6eac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go @@ -0,0 +1,67 @@ +package csm + +import ( + "fmt" + "sync" +) + +var ( + lock sync.Mutex +) + +// Client side metric handler names +const ( + APICallMetricHandlerName = "awscsm.SendAPICallMetric" + APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric" +) + +// Start will start a long running goroutine to capture +// client side metrics.
Calling Start multiple times will only +// start the metric listener once and will panic if a different +// client ID or port is passed in. +// +// Example: +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// sess := session.NewSession() +// r.InjectHandlers(sess.Handlers) +// +// svc := s3.New(sess) +// out, err := svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +func Start(clientID string, url string) (*Reporter, error) { + lock.Lock() + defer lock.Unlock() + + if sender == nil { + sender = newReporter(clientID, url) + } else { + if sender.clientID != clientID { + panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) + } + + if sender.url != url { + panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) + } + } + + if err := connect(url); err != nil { + sender = nil + return nil, err + } + + return sender, nil +} + +// Get will return a reporter if one exists; if one does not exist, nil will +// be returned. +func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 00000000..6f57024d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,53 @@ +package csm + +import ( + "strconv" + "time" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 00000000..514fc373 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,54 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize is the number of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(&ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(&ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(&ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused. +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 00000000..11861844 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,242 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // DefaultPort is used when no port is specified + DefaultPort = "31000" ) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. +type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptCount: aws.Int(r.RetryCount + 1), + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + setError(&m, awserr) + } + } + + rep.metricsCh.Push(m) +} + +func setError(m *metric, err awserr.Error) { + msg := err.Error() + code := err.Code() + + switch code { + case "RequestError", + "SerializationError", + request.CanceledErrorCode: + m.SDKException = &code + m.SDKExceptionMessage = &msg + default: + m.AWSException = &code + m.AWSExceptionMessage = &msg + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID:
aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? Probably should just log + b, err := json.Marshal(m) + if err != nil { + continue + } + + rep.conn.Write(b) + } + } +} + +// Pause will pause the metric channel, preventing any new metrics from +// being added. +func (rep *Reporter) Pause() { + lock.Lock() + defer lock.Unlock() + + if rep == nil { + return + } + + rep.close() +} + +// Continue will reopen the metric channel and allow for monitoring +// to be resumed. +func (rep *Reporter) Continue() { + lock.Lock() + defer lock.Unlock() + if rep == nil { + return + } + + if !rep.metricsCh.IsPaused() { + return + } + + rep.metricsCh.Continue() +} + +// InjectHandlers will enable client side metrics and inject the proper +// handlers to handle how metrics are sent. +// +// Example: +// // Start must be called in order to inject the correct handlers +// r, err := csm.Start("clientID", "127.0.0.1:8094") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// +// sess := session.NewSession() +// r.InjectHandlers(&sess.Handlers) +// +// // create a new service client with our client side metric session +// svc := s3.New(sess) +func (rep *Reporter) InjectHandlers(handlers *request.Handlers) { + if rep == nil { + return + } + + apiCallHandler := request.NamedHandler{Name: APICallMetricHandlerName, Fn: rep.sendAPICallMetric} + apiCallAttemptHandler := request.NamedHandler{Name: APICallAttemptMetricHandlerName, Fn: rep.sendAPICallAttemptMetric} + + handlers.Complete.PushFrontNamed(apiCallHandler) + handlers.Complete.PushFrontNamed(apiCallAttemptHandler) + + handlers.AfterRetry.PushFrontNamed(apiCallAttemptHandler) +} +
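Because the Reporter writes each metric as one JSON document per UDP datagram, a throwaway listener is enough to inspect the traffic during development. A sketch, assuming the same address as the Start examples above:

    pc, err := net.ListenPacket("udp", "127.0.0.1:8094")
    if err != nil {
        log.Fatal(err)
    }
    defer pc.Close()

    buf := make([]byte, 8*1024)
    for {
        n, _, err := pc.ReadFrom(buf)
        if err != nil {
            break
        }
        fmt.Printf("csm metric: %s\n", buf[:n]) // one JSON metric per datagram
    }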
+// boolIntValue returns 1 for true and 0 for false. +func boolIntValue(b bool) int { + if b { + return 1 + } + + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 10b7d864..23bb639e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -9,18 +9,22 @@ package defaults import ( "fmt" + "net" "net/http" + "net/url" "os" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/endpoints" + "github.com/aws/aws-sdk-go/internal/shareddefaults" ) // A Defaults provides a collection of default values for SDK clients. @@ -56,7 +60,7 @@ func Config() *aws.Config { WithMaxRetries(aws.UseServiceDefaultRetries). WithLogger(aws.NewDefaultLogger()). WithLogLevel(aws.LogOff). - WithSleepDelay(time.Sleep) + WithEndpointResolver(endpoints.DefaultResolver()) } // Handlers returns the default request handlers. @@ -70,8 +74,10 @@ func Handlers() request.Handlers { handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) handlers.Validate.AfterEachFn = request.HandlerListStopOnError handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) handlers.Build.AfterEachFn = request.HandlerListStopOnError handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) handlers.Send.PushBackNamed(corehandlers.SendHandler) handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) @@ -87,43 +93,115 @@ func Handlers() request.Handlers { func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { return credentials.NewCredentials(&credentials.ChainProvider{ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - RemoteCredProvider(*cfg, handlers), - }, + Providers: CredProviders(cfg, handlers), }) } -// RemoteCredProvider returns a credenitials provider for the default remote +// CredProviders returns the slice of providers used in +// the default credential chain. +// +// This is for applications that need to use some other provider (for example, use +// different environment variables for legacy reasons) but still fall back +// on the default chain of providers. It allows the default chain to be +// automatically updated. +func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider { + return []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + RemoteCredProvider(*cfg, handlers), + } +} + +const ( + httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" +) + +// RemoteCredProvider returns a credentials provider for the default remote // endpoints such as EC2 or ECS Roles.
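The branch RemoteCredProvider takes is driven entirely by the environment. A sketch exercising the local HTTP branch (the URI and token values are illustrative; note the host must resolve to a loopback address):

    os.Setenv("AWS_CONTAINER_CREDENTIALS_FULL_URI", "http://127.0.0.1:8080/creds")
    os.Setenv("AWS_CONTAINER_AUTHORIZATION_TOKEN", "example-token")

    provider := defaults.RemoteCredProvider(*defaults.Config(), defaults.Handlers())
    creds := credentials.NewCredentials(provider)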
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } - if len(ecsCredURI) > 0 { - return ecsCredProvider(cfg, handlers, ecsCredURI) + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) + return httpCredProvider(cfg, handlers, u) } return ec2RoleProvider(cfg, handlers) } -func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider { - const host = `169.254.170.2` +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } - return endpointcreds.NewProviderClient(cfg, handlers, - fmt.Sprintf("http://%s%s", host, uri), + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} + +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) }, ) } func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, - aws.StringValue(cfg.Region), true, false) + resolver := cfg.EndpointResolver + if resolver == nil { + resolver = endpoints.DefaultResolver() + } + e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "") return &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion), + Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion), ExpiryWindow: 5 * time.Minute, } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 00000000..ca0ee1dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. 
+// +// Builds the shared credentials file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return shareddefaults.SharedConfigFilename() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go new file mode 100644 index 00000000..4fcb6161 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go @@ -0,0 +1,56 @@ +// Package aws provides the core SDK's utilities and shared types. Use this package's +// utilities to simplify setting and reading API operation parameters. +// +// Value and Pointer Conversion Utilities +// +// This package includes a helper conversion utility for each scalar type the SDK's +// APIs use. These utilities make getting a pointer to the scalar, and dereferencing +// a pointer, easier. +// +// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value. +// The Pointer to value will safely dereference the pointer and return its value. +// If the pointer was nil, the scalar's zero value will be returned. +// +// The value to pointer functions will be named after the scalar type. So to get a +// *string from a string value, use the "String" function. This makes it easy to +// get a pointer to a literal string value, because getting the address of a +// literal requires assigning the value to a variable first. +// +// var strPtr *string +// +// // Without the SDK's conversion functions +// str := "my string" +// strPtr = &str +// +// // With the SDK's conversion functions +// strPtr = aws.String("my string") +// +// // Convert *string to string value +// str = aws.StringValue(strPtr) +// +// In addition to scalars the aws package also includes conversion utilities for +// map and slice types commonly used in API parameters. The map and slice +// conversion functions use a similar naming pattern to the scalar conversion +// functions. +// +// var strPtrs []*string +// var strs []string = []string{"Go", "Gophers", "Go"} +// +// // Convert []string to []*string +// strPtrs = aws.StringSlice(strs) +// +// // Convert []*string to []string +// strs = aws.StringValueSlice(strPtrs) +// +// SDK Default HTTP Client +// +// The SDK will use the http.DefaultClient if an HTTP client is not provided to +// the SDK's Session, or service client constructor. This means that if the +// http.DefaultClient is modified by other components of your application, the +// modifications will be picked up by the SDK as well. +// +// In some cases this might be intended, but it is a better practice to create +// a custom HTTP Client to share explicitly through your application. You can +// configure the SDK to use the custom HTTP Client by setting the HTTPClient +// value of the SDK's Config type when creating a Session or service client.
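A sketch of that practice, constructing one explicit client and sharing it through the SDK's Config (the timeout value is arbitrary):

    httpClient := &http.Client{Timeout: 30 * time.Second}
    sess := session.Must(session.NewSession(&aws.Config{
        HTTPClient: httpClient,
    }))
    // Service clients built from sess now share httpClient instead of
    // http.DefaultClient.
    svc := s3.New(sess)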
+package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index 669c813a..c215cd3f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -3,12 +3,13 @@ package ec2metadata import ( "encoding/json" "fmt" - "path" + "net/http" "strings" "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" ) // GetMetadata uses the path provided to request information from the EC2 @@ -18,7 +19,7 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", - HTTPPath: path.Join("/", "meta-data", p), + HTTPPath: sdkuri.PathJoin("/meta-data", p), } output := &metadataOutput{} @@ -27,6 +28,27 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { return output.Content, req.Send() } +// GetUserData returns the user data that was configured for the instance. If +// there is no user-data setup for the EC2 instance, a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { + if r.HTTPResponse.StatusCode == http.StatusNotFound { + r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) + } + }) + + return output.Content, req.Send() +} + // GetDynamicData uses the path provided to request information from the EC2 // instance metadata service for dynamic data. The content will be returned // as a string, or error if the request failed. @@ -34,7 +56,7 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { op := &request.Operation{ Name: "GetDynamicData", HTTPMethod: "GET", - HTTPPath: path.Join("/", "dynamic", p), + HTTPPath: sdkuri.PathJoin("/dynamic", p), } output := &metadataOutput{} @@ -111,7 +133,7 @@ func (c *EC2Metadata) Available() bool { return true } -// An EC2IAMInfo provides the shape for unmarshalling +// An EC2IAMInfo provides the shape for unmarshaling // an IAM info from the metadata API type EC2IAMInfo struct { Code string @@ -120,7 +142,7 @@ type EC2IAMInfo struct { InstanceProfileID string } -// An EC2InstanceIdentityDocument provides the shape for unmarshalling +// An EC2InstanceIdentityDocument provides the shape for unmarshaling // an instance identity document type EC2InstanceIdentityDocument struct { DevpayProductCodes []string `json:"devpayProductCodes"` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index 5b4379db..53457cac 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -1,5 +1,10 @@ // Package ec2metadata provides the client for making API calls to the // EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true (case insensitive).
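A sketch of the effect; the variable must be set before the client is constructed, since the handler swap happens in NewClient:

    os.Setenv("AWS_EC2_METADATA_DISABLED", "true")
    sess := session.Must(session.NewSession())
    svc := ec2metadata.New(sess)
    fmt.Println(svc.Available()) // false: every request now fails immediately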
package ec2metadata import ( @@ -7,17 +12,21 @@ import ( "errors" "io" "net/http" + "os" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/request" ) // ServiceName is the name of the service. const ServiceName = "ec2metadata" +const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" // An EC2Metadata is an EC2 Metadata service Client. type EC2Metadata struct { @@ -63,6 +72,7 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceName, Endpoint: endpoint, APIVersion: "latest", }, @@ -75,6 +85,21 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio svc.Handlers.Validate.Clear() svc.Handlers.Validate.PushBack(validateEndpointHandler) + // Disable the EC2 Metadata service if the environment variable is set. + // This short-circuits the service's functionality to always fail to send + // requests. + if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + // Add additional options to the service config for _, option := range opts { option(svc.Client) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go new file mode 100644 index 00000000..c04ba06c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -0,0 +1,155 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type modelDefinition map[string]json.RawMessage + +// DecodeModelOptions are the options for how the endpoints model definition +// is decoded. +type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. +func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// an endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model, an error will be returned. +// +// Casting the return value of this func to an EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) + + // Get the version of the partition file to determine what + // unmarshaling model to use.
+ modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRmIotDataService(p) + custFixAppAutoscalingChina(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if p.ID != "aws" { + return + } + + s, ok := p.Services["s3"] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services["s3"] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := s.Defaults.Hostname, expectHostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 00000000..2b2d0081 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,3487 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. +) + +// AWS Standard partition's regions. +const ( + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). 
+ CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuWest1RegionID = "eu-west-1" // EU (Ireland). + EuWest2RegionID = "eu-west-2" // EU (London). + EuWest3RegionID = "eu-west-3" // EU (Paris). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). +) + +// Service identifiers +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. 
+ ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. 
+ WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). +// +// partitions := endpoints.DefaultPartitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. +func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "EU (Frankfurt)", + }, + "eu-west-1": region{ + Description: "EU (Ireland)", + }, + "eu-west-2": region{ + Description: "EU (London)", + }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "acm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "api.mediatailor": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appsync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + 
"ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + 
"ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codecommit": 
service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": 
endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + 
"ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecr": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + 
"eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + 
"fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glue": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + 
Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + 
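+// Editor's sketch, not generated data: DefaultPartitions() exposes the
+// partition tables defined in this file, so callers can enumerate
+// partitions and their regions through the exported Partition type
+// rather than hard-coding the ID constants.
+//
+//     for _, p := range endpoints.DefaultPartitions() {
+//         fmt.Println("partition:", p.ID())
+//         for id := range p.Regions() {
+//             fmt.Println("  region:", id)
+//         }
+//     }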
"marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": 
service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + 
"ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + 
"servicediscovery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "Shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + 
"ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Defaults: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{ + Hostname: "sts.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + 
"swf": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for 
AWS China. +func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + 
"cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud 
(US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecr": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + 
"es": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 00000000..84316b92 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,66 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns an endpoint Resolver that will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// +// Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.ID()) +// for id := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.ID()) +// for id := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic for how +// endpoints are resolved. This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface it can be used to resolve +// endpoints. To use this with the SDK's Session and Config, set the value +// of the type to the EndpointResolver field of aws.Config when initializing +// the session, or service client. +// +// In addition, the ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. +// +// +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } +// +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+// } +// +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) +package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go new file mode 100644 index 00000000..e29c0951 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -0,0 +1,449 @@ +package endpoints + +import ( + "fmt" + "regexp" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Options provide the configuration needed to direct how the +// endpoints will be resolved. +type Options struct { + // DisableSSL forces the endpoint to be resolved as HTTP + // instead of HTTPS if the service supports it. + DisableSSL bool + + // Sets the resolver to resolve the endpoint as a dualstack endpoint + // for the service. If dualstack support for a service is not known and + // StrictMatching is not enabled, a dualstack endpoint for the service will + // be returned. This endpoint may not be valid. If StrictMatching is + // enabled, only services that are known to support dualstack will return + // dualstack endpoints. + UseDualStack bool + + // Enables strict matching of services and regions when resolving endpoints. + // If the partition doesn't enumerate the exact service and region, an + // error will be returned. This option will prevent returning endpoints + // that look valid, but may not resolve to any real endpoint. + StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list, the provided region will + // be used to determine which partition's domain name pattern to apply to the + // service endpoint ID. If both the service and region are unknown and the + // endpoint is resolved on the partition list, an UnknownEndpointError will be returned. + // + // If resolving an endpoint on a partition-specific resolver, that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition, the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool +} + +// Set combines all of the option functions together. +func (o *Options) Set(optFns ...func(*Options)) { + for _, fn := range optFns { + fn(o) + } +} + +// DisableSSLOption sets the DisableSSL option. Can be used as a functional +// option when resolving endpoints. +func DisableSSLOption(o *Options) { + o.DisableSSL = true +} + +// UseDualStackOption sets the UseDualStack option. Can be used as a functional +// option when resolving endpoints. +func UseDualStackOption(o *Options) { + o.UseDualStack = true +} + +// StrictMatchingOption sets the StrictMatching option. Can be used as a functional +// option when resolving endpoints. +func StrictMatchingOption(o *Options) { + o.StrictMatching = true +}
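A minimal usage sketch (illustrative, not part of the vendored file): the option setters above are passed variadically to EndpointFor, which applies them via Options.Set before resolving. The service and region names here are only examples.

	package main

	import (
		"fmt"
		"log"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// Resolve an S3 endpoint strictly, requesting the dualstack variant.
		resolved, err := endpoints.DefaultResolver().EndpointFor(
			"s3", "us-west-2",
			endpoints.StrictMatchingOption,
			endpoints.UseDualStackOption,
		)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(resolved.URL, resolved.SigningRegion)
	}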
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. +func ResolveUnknownServiceOption(o *Options) { + o.ResolveUnknownService = true +} + +// A Resolver provides the interface for functionality to resolve endpoints. +// The built-in Partition and DefaultResolver return values satisfy this interface. +type Resolver interface { + EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) +} + +// ResolverFunc is a helper utility that wraps a function so it satisfies the +// Resolver interface. This is useful when you want to add additional endpoint +// resolving logic, or stub out specific endpoints with custom values. +type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) + +// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. +func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return fn(service, region, opts...) +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no +// scheme. If disableSSL is true, HTTP will be set instead of the default HTTPS. +// +// If disableSSL is set, it will only set the URL's scheme if the URL does not +// contain a scheme. +func AddScheme(endpoint string, disableSSL bool) string { + if !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} + +// EnumPartitions provides a way to retrieve the underlying partitions that +// make up the SDK's default Resolver, or any resolver decoded from a model +// file. +// +// Use this interface with DefaultResolver and DecodeModels to get the list of +// Partitions. +type EnumPartitions interface { + Partitions() []Partition +} + +// RegionsForService returns a map of regions for the partition and service. +// If either the partition or service does not exist, false will be returned +// as the second return value. +// +// This example shows how to get the regions for DynamoDB in the AWS partition. +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// This is equivalent to using the partition directly. +// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { + for _, p := range ps { + if p.ID() != partitionID { + continue + } + if _, ok := p.p.Services[serviceID]; !ok { + break + } + + s := Service{ + id: serviceID, + p: p.p, + } + return s.Regions(), true + } + + return map[string]Region{}, false +} + +// PartitionForRegion returns the first partition which includes the region +// passed in. This includes both known regions and regions which match +// a pattern supported by the partition which may include regions that are +// not explicitly known by the partition. Use the Regions method of the +// returned Partition if explicit support is needed. +func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { + for _, p := range ps { + if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { + return p, true + } + } + + return Partition{}, false +} + +// A Partition provides the ability to enumerate the partition's regions +// and services. +type Partition struct { + id string + p *partition +}
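The enumeration helpers above compose as in this brief sketch (illustrative only; the region and service identifiers are examples, and the identifier constants are the generated ones referenced in the comments above):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// All partitions bundled with the SDK.
		partitions := endpoints.DefaultResolver().(endpoints.EnumPartitions).Partitions()

		// Which partition does a region belong to?
		if p, ok := endpoints.PartitionForRegion(partitions, "cn-north-1"); ok {
			fmt.Println("cn-north-1 belongs to", p.ID())
		}

		// Which regions offer a given service?
		if rs, ok := endpoints.RegionsForService(partitions, endpoints.AwsPartitionID, endpoints.DynamodbServiceID); ok {
			for id := range rs {
				fmt.Println("dynamodb available in", id)
			}
		}
	}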
+// ID returns the identifier of the partition. +func (p Partition) ID() string { return p.id } + +// EndpointFor attempts to resolve the endpoint based on service and region. +// See Options for information on configuring how the endpoint is resolved. +// +// If the service cannot be found in the metadata, the UnknownServiceError +// error will be returned. This validation will occur regardless of whether +// StrictMatching is enabled. To enable resolving unknown services, set the +// "ResolveUnknownService" option to true. When StrictMatching is disabled, +// this option allows the partition resolver to resolve an endpoint based on +// the service endpoint ID provided. +// +// When resolving endpoints you can choose to enable StrictMatching. This will +// require the provided service and region to be known by the partition. +// If the endpoint cannot be strictly resolved, an error will be returned. This +// mode is useful to ensure the endpoint resolved is valid. Without +// StrictMatching enabled, the endpoint returned may look valid but may not work. +// StrictMatching requires the SDK to be updated if you want to take advantage +// of new region and service expansions. +// +// Errors that can be returned: +// * UnknownServiceError +// * UnknownEndpointError +func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return p.p.EndpointFor(service, region, opts...) +} + +// Regions returns a map of Regions indexed by their ID. This is useful for +// enumerating over the regions in a partition. +func (p Partition) Regions() map[string]Region { + rs := map[string]Region{} + for id, r := range p.p.Regions { + rs[id] = Region{ + id: id, + desc: r.Description, + p: p.p, + } + } + + return rs +} + +// Services returns a map of Service indexed by their ID. This is useful for +// enumerating over the services in a partition. +func (p Partition) Services() map[string]Service { + ss := map[string]Service{} + for id := range p.p.Services { + ss[id] = Service{ + id: id, + p: p.p, + } + } + + return ss +} + +// A Region provides information about a region, and the ability to resolve an +// endpoint from the context of a region, given a service. +type Region struct { + id, desc string + p *partition +} + +// ID returns the region's identifier. +func (r Region) ID() string { return r.id } + +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. +func (r Region) Description() string { return r.desc } + +// ResolveEndpoint resolves an endpoint from the context of the region given +// a service. See Partition.EndpointFor for usage and errors that can be returned. +func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return r.p.EndpointFor(service, r.id, opts...) +} + +// Services returns a list of all services that are known to be in this region. +func (r Region) Services() map[string]Service { + ss := map[string]Service{} + for id, s := range r.p.Services { + if _, ok := s.Endpoints[r.id]; ok { + ss[id] = Service{ + id: id, + p: r.p, + } + } + } + + return ss +} + +// A Service provides information about a service, and the ability to resolve an +// endpoint from the context of a service, given a region. +type Service struct { + id string + p *partition +} + +// ID returns the identifier for the service. +func (s Service) ID() string { return s.id } + +// ResolveEndpoint resolves an endpoint from the context of a service given +// a region.
See Partition.EndpointFor for usage and errors that can be returned. +func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return s.p.EndpointFor(s.id, region, opts...) +} + +// Regions returns a map of Regions that the service is present in. +// +// A region is the AWS region the service exists in. Whereas an Endpoint is +// a URL that can be resolved to an instance of a service. +func (s Service) Regions() map[string]Region { + rs := map[string]Region{} + for id := range s.p.Services[s.id].Endpoints { + if r, ok := s.p.Regions[id]; ok { + rs[id] = Region{ + id: id, + desc: r.Description, + p: s.p, + } + } + } + + return rs +} + +// Endpoints returns a map of Endpoints indexed by their ID for all known +// endpoints for a service. +// +// A region is the AWS region the service exists in. Whereas an Endpoint is +// a URL that can be resolved to an instance of a service. +func (s Service) Endpoints() map[string]Endpoint { + es := map[string]Endpoint{} + for id := range s.p.Services[s.id].Endpoints { + es[id] = Endpoint{ + id: id, + serviceID: s.id, + p: s.p, + } + } + + return es +} + +// An Endpoint provides information about endpoints, and provides the ability +// to resolve that endpoint for the service, and the region the endpoint +// represents. +type Endpoint struct { + id string + serviceID string + p *partition +} + +// ID returns the identifier for an endpoint. +func (e Endpoint) ID() string { return e.id } + +// ServiceID returns the identifier the endpoint belongs to. +func (e Endpoint) ServiceID() string { return e.serviceID } + +// ResolveEndpoint resolves an endpoint from the context of a service and +// region the endpoint represents. See Partition.EndpointFor for usage and +// errors that can be returned. +func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { + return e.p.EndpointFor(e.serviceID, e.id, opts...) +} + +// A ResolvedEndpoint is an endpoint that has been resolved based on a partition, +// service, and region. +type ResolvedEndpoint struct { + // The endpoint URL + URL string + + // The region that should be used for signing requests. + SigningRegion string + + // The service name that should be used for signing requests. + SigningName string + + // States that the signing name for this endpoint was derived from metadata + // passed in, but was not explicitly modeled. + SigningNameDerived bool + + // The signing method that should be used for signing requests. + SigningMethod string +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError awserr.Error + +// An EndpointNotFoundError is returned when in StrictMatching mode, and the +// endpoint for the service and region cannot be found in any of the partitions. +type EndpointNotFoundError struct { + awsError + Partition string + Service string + Region string +} + +// An UnknownServiceError is returned when the service does not resolve to an +// endpoint. Includes a list of all known services for the partition. Returned +// when a partition does not support the service. +type UnknownServiceError struct { + awsError + Partition string + Service string + Known []string +}
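A short sketch of the Service and Endpoint enumeration above (illustrative only; it assumes the generated AwsPartition accessor and S3ServiceID constant referenced elsewhere in this package):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// Enumerate every modeled endpoint for one service and resolve it.
		s3 := endpoints.AwsPartition().Services()[endpoints.S3ServiceID]
		for id, e := range s3.Endpoints() {
			resolved, err := e.ResolveEndpoint()
			if err != nil {
				continue
			}
			fmt.Println(id, "->", resolved.URL)
		}
	}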
+// NewUnknownServiceError builds and returns UnknownServiceError. +func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { + return UnknownServiceError{ + awsError: awserr.New("UnknownServiceError", + "could not resolve endpoint for unknown service", nil), + Partition: p, + Service: s, + Known: known, + } +} + +// Error returns the string representation of the error. +func (e UnknownServiceError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q", + e.Partition, e.Service) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownServiceError) String() string { + return e.Error() +} + +// An UnknownEndpointError is returned when in StrictMatching mode and the +// service is valid, but the region does not resolve to an endpoint. Includes +// a list of all known endpoints for the service. +type UnknownEndpointError struct { + awsError + Partition string + Service string + Region string + Known []string +} + +// NewUnknownEndpointError builds and returns UnknownEndpointError. +func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { + return UnknownEndpointError{ + awsError: awserr.New("UnknownEndpointError", + "could not resolve endpoint", nil), + Partition: p, + Service: s, + Region: r, + Known: known, + } +} + +// Error returns the string representation of the error. +func (e UnknownEndpointError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q, region: %q", + e.Partition, e.Service, e.Region) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) String() string { + return e.Error() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go new file mode 100644 index 00000000..ff6f76db --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -0,0 +1,307 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching, fall back to the first partition's format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) + } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +}
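A brief error-handling sketch (illustrative; the service name is deliberately bogus): with StrictMatching enabled the resolver surfaces the typed errors defined above, which can be distinguished with a type switch.

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		_, err := endpoints.AwsPartition().EndpointFor(
			"no-such-service", "us-west-2",
			endpoints.StrictMatchingOption,
		)
		switch e := err.(type) {
		case endpoints.UnknownServiceError:
			fmt.Println("unknown service; known services:", e.Known)
		case endpoints.UnknownEndpointError:
			fmt.Println("unknown endpoint; known endpoints:", e.Known)
		case nil:
			// resolved successfully
		}
	}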
+// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition defined in the SDK's +// endpoints model. +func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if !(hasService || opt.ResolveUnknownService) { + // Only return an error if the resolver will not fall back to creating + // an endpoint based on the service endpoint ID passed in. + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + e, hasEndpoint := s.endpointForRegion(region) + if !hasEndpoint && opt.StrictMatching { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + return e.resolve(service, region, p.DNSSuffix, defs, opt), nil +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return a + // blank one that will be used for generic endpoint creation.
+ return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + hostname := e.Hostname + + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + return ResolvedEndpoint{ + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go 
new file mode 100644 index 00000000..05e92df2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,337 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions +} + +// Set combines all of the option functions together. +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel, given an endpoints model file, will decode it and attempt to +// generate Go code from the model definition. An error will be returned if +// the code cannot be generated or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, +
"StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" . }} + + {{ range $_, $partition := . }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ template "service consts" . }} + + {{ template "endpoint resolvers" . }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . -}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . 
-}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go index 57663616..fa06f7a8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -5,13 +5,9 @@ import "github.com/aws/aws-sdk-go/aws/awserr" var ( // ErrMissingRegion is an error that is returned if region configuration is // not found. - // - // @readonly ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) // ErrMissingEndpoint is an error that is returned if an endpoint cannot be // resolved for a service. - // - // @readonly ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) ) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 00000000..91a6f277 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index db87188e..6ed15b2e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -26,14 +26,14 @@ func (l *LogLevelType) Value() LogLevelType { // Matches returns true if the v LogLevel is enabled by this LogLevel. Should be // used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nill, will default to LogOff comparison. +// LogLevel is nil, will default to LogOff comparison. 
func (l *LogLevelType) Matches(v LogLevelType) bool { c := l.Value() return c&v == v } // AtLeast returns true if this LogLevel is at least high enough to satisfy v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default // to LogOff comparison. func (l *LogLevelType) AtLeast(v LogLevelType) bool { c := l.Value() @@ -71,6 +71,12 @@ const ( // LogDebugWithRequestErrors states the SDK should log when service requests fail // to build, send, validate, or unmarshal. LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodies. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK. Will also enable LogDebug. + LogDebugWithEventStreamBody ) // A Logger is a minimalistic interface for the SDK to log messages to. Should diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 00000000..271da432 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,19 @@ +// +build !appengine,!plan9 + +package request + +import ( + "net" + "os" + "syscall" +) + +func isErrConnectionReset(err error) bool { + if opErr, ok := err.(*net.OpError); ok { + if sysErr, ok := opErr.Err.(*os.SyscallError); ok { + return sysErr.Err == syscall.ECONNRESET + } + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go new file mode 100644 index 00000000..daf9eca4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go @@ -0,0 +1,11 @@ +// +build appengine plan9 + +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + return strings.Contains(err.Error(), "connection reset") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 5279c19c..605a72d3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -14,10 +14,12 @@ type Handlers struct { Send HandlerList ValidateResponse HandlerList Unmarshal HandlerList + UnmarshalStream HandlerList UnmarshalMeta HandlerList UnmarshalError HandlerList Retry HandlerList AfterRetry HandlerList + Complete HandlerList } // Copy returns a copy of this handler's lists.
@@ -29,10 +31,12 @@ func (h *Handlers) Copy() Handlers { Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), UnmarshalError: h.UnmarshalError.copy(), UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), AfterRetry: h.AfterRetry.copy(), + Complete: h.Complete.copy(), } } @@ -43,11 +47,13 @@ func (h *Handlers) Clear() { h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() + h.UnmarshalStream.Clear() h.UnmarshalMeta.Clear() h.UnmarshalError.Clear() h.ValidateResponse.Clear() h.Retry.Clear() h.AfterRetry.Clear() + h.Complete.Clear() } // A HandlerListRunItem represents an entry in the HandlerList which @@ -85,13 +91,17 @@ func (l *HandlerList) copy() HandlerList { n := HandlerList{ AfterEachFn: l.AfterEachFn, } - n.list = append([]NamedHandler{}, l.list...) + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) return n } // Clear clears the handler list. func (l *HandlerList) Clear() { - l.list = []NamedHandler{} + l.list = l.list[0:0] } // Len returns the number of handlers in the list. @@ -101,33 +111,100 @@ func (l *HandlerList) Len() int { // PushBack pushes handler f to the back of the handler list. func (l *HandlerList) PushBack(f func(*Request)) { - l.list = append(l.list, NamedHandler{"__anonymous", f}) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) + l.PushBackNamed(NamedHandler{"__anonymous", f}) } // PushBackNamed pushes named handler f to the back of the handler list. func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } l.list = append(l.list, n) } +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + // PushFrontNamed pushes named handler f to the front of the handler list. func (l *HandlerList) PushFrontNamed(n NamedHandler) { - l.list = append([]NamedHandler{n}, l.list...) + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } } // Remove removes a NamedHandler n func (l *HandlerList) Remove(n NamedHandler) { - newlist := []NamedHandler{} - for _, m := range l.list { - if m.Name != n.Name { - newlist = append(newlist, m) + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true } } - l.list = newlist + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. 
The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } } // Run executes all handlers in the list with a given request object. @@ -163,6 +240,16 @@ func HandlerListStopOnError(item HandlerListRunItem) bool { return item.Request.Error == nil } +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + // MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request // header. If the extra parameters are provided they will be added as metadata to the // name/version pair resulting in the following format. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go index a4087f20..79f79602 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -1,5 +1,3 @@ -// +build go1.5 - package request import ( @@ -9,20 +7,13 @@ import ( ) func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - Close: r.Close, - Body: body, - Host: r.Host, - Method: r.Method, - Proto: r.Proto, - ContentLength: r.ContentLength, - // Cancel will be deprecated in 1.7 and will be replaced with Context - Cancel: r.Cancel, - } - + req := new(http.Request) + *req = *r + req.URL = &url.URL{} *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} for k, v := range r.Header { for _, vv := range v { req.Header.Add(k, vv) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go deleted file mode 100644 index 75da021e..00000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !go1.5 - -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - Close: r.Close, - Body: body, - Host: r.Host, - Method: r.Method, - Proto: r.Proto, - ContentLength: r.ContentLength, - } - - *req.URL = *r.URL - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index da6396d2..b0c2ef4f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -3,25 +3,28 @@ package request import ( "io" "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" ) // offsetReader is a thread-safe io.ReadCloser to prevent racing // with retrying requests type offsetReader struct { buf io.ReadSeeker - lock sync.RWMutex + lock sync.Mutex closed bool } func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { reader := &offsetReader{} - buf.Seek(offset, 0) + buf.Seek(offset, sdkio.SeekStart) reader.buf = buf return reader } -// Close is a thread-safe close. Uses the write lock. +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. func (o *offsetReader) Close() error { o.lock.Lock() defer o.lock.Unlock() @@ -29,10 +32,10 @@ func (o *offsetReader) Close() error { return nil } -// Read is a thread-safe read using a read lock. +// Read is a thread-safe read of the underlying io.ReadSeeker func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.RLock() - defer o.lock.RUnlock() + o.lock.Lock() + defer o.lock.Unlock() if o.closed { return 0, io.EOF @@ -41,6 +44,14 @@ func (o *offsetReader) Read(p []byte) (int, error) { return o.buf.Read(p) } +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + // CloseAndCopy will return a new offsetReader with a copy of the old buffer // and close the old buffer. func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 2832aaa4..63e7f71c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,7 +4,7 @@ import ( "bytes" "fmt" "io" - "io/ioutil" + "net" "net/http" "net/url" "reflect" @@ -14,6 +14,29 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" ) // A Request is the service request to be made. 
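These constants give callers stable codes to match on instead of comparing error strings. A sketch of classifying a failed request, assuming err came back from an SDK operation:

    if aerr, ok := err.(awserr.Error); ok {
        switch aerr.Code() {
        case request.CanceledErrorCode:
            // The aws.Context attached to the request was canceled.
        case request.ErrCodeResponseTimeout:
            // A body read exceeded the configured response timeout.
        }
    }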
@@ -23,25 +46,39 @@ type Request struct { Handlers Handlers Retryer - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + + context aws.Context built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader } // An Operation is the service API operation to be made. @@ -50,14 +87,8 @@ type Operation struct { HTTPMethod string HTTPPath string *Paginator -} -// Paginator keeps track of pagination configuration for an API operation. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string + BeforePresignFn func(r *Request) error } // New returns a new Request pointer for the service API @@ -83,12 +114,15 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } + SanitizeHostForHeader(httpReq) + r := &Request{ Config: cfg, ClientInfo: clientInfo, Handlers: handlers.Copy(), Retryer: retryer, + AttemptTime: time.Now(), Time: time.Now(), ExpireTime: 0, Operation: operation, @@ -103,8 +137,99 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, return r } +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. 
+// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. The passed in headers pointer must be non-nil. +// +// var headers http.Header +// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) +func WithGetResponseHeaders(headers *http.Header) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *headers = req.HTTPResponse.Header + }) + } +} + +// WithLogLevel is a request option that will set the request to use a specific +// log level when the request is made. +// +// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody)) +func WithLogLevel(l aws.LogLevelType) Option { + return func(r *Request) { + r.Config.LogLevel = aws.LogLevel(l) + } +} + +// ApplyOptions will apply each option to the request calling them in the order +// they were provided. +func (r *Request) ApplyOptions(opts ...Option) { + for _, opt := range opts { + opt(r) + } +} + +// Context will always return a non-nil context. If Request does not have a +// context aws.BackgroundContext will be returned. +func (r *Request) Context() aws.Context { + if r.context != nil { + return r.context + } + return aws.BackgroundContext() +} + +// SetContext adds a Context to the current request that can be used to cancel +// an in-flight request. The Context value must not be nil, or this method will +// panic. +// +// Unlike http.Request.WithContext, SetContext does not return a copy of the +// Request. It is not safe to use a single Request value for multiple +// requests. A new Request should be created for each API operation request. +// +// Go 1.6 and below: +// The http.Request's Cancel field will be set to the Done() value of +// the context. This will overwrite the Cancel field's value. +// +// Go 1.7 and above: +// The http.Request.WithContext will be used to set the context on the underlying +// http.Request. This will create a shallow copy of the http.Request. The SDK +// may create sub contexts in the future for nested requests such as retries. +func (r *Request) SetContext(ctx aws.Context) { + if ctx == nil { + panic("context cannot be nil") + } + setRequestContext(r, ctx) +} + // WillRetry returns if the request can be retried. func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } @@ -135,31 +260,75 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.HTTPRequest.Body = newOffsetReader(reader, 0) r.Body = reader + r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset. + r.ResetBody() } // Presign returns the request's signed URL. Error will be returned -// if the signing fails.
The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. +// +// It is invalid to create a presigned URL with an expire duration of 0 or less. An +// error is returned if the expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this, making the presigned URL + // useless. r.NotHoist = false - r.Sign() - if r.Error != nil { - return "", r.Error - } - return r.HTTPRequest.URL.String(), nil + + u, _, err := getPresignedURL(r, expire) + return u, err } -// PresignRequest behaves just like presign, but hoists all headers and signs them. -// Also returns the signed hash back to the user -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.NotHoist = true - r.Sign() - if r.Error != nil { - return "", nil, r.Error +// PresignRequest behaves just like Presign, with the addition of returning a +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with an expire duration of 0 or less. An +// error is returned if the expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API URL. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + + if r.Operation.BeforePresignFn != nil { + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err + } + } + + if err := r.Sign(); err != nil { + return "", nil, err } + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } @@ -179,7 +348,7 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) { // Build will build the request's object so it can be signed and sent // to the service. Build will also validate all the request's parameters. -// Anny additional build Handlers set on this request will be run +// Any additional build Handlers set on this request will be run // in the order they were set. // // The request will only be built once. Multiple calls to build will have @@ -205,9 +374,9 @@ func (r *Request) Build() error { return r.Error } -// Sign will sign the request returning error if errors are encountered. +// Sign will sign the request, returning error if errors are encountered. // -// Send will build the request prior to signing. All Sign Handlers will +// Sign will build the request prior to signing. All Sign Handlers will be executed in the order they were set.
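A usage sketch of the reworked presign path, assuming a hypothetical S3 client svc and bucket/key values:

    req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
        Bucket: aws.String("mybucket"),
        Key:    aws.String("mykey"),
    })
    // expire must be greater than 0, otherwise ErrCodeInvalidPresignExpire is returned.
    url, signedHeaders, err := req.PresignRequest(15 * time.Minute)
    // signedHeaders must accompany any HTTP request made with url.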
func (r *Request) Sign() error { r.Build() @@ -220,7 +389,64 @@ func (r *Request) Sign() error { return r.Error } -// Send will send the request returning error if errors are encountered. +func (r *Request) getNextRequestBody() (io.ReadCloser, error) { + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody = newOffsetReader(r.Body, r.BodyStart) + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + } + + var body io.ReadCloser + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. // // Send will sign the request prior to sending. All Send Handlers will // be executed in the order they were set. @@ -231,29 +457,32 @@ func (r *Request) Sign() error { // // readLoop() and getConn(req *Request, cm connectMethod) // https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + for { + r.AttemptTime = time.Now() if aws.BoolValue(r.Retryable) { if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) } - var body io.ReadCloser - if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok { - body = reader.CloseAndCopy(r.BodyStart) - } else { - if r.Config.Logger != nil { - r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions") - } - r.Body.Seek(r.BodyStart, 0) - body = ioutil.NopCloser(r.Body) - } + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. 
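One consequence of the seekable-body handling above: a request built from a plain io.Reader is sent at most once, since WillRetry refuses to retry a body that cannot be rewound. A sketch contrasting the two cases; the payload values are hypothetical:

    // bytes.Buffer is an io.Reader but not an io.Seeker; wrapping it with
    // aws.ReadSeekCloser lets the request build, but disables retries.
    streamOnly := aws.ReadSeekCloser(bytes.NewBufferString("payload"))

    // bytes.Reader is seekable, so retries remain enabled.
    retryable := bytes.NewReader([]byte("payload"))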
+ r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body) + // Closing response body to ensure that no response body is leaked + // between retry attempts. if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - // Closing response body. Since we are setting a new request to send off, this - // response will get squashed and leaked. r.HTTPResponse.Body.Close() } } @@ -267,7 +496,7 @@ func (r *Request) Send() error { r.Handlers.Send.Run(r) if r.Error != nil { - if strings.Contains(r.Error.Error(), "net/http: request canceled") { + if !shouldRetryCancel(r) { return r.Error } @@ -275,22 +504,22 @@ func (r *Request) Send() error { r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { - debugLogReqError(r, "Send Request", false, r.Error) + debugLogReqError(r, "Send Request", false, err) return r.Error } debugLogReqError(r, "Send Request", true, err) continue } - r.Handlers.UnmarshalMeta.Run(r) r.Handlers.ValidateResponse.Run(r) if r.Error != nil { - err := r.Error r.Handlers.UnmarshalError.Run(r) + err := r.Error + r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Response", false, r.Error) + debugLogReqError(r, "Validate Response", false, err) return r.Error } debugLogReqError(r, "Validate Response", true, err) @@ -303,7 +532,7 @@ func (r *Request) Send() error { r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, r.Error) + debugLogReqError(r, "Unmarshal Response", false, err) return r.Error } debugLogReqError(r, "Unmarshal Response", true, err) @@ -316,6 +545,17 @@ func (r *Request) Send() error { return nil } +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + // AddToUserAgent adds the string to the end of the request's current user agent. func AddToUserAgent(r *Request, s string) { curUA := r.HTTPRequest.Header.Get("User-Agent") @@ -324,3 +564,98 @@ func AddToUserAgent(r *Request, s string) { } r.HTTPRequest.Header.Set("User-Agent", s) } + +func shouldRetryCancel(r *Request) bool { + awsErr, ok := r.Error.(awserr.Error) + timeoutErr := false + errStr := r.Error.Error() + if ok { + if awsErr.Code() == CanceledErrorCode { + return false + } + err := awsErr.OrigErr() + netErr, netOK := err.(net.Error) + timeoutErr = netOK && netErr.Temporary() + if urlErr, ok := err.(*url.Error); !timeoutErr && ok { + errStr = urlErr.Err.Error() + } + } + + // There can be two types of canceled errors here. + // The first being a net.Error and the other being an error. + // If the request was timed out, we want to continue the retry + // process. Otherwise, return the canceled error. + return timeoutErr || + (errStr != "net/http: request canceled" && + errStr != "net/http: request canceled while waiting for connection") + +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. 
+// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 00000000..e36e468b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,39 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// NoBody is an empty reader that will trigger the Go HTTP client to not include +// and body in the HTTP request. +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 00000000..7c6a8000 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,33 @@ +// +build go1.8 + +package request + +import ( + "net/http" +) + +// NoBody is a http.NoBody reader instructing Go HTTP client to not include +// and body in the HTTP request. 
+var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 00000000..a7365cd1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 00000000..307fa070 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index 2939ec47..a633ed5a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -2,29 +2,138 @@ package request import ( "reflect" + "sync/atomic" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" ) -//type Paginater interface { -// HasNextPage() bool -// NextPage() *Request -// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error -//} +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. 
+// +// cont := true +// for p.Next() && cont { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// } +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. + EndPageOnSameToken bool -// HasNextPage returns true if this request has more pages of data available. -func (r *Request) HasNextPage() bool { - return len(r.nextPageTokens()) > 0 + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err } -// nextPageTokens returns the tokens to use when asking for the next page of -// data. +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. +func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. 
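A sketch of driving Pagination by hand, mirroring what the generated Pages methods do; the S3 client value svc is hypothetical:

    p := request.Pagination{
        NewRequest: func() (*request.Request, error) {
            req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
                Bucket: aws.String("mybucket"),
            })
            return req, nil
        },
    }
    for p.Next() {
        page := p.Page().(*s3.ListObjectsOutput)
        fmt.Println("objects in page:", len(page.Contents))
    }
    err := p.Err()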
+type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. func (r *Request) nextPageTokens() []interface{} { if r.Operation.Paginator == nil { return nil } - if r.Operation.TruncationToken != "" { tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) if len(tr) == 0 { @@ -46,13 +155,28 @@ func (r *Request) nextPageTokens() []interface{} { tokens := []interface{}{} tokenAdded := false for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } } + + tokenAdded = true + tokens = append(tokens, v) } if !tokenAdded { return nil @@ -61,9 +185,40 @@ func (r *Request) nextPageTokens() []interface{} { return tokens } +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + // NextPage returns a new Request that can be executed to return the next // page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") + tokens := r.nextPageTokens() if len(tokens) == 0 { return nil @@ -90,7 +245,12 @@ func (r *Request) NextPage() *Request { // as the structure "T". The lastPage value represents whether the page is // the last page of data or not. The return value of this function should // return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations") + for page := r; page != nil; page = page.NextPage() { if err := page.Send(); err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 8cc8b015..7d527029 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -8,7 +8,7 @@ import ( ) // Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer +// The default implementation used by most services is the client.DefaultRetryer // structure, which contains basic retry logic using exponential backoff. type Retryer interface { RetryRules(*Request) time.Duration @@ -26,8 +26,10 @@ func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { // retryableCodes is a collection of service response codes which are retry-able // without any further action. var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, + "RequestError": {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout } var throttleCodes = map[string]struct{}{ @@ -36,8 +38,8 @@ var throttleCodes = map[string]struct{}{ "ThrottlingException": {}, "RequestLimitExceeded": {}, "RequestThrottled": {}, - "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 } // credsExpiredCodes is a collection of error codes which signify the credentials @@ -67,35 +69,93 @@ func isCodeExpiredCreds(code string) bool { return ok } +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +type temporaryError interface { + Temporary() bool +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporaryError); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + // IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -func (r *Request) IsErrorRetryable() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeRetryable(err.Code()) +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) } } return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -func (r *Request) IsErrorThrottle() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeThrottle(err.Code()) +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) } } return false } -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. 
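Since the retry predicates are now package-level functions, a custom Retryer can reuse the SDK's error classification. A sketch, assuming a hypothetical retryer that embeds client.DefaultRetryer:

    type loggingRetryer struct {
        client.DefaultRetryer
    }

    func (d loggingRetryer) ShouldRetry(r *request.Request) bool {
        // Reuse the SDK's classification before applying local policy.
        return request.IsErrorRetryable(r.Error) || request.IsErrorThrottle(r.Error)
    }

    // Install it on a config, e.g. for a single service client:
    cfg := request.WithRetryer(aws.NewConfig(), loggingRetryer{
        client.DefaultRetryer{NumMaxRetries: 3},
    })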
-func (r *Request) IsErrorExpired() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeExpiredCreds(err.Code()) +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) } } return false } + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + return IsErrorThrottle(r.Error) +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 00000000..09a44eb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. +func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per read timeouts. If a timeout occurred, we will return the +// ErrCodeResponseTimeout. 
+// +// svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30 * time.Second)) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. + r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go index 2520286b..bcfd947a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -17,6 +17,10 @@ const ( ParamMinValueErrCode = "ParamMinValueError" // ParamMinLenErrCode is the error code for fields without enough elements. ParamMinLenErrCode = "ParamMinLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" ) // Validator provides a way for types to perform validation logic on their @@ -220,7 +224,7 @@ type ErrParamMinLen struct { func NewErrParamMinLen(field string, min int) *ErrParamMinLen { return &ErrParamMinLen{ errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, + code: ParamMinLenErrCode, field: field, msg: fmt.Sprintf("minimum field size of %v", min), }, @@ -232,3 +236,26 @@ func NewErrParamMinLen(field string, min int) *ErrParamMinLen { func (e *ErrParamMinLen) MinLen() int { return e.min } + +// An ErrParamFormat represents an invalid format parameter error. +type ErrParamFormat struct { + errInvalidParam + format string +} + +// NewErrParamFormat creates a new invalid format parameter error. +func NewErrParamFormat(field string, format, value string) *ErrParamFormat { + return &ErrParamFormat{ + errInvalidParam: errInvalidParam{ + code: ParamFormatErrCode, + field: field, + msg: fmt.Sprintf("format %v, %v", format, value), + }, + format: format, + } +} + +// Format returns the field's required format. +func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 00000000..4601f883 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns a waiter option which sets the maximum number +// of times the waiter should attempt to check the resource for the target state. +func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state.
The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. 
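The WaiterOption values defined above are what the generated WaitUntil*WithContext methods accept. A usage sketch, assuming a hypothetical S3 client svc:

    ctx := aws.BackgroundContext()
    err := svc.WaitUntilBucketExistsWithContext(ctx,
        &s3.HeadBucketInput{Bucket: aws.String("mybucket")},
        request.WithWaiterMaxAttempts(10),
        request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
    )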
+func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. 
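For an operation without a generated waiter, a Waiter value can be assembled directly; the waiter name, acceptor status codes, and request constructor below are illustrative only:

    w := request.Waiter{
        Name:        "WaitUntilBucketExists", // hypothetical waiter name
        MaxAttempts: 20,
        Delay:       request.ConstantWaiterDelay(15 * time.Second),
        Acceptors: []request.WaiterAcceptor{
            {State: request.SuccessWaiterState, Matcher: request.StatusWaiterMatch, Expected: 200},
            {State: request.RetryWaiterState, Matcher: request.StatusWaiterMatch, Expected: 404},
        },
        NewRequest: func(opts []request.Option) (*request.Request, error) {
            req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("mybucket")})
            req.ApplyOptions(opts...)
            return req, nil
        },
    }
    err := w.WaitWithContext(aws.BackgroundContext())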
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 097d3237..98d420fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -23,7 +23,7 @@ additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. Alternatively you can explicitly create a Session with shared config enabled. To do this you can use NewSessionWithOptions to configure how the Session will be created. Using the NewSessionWithOptions with SharedConfigState set to -SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG +SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG environment variable was set. Creating Sessions @@ -45,16 +45,16 @@ region, and profile loaded from the environment and shared config automatically. Requires the AWS_PROFILE to be set, or "default" is used. // Create Session - sess, err := session.NewSession() + sess := session.Must(session.NewSession()) // Create a Session with a custom region - sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) + sess := session.Must(session.NewSession(&aws.Config{ + Region: aws.String("us-east-1"), + })) // Create a S3 client instance from a session - sess, err := session.NewSession() - if err != nil { - // Handle Session creation error - } + sess := session.Must(session.NewSession()) + svc := s3.New(sess) Create Session With Option Overrides @@ -66,24 +66,26 @@ through code instead of being driven by environment variables only. 
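Individual service clients can also layer extra config over the session's; a short sketch (region value hypothetical):

    sess := session.Must(session.NewSession())

    // Override the region for this one client; the session itself is unchanged.
    svc := s3.New(sess, aws.NewConfig().WithRegion("us-west-2"))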
Use NewSessionWithOptions when you want to provide the config profile, or override the shared config state (AWS_SDK_LOAD_CONFIG). - // Equivalent to session.New - sess, err := session.NewSessionWithOptions(session.Options{}) + // Equivalent to session.NewSession() + sess := session.Must(session.NewSessionWithOptions(session.Options{ + // Options + })) // Specify profile to load for the session's config - sess, err := session.NewSessionWithOptions(session.Options{ + sess := session.Must(session.NewSessionWithOptions(session.Options{ Profile: "profile_name", - }) + })) // Specify profile for config and region for requests - sess, err := session.NewSessionWithOptions(session.Options{ + sess := session.Must(session.NewSessionWithOptions(session.Options{ Config: aws.Config{Region: aws.String("us-east-1")}, Profile: "profile_name", - }) + })) // Force enable Shared Config support - sess, err := session.NewSessionWithOptions(session.Options{ - SharedConfigState: SharedConfigEnable, - }) + sess := session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + })) Adding Handlers @@ -93,7 +95,8 @@ handler logs every request and its payload made by a service client: // Create a session, and add additional handlers for all service // clients created with the Session to inherit. Adds logging handler. - sess, err := session.NewSession() + sess := session.Must(session.NewSession()) + sess.Handlers.Send.PushFront(func(r *request.Request) { // Log every request made and its payload logger.Println("Request: %s/%s, Payload: %s", @@ -121,12 +124,11 @@ file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both files have the same format. If both config files are present the configuration from both files will be -read. The Session will be created from configuration values from the shared -credentials file (~/.aws/credentials) over those in the shared credentials -file (~/.aws/config). +read. The Session will be created from configuration values from the shared +credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config). Credentials are the values the SDK should use for authenticating requests with -AWS Services. They arfrom a configuration file will need to include both +AWS Services. When loading credentials from a configuration file both aws_access_key_id and aws_secret_access_key must be provided together in the same file to be considered valid. The values will be ignored if not a complete group. aws_session_token is an optional field that can be provided if both of @@ -138,15 +140,14 @@ the other two fields are also provided. Assume Role values allow you to configure the SDK to assume an IAM role using a set of credentials provided in a config file via the source_profile field. -Both "role_arn" and "source_profile" are required. The SDK does not support -assuming a role with MFA token Via the Session's constructor. You can use the -stscreds.AssumeRoleProvider credentials provider to specify custom -configuration and support for MFA. +Both "role_arn" and "source_profile" are required. The SDK supports assuming +a role with an MFA token if the session option AssumeRoleTokenProvider +is set. role_arn = arn:aws:iam::<account_number>:role/<role_name> source_profile = profile_with_creds external_id = 1234 - mfa_serial = not supported! + mfa_serial = <serial or mfa arn> role_session_name = session_name Region is the region the SDK should use for looking up AWS service endpoints @@ -154,6 +155,37 @@ and signing requests.
region = us-east-1 +Assume Role with MFA token + +To create a session with support for assuming an IAM role with MFA set the +session option AssumeRoleTokenProvider to a function that will prompt for the +MFA token code when the SDK assumes the role and refreshes the role's credentials. +This allows you to configure the SDK via the shared config to assume a role +with MFA tokens. + +In order for the SDK to assume a role with MFA the SharedConfigState +session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG +environment variable set. + +The shared configuration instructs the SDK to assume an IAM role with MFA +when the mfa_serial configuration field is set in the shared config +(~/.aws/config) or shared credentials (~/.aws/credentials) file. + +If mfa_serial is set in the configuration but the AssumeRoleTokenProvider +session option is not set, an error will +be returned when creating the session. + + sess := session.Must(session.NewSessionWithOptions(session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + })) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess) + +To set up assume role outside of a session see the stscreds.AssumeRoleProvider +documentation. + Environment Variables When a Session is created several environment variables can be set to adjust @@ -218,6 +250,24 @@ $HOME/.aws/config on Linux/Unix based systems, and AWS_CONFIG_FILE=$HOME/my_shared_config +Path to a custom Certificate Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. + + AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + +Enabling this option will attempt to merge the Transport into the SDK's HTTP +client. If the client's Transport is not an http.Transport an error will be +returned. If the Transport's TLS config is set this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to also enable this feature. The CustomCABundle session option field has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and a custom HTTP client, the HTTP client needs to be provided +when creating the session, not the service client. */ package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index d2f0c844..c94d0fb9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -2,12 +2,16 @@ package session import ( "os" - "path/filepath" "strconv" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/defaults" ) +// EnvProviderName provides a name of the provider when config is loaded from environment. +const EnvProviderName = "EnvConfigCredentials" + // envConfig is a collection of environment values the SDK will read // setup config from. All environment values are optional.
But some values // such as credentials require multiple values to be complete or the values @@ -75,9 +79,47 @@ type envConfig struct { // // AWS_CONFIG_FILE=$HOME/my_shared_config SharedConfigFile string + + // Sets the path to a custom Certificate Authority (CA) bundle PEM file + // that the SDK will use instead of the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not an http.Transport an error will be returned. If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and a custom HTTP client, the HTTP client needs to be provided + // when creating the session, not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string + + csmEnabled string + CSMEnabled bool + CSMPort string + CSMClientID string + + enableEndpointDiscovery string + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool } var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } credAccessEnvKey = []string{ "AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY", @@ -90,6 +132,10 @@ var ( "AWS_SESSION_TOKEN", } + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + regionEnvKeys = []string{ "AWS_REGION", "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set @@ -98,6 +144,12 @@ var ( "AWS_PROFILE", "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -130,11 +182,17 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + // Require logical grouping of credentials if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { cfg.Creds = credentials.Value{} } else { - cfg.Creds.ProviderName = "EnvConfigCredentials" + cfg.Creds.ProviderName = EnvProviderName } regionKeys := regionEnvKeys @@ -147,8 +205,23 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Region, regionKeys) setFromEnvVal(&cfg.Profile, profileKeys) - cfg.SharedCredentialsFile = sharedCredentialsFilename() - cfg.SharedConfigFile = sharedConfigFilename() + // Endpoint discovery is enabled when the environment variable is set + // to any value other than "false".
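As a quick, hypothetical illustration of the client-side metrics (CSM) plumbing introduced above (values are examples only; per the code, AWS_CSM_ENABLED merely needs to be non-empty, and the port falls back to csm.DefaultPort when unset):

    os.Setenv("AWS_CSM_ENABLED", "true")
    os.Setenv("AWS_CSM_PORT", "31000")
    os.Setenv("AWS_CSM_CLIENT_ID", "my-app")

    // The CSM handlers are injected while the session is created.
    sess := session.Must(session.NewSession())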
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey) + if len(cfg.enableEndpointDiscovery) > 0 { + cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false") + } + + setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey) + setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey) + + if len(cfg.SharedCredentialsFile) == 0 { + cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename() + } + if len(cfg.SharedConfigFile) == 0 { + cfg.SharedConfigFile = defaults.SharedConfigFilename() + } + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") return cfg } @@ -161,28 +234,3 @@ func setFromEnvVal(dst *string, keys []string) { } } } - -func sharedCredentialsFilename() string { - if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 { - return name - } - - return filepath.Join(userHomeDir(), ".aws", "credentials") -} - -func sharedConfigFilename() string { - if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 { - return name - } - - return filepath.Join(userHomeDir(), ".aws", "config") -} - -func userHomeDir() string { - homeDir := os.Getenv("HOME") // *nix - if len(homeDir) == 0 { // windows - homeDir = os.Getenv("USERPROFILE") - } - - return homeDir -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 2374b1f2..9b1ad609 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -1,7 +1,13 @@ package session import ( + "crypto/tls" + "crypto/x509" "fmt" + "io" + "io/ioutil" + "net/http" + "os" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -9,18 +15,37 @@ import ( "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/csm" "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/endpoints" + "github.com/aws/aws-sdk-go/internal/shareddefaults" ) +const ( + // ErrCodeSharedConfig represents an error that occurs in the shared + // configuration logic + ErrCodeSharedConfig = "SharedConfigErr" +) + +// ErrSharedConfigSourceCollision will be returned if a section contains both +// source_profile and credential_source +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil) + +// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment +// variables are empty and Environment was set as the credential source +var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil) + +// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided +var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil) + // A Session provides a central location to create service clients from and // store configurations and request handlers for those services. // // Sessions are safe to create service clients concurrently, but it is not safe // to mutate the Session concurrently. // -// The Session satisfies the service client's client.ClientConfigProvider. 
+// The Session satisfies the service client's client.ConfigProvider. type Session struct { Config *aws.Config Handlers request.Handlers @@ -34,17 +59,17 @@ type Session struct { // If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New // method could now encounter an error when loading the configuration. When // The environment variable is set, and an error occurs, New will return a -// session that will fail all requests reporting the error that occured while +// session that will fail all requests reporting the error that occurred while // loading the session. Use NewSession to get the error when creating the // session. // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded, in addition to -// the shared credentials file (~/.aws/config). Values set in both the +// the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. // -// Deprecated: Use NewSession functiions to create sessions instead. NewSession +// Deprecated: Use NewSession functions to create sessions instead. NewSession // has the same functionality as New except an error can be returned when the // func is called instead of waiting to receive an error until a request is made. func New(cfgs ...*aws.Config) *Session { @@ -52,14 +77,19 @@ func New(cfgs ...*aws.Config) *Session { envCfg := loadEnvConfig() if envCfg.EnableSharedConfig { - s, err := newSession(envCfg, cfgs...) + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) if err != nil { // Old session.New expected all errors to be discovered when // a request is made, and would report the errors then. This // needs to be replicated if an error occurs while creating // the session. msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + - "Use session.NewSession to handle errors occuring during session creation." + "Use session.NewSession to handle errors occurring during session creation." // Session creation failed, need to report the error and prevent // any requests from succeeding. @@ -70,10 +100,16 @@ func New(cfgs ...*aws.Config) *Session { r.Error = err }) } + return s } - return oldNewSession(cfgs...) + s := deprecatedNewSession(cfgs...) + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } + + return s } // NewSession returns a new Session created from SDK defaults, config files, @@ -83,18 +119,19 @@ func New(cfgs ...*aws.Config) *Session { // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/config). Values set in both the +// the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. // // See the NewSessionWithOptions func for information on how to override or -// control through code how the Session will be created. Such as specifing the +// control through code how the Session will be created. Such as specifying the // config profile, and controlling if shared config is enabled or not. 
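A short, hypothetical sketch of the distinction the deprecation note above draws; the two calls are alternatives, not one program:

    // Deprecated style: a configuration error is only reported when the
    // first request is sent.
    sess := session.New()

    // Preferred style: the error is returned at creation time.
    sess, err := session.NewSession()
    if err != nil {
        // handle the shared config / credentials loading error here
    }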
func NewSession(cfgs ...*aws.Config) (*Session, error) { - envCfg := loadEnvConfig() + opts := Options{} + opts.Config.MergeIn(cfgs...) - return newSession(envCfg, cfgs...) + return NewSessionWithOptions(opts) } // SharedConfigState provides the ability to optionally override the state @@ -124,7 +161,7 @@ type Options struct { // Provides config values for the SDK to use when creating service clients // and making API requests to services. Any value set in with this field // will override the associated value provided by the SDK defaults, - // environment or config files where relevent. + // environment or config files where relevant. // // If not set, configuration values from from SDK defaults, environment, // config will be used. @@ -147,6 +184,45 @@ type Options struct { // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable // and enable or disable the shared config functionality. SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It will override the environment variables AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called whenever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommended to + // share the session where possible. + // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role with MFA via the mfa_serial field. + AssumeRoleTokenProvider func() (string, error) + + // Reader for a custom Certificate Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not an http.Transport an error will be + // returned. If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // The Session option CustomCABundle is also available when creating sessions + // to also enable this feature. The CustomCABundle session option field has priority + // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + CustomCABundle io.Reader } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -155,31 +231,36 @@ type Options struct { // // If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value // the shared config file (~/.aws/config) will also be loaded in addition to -// the shared credentials file (~/.aws/config).
Values set in both the +// the shared credentials file (~/.aws/credentials). Values set in both the // shared config, and shared credentials will be taken from the shared // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. // // // Equivalent to session.New -// sess, err := session.NewSessionWithOptions(session.Options{}) +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) // // // Specify profile to load for the session's config -// sess, err := session.NewSessionWithOptions(session.Options{ +// sess := session.Must(session.NewSessionWithOptions(session.Options{ // Profile: "profile_name", -// }) +// })) // // // Specify profile for config and region for requests -// sess, err := session.NewSessionWithOptions(session.Options{ +// sess := session.Must(session.NewSessionWithOptions(session.Options{ // Config: aws.Config{Region: aws.String("us-east-1")}, // Profile: "profile_name", -// }) +// })) // // // Force enable Shared Config support -// sess, err := session.NewSessionWithOptions(session.Options{ -// SharedConfigState: SharedConfigEnable, -// }) +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) func NewSessionWithOptions(opts Options) (*Session, error) { - envCfg := loadEnvConfig() + var envCfg envConfig + if opts.SharedConfigState == SharedConfigEnable { + envCfg = loadSharedEnvConfig() + } else { + envCfg = loadEnvConfig() + } if len(opts.Profile) > 0 { envCfg.Profile = opts.Profile @@ -192,7 +273,18 @@ func NewSessionWithOptions(opts Options) (*Session, error) { envCfg.EnableSharedConfig = true } - return newSession(envCfg, &opts.Config) + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) } // Must is a helper function to ensure the Session is valid and there was no @@ -210,13 +302,18 @@ func Must(sess *Session, err error) *Session { return sess } -func oldNewSession(cfgs ...*aws.Config) *Session { +func deprecatedNewSession(cfgs ...*aws.Config) *Session { cfg := defaults.Config() handlers := defaults.Handlers() // Apply the passed in configs so the configuration can be applied to the // default credential chain cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. 
+ cfg.EndpointResolver = endpoints.DefaultResolver() + } cfg.Credentials = defaults.CredChain(cfg, handlers) // Reapply any passed in configs to override credentials if set @@ -228,11 +325,23 @@ func oldNewSession(cfgs ...*aws.Config) *Session { } initHandlers(s) - return s } -func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { +func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) { + logger.Log("Enabling CSM") + if len(port) == 0 { + port = csm.DefaultPort + } + + r, err := csm.Start(clientID, "127.0.0.1:"+port) + if err != nil { + return + } + r.InjectHandlers(handlers) +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { cfg := defaults.Config() handlers := defaults.Handlers() @@ -241,13 +350,18 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { userCfg := &aws.Config{} userCfg.MergeIn(cfgs...) - // Order config files will be loaded in with later files overwriting + // Ordered config files will be loaded in with later files overwriting // previous config file values. - cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} - if !envCfg.EnableSharedConfig { - // The shared config file (~/.aws/config) is only loaded if instructed - // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). - cfgFiles = cfgFiles[1:] + var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } } // Load additional config from file(s) @@ -256,7 +370,9 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { return nil, err } - mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers) + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } s := &Session{ Config: cfg, @@ -264,11 +380,66 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { } initHandlers(s) + if envCfg.CSMEnabled { + enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger) + } + + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } return s, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) { +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + t = &http.Transport{} + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + 
p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { // Merge in user provided configuration cfg.MergeIn(userCfg) @@ -281,8 +452,67 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share } } + if aws.BoolValue(envCfg.EnableEndpointDiscovery) { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) + } + } + // Configure credentials if not already set if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + + // inspect the profile to see if a credential source has been specified. + if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { + + // if both credential_source and source_profile have been set, return an error + // as this is undefined behavior. + if len(sharedCfg.AssumeRole.SourceProfile) > 0 { + return ErrSharedConfigSourceCollision + } + + // valid credential source values + const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" + ) + + switch sharedCfg.AssumeRole.CredentialSource { + case credSourceEc2Metadata: + cfgCp := *cfg + p := defaults.RemoteCredProvider(cfgCp, handlers) + cfgCp.Credentials = credentials.NewCredentials(p) + + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return AssumeRoleTokenProviderNotSetError{} + } + + cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) + case credSourceEnvironment: + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return ErrSharedConfigECSContainerEnvVarEmpty + } + + cfgCp := *cfg + p := defaults.RemoteCredProvider(cfgCp, handlers) + creds := credentials.NewCredentials(p) + + cfg.Credentials = creds + default: + return ErrSharedConfigInvalidCredSource + } + + return nil + } + if len(envCfg.Creds.AccessKeyID) > 0 { cfg.Credentials = credentials.NewStaticCredentialsFromCreds( envCfg.Creds, @@ -292,22 +522,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( sharedCfg.AssumeRoleSource.Creds, ) - cfg.Credentials = stscreds.NewCredentials( - &Session{ - Config: &cfgCp, - Handlers: handlers.Copy(), - }, - sharedCfg.AssumeRole.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName - if len(sharedCfg.AssumeRole.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) - } + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. 
+ return AssumeRoleTokenProviderNotSetError{} + } - // MFA not supported - }, - ) + cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts) } else if len(sharedCfg.Creds.AccessKeyID) > 0 { cfg.Credentials = credentials.NewStaticCredentialsFromCreds( sharedCfg.Creds, @@ -326,6 +548,57 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share }) } } + + return nil +} + +func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials { + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + + // Assume role with external ID + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.AssumeRole.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ) +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the +// AssumeRoleTokenProvider option is not set and the shared config is configured +// to assume a role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) } type credProviderError struct { @@ -370,19 +643,69 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + // Backwards compatibility: the error will be eaten if the user calls ClientConfig + // directly. All SDK services will use clientConfigWithErr. + cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) + + return cfg +} + +func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { s = s.Copy(cfgs...)
- endpoint, signingRegion := endpoints.NormalizeEndpoint( - aws.StringValue(s.Config.Endpoint), - serviceName, - aws.StringValue(s.Config.Region), - aws.BoolValue(s.Config.DisableSSL), - aws.BoolValue(s.Config.UseDualStack), - ) + + var resolved endpoints.ResolvedEndpoint + var err error + + region := aws.StringValue(s.Config.Region) + + if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { + resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } else { + resolved, err = s.Config.EndpointResolver.EndpointFor( + serviceName, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) + opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + }, err +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + + region := aws.StringValue(s.Config.Region) + + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: endpoint, - SigningRegion: signingRegion, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 0147eede..427b8a4e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -2,11 +2,11 @@ package session import ( "fmt" - "os" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/internal/ini" ) const ( @@ -16,15 +16,19 @@ const ( sessionTokenKey = `aws_session_token` // optional // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional // Additional Config fields regionKey = `region` + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + // DefaultSharedConfigProfile is the default profile to be used when // loading 
configuration from the config files if another profile name // is not provided. @@ -32,11 +36,12 @@ const ( ) type assumeRoleConfig struct { - RoleARN string - SourceProfile string - ExternalID string - MFASerial string - RoleSessionName string + RoleARN string + SourceProfile string + CredentialSource string + ExternalID string + MFASerial string + RoleSessionName string } // sharedConfig represents the configuration fields of the SDK config files. @@ -60,11 +65,17 @@ type sharedConfig struct { // // region Region string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool } type sharedConfigFile struct { Filename string - IniData *ini.File + IniData ini.Sections } // loadSharedConfig retrieves the configuration from the list of files @@ -105,18 +116,16 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { files := make([]sharedConfigFile, 0, len(filenames)) for _, filename := range filenames { - if _, err := os.Stat(filename); os.IsNotExist(err) { - // Trim files from the list that don't exist. + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { + // Skip files which can't be opened and read for whatever reason continue - } - - f, err := ini.Load(filename) - if err != nil { - return nil, SharedConfigLoadError{Filename: filename} + } else if err != nil { + return nil, SharedConfigLoadError{Filename: filename, Err: err} } files = append(files, sharedConfigFile{ - Filename: filename, IniData: f, + Filename: filename, IniData: sections, }) } @@ -126,6 +135,13 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { var assumeRoleSrc sharedConfig + if len(cfg.AssumeRole.CredentialSource) > 0 { + // setAssumeRoleSource is only called when source_profile is found. + // If both source_profile and credential_source are set, then + // ErrSharedConfigSourceCollision will be returned + return ErrSharedConfigSourceCollision + } + // Multiple level assume role chains are not support if cfg.AssumeRole.SourceProfile == origProfile { assumeRoleSrc = *cfg @@ -170,45 +186,54 @@ func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFil // if a config file only includes aws_access_key_id but no aws_secret_access_key // the aws_access_key_id will be ignored. 
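To make the credential grouping rule above concrete, a hypothetical shared credentials file (all values are placeholders); the first profile yields usable credentials, while the second profile's lone access key is ignored:

    [complete]
    aws_access_key_id = AKIDEXAMPLE
    aws_secret_access_key = SECRETEXAMPLE
    aws_session_token = TOKENEXAMPLE

    [incomplete]
    aws_access_key_id = AKIDEXAMPLE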
func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { - section, err := file.IniData.GetSection(profile) - if err != nil { + section, ok := file.IniData.GetSection(profile) + if !ok { // Fallback to to alternate profile name: profile - section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) - if err != nil { - return SharedConfigProfileNotExistsError{Profile: profile, Err: err} + section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if !ok { + return SharedConfigProfileNotExistsError{Profile: profile, Err: nil} } } // Shared Credentials - akid := section.Key(accessKeyIDKey).String() - secret := section.Key(secretAccessKey).String() + akid := section.String(accessKeyIDKey) + secret := section.String(secretAccessKey) if len(akid) > 0 && len(secret) > 0 { cfg.Creds = credentials.Value{ AccessKeyID: akid, SecretAccessKey: secret, - SessionToken: section.Key(sessionTokenKey).String(), + SessionToken: section.String(sessionTokenKey), ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), } } // Assume Role - roleArn := section.Key(roleArnKey).String() - srcProfile := section.Key(sourceProfileKey).String() - if len(roleArn) > 0 && len(srcProfile) > 0 { + roleArn := section.String(roleArnKey) + srcProfile := section.String(sourceProfileKey) + credentialSource := section.String(credentialSourceKey) + hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 + if len(roleArn) > 0 && hasSource { cfg.AssumeRole = assumeRoleConfig{ - RoleARN: roleArn, - SourceProfile: srcProfile, - ExternalID: section.Key(externalIDKey).String(), - MFASerial: section.Key(mfaSerialKey).String(), - RoleSessionName: section.Key(roleSessionNameKey).String(), + RoleARN: roleArn, + SourceProfile: srcProfile, + CredentialSource: credentialSource, + ExternalID: section.String(externalIDKey), + MFASerial: section.String(mfaSerialKey), + RoleSessionName: section.String(roleSessionNameKey), } } // Region - if v := section.Key(regionKey).String(); len(v) > 0 { + if v := section.String(regionKey); len(v) > 0 { cfg.Region = v } + // Endpoint discovery + if section.Has(enableEndpointDiscoveryKey) { + v := section.Bool(enableEndpointDiscoveryKey) + cfg.EnableEndpointDiscovery = &v + } + return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 00000000..6aa2ed24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to +// true of the signer. 
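The one-line function body follows. As a hypothetical usage sketch, such functional options can be passed to the signer's constructor; creds stands in for a *credentials.Credentials value:

    signer := v4.NewSigner(creds, v4.WithUnsignedPayload)

With UnsignedPayload set, the body digest logic later in this patch uses the literal UNSIGNED-PAYLOAD value instead of hashing the request body, for services that support it.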
+func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 00000000..bd082e9d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 7d99f54d..155645d6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -2,15 +2,65 @@ // // Provides request signing for request that need to be signed with // AWS V4 Signatures. +// +// Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// logic when using Go v1.5 or higher. The signer does this by taking advantage +// of the URL.EscapedPath method. If your request URI requires additional escaping +// you may need to use the URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// If set, the URL.Opaque field is required to be in the form of: +// +// "//<hostname>/<path>" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath() +// method, using the returned value. If you're using Go v1.4 you must set +// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with +// Go v1.5 the signer will fall back to URL.Path. +// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of this escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query the signature was generated for. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, it is recommended that when using the signer outside of the +// SDK you explicitly escape the request prior to signing. This will help +// prevent signature validation errors, and can be done by setting +// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then +// call URL.EscapedPath() if Opaque is not set. +// +// If signing a request intended for an HTTP/2 server, and you're using Go 1.6.2 +// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the +// request URL. https://github.com/golang/go/issues/16847 points to a bug in +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP +// message. URL.Opaque generally will force Go to make requests with absolute URL. +// URL.RawPath does not do this, but RawPath must be a valid escaping of Path +// or url.EscapedPath will ignore the RawPath escaping.
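Tying the escaping guidance above together, a minimal, hypothetical sketch of standalone signing with a pre-escaped path; the credential, service, and region values are placeholders:

    creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
    signer := v4.NewSigner(creds)

    req, _ := http.NewRequest("GET", "https://service.us-east-1.amazonaws.com/some%20path", nil)
    req.URL.RawPath = "/some%20path" // pre-escaped form of req.URL.Path

    _, err := signer.Sign(req, nil, "service", "us-east-1", time.Now())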
+// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. package v4 import ( - "bytes" "crypto/hmac" "crypto/sha256" "encoding/hex" "fmt" "io" + "io/ioutil" "net/http" "net/url" "sort" @@ -21,6 +71,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" "github.com/aws/aws-sdk-go/private/protocol/rest" ) @@ -36,8 +87,9 @@ const ( var ignoredHeaders = rules{ blacklist{ mapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, }, }, } @@ -82,7 +134,9 @@ var requiredSignedHeaders = rules{ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Tagging": struct{}{}, "X-Amz-Website-Redirect-Location": struct{}{}, + "X-Amz-Content-Sha256": struct{}{}, }, }, patterns{"X-Amz-Meta-"}, @@ -119,10 +173,33 @@ type Signer struct { // request's query string. DisableHeaderHoisting bool + // Disables the automatic escaping of the URI path of the request for the + // signature's canonical string's path. For services that do not need additional + // escaping, use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // Disables the automatic setting of the HTTP request's Body field with the + // io.ReadSeeker passed in to the signer. This is useful if you're using a + // custom wrapper around the body for the io.ReadSeeker and want to preserve + // the Body value on the Request.Body. + // + // This does run the risk of signing a request with a body that will not be + // sent in the request. You need to ensure that the underlying data of the Body + // values are the same. + DisableRequestBodyOverwrite bool + // currentTimeFn returns the time value which represents the current time. // This value should only be used for testing. If it is nil the default // time.Now will be used. currentTimeFn func() time.Time + + // UnsignedPayload will prevent signing of the payload. This will only + // work for services that have support for this. + UnsignedPayload bool } // NewSigner returns a Signer pointer configured with the credentials and optional @@ -150,10 +227,13 @@ type signingCtx struct { ExpireTime time.Duration SignedHeaderVals http.Header + DisableURIPathEscaping bool + credValues credentials.Value isPresign bool formattedTime string formattedShortTime string + unsignedPayload bool bodyDigest string signedHeaders string @@ -175,6 +255,12 @@ type signingCtx struct { // is not needed as the full request context will be captured by the http.Request // value. It is included for reference though. // +// Sign will set the request's Body to be the `body` parameter passed in. If +// the body is not already an io.ReadCloser, it will be wrapped within one. If +// a `nil` body parameter is passed to Sign, the request's Body field will +// also be set to nil. It's important to note that this functionality will not +// change the ContentLength of the request. +// +// Sign differs from Presign in that it will sign the request using HTTP
This type of signing is intended for http.Request values that // will not be shared, or are shared in a way the header values on the request @@ -185,7 +271,7 @@ type signingCtx struct { // "X-Amz-Content-Sha256" header with a precomputed value. The signer will // only compute the hash if the request header value is empty. func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, signTime) + return v4.signWithBody(r, body, service, region, 0, false, signTime) } // Presign signs AWS v4 requests with the provided body, service name, region @@ -219,32 +305,33 @@ func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region strin // presigned request's signature you can set the "X-Amz-Content-Sha256" // HTTP header and that will be included in the request's signature. func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, signTime) + return v4.signWithBody(r, body, service, region, exp, true, signTime) } -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { currentTimeFn := v4.currentTimeFn if currentTimeFn == nil { currentTimeFn = time.Now } ctx := &signingCtx{ - Request: r, - Body: body, - Query: r.URL.Query(), - Time: signTime, - ExpireTime: exp, - isPresign: exp != 0, - ServiceName: service, - Region: region, + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: isPresign, + ServiceName: service, + Region: region, + DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, + } + + for key := range ctx.Query { + sort.Strings(ctx.Query[key]) } if ctx.isRequestSigned() { - if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) { - // If the request is already signed, and the credentials have not - // expired, and the request is not too old ignore the signing request. - return ctx.SignedHeaderVals, nil - } ctx.Time = currentTimeFn() ctx.handlePresignRemoval() } @@ -255,8 +342,25 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi return http.Header{}, err } + ctx.sanitizeHostForHeader() ctx.assignAmzQueryValues() - ctx.build(v4.DisableHeaderHoisting) + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } + + // If the request is not presigned the body should be attached to it. This + // prevents the confusion of wanting to send a signed request without + // the body the request was signed for attached. 
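Before the body-attachment branch continues below, a side-by-side sketch of the two entry points above; signer, req, and body are assumed in scope, and the service, region, and expiry values are hypothetical:

    // Sign places the signature in the Authorization header.
    _, err := signer.Sign(req, body, "s3", "us-west-2", time.Now())

    // Presign places the signature in the query string instead; the URL
    // stays valid for the supplied expiry window.
    _, err = signer.Presign(req, body, "s3", "us-west-2", 15*time.Minute, time.Now())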
+ if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { + var reader io.ReadCloser + if body != nil { + var ok bool + if reader, ok = body.(io.ReadCloser); !ok { + reader = ioutil.NopCloser(body) + } + } + r.Body = reader + } if v4.Debug.Matches(aws.LogDebugWithSigning) { v4.logSigningInfo(ctx) @@ -265,6 +369,10 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi return ctx.SignedHeaderVals, nil } +func (ctx *signingCtx) sanitizeHostForHeader() { + request.SanitizeHostForHeader(ctx.Request) +} + func (ctx *signingCtx) handlePresignRemoval() { if !ctx.isPresign { return @@ -303,7 +411,7 @@ var SignRequestHandler = request.NamedHandler{ } // SignSDKRequest signs an AWS request with the V4 signature. This -// request handler is bested used only with the SDK's built in service client's +// request handler should only be used with the SDK's built in service client's // API operation requests. // // This function should not be used on its on its own, but in conjunction with @@ -316,7 +424,18 @@ var SignRequestHandler = request.NamedHandler{ func SignSDKRequest(req *request.Request) { signSDKRequestWithCurrTime(req, time.Now) } -func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) { + +// BuildNamedHandler will build a generic handler for signing. +func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now, opts...) + }, + } +} + +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { // If the request does not need to be signed ignore the signing of the // request if the AnonymousCredentials object is used. if req.Config.Credentials == credentials.AnonymousCredentials { @@ -338,14 +457,28 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time v4.Logger = req.Config.Logger v4.DisableHeaderHoisting = req.NotHoist v4.currentTimeFn = curTimeFn + if name == "s3" { + // S3 service should not have any escaping applied + v4.DisableURIPathEscaping = true + } + // Prevents setting the HTTPRequest's Body. Since the Body could be + // wrapped in a custom io.Closer that we do not want to be stomped + // on top of by the signer.
+ v4.DisableRequestBodyOverwrite = true }) + for _, opt := range opts { + opt(v4) + } + signingTime := req.Time if !req.LastSignedAt.IsZero() { signingTime = req.LastSignedAt } - signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime) + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, req.ExpireTime > 0, signingTime, + ) if err != nil { req.Error = err req.SignedHeaderVals = nil @@ -356,7 +489,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time req.LastSignedAt = curTimeFn() } -const logSignInfoMsg = `DEBUG: Request Signiture: +const logSignInfoMsg = `DEBUG: Request Signature: ---[ CANONICAL STRING ]----------------------------- %s ---[ STRING TO SIGN ]-------------------------------- @@ -375,10 +508,14 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) { v4.Logger.Log(msg) } -func (ctx *signingCtx) build(disableHeaderHoisting bool) { +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { ctx.buildTime() // no depends ctx.buildCredentialString() // no depends + if err := ctx.buildBodyDigest(); err != nil { + return err + } + unsignedHeaders := ctx.Request.Header if ctx.isPresign { if !disableHeaderHoisting { @@ -390,7 +527,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } } - ctx.buildBodyDigest() ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) ctx.buildCanonicalString() // depends on canon headers / signed headers ctx.buildStringToSign() // depends on canon string @@ -406,6 +542,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) } + + return nil } func (ctx *signingCtx) buildTime() { @@ -480,29 +618,26 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { headerValues := make([]string, len(headers)) for i, k := range headers { if k == "host" { - headerValues[i] = "host:" + ctx.Request.URL.Host + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } } else { headerValues[i] = k + ":" + strings.Join(ctx.SignedHeaderVals[k], ",") } } - - ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") } func (ctx *signingCtx) buildCanonicalString() { ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) - uri := ctx.Request.URL.Opaque - if uri != "" { - uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") - } else { - uri = ctx.Request.URL.Path - } - if uri == "" { - uri = "/" - } - if ctx.ServiceName != "s3" { + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { uri = rest.EscapePath(uri, false) } @@ -535,21 +670,34 @@ func (ctx *signingCtx) buildSignature() { ctx.signature = hex.EncodeToString(signature) } -func (ctx *signingCtx) buildBodyDigest() { +func (ctx *signingCtx) buildBodyDigest() error { hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") if hash == "" { - if ctx.isPresign && ctx.ServiceName == "s3" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign } else if ctx.Body == nil { hash = emptyStringSHA256 } else { + if 
!aws.IsReaderSeekable(ctx.Body) {
+				return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
+			}
 			hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
 		}
-		if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
+
+		if includeSHA256Header {
 			ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
 		}
 	}
 	ctx.bodyDigest = hash
+
+	return nil
 }
 
 // isRequestSigned returns if the request is currently signed or presigned
 
@@ -589,56 +737,61 @@ func makeSha256(data []byte) []byte {
 
 func makeSha256Reader(reader io.ReadSeeker) []byte {
 	hash := sha256.New()
-	start, _ := reader.Seek(0, 1)
-	defer reader.Seek(start, 0)
+	start, _ := reader.Seek(0, sdkio.SeekCurrent)
+	defer reader.Seek(start, sdkio.SeekStart)
+
+	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+	size, err := aws.SeekerLen(reader)
+	if err != nil {
+		io.Copy(hash, reader)
+	} else {
+		io.CopyN(hash, reader, size)
+	}
 
-	io.Copy(hash, reader)
 	return hash.Sum(nil)
 }
 
-const doubleSpaces = "  "
+const doubleSpace = "  "
 
-var doubleSpaceBytes = []byte(doubleSpaces)
-
-func stripExcessSpaces(headerVals []string) []string {
-	vals := make([]string, len(headerVals))
-	for i, str := range headerVals {
-		// Trim leading and trailing spaces
-		trimmed := strings.TrimSpace(str)
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+	var j, k, l, m, spaces int
+	for i, str := range vals {
+		// Trim trailing spaces
+		for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+		}
 
-		idx := strings.Index(trimmed, doubleSpaces)
-		var buf []byte
-		for idx > -1 {
-			// Multiple adjacent spaces found
-			if buf == nil {
-				// first time create the buffer
-				buf = []byte(trimmed)
-			}
+		// Trim leading spaces
+		for k = 0; k < j && str[k] == ' '; k++ {
+		}
+		str = str[k : j+1]
 
-			stripToIdx := -1
-			for j := idx + 1; j < len(buf); j++ {
-				if buf[j] != ' ' {
-					buf = append(buf[:idx+1], buf[j:]...)
-					stripToIdx = j
-					break
-				}
-			}
+		// Strip multiple spaces.
+		j = strings.Index(str, doubleSpace)
+		if j < 0 {
+			vals[i] = str
+			continue
+		}
 
-			if stripToIdx >= 0 {
-				idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
-				if idx >= 0 {
-					idx += stripToIdx
+		buf := []byte(str)
+		for k, m, l = j, j, len(buf); k < l; k++ {
+			if buf[k] == ' ' {
+				if spaces == 0 {
+					// First space.
+					buf[m] = buf[k]
+					m++
 				}
+				spaces++
 			} else {
-				idx = -1
+				// End of multiple spaces.
+				spaces = 0
+				buf[m] = buf[k]
+				m++
 			}
 		}
 
-		if buf != nil {
-			vals[i] = string(buf)
-		} else {
-			vals[i] = trimmed
-		}
+		vals[i] = string(buf[:m])
 	}
-
-	return vals
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
index fa014b49..8b6f2342 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -3,9 +3,17 @@ package aws
 import (
 	"io"
 	"sync"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
 )
 
-// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Should
+// only be used with an io.Reader that is also an io.Seeker. Not doing so may
+// cause request signature errors, or request bodies not being sent for GET,
+// HEAD and DELETE HTTP methods.
+//
+// Deprecated: Should only be used with io.ReadSeeker. If using for
+// S3 PutObject to stream content use s3manager.Uploader instead.
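+//
+// A minimal usage sketch (illustrative only; assumes the wrapped reader is
+// seekable, e.g. a *bytes.Reader):
+//
+//	body := aws.ReadSeekCloser(bytes.NewReader([]byte("payload")))
+//	// body stays seekable, so the signer can hash and then rewind it.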
func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
 	return ReaderSeekerCloser{r}
 }
@@ -16,6 +24,22 @@ type ReaderSeekerCloser struct {
 	r io.Reader
 }
 
+// IsReaderSeekable returns if the underlying reader type can be seeked. An
+// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
+// type.
+func IsReaderSeekable(r io.Reader) bool {
+	switch v := r.(type) {
+	case ReaderSeekerCloser:
+		return v.IsSeeker()
+	case *ReaderSeekerCloser:
+		return v.IsSeeker()
+	case io.ReadSeeker:
+		return true
+	default:
+		return false
+	}
+}
+
 // Read reads from the reader up to size of p. The number of bytes read, and
 // error if it occurred will be returned.
 //
@@ -44,6 +68,77 @@ func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
 	return int64(0), nil
 }
 
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+	_, ok := r.r.(io.Seeker)
+	return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+	type lenner interface {
+		Len() int
+	}
+
+	if lr, ok := r.r.(lenner); ok {
+		return lr.Len(), true
+	}
+
+	return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+	if l, ok := r.HasLen(); ok {
+		return int64(l), nil
+	}
+
+	if s, ok := r.r.(io.Seeker); ok {
+		return seekerLen(s)
+	}
+
+	return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or error.
+func SeekerLen(s io.Seeker) (int64, error) {
+	// Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+	switch v := s.(type) {
+	case ReaderSeekerCloser:
+		return v.GetLen()
+	case *ReaderSeekerCloser:
+		return v.GetLen()
+	}
+
+	return seekerLen(s)
+}
+
+func seekerLen(s io.Seeker) (int64, error) {
+	curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	endOffset, err := s.Seek(0, sdkio.SeekEnd)
+	if err != nil {
+		return 0, err
+	}
+
+	_, err = s.Seek(curOffset, sdkio.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return endOffset - curOffset, nil
+}
+
 // Close closes the ReaderSeekerCloser.
 //
 // If the ReaderSeekerCloser is not an io.Closer nothing will be done.
@@ -102,5 +197,5 @@ func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
 func (b *WriteAtBuffer) Bytes() []byte {
 	b.m.Lock()
 	defer b.m.Unlock()
-	return b.buf[:len(b.buf):len(b.buf)]
+	return b.buf
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 00000000..6192b245
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
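+//
+// For example (an illustrative sketch, not part of the SDK source):
+//
+//	u, _ := url.Parse("https://s3.amazonaws.com:443/bucket")
+//	name := URLHostname(u) // "s3.amazonaws.com"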
+func URLHostname(url *url.URL) string {
+	return stripPort(url.Host)
+
+}
+
+// stripPort is a copy of Go 1.8 url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 750a7f47..54d4ba10 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.4.10"
+const SDKVersion = "1.15.73"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
new file mode 100644
index 00000000..e83a9988
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
@@ -0,0 +1,120 @@
+package ini
+
+// ASTKind represents different states in the parse table
+// and the type of AST that is being constructed
+type ASTKind int
+
+// ASTKind* is used in the parse table to transition between
+// the different states
+const (
+	ASTKindNone = ASTKind(iota)
+	ASTKindStart
+	ASTKindExpr
+	ASTKindEqualExpr
+	ASTKindStatement
+	ASTKindSkipStatement
+	ASTKindExprStatement
+	ASTKindSectionStatement
+	ASTKindNestedSectionStatement
+	ASTKindCompletedNestedSectionStatement
+	ASTKindCommentStatement
+	ASTKindCompletedSectionStatement
+)
+
+func (k ASTKind) String() string {
+	switch k {
+	case ASTKindNone:
+		return "none"
+	case ASTKindStart:
+		return "start"
+	case ASTKindExpr:
+		return "expr"
+	case ASTKindStatement:
+		return "stmt"
+	case ASTKindSectionStatement:
+		return "section_stmt"
+	case ASTKindExprStatement:
+		return "expr_stmt"
+	case ASTKindCommentStatement:
+		return "comment"
+	case ASTKindNestedSectionStatement:
+		return "nested_section_stmt"
+	case ASTKindCompletedSectionStatement:
+		return "completed_stmt"
+	case ASTKindSkipStatement:
+		return "skip"
+	default:
+		return ""
+	}
+}
+
+// AST lets us determine what kind of node we are on without
+// needing a cast.
+//
+// The root is always the first node in Children
+type AST struct {
+	Kind      ASTKind
+	Root      Token
+	RootToken bool
+	Children  []AST
+}
+
+func newAST(kind ASTKind, root AST, children ...AST) AST {
+	return AST{
+		Kind:     kind,
+		Children: append([]AST{root}, children...),
+	}
+}
+
+func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
+	return AST{
+		Kind:      kind,
+		Root:      root,
+		RootToken: true,
+		Children:  children,
+	}
+}
+
+// AppendChild will append to the list of children an AST has.
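+//
+// A hypothetical sketch, where tok is a previously lexed Token:
+//
+//	stmt := newAST(ASTKindStatement, AST{})
+//	stmt.AppendChild(newExpression(tok))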
+func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) + } +} + +// Start is used to indicate the starting state of the parse table. +var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 00000000..0895d53c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 00000000..0b76999b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 00000000..25ce0fe1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. 
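+//
+// The lexer turns input into tokens, the parser builds an AST from those
+// tokens, and a visitor (see DefaultVisitor) walks the AST to produce the
+// Sections returned to callers.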
+// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 00000000..04345a54 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 00000000..91ba2a59 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. +// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 00000000..8d462f77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 00000000..3b0ca7af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. 
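+//
+// A hedged usage sketch:
+//
+//	sections, err := ParseBytes([]byte("[default]\nregion = us-west-2\n"))
+//	if err != nil {
+//		return err
+//	}
+//	section, _ := sections.GetSection("default")
+//	region := section.String("region")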
+func ParseBytes(b []byte) (Sections, error) {
+	tree, err := ParseASTBytes(b)
+	if err != nil {
+		return Sections{}, err
+	}
+
+	v := NewDefaultVisitor()
+	if err = Walk(tree, v); err != nil {
+		return Sections{}, err
+	}
+
+	return v.Sections, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
new file mode 100644
index 00000000..582c024a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
@@ -0,0 +1,165 @@
+package ini
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+	// ErrCodeUnableToReadFile is used when a file fails to be
+	// opened or read from.
+	ErrCodeUnableToReadFile = "FailedRead"
+)
+
+// TokenType represents the various different token types
+type TokenType int
+
+func (t TokenType) String() string {
+	switch t {
+	case TokenNone:
+		return "none"
+	case TokenLit:
+		return "literal"
+	case TokenSep:
+		return "sep"
+	case TokenOp:
+		return "op"
+	case TokenWS:
+		return "ws"
+	case TokenNL:
+		return "newline"
+	case TokenComment:
+		return "comment"
+	case TokenComma:
+		return "comma"
+	default:
+		return ""
+	}
+}
+
+// TokenType enums
+const (
+	TokenNone = TokenType(iota)
+	TokenLit
+	TokenSep
+	TokenComma
+	TokenOp
+	TokenWS
+	TokenNL
+	TokenComment
+)
+
+type iniLexer struct{}
+
+// Tokenize will return a list of tokens during lexical analysis of the
+// io.Reader.
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
+	}
+
+	return l.tokenize(b)
+}
+
+func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
+	runes := bytes.Runes(b)
+	var err error
+	n := 0
+	tokenAmount := countTokens(runes)
+	tokens := make([]Token, tokenAmount)
+	count := 0
+
+	for len(runes) > 0 && count < tokenAmount {
+		switch {
+		case isWhitespace(runes[0]):
+			tokens[count], n, err = newWSToken(runes)
+		case isComma(runes[0]):
+			tokens[count], n = newCommaToken(), 1
+		case isComment(runes):
+			tokens[count], n, err = newCommentToken(runes)
+		case isNewline(runes):
+			tokens[count], n, err = newNewlineToken(runes)
+		case isSep(runes):
+			tokens[count], n, err = newSepToken(runes)
+		case isOp(runes):
+			tokens[count], n, err = newOpToken(runes)
+		default:
+			tokens[count], n, err = newLitToken(runes)
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		count++
+
+		runes = runes[n:]
+	}
+
+	return tokens[:count], nil
+}
+
+func countTokens(runes []rune) int {
+	count, n := 0, 0
+	var err error
+
+	for len(runes) > 0 {
+		switch {
+		case isWhitespace(runes[0]):
+			_, n, err = newWSToken(runes)
+		case isComma(runes[0]):
+			_, n = newCommaToken(), 1
+		case isComment(runes):
+			_, n, err = newCommentToken(runes)
+		case isNewline(runes):
+			_, n, err = newNewlineToken(runes)
+		case isSep(runes):
+			_, n, err = newSepToken(runes)
+		case isOp(runes):
+			_, n, err = newOpToken(runes)
+		default:
+			_, n, err = newLitToken(runes)
+		}
+
+		if err != nil {
+			return 0
+		}
+
+		count++
+		runes = runes[n:]
+	}
+
+	return count + 1
+}
+
+// Token indicates metadata about a given value.
+type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 00000000..3ea751a9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,337 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. +var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: 
TerminalState,
+	},
+}
+
+// ParseAST will parse input from an io.Reader using
+// an LL(1) parser.
+func ParseAST(r io.Reader) ([]AST, error) {
+	lexer := iniLexer{}
+	tokens, err := lexer.Tokenize(r)
+	if err != nil {
+		return []AST{}, err
+	}
+
+	return parse(tokens)
+}
+
+// ParseASTBytes will parse input from a byte slice using
+// an LL(1) parser.
+func ParseASTBytes(b []byte) ([]AST, error) {
+	lexer := iniLexer{}
+	tokens, err := lexer.tokenize(b)
+	if err != nil {
+		return []AST{}, err
+	}
+
+	return parse(tokens)
+}
+
+func parse(tokens []Token) ([]AST, error) {
+	start := Start
+	stack := newParseStack(3, len(tokens))
+
+	stack.Push(start)
+	s := newSkipper()
+
+loop:
+	for stack.Len() > 0 {
+		k := stack.Pop()
+
+		var tok Token
+		if len(tokens) == 0 {
+			// this occurs when all the tokens have been processed
+			// but reduction of what's left on the stack needs to
+			// occur.
+			tok = emptyToken
+		} else {
+			tok = tokens[0]
+		}
+
+		step := parseTable[k.Kind][tok.Type()]
+		if s.ShouldSkip(tok) {
+			// being in a skip state with no tokens will break out of
+			// the parse loop since there is nothing left to process.
+			if len(tokens) == 0 {
+				break loop
+			}
+
+			step = SkipTokenState
+		}
+
+		switch step {
+		case TerminalState:
+			// Finished parsing. Push what should be the last
+			// statement to the stack. If there is anything left
+			// on the stack, an error in parsing has occurred.
+			if k.Kind != ASTKindStart {
+				stack.MarkComplete(k)
+			}
+			break loop
+		case SkipTokenState:
+			// When skipping a token, the previous state was popped off the stack.
+			// To maintain the correct state, the previous state will be pushed
+			// onto the stack.
+			stack.Push(k)
+		case StatementState:
+			if k.Kind != ASTKindStart {
+				stack.MarkComplete(k)
+			}
+			expr := newExpression(tok)
+			stack.Push(expr)
+		case StatementPrimeState:
+			if tok.Type() != TokenOp {
+				stack.MarkComplete(k)
+				continue
+			}
+
+			if k.Kind != ASTKindExpr {
+				return nil, NewParseError(
+					fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
+				)
+			}
+
+			expr := newEqualExpr(k, tok)
+			stack.Push(expr)
+		case ValueState:
+			// ValueState requires the previous state to either be an equal expression
+			// or an expression statement.
+			//
+			// This grammar occurs when the RHS is a number, word, or quoted string.
+			// equal_expr -> lit op equal_expr'
+			// equal_expr' -> number | string | quoted_string
+			// quoted_string -> " quoted_string'
+			// quoted_string' -> string quoted_string_end
+			// quoted_string_end -> "
+			//
+			// otherwise
+			// expr_stmt -> equal_expr (expr_stmt')*
+			// expr_stmt' -> ws S | op S | MarkComplete
+			// S -> equal_expr' expr_stmt'
			switch k.Kind {
+			case ASTKindEqualExpr:
+				// assigning a value to some key
+				k.AppendChild(newExpression(tok))
+				stack.Push(newExprStatement(k))
+			case ASTKindExprStatement:
+				root := k.GetRoot()
+				children := root.GetChildren()
+				if len(children) == 0 {
+					return nil, NewParseError(
+						fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
+					)
+				}
+
+				rhs := children[len(children)-1]
+
+				if rhs.Root.ValueType != QuotedStringType {
+					rhs.Root.ValueType = StringType
+					rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
+
+				}
+				children[len(children)-1] = rhs
+				k.SetChildren(children)
+
+				stack.Push(k)
+			}
+		case OpenScopeState:
+			if !runeCompare(tok.Raw(), openBrace) {
+				return nil, NewParseError("expected '['")
+			}
+
+			stmt := newStatement()
+			stack.Push(stmt)
+		case CloseScopeState:
+			if !runeCompare(tok.Raw(), closeBrace) {
+				return nil, NewParseError("expected ']'")
+			}
+
+			// trim left hand side of spaces
+			for i := 0; i < len(k.Root.raw); i++ {
+				if !isWhitespace(k.Root.raw[i]) {
+					break
+				}
+
+				k.Root.raw = k.Root.raw[1:]
+				i--
+			}
+
+			// trim right hand side of spaces
+			for i := len(k.Root.raw) - 1; i > 0; i-- {
+				if !isWhitespace(k.Root.raw[i]) {
+					break
+				}
+
+				k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+				i--
+			}
+
+			stack.Push(newCompletedSectionStatement(k))
+		case SectionState:
+			var stmt AST
+
+			switch k.Kind {
+			case ASTKindStatement:
+				// If there are multiple literals inside of a scope declaration,
+				// then the current token's raw value will be appended to the Name.
+				//
+				// This handles cases like [ profile default ]
+				//
+				// k will represent a SectionStatement with the children representing
+				// the label of the section
+				stmt = newSectionStatement(tok)
+			case ASTKindSectionStatement:
+				k.Root.raw = append(k.Root.raw, tok.Raw()...)
+				stmt = k
+			default:
+				return nil, NewParseError(
+					fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
+				)
+			}
+
+			stack.Push(stmt)
+		case MarkCompleteState:
+			if k.Kind != ASTKindStart {
+				stack.MarkComplete(k)
+			}
+
+			if stack.Len() == 0 {
+				stack.Push(start)
+			}
+		case SkipState:
+			stack.Push(newSkipStatement(k))
+			s.Skip()
+		case CommentState:
+			if k.Kind == ASTKindStart {
+				stack.Push(k)
+			} else {
+				stack.MarkComplete(k)
+			}
+
+			stmt := newCommentStatement(tok)
+			stack.Push(stmt)
+		default:
+			return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok))
+		}
+
+		if len(tokens) > 0 {
+			tokens = tokens[1:]
+		}
+	}
+
+	// this occurs when a statement has not been completed
+	if stack.top > 1 {
+		return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container))
+	}
+
+	// returns a sublist which excludes the start symbol
+	return stack.List(), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
new file mode 100644
index 00000000..24df543d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
@@ -0,0 +1,324 @@
+package ini
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+var (
+	runesTrue  = []rune("true")
+	runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+	runesTrue,
+	runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+	for _, lv := range literalValues {
+		if isLitValue(lv, b) {
+			return true
+		}
+	}
+	return false
+}
+
+func isLitValue(want, have []rune) bool {
+	if len(have) < len(want) {
+		return false
+	}
+
+	for i := 0; i < len(want); i++ {
+		if want[i] != have[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+// isNumberValue will return whether or not the leading characters in
+// a byte slice are a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in a binary, octal, decimal (int | float), hex format,
+// or in scientific notation.
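+//
+// For example, the following leading runes would all be treated as numbers
+// (an illustrative list based on the formats handled below):
+//
+//	123, 3.14, -1e-4, 0x2B, 0o17, 0b101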
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 00000000..e52ac399 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 00000000..a45c0bc5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal + decimal + hex + exponent +) + +type 
numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 00000000..8a84c7cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
new file mode 100644
index 00000000..45728701
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
@@ -0,0 +1,43 @@
+package ini
+
+import "fmt"
+
+const (
+	// ErrCodeParseError is returned when a parsing error
+	// has occurred.
+	ErrCodeParseError = "INIParseError"
+)
+
+// ParseError is an error which is returned during any part of
+// the parsing process.
+type ParseError struct {
+	msg string
+}
+
+// NewParseError will return a new ParseError where message
+// is the description of the error.
+func NewParseError(message string) *ParseError {
+	return &ParseError{
+		msg: message,
+	}
+}
+
+// Code will return the ErrCodeParseError
+func (err *ParseError) Code() string {
+	return ErrCodeParseError
+}
+
+// Message returns the error's message
+func (err *ParseError) Message() string {
+	return err.msg
+}
+
+// OrigError returns nothing since there will never be any
+// original error.
+func (err *ParseError) OrigError() error {
+	return nil
+}
+
+func (err *ParseError) Error() string {
+	return fmt.Sprintf("%s: %s", err.Code(), err.Message())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
new file mode 100644
index 00000000..7f01cf7c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// ParseStack is a stack that contains a container, the stack portion,
+// and the list which is the list of ASTs that have been successfully
+// parsed.
+type ParseStack struct {
+	top       int
+	container []AST
+	list      []AST
+	index     int
+}
+
+func newParseStack(sizeContainer, sizeList int) ParseStack {
+	return ParseStack{
+		container: make([]AST, sizeContainer),
+		list:      make([]AST, sizeList),
+	}
+}
+
+// Pop will return and truncate the last container element.
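+//
+// Pop assumes at least one prior Push; the parse loop guards this by
+// checking Len before popping (a descriptive note, not original SDK docs).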
+func (s *ParseStack) Pop() AST {
+	s.top--
+	return s.container[s.top]
+}
+
+// Push will add the new AST to the container
+func (s *ParseStack) Push(ast AST) {
+	s.container[s.top] = ast
+	s.top++
+}
+
+// MarkComplete will append the AST to the list of completed statements
+func (s *ParseStack) MarkComplete(ast AST) {
+	s.list[s.index] = ast
+	s.index++
+}
+
+// List will return the completed statements
+func (s ParseStack) List() []AST {
+	return s.list[:s.index]
+}
+
+// Len will return the length of the container
+func (s *ParseStack) Len() int {
+	return s.top
+}
+
+func (s ParseStack) String() string {
+	buf := bytes.Buffer{}
+	for i, node := range s.list {
+		buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
+	}
+
+	return buf.String()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
new file mode 100644
index 00000000..f82095ba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
@@ -0,0 +1,41 @@
+package ini
+
+import (
+	"fmt"
+)
+
+var (
+	emptyRunes = []rune{}
+)
+
+func isSep(b []rune) bool {
+	if len(b) == 0 {
+		return false
+	}
+
+	switch b[0] {
+	case '[', ']':
+		return true
+	default:
+		return false
+	}
+}
+
+var (
+	openBrace  = []rune("[")
+	closeBrace = []rune("]")
+)
+
+func newSepToken(b []rune) (Token, int, error) {
+	tok := Token{}
+
+	switch b[0] {
+	case '[':
+		tok = newToken(TokenSep, openBrace, NoneType)
+	case ']':
+		tok = newToken(TokenSep, closeBrace, NoneType)
+	default:
+		return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
+	}
+	return tok, 1, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
new file mode 100644
index 00000000..6bb69644
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
@@ -0,0 +1,45 @@
+package ini
+
+// skipper is used to skip certain blocks of an ini file.
+// Currently skipper is used to skip nested blocks of ini
+// files. See example below
+//
+//	[ foo ]
+//	nested = ; this section will be skipped
+//		a=b
+//		c=d
+//	bar=baz ; this will be included
+type skipper struct {
+	shouldSkip bool
+	TokenSet   bool
+	prevTok    Token
+}
+
+func newSkipper() skipper {
+	return skipper{
+		prevTok: emptyToken,
+	}
+}
+
+func (s *skipper) ShouldSkip(tok Token) bool {
+	if s.shouldSkip &&
+		s.prevTok.Type() == TokenNL &&
+		tok.Type() != TokenWS {
+
+		s.Continue()
+		return false
+	}
+	s.prevTok = tok
+
+	return s.shouldSkip
+}
+
+func (s *skipper) Skip() {
+	s.shouldSkip = true
+	s.prevTok = emptyToken
+}
+
+func (s *skipper) Continue() {
+	s.shouldSkip = false
+	s.prevTok = emptyToken
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
new file mode 100644
index 00000000..ba0af01b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
@@ -0,0 +1,35 @@
+package ini
+
+// Statement is an empty AST mostly used for transitioning states.
+func newStatement() AST {
+	return newAST(ASTKindStatement, AST{})
+}
+
+// SectionStatement represents a section AST
+func newSectionStatement(tok Token) AST {
+	return newASTWithRootToken(ASTKindSectionStatement, tok)
+}
+
+// ExprStatement represents a completed expression AST
+func newExprStatement(ast AST) AST {
+	return newAST(ASTKindExprStatement, ast)
+}
+
+// CommentStatement represents a comment in the ini definition.
+// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 00000000..305999d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical 
character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. +func getNegativeNumber(b []rune) int { + if b[0] != '-' { + return 0 + } + + i := 1 + for ; i < len(b); i++ { + if !isDigit(b[i]) { + return i + } + } + + return i +} + +// isEscaped will return whether or not the character is an escaped +// character. +func isEscaped(value []rune, b rune) bool { + if len(value) == 0 { + return false + } + + switch b { + case '\'': // single quote + case '"': // quote + case 'n': // newline + case 't': // tab + case '\\': // backslash + default: + return false + } + + return value[len(value)-1] == '\\' +} + +func getEscapedByte(b rune) (rune, error) { + switch b { + case '\'': // single quote + return '\'', nil + case '"': // quote + return '"', nil + case 'n': // newline + return '\n', nil + case 't': // table + return '\t', nil + case '\\': // backslash + return '\\', nil + default: + return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b)) + } +} + +func removeEscapedCharacters(b []rune) []rune { + for i := 0; i < len(b); i++ { + if isEscaped(b[:i], b[i]) { + c, err := getEscapedByte(b[i]) + if err != nil { + return b + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) + i-- + } + } + + return b +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go new file mode 100644 index 00000000..94841c32 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -0,0 +1,166 @@ +package ini + +import ( + "fmt" + "sort" +) + +// Visitor is an interface used by walkers that will +// traverse an array of ASTs. +type Visitor interface { + VisitExpr(AST) error + VisitStatement(AST) error +} + +// DefaultVisitor is used to visit statements and expressions +// and ensure that they are both of the correct format. +// In addition, upon visiting this will build sections and populate +// the Sections field which can be used to retrieve profile +// configuration. +type DefaultVisitor struct { + scope string + Sections Sections +} + +// NewDefaultVisitor return a DefaultVisitor +func NewDefaultVisitor() *DefaultVisitor { + return &DefaultVisitor{ + Sections: Sections{ + container: map[string]Section{}, + }, + } +} + +// VisitExpr visits expressions... 
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+	t := v.Sections.container[v.scope]
+	if t.values == nil {
+		t.values = values{}
+	}
+
+	switch expr.Kind {
+	case ASTKindExprStatement:
+		opExpr := expr.GetRoot()
+		switch opExpr.Kind {
+		case ASTKindEqualExpr:
+			children := opExpr.GetChildren()
+			if len(children) <= 1 {
+				return NewParseError("unexpected token type")
+			}
+
+			rhs := children[1]
+
+			if rhs.Root.Type() != TokenLit {
+				return NewParseError("unexpected token type")
+			}
+
+			key := EqualExprKey(opExpr)
+			v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+			if err != nil {
+				return err
+			}
+
+			t.values[key] = v
+		default:
+			return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+		}
+	default:
+		return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+	}
+
+	v.Sections.container[v.scope] = t
+	return nil
+}
+
+// VisitStatement visits statements...
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+	switch stmt.Kind {
+	case ASTKindCompletedSectionStatement:
+		child := stmt.GetRoot()
+		if child.Kind != ASTKindSectionStatement {
+			return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+		}
+
+		name := string(child.Root.Raw())
+		v.Sections.container[name] = Section{}
+		v.scope = name
+	default:
+		return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+	}
+
+	return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+	container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second parameter.
+func (t Sections) GetSection(p string) (Section, bool) {
+	v, ok := t.container[p]
+	return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+	keys := make([]string, len(t.container))
+	i := 0
+	for k := range t.container {
+		keys[i] = k
+		i++
+	}
+
+	sort.Strings(keys)
+	return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+	Name   string
+	values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+	_, ok := t.values[k]
+	return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+	v, ok := t.values[k]
+	return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+	return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+	return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+	return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+	_, ok := t.values[k]
+	if !ok {
+		return ""
+	}
+	return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
new file mode 100644
index 00000000..99915f7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using v, the Visitor.
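+//
+// A hedged sketch mirroring Parse above (f is assumed to be an io.Reader):
+//
+//	tree, err := ParseAST(f)
+//	if err != nil {
+//		return err
+//	}
+//	v := NewDefaultVisitor()
+//	err = Walk(tree, v) // v.Sections now holds the parsed sections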
+func Walk(tree []AST, v Visitor) error {
+	for _, node := range tree {
+		switch node.Kind {
+		case ASTKindExpr,
+			ASTKindExprStatement:
+
+			if err := v.VisitExpr(node); err != nil {
+				return err
+			}
+		case ASTKindStatement,
+			ASTKindCompletedSectionStatement,
+			ASTKindNestedSectionStatement,
+			ASTKindCompletedNestedSectionStatement:
+
+			if err := v.VisitStatement(node); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
new file mode 100644
index 00000000..7ffb4ae0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+	"unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is defined as a space or tab.
+func isWhitespace(c rune) bool {
+	return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+	i := 0
+	for ; i < len(b); i++ {
+		if !isWhitespace(b[i]) {
+			break
+		}
+	}
+
+	return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
new file mode 100644
index 00000000..0b9b0dfc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+	return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+	return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+	return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 request ID 2 (the X-Amz-Id-2 header)
+// from the response.
+func RequestFailureWrapperHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: "awssdk.s3.errorHandler",
+		Fn: func(req *request.Request) {
+			reqErr, ok := req.Error.(awserr.RequestFailure)
+			if !ok || reqErr == nil {
+				return
+			}
+
+			hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+			if req.Error == nil {
+				return
+			}
+
+			req.Error = NewRequestFailure(reqErr, hostID)
+		},
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 00000000..5aa9137e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,10 @@
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+	SeekStart   = 0 // seek relative to the origin of the file
+	SeekCurrent = 1 // seek relative to the current offset
+	SeekEnd     = 2 // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 00000000..e5f00561
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,12 @@
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+	SeekStart   = io.SeekStart   // seek relative to the origin of the file
+	SeekCurrent = io.SeekCurrent // seek relative to the current offset
+	SeekEnd     = io.SeekEnd     // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
new file mode 100644
index 00000000..0c9802d8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
@@ -0,0 +1,29 @@
+package sdkrand
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// SeededRand is a new RNG using a thread-safe implementation of rand.Source
+var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
new file mode 100644
index 00000000..38ea61af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
@@ -0,0 +1,23 @@
+package sdkuri
+
+import (
+	"path"
+	"strings"
+)
+
+// PathJoin will join the elements of the path delimited by the "/"
+// character. Similar to path.Join with the exception that the trailing "/"
+// character is preserved if present.
+func PathJoin(elems ...string) string {
+	if len(elems) == 0 {
+		return ""
+	}
+
+	hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
+	str := path.Join(elems...)
+	if hasTrailing && str != "/" {
+		str += "/"
+	}
+
+	return str
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
new file mode 100644
index 00000000..b63e4c26
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
@@ -0,0 +1,12 @@
+package shareddefaults
+
+const (
+	// ECSCredsProviderEnvVar is an environment variable key used to
+	// determine which path needs to be hit.
+	ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// ECSContainerCredentialsURI is the endpoint to retrieve container
+// credentials. This can be overridden in tests to ensure the credential
+// process is behaving correctly.
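+//
+// For example (editor's note): with
+// AWS_CONTAINER_CREDENTIALS_RELATIVE_URI="/v2/credentials/xyz" set, the SDK
+// fetches credentials from http://169.254.170.2/v2/credentials/xyz.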
+var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 00000000..ebcbc2b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go deleted file mode 100644 index b4ad7405..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go +++ /dev/null @@ -1,70 +0,0 @@ -// Package endpoints validates regional endpoints for services. -package endpoints - -//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go -//go:generate gofmt -s -w endpoints_map.go - -import ( - "fmt" - "regexp" - "strings" -) - -// NormalizeEndpoint takes and endpoint and service API information to return a -// normalized endpoint and signing region. If the endpoint is not an empty string -// the service name and region will be used to look up the service's API endpoint. -// If the endpoint is provided the scheme will be added if it is not present. -func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) { - if endpoint == "" { - return EndpointForRegion(serviceName, region, disableSSL, useDualStack) - } - - return AddScheme(endpoint, disableSSL), "" -} - -// EndpointForRegion returns an endpoint and its signing region for a service and region. -// if the service and region pair are not found endpoint and signingRegion will be empty. 
-func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) { - dualStackField := "" - if useDualStack { - dualStackField = "/dualstack" - } - - derivedKeys := []string{ - region + "/" + svcName + dualStackField, - region + "/*" + dualStackField, - "*/" + svcName + dualStackField, - "*/*" + dualStackField, - } - - for _, key := range derivedKeys { - if val, ok := endpointsMap.Endpoints[key]; ok { - ep := val.Endpoint - ep = strings.Replace(ep, "{region}", region, -1) - ep = strings.Replace(ep, "{service}", svcName, -1) - - endpoint = ep - signingRegion = val.SigningRegion - break - } - } - - return AddScheme(endpoint, disableSSL), signingRegion -} - -// Regular expression to determine if the endpoint string is prefixed with a scheme. -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. -func AddScheme(endpoint string, disableSSL bool) string { - if endpoint != "" && !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json deleted file mode 100644 index c5bf3c7c..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "version": 2, - "endpoints": { - "*/*": { - "endpoint": "{service}.{region}.amazonaws.com" - }, - "cn-north-1/*": { - "endpoint": "{service}.{region}.amazonaws.com.cn", - "signatureVersion": "v4" - }, - "cn-north-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "us-gov-west-1/iam": { - "endpoint": "iam.us-gov.amazonaws.com" - }, - "us-gov-west-1/sts": { - "endpoint": "sts.us-gov-west-1.amazonaws.com" - }, - "us-gov-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-gov-west-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/cloudfront": { - "endpoint": "cloudfront.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/cloudsearchdomain": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/data.iot": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/iam": { - "endpoint": "iam.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/importexport": { - "endpoint": "importexport.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/route53": { - "endpoint": "route53.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/sts": { - "endpoint": "sts.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/waf": { - "endpoint": "waf.amazonaws.com", - "signingRegion": "us-east-1" - }, - "us-east-1/sdb": { - "endpoint": "sdb.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "*/s3/dualstack": { - "endpoint": "s3.dualstack.{region}.amazonaws.com" - }, - "us-east-1/s3": { - "endpoint": "s3.amazonaws.com" - }, - "eu-central-1/s3": { - "endpoint": "{service}.{region}.amazonaws.com" - } - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go deleted file mode 100644 index a81d158c..00000000 --- 
a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go +++ /dev/null @@ -1,91 +0,0 @@ -package endpoints - -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. - -type endpointStruct struct { - Version int - Endpoints map[string]endpointEntry -} - -type endpointEntry struct { - Endpoint string - SigningRegion string -} - -var endpointsMap = endpointStruct{ - Version: 2, - Endpoints: map[string]endpointEntry{ - "*/*": { - Endpoint: "{service}.{region}.amazonaws.com", - }, - "*/cloudfront": { - Endpoint: "cloudfront.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/cloudsearchdomain": { - Endpoint: "", - SigningRegion: "us-east-1", - }, - "*/data.iot": { - Endpoint: "", - SigningRegion: "us-east-1", - }, - "*/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "*/iam": { - Endpoint: "iam.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/importexport": { - Endpoint: "importexport.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/route53": { - Endpoint: "route53.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "*/s3/dualstack": { - Endpoint: "s3.dualstack.{region}.amazonaws.com", - }, - "*/sts": { - Endpoint: "sts.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/waf": { - Endpoint: "waf.amazonaws.com", - SigningRegion: "us-east-1", - }, - "cn-north-1/*": { - Endpoint: "{service}.{region}.amazonaws.com.cn", - }, - "cn-north-1/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "eu-central-1/s3": { - Endpoint: "{service}.{region}.amazonaws.com", - }, - "us-east-1/s3": { - Endpoint: "s3.amazonaws.com", - }, - "us-east-1/sdb": { - Endpoint: "sdb.amazonaws.com", - SigningRegion: "us-east-1", - }, - "us-gov-west-1/ec2metadata": { - Endpoint: "http://169.254.169.254/latest", - }, - "us-gov-west-1/iam": { - Endpoint: "iam.us-gov.amazonaws.com", - }, - "us-gov-west-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "us-gov-west-1/sts": { - Endpoint: "sts.us-gov-west-1.amazonaws.com", - }, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go new file mode 100644 index 00000000..ecc7bf82 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: 
json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + (*hs) = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err + case timestampValueType: + v, err := val.(json.Number).Int64() + return TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go new file mode 100644 index 00000000..4b972b2d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -0,0 +1,199 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Decoder provides decoding of an Event Stream messages. +type Decoder struct { + r io.Reader + logger aws.Logger +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the stream. 
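+//
+// A minimal read loop (editor's sketch; conn and handle are placeholders):
+//
+//	dec := NewDecoder(conn)
+//	var buf []byte
+//	for {
+//		msg, err := dec.Decode(buf)
+//		if err != nil {
+//			break // io.EOF marks the end of the stream
+//		}
+//		buf = msg.Payload[:0] // reuse the payload buffer next iteration
+//		handle(msg)
+//	}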
+func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { + reader := d.r + if d.logger != nil { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.logger, debugMsgBuf, m, err) + }() + } + + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +// UseLogger specifies the Logger that that the decoder should use to log the +// message decode to. +func (d *Decoder) UseLogger(logger aws.Logger) { + d.logger = logger +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + 
if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 00000000..150a6098 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,114 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash" + "hash/crc32" + "io" +) + +// Encoder provides EventStream message encoding. +type Encoder struct { + w io.Writer + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. +func (e *Encoder) Encode(msg Message) error { + e.headersBuf.Reset() + + err := encodeHeaders(e.headersBuf, msg.Headers) + if err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(e.w, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err := io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err := hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(e.w, binary.BigEndian, msgCRC) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +func encodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 00000000..5481ef30 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. 
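+//
+// Callers can detect it with a type assertion (editor's sketch):
+//
+//	if _, ok := err.(ChecksumError); ok {
+//		// the message was corrupted in transit
+//	}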
+type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go new file mode 100644 index 00000000..97937c8e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go @@ -0,0 +1,196 @@ +package eventstreamapi + +import ( + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Unmarshaler provides the interface for unmarshaling a EventStream +// message into a SDK type. +type Unmarshaler interface { + UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error +} + +// EventStream headers with specific meaning to async API functionality. +const ( + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". + + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) + +// EventReader provides reading from the EventStream of an reader. +type EventReader struct { + reader io.ReadCloser + decoder *eventstream.Decoder + + unmarshalerForEventType func(string) (Unmarshaler, error) + payloadUnmarshaler protocol.PayloadUnmarshaler + + payloadBuf []byte +} + +// NewEventReader returns a EventReader built from the reader and unmarshaler +// provided. Use ReadStream method to start reading from the EventStream. +func NewEventReader( + reader io.ReadCloser, + payloadUnmarshaler protocol.PayloadUnmarshaler, + unmarshalerForEventType func(string) (Unmarshaler, error), +) *EventReader { + return &EventReader{ + reader: reader, + decoder: eventstream.NewDecoder(reader), + payloadUnmarshaler: payloadUnmarshaler, + unmarshalerForEventType: unmarshalerForEventType, + payloadBuf: make([]byte, 10*1024), + } +} + +// UseLogger instructs the EventReader to use the logger and log level +// specified. +func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) { + if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) { + r.decoder.UseLogger(logger) + } +} + +// ReadEvent attempts to read a message from the EventStream and return the +// unmarshaled event value that the message is for. +// +// For EventStream API errors check if the returned error satisfies the +// awserr.Error interface to get the error's Code and Message components. +// +// EventUnmarshalers called with EventStream messages must take copies of the +// message's Payload. The payload will is reused between events read. +func (r *EventReader) ReadEvent() (event interface{}, err error) { + msg, err := r.decoder.Decode(r.payloadBuf) + if err != nil { + return nil, err + } + defer func() { + // Reclaim payload buffer for next message read. 
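+		// (Editor's note) Re-slicing to length zero keeps the backing
+		// array, so the next Decode call can refill it without a new
+		// allocation.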
+ r.payloadBuf = msg.Payload[0:0] + }() + + typ, err := GetHeaderString(msg, MessageTypeHeader) + if err != nil { + return nil, err + } + + switch typ { + case EventMessageType: + return r.unmarshalEventMessage(msg) + case ExceptionMessageType: + err = r.unmarshalEventException(msg) + return nil, err + case ErrorMessageType: + return nil, r.unmarshalErrorMessage(msg) + default: + return nil, fmt.Errorf("unknown eventstream message type, %v", typ) + } +} + +func (r *EventReader) unmarshalEventMessage( + msg eventstream.Message, +) (event interface{}, err error) { + eventType, err := GetHeaderString(msg, EventTypeHeader) + if err != nil { + return nil, err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return nil, err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return nil, err + } + + return ev, nil +} + +func (r *EventReader) unmarshalEventException( + msg eventstream.Message, +) (err error) { + eventType, err := GetHeaderString(msg, ExceptionTypeHeader) + if err != nil { + return err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return err + } + + var ok bool + err, ok = ev.(error) + if !ok { + err = messageError{ + code: "SerializationError", + msg: fmt.Sprintf( + "event stream exception %s mapped to non-error %T, %v", + eventType, ev, ev, + ), + } + } + + return err +} + +func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { + var msgErr messageError + + msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) + if err != nil { + return err + } + + msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) + if err != nil { + return err + } + + return msgErr +} + +// Close closes the EventReader's EventStream reader. +func (r *EventReader) Close() error { + return r.reader.Close() +} + +// GetHeaderString returns the value of the header as a string. If the header +// is not set or the value is not a string an error will be returned. 
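+//
+// For example (editor's sketch):
+//
+//	typ, err := GetHeaderString(msg, MessageTypeHeader) // "event", "error", or "exception"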
+func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { + headerVal := msg.Headers.Get(headerName) + if headerVal == nil { + return "", fmt.Errorf("error header %s not present", headerName) + } + + v, ok := headerVal.Get().(string) + if !ok { + return "", fmt.Errorf("error header value is not a string, %T", headerVal) + } + + return v, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go new file mode 100644 index 00000000..5ea5a988 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -0,0 +1,24 @@ +package eventstreamapi + +import "fmt" + +type messageError struct { + code string + msg string +} + +func (e messageError) Code() string { + return e.code +} + +func (e messageError) Message() string { + return e.msg +} + +func (e messageError) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.msg) +} + +func (e messageError) OrigErr() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go new file mode 100644 index 00000000..3b44dde2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -0,0 +1,166 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
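+//
+// Together with Set and Get this gives the Headers slice map-like behavior
+// (editor's sketch):
+//
+//	var hs Headers
+//	hs.Set(":event-type", StringValue("Records"))
+//	_ = hs.Get(":event-type") // StringValue("Records")
+//	hs.Del(":event-type")     // Get now returns nil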
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go new file mode 100644 index 00000000..e3fc0766 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -0,0 +1,501 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
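+// It is encoded as the first byte of every header value on the wire;
+// decodeHeaderValue in header.go reads it to choose the right decoder
+// (editor's note).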
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
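+//
+// Booleans carry their value in the type byte itself (trueValueType or
+// falseValueType), so no payload bytes follow the type (editor's note).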
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. +func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. 
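+//
+// Because a UUID is a fixed 16 bytes, encodeFixedSlice writes the type byte
+// followed directly by the raw bytes, with no length prefix (editor's note).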
+func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go new file mode 100644 index 00000000..2dc012a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -0,0 +1,103 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := encodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 00000000..f06f44ee --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,21 @@ +package protocol + +// ValidHostLabel returns if the label is a valid RFC 1123 Section 2.1 domain +// host label name. 
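+//
+// For example (editor's sketch): ValidHostLabel("my-bucket-1") is true, while
+// ValidHostLabel("my_bucket"), ValidHostLabel(""), and any label longer than
+// 63 characters are false.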
+func ValidHostLabel(label string) bool {
+	if l := len(label); l == 0 || l > 63 {
+		return false
+	}
+	for _, r := range label {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'A' && r <= 'Z':
+		case r >= 'a' && r <= 'z':
+		case r == '-':
+		default:
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 00000000..776d1101
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+	NoEscape EscapeMode = iota
+	Base64Escape
+	QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+	b, err := json.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch escape {
+	case NoEscape:
+		return string(b), nil
+	case Base64Escape:
+		return base64.StdEncoding.EncodeToString(b), nil
+	case QuotedEscape:
+		return strconv.Quote(string(b)), nil
+	}
+
+	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue,
+// optionally base64-decoding the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+	var b []byte
+	var err error
+
+	switch escape {
+	case NoEscape:
+		b = []byte(v)
+	case Base64Escape:
+		b, err = base64.StdEncoding.DecodeString(v)
+	case QuotedEscape:
+		var u string
+		u, err = strconv.Unquote(v)
+		b = []byte(u)
+	default:
+		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	m := aws.JSONValue{}
+	err = json.Unmarshal(b, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 00000000..e21614a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into a SDK shape.
+type PayloadUnmarshaler interface {
+	UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides support for unmarshaling a payload reader to
+// a shape without needing a SDK request first.
+type HandlerPayloadUnmarshal struct {
+	Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if
+// unmarshaling fails.
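+//
+// A hedged sketch of intended use (editor's addition; restjson.UnmarshalHandler
+// is assumed from the SDK's protocol packages):
+//
+//	u := HandlerPayloadUnmarshal{}
+//	u.Unmarshalers.PushBackNamed(restjson.UnmarshalHandler)
+//	var out OutputShape // hypothetical API shape
+//	err := u.UnmarshalPayload(bytes.NewReader(payload), &out)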
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+	req := &request.Request{
+		HTTPRequest: &http.Request{},
+		HTTPResponse: &http.Response{
+			StatusCode: 200,
+			Header:     http.Header{},
+			Body:       ioutil.NopCloser(r),
+		},
+		Data: v,
+	}
+
+	h.Unmarshalers.Run(req)
+
+	return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling a SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+	MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling a SDK shape into an io.Writer without
+// needing a SDK request first.
+type HandlerPayloadMarshal struct {
+	Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if
+// marshaling fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+	req := request.New(
+		aws.Config{},
+		metadata.ClientInfo{},
+		request.Handlers{},
+		nil,
+		&request.Operation{HTTPMethod: "GET"},
+		v,
+		nil,
+	)
+
+	h.Marshalers.Run(req)
+
+	if req.Error != nil {
+		return req.Error
+	}
+
+	io.Copy(w, req.GetBody())
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
index c705481c..60e5b09d 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -1,7 +1,7 @@
 // Package query provides serialization of AWS query requests, and responses.
 package query
 
-//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
 
 import (
 	"net/url"
@@ -25,7 +25,7 @@ func Build(r *request.Request) {
 		return
 	}
 
-	if r.ExpireTime == 0 {
+	if !r.IsPresigned() {
 		r.HTTPRequest.Method = "POST"
 		r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
 		r.SetBufferBody([]byte(body.Encode()))
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index 60ea0bd1..75866d01 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -76,6 +76,9 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri
 		if field.PkgPath != "" {
 			continue // ignore unexported fields
 		}
+		if field.Tag.Get("ignore") != "" {
+			continue
+		}
 
 		if protocol.CanSetIdempotencyToken(value.Field(i), field) {
 			token := protocol.GetIdempotencyToken()
@@ -118,9 +121,17 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string
 		return nil
 	}
 
+	if _, ok := value.Interface().([]byte); ok {
+		return q.parseScalar(v, value, prefix, tag)
+	}
+
 	// check for unflattened list member
 	if !q.isEC2 && tag.Get("flattened") == "" {
-		prefix += ".member"
+		if listName := tag.Get("locationNameList"); listName == "" {
+			prefix += ".member"
+		} else {
+			prefix += "." 
+ listName + } } for i := 0; i < value.Len(); i++ { @@ -222,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) case time.Time: const ISO8601UTC = "2006-01-02T15:04:05Z" - v.Set(name, value.UTC().Format(ISO8601UTC)) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) default: return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index a3ea4095..3495c730 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -1,6 +1,6 @@ package query -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go import ( "encoding/xml" @@ -23,7 +23,11 @@ func Unmarshal(r *request.Request) { decoder := xml.NewDecoder(r.HTTPResponse.Body) err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") if err != nil { - r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index f2142961..46d354e8 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -28,7 +28,11 @@ func UnmarshalError(r *request.Request) { bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed to read from query HTTP response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } @@ -61,6 +65,10 @@ func UnmarshalError(r *request.Request) { } // Failed to retrieve any error message from the response body - r.Error = awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr), + r.HTTPResponse.StatusCode, + r.RequestID, + ) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 5f412516..b34f5258 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -14,13 +14,12 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) -// RFC822 returns an RFC822 formatted timestamp for AWS protocols -const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" - // Whether the byte value 
can be sent without escaping in AWS URLs var noEscape [256]bool @@ -46,14 +45,29 @@ var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} func Build(r *request.Request) { if r.ParamsFilled() { v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v) + buildLocationElements(r, v, false) buildBody(r, v) } } -func buildLocationElements(r *request.Request, v reflect.Value) { +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { query := r.HTTPRequest.URL.Query() + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + for i := 0; i < v.NumField(); i++ { m := v.Field(i) if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { @@ -66,23 +80,34 @@ func buildLocationElements(r *request.Request, v reflect.Value) { if name == "" { name = field.Name } - if m.Kind() == reflect.Ptr { + if kind := m.Kind(); kind == reflect.Ptr { m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } } if !m.IsValid() { continue } + if field.Tag.Get("ignore") != "" { + continue + } var err error switch field.Tag.Get("location") { case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name) + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) case "uri": - err = buildURI(r.HTTPRequest.URL, m, name) + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) case "querystring": - err = buildQueryString(query, m, name) + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } } r.Error = err } @@ -92,7 +117,9 @@ func buildLocationElements(r *request.Request, v reflect.Value) { } r.HTTPRequest.URL.RawQuery = query.Encode() - updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } } func buildBody(r *request.Request, v reflect.Value) { @@ -120,8 +147,8 @@ func buildBody(r *request.Request, v reflect.Value) { } } -func buildHeader(header *http.Header, v reflect.Value, name string) error { - str, err := convertType(v) +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { @@ -133,9 +160,10 @@ func buildHeader(header *http.Header, v reflect.Value, name string) error { return nil } -func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key)) + str, err := convertType(v.MapIndex(key), tag) if err == errValueNotSet { continue } else if err != nil { @@ -148,23 +176,24 @@ func buildHeaderMap(header *http.Header, v reflect.Value, prefix 
string) error { return nil } -func buildURI(u *url.URL, v reflect.Value, name string) error { - value, err := convertType(v) +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { return awserr.New("SerializationError", "failed to encode REST request", err) } - uri := u.Path - uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) - uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) - u.Path = uri + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) return nil } -func buildQueryString(query url.Values, v reflect.Value, name string) error { +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { switch value := v.Interface().(type) { case []*string: for _, item := range value { @@ -181,7 +210,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string) error { } } default: - str, err := convertType(v) + str, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { @@ -193,25 +222,17 @@ func buildQueryString(query url.Values, v reflect.Value, name string) error { return nil } -func updatePath(url *url.URL, urlPath string) { - scheme, query := url.Scheme, url.RawQuery +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") - hasSlash := strings.HasSuffix(urlPath, "/") + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) - // clean up path - urlPath = path.Clean(urlPath) - if hasSlash && !strings.HasSuffix(urlPath, "/") { - urlPath += "/" + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" } - - // get formatted URL minus scheme so we can build this into Opaque - url.Scheme, url.Path, url.RawQuery = "", "", "" - s := url.String() - url.Scheme = scheme - url.RawQuery = query - - // build opaque URI - url.Opaque = s + urlPath } // EscapePath escapes part of a URL path in Amazon style @@ -228,13 +249,12 @@ func EscapePath(path string, encodeSep bool) string { return buf.String() } -func convertType(v reflect.Value) (string, error) { +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { v = reflect.Indirect(v) if !v.IsValid() { return "", errValueNotSet } - var str string switch value := v.Interface().(type) { case string: str = value @@ -247,9 +267,28 @@ func convertType(v reflect.Value) (string, error) { case float64: str = strconv.FormatFloat(value, 'f', -1, 64) case time.Time: - str = value.UTC().Format(RFC822) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + err := 
fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) return "", err } return str, nil diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 2cba1d9a..33fd53b1 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -1,6 +1,7 @@ package rest import ( + "bytes" "encoding/base64" "fmt" "io" @@ -14,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalHandler is a named request handler for unmarshaling rest protocol requests @@ -70,10 +72,16 @@ func unmarshalBody(r *request.Request, v reflect.Value) { } default: switch payload.Type().String() { - case "io.ReadSeeker": - payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) - case "aws.ReadSeekCloser", "io.ReadCloser": + case "io.ReadCloser": payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", + "failed to read response body", err) + return + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) default: io.Copy(ioutil.Discard, r.HTTPResponse.Body) defer r.HTTPResponse.Body.Close() @@ -105,7 +113,7 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { case "statusCode": unmarshalStatusCode(m, r.HTTPResponse.StatusCode) case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) if err != nil { r.Error = awserr.New("SerializationError", "failed to decode REST response", err) break @@ -152,8 +160,13 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err return nil } -func unmarshalHeader(v reflect.Value, header string) error { - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + isJSONValue := tag.Get("type") == "jsonvalue" + if isJSONValue { + if len(header) == 0 { + return nil + } + } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { return nil } @@ -185,11 +198,25 @@ func unmarshalHeader(v reflect.Value, header string) error { } v.Set(reflect.ValueOf(&f)) case *time.Time: - t, err := time.Parse(RFC822, header) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) if err != nil { return err } v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) default: err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) return err diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index c74b97e1..b0f4e245 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -2,8 +2,8 @@ // requests and responses. 
package restxml -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go import ( "bytes" @@ -36,7 +36,11 @@ func Build(r *request.Request) { var buf bytes.Buffer err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) if err != nil { - r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed to encode rest XML request", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } r.SetBufferBody(buf.Bytes()) @@ -50,7 +54,11 @@ func Unmarshal(r *request.Request) { decoder := xml.NewDecoder(r.HTTPResponse.Body) err := xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "failed to decode REST XML response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } } else { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 00000000..b7ed6c6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,72 @@ +package protocol + +import ( + "strconv" + "time" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" ) + +// Time formats supported by the SDK +const ( + // RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + + // RFC3339, a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05Z" +) + +// IsKnownTimestampFormat returns whether the timestamp format name +// is known to the SDK's protocols. +func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns the string value of the time in the named format. +func FormatTime(name string, t time.Time) string { + t = t.UTC() + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822TimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601TimeFormat) + case UnixTimeFormatName: + return strconv.FormatInt(t.Unix(), 10) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time according to the named format. Returns +// the parsed time, or an error if parsing fails.
+func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), 0), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 221029ba..1bfe45f6 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -13,9 +13,13 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) -// BuildXML will serialize params into an xml.Encoder. -// Error will be returned if the serialization of any of the params or nested values fails. +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { b := xmlBuilder{encoder: e, namespaces: map[string]string{}} root := NewXMLElement(xml.Name{}) if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { @@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error { for _, c := range root.Children { for _, v := range c { - return StructToXML(e, v, false) + return StructToXML(e, v, sorted) } } return nil @@ -90,8 +94,6 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl return nil } - fieldAdded := false - // unwrap payloads if payload := tag.Get("payload"); payload != "" { field, _ := value.Type().FieldByName(payload) @@ -119,6 +121,8 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl child.Attr = append(child.Attr, ns) } + var payloadFields, nonPayloadFields int + t := value.Type() for i := 0; i < value.NumField(); i++ { member := elemOf(value.Field(i)) @@ -127,11 +131,16 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl if field.PkgPath != "" { continue // ignore unexported fields } + if field.Tag.Get("ignore") != "" { + continue + } mTag := field.Tag if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ continue } + payloadFields++ if protocol.CanSetIdempotencyToken(value.Field(i), field) { token := protocol.GetIdempotencyToken() @@ -146,11 +155,11 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl if err := b.buildValue(member, child, mTag); err != nil { return err } - - fieldAdded = true } - if fieldAdded { // only append this child if we have one ore more valid members + // The child shape is only omitted when the shape contains nothing but + // non-payload fields, e.g. headers/query.
+ if !(payloadFields == 0 && nonPayloadFields > 0) { current.AddChild(child) } @@ -275,8 +284,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl case float32: str = strconv.FormatFloat(float64(converted), 'f', -1, 32) case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - str = converted.UTC().Format(ISO8601UTC) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) default: return fmt.Errorf("unsupported value for param %s: %v (%s)", tag.Get("locationName"), value.Interface(), value.Type().Name()) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 49f291a8..ff1ef683 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -9,13 +9,18 @@ import ( "strconv" "strings" "time" + + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalXML deserializes an xml.Decoder into the container v. V // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail. func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, _ := XMLToStruct(d, nil) + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } if n.Children != nil { for _, root := range n.Children { for _, c := range root { @@ -23,7 +28,7 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { c = wrappedChild[0] // pull out wrapped element } - err := parse(reflect.ValueOf(v), c, "") + err = parse(reflect.ValueOf(v), c, "") if err != nil { if err == io.EOF { return nil @@ -49,9 +54,15 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { if t == "" { switch rtype.Kind() { case reflect.Struct: - t = "structure" + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } case reflect.Slice: - t = "list" + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } case reflect.Map: t = "map" } @@ -111,11 +122,8 @@ func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { elems := node.Children[name] if elems == nil { // try to find the field in attributes - for _, a := range node.Attr { - if name == a.Name.Local { - // turn this into a text node for de-serializing - elems = []*XMLNode{{Text: a.Value}} - } + if val, ok := node.findElem(name); ok { + elems = []*XMLNode{{Text: val}} } } @@ -247,8 +255,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { } r.Set(reflect.ValueOf(&v)) case *time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - t, err := time.Parse(ISO8601UTC, node.Text) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 72c198a9..515ce152 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -2,6 +2,7 @@ package xmlutil import ( "encoding/xml" + "fmt" "io" "sort" ) 
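
Several of the marshalers above now defer to the shared timestamp helpers instead of hard-coding a layout, with the struct tag's timestampFormat selecting the format name. A short sketch of how the three format names behave (the sample instant is arbitrary):

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/private/protocol"
    )

    func main() {
        t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

        names := []string{
            protocol.RFC822TimeFormatName,  // header default
            protocol.ISO8601TimeFormatName, // XML/query body default
            protocol.UnixTimeFormatName,    // numeric epoch seconds
        }
        for _, name := range names {
            s := protocol.FormatTime(name, t)
            parsed, err := protocol.ParseTime(name, s)
            if err != nil {
                panic(err)
            }
            fmt.Printf("%-13s -> %q (round-trips: %v)\n", name, s, parsed.Equal(t))
        }
    }
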
@@ -12,6 +13,9 @@ type XMLNode struct { Children map[string][]*XMLNode `json:",omitempty"` Text string `json:",omitempty"` Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode } // NewXMLElement returns a pointer to a new XMLNode initialized to default values. @@ -25,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode { // AddChild adds child to the XMLNode. func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n if _, ok := n.Children[child.Name.Local]; !ok { n.Children[child.Name.Local] = []*XMLNode{} } @@ -36,11 +41,16 @@ func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { out := &XMLNode{} for { tok, err := d.Token() - if tok == nil || err == io.EOF { - break - } if err != nil { - return out, err + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break } switch typed := tok.(type) { @@ -59,21 +69,54 @@ func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { slice = []*XMLNode{} } node, e := XMLToStruct(d, &el) + out.findNamespaces() if e != nil { return out, e } node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut slice = append(slice, node) out.Children[name] = slice case xml.EndElement: if s != nil && s.Name.Local == typed.Name.Local { // matching end token return out, nil } + out = &XMLNode{} } } return out, nil } +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + // StructToXML writes an XMLNode to a xml.Encoder as tokens. func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go deleted file mode 100644 index b51e9449..00000000 --- a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go +++ /dev/null @@ -1,134 +0,0 @@ -package waiter - -import ( - "fmt" - "reflect" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides a collection of configuration values to setup a generated -// waiter code with. -type Config struct { - Name string - Delay int - MaxAttempts int - Operation string - Acceptors []WaitAcceptor -} - -// A WaitAcceptor provides the information needed to wait for an API operation -// to complete. -type WaitAcceptor struct { - Expected interface{} - Matcher string - State string - Argument string -} - -// A Waiter provides waiting for an operation to complete. -type Waiter struct { - Config - Client interface{} - Input interface{} -} - -// Wait waits for an operation to complete, expire max attempts, or fail. Error -// is returned if the operation fails. 
-func (w *Waiter) Wait() error { - client := reflect.ValueOf(w.Client) - in := reflect.ValueOf(w.Input) - method := client.MethodByName(w.Config.Operation + "Request") - - for i := 0; i < w.MaxAttempts; i++ { - res := method.Call([]reflect.Value{in}) - req := res[0].Interface().(*request.Request) - req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) - - err := req.Send() - for _, a := range w.Acceptors { - result := false - var vals []interface{} - switch a.Matcher { - case "pathAll", "path": - // Require all matches to be equal for result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - if len(vals) == 0 { - break - } - result = true - for _, val := range vals { - if !awsutil.DeepEqual(val, a.Expected) { - result = false - break - } - } - case "pathAny": - // Only a single match needs to equal for the result to match - vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) - for _, val := range vals { - if awsutil.DeepEqual(val, a.Expected) { - result = true - break - } - } - case "status": - s := a.Expected.(int) - result = s == req.HTTPResponse.StatusCode - case "error": - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) - } - case "pathList": - // ignored matcher - default: - logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", - w.Config.Operation, a.Matcher) - } - - if !result { - // If there was no matching result found there is nothing more to do - // for this response, retry the request. - continue - } - - switch a.State { - case "success": - // waiter completed - return nil - case "failure": - // Waiter failure state triggered - return awserr.New("ResourceNotReady", - fmt.Sprintf("failed waiting for successful resource state"), err) - case "retry": - // clear the error and retry the operation - err = nil - default: - logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", - w.Config.Operation, a.State) - } - } - if err != nil { - return err - } - - time.Sleep(time.Second * time.Duration(w.Delay)) - } - - return awserr.New("ResourceNotReady", - fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) -} - -func logf(client reflect.Value, msg string, args ...interface{}) { - cfgVal := client.FieldByName("Config") - if !cfgVal.IsValid() { - return - } - if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { - cfg.Logger.Log(fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 553b0e4a..d5d61772 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -1,16 +1,24 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package s3 provides a client for Amazon Simple Storage Service. 
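
The deleted private waiter above resolves its acceptor arguments with awsutil's path lookups; the "pathAll" matcher succeeds only when the path yields at least one value and every value equals the expected one. A standalone sketch of those semantics against a hypothetical response shape (the types and path here are invented for illustration, standing in for req.Data and a.Argument):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
    )

    // output stands in for an operation's response data; fields are hypothetical.
    type instance struct{ State string }
    type output struct{ Instances []instance }

    func main() {
        resp := &output{Instances: []instance{{State: "running"}, {State: "running"}}}

        // "pathAll": zero matches is a miss, and every match must be equal.
        vals, _ := awsutil.ValuesAtPath(resp, "Instances[].State")
        matched := len(vals) > 0
        for _, v := range vals {
            if !awsutil.DeepEqual(v, "running") {
                matched = false
                break
            }
        }
        fmt.Println("pathAll matched:", matched)
    }
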
package s3 import ( + "bytes" "fmt" "io" + "sync" + "sync/atomic" "time" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" "github.com/aws/aws-sdk-go/private/protocol/restxml" ) @@ -18,17 +26,18 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // AbortMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the AbortMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AbortMultipartUpload method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AbortMultipartUploadRequest method. // req, resp := client.AbortMultipartUploadRequest(params) @@ -38,6 +47,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // fmt.Println(resp) // } // +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { op := &request.Operation{ Name: opAbortMultipartUpload, @@ -49,38 +59,68 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req input = &AbortMultipartUploadInput{} } - req = c.newRequest(op, input, output) output = &AbortMultipartUploadOutput{} - req.Data = output + req = c.newRequest(op, input, output) return } +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// // Aborts a multipart upload. // // To verify that all parts have been removed, so you don't get charged for // the part storage, you should call the List Parts operation and ensure the // parts list is empty. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { req, out := c.AbortMultipartUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCompleteMultipartUpload = "CompleteMultipartUpload" // CompleteMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CompleteMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CompleteMultipartUpload method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CompleteMultipartUploadRequest method. // req, resp := client.CompleteMultipartUploadRequest(params) @@ -90,6 +130,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload" // fmt.Println(resp) // } // +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { op := &request.Operation{ Name: opCompleteMultipartUpload, @@ -101,34 +142,59 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) input = &CompleteMultipartUploadInput{} } - req = c.newRequest(op, input, output) output = &CompleteMultipartUploadOutput{} - req.Data = output + req = c.newRequest(op, input, output) return } +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. +// // Completes a multipart upload by assembling previously uploaded parts. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { req, out := c.CompleteMultipartUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCopyObject = "CopyObject" // CopyObjectRequest generates a "aws/request.Request" representing the // client's request for the CopyObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyObject for more information on using the CopyObject +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CopyObject method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CopyObjectRequest method. // req, resp := client.CopyObjectRequest(params) @@ -138,6 +204,7 @@ const opCopyObject = "CopyObject" // fmt.Println(resp) // } // +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { op := &request.Operation{ Name: opCopyObject, @@ -149,34 +216,65 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou input = &CopyObjectInput{} } - req = c.newRequest(op, input, output) output = &CopyObjectOutput{} - req.Data = output + req = c.newRequest(op, input, output) return } +// CopyObject API operation for Amazon Simple Storage Service. +// // Creates a copy of an object that is already stored in Amazon S3. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY operation is not in the active tier and is +// only stored in Amazon Glacier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { req, out := c.CopyObjectRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateBucket = "CreateBucket" // CreateBucketRequest generates a "aws/request.Request" representing the // client's request for the CreateBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateBucket method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateBucketRequest method. 
// req, resp := client.CreateBucketRequest(params) @@ -186,6 +284,7 @@ const opCreateBucket = "CreateBucket" // fmt.Println(resp) // } // +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { op := &request.Operation{ Name: opCreateBucket, @@ -197,34 +296,67 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request input = &CreateBucketInput{} } - req = c.newRequest(op, input, output) output = &CreateBucketOutput{} - req.Data = output + req = c.newRequest(op, input, output) return } +// CreateBucket API operation for Amazon Simple Storage Service. +// // Creates a new bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested bucket name is not available. The bucket namespace is shared +// by all users of the system. Please select a different name and try again. +// +// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { req, out := c.CreateBucketRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateMultipartUpload = "CreateMultipartUpload" // CreateMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CreateMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateMultipartUpload method directly -// instead. +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateMultipartUploadRequest method. // req, resp := client.CreateMultipartUploadRequest(params) @@ -234,6 +366,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload" // fmt.Println(resp) // } // +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { op := &request.Operation{ Name: opCreateMultipartUpload, @@ -245,12 +378,13 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re input = &CreateMultipartUploadInput{} } - req = c.newRequest(op, input, output) output = &CreateMultipartUploadOutput{} - req.Data = output + req = c.newRequest(op, input, output) return } +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// // Initiates a multipart upload and returns an upload ID. // // Note: After you initiate multipart upload and upload one or more parts, you @@ -258,27 +392,51 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // for storage of the uploaded parts. Only after you either complete or abort // multipart upload, Amazon S3 frees up the parts storage and stops charging // you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { req, out := c.CreateMultipartUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucket = "DeleteBucket" // DeleteBucketRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucket method directly -// instead. +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketRequest method. // req, resp := client.DeleteBucketRequest(params) @@ -288,6 +446,7 @@ const opDeleteBucket = "DeleteBucket" // fmt.Println(resp) // } // +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { op := &request.Operation{ Name: opDeleteBucket, @@ -299,2864 +458,13199 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request input = &DeleteBucketInput{} } + output = &DeleteBucketOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketOutput{} - req.Data = output return } +// DeleteBucket API operation for Amazon Simple Storage Service. +// // Deletes the bucket. All objects (including all object versions and Delete // Markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { req, out := c.DeleteBucketRequest(input) - err := req.Send() - return out, err + return out, req.Send() } -const opDeleteBucketCors = "DeleteBucketCors" +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// DeleteBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" + +// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketCors method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration +// API call, and error handling. // -// // Example sending a request using the DeleteBucketCorsRequest method. -// req, resp := client.DeleteBucketCorsRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. +// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { op := &request.Operation{ - Name: opDeleteBucketCors, + Name: opDeleteBucketAnalyticsConfiguration, HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?cors", + HTTPPath: "/{Bucket}?analytics", } if input == nil { - input = &DeleteBucketCorsInput{} + input = &DeleteBucketAnalyticsConfigurationInput{} } + output = &DeleteBucketAnalyticsConfigurationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketCorsOutput{} - req.Data = output return } -// Deletes the cors configuration information set for the bucket. -func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { - req, out := c.DeleteBucketCorsRequest(input) - err := req.Send() - return out, err +// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketAnalyticsConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + return out, req.Send() } -const opDeleteBucketLifecycle = "DeleteBucketLifecycle" +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketLifecycle method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. // -// // Example sending a request using the DeleteBucketLifecycleRequest method. -// req, resp := client.DeleteBucketLifecycleRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketCorsRequest method. 
+// req, resp := client.DeleteBucketCorsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { op := &request.Operation{ - Name: opDeleteBucketLifecycle, + Name: opDeleteBucketCors, HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?lifecycle", + HTTPPath: "/{Bucket}?cors", } if input == nil { - input = &DeleteBucketLifecycleInput{} + input = &DeleteBucketCorsInput{} } + output = &DeleteBucketCorsOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketLifecycleOutput{} - req.Data = output return } -// Deletes the lifecycle configuration from the bucket. -func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { - req, out := c.DeleteBucketLifecycleRequest(input) - err := req.Send() - return out, err +// DeleteBucketCors API operation for Amazon Simple Storage Service. +// +// Deletes the cors configuration information set for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() } -const opDeleteBucketPolicy = "DeleteBucketPolicy" +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketPolicy method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. // -// // Example sending a request using the DeleteBucketPolicyRequest method. -// req, resp := client.DeleteBucketPolicyRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. +// req, resp := client.DeleteBucketEncryptionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { op := &request.Operation{ - Name: opDeleteBucketPolicy, + Name: opDeleteBucketEncryption, HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?policy", + HTTPPath: "/{Bucket}?encryption", } if input == nil { - input = &DeleteBucketPolicyInput{} + input = &DeleteBucketEncryptionInput{} } + output = &DeleteBucketEncryptionOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketPolicyOutput{} - req.Data = output return } -// Deletes the policy from the bucket. -func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { - req, out := c.DeleteBucketPolicyRequest(input) - err := req.Send() - return out, err +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// Deletes the server-side encryption configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() } -const opDeleteBucketReplication = "DeleteBucketReplication" +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. 
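As the surrounding comments note, the WithContext variants take a context for request cancellation; the standard library's context.Context satisfies aws.Context on Go 1.7 and later. A rough sketch with a hypothetical bucket name, assuming the same *s3.S3 client (svc) and imports as the sketch above, plus "context" and "time":

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

// The call is abandoned, and the in-flight HTTP request cancelled,
// if the deadline passes before the service responds.
_, err := svc.DeleteBucketEncryptionWithContext(ctx, &s3.DeleteBucketEncryptionInput{
	Bucket: aws.String("example-bucket"), // hypothetical bucket
})
if err != nil {
	fmt.Println("DeleteBucketEncryption failed:", err)
}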
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" + +// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketReplication method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration +// API call, and error handling. // -// // Example sending a request using the DeleteBucketReplicationRequest method. -// req, resp := client.DeleteBucketReplicationRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. 
+// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { op := &request.Operation{ - Name: opDeleteBucketReplication, + Name: opDeleteBucketInventoryConfiguration, HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?replication", + HTTPPath: "/{Bucket}?inventory", } if input == nil { - input = &DeleteBucketReplicationInput{} + input = &DeleteBucketInventoryConfigurationInput{} } + output = &DeleteBucketInventoryConfigurationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketReplicationOutput{} - req.Data = output return } -// Deletes the replication configuration from the bucket. -func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { - req, out := c.DeleteBucketReplicationRequest(input) - err := req.Send() - return out, err +// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + return out, req.Send() } -const opDeleteBucketTagging = "DeleteBucketTagging" +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketTagging operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketTagging method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. // -// // Example sending a request using the DeleteBucketTaggingRequest method. -// req, resp := client.DeleteBucketTaggingRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { op := &request.Operation{ - Name: opDeleteBucketTagging, + Name: opDeleteBucketLifecycle, HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?tagging", + HTTPPath: "/{Bucket}?lifecycle", } if input == nil { - input = &DeleteBucketTaggingInput{} + input = &DeleteBucketLifecycleInput{} } + output = &DeleteBucketLifecycleOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketTaggingOutput{} - req.Data = output return } -// Deletes the tags from the bucket. -func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { - req, out := c.DeleteBucketTaggingRequest(input) - err := req.Send() - return out, err +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deletes the lifecycle configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() } -const opDeleteBucketWebsite = "DeleteBucketWebsite" +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the DeleteBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" + +// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketWebsite method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration +// API call, and error handling. // -// // Example sending a request using the DeleteBucketWebsiteRequest method. -// req, resp := client.DeleteBucketWebsiteRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. 
+// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { op := &request.Operation{ - Name: opDeleteBucketWebsite, + Name: opDeleteBucketMetricsConfiguration, HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?website", + HTTPPath: "/{Bucket}?metrics", } if input == nil { - input = &DeleteBucketWebsiteInput{} + input = &DeleteBucketMetricsConfigurationInput{} } + output = &DeleteBucketMetricsConfigurationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteBucketWebsiteOutput{} - req.Data = output return } -// This operation removes the website configuration from the bucket. -func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { - req, out := c.DeleteBucketWebsiteRequest(input) - err := req.Send() - return out, err +// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes a metrics configuration (specified by the metrics configuration ID) +// from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + return out, req.Send() } -const opDeleteObject = "DeleteObject" +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// DeleteObjectRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObject method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the DeleteObjectRequest method. -// req, resp := client.DeleteObjectRequest(params) +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { op := &request.Operation{ - Name: opDeleteObject, + Name: opDeleteBucketPolicy, HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", + HTTPPath: "/{Bucket}?policy", } if input == nil { - input = &DeleteObjectInput{} + input = &DeleteBucketPolicyInput{} } + output = &DeleteBucketPolicyOutput{} req = c.newRequest(op, input, output) - output = &DeleteObjectOutput{} - req.Data = output + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// Removes the null version (if there is one) of an object and inserts a delete -// marker, which becomes the latest version of the object. If there isn't a -// null version, Amazon S3 does not remove any objects. -func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - err := req.Send() - return out, err +// DeleteBucketPolicy API operation for Amazon Simple Storage Service. +// +// Deletes the policy from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketPolicy for usage and error information. 
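The generated constructors above also show the handler mechanism at work: for these body-less DELETE operations the REST-XML unmarshal handler is removed and protocol.UnmarshalDiscardBodyHandler is pushed in its place, since there is no response body to decode. Callers can hook the same handler lists themselves; a sketch, assuming github.com/aws/aws-sdk-go/aws/request is imported and noting that r.HTTPResponse may be nil when the transport itself failed:

req, _ := svc.DeleteBucketPolicyRequest(&s3.DeleteBucketPolicyInput{
	Bucket: aws.String("example-bucket"), // hypothetical bucket
})
// The Complete handler list runs after the request finishes, success or failure.
req.Handlers.Complete.PushBack(func(r *request.Request) {
	if r.HTTPResponse != nil {
		fmt.Println("completed with status:", r.HTTPResponse.StatusCode)
	}
})
_ = req.Send()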
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + return out, req.Send() } -const opDeleteObjects = "DeleteObjects" +// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// DeleteObjectsRequest generates a "aws/request.Request" representing the -// client's request for the DeleteObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObjects method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteBucketReplication for more information on using the DeleteBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the DeleteObjectsRequest method. -// req, resp := client.DeleteObjectsRequest(params) +// +// // Example sending a request using the DeleteBucketReplicationRequest method. 
+// req, resp := client.DeleteBucketReplicationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { op := &request.Operation{ - Name: opDeleteObjects, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}?delete", + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", } if input == nil { - input = &DeleteObjectsInput{} + input = &DeleteBucketReplicationInput{} } + output = &DeleteBucketReplicationOutput{} req = c.newRequest(op, input, output) - output = &DeleteObjectsOutput{} - req.Data = output + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. You may specify up to 1000 keys. -func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { - req, out := c.DeleteObjectsRequest(input) - err := req.Send() - return out, err +// DeleteBucketReplication API operation for Amazon Simple Storage Service. +// +// Deletes the replication configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + return out, req.Send() } -const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" +// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAccelerateConfiguration method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteBucketTagging for more information on using the DeleteBucketTagging +// API call, and error handling. // -// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. -// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketTaggingRequest method. +// req, resp := client.DeleteBucketTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { op := &request.Operation{ - Name: opGetBucketAccelerateConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?accelerate", + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", } if input == nil { - input = &GetBucketAccelerateConfigurationInput{} + input = &DeleteBucketTaggingInput{} } + output = &DeleteBucketTaggingOutput{} req = c.newRequest(op, input, output) - output = &GetBucketAccelerateConfigurationOutput{} - req.Data = output + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// Returns the accelerate configuration of a bucket. -func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { - req, out := c.GetBucketAccelerateConfigurationRequest(input) - err := req.Send() - return out, err +// DeleteBucketTagging API operation for Amazon Simple Storage Service. +// +// Deletes the tags from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketTagging for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + return out, req.Send() } -const opGetBucketAcl = "GetBucketAcl" +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketAclRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAcl method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. // -// // Example sending a request using the GetBucketAclRequest method. -// req, resp := client.GetBucketAclRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. 
+// req, resp := client.DeleteBucketWebsiteRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { op := &request.Operation{ - Name: opGetBucketAcl, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?acl", + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", } if input == nil { - input = &GetBucketAclInput{} + input = &DeleteBucketWebsiteInput{} } + output = &DeleteBucketWebsiteOutput{} req = c.newRequest(op, input, output) - output = &GetBucketAclOutput{} - req.Data = output + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// Gets the access control policy for the bucket. -func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { - req, out := c.GetBucketAclRequest(input) - err := req.Send() - return out, err +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation removes the website configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + return out, req.Send() } -const opGetBucketCors = "GetBucketCors" +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketCors method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. // -// // Example sending a request using the GetBucketCorsRequest method. -// req, resp := client.GetBucketCorsRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteObjectRequest method. +// req, resp := client.DeleteObjectRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { op := &request.Operation{ - Name: opGetBucketCors, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?cors", + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", } if input == nil { - input = &GetBucketCorsInput{} + input = &DeleteObjectInput{} } + output = &DeleteObjectOutput{} req = c.newRequest(op, input, output) - output = &GetBucketCorsOutput{} - req.Data = output return } -// Returns the cors configuration for the bucket. -func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { - req, out := c.GetBucketCorsRequest(input) - err := req.Send() - return out, err +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + return out, req.Send() } -const opGetBucketLifecycle = "GetBucketLifecycle" +// DeleteObjectWithContext is the same as DeleteObject with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteObjectTagging = "DeleteObjectTagging" + +// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycle method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteObjectTagging for more information on using the DeleteObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the GetBucketLifecycleRequest method. -// req, resp := client.GetBucketLifecycleRequest(params) +// +// // Example sending a request using the DeleteObjectTaggingRequest method. +// req, resp := client.DeleteObjectTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") - } +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { op := &request.Operation{ - Name: opGetBucketLifecycle, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", + Name: opDeleteObjectTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}?tagging", } if input == nil { - input = &GetBucketLifecycleInput{} + input = &DeleteObjectTaggingInput{} } + output = &DeleteObjectTaggingOutput{} req = c.newRequest(op, input, output) - output = &GetBucketLifecycleOutput{} - req.Data = output return } -// Deprecated, see the GetBucketLifecycleConfiguration operation. -func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { - req, out := c.GetBucketLifecycleRequest(input) - err := req.Send() - return out, err +// DeleteObjectTagging API operation for Amazon Simple Storage Service. +// +// Removes the tag-set from an existing object. 
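A minimal sketch of the DeleteObjectTagging call described above, with a hypothetical bucket and key and the same client assumptions as earlier (a VersionId may also be set on the input to target the tag-set of a specific object version):

out, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
	Bucket: aws.String("example-bucket"),     // hypothetical bucket
	Key:    aws.String("path/to/object.txt"), // hypothetical key
})
if err == nil {
	// The response echoes the version whose tag-set was removed.
	fmt.Println("removed tags from version:", aws.StringValue(out.VersionId))
}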
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + return out, req.Send() } -const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" +// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycleConfiguration method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See DeleteObjects for more information on using the DeleteObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. -// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// // Example sending a request using the DeleteObjectsRequest method. 
+// req, resp := client.DeleteObjectsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { op := &request.Operation{ - Name: opGetBucketLifecycleConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", } if input == nil { - input = &GetBucketLifecycleConfigurationInput{} + input = &DeleteObjectsInput{} } + output = &DeleteObjectsOutput{} req = c.newRequest(op, input, output) - output = &GetBucketLifecycleConfigurationOutput{} - req.Data = output return } -// Returns the lifecycle configuration information set on the bucket. -func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { - req, out := c.GetBucketLifecycleConfigurationRequest(input) - err := req.Send() - return out, err +// DeleteObjects API operation for Amazon Simple Storage Service. +// +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjects for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + return out, req.Send() } -const opGetBucketLocation = "GetBucketLocation" +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketLocationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLocation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
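The DeleteObjects operation documented above batches up to 1000 keys into a single POST. A sketch under the same client assumptions as earlier, with hypothetical object keys; in quiet mode the response reports only the keys that failed:

out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
	Bucket: aws.String("example-bucket"), // hypothetical bucket
	Delete: &s3.Delete{
		Objects: []*s3.ObjectIdentifier{
			{Key: aws.String("a.txt")}, // hypothetical keys
			{Key: aws.String("b.txt")},
		},
		Quiet: aws.Bool(true), // only failures are listed in the response
	},
})
if err == nil {
	fmt.Println("per-key failures:", len(out.Errors))
}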
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLocation method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration +// API call, and error handling. // -// // Example sending a request using the GetBucketLocationRequest method. -// req, resp := client.GetBucketLocationRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. +// req, resp := client.GetBucketAccelerateConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { op := &request.Operation{ - Name: opGetBucketLocation, + Name: opGetBucketAccelerateConfiguration, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?location", + HTTPPath: "/{Bucket}?accelerate", } if input == nil { - input = &GetBucketLocationInput{} + input = &GetBucketAccelerateConfigurationInput{} } + output = &GetBucketAccelerateConfigurationOutput{} req = c.newRequest(op, input, output) - output = &GetBucketLocationOutput{} - req.Data = output return } -// Returns the region the bucket resides in. -func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) - err := req.Send() - return out, err +// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the accelerate configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + return out, req.Send() } -const opGetBucketLogging = "GetBucketLogging" +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. 
+// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketLogging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLogging method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. // -// // Example sending a request using the GetBucketLoggingRequest method. -// req, resp := client.GetBucketLoggingRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAclRequest method. +// req, resp := client.GetBucketAclRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { op := &request.Operation{ - Name: opGetBucketLogging, + Name: opGetBucketAcl, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?logging", + HTTPPath: "/{Bucket}?acl", } if input == nil { - input = &GetBucketLoggingInput{} + input = &GetBucketAclInput{} } + output = &GetBucketAclOutput{} req = c.newRequest(op, input, output) - output = &GetBucketLoggingOutput{} - req.Data = output return } -// Returns the logging status of a bucket and the permissions users have to -// view and modify that status. To use GET, you must be the bucket owner. 
-func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { - req, out := c.GetBucketLoggingRequest(input) - err := req.Send() - return out, err +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// Gets the access control policy for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() } -const opGetBucketNotification = "GetBucketNotification" +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketNotificationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketNotification operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" + +// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotification method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration +// API call, and error handling. // -// // Example sending a request using the GetBucketNotificationRequest method. -// req, resp := client.GetBucketNotificationRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") - } +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { op := &request.Operation{ - Name: opGetBucketNotification, + Name: opGetBucketAnalyticsConfiguration, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", + HTTPPath: "/{Bucket}?analytics", } if input == nil { - input = &GetBucketNotificationConfigurationRequest{} + input = &GetBucketAnalyticsConfigurationInput{} } + output = &GetBucketAnalyticsConfigurationOutput{} req = c.newRequest(op, input, output) - output = &NotificationConfigurationDeprecated{} - req.Data = output return } -// Deprecated, see the GetBucketNotificationConfiguration operation. -func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { - req, out := c.GetBucketNotificationRequest(input) - err := req.Send() - return out, err +// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + return out, req.Send() } -const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketNotificationConfiguration operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotificationConfiguration method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketCors for more information on using the GetBucketCors +// API call, and error handling. // -// // Example sending a request using the GetBucketNotificationConfigurationRequest method. -// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { op := &request.Operation{ - Name: opGetBucketNotificationConfiguration, + Name: opGetBucketCors, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", + HTTPPath: "/{Bucket}?cors", } if input == nil { - input = &GetBucketNotificationConfigurationRequest{} + input = &GetBucketCorsInput{} } + output = &GetBucketCorsOutput{} req = c.newRequest(op, input, output) - output = &NotificationConfiguration{} - req.Data = output return } -// Returns the notification configuration of a bucket. -func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { - req, out := c.GetBucketNotificationConfigurationRequest(input) - err := req.Send() - return out, err +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// Returns the cors configuration for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() } -const opGetBucketPolicy = "GetBucketPolicy" +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketPolicy method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the GetBucketPolicyRequest method. -// req, resp := client.GetBucketPolicyRequest(params) +// +// // Example sending a request using the GetBucketEncryptionRequest method. 
+// req, resp := client.GetBucketEncryptionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { op := &request.Operation{ - Name: opGetBucketPolicy, + Name: opGetBucketEncryption, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?policy", + HTTPPath: "/{Bucket}?encryption", } if input == nil { - input = &GetBucketPolicyInput{} + input = &GetBucketEncryptionInput{} } + output = &GetBucketEncryptionOutput{} req = c.newRequest(op, input, output) - output = &GetBucketPolicyOutput{} - req.Data = output return } -// Returns the policy of a specified bucket. -func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { - req, out := c.GetBucketPolicyRequest(input) - err := req.Send() - return out, err +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// Returns the server-side encryption configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() } -const opGetBucketReplication = "GetBucketReplication" +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" + +// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketReplication method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the GetBucketReplicationRequest method. -// req, resp := client.GetBucketReplicationRequest(params) +// +// // Example sending a request using the GetBucketInventoryConfigurationRequest method. +// req, resp := client.GetBucketInventoryConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { op := &request.Operation{ - Name: opGetBucketReplication, + Name: opGetBucketInventoryConfiguration, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?replication", + HTTPPath: "/{Bucket}?inventory", } if input == nil { - input = &GetBucketReplicationInput{} + input = &GetBucketInventoryConfigurationInput{} } + output = &GetBucketInventoryConfigurationOutput{} req = c.newRequest(op, input, output) - output = &GetBucketReplicationOutput{} - req.Data = output return } -// Returns the replication configuration of a bucket. -func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { - req, out := c.GetBucketReplicationRequest(input) - err := req.Send() - return out, err +// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Returns an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketInventoryConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + return out, req.Send() } -const opGetBucketRequestPayment = "GetBucketRequestPayment" +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketRequestPayment method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, and error handling. // -// // Example sending a request using the GetBucketRequestPaymentRequest method. -// req, resp := client.GetBucketRequestPaymentRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleRequest method. 
+// req, resp := client.GetBucketLifecycleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } op := &request.Operation{ - Name: opGetBucketRequestPayment, + Name: opGetBucketLifecycle, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?requestPayment", + HTTPPath: "/{Bucket}?lifecycle", } if input == nil { - input = &GetBucketRequestPaymentInput{} + input = &GetBucketLifecycleInput{} } + output = &GetBucketLifecycleOutput{} req = c.newRequest(op, input, output) - output = &GetBucketRequestPaymentOutput{} - req.Data = output return } -// Returns the request payment configuration of a bucket. -func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { - req, out := c.GetBucketRequestPaymentRequest(input) - err := req.Send() - return out, err +// GetBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deprecated, see the GetBucketLifecycleConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + return out, req.Send() } -const opGetBucketTagging = "GetBucketTagging" +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketLifecycleWithContext has been deprecated +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
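The deprecated GetBucketLifecycle shim above logs a warning and still issues the old wire operation; callers are expected to move to GetBucketLifecycleConfiguration, which the patch introduces next. As a rough sketch of the replacement call (illustrative only, not part of the vendored patch; the region and bucket name are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder region; any valid session configuration works here.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Preferred over the deprecated GetBucketLifecycle shown above.
	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket name
	})
	if err != nil {
		fmt.Println("error:", err) // an awserr.Error can be type-asserted here
		return
	}
	fmt.Println(out.Rules)
}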
+const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketTagging method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. // -// // Example sending a request using the GetBucketTaggingRequest method. -// req, resp := client.GetBucketTaggingRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. +// req, resp := client.GetBucketLifecycleConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { op := &request.Operation{ - Name: opGetBucketTagging, + Name: opGetBucketLifecycleConfiguration, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?tagging", + HTTPPath: "/{Bucket}?lifecycle", } if input == nil { - input = &GetBucketTaggingInput{} + input = &GetBucketLifecycleConfigurationInput{} } + output = &GetBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) - output = &GetBucketTaggingOutput{} - req.Data = output return } -// Returns the tag set associated with the bucket. -func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { - req, out := c.GetBucketTaggingRequest(input) - err := req.Send() - return out, err +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the lifecycle configuration information set on the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycleConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + return out, req.Send() } -const opGetBucketVersioning = "GetBucketVersioning" +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketVersioningRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketVersioning method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. // -// // Example sending a request using the GetBucketVersioningRequest method. -// req, resp := client.GetBucketVersioningRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLocationRequest method. 
+// req, resp := client.GetBucketLocationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { op := &request.Operation{ - Name: opGetBucketVersioning, + Name: opGetBucketLocation, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versioning", + HTTPPath: "/{Bucket}?location", } if input == nil { - input = &GetBucketVersioningInput{} + input = &GetBucketLocationInput{} } + output = &GetBucketLocationOutput{} req = c.newRequest(op, input, output) - output = &GetBucketVersioningOutput{} - req.Data = output return } -// Returns the versioning state of a bucket. -func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { - req, out := c.GetBucketVersioningRequest(input) - err := req.Send() - return out, err +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// Returns the region the bucket resides in. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() } -const opGetBucketWebsite = "GetBucketWebsite" +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the GetBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the GetBucketWebsite method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. // -// // Example sending a request using the GetBucketWebsiteRequest method. -// req, resp := client.GetBucketWebsiteRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { op := &request.Operation{ - Name: opGetBucketWebsite, + Name: opGetBucketLogging, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?website", + HTTPPath: "/{Bucket}?logging", } if input == nil { - input = &GetBucketWebsiteInput{} + input = &GetBucketLoggingInput{} } + output = &GetBucketLoggingOutput{} req = c.newRequest(op, input, output) - output = &GetBucketWebsiteOutput{} - req.Data = output return } -// Returns the website configuration for a bucket. -func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { - req, out := c.GetBucketWebsiteRequest(input) - err := req.Send() - return out, err +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + return out, req.Send() } -const opGetObject = "GetObject" +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetObjectRequest generates a "aws/request.Request" representing the -// client's request for the GetObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" + +// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObject method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, and error handling. // -// // Example sending a request using the GetObjectRequest method. -// req, resp := client.GetObjectRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketMetricsConfigurationRequest method. +// req, resp := client.GetBucketMetricsConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { op := &request.Operation{ - Name: opGetObject, + Name: opGetBucketMetricsConfiguration, HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", + HTTPPath: "/{Bucket}?metrics", } if input == nil { - input = &GetObjectInput{} + input = &GetBucketMetricsConfigurationInput{} } + output = &GetBucketMetricsConfigurationOutput{} req = c.newRequest(op, input, output) - output = &GetObjectOutput{} - req.Data = output return } -// Retrieves objects from Amazon S3. -func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { - req, out := c.GetObjectRequest(input) - err := req.Send() - return out, err +// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets a metrics configuration (specified by the metrics configuration ID) +// from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + return out, req.Send() } -const opGetObjectAcl = "GetObjectAcl" +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetObjectAclRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectAcl method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, and error handling. // -// // Example sending a request using the GetObjectAclRequest method. -// req, resp := client.GetObjectAclRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketNotificationRequest method. 
+// req, resp := client.GetBucketNotificationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } op := &request.Operation{ - Name: opGetObjectAcl, + Name: opGetBucketNotification, HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?acl", + HTTPPath: "/{Bucket}?notification", } if input == nil { - input = &GetObjectAclInput{} + input = &GetBucketNotificationConfigurationRequest{} } + output = &NotificationConfigurationDeprecated{} req = c.newRequest(op, input, output) - output = &GetObjectAclOutput{} - req.Data = output return } -// Returns the access control list (ACL) of an object. -func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { - req, out := c.GetObjectAclRequest(input) - err := req.Send() - return out, err +// GetBucketNotification API operation for Amazon Simple Storage Service. +// +// Deprecated, see the GetBucketNotificationConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + return out, req.Send() } -const opGetObjectTorrent = "GetObjectTorrent" +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketNotificationWithContext has been deprecated +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetObjectTorrentRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTorrent operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
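Every *WithContext variant added in this patch has the same shape seen above: set the context on the request, apply any options, then send. A minimal sketch of that pattern with a per-call timeout (illustrative only, not part of the patch; it reuses GetBucketAclWithContext from earlier in this file, and the bucket name is a placeholder):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Per the generated comments, the context must be non-nil; a nil
	// context panics. Here the call is cancelled after 10 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.GetBucketAclWithContext(ctx, &s3.GetBucketAclInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket name
	})
	if err != nil {
		fmt.Println("error:", err) // includes context deadline/cancellation errors
		return
	}
	fmt.Println(out.Grants)
}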
+const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTorrent method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the GetObjectTorrentRequest method. -// req, resp := client.GetObjectTorrentRequest(params) +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. +// req, resp := client.GetBucketNotificationConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { op := &request.Operation{ - Name: opGetObjectTorrent, + Name: opGetBucketNotificationConfiguration, HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?torrent", + HTTPPath: "/{Bucket}?notification", } if input == nil { - input = &GetObjectTorrentInput{} + input = &GetBucketNotificationConfigurationRequest{} } + output = &NotificationConfiguration{} req = c.newRequest(op, input, output) - output = &GetObjectTorrentOutput{} - req.Data = output return } -// Return torrent files from a bucket. -func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { - req, out := c.GetObjectTorrentRequest(input) - err := req.Send() - return out, err +// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the notification configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotificationConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + return out, req.Send() } -const opHeadBucket = "HeadBucket" +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// HeadBucketRequest generates a "aws/request.Request" representing the -// client's request for the HeadBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadBucket method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the HeadBucketRequest method. -// req, resp := client.HeadBucketRequest(params) +// +// // Example sending a request using the GetBucketPolicyRequest method. 
+// req, resp := client.GetBucketPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { op := &request.Operation{ - Name: opHeadBucket, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}", + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", } if input == nil { - input = &HeadBucketInput{} + input = &GetBucketPolicyInput{} } + output = &GetBucketPolicyOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &HeadBucketOutput{} - req.Data = output return } -// This operation is useful to determine if a bucket exists and you have permission -// to access it. -func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) - err := req.Send() - return out, err +// GetBucketPolicy API operation for Amazon Simple Storage Service. +// +// Returns the policy of a specified bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() } -const opHeadObject = "HeadObject" +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// HeadObjectRequest generates a "aws/request.Request" representing the -// client's request for the HeadObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
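The GetBucketPolicy family above follows the same shape. A sketch of the one-shot call, assuming the client and imports from the earlier example; the helper name is ours, not the SDK's:

// bucketPolicyJSON fetches a bucket's policy document as a JSON string.
func bucketPolicyJSON(svc *s3.S3, bucket string) (string, error) {
	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{Bucket: aws.String(bucket)})
	if err != nil {
		return "", err
	}
	// Policy is a *string holding the raw JSON policy document.
	return aws.StringValue(out.Policy), nil
}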
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadObject method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, and error handling. // -// // Example sending a request using the HeadObjectRequest method. -// req, resp := client.HeadObjectRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketReplicationRequest method. +// req, resp := client.GetBucketReplicationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { op := &request.Operation{ - Name: opHeadObject, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}/{Key+}", + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", } if input == nil { - input = &HeadObjectInput{} + input = &GetBucketReplicationInput{} } + output = &GetBucketReplicationOutput{} req = c.newRequest(op, input, output) - output = &HeadObjectOutput{} - req.Data = output return } -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. -func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) - err := req.Send() - return out, err +// GetBucketReplication API operation for Amazon Simple Storage Service. +// +// Returns the replication configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + return out, req.Send() } -const opListBuckets = "ListBuckets" +// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// ListBucketsRequest generates a "aws/request.Request" representing the -// client's request for the ListBuckets operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBuckets method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, and error handling. // -// // Example sending a request using the ListBucketsRequest method. -// req, resp := client.ListBucketsRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. +// req, resp := client.GetBucketRequestPaymentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { op := &request.Operation{ - Name: opListBuckets, + Name: opGetBucketRequestPayment, HTTPMethod: "GET", - HTTPPath: "/", + HTTPPath: "/{Bucket}?requestPayment", } if input == nil { - input = &ListBucketsInput{} + input = &GetBucketRequestPaymentInput{} } + output = &GetBucketRequestPaymentOutput{} req = c.newRequest(op, input, output) - output = &ListBucketsOutput{} - req.Data = output return } -// Returns a list of all buckets owned by the authenticated sender of the request. -func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) - err := req.Send() - return out, err +// GetBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Returns the request payment configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + return out, req.Send() } -const opListMultipartUploads = "ListMultipartUploads" +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// ListMultipartUploadsRequest generates a "aws/request.Request" representing the -// client's request for the ListMultipartUploads operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListMultipartUploads method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. // -// // Example sending a request using the ListMultipartUploadsRequest method. -// req, resp := client.ListMultipartUploadsRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketTaggingRequest method. 
+// req, resp := client.GetBucketTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { op := &request.Operation{ - Name: opListMultipartUploads, + Name: opGetBucketTagging, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?uploads", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "UploadIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, - LimitToken: "MaxUploads", - TruncationToken: "IsTruncated", - }, + HTTPPath: "/{Bucket}?tagging", } if input == nil { - input = &ListMultipartUploadsInput{} + input = &GetBucketTaggingInput{} } + output = &GetBucketTaggingOutput{} req = c.newRequest(op, input, output) - output = &ListMultipartUploadsOutput{} - req.Data = output return } -// This operation lists in-progress multipart uploads. -func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) - err := req.Send() - return out, err -} - -// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +// GetBucketTagging API operation for Amazon Simple Storage Service. // -// See ListMultipartUploads method for more information on how to use this operation. +// Returns the tag set associated with the bucket. // -// Note: This operation can generate multiple requests to a service. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// // Example iterating over at most 3 pages of a ListMultipartUploads operation. -// pageNum := 0 -// err := client.ListMultipartUploadsPages(params, -// func(page *ListMultipartUploadsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. // -func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListMultipartUploadsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListMultipartUploadsOutput), lastPage) - }) +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -const opListObjectVersions = "ListObjectVersions" +const opGetBucketVersioning = "GetBucketVersioning" -// ListObjectVersionsRequest generates a "aws/request.Request" representing the -// client's request for the ListObjectVersions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectVersions method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. // -// // Example sending a request using the ListObjectVersionsRequest method. -// req, resp := client.ListObjectVersionsRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { op := &request.Operation{ - Name: opListObjectVersions, + Name: opGetBucketVersioning, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versions", - Paginator: &request.Paginator{ - InputTokens: []string{"KeyMarker", "VersionIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, + HTTPPath: "/{Bucket}?versioning", } if input == nil { - input = &ListObjectVersionsInput{} + input = &GetBucketVersioningInput{} } + output = &GetBucketVersioningOutput{} req = c.newRequest(op, input, output) - output = &ListObjectVersionsOutput{} - req.Data = output return } -// Returns metadata about all of the versions of objects in a bucket. 
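To illustrate the GetBucketTagging call completed just above, a short sketch; the helper is illustrative and assumes fmt plus the earlier imports:

// printBucketTags dumps a bucket's tag set; TagSet is a []*s3.Tag.
func printBucketTags(svc *s3.S3, bucket string) error {
	out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	for _, t := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
	return nil
}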
-func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { - req, out := c.ListObjectVersionsRequest(input) - err := req.Send() - return out, err -} - -// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +// GetBucketVersioning API operation for Amazon Simple Storage Service. // -// See ListObjectVersions method for more information on how to use this operation. +// Returns the versioning state of a bucket. // -// Note: This operation can generate multiple requests to a service. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// // Example iterating over at most 3 pages of a ListObjectVersions operation. -// pageNum := 0 -// err := client.ListObjectVersionsPages(params, -// func(page *ListObjectVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. // -func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectVersionsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectVersionsOutput), lastPage) - }) +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -const opListObjects = "ListObjects" +const opGetBucketWebsite = "GetBucketWebsite" -// ListObjectsRequest generates a "aws/request.Request" representing the -// client's request for the ListObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
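A sketch of checking the state returned by GetBucketVersioning above; the comparison assumes the SDK's generated BucketVersioningStatusEnabled constant ("Enabled"):

// versioningEnabled is an illustrative helper around GetBucketVersioning.
func versioningEnabled(svc *s3.S3, bucket string) (bool, error) {
	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{Bucket: aws.String(bucket)})
	if err != nil {
		return false, err
	}
	// Status is nil for buckets that have never had versioning configured.
	return aws.StringValue(out.Status) == s3.BucketVersioningStatusEnabled, nil
}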
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjects method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetBucketWebsite for more information on using the GetBucketWebsite +// API call, and error handling. // -// // Example sending a request using the ListObjectsRequest method. -// req, resp := client.ListObjectsRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketWebsiteRequest method. +// req, resp := client.GetBucketWebsiteRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { op := &request.Operation{ - Name: opListObjects, + Name: opGetBucketWebsite, HTTPMethod: "GET", - HTTPPath: "/{Bucket}", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker || Contents[-1].Key"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, + HTTPPath: "/{Bucket}?website", } if input == nil { - input = &ListObjectsInput{} + input = &GetBucketWebsiteInput{} } + output = &GetBucketWebsiteOutput{} req = c.newRequest(op, input, output) - output = &ListObjectsOutput{} - req.Data = output return } -// Returns some or all (up to 1000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. -func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { - req, out := c.ListObjectsRequest(input) - err := req.Send() - return out, err -} - -// ListObjectsPages iterates over the pages of a ListObjects operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +// GetBucketWebsite API operation for Amazon Simple Storage Service. // -// See ListObjects method for more information on how to use this operation. +// Returns the website configuration for a bucket. // -// Note: This operation can generate multiple requests to a service. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// // Example iterating over at most 3 pages of a ListObjects operation. -// pageNum := 0 -// err := client.ListObjectsPages(params, -// func(page *ListObjectsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketWebsite for usage and error information. 
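The "inject custom logic or configuration" wording in these regenerated comments refers to the returned request's handler lists. A hedged sketch using GetBucketWebsiteRequest; the header name is invented for illustration, and the aws/request import is assumed:

// websiteConfigWithTrace attaches a custom build hook before sending.
func websiteConfigWithTrace(svc *s3.S3, bucket string) (*s3.GetBucketWebsiteOutput, error) {
	req, out := svc.GetBucketWebsiteRequest(&s3.GetBucketWebsiteInput{Bucket: aws.String(bucket)})
	req.Handlers.Build.PushBack(func(r *request.Request) {
		r.HTTPRequest.Header.Set("X-Example-Trace", "demo") // hypothetical header
	})
	// out is not valid until Send returns without error.
	if err := req.Send(); err != nil {
		return nil, err
	}
	return out, nil
}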
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. // -func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectsOutput), lastPage) - }) +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -const opListObjectsV2 = "ListObjectsV2" +const opGetObject = "GetObject" -// ListObjectsV2Request generates a "aws/request.Request" representing the -// client's request for the ListObjectsV2 operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectsV2 method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetObject for more information on using the GetObject +// API call, and error handling. // -// // Example sending a request using the ListObjectsV2Request method. -// req, resp := client.ListObjectsV2Request(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectRequest method. 
+// req, resp := client.GetObjectRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { op := &request.Operation{ - Name: opListObjectsV2, + Name: opGetObject, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?list-type=2", - Paginator: &request.Paginator{ - InputTokens: []string{"ContinuationToken"}, - OutputTokens: []string{"NextContinuationToken"}, - LimitToken: "MaxKeys", - TruncationToken: "", - }, + HTTPPath: "/{Bucket}/{Key+}", } if input == nil { - input = &ListObjectsV2Input{} + input = &GetObjectInput{} } + output = &GetObjectOutput{} req = c.newRequest(op, input, output) - output = &ListObjectsV2Output{} - req.Data = output return } -// Returns some or all (up to 1000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend -// you use this revised API for new application development. -func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { - req, out := c.ListObjectsV2Request(input) - err := req.Send() - return out, err -} - -// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +// GetObject API operation for Amazon Simple Storage Service. // -// See ListObjectsV2 method for more information on how to use this operation. +// Retrieves objects from Amazon S3. // -// Note: This operation can generate multiple requests to a service. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// // Example iterating over at most 3 pages of a ListObjectsV2 operation. -// pageNum := 0 -// err := client.ListObjectsV2Pages(params, -// func(page *ListObjectsV2Output, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. // -func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(p *ListObjectsV2Output, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectsV2Request(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectsV2Output), lastPage) - }) +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + return out, req.Send() } -const opListParts = "ListParts" +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// ListPartsRequest generates a "aws/request.Request" representing the -// client's request for the ListParts operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListParts method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, and error handling. // -// // Example sending a request using the ListPartsRequest method. -// req, resp := client.ListPartsRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// err := req.Send() +// +// // Example sending a request using the GetObjectAclRequest method. +// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { op := &request.Operation{ - Name: opListParts, + Name: opGetObjectAcl, HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - Paginator: &request.Paginator{ - InputTokens: []string{"PartNumberMarker"}, - OutputTokens: []string{"NextPartNumberMarker"}, - LimitToken: "MaxParts", - TruncationToken: "IsTruncated", - }, + HTTPPath: "/{Bucket}/{Key+}?acl", } if input == nil { - input = &ListPartsInput{} + input = &GetObjectAclInput{} } + output = &GetObjectAclOutput{} req = c.newRequest(op, input, output) - output = &ListPartsOutput{} - req.Data = output return } -// Lists the parts that have been uploaded for a specific multipart upload. -func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { - req, out := c.ListPartsRequest(input) - err := req.Send() - return out, err -} - -// ListPartsPages iterates over the pages of a ListParts operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. 
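The WithContext variants introduced throughout this hunk accept any context.Context, which satisfies aws.Context on Go 1.7+. A sketch combining GetObjectWithContext with a deadline and a body read; the helper name and timeout are illustrative, and context, time, and io/ioutil imports are assumed:

// fetchObject reads an object fully into memory; fine for small objects.
func fetchObject(svc *s3.S3, bucket, key string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	out, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return nil, err
	}
	defer out.Body.Close() // Body is an io.ReadCloser and must be closed
	return ioutil.ReadAll(out.Body)
}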
+// GetObjectAcl API operation for Amazon Simple Storage Service. // -// See ListParts method for more information on how to use this operation. +// Returns the access control list (ACL) of an object. // -// Note: This operation can generate multiple requests to a service. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// // Example iterating over at most 3 pages of a ListParts operation. -// pageNum := 0 -// err := client.ListPartsPages(params, -// func(page *ListPartsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. // -func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListPartsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListPartsOutput), lastPage) - }) +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + return out, req.Send() } -const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetObjectTagging = "GetObjectTagging" + +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAccelerateConfiguration method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
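GetObjectAcl below is one of the operations whose doc comment lists a typed error code. A sketch of the awserr type-assertion pattern those comments recommend, assuming the aws/awserr import; the helper is ours:

// objectACLOrNil treats a missing key as "no ACL" rather than an error.
func objectACLOrNil(svc *s3.S3, bucket, key string) (*s3.GetObjectAclOutput, error) {
	out, err := svc.GetObjectAcl(&s3.GetObjectAclInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
		return nil, nil // "NoSuchKey", per the Returned Error Codes above
	}
	return out, err
}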
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetObjectTagging for more information on using the GetObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. -// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// // Example sending a request using the GetObjectTaggingRequest method. +// req, resp := client.GetObjectTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { op := &request.Operation{ - Name: opPutBucketAccelerateConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?accelerate", + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", } if input == nil { - input = &PutBucketAccelerateConfigurationInput{} + input = &GetObjectTaggingInput{} } + output = &GetObjectTaggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketAccelerateConfigurationOutput{} - req.Data = output return } -// Sets the accelerate configuration of an existing bucket. -func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { - req, out := c.PutBucketAccelerateConfigurationRequest(input) - err := req.Send() - return out, err +// GetObjectTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag-set of an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + return out, req.Send() } -const opPutBucketAcl = "PutBucketAcl" +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutBucketAclRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAcl method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, and error handling. // -// // Example sending a request using the PutBucketAclRequest method. -// req, resp := client.PutBucketAclRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { op := &request.Operation{ - Name: opPutBucketAcl, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?acl", + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", } if input == nil { - input = &PutBucketAclInput{} + input = &GetObjectTorrentInput{} } + output = &GetObjectTorrentOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketAclOutput{} - req.Data = output return } -// Sets the permissions on a bucket using access control lists (ACL). -func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { - req, out := c.PutBucketAclRequest(input) - err := req.Send() - return out, err +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// Return torrent files from a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
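For the GetObjectTagging pair completed above, a sketch that flattens the tag set into a map; the helper is illustrative only:

// objectTags collects an object's tag set into a plain map.
func objectTags(svc *s3.S3, bucket, key string) (map[string]string, error) {
	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return nil, err
	}
	tags := make(map[string]string, len(out.TagSet))
	for _, t := range out.TagSet {
		tags[aws.StringValue(t.Key)] = aws.StringValue(t.Value)
	}
	return tags, nil
}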
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTorrent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() } -const opPutBucketCors = "PutBucketCors" +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutBucketCorsRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketCors method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. // -// // Example sending a request using the PutBucketCorsRequest method. -// req, resp := client.PutBucketCorsRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadBucketRequest method. 
+// req, resp := client.HeadBucketRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { op := &request.Operation{ - Name: opPutBucketCors, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?cors", + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", } if input == nil { - input = &PutBucketCorsInput{} + input = &HeadBucketInput{} } + output = &HeadBucketOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketCorsOutput{} - req.Data = output return } -// Sets the cors configuration for a bucket. -func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { - req, out := c.PutBucketCorsRequest(input) - err := req.Send() - return out, err +// HeadBucket API operation for Amazon Simple Storage Service. +// +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() } -const opPutBucketLifecycle = "PutBucketLifecycle" +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutBucketLifecycleRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
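HeadBucket, whose handlers above discard the empty response body, is the usual existence probe. A hedged sketch; because HEAD responses carry no error body, some error paths surface a bare "NotFound" code rather than the documented NoSuchBucket, so the check below accepts both:

// bucketExists is an illustrative helper; it only swallows "not found" errors.
func bucketExists(svc *s3.S3, bucket string) (bool, error) {
	_, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: aws.String(bucket)})
	if err == nil {
		return true, nil
	}
	if aerr, ok := err.(awserr.Error); ok &&
		(aerr.Code() == s3.ErrCodeNoSuchBucket || aerr.Code() == "NotFound") {
		return false, nil
	}
	return false, err // other failures (e.g. access denied) go to the caller
}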
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLifecycle method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the PutBucketLifecycleRequest method. -// req, resp := client.PutBucketLifecycleRequest(params) +// +// // Example sending a request using the HeadObjectRequest method. +// req, resp := client.HeadObjectRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") - } +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { op := &request.Operation{ - Name: opPutBucketLifecycle, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", } if input == nil { - input = &PutBucketLifecycleInput{} + input = &HeadObjectInput{} } + output = &HeadObjectOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketLifecycleOutput{} - req.Data = output return } -// Deprecated, see the PutBucketLifecycleConfiguration operation. -func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { - req, out := c.PutBucketLifecycleRequest(input) - err := req.Send() - return out, err +// HeadObject API operation for Amazon Simple Storage Service. +// +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. 
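Similarly for HeadObject above, which returns an object's metadata without its body; a small illustrative sketch:

// objectSize returns Content-Length without downloading the object.
func objectSize(svc *s3.S3, bucket, key string) (int64, error) {
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return 0, err
	}
	return aws.Int64Value(out.ContentLength), nil
}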
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() } -const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLifecycleConfiguration method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. // -// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. -// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. 
+// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { op := &request.Operation{ - Name: opPutBucketLifecycleConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", + Name: opListBucketAnalyticsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", } if input == nil { - input = &PutBucketLifecycleConfigurationInput{} + input = &ListBucketAnalyticsConfigurationsInput{} } + output = &ListBucketAnalyticsConfigurationsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketLifecycleConfigurationOutput{} - req.Data = output return } -// Sets lifecycle configuration for your bucket. If a lifecycle configuration -// exists, it replaces it. -func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { - req, out := c.PutBucketLifecycleConfigurationRequest(input) - err := req.Send() - return out, err +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the analytics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketAnalyticsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() } -const opPutBucketLogging = "PutBucketLogging" +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutBucketLoggingRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketLogging operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" + +// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketInventoryConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLogging method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the PutBucketLoggingRequest method. -// req, resp := client.PutBucketLoggingRequest(params) +// +// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. +// req, resp := client.ListBucketInventoryConfigurationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { op := &request.Operation{ - Name: opPutBucketLogging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?logging", + Name: opListBucketInventoryConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", } if input == nil { - input = &PutBucketLoggingInput{} + input = &ListBucketInventoryConfigurationsInput{} } + output = &ListBucketInventoryConfigurationsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketLoggingOutput{} - req.Data = output return } -// Set the logging parameters for a bucket and to specify permissions for who -// can view and modify the logging parameters. To set the logging status of -// a bucket, you must be the bucket owner. -func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) - err := req.Send() - return out, err +// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. +// +// Returns a list of inventory configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketInventoryConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + return out, req.Send() } -const opPutBucketNotification = "PutBucketNotification" - -// PutBucketNotificationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketNotification operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketNotification method directly -// instead. +// See ListBucketInventoryConfigurations for details on how to use this API operation. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" + +// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketMetricsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the PutBucketNotificationRequest method. -// req, resp := client.PutBucketNotificationRequest(params) +// +// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. 
+// req, resp := client.ListBucketMetricsConfigurationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { - if c.Client.Config.Logger != nil { - c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") - } +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { op := &request.Operation{ - Name: opPutBucketNotification, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?notification", + Name: opListBucketMetricsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", } if input == nil { - input = &PutBucketNotificationInput{} + input = &ListBucketMetricsConfigurationsInput{} } + output = &ListBucketMetricsConfigurationsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketNotificationOutput{} - req.Data = output return } -// Deprecated, see the PutBucketNotificationConfiguraiton operation. -func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { - req, out := c.PutBucketNotificationRequest(input) - err := req.Send() - return out, err +// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the metrics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketMetricsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + return out, req.Send() } -const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketNotificationConfiguration operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketNotificationConfiguration method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the PutBucketNotificationConfigurationRequest method. -// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// // Example sending a request using the ListBucketsRequest method. +// req, resp := client.ListBucketsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { op := &request.Operation{ - Name: opPutBucketNotificationConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?notification", + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", } if input == nil { - input = &PutBucketNotificationConfigurationInput{} + input = &ListBucketsInput{} } + output = &ListBucketsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketNotificationConfigurationOutput{} - req.Data = output return } -// Enables notifications of specified events for a bucket. -func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { - req, out := c.PutBucketNotificationConfigurationRequest(input) - err := req.Send() - return out, err +// ListBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() } -const opPutBucketPolicy = "PutBucketPolicy" +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketPolicy method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the PutBucketPolicyRequest method. -// req, resp := client.PutBucketPolicyRequest(params) +// +// // Example sending a request using the ListMultipartUploadsRequest method. 
+// req, resp := client.ListMultipartUploadsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { op := &request.Operation{ - Name: opPutBucketPolicy, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?policy", + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, } if input == nil { - input = &PutBucketPolicyInput{} + input = &ListMultipartUploadsInput{} } + output = &ListMultipartUploadsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketPolicyOutput{} - req.Data = output return } -// Replaces a policy on a bucket. If the bucket already has a policy, the one -// in this request completely replaces it. -func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { - req, out := c.PutBucketPolicyRequest(input) - err := req.Send() - return out, err +// ListMultipartUploads API operation for Amazon Simple Storage Service. +// +// This operation lists in-progress multipart uploads. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListMultipartUploads for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() } -const opPutBucketReplication = "PutBucketReplication" +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutBucketReplicationRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
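
All of the *WithContext variants in this diff accept an aws.Context, which a standard context.Context satisfies; per the generated comments above, the context must be non-nil or the SDK panics. A sketch of bounding ListMultipartUploads with a ten-second deadline (bucket name and helper name are placeholders):

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listUploadsWithTimeout cancels the ListMultipartUploads call if it
// runs longer than ten seconds, via the WithContext variant above.
func listUploadsWithTimeout(svc *s3.S3) (*s3.ListMultipartUploadsOutput, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return svc.ListMultipartUploadsWithContext(ctx, &s3.ListMultipartUploadsInput{
		Bucket: aws.String("example-bucket"), // placeholder name
	})
}
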
+// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketReplication method directly -// instead. +// See ListMultipartUploads method for more information on how to use this operation. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// Note: This operation can generate multiple requests to a service. // -// // Example sending a request using the PutBucketReplicationRequest method. -// req, resp := client.PutBucketReplicationRequest(params) +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. // -func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { - op := &request.Operation{ - Name: opPutBucketReplication, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?replication", +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } - if input == nil { - input = &PutBucketReplicationInput{} + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketReplicationOutput{} - req.Data = output - return -} - -// Creates a new replication configuration (or replaces an existing one, if -// present). 
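
Beyond the three-page example in the generated comment above, the Pages helper is commonly used to accumulate results across every page; a sketch with placeholder bucket and helper names:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// inProgressUploadKeys gathers the keys of in-progress multipart
// uploads across all pages of a ListMultipartUploads operation.
func inProgressUploadKeys(svc *s3.S3) ([]string, error) {
	var keys []string
	err := svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
		Bucket: aws.String("example-bucket"), // placeholder name
	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			keys = append(keys, aws.StringValue(u.Key))
		}
		return true // keep paginating
	})
	return keys, err
}

Returning false from the callback stops the iteration early, exactly as the generated documentation describes.
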
-func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { - req, out := c.PutBucketReplicationRequest(input) - err := req.Send() - return out, err + return p.Err() } -const opPutBucketRequestPayment = "PutBucketRequestPayment" +const opListObjectVersions = "ListObjectVersions" -// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketRequestPayment method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. // -// // Example sending a request using the PutBucketRequestPaymentRequest method. -// req, resp := client.PutBucketRequestPaymentRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { op := &request.Operation{ - Name: opPutBucketRequestPayment, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?requestPayment", + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, } if input == nil { - input = &PutBucketRequestPaymentInput{} + input = &ListObjectVersionsInput{} } + output = &ListObjectVersionsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketRequestPaymentOutput{} - req.Data = output return } -// Sets the request payment configuration for a bucket. By default, the bucket -// owner pays for downloads from the bucket. 
This configuration parameter enables -// the bucket owner (only) to specify that the person requesting the download -// will be charged for the download. Documentation on requester pays buckets -// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html -func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { - req, out := c.PutBucketRequestPaymentRequest(input) - err := req.Send() - return out, err +// ListObjectVersions API operation for Amazon Simple Storage Service. +// +// Returns metadata about all of the versions of objects in a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() } -const opPutBucketTagging = "PutBucketTagging" +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutBucketTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketTagging method directly -// instead. +// See ListObjectVersions method for more information on how to use this operation. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// Note: This operation can generate multiple requests to a service. // -// // Example sending a request using the PutBucketTaggingRequest method. -// req, resp := client.PutBucketTaggingRequest(params) +// // Example iterating over at most 3 pages of a ListObjectVersions operation. 
+// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. // -func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { - op := &request.Operation{ - Name: opPutBucketTagging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?tagging", +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } - if input == nil { - input = &PutBucketTaggingInput{} + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketTaggingOutput{} - req.Data = output - return -} - -// Sets the tags for a bucket. -func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { - req, out := c.PutBucketTaggingRequest(input) - err := req.Send() - return out, err + return p.Err() } -const opPutBucketVersioning = "PutBucketVersioning" +const opListObjects = "ListObjects" -// PutBucketVersioningRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketVersioning method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
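
The version paginator defined above (KeyMarker/VersionIdMarker in, NextKeyMarker/NextVersionIdMarker out) makes walking a versioned bucket a short loop; a sketch, with the bucket and helper names assumed:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printObjectVersions prints every version of every key in a bucket,
// page by page, using the ListObjectVersionsPages helper above.
func printObjectVersions(svc *s3.S3) error {
	return svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
		Bucket: aws.String("example-bucket"), // placeholder name
	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, v := range page.Versions {
			fmt.Printf("%s (version %s)\n",
				aws.StringValue(v.Key), aws.StringValue(v.VersionId))
		}
		return true
	})
}
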
+// See ListObjects for more information on using the ListObjects +// API call, and error handling. // -// // Example sending a request using the PutBucketVersioningRequest method. -// req, resp := client.PutBucketVersioningRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsRequest method. +// req, resp := client.ListObjectsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { op := &request.Operation{ - Name: opPutBucketVersioning, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?versioning", + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, } if input == nil { - input = &PutBucketVersioningInput{} + input = &ListObjectsInput{} } + output = &ListObjectsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketVersioningOutput{} - req.Data = output return } -// Sets the versioning state of an existing bucket. To set the versioning state, -// you must be the bucket owner. -func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { - req, out := c.PutBucketVersioningRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketWebsite = "PutBucketWebsite" - -// PutBucketWebsiteRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// ListObjects API operation for Amazon Simple Storage Service. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketWebsite method directly -// instead. +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// // Example sending a request using the PutBucketWebsiteRequest method. -// req, resp := client.PutBucketWebsiteRequest(params) +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. 
// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. // -func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { - op := &request.Operation{ - Name: opPutBucketWebsite, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &PutBucketWebsiteInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutBucketWebsiteOutput{} - req.Data = output - return +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() } -// Set the website configuration for a bucket. -func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { - req, out := c.PutBucketWebsiteRequest(input) - err := req.Send() - return out, err +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -const opPutObject = "PutObject" - -// PutObjectRequest generates a "aws/request.Request" representing the -// client's request for the PutObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObject method directly -// instead. +// See ListObjects method for more information on how to use this operation. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// Note: This operation can generate multiple requests to a service. // -// // Example sending a request using the PutObjectRequest method. -// req, resp := client.PutObjectRequest(params) +// // Example iterating over at most 3 pages of a ListObjects operation. 
+// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. // -func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { - op := &request.Operation{ - Name: opPutObject, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } - if input == nil { - input = &PutObjectInput{} + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) } - - req = c.newRequest(op, input, output) - output = &PutObjectOutput{} - req.Data = output - return -} - -// Adds an object to a bucket. -func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) - err := req.Send() - return out, err + return p.Err() } -const opPutObjectAcl = "PutObjectAcl" +const opListObjectsV2 = "ListObjectsV2" -// PutObjectAclRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObjectAcl method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. // -// // Example sending a request using the PutObjectAclRequest method. -// req, resp := client.PutObjectAclRequest(params) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { op := &request.Operation{ - Name: opPutObjectAcl, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?acl", + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, } if input == nil { - input = &PutObjectAclInput{} + input = &ListObjectsV2Input{} } + output = &ListObjectsV2Output{} req = c.newRequest(op, input, output) - output = &PutObjectAclOutput{} - req.Data = output return } -// uses the acl subresource to set the access control list (ACL) permissions -// for an object that already exists in a bucket -func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { - req, out := c.PutObjectAclRequest(input) - err := req.Send() - return out, err +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend +// you use this revised API for new application development. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() } -const opRestoreObject = "RestoreObject" +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// RestoreObjectRequest generates a "aws/request.Request" representing the -// client's request for the RestoreObject operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RestoreObject method directly -// instead. +// See ListObjectsV2 method for more information on how to use this operation. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// Note: This operation can generate multiple requests to a service. // -// // Example sending a request using the RestoreObjectRequest method. -// req, resp := client.RestoreObjectRequest(params) +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. // -func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { - op := &request.Operation{ - Name: opRestoreObject, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?restore", +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } - if input == nil { - input = &RestoreObjectInput{} + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) } - - req = c.newRequest(op, input, output) - output = &RestoreObjectOutput{} - req.Data = output - return -} - -// Restores an archived copy of an object back into Amazon S3 -func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) - err := req.Send() - return out, err + return p.Err() } -const opUploadPart = "UploadPart" +const opListParts = "ListParts" -// UploadPartRequest generates a "aws/request.Request" representing the -// client's request for the UploadPart operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UploadPart method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See ListParts for more information on using the ListParts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the UploadPartRequest method. -// req, resp := client.UploadPartRequest(params) +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { op := &request.Operation{ - Name: opUploadPart, - HTTPMethod: "PUT", + Name: opListParts, + HTTPMethod: "GET", HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, } if input == nil { - input = &UploadPartInput{} + input = &ListPartsInput{} } + output = &ListPartsOutput{} req = c.newRequest(op, input, output) - output = &UploadPartOutput{} - req.Data = output return } -// Uploads a part in a multipart upload. +// ListParts API operation for Amazon Simple Storage Service. // -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged -// for storage of the uploaded parts. Only after you either complete or abort -// multipart upload, Amazon S3 frees up the parts storage and stops charging -// you for the parts storage. -func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { - req, out := c.UploadPartRequest(input) - err := req.Send() - return out, err -} - -const opUploadPartCopy = "UploadPartCopy" - -// UploadPartCopyRequest generates a "aws/request.Request" representing the -// client's request for the UploadPartCopy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// Lists the parts that have been uploaded for a specific multipart upload. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
If -// you just want the service response, call the UploadPartCopy method directly -// instead. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. // -// // Example sending a request using the UploadPartCopyRequest method. -// req, resp := client.UploadPartCopyRequest(params) +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
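+			// inCpy is a shallow copy of the caller's input so the paginator can
+			// advance PartNumberMarker between pages without mutating the original;
+			// the context and options are re-applied to each page's request.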
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { op := &request.Operation{ - Name: opUploadPartCopy, + Name: opPutBucketAccelerateConfiguration, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + output = &PutBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the accelerate configuration of an existing bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
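+//
+// A minimal call sketch, assuming a context with a timeout (the duration and
+// variable names are illustrative):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := svc.PutBucketAccelerateConfigurationWithContext(ctx, input)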
+func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// Sets the permissions on a bucket using access control lists (ACL). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + return out, req.Send() +} + +// PutBucketAclWithContext is the same as PutBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
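+	// out is populated by Send; it is valid only if the returned error is nil.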
+ return out, req.Send() +} + +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" + +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. +// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAnalyticsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &PutBucketAnalyticsConfigurationInput{} + } + + output = &PutBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
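+//
+// A sketch of passing a per-request option (the chosen log level is
+// illustrative):
+//
+//    out, err := svc.PutBucketAnalyticsConfigurationWithContext(ctx, input,
+//        request.WithLogLevel(aws.LogDebugWithHTTPBody))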
+func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// Sets the cors configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketEncryption = "PutBucketEncryption" + +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketEncryptionRequest method. +// req, resp := client.PutBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { + op := &request.Operation{ + Name: opPutBucketEncryption, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &PutBucketEncryptionInput{} + } + + output = &PutBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketEncryption API operation for Amazon Simple Storage Service. +// +// Creates a new server-side encryption configuration (or replaces an existing +// one, if present). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + return out, req.Send() +} + +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" + +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. +// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketInventoryConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &PutBucketInventoryConfigurationInput{} + } + + output = &PutBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Adds an inventory configuration (identified by the inventory ID) from the +// bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleRequest method. +// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + output = &PutBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deprecated, see the PutBucketLifecycleConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketLifecycleWithContext has been deprecated +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + output = &PutBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Sets lifecycle configuration for your bucket. If a lifecycle configuration +// exists, it replaces it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLogging for more information on using the PutBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLoggingRequest method. +// req, resp := client.PutBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + output = &PutBucketLoggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLogging API operation for Amazon Simple Storage Service. +// +// Set the logging parameters for a bucket and to specify permissions for who +// can view and modify the logging parameters. To set the logging status of +// a bucket, you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLogging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + return out, req.Send() +} + +// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" + +// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketMetricsConfigurationRequest method. +// req, resp := client.PutBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketMetricsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &PutBucketMetricsConfigurationInput{} + } + + output = &PutBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets a metrics configuration (specified by the metrics configuration ID) +// for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketNotification for more information on using the PutBucketNotification +// API call, and error handling. 
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the PutBucketNotificationRequest method.
+//    req, resp := client.PutBucketNotificationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
+//
+// Deprecated: PutBucketNotification has been deprecated
+func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
+	if c.Client.Config.Logger != nil {
+		c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
+	}
+	op := &request.Operation{
+		Name:       opPutBucketNotification,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?notification",
+	}
+
+	if input == nil {
+		input = &PutBucketNotificationInput{}
+	}
+
+	output = &PutBucketNotificationOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// PutBucketNotification API operation for Amazon Simple Storage Service.
+//
+// Deprecated, see the PutBucketNotificationConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketNotification for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
+//
+// Deprecated: PutBucketNotification has been deprecated
+func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
+	req, out := c.PutBucketNotificationRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+//
+// Deprecated: PutBucketNotificationWithContext has been deprecated
+func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) {
+	req, out := c.PutBucketNotificationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"
+
+// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketNotificationConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
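+//
+// A minimal input sketch for this operation (the bucket name and queue ARN
+// are placeholders):
+//
+//    input := &s3.PutBucketNotificationConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        NotificationConfiguration: &s3.NotificationConfiguration{
+//            QueueConfigurations: []*s3.QueueConfiguration{{
+//                Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//                QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
+//            }},
+//        },
+//    }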
+// +// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. +// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + output = &PutBucketNotificationConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Enables notifications of specified events for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
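+//
+// A sketch of attaching a minimal policy document (the account ID and bucket
+// name are placeholders):
+//
+//    policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:root"},"Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}`
+//    _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//        Bucket: aws.String("example-bucket"),
+//        Policy: aws.String(policy),
+//    })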
+// +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Replaces a policy on a bucket. If the bucket already has a policy, the one +// in this request completely replaces it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketReplication for more information on using the PutBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketReplicationRequest method. 
+// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + output = &PutBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketReplication API operation for Amazon Simple Storage Service. +// +// Creates a new replication configuration (or replaces an existing one, if +// present). For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// in the Amazon S3 Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. 
+// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + output = &PutBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
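+//
+// A minimal input sketch for this operation (the bucket name, tag key, and
+// tag value are placeholders):
+//
+//    input := &s3.PutBucketTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//        Tagging: &s3.Tagging{TagSet: []*s3.Tag{
+//            {Key: aws.String("env"), Value: aws.String("dev")},
+//        }},
+//    }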
+// +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// Sets the tags for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + return out, req.Send() +} + +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketVersioning for more information on using the PutBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketVersioningRequest method. 
+// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + output = &PutBucketVersioningOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketWebsite for more information on using the PutBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketWebsiteRequest method. 
+// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. +// +// Set the website configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObject for more information on using the PutObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectRequest method. 
+// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. +// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + output = &PutObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectAcl API operation for Amazon Simple Storage Service. 
+//
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchKey "NoSuchKey"
+//   The specified key does not exist.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
+	req, out := c.PutObjectAclRequest(input)
+	return out, req.Send()
+}
+
+// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
+	req, out := c.PutObjectAclRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutObjectTagging = "PutObjectTagging"
+
+// PutObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectTagging operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectTagging for more information on using the PutObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//    // Example sending a request using the PutObjectTaggingRequest method.
+//    req, resp := client.PutObjectTaggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) {
+	op := &request.Operation{
+		Name:       opPutObjectTagging,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}?tagging",
+	}
+
+	if input == nil {
+		input = &PutObjectTaggingInput{}
+	}
+
+	output = &PutObjectTaggingOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// PutObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Sets the supplied tag-set on an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectTagging for usage and error information.
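+//
+// A minimal sketch of applying a tag-set with this operation; the bucket,
+// key, and tag values below are hypothetical placeholders:
+//
+//    _, err := client.PutObjectTagging(&PutObjectTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-key"),
+//        Tagging: &Tagging{
+//            TagSet: []*Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//        },
+//    })
+//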
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
+	req, out := c.PutObjectTaggingRequest(input)
+	return out, req.Send()
+}
+
+// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) {
+	req, out := c.PutObjectTaggingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates a "aws/request.Request" representing the
+// client's request for the RestoreObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See RestoreObject for more information on using the RestoreObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//    // Example sending a request using the RestoreObjectRequest method.
+//    req, resp := client.RestoreObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+	op := &request.Operation{
+		Name:       opRestoreObject,
+		HTTPMethod: "POST",
+		HTTPPath:   "/{Bucket}/{Key+}?restore",
+	}
+
+	if input == nil {
+		input = &RestoreObjectInput{}
+	}
+
+	output = &RestoreObjectOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// RestoreObject API operation for Amazon Simple Storage Service.
+//
+// Restores an archived copy of an object back into Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
+//   This operation is not allowed against this storage tier.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+	req, out := c.RestoreObjectRequest(input)
+	return out, req.Send()
+}
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
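+//
+// A minimal sketch of the context variant, assuming a hypothetical 30-second
+// deadline; any context.Context satisfies aws.Context:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := client.RestoreObjectWithContext(ctx, params)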
+// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSelectObjectContent = "SelectObjectContent" + +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop) + return +} + +// SelectObjectContent API operation for Amazon Simple Storage Service. +// +// This operation filters the contents of an Amazon S3 object based on a simple +// Structured Query Language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON or +// CSV) of the object. Amazon S3 uses this to parse object data into records, +// and returns only records that match the specified SQL expression. You must +// also specify the data serialization format for the response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation SelectObjectContent for usage and error information. 
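+//
+// A minimal sketch of building the request input for a CSV-formatted object;
+// the bucket, key, and SQL expression are hypothetical placeholders, and the
+// enum constants are assumed to match this SDK's generated values:
+//
+//    params := &SelectObjectContentInput{
+//        Bucket:         aws.String("example-bucket"),
+//        Key:            aws.String("example-data.csv"),
+//        Expression:     aws.String("SELECT * FROM S3Object s"),
+//        ExpressionType: aws.String(ExpressionTypeSql),
+//        InputSerialization: &InputSerialization{
+//            CSV: &CSVInput{FileHeaderInfo: aws.String(FileHeaderInfoUse)},
+//        },
+//        OutputSerialization: &OutputSerialization{CSV: &CSVOutput{}},
+//    }
+//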
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + return out, req.Send() +} + +// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of +// the ability to pass a context and additional request options. +// +// See SelectObjectContent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPart for more information on using the UploadPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. 
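+//
+// A minimal sketch of uploading one part of a previously initiated multipart
+// upload; the bucket, key, upload ID, and partData byte slice are hypothetical:
+//
+//    _, err := client.UploadPart(&UploadPartInput{
+//        Bucket:     aws.String("example-bucket"),
+//        Key:        aws.String("example-key"),
+//        UploadId:   aws.String("example-upload-id"),
+//        PartNumber: aws.Int64(1),
+//        Body:       bytes.NewReader(partData),
+//    })
+//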
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPartCopy for more information on using the UploadPartCopy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartCopyRequest method. +// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + output = &UploadPartCopyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPartCopy API operation for Amazon Simple Storage Service. +// +// Uploads a part by copying data from an existing object as data source. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Specifies the days since the initiation of an Incomplete Multipart Upload +// that Lifecycle will wait before permanently removing all parts of the upload. +type AbortIncompleteMultipartUpload struct { + _ struct{} `type:"structure"` + + // Indicates the number of days that must pass since initiation for Lifecycle + // to abort an Incomplete Multipart Upload. + DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + +// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. +func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +type AbortMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. 
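+//
+// These setters return the receiver, so the input can be built by chaining;
+// a minimal sketch with hypothetical values:
+//
+//    input := (&AbortMultipartUploadInput{}).
+//        SetBucket("example-bucket").
+//        SetKey("example-key").
+//        SetUploadId("example-upload-id")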
+func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +// Container for information regarding the access control for replicas. +type AccessControlTranslation struct { + _ struct{} `type:"structure"` + + // The override value for the owner of the replica object. 
+ // + // Owner is a required field + Owner *string `type:"string" required:"true" enum:"OwnerOverride"` +} + +// String returns the string representation +func (s AccessControlTranslation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlTranslation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlTranslation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { + s.Owner = &v + return s +} + +type AnalyticsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix to use when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags to use when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s AnalyticsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // If present, it indicates that data related to access patterns will be collected + // and made available to analyze the tradeoffs between different storage classes. + // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
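+//
+// A minimal sketch of checking a configuration before sending it; cfg is a
+// hypothetical *AnalyticsConfiguration built elsewhere:
+//
+//    if err := cfg.Validate(); err != nil {
+//        // err is a request.ErrInvalidParams naming the missing or
+//        // invalid fields
+//    }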
+func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. + And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. + Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
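+//
+// As noted above, a filter must carry exactly one of Prefix, Tag, or And;
+// a minimal sketch using a hypothetical prefix:
+//
+//    filter := &AnalyticsFilter{Prefix: aws.String("logs/")}
+//    if err := filter.Validate(); err != nil {
+//        // handle an invalid nested And or Tag predicate
+//    }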
+func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon resource name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination bucket. If no account ID is provided, + // the owner will not be validated prior to exporting data. + BucketAccountId *string `type:"string"` + + // The file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The exported data begins with this + // prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *AnalyticsS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketAccountId sets the BucketAccountId field's value. +func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { + s.BucketAccountId = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { + s.Prefix = &v + return s +} + +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp"` + + // The name of the bucket. 
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Bucket) SetCreationDate(v time.Time) *Bucket { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + // Container for logging information. Presence of this element indicates that + // logging is enabled. Parameters TargetBucket and TargetPrefix are required + // in this case. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
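+//
+// A minimal sketch of a configuration that satisfies the required fields;
+// the origin and method values are hypothetical:
+//
+//    cfg := &CORSConfiguration{CORSRules: []*CORSRule{{
+//        AllowedMethods: []*string{aws.String("GET")},
+//        AllowedOrigins: []*string{aws.String("https://example.com")},
+//    }}}
+//    if err := cfg.Validate(); err != nil {
+//        // handle missing CORSRules or rule fields
+//    }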
+func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. +func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. +func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Describes how a CSV-formatted input object is formatted. +type CSVInput struct { + _ struct{} `type:"structure"` + + // Specifies that CSV field values may contain quoted record delimiters and + // such records should be allowed. 
Default value is FALSE. Setting this value + // to TRUE may lower performance. + AllowQuotedRecordDelimiter *bool `type:"boolean"` + + // Single character used to indicate a row should be ignored when present at + // the start of a row. + Comments *string `type:"string"` + + // Value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // Describes the first line of input. Valid values: None, Ignore, Use. + FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` + + // Value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // Single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVInput) GoString() string { + return s.String() +} + +// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value. +func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput { + s.AllowQuotedRecordDelimiter = &v + return s +} + +// SetComments sets the Comments field's value. +func (s *CSVInput) SetComments(v string) *CSVInput { + s.Comments = &v + return s +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput { + s.FieldDelimiter = &v + return s +} + +// SetFileHeaderInfo sets the FileHeaderInfo field's value. +func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput { + s.FileHeaderInfo = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { + s.RecordDelimiter = &v + return s +} + +// Describes how CSV-formatted results are formatted. +type CSVOutput struct { + _ struct{} `type:"structure"` + + // Value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // Value used for escaping where the field delimiter is part of the value. + QuoteCharacter *string `type:"string"` + + // Single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether or not all output fields should be quoted. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // Value used to separate individual records. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. 
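+//
+// These setters chain, so an output shape for SelectObjectContent can be
+// built inline; delimiter values here are hypothetical:
+//
+//    out := (&CSVOutput{}).
+//        SetFieldDelimiter(",").
+//        SetRecordDelimiter("\n")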
+func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. +func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. +func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). 
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { + s.VersionId = &v + return s +} + +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +// SetParts sets the Parts field's value. +func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { + s.Parts = v + return s +} + +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. 
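The CompletedPart/CompletedMultipartUpload pair defined above is how a multipart upload gets closed out: each UploadPart response contributes an ETag, which is paired with its part number and sent back in one CompleteMultipartUpload call. A hedged sketch of that last step; the client construction, bucket, key, upload ID, and ETags are all illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// ETags collected from earlier UploadPart responses; part numbers must
	// match the ones used when the parts were uploaded.
	parts := []*s3.CompletedPart{
		{ETag: aws.String(`"etag-1"`), PartNumber: aws.Int64(1)}, // hypothetical
		{ETag: aws.String(`"etag-2"`), PartNumber: aws.Int64(2)}, // hypothetical
	}

	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          aws.String("my-bucket"),  // hypothetical
		Key:             aws.String("big-object"), // hypothetical
		UploadId:        aws.String("upload-id"),  // from CreateMultipartUpload
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("completed:", aws.StringValue(out.Location))
}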
+func (s *CompletedPart) SetETag(v string) *CompletedPart {
+ s.ETag = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
+ s.PartNumber = &v
+ return s
+}
+
+type Condition struct {
+ _ struct{} `type:"structure"`
+
+ // The HTTP error code when the redirect is applied. In the event of an error,
+ // if the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string `type:"string"`
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+ // To redirect requests for all pages with the prefix docs/, the key prefix will
+ // be docs/, which identifies all objects in the docs/ folder. Required when
+ // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for
+ // the redirect to be applied.
+ KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+ s.HttpErrorCodeReturnedEquals = &v
+ return s
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
+ s.KeyPrefixEquals = &v
+ return s
+}
+
+type ContinuationEvent struct {
+ _ struct{} `locationName:"ContinuationEvent" type:"structure"`
+}
+
+// String returns the string representation
+func (s ContinuationEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContinuationEvent) GoString() string {
+ return s.String()
+}
+
+// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ContinuationEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ return nil
+}
+
+type CopyObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. 
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the destination object; this value must be used in
+ // conjunction with the TaggingDirective. The tag-set must be encoded as URL
+ // query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+ // Specifies whether the object tag-set is copied from the source object or
+ // replaced with the tag-set provided in the request.
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { + s.Bucket = &v + return s +} + +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { + s.ContentType = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. 
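One practical note on CopyObjectInput: Validate above only checks that Bucket, CopySource, and Key are present; it does not check that CopySource is URL-encoded, which the field's comment requires. A sketch that escapes the source key explicitly (all names invented):

package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// x-amz-copy-source is "sourcebucket/sourcekey", URL-encoded.
	src := "source-bucket/" + url.PathEscape("reports/2018 Q4.csv") // hypothetical

	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("dest-bucket"), // hypothetical
		Key:        aws.String("reports/q4.csv"),
		CopySource: aws.String(src),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}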
+func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. 
+func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +// SetCopyObjectResult sets the CopyObjectResult field's value. +func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s +} + +// SetExpiration sets the Expiration field's value. 
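Since every generated setter returns its receiver, inputs can also be assembled as a chain rather than a struct literal. A sketch using that style for the common metadata-rewrite trick, copying an object onto itself with the REPLACE directive (bucket, key, and metadata are invented):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Each Set* call returns the modified *CopyObjectInput, so the
	// builder chain below is equivalent to a struct literal.
	input := new(s3.CopyObjectInput).
		SetBucket("my-bucket"). // hypothetical
		SetKey("docs/readme.txt").
		SetCopySource("my-bucket/docs/readme.txt").
		SetMetadataDirective(s3.MetadataDirectiveReplace).
		SetMetadata(map[string]*string{
			"owner": aws.String("platform-team"), // hypothetical
		})
	fmt.Println(input)
}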
+func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v + return s +} + +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v + return s +} + +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. 
+func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v + return s +} + +type CreateBucketInput struct { + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v + return s +} + +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. 
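CreateBucketConfiguration and CreateBucketInput combine as below; the location constraint is only supplied when the bucket should live outside the default region (classic us-east-1, the "US Standard" mentioned above). A minimal sketch with an invented region and bucket name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"), // hypothetical
	}))
	svc := s3.New(sess)

	out, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("my-new-bucket"), // hypothetical; must be globally unique
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String(s3.BucketLocationConstraintUsWest2),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created at:", aws.StringValue(out.Location))
}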
+func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
+ s.GrantWriteACP = &v
+ return s
+}
+
+type CreateBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketOutput) GoString() string {
+ return s.String()
+}
+
+// SetLocation sets the Location field's value.
+func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
+ s.Location = &v
+ return s
+}
+
+type CreateMultipartUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. 
+func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
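Tying the SSE fields together: per the SSEKMSKeyId comment above, a KMS-protected upload has to be made over SSL with SigV4 signing. A sketch that starts a KMS-encrypted multipart upload; the key ARN and object names are invented:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("my-bucket"),  // hypothetical
		Key:                  aws.String("big-object"), // hypothetical
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"), // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	// The returned UploadId must accompany every subsequent UploadPart call
	// and the final CompleteMultipartUpload (or AbortMultipartUpload).
	fmt.Println("upload id:", aws.StringValue(out.UploadId))
}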
+func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { + s.WebsiteRedirectLocation = &v + return s +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. 
+func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +type Delete struct { + _ struct{} `type:"structure"` + + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjects sets the Objects field's value. +func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. +func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which an analytics configuration is deleted. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
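The Delete payload defined a little further up (with its nested ObjectIdentifier validation) drives batch deletes; quiet mode suppresses the per-key success entries so only failures come back. A sketch with invented names:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("my-bucket"), // hypothetical
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("tmp/a.txt")}, // hypothetical keys
				{Key: aws.String("tmp/b.txt")},
			},
			Quiet: aws.Bool(true), // only failures are reported back
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("failed deletes:", len(out.Errors))
}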
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketEncryptionInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the server-side encryption configuration + // to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +type DeleteBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + // Deletes the replication subresource associated with the specified bucket. + // + // There is usually some time lag before replication configuration deletion + // is fully propagated to all the Amazon S3 systems. + // + // For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) + // in the Amazon S3 Developer Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() +} + +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +// SetIsLatest sets the IsLatest field's value. +func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. 
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+ s.LastModified = &v
+ return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+ s.Owner = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+ s.VersionId = &v
+ return s
+}
+
+// Specifies whether Amazon S3 should replicate delete markers.
+type DeleteMarkerReplication struct {
+ _ struct{} `type:"structure"`
+
+ // The status of the delete marker replication.
+ //
+ // In the current implementation, Amazon S3 does not replicate the delete markers.
+ // Therefore, the status must be Disabled.
+ Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerReplication) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerReplication) GoString() string {
+ return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
+ s.Status = &v
+ return s
+}
+
+type DeleteObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *DeleteObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetKey sets the Key field's value.
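+//
+// Illustrative sketch (editor's addition, not generated code): the setters
+// return the receiver, so a DeleteObjectInput can be built fluently. The
+// bucket, key, and version ID are placeholders, and svc is an assumed
+// *s3.S3 client.
+//
+//    input := (&s3.DeleteObjectInput{}).
+//        SetBucket("example-bucket").
+//        SetKey("path/to/object.txt").
+//        SetVersionId("some-version-id")
+//    _, err := svc.DeleteObject(input)
+//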
+func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v + return s +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { + s.VersionId = &v + return s +} + +type DeleteObjectTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v + return s +} + +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Delete is a required field + Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelete sets the Delete field's value. +func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v + return s +} + +// SetMFA sets the MFA field's value. 
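+//
+// Illustrative sketch (editor's addition, not generated code): a quiet batch
+// delete built around the Delete container defined earlier. The bucket and
+// keys are placeholders, and svc is an assumed *s3.S3 client. With Quiet set,
+// S3 reports back only the keys that failed to delete.
+//
+//    out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//        Bucket: aws.String("example-bucket"),
+//        Delete: &s3.Delete{
+//            Objects: []*s3.ObjectIdentifier{
+//                {Key: aws.String("a.txt")},
+//                {Key: aws.String("b.txt")},
+//            },
+//            Quiet: aws.Bool(true),
+//        },
+//    })
+//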
+func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput {
+ s.MFA = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput {
+ s.RequestPayer = &v
+ return s
+}
+
+type DeleteObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ Deleted []*DeletedObject `type:"list" flattened:"true"`
+
+ Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeleted sets the Deleted field's value.
+func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput {
+ s.Deleted = v
+ return s
+}
+
+// SetErrors sets the Errors field's value.
+func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput {
+ s.Errors = v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+type DeletedObject struct {
+ _ struct{} `type:"structure"`
+
+ DeleteMarker *bool `type:"boolean"`
+
+ DeleteMarkerVersionId *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeletedObject) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletedObject) GoString() string {
+ return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject {
+ s.DeleteMarker = &v
+ return s
+}
+
+// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value.
+func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject {
+ s.DeleteMarkerVersionId = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeletedObject) SetKey(v string) *DeletedObject {
+ s.Key = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
+ s.VersionId = &v
+ return s
+}
+
+// Container for replication destination information.
+type Destination struct {
+ _ struct{} `type:"structure"`
+
+ // Container for information regarding the access control for replicas.
+ //
+ // Use only in a cross-account scenario, where source and destination bucket
+ // owners are not the same, when you want to change replica ownership to the
+ // AWS account that owns the destination bucket. If you don't add this element
+ // to the replication configuration, the replicas are owned by the same AWS
+ // account that owns the source object.
+ AccessControlTranslation *AccessControlTranslation `type:"structure"`
+
+ // Account ID of the destination bucket. Currently Amazon S3 verifies this value
+ // only if Access Control Translation is enabled.
+ //
+ // In a cross-account scenario, if you tell Amazon S3 to change replica ownership
+ // to the AWS account that owns the destination bucket by adding the AccessControlTranslation
+ // element, this is the account ID of the destination bucket owner.
+ Account *string `type:"string"`
+
+ // Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+ // replicas of the object identified by the rule.
+ //
+ // If you have multiple rules in your replication configuration, all rules must
+ // specify the same bucket as the destination. A replication configuration can
+ // replicate objects only to one destination bucket.
+ //
+ // Bucket is a required field
+ Bucket *string `type:"string" required:"true"`
+
+ // Container that provides encryption-related information. You must specify
+ // this element if SourceSelectionCriteria is specified.
+ EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+}
+
+// String returns the string representation
+func (s Destination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Destination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Destination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Destination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.AccessControlTranslation != nil {
+ if err := s.AccessControlTranslation.Validate(); err != nil {
+ invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAccessControlTranslation sets the AccessControlTranslation field's value.
+func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination {
+ s.AccessControlTranslation = v
+ return s
+}
+
+// SetAccount sets the Account field's value.
+func (s *Destination) SetAccount(v string) *Destination {
+ s.Account = &v
+ return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *Destination) SetBucket(v string) *Destination {
+ s.Bucket = &v
+ return s
+}
+
+func (s *Destination) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
+func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination {
+ s.EncryptionConfiguration = v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Destination) SetStorageClass(v string) *Destination {
+ s.StorageClass = &v
+ return s
+}
+
+// Describes the server-side encryption that will be applied to the restore
+// results.
+type Encryption struct {
+ _ struct{} `type:"structure"`
+
+ // The server-side encryption algorithm used when storing job results in Amazon
+ // S3 (e.g., AES256, aws:kms).
+ //
+ // EncryptionType is a required field
+ EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"`
+
+ // If the encryption type is aws:kms, this optional value can be used to specify
+ // the encryption context for the restore results.
+ KMSContext *string `type:"string"`
+
+ // If the encryption type is aws:kms, this optional value specifies the AWS
+ // KMS key ID to use for encryption of job results.
+ KMSKeyId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Encryption) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Encryption) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Encryption) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Encryption"}
+ if s.EncryptionType == nil {
+ invalidParams.Add(request.NewErrParamRequired("EncryptionType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEncryptionType sets the EncryptionType field's value.
+func (s *Encryption) SetEncryptionType(v string) *Encryption {
+ s.EncryptionType = &v
+ return s
+}
+
+// SetKMSContext sets the KMSContext field's value.
+func (s *Encryption) SetKMSContext(v string) *Encryption {
+ s.KMSContext = &v
+ return s
+}
+
+// SetKMSKeyId sets the KMSKeyId field's value.
+func (s *Encryption) SetKMSKeyId(v string) *Encryption {
+ s.KMSKeyId = &v
+ return s
+}
+
+// Container for information regarding encryption-based configuration for replicas.
+type EncryptionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The ID of the AWS KMS key for the region where the destination bucket resides.
+ // Amazon S3 uses this key to encrypt the replica object.
+ ReplicaKmsKeyID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s EncryptionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EncryptionConfiguration) GoString() string {
+ return s.String()
+}
+
+// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value.
+func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration {
+ s.ReplicaKmsKeyID = &v
+ return s
+}
+
+type EndEvent struct {
+ _ struct{} `locationName:"EndEvent" type:"structure"`
+}
+
+// String returns the string representation
+func (s EndEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s EndEvent) GoString() string {
+ return s.String()
+}
+
+// The EndEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *EndEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *EndEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ return nil
+}
+
+type Error struct {
+ _ struct{} `type:"structure"`
+
+ Code *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ Message *string `type:"string"`
+
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Error) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Error) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *Error) SetCode(v string) *Error {
+ s.Code = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Error) SetKey(v string) *Error {
+ s.Key = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *Error) SetMessage(v string) *Error {
+ s.Message = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
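+//
+// Illustrative sketch (editor's addition, not generated code): per-key
+// failures from a batch delete surface as Error values on the
+// DeleteObjectsOutput.Errors list; out is assumed to come from a prior
+// DeleteObjects call.
+//
+//    for _, e := range out.Errors {
+//        log.Printf("delete %s failed: %s (%s)",
+//            aws.StringValue(e.Key), aws.StringValue(e.Message), aws.StringValue(e.Code))
+//    }
+//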
+func (s *Error) SetVersionId(v string) *Error {
+ s.VersionId = &v
+ return s
+}
+
+type ErrorDocument struct {
+ _ struct{} `type:"structure"`
+
+ // The object key name to use when a 4XX class error occurs.
+ //
+ // Key is a required field
+ Key *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ErrorDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ErrorDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ErrorDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
+ s.Key = &v
+ return s
+}
+
+// Container for a key-value pair that defines the criteria for the filter rule.
+type FilterRule struct {
+ _ struct{} `type:"structure"`
+
+ // Object key name prefix or suffix identifying one or more objects to which
+ // the filtering rule applies. The prefix can be up to 1,024 characters long.
+ // Overlapping prefixes and suffixes are not supported. For more information,
+ // see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Name *string `type:"string" enum:"FilterRuleName"`
+
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s FilterRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FilterRule) GoString() string {
+ return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *FilterRule) SetName(v string) *FilterRule {
+ s.Name = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *FilterRule) SetValue(v string) *FilterRule {
+ s.Value = &v
+ return s
+}
+
+type GetBucketAccelerateConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket for which the accelerate configuration is retrieved.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAccelerateConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccelerateConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAccelerateConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
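+//
+// Illustrative sketch (editor's addition, not generated code): reading a
+// bucket's accelerate status. The bucket name is a placeholder, svc is an
+// assumed *s3.S3 client, and Status may be unset if acceleration was never
+// configured.
+//
+//    out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil && aws.StringValue(out.Status) == s3.BucketAccelerateStatusEnabled {
+//        // transfer acceleration is enabled on the bucket
+//    }
+//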
+func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v + return s +} + +type GetBucketAclInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +type GetBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which an analytics configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { + s.AnalyticsConfiguration = v + return s +} + +type GetBucketCorsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
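+//
+// Illustrative sketch (editor's addition, not generated code): fetching the
+// CORS rules configured on a bucket. The bucket name is a placeholder and
+// svc is an assumed *s3.S3 client.
+//
+//    out, err := svc.GetBucketCors(&s3.GetBucketCorsInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        for _, rule := range out.CORSRules {
+//            fmt.Println(rule)
+//        }
+//    }
+//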
+func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +type GetBucketEncryptionInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Container for server-side encryption configuration rules. Currently S3 supports + // one rule only. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + +type GetBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. 
+ // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +type GetBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v + return s +} + +type GetBucketLocationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
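+//
+// Illustrative sketch (editor's addition, not generated code): resolving the
+// region a bucket lives in. The bucket name is a placeholder and svc is an
+// assumed *s3.S3 client; an empty LocationConstraint denotes us-east-1.
+//
+//    out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        region := aws.StringValue(out.LocationConstraint)
+//        if region == "" {
+//            region = "us-east-1"
+//        }
+//    }
+//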
+func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +type GetBucketLoggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + // Container for logging information. Presence of this element indicates that + // logging is enabled. Parameters TargetBucket and TargetPrefix are required + // in this case. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +type GetBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. + MetricsConfiguration *MetricsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `type:"structure"` + + // Name of the bucket to get the notification configuration for. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { + s.Bucket = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketPolicyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
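+// A typical call with this input looks like the following sketch (svc is an
+// assumed *s3.S3 client and the bucket name is a placeholder):
+//
+//   out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//       Bucket: aws.String("example-bucket"),
+//   })
+//   if err == nil {
+//       policyJSON := aws.StringValue(out.Policy) // the policy document as JSON
+//       _ = policyJSON
+//   }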
+func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +type GetBucketReplicationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. 
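+// When reading a response rather than building one, the nested configuration
+// is traversed directly (a sketch; svc and the bucket name are assumptions,
+// and the Rules/Status fields belong to types defined elsewhere in this file):
+//
+//   out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
+//       Bucket: aws.String("example-bucket"),
+//   })
+//   if err == nil && out.ReplicationConfiguration != nil {
+//       for _, rule := range out.ReplicationConfiguration.Rules {
+//           fmt.Println(aws.StringValue(rule.Status))
+//       }
+//   }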
+func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v + return s +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v + return s +} + +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. 
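+// Reading the tag set out of a response is the more common direction (a
+// sketch; svc and the bucket name are assumptions):
+//
+//   out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
+//       Bucket: aws.String("example-bucket"),
+//   })
+//   if err == nil {
+//       for _, tag := range out.TagSet {
+//           fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//       }
+//   }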
+func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} + +type GetBucketVersioningInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v + return s +} + +type GetBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
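+// For example (a sketch; svc and the bucket name are assumptions):
+//
+//   out, err := svc.GetBucketWebsite(&s3.GetBucketWebsiteInput{
+//       Bucket: aws.String("example-bucket"),
+//   })
+//   if err == nil && out.IndexDocument != nil {
+//       fmt.Println(aws.StringValue(out.IndexDocument.Suffix)) // e.g. "index.html"
+//   }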
+func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +type GetObjectAclInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
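+// Because every setter returns its receiver, an input can be built fluently
+// (a sketch with placeholder bucket and key names; svc is an assumed client):
+//
+//   input := new(s3.GetObjectAclInput).
+//       SetBucket("example-bucket").
+//       SetKey("example-key")
+//   out, err := svc.GetObjectAcl(input)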
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +type GetObjectInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. 
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
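+// Putting the input to work (a sketch; svc, the bucket, and the key are
+// assumptions, and the ioutil import is implied):
+//
+//   out, err := svc.GetObject(&s3.GetObjectInput{
+//       Bucket: aws.String("example-bucket"),
+//       Key:    aws.String("example-key"),
+//       Range:  aws.String("bytes=0-1023"), // optional ranged read
+//   })
+//   if err == nil {
+//       defer out.Body.Close()
+//       data, _ := ioutil.ReadAll(out.Body)
+//       _ = data
+//   }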
+func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. 
+func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. 
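+// Note that GetObjectOutput.Body streams directly from the HTTP response, so
+// callers should drain and close it to release the connection. A sketch (out
+// is an assumed *s3.GetObjectOutput; io and ioutil imports are implied):
+//
+//   defer out.Body.Close()
+//   n, err := io.Copy(ioutil.Discard, out.Body) // or decode/write the data instead
+//   fmt.Println(n, err, aws.StringValue(out.ContentType))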
+func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type GetObjectTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +type GetObjectTorrentInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+ // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTorrentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v + return s +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. +func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. + // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. 
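+// GlacierJobParameters travels inside a RestoreRequest; a sketch of a restore
+// call (svc, the bucket, and the key are assumptions, and RestoreRequest is
+// defined elsewhere in this file):
+//
+//   _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//       Bucket: aws.String("example-bucket"),
+//       Key:    aws.String("example-key"),
+//       RestoreRequest: &s3.RestoreRequest{
+//           Days: aws.Int64(1),
+//           GlacierJobParameters: &s3.GlacierJobParameters{
+//               Tier: aws.String(s3.TierStandard),
+//           },
+//       },
+//   })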
+func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *Grantee) SetEmailAddress(v string) *Grantee { + s.EmailAddress = &v + return s +} + +// SetID sets the ID field's value. +func (s *Grantee) SetID(v string) *Grantee { + s.ID = &v + return s +} + +// SetType sets the Type field's value. +func (s *Grantee) SetType(v string) *Grantee { + s.Type = &v + return s +} + +// SetURI sets the URI field's value. +func (s *Grantee) SetURI(v string) *Grantee { + s.URI = &v + return s +} + +type HeadBucketInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
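+// HeadBucket is commonly used as an existence and permission probe (a sketch;
+// svc and the bucket name are assumptions):
+//
+//   _, err := svc.HeadBucket(&s3.HeadBucketInput{
+//       Bucket: aws.String("example-bucket"),
+//   })
+//   // A nil err means the bucket exists and the caller may access it.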
+func (s *HeadBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *HeadBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+type HeadBucketOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s HeadBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketOutput) GoString() string {
+ return s.String()
+}
+
+type HeadObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+ // Useful for querying the size of the part and the number of parts in this
+ // object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // Downloads the specified range bytes of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *HeadObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
+ s.IfMatch = &v
+ return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput {
+ s.IfModifiedSince = &v
+ return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput {
+ s.IfNoneMatch = &v
+ return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput {
+ s.IfUnmodifiedSince = &v
+ return s
+}
+
+// SetKey sets the Key field's value.
+func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput {
+ s.Key = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput {
+ s.PartNumber = &v
+ return s
+}
+
+// SetRange sets the Range field's value.
+func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput {
+ s.Range = &v
+ return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput {
+ s.RequestPayer = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
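+// A metadata-only probe with this input looks like the following sketch (svc,
+// the bucket, and the key are assumptions):
+//
+//   out, err := svc.HeadObject(&s3.HeadObjectInput{
+//       Bucket: aws.String("example-bucket"),
+//       Key:    aws.String("example-key"),
+//   })
+//   if err == nil {
+//       fmt.Println(aws.Int64Value(out.ContentLength), aws.StringValue(out.ETag))
+//   }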
+func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The count of parts this object has. 
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about the object restoration operation and the expiration + // time of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value.
+func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. 
if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + // + // Suffix is a required field + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSuffix sets the Suffix field's value. +func (s *IndexDocument) SetSuffix(v string) *IndexDocument { + s.Suffix = &v + return s +} + +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} + +// Describes the serialization format of the object. +type InputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of a CSV-encoded object. + CSV *CSVInput `type:"structure"` + + // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default + // Value: NONE. + CompressionType *string `type:"string" enum:"CompressionType"` + + // Specifies JSON as object's input serialization format. + JSON *JSONInput `type:"structure"` + + // Specifies Parquet as object's input serialization format. + Parquet *ParquetInput `type:"structure"` +} + +// String returns the string representation +func (s InputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { + s.CSV = v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { + s.CompressionType = &v + return s +} + +// SetJSON sets the JSON field's value. +func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { + s.JSON = v + return s +} + +// SetParquet sets the Parquet field's value. +func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { + s.Parquet = v + return s +} + +type InventoryConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about where to publish the inventory results. 
+ // + // Destination is a required field + Destination *InventoryDestination `type:"structure" required:"true"` + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. + Filter *InventoryFilter `type:"structure"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies which object version(s) to include in the inventory results. + // + // IncludedObjectVersions is a required field + IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` + + // Specifies whether the inventory is enabled or disabled. + // + // IsEnabled is a required field + IsEnabled *bool `type:"boolean" required:"true"` + + // Contains the optional fields that are included in the inventory results. + OptionalFields []*string `locationNameList:"Field" type:"list"` + + // Specifies the schedule for generating inventory results. + // + // Schedule is a required field + Schedule *InventorySchedule `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IncludedObjectVersions == nil { + invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions")) + } + if s.IsEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("IsEnabled")) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration { + s.Destination = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration { + s.Id = &v + return s +} + +// SetIncludedObjectVersions sets the IncludedObjectVersions field's value. +func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration { + s.IncludedObjectVersions = &v + return s +} + +// SetIsEnabled sets the IsEnabled field's value. +func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration { + s.IsEnabled = &v + return s +} + +// SetOptionalFields sets the OptionalFields field's value.
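// Editor's note: a minimal sketch, not part of the vendored SDK source, showing
// how the required InventoryConfiguration fields above fit together and how
// Validate catches omissions before the configuration is sent (for example via
// PutBucketInventoryConfiguration). The ID and bucket ARN are hypothetical; the
// s3.Inventory... constants are assumed to be the generated enum values.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func exampleInventoryConfiguration() (*s3.InventoryConfiguration, error) {
	cfg := &s3.InventoryConfiguration{
		Id:                     aws.String("weekly-inventory"),
		IsEnabled:              aws.Bool(true),
		IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
		Schedule: &s3.InventorySchedule{
			Frequency: aws.String(s3.InventoryFrequencyWeekly),
		},
		Destination: &s3.InventoryDestination{
			S3BucketDestination: &s3.InventoryS3BucketDestination{
				Bucket: aws.String("arn:aws:s3:::example-inventory-bucket"),
				Format: aws.String(s3.InventoryFormatCsv),
			},
		},
	}
	// Validate reports any missing required field (Destination, Id,
	// IncludedObjectVersions, IsEnabled, Schedule) without a round trip.
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}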
+func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration { + s.OptionalFields = v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { + s.Schedule = v + return s +} + +type InventoryDestination struct { + _ struct{} `type:"structure"` + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // S3BucketDestination is a required field + S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { + s.S3BucketDestination = v + return s +} + +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delivered Inventory reports. + SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delivered Inventory reports. + SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v + return s +} + +// SetSSES3 sets the SSES3 field's value. +func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v + return s +} + +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The prefix that an object must have to be included in the inventory results.
+ // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v + return s +} + +type InventoryS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The ID of the account that owns the destination bucket. + AccountId *string `type:"string"` + + // The Amazon resource name (ARN) of the bucket where inventory results will + // be published. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s InventoryS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams } + return nil +} - if input == nil { - input = &UploadPartCopyInput{} +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v } + return *s.Bucket +} - req = c.newRequest(op, input, output) - output = &UploadPartCopyOutput{} - req.Data = output - return +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s } -// Uploads a part by copying data from an existing object as data source. 
-func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { - req, out := c.UploadPartCopyRequest(input) - err := req.Send() - return out, err +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v + return s } -// Specifies the days since the initiation of an Incomplete Multipart Upload -// that Lifecycle will wait before permanently removing all parts of the upload. -type AbortIncompleteMultipartUpload struct { +// SetPrefix sets the Prefix field's value. +func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} + +type InventorySchedule struct { _ struct{} `type:"structure"` - // Indicates the number of days that must pass since initiation for Lifecycle - // to abort an Incomplete Multipart Upload. - DaysAfterInitiation *int64 `type:"integer"` + // Specifies how frequently inventory results are produced. + // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` } // String returns the string representation -func (s AbortIncompleteMultipartUpload) String() string { +func (s InventorySchedule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AbortIncompleteMultipartUpload) GoString() string { +func (s InventorySchedule) GoString() string { return s.String() } -type AbortMultipartUploadInput struct { +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventorySchedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} + if s.Frequency == nil { + invalidParams.Add(request.NewErrParamRequired("Frequency")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFrequency sets the Frequency field's value. +func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { + s.Frequency = &v + return s +} + +type JSONInput struct { _ struct{} `type:"structure"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The type of JSON. Valid values: Document, Lines. + Type *string `type:"string" enum:"JSONType"` +} - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` +// String returns the string representation +func (s JSONInput) String() string { + return awsutil.Prettify(s) +} - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +// GoString returns the string representation +func (s JSONInput) GoString() string { + return s.String() +} - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +// SetType sets the Type field's value. +func (s *JSONInput) SetType(v string) *JSONInput { + s.Type = &v + return s +} + +type JSONOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual records in the output. 
+ RecordDelimiter *string `type:"string"` } // String returns the string representation -func (s AbortMultipartUploadInput) String() string { +func (s JSONOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AbortMultipartUploadInput) GoString() string { +func (s JSONOutput) GoString() string { + return s.String() +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { + s.RecordDelimiter = &v + return s +} + +// Container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for key-value pairs that define the criteria for the + // filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v + return s +} + +// Container for specifying the AWS Lambda notification configuration. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + // + // LambdaFunctionArn is a required field + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid.
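// Editor's note: an illustrative sketch, not part of the vendored SDK source,
// of how KeyFilter, FilterRule, and LambdaFunctionConfiguration combine inside
// a bucket notification configuration. The function ARN is hypothetical, and
// the event and rule-name constants are assumed to be the generated enum values.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func exampleLambdaNotification() *s3.NotificationConfiguration {
	return &s3.NotificationConfiguration{
		LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{{
			// Fire on any object-creation event...
			Events: []*string{aws.String(s3.EventS3ObjectCreated)},
			// ...but only for keys ending in ".jpg".
			Filter: &s3.NotificationConfigurationFilter{
				Key: &s3.KeyFilter{
					FilterRules: []*s3.FilterRule{{
						Name:  aws.String(s3.FilterRuleNameSuffix),
						Value: aws.String(".jpg"),
					}},
				},
			},
			LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:123456789012:function:on-upload"),
		}},
	}
}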
-func (s *AbortMultipartUploadInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) +func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) } if invalidParams.Len() > 0 { @@ -3165,71 +13659,181 @@ func (s *AbortMultipartUploadInput) Validate() error { return nil } -type AbortMultipartUploadOutput struct { +// SetEvents sets the Events field's value. +func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { + s.Id = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { + s.LambdaFunctionArn = &v + return s +} + +type LifecycleConfiguration struct { _ struct{} `type:"structure"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Rules is a required field + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } // String returns the string representation -func (s AbortMultipartUploadOutput) String() string { +func (s LifecycleConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AbortMultipartUploadOutput) GoString() string { +func (s LifecycleConfiguration) GoString() string { return s.String() } -type AccelerateConfiguration struct { +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { + s.Rules = v + return s +} + +type LifecycleExpiration struct { _ struct{} `type:"structure"` - // The accelerate configuration of the bucket. - Status *string `type:"string" enum:"BucketAccelerateStatus"` + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. 
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `type:"boolean"` } // String returns the string representation -func (s AccelerateConfiguration) String() string { +func (s LifecycleExpiration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AccelerateConfiguration) GoString() string { +func (s LifecycleExpiration) GoString() string { return s.String() } -type AccessControlPolicy struct { +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v + return s +} + +// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. +func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { + s.ExpiredObjectDeleteMarker = &v + return s +} + +type LifecycleRule struct { _ struct{} `type:"structure"` - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - Owner *Owner `type:"structure"` + Expiration *LifecycleExpiration `type:"structure"` + + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified. + Filter *LifecycleRuleFilter `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. This is + // deprecated; use Filter instead. + // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. 
+ // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` } // String returns the string representation -func (s AccessControlPolicy) String() string { +func (s LifecycleRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AccessControlPolicy) GoString() string { +func (s LifecycleRule) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AccessControlPolicy) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} - if s.Grants != nil { - for i, v := range s.Grants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) - } +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) } } @@ -3239,55 +13843,93 @@ func (s *AccessControlPolicy) Validate() error { return nil } -type Bucket struct { - _ struct{} `type:"structure"` +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { + s.NoncurrentVersionExpiration = v + return s +} - // Date the bucket was created. - CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. +func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { + s.NoncurrentVersionTransitions = v + return s +} - // The name of the bucket. - Name *string `type:"string"` +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { + s.Prefix = &v + return s } -// String returns the string representation -func (s Bucket) String() string { - return awsutil.Prettify(s) +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s } -// GoString returns the string representation -func (s Bucket) GoString() string { - return s.String() +// SetTransitions sets the Transitions field's value. 
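// Editor's note: a minimal sketch, not part of the vendored SDK source, of a
// LifecycleRule assembling the pieces documented above: a prefix-based Filter
// (exactly one of Prefix, Tag, or And), a Days-based Expiration, and the
// required Status. The ID, prefix, and day counts are hypothetical.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func exampleLifecycleRule() *s3.LifecycleRule {
	return &s3.LifecycleRule{
		ID:     aws.String("expire-old-logs"),
		Status: aws.String(s3.ExpirationStatusEnabled),
		Filter: &s3.LifecycleRuleFilter{
			Prefix: aws.String("logs/"),
		},
		// Permanently delete matching objects 30 days after creation.
		Expiration: &s3.LifecycleExpiration{
			Days: aws.Int64(30),
		},
		// Also clean up multipart uploads that never completed.
		AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
			DaysAfterInitiation: aws.Int64(7),
		},
	}
}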
+func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v + return s } -type BucketLifecycleConfiguration struct { +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or +// more predicates. The Lifecycle Rule will apply to any object matching all +// of the predicates configured inside the And operator. +type LifecycleRuleAndOperator struct { _ struct{} `type:"structure"` - Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the rule + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } // String returns the string representation -func (s BucketLifecycleConfiguration) String() string { +func (s LifecycleRuleAndOperator) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BucketLifecycleConfiguration) GoString() string { +func (s LifecycleRuleAndOperator) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *BucketLifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { if v == nil { continue } if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) } } } @@ -3298,28 +13940,56 @@ func (s *BucketLifecycleConfiguration) Validate() error { return nil } -type BucketLoggingStatus struct { +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +type LifecycleRuleFilter struct { _ struct{} `type:"structure"` - LoggingEnabled *LoggingEnabled `type:"structure"` + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` + + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` } // String returns the string representation -func (s BucketLoggingStatus) String() string { +func (s LifecycleRuleFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BucketLoggingStatus) GoString() string { +func (s LifecycleRuleFilter) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *BucketLoggingStatus) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} - if s.LoggingEnabled != nil { - if err := s.LoggingEnabled.Validate(); err != nil { - invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) +func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) } } @@ -3329,37 +13999,52 @@ func (s *BucketLoggingStatus) Validate() error { return nil } -type CORSConfiguration struct { +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v + return s +} + +type ListBucketAnalyticsConfigurationsInput struct { _ struct{} `type:"structure"` - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` } // String returns the string representation -func (s CORSConfiguration) String() string { +func (s ListBucketAnalyticsConfigurationsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CORSConfiguration) GoString() string { +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CORSConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} - if s.CORSRules == nil { - invalidParams.Add(request.NewErrParamRequired("CORSRules")) - } - if s.CORSRules != nil { - for i, v := range s.CORSRules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) - } - } +func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } if invalidParams.Len() > 0 { @@ -3368,47 +14053,109 @@ func (s *CORSConfiguration) Validate() error { return nil } -type CORSRule struct { +// SetBucket sets the Bucket field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. 
+func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` - // Specifies which headers are allowed in a pre-flight OPTIONS request. - AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - // Identifies HTTP methods that the domain/origin specified in the rule is allowed - // to execute. - AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + // The ContinuationToken that represents where this request began. + ContinuationToken *string `type:"string"` - // One or more origins you want customers to be able to access the bucket from. - AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` - // One or more headers in the response that you want customers to be able to - // access from their applications (for example, from a JavaScript XMLHttpRequest - // object). - ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} - // The time in seconds that your browser is to cache the preflight response - // for the specified resource. - MaxAgeSeconds *int64 `type:"integer"` +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configurations to retrieve. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` } // String returns the string representation -func (s CORSRule) String() string { +func (s ListBucketInventoryConfigurationsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CORSRule) GoString() string { +func (s ListBucketInventoryConfigurationsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CORSRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CORSRule"} - if s.AllowedMethods == nil { - invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) - } - if s.AllowedOrigins == nil { - invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) +func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } if invalidParams.Len() > 0 { @@ -3417,92 +14164,110 @@ func (s *CORSRule) Validate() error { return nil } -type CloudFunctionConfiguration struct { - _ struct{} `type:"structure"` +// SetBucket sets the Bucket field's value. +func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} - CloudFunction *string `type:"string"` +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // Bucket event for which to send notifications. - Event *string `deprecated:"true" type:"string" enum:"Event"` +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} - Events []*string `locationName:"Event" type:"list" flattened:"true"` +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` - InvocationRole *string `type:"string"` + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of inventory configurations is truncated + // in this response. A value of true indicates that the list is truncated. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. 
+	NextContinuationToken *string `type:"string"`
 }

 // String returns the string representation
-func (s CloudFunctionConfiguration) String() string {
+func (s ListBucketInventoryConfigurationsOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CloudFunctionConfiguration) GoString() string {
+func (s ListBucketInventoryConfigurationsOutput) GoString() string {
 	return s.String()
 }

-type CommonPrefix struct {
-	_ struct{} `type:"structure"`
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
+	s.ContinuationToken = &v
+	return s
+}

-	Prefix *string `type:"string"`
+// SetInventoryConfigurationList sets the InventoryConfigurationList field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput {
+	s.InventoryConfigurationList = v
+	return s
 }

-// String returns the string representation
-func (s CommonPrefix) String() string {
-	return awsutil.Prettify(s)
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput {
+	s.IsTruncated = &v
+	return s
 }

-// GoString returns the string representation
-func (s CommonPrefix) GoString() string {
-	return s.String()
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
+	s.NextContinuationToken = &v
+	return s
 }

-type CompleteMultipartUploadInput struct {
-	_ struct{} `type:"structure" payload:"MultipartUpload"`
+type ListBucketMetricsConfigurationsInput struct {
+	_ struct{} `type:"structure"`

+	// The name of the bucket containing the metrics configurations to retrieve.
+	//
+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+	// The marker that is used to continue a metrics configuration listing that
+	// has been truncated. Use the NextContinuationToken from a previously truncated
+	// list response to continue the listing. The continuation token is an opaque
+	// value that Amazon S3 understands.
+	ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
 }

 // String returns the string representation
-func (s CompleteMultipartUploadInput) String() string {
+func (s ListBucketMetricsConfigurationsInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CompleteMultipartUploadInput) GoString() string {
+func (s ListBucketMetricsConfigurationsInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *CompleteMultipartUploadInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"}
+func (s *ListBucketMetricsConfigurationsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}
-	if s.UploadId == nil {
-		invalidParams.Add(request.NewErrParamRequired("UploadId"))
-	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -3510,259 +14275,413 @@ func (s *CompleteMultipartUploadInput) Validate() error {
 	return nil
 }

-type CompleteMultipartUploadOutput struct {
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput {
+	s.ContinuationToken = &v
+	return s
+}
+
+type ListBucketMetricsConfigurationsOutput struct {
 	_ struct{} `type:"structure"`

-	Bucket *string `type:"string"`
+	// The marker that is used as a starting point for this metrics configuration
+	// list response. This value is present if it was sent in the request.
+	ContinuationToken *string `type:"string"`

-	// Entity tag of the object.
-	ETag *string `type:"string"`
+	// Indicates whether the returned list of metrics configurations is complete.
+	// A value of true indicates that the list is not complete and the NextContinuationToken
+	// will be provided for a subsequent request.
+	IsTruncated *bool `type:"boolean"`

-	// If the object expiration is configured, this will contain the expiration
-	// date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
-	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+	// The list of metrics configurations for a bucket.
+	MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"`

-	Key *string `min:"1" type:"string"`
+	// The marker used to continue a metrics configuration listing that has been
+	// truncated. Use the NextContinuationToken from a previously truncated list
+	// response to continue the listing. The continuation token is an opaque value
+	// that Amazon S3 understands.
+	NextContinuationToken *string `type:"string"`
+}

-	Location *string `type:"string"`
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) String() string {
+	return awsutil.Prettify(s)
+}

-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) GoString() string {
+	return s.String()
+}

-	// If present, specifies the ID of the AWS Key Management Service (KMS) master
-	// encryption key that was used for the object.
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+	s.ContinuationToken = &v
+	return s
+}

-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput {
+	s.IsTruncated = &v
+	return s
+}

-	// Version of the object.
-	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+// SetMetricsConfigurationList sets the MetricsConfigurationList field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput {
+	s.MetricsConfigurationList = v
+	return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+	s.NextContinuationToken = &v
+	return s
+}
+
+type ListBucketsInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsInput) GoString() string {
+	return s.String()
+}
+
+type ListBucketsOutput struct {
+	_ struct{} `type:"structure"`
+
+	Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+
+	Owner *Owner `type:"structure"`
 }

 // String returns the string representation
-func (s CompleteMultipartUploadOutput) String() string {
+func (s ListBucketsOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CompleteMultipartUploadOutput) GoString() string {
+func (s ListBucketsOutput) GoString() string {
 	return s.String()
 }

-type CompletedMultipartUpload struct {
+// SetBuckets sets the Buckets field's value.
+func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput {
+	s.Buckets = v
+	return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
+	s.Owner = v
+	return s
+}
+
+type ListMultipartUploadsInput struct {
 	_ struct{} `type:"structure"`

-	Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Character you use to group keys.
+	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+	// Requests Amazon S3 to encode the object keys in the response and specifies
+	// the encoding method to use. An object key may contain any Unicode character;
+	// however, XML 1.0 parser cannot parse some characters, such as characters
+	// with an ASCII value from 0 to 10. For characters that are not supported in
+	// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+	// keys in the response.
+	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+	// Together with upload-id-marker, this parameter specifies the multipart upload
+	// after which listing should begin.
+	KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+	// Sets the maximum number of multipart uploads, from 1 to 1,000, to return
+	// in the response body. 1,000 is the maximum number of uploads that can be
+	// returned in a response.
+	MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"`
+
+	// Lists in-progress uploads only for those keys that begin with the specified
+	// prefix.
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+	// Together with key-marker, specifies the multipart upload after which listing
+	// should begin. If key-marker is not specified, the upload-id-marker parameter
+	// is ignored.
+	UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"`
 }

 // String returns the string representation
-func (s CompletedMultipartUpload) String() string {
+func (s ListMultipartUploadsInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CompletedMultipartUpload) GoString() string {
+func (s ListMultipartUploadsInput) GoString() string {
 	return s.String()
 }

-type CompletedPart struct {
-	_ struct{} `type:"structure"`
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMultipartUploadsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}

-	// Entity tag returned when the part was uploaded.
-	ETag *string `type:"string"`
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}

-	// Part number that identifies the part. This is a positive integer between
-	// 1 and 10,000.
-	PartNumber *int64 `type:"integer"`
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput {
+	s.Bucket = &v
+	return s
 }

-// String returns the string representation
-func (s CompletedPart) String() string {
-	return awsutil.Prettify(s)
+func (s *ListMultipartUploadsInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
 }

-// GoString returns the string representation
-func (s CompletedPart) GoString() string {
-	return s.String()
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput {
+	s.Delimiter = &v
+	return s
 }

-type Condition struct {
-	_ struct{} `type:"structure"`
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput {
+	s.EncodingType = &v
+	return s
+}

-	// The HTTP error code when the redirect is applied. In the event of an error,
-	// if the error code equals this value, then the specified redirect is applied.
-	// Required when parent element Condition is specified and sibling KeyPrefixEquals
-	// is not specified. If both are specified, then both must be true for the redirect
-	// to be applied.
-	HttpErrorCodeReturnedEquals *string `type:"string"`
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput {
+	s.KeyMarker = &v
+	return s
+}

-	// The object key name prefix when the redirect is applied. For example, to
-	// redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
-	// To redirect request for all pages with the prefix docs/, the key prefix will
-	// be /docs, which identifies all objects in the docs/ folder. Required when
-	// the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
-	// is not specified. If both conditions are specified, both must be true for
-	// the redirect to be applied.
-	KeyPrefixEquals *string `type:"string"`
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput {
+	s.MaxUploads = &v
+	return s
 }

-// String returns the string representation
-func (s Condition) String() string {
-	return awsutil.Prettify(s)
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput {
+	s.Prefix = &v
+	return s
 }

-// GoString returns the string representation
-func (s Condition) GoString() string {
-	return s.String()
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput {
+	s.UploadIdMarker = &v
+	return s
 }

-type CopyObjectInput struct {
+type ListMultipartUploadsOutput struct {
 	_ struct{} `type:"structure"`

-	// The canned ACL to apply to the object.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+	// Name of the bucket to which the multipart upload was initiated.
+	Bucket *string `type:"string"`

-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`

-	// Specifies caching behavior along the request/reply chain.
-	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+	Delimiter *string `type:"string"`

-	// Specifies presentational information for the object.
-	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+	// Encoding type used by Amazon S3 to encode object keys in the response.
+	EncodingType *string `type:"string" enum:"EncodingType"`

-	// Specifies what content encodings have been applied to the object and thus
-	// what decoding mechanisms must be applied to obtain the media-type referenced
-	// by the Content-Type header field.
-	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+	// Indicates whether the returned list of multipart uploads is truncated. A
+	// value of true indicates that the list was truncated. The list can be truncated
+	// if the number of multipart uploads exceeds the limit allowed or specified
+	// by max uploads.
+	IsTruncated *bool `type:"boolean"`

-	// The language the content is in.
-	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+	// The key at or after which the listing began.
+	KeyMarker *string `type:"string"`

-	// A standard MIME type describing the format of the object data.
-	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+	// Maximum number of multipart uploads that could have been included in the
+	// response.
+	MaxUploads *int64 `type:"integer"`

-	// The name of the source bucket and key name of the source object, separated
-	// by a slash (/). Must be URL-encoded.
-	CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+	// When a list is truncated, this element specifies the value that should be
+	// used for the key-marker request parameter in a subsequent request.
+	NextKeyMarker *string `type:"string"`

-	// Copies the object if its entity tag (ETag) matches the specified tag.
-	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+	// When a list is truncated, this element specifies the value that should be
+	// used for the upload-id-marker request parameter in a subsequent request.
+	NextUploadIdMarker *string `type:"string"`

-	// Copies the object if it has been modified since the specified time.
-	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+	// When a prefix is provided in the request, this field contains the specified
+	// prefix. The result contains only keys starting with the specified prefix.
+	Prefix *string `type:"string"`

-	// Copies the object if its entity tag (ETag) is different than the specified
-	// ETag.
-	CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+	// Upload ID after which listing began.
+	UploadIdMarker *string `type:"string"`

-	// Copies the object if it hasn't been modified since the specified time.
-	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+	Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
+}

-	// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
-	CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+// String returns the string representation
+func (s ListMultipartUploadsOutput) String() string {
+	return awsutil.Prettify(s)
+}

-	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
-	// the source object. The encryption key provided in this header must be one
-	// that was used when the source object was created.
-	CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+// GoString returns the string representation
+func (s ListMultipartUploadsOutput) GoString() string {
+	return s.String()
+}

-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput {
+	s.Bucket = &v
+	return s
+}

-	// The date and time at which the object is no longer cacheable.
-	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+func (s *ListMultipartUploadsOutput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
-	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput {
+	s.CommonPrefixes = v
+	return s
+}

-	// Allows grantee to read the object data and its metadata.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput {
+	s.Delimiter = &v
+	return s
+}

-	// Allows grantee to read the object ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput {
+	s.EncodingType = &v
+	return s
+}

-	// Allows grantee to write the ACL for the applicable object.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput {
+	s.IsTruncated = &v
+	return s
+}

-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput {
+	s.KeyMarker = &v
+	return s
+}

-	// A map of metadata to store with the object in S3.
-	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput {
+	s.MaxUploads = &v
+	return s
+}

-	// Specifies whether the metadata is copied from the source object or replaced
-	// with metadata provided in the request.
-	MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput {
+	s.NextKeyMarker = &v
+	return s
+}

-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+// SetNextUploadIdMarker sets the NextUploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput {
+	s.NextUploadIdMarker = &v
+	return s
+}

-	// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput {
+	s.Prefix = &v
+	return s
+}

-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
-	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput {
+	s.UploadIdMarker = &v
+	return s
+}

-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+// SetUploads sets the Uploads field's value.
+func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput {
+	s.Uploads = v
+	return s
+}

-	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
-	// requests for an object protected by AWS KMS will fail if not made via SSL
-	// or using SigV4. Documentation on configuring any of the officially supported
-	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+type ListObjectVersionsInput struct {
+	_ struct{} `type:"structure"`

-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

-	// The type of storage to use for the object. Defaults to 'STANDARD'.
-	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+	// A delimiter is a character you use to group keys.
+	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`

-	// If the bucket is configured as a website, redirects requests for this object
-	// to another object in the same bucket or to an external URL. Amazon S3 stores
-	// the value of this header in the object metadata.
-	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+	// Requests Amazon S3 to encode the object keys in the response and specifies
+	// the encoding method to use. An object key may contain any Unicode character;
+	// however, XML 1.0 parser cannot parse some characters, such as characters
+	// with an ASCII value from 0 to 10. For characters that are not supported in
+	// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+	// keys in the response.
+	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+	// Specifies the key to start with when listing objects in a bucket.
+	KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+	// Sets the maximum number of keys returned in the response. The response might
+	// contain fewer keys but will never contain more.
+	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+	// Limits the response to keys that begin with the specified prefix.
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+	// Specifies the object version you want to start listing from.
+	VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"`
 }

 // String returns the string representation
-func (s CopyObjectInput) String() string {
+func (s ListObjectVersionsInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CopyObjectInput) GoString() string {
+func (s ListObjectVersionsInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *CopyObjectInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
+func (s *ListObjectVersionsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.CopySource == nil {
-		invalidParams.Add(request.NewErrParamRequired("CopySource"))
-	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -3770,148 +14689,228 @@ func (s *CopyObjectInput) Validate() error {
 	return nil
 }

-type CopyObjectOutput struct {
-	_ struct{} `type:"structure" payload:"CopyObjectResult"`
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput {
+	s.Bucket = &v
+	return s
+}

-	CopyObjectResult *CopyObjectResult `type:"structure"`
+func (s *ListObjectVersionsInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput {
+	s.Delimiter = &v
+	return s
+}

-	// If the object expiration is configured, the response includes this header.
-	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput {
+	s.EncodingType = &v
+	return s
+}

-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput {
+	s.KeyMarker = &v
+	return s
+}

-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header confirming the encryption algorithm
-	// used.
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput {
+	s.MaxKeys = &v
+	return s
+}

-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header to provide round trip message integrity
-	// verification of the customer-provided encryption key.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput {
+	s.Prefix = &v
+	return s
+}

-	// If present, specifies the ID of the AWS Key Management Service (KMS) master
-	// encryption key that was used for the object.
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput {
+	s.VersionIdMarker = &v
+	return s
+}

-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+type ListObjectVersionsOutput struct {
+	_ struct{} `type:"structure"`

-	// Version ID of the newly created copy.
-	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+	DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`
+
+	Delimiter *string `type:"string"`
+
+	// Encoding type used by Amazon S3 to encode object keys in the response.
+	EncodingType *string `type:"string" enum:"EncodingType"`
+
+	// A flag that indicates whether or not Amazon S3 returned all of the results
+	// that satisfied the search criteria. If your results were truncated, you can
+	// make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+	// response parameters as a starting place in another request to return the
+	// rest of the results.
+	IsTruncated *bool `type:"boolean"`
+
+	// Marks the last Key returned in a truncated response.
+	KeyMarker *string `type:"string"`
+
+	MaxKeys *int64 `type:"integer"`
+
+	Name *string `type:"string"`
+
+	// Use this value for the key marker request parameter in a subsequent request.
+	NextKeyMarker *string `type:"string"`
+
+	// Use this value for the next version id marker parameter in a subsequent request.
+	NextVersionIdMarker *string `type:"string"`
+
+	Prefix *string `type:"string"`
+
+	VersionIdMarker *string `type:"string"`
+
+	Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
 }

 // String returns the string representation
-func (s CopyObjectOutput) String() string {
+func (s ListObjectVersionsOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CopyObjectOutput) GoString() string {
+func (s ListObjectVersionsOutput) GoString() string {
 	return s.String()
 }

-type CopyObjectResult struct {
-	_ struct{} `type:"structure"`
-
-	ETag *string `type:"string"`
-
-	LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput {
+	s.CommonPrefixes = v
+	return s
 }

-// String returns the string representation
-func (s CopyObjectResult) String() string {
-	return awsutil.Prettify(s)
+// SetDeleteMarkers sets the DeleteMarkers field's value.
+func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput {
+	s.DeleteMarkers = v
+	return s
 }

-// GoString returns the string representation
-func (s CopyObjectResult) GoString() string {
-	return s.String()
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput {
+	s.Delimiter = &v
+	return s
 }

-type CopyPartResult struct {
-	_ struct{} `type:"structure"`
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput {
+	s.EncodingType = &v
+	return s
+}

-	// Entity tag of the object.
-	ETag *string `type:"string"`
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput {
+	s.IsTruncated = &v
+	return s
+}

-	// Date and time at which the object was uploaded.
-	LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput {
+	s.KeyMarker = &v
+	return s
 }

-// String returns the string representation
-func (s CopyPartResult) String() string {
-	return awsutil.Prettify(s)
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput {
+	s.MaxKeys = &v
+	return s
 }

-// GoString returns the string representation
-func (s CopyPartResult) GoString() string {
-	return s.String()
+// SetName sets the Name field's value.
+func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput {
+	s.Name = &v
+	return s
 }

-type CreateBucketConfiguration struct {
-	_ struct{} `type:"structure"`
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput {
+	s.NextKeyMarker = &v
+	return s
+}

-	// Specifies the region where the bucket will be created. If you don't specify
-	// a region, the bucket will be created in US Standard.
-	LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+// SetNextVersionIdMarker sets the NextVersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput {
+	s.NextVersionIdMarker = &v
+	return s
 }

-// String returns the string representation
-func (s CreateBucketConfiguration) String() string {
-	return awsutil.Prettify(s)
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput {
+	s.Prefix = &v
+	return s
 }

-// GoString returns the string representation
-func (s CreateBucketConfiguration) GoString() string {
-	return s.String()
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput {
+	s.VersionIdMarker = &v
+	return s
 }

-type CreateBucketInput struct {
-	_ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
+// SetVersions sets the Versions field's value.
+func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput {
+	s.Versions = v
+	return s
+}

-	// The canned ACL to apply to the bucket.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+type ListObjectsInput struct {
+	_ struct{} `type:"structure"`

+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

-	CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"`
+	// A delimiter is a character you use to group keys.
+	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`

-	// Allows grantee the read, write, read ACP, and write ACP permissions on the
-	// bucket.
-	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+	// Requests Amazon S3 to encode the object keys in the response and specifies
+	// the encoding method to use. An object key may contain any Unicode character;
+	// however, XML 1.0 parser cannot parse some characters, such as characters
+	// with an ASCII value from 0 to 10. For characters that are not supported in
+	// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+	// keys in the response.
+	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`

-	// Allows grantee to list the objects in the bucket.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+	// Specifies the key to start with when listing objects in a bucket.
+	Marker *string `location:"querystring" locationName:"marker" type:"string"`

-	// Allows grantee to read the bucket ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+	// Sets the maximum number of keys returned in the response. The response might
+	// contain fewer keys but will never contain more.
+	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`

-	// Allows grantee to create, overwrite, and delete any object in the bucket.
-	GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+	// Limits the response to keys that begin with the specified prefix.
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`

-	// Allows grantee to write the ACL for the applicable bucket.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+	// Confirms that the requester knows that she or he will be charged for the
+	// list objects request. Bucket owners need not specify this parameter in their
+	// requests.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
 }

 // String returns the string representation
-func (s CreateBucketInput) String() string {
+func (s ListObjectsInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CreateBucketInput) GoString() string {
+func (s ListObjectsInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateBucketInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
+func (s *ListObjectsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
@@ -3922,129 +14921,216 @@ func (s *CreateBucketInput) Validate() error {
 	return nil
 }

-type CreateBucketOutput struct {
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *ListObjectsInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
+	s.Delimiter = &v
+	return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput {
+	s.EncodingType = &v
+	return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput {
+	s.Marker = &v
+	return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput {
+	s.MaxKeys = &v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput {
+	s.Prefix = &v
+	return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
+	s.RequestPayer = &v
+	return s
+}
+
+type ListObjectsOutput struct {
 	_ struct{} `type:"structure"`

-	Location *string `location:"header" locationName:"Location" type:"string"`
+	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+	Contents []*Object `type:"list" flattened:"true"`
+
+	Delimiter *string `type:"string"`
+
+	// Encoding type used by Amazon S3 to encode object keys in the response.
+	EncodingType *string `type:"string" enum:"EncodingType"`
+
+	// A flag that indicates whether or not Amazon S3 returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated *bool `type:"boolean"`
+
+	Marker *string `type:"string"`
+
+	MaxKeys *int64 `type:"integer"`
+
+	Name *string `type:"string"`
+
+	// When response is truncated (the IsTruncated element value in the response
+	// is true), you can use the key name in this field as marker in the subsequent
+	// request to get next set of objects. Amazon S3 lists objects in alphabetical
+	// order Note: This element is returned only if you have delimiter request parameter
+	// specified. If response does not include the NextMaker and it is truncated,
+	// you can use the value of the last Key in the response as the marker in the
+	// subsequent request to get the next set of object keys.
+	NextMarker *string `type:"string"`
+
+	Prefix *string `type:"string"`
 }

 // String returns the string representation
-func (s CreateBucketOutput) String() string {
+func (s ListObjectsOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CreateBucketOutput) GoString() string {
+func (s ListObjectsOutput) GoString() string {
 	return s.String()
 }

-type CreateMultipartUploadInput struct {
-	_ struct{} `type:"structure"`
-
-	// The canned ACL to apply to the object.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
-
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
-
-	// Specifies caching behavior along the request/reply chain.
-	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput {
+	s.CommonPrefixes = v
+	return s
+}

-	// Specifies presentational information for the object.
-	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+// SetContents sets the Contents field's value.
+func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput {
+	s.Contents = v
+	return s
+}

-	// Specifies what content encodings have been applied to the object and thus
-	// what decoding mechanisms must be applied to obtain the media-type referenced
-	// by the Content-Type header field.
-	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput {
+	s.Delimiter = &v
+	return s
+}

-	// The language the content is in.
-	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput {
+	s.EncodingType = &v
+	return s
+}

-	// A standard MIME type describing the format of the object data.
-	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput {
+	s.IsTruncated = &v
+	return s
+}

-	// The date and time at which the object is no longer cacheable.
-	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput {
+	s.Marker = &v
+	return s
+}

-	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
-	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput {
+	s.MaxKeys = &v
+	return s
+}

-	// Allows grantee to read the object data and its metadata.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+// SetName sets the Name field's value.
+func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput {
+	s.Name = &v
+	return s
+}

-	// Allows grantee to read the object ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+// SetNextMarker sets the NextMarker field's value.
+func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput {
+	s.NextMarker = &v
+	return s
+}

-	// Allows grantee to write the ACL for the applicable object.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
+	s.Prefix = &v
+	return s
+}

-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+type ListObjectsV2Input struct {
+	_ struct{} `type:"structure"`

-	// A map of metadata to store with the object in S3.
-	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+	// Name of the bucket to list.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+	// ContinuationToken indicates Amazon S3 that the list is being continued on
+	// this bucket with a token. ContinuationToken is obfuscated and is not a real
+	// key
+	ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`

-	// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+	// A delimiter is a character you use to group keys.
+	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`

-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
-	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+	// Encoding type used by Amazon S3 to encode object keys in the response.
+	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`

-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+	// The owner field is not present in listV2 by default, if you want to return
+	// owner field with each key in the result then set the fetch owner field to
+	// true
+	FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`

-	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
-	// requests for an object protected by AWS KMS will fail if not made via SSL
-	// or using SigV4. Documentation on configuring any of the officially supported
-	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+	// Sets the maximum number of keys returned in the response. The response might
+	// contain fewer keys but will never contain more.
+	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`

-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+	// Limits the response to keys that begin with the specified prefix.
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`

-	// The type of storage to use for the object. Defaults to 'STANDARD'.
-	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+	// Confirms that the requester knows that she or he will be charged for the
+	// list objects request in V2 style. Bucket owners need not specify this parameter
+	// in their requests.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

-	// If the bucket is configured as a website, redirects requests for this object
-	// to another object in the same bucket or to an external URL. Amazon S3 stores
-	// the value of this header in the object metadata.
-	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+	// StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+	// listing after this specified key. StartAfter can be any key in the bucket
+	StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
 }

 // String returns the string representation
-func (s CreateMultipartUploadInput) String() string {
+func (s ListObjectsV2Input) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s CreateMultipartUploadInput) GoString() string {
+func (s ListObjectsV2Input) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateMultipartUploadInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
+func (s *ListObjectsV2Input) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
-	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -4052,166 +15138,253 @@ func (s *CreateMultipartUploadInput) Validate() error {
 	return nil
 }

-type CreateMultipartUploadOutput struct {
-	_ struct{} `type:"structure"`
-
-	// Date when multipart upload will become eligible for abort operation by lifecycle.
-	AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Id of the lifecycle rule that makes a multipart upload eligible for abort
-	// operation.
-	AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
-
-	// Name of the bucket to which the multipart upload was initiated.
-	Bucket *string `locationName:"Bucket" type:"string"`
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
+	s.Bucket = &v
+	return s
+}

-	// Object key for which the multipart upload was initiated.
-	Key *string `min:"1" type:"string"`
+func (s *ListObjectsV2Input) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
+	s.ContinuationToken = &v
+	return s
+}

-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header confirming the encryption algorithm
-	// used.
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input {
+	s.Delimiter = &v
+	return s
+}

-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header to provide round trip message integrity
-	// verification of the customer-provided encryption key.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input {
+	s.EncodingType = &v
+	return s
+}

-	// If present, specifies the ID of the AWS Key Management Service (KMS) master
-	// encryption key that was used for the object.
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+// SetFetchOwner sets the FetchOwner field's value.
+func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input {
+	s.FetchOwner = &v
+	return s
+}

-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input {
+	s.MaxKeys = &v
+	return s
+}

-	// ID for the initiated multipart upload.
-	UploadId *string `type:"string"`
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input {
+	s.Prefix = &v
+	return s
 }

-// String returns the string representation
-func (s CreateMultipartUploadOutput) String() string {
-	return awsutil.Prettify(s)
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input {
+	s.RequestPayer = &v
+	return s
 }

-// GoString returns the string representation
-func (s CreateMultipartUploadOutput) GoString() string {
-	return s.String()
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
+	s.StartAfter = &v
+	return s
 }

-type Delete struct {
+type ListObjectsV2Output struct {
 	_ struct{} `type:"structure"`

-	Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
+	// CommonPrefixes contains all (if there are any) keys between Prefix and the
+	// next occurrence of the string specified by delimiter
+	CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`

-	// Element to enable quiet mode for the request. When you add this element,
-	// you must set its value to true.
-	Quiet *bool `type:"boolean"`
+	// Metadata about each object returned.
+	Contents []*Object `type:"list" flattened:"true"`
+
+	// ContinuationToken indicates Amazon S3 that the list is being continued on
+	// this bucket with a token. ContinuationToken is obfuscated and is not a real
+	// key
+	ContinuationToken *string `type:"string"`
+
+	// A delimiter is a character you use to group keys.
+	Delimiter *string `type:"string"`
+
+	// Encoding type used by Amazon S3 to encode object keys in the response.
+	EncodingType *string `type:"string" enum:"EncodingType"`
+
+	// A flag that indicates whether or not Amazon S3 returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated *bool `type:"boolean"`
+
+	// KeyCount is the number of keys returned with this request. KeyCount will
+	// always be less than equals to MaxKeys field. Say you ask for 50 keys, your
+	// result will include less than equals 50 keys
+	KeyCount *int64 `type:"integer"`
+
+	// Sets the maximum number of keys returned in the response. The response might
+	// contain fewer keys but will never contain more.
+	MaxKeys *int64 `type:"integer"`
+
+	// Name of the bucket to list.
+	Name *string `type:"string"`
+
+	// NextContinuationToken is sent when isTruncated is true which means there
+	// are more keys in the bucket that can be listed. The next list requests to
+	// Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
+	// is obfuscated and is not a real key
+	NextContinuationToken *string `type:"string"`
+
+	// Limits the response to keys that begin with the specified prefix.
+	Prefix *string `type:"string"`
+
+	// StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+	// listing after this specified key. StartAfter can be any key in the bucket
+	StartAfter *string `type:"string"`
 }

 // String returns the string representation
-func (s Delete) String() string {
+func (s ListObjectsV2Output) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s Delete) GoString() string {
+func (s ListObjectsV2Output) GoString() string {
 	return s.String()
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Delete) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "Delete"}
-	if s.Objects == nil {
-		invalidParams.Add(request.NewErrParamRequired("Objects"))
-	}
-	if s.Objects != nil {
-		for i, v := range s.Objects {
-			if v == nil {
-				continue
-			}
-			if err := v.Validate(); err != nil {
-				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams))
-			}
-		}
-	}
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output {
+	s.CommonPrefixes = v
+	return s
+}

-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
+// SetContents sets the Contents field's value.
+func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output {
+	s.Contents = v
+	return s
 }

-type DeleteBucketCorsInput struct {
-	_ struct{} `type:"structure"`
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output {
+	s.ContinuationToken = &v
+	return s
+}

-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output {
+	s.Delimiter = &v
+	return s
 }

-// String returns the string representation
-func (s DeleteBucketCorsInput) String() string {
-	return awsutil.Prettify(s)
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output {
+	s.EncodingType = &v
+	return s
 }

-// GoString returns the string representation
-func (s DeleteBucketCorsInput) GoString() string {
-	return s.String()
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output {
+	s.IsTruncated = &v
+	return s
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteBucketCorsInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
+// SetKeyCount sets the KeyCount field's value.
+func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output {
+	s.KeyCount = &v
+	return s
+}

-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output {
+	s.MaxKeys = &v
+	return s
 }

-type DeleteBucketCorsOutput struct {
-	_ struct{} `type:"structure"`
+// SetName sets the Name field's value.
+func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output {
+	s.Name = &v
+	return s
 }

-// String returns the string representation
-func (s DeleteBucketCorsOutput) String() string {
-	return awsutil.Prettify(s)
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output {
+	s.NextContinuationToken = &v
+	return s
 }

-// GoString returns the string representation
-func (s DeleteBucketCorsOutput) GoString() string {
-	return s.String()
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output {
+	s.Prefix = &v
+	return s
 }

-type DeleteBucketInput struct {
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
+	s.StartAfter = &v
+	return s
+}
+
+type ListPartsInput struct {
 	_ struct{} `type:"structure"`

+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Sets the maximum number of parts to return.
+	MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+	// Specifies the part after which listing should begin. Only parts with higher
+	// part numbers will be listed.
+	PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Upload ID identifying the multipart upload whose parts are being listed.
+	//
+	// UploadId is a required field
+	UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
 }

 // String returns the string representation
-func (s DeleteBucketInput) String() string {
+func (s ListPartsInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s DeleteBucketInput) GoString() string {
+func (s ListPartsInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteBucketInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"}
+func (s *ListPartsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.UploadId == nil {
+		invalidParams.Add(request.NewErrParamRequired("UploadId"))
+	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -4219,170 +15392,270 @@ func (s *DeleteBucketInput) Validate() error {
 	return nil
 }

-type DeleteBucketLifecycleInput struct {
-	_ struct{} `type:"structure"`
-
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
+	s.Bucket = &v
+	return s
 }

-// String returns the string representation
-func (s DeleteBucketLifecycleInput) String() string {
-	return awsutil.Prettify(s)
+func (s *ListPartsInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
 }

-// GoString returns the string representation
-func (s DeleteBucketLifecycleInput) GoString() string {
-	return s.String()
+// SetKey sets the Key field's value.
+func (s *ListPartsInput) SetKey(v string) *ListPartsInput {
+	s.Key = &v
+	return s
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteBucketLifecycleInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput {
+	s.MaxParts = &v
+	return s
 }

-type DeleteBucketLifecycleOutput struct {
-	_ struct{} `type:"structure"`
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput {
+	s.PartNumberMarker = &v
+	return s
 }

-// String returns the string representation
-func (s DeleteBucketLifecycleOutput) String() string {
-	return awsutil.Prettify(s)
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput {
+	s.RequestPayer = &v
+	return s
 }

-// GoString returns the string representation
-func (s DeleteBucketLifecycleOutput) GoString() string {
-	return s.String()
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput {
+	s.UploadId = &v
+	return s
 }

-type DeleteBucketOutput struct {
+type ListPartsOutput struct {
 	_ struct{} `type:"structure"`
+
+	// Date when multipart upload will become eligible for abort operation by lifecycle.
+	AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"`
+
+	// Id of the lifecycle rule that makes a multipart upload eligible for abort
+	// operation.
+	AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+	// Name of the bucket to which the multipart upload was initiated.
+	Bucket *string `type:"string"`
+
+	// Identifies who initiated the multipart upload.
+	Initiator *Initiator `type:"structure"`
+
+	// Indicates whether the returned list of parts is truncated.
+	IsTruncated *bool `type:"boolean"`
+
+	// Object key for which the multipart upload was initiated.
+	Key *string `min:"1" type:"string"`
+
+	// Maximum number of parts that were allowed in the response.
+	MaxParts *int64 `type:"integer"`
+
+	// When a list is truncated, this element specifies the last part in the list,
+	// as well as the value to use for the part-number-marker request parameter
+	// in a subsequent request.
+	NextPartNumberMarker *int64 `type:"integer"`
+
+	Owner *Owner `type:"structure"`
+
+	// Part number after which listing begins.
+	PartNumberMarker *int64 `type:"integer"`
+
+	Parts []*Part `locationName:"Part" type:"list" flattened:"true"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+	// The class of storage used to store the object.
+	StorageClass *string `type:"string" enum:"StorageClass"`
+
+	// Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string `type:"string"` } // String returns the string representation -func (s DeleteBucketOutput) String() string { +func (s ListPartsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketOutput) GoString() string { +func (s ListPartsOutput) GoString() string { return s.String() } -type DeleteBucketPolicyInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v + return s } -// String returns the string representation -func (s DeleteBucketPolicyInput) String() string { - return awsutil.Prettify(s) +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v + return s } -// GoString returns the string representation -func (s DeleteBucketPolicyInput) GoString() string { - return s.String() +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} +func (s *ListPartsOutput) getBucket() (v string) { if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams + return v } - return nil + return *s.Bucket } -type DeleteBucketPolicyOutput struct { - _ struct{} `type:"structure"` +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v + return s } -// String returns the string representation -func (s DeleteBucketPolicyOutput) String() string { - return awsutil.Prettify(s) +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v + return s } -// GoString returns the string representation -func (s DeleteBucketPolicyOutput) GoString() string { - return s.String() +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s } -type DeleteBucketReplicationInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s } -// String returns the string representation -func (s DeleteBucketReplicationInput) String() string { - return awsutil.Prettify(s) +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. +func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s } -// GoString returns the string representation -func (s DeleteBucketReplicationInput) GoString() string { - return s.String() +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s } -type DeleteBucketReplicationOutput struct { - _ struct{} `type:"structure"` +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s } -// String returns the string representation -func (s DeleteBucketReplicationOutput) String() string { - return awsutil.Prettify(s) +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s } -// GoString returns the string representation -func (s DeleteBucketReplicationOutput) GoString() string { - return s.String() +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s } -type DeleteBucketTaggingInput struct { +// Describes an S3 location that will receive the results of the restore request. +type Location struct { _ struct{} `type:"structure"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Describes the server-side encryption that will be applied to the restore + // results. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` } // String returns the string representation -func (s DeleteBucketTaggingInput) String() string { +func (s Location) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketTaggingInput) GoString() string { +func (s Location) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
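ListPartsInput and ListPartsOutput follow the same cursor pattern, keyed on part numbers rather than an opaque token. A hypothetical helper that collects every part of an in-progress upload, assuming an uploadID previously returned by CreateMultipartUpload:

package sketches // hypothetical illustration package

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listAllParts pages through the parts of an in-progress multipart upload.
func listAllParts(svc *s3.S3, bucket, key, uploadID string) ([]*s3.Part, error) {
	var parts []*s3.Part
	input := &s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
		MaxParts: aws.Int64(100), // the server may return fewer, never more
	}
	for {
		out, err := svc.ListParts(input)
		if err != nil {
			return nil, err
		}
		parts = append(parts, out.Parts...)
		if !aws.BoolValue(out.IsTruncated) {
			return parts, nil
		}
		// Resume after the last part returned on this page.
		input.PartNumberMarker = out.NextPartNumberMarker
	}
}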
-func (s *DeleteBucketTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -4391,41 +15664,107 @@ func (s *DeleteBucketTaggingInput) Validate() error { return nil } -type DeleteBucketTaggingOutput struct { - _ struct{} `type:"structure"` +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v + return s } -// String returns the string representation -func (s DeleteBucketTaggingOutput) String() string { - return awsutil.Prettify(s) +// SetBucketName sets the BucketName field's value. +func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v + return s } -// GoString returns the string representation -func (s DeleteBucketTaggingOutput) GoString() string { - return s.String() +// SetCannedACL sets the CannedACL field's value. +func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s } -type DeleteBucketWebsiteInput struct { +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// Container for logging information. Presence of this element indicates that +// logging is enabled. Parameters TargetBucket and TargetPrefix are required +// in this case. +type LoggingEnabled struct { _ struct{} `type:"structure"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. 
+ // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` } // String returns the string representation -func (s DeleteBucketWebsiteInput) String() string { +func (s LoggingEnabled) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketWebsiteInput) GoString() string { +func (s LoggingEnabled) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketWebsiteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -4434,91 +15773,87 @@ func (s *DeleteBucketWebsiteInput) Validate() error { return nil } -type DeleteBucketWebsiteOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketWebsiteOutput) GoString() string { - return s.String() -} - -type DeleteMarkerEntry struct { - _ struct{} `type:"structure"` +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} - // The object key. - Key *string `min:"1" type:"string"` +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + _ struct{} `type:"structure"` - Owner *Owner `type:"structure"` + Name *string `type:"string"` - // Version ID of an object. 
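LoggingEnabled's Validate enforces that TargetBucket and TargetPrefix are both set whenever the element is present. A hedged sketch of wiring it through PutBucketLogging, with hypothetical source and target bucket names:

package sketches // illustration only

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableAccessLogs turns on server access logging for srcBucket, delivering
// logs to logBucket under a per-source prefix so logs from multiple source
// buckets sharing one target stay distinguishable by key.
func enableAccessLogs(svc *s3.S3, srcBucket, logBucket string) error {
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String(srcBucket),
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String(logBucket),
				TargetPrefix: aws.String(srcBucket + "/"),
			},
		},
	})
	return err
}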
- VersionId *string `type:"string"` + Value *string `type:"string"` } // String returns the string representation -func (s DeleteMarkerEntry) String() string { +func (s MetadataEntry) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteMarkerEntry) GoString() string { +func (s MetadataEntry) GoString() string { return s.String() } -type DeleteObjectInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` +// SetValue sets the Value field's value. +func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` +type MetricsAndOperator struct { + _ struct{} `type:"structure"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` - // VersionId used to reference a specific version of the object. - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` + // The list of tags used when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } // String returns the string representation -func (s DeleteObjectInput) String() string { +func (s MetricsAndOperator) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteObjectInput) GoString() string { +func (s MetricsAndOperator) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) +func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -4527,72 +15862,108 @@ func (s *DeleteObjectInput) Validate() error { return nil } -type DeleteObjectOutput struct { - _ struct{} `type:"structure"` +// SetPrefix sets the Prefix field's value. 
+func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v + return s +} - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v + return s +} - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +type MetricsConfiguration struct { + _ struct{} `type:"structure"` - // Returns the version ID of the delete marker created as a result of the DELETE - // operation. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // Specifies a metrics configuration filter. The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, a tag, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` } // String returns the string representation -func (s DeleteObjectOutput) String() string { +func (s MetricsConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteObjectOutput) GoString() string { +func (s MetricsConfiguration) GoString() string { return s.String() } -type DeleteObjectsInput struct { - _ struct{} `type:"structure" payload:"Delete"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - Delete *Delete `locationName:"Delete" type:"structure" required:"true"` +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v + return s +} - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v + return s +} - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +type MetricsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. 
The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` + + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` + + // The tag used when evaluating a metrics filter. + Tag *Tag `type:"structure"` } // String returns the string representation -func (s DeleteObjectsInput) String() string { +func (s MetricsFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteObjectsInput) GoString() string { +func (s MetricsFilter) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteObjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Delete == nil { - invalidParams.Add(request.NewErrParamRequired("Delete")) +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } } - if s.Delete != nil { - if err := s.Delete.Validate(); err != nil { - invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) } } @@ -4602,131 +15973,219 @@ func (s *DeleteObjectsInput) Validate() error { return nil } -type DeleteObjectsOutput struct { +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} + +type MultipartUpload struct { _ struct{} `type:"structure"` - Deleted []*DeletedObject `type:"list" flattened:"true"` + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp"` - Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` } // String returns the string representation -func (s DeleteObjectsOutput) String() string { +func (s MultipartUpload) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteObjectsOutput) GoString() string { +func (s MultipartUpload) GoString() string { return s.String() } -type DeletedObject struct { - _ struct{} `type:"structure"` - - DeleteMarker *bool `type:"boolean"` +// SetInitiated sets the Initiated field's value. 
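Per the comment above, a MetricsFilter is a prefix, a tag, or a MetricsAndOperator conjunction of at least two predicates. A sketch under those assumptions, with hypothetical bucket, ID, and tag values (the prefix plus one tag supplies the two predicates):

package sketches // illustration only

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// putDocsMetrics scopes a request-metrics configuration to objects that match
// a prefix AND a tag, the conjunction form accepted by MetricsAndOperator.
func putDocsMetrics(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String("docs-metrics"), // hypothetical configuration ID
		MetricsConfiguration: &s3.MetricsConfiguration{
			Id: aws.String("docs-metrics"),
			Filter: &s3.MetricsFilter{
				And: &s3.MetricsAndOperator{
					Prefix: aws.String("documents/"),
					Tags: []*s3.Tag{
						{Key: aws.String("team"), Value: aws.String("platform")},
					},
				},
			},
		},
	})
	return err
}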
+func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} - DeleteMarkerVersionId *string `type:"string"` +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} - Key *string `min:"1" type:"string"` +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} - VersionId *string `type:"string"` +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s } -// String returns the string representation -func (s DeletedObject) String() string { - return awsutil.Prettify(s) +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s } -// GoString returns the string representation -func (s DeletedObject) GoString() string { - return s.String() +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s } -type Destination struct { +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` - // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store - // replicas of the object identified by the rule. - Bucket *string `type:"string" required:"true"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` } // String returns the string representation -func (s Destination) String() string { +func (s NoncurrentVersionExpiration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Destination) GoString() string { +func (s NoncurrentVersionExpiration) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *Destination) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Destination"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s } -type Error struct { +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. 
If your +// bucket is versioning-enabled (or versioning is suspended), you can set this +// action to request that Amazon S3 transition noncurrent object versions to +// the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period +// in the object's lifetime. +type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` - Code *string `type:"string"` - - Key *string `min:"1" type:"string"` - - Message *string `type:"string"` + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` - VersionId *string `type:"string"` + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` } // String returns the string representation -func (s Error) String() string { +func (s NoncurrentVersionTransition) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Error) GoString() string { +func (s NoncurrentVersionTransition) GoString() string { return s.String() } -type ErrorDocument struct { +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. +type NotificationConfiguration struct { _ struct{} `type:"structure"` - // The object key name to use when a 4XX class error occurs. - Key *string `min:"1" type:"string" required:"true"` + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` } // String returns the string representation -func (s ErrorDocument) String() string { +func (s NotificationConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ErrorDocument) GoString() string { +func (s NotificationConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
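NoncurrentVersionExpiration and NoncurrentVersionTransition are consumed inside a LifecycleRule on a versioned bucket. A hedged sketch, with hypothetical day counts and rule ID, that tiers noncurrent versions to GLACIER and later expires them:

package sketches // illustration only

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// tierOldVersions moves noncurrent versions to GLACIER after 30 days and
// permanently deletes them once they have been noncurrent for 365 days.
func tierOldVersions(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("tier-old-versions"),
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")}, // whole bucket
				NoncurrentVersionTransitions: []*s3.NoncurrentVersionTransition{{
					NoncurrentDays: aws.Int64(30),
					StorageClass:   aws.String("GLACIER"),
				}},
				NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
					NoncurrentDays: aws.Int64(365),
				},
			}},
		},
	})
	return err
}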
-func (s *ErrorDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -4735,146 +16194,181 @@ func (s *ErrorDocument) Validate() error { return nil } -// Container for key value pair that defines the criteria for the filter rule. -type FilterRule struct { - _ struct{} `type:"structure"` - - // Object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. - // Overlapping prefixes and suffixes are not supported. For more information, - // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - Name *string `type:"string" enum:"FilterRuleName"` - - Value *string `type:"string"` +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v + return s } -// String returns the string representation -func (s FilterRule) String() string { - return awsutil.Prettify(s) +// SetQueueConfigurations sets the QueueConfigurations field's value. +func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v + return s } -// GoString returns the string representation -func (s FilterRule) GoString() string { - return s.String() +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v + return s } -type GetBucketAccelerateConfigurationInput struct { +type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` - // Name of the bucket for which the accelerate configuration is retrieved. 
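NotificationConfiguration's Validate above walks every queue, topic, and Lambda entry in turn. A sketch of publishing s3:ObjectCreated:* events under a key prefix to SQS, assuming a hypothetical queue ARN:

package sketches // illustration only

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// notifyUploads sends s3:ObjectCreated:* events for keys under uploads/
// to the given SQS queue.
func notifyUploads(svc *s3.S3, bucket, queueARN string) error {
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String(bucket),
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				QueueArn: aws.String(queueARN),
				Events:   []*string{aws.String("s3:ObjectCreated:*")},
				Filter: &s3.NotificationConfigurationFilter{
					Key: &s3.KeyFilter{
						FilterRules: []*s3.FilterRule{
							{Name: aws.String("prefix"), Value: aws.String("uploads/")},
						},
					},
				},
			}},
		},
	})
	return err
}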
- Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` } // String returns the string representation -func (s GetBucketAccelerateConfigurationInput) String() string { +func (s NotificationConfigurationDeprecated) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketAccelerateConfigurationInput) GoString() string { +func (s NotificationConfigurationDeprecated) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } +// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { + s.CloudFunctionConfiguration = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetQueueConfiguration sets the QueueConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.QueueConfiguration = v + return s } -type GetBucketAccelerateConfigurationOutput struct { +// SetTopicConfiguration sets the TopicConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.TopicConfiguration = v + return s +} + +// Container for object key name filtering rules. For information about key +// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` - // The accelerate configuration of the bucket. - Status *string `type:"string" enum:"BucketAccelerateStatus"` + // Container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` } // String returns the string representation -func (s GetBucketAccelerateConfigurationOutput) String() string { +func (s NotificationConfigurationFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketAccelerateConfigurationOutput) GoString() string { +func (s NotificationConfigurationFilter) GoString() string { return s.String() } -type GetBucketAclInput struct { +// SetKey sets the Key field's value. +func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { + s.Key = v + return s +} + +type Object struct { _ struct{} `type:"structure"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. 
+ StorageClass *string `type:"string" enum:"ObjectStorageClass"` } // String returns the string representation -func (s GetBucketAclInput) String() string { +func (s Object) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketAclInput) GoString() string { +func (s Object) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetETag sets the ETag field's value. +func (s *Object) SetETag(v string) *Object { + s.ETag = &v + return s } -type GetBucketAclOutput struct { - _ struct{} `type:"structure"` +// SetKey sets the Key field's value. +func (s *Object) SetKey(v string) *Object { + s.Key = &v + return s +} - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` +// SetLastModified sets the LastModified field's value. +func (s *Object) SetLastModified(v time.Time) *Object { + s.LastModified = &v + return s +} - Owner *Owner `type:"structure"` +// SetOwner sets the Owner field's value. +func (s *Object) SetOwner(v *Owner) *Object { + s.Owner = v + return s } -// String returns the string representation -func (s GetBucketAclOutput) String() string { - return awsutil.Prettify(s) +// SetSize sets the Size field's value. +func (s *Object) SetSize(v int64) *Object { + s.Size = &v + return s } -// GoString returns the string representation -func (s GetBucketAclOutput) GoString() string { - return s.String() +// SetStorageClass sets the StorageClass field's value. +func (s *Object) SetStorageClass(v string) *Object { + s.StorageClass = &v + return s } -type GetBucketCorsInput struct { +type ObjectIdentifier struct { _ struct{} `type:"structure"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Key name of the object to delete. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. + VersionId *string `type:"string"` } // String returns the string representation -func (s GetBucketCorsInput) String() string { +func (s ObjectIdentifier) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketCorsInput) GoString() string { +func (s ObjectIdentifier) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketCorsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -4883,88 +16377,128 @@ func (s *GetBucketCorsInput) Validate() error { return nil } -type GetBucketCorsOutput struct { +// SetKey sets the Key field's value. 
+func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v + return s +} + +type ObjectVersion struct { _ struct{} `type:"structure"` - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` } // String returns the string representation -func (s GetBucketCorsOutput) String() string { +func (s ObjectVersion) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketCorsOutput) GoString() string { +func (s ObjectVersion) GoString() string { return s.String() } -type GetBucketLifecycleConfigurationInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s } -// String returns the string representation -func (s GetBucketLifecycleConfigurationInput) String() string { - return awsutil.Prettify(s) +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s } -// GoString returns the string representation -func (s GetBucketLifecycleConfigurationInput) GoString() string { - return s.String() +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLifecycleConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s } -type GetBucketLifecycleConfigurationOutput struct { - _ struct{} `type:"structure"` +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} - Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s } -// String returns the string representation -func (s GetBucketLifecycleConfigurationOutput) String() string { - return awsutil.Prettify(s) +// SetStorageClass sets the StorageClass field's value. 
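ObjectIdentifier is the element type of Delete.Objects, so a batch delete builds one identifier per key. A sketch with hypothetical keys, using quiet mode so only failures are echoed back:

package sketches // illustration only

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// deleteBatch removes up to 1,000 keys in a single DeleteObjects round trip.
func deleteBatch(svc *s3.S3, bucket string, keys []string) error {
	ids := make([]*s3.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, &s3.ObjectIdentifier{Key: aws.String(k)})
	}
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &s3.Delete{
			Objects: ids,
			Quiet:   aws.Bool(true), // only failures come back in out.Errors
		},
	})
	if err != nil {
		return err
	}
	for _, e := range out.Errors {
		fmt.Printf("failed to delete %s: %s\n", aws.StringValue(e.Key), aws.StringValue(e.Message))
	}
	return nil
}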
+func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s } -// GoString returns the string representation -func (s GetBucketLifecycleConfigurationOutput) GoString() string { - return s.String() +// SetVersionId sets the VersionId field's value. +func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s } -type GetBucketLifecycleInput struct { +// Describes the location where the restore job's output is stored. +type OutputLocation struct { _ struct{} `type:"structure"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` } // String returns the string representation -func (s GetBucketLifecycleInput) String() string { +func (s OutputLocation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketLifecycleInput) GoString() string { +func (s OutputLocation) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLifecycleInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -4973,161 +16507,251 @@ func (s *GetBucketLifecycleInput) Validate() error { return nil } -type GetBucketLifecycleOutput struct { +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { _ struct{} `type:"structure"` - Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput `type:"structure"` } // String returns the string representation -func (s GetBucketLifecycleOutput) String() string { +func (s OutputSerialization) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketLifecycleOutput) GoString() string { +func (s OutputSerialization) GoString() string { return s.String() } -type GetBucketLocationInput struct { +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + +// SetJSON sets the JSON field's value. 
+func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} + +type Owner struct { _ struct{} `type:"structure"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + DisplayName *string `type:"string"` + + ID *string `type:"string"` } // String returns the string representation -func (s GetBucketLocationInput) String() string { +func (s Owner) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketLocationInput) GoString() string { +func (s Owner) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLocationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s } -type GetBucketLocationOutput struct { +type ParquetInput struct { _ struct{} `type:"structure"` - - LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } // String returns the string representation -func (s GetBucketLocationOutput) String() string { +func (s ParquetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketLocationOutput) GoString() string { +func (s ParquetInput) GoString() string { return s.String() } -type GetBucketLoggingInput struct { +type Part struct { _ struct{} `type:"structure"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` } // String returns the string representation -func (s GetBucketLoggingInput) String() string { +func (s Part) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketLoggingInput) GoString() string { +func (s Part) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetBucketLoggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v + return s } -type GetBucketLoggingOutput struct { +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. 
+func (s *Part) SetSize(v int64) *Part {
+	s.Size = &v
+	return s
+}
+
+type Progress struct {
 	_ struct{} `type:"structure"`

-	LoggingEnabled *LoggingEnabled `type:"structure"`
+	// Current number of uncompressed object bytes processed.
+	BytesProcessed *int64 `type:"long"`
+
+	// Current number of bytes of records payload data returned.
+	BytesReturned *int64 `type:"long"`
+
+	// Current number of object bytes scanned.
+	BytesScanned *int64 `type:"long"`
 }

 // String returns the string representation
-func (s GetBucketLoggingOutput) String() string {
+func (s Progress) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketLoggingOutput) GoString() string {
+func (s Progress) GoString() string {
 	return s.String()
 }

-type GetBucketNotificationConfigurationRequest struct {
-	_ struct{} `type:"structure"`
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Progress) SetBytesProcessed(v int64) *Progress {
+	s.BytesProcessed = &v
+	return s
+}

-	// Name of the bucket to get the notification configuration for.
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Progress) SetBytesReturned(v int64) *Progress {
+	s.BytesReturned = &v
+	return s
+}
+
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Progress) SetBytesScanned(v int64) *Progress {
+	s.BytesScanned = &v
+	return s
+}
+
+type ProgressEvent struct {
+	_ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"`
+
+	// The Progress event details.
+	Details *Progress `locationName:"Details" type:"structure"`
 }

 // String returns the string representation
-func (s GetBucketNotificationConfigurationRequest) String() string {
+func (s ProgressEvent) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketNotificationConfigurationRequest) GoString() string {
+func (s ProgressEvent) GoString() string {
 	return s.String()
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketNotificationConfigurationRequest) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
+// SetDetails sets the Details field's value.
+func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent {
+	s.Details = v
+	return s
+}

-	if invalidParams.Len() > 0 {
-		return invalidParams
+// The ProgressEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ProgressEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ProgressEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	if err := payloadUnmarshaler.UnmarshalPayload(
+		bytes.NewReader(msg.Payload), s,
+	); err != nil {
+		return err
 	}
 	return nil
 }

-type GetBucketPolicyInput struct {
-	_ struct{} `type:"structure"`
+type PutBucketAccelerateConfigurationInput struct {
+	_ struct{} `type:"structure" payload:"AccelerateConfiguration"`
+
+	// Specifies the Accelerate Configuration you want to set for the bucket.
-type GetBucketPolicyInput struct {
-	_ struct{} `type:"structure"`
+type PutBucketAccelerateConfigurationInput struct {
+	_ struct{} `type:"structure" payload:"AccelerateConfiguration"`
+
+	// Specifies the Accelerate Configuration you want to set for the bucket.
+	//
+	// AccelerateConfiguration is a required field
+	AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// Name of the bucket for which the accelerate configuration is set.
+	//
+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 }

 // String returns the string representation
-func (s GetBucketPolicyInput) String() string {
+func (s PutBucketAccelerateConfigurationInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketPolicyInput) GoString() string {
+func (s PutBucketAccelerateConfigurationInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketPolicyInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"}
+func (s *PutBucketAccelerateConfigurationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"}
+	if s.AccelerateConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration"))
+	}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
@@ -5138,45 +16762,88 @@ func (s *GetBucketPolicyInput) Validate() error {
 	return nil
 }

-type GetBucketPolicyOutput struct {
-	_ struct{} `type:"structure" payload:"Policy"`
+// SetAccelerateConfiguration sets the AccelerateConfiguration field's value.
+func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput {
+	s.AccelerateConfiguration = v
+	return s
+}

-	// The bucket policy as a JSON document.
-	Policy *string `type:"string"`
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+type PutBucketAccelerateConfigurationOutput struct {
+	_ struct{} `type:"structure"`
 }

 // String returns the string representation
-func (s GetBucketPolicyOutput) String() string {
+func (s PutBucketAccelerateConfigurationOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketPolicyOutput) GoString() string {
+func (s PutBucketAccelerateConfigurationOutput) GoString() string {
 	return s.String()
 }

-type GetBucketReplicationInput struct {
-	_ struct{} `type:"structure"`
+type PutBucketAclInput struct {
+	_ struct{} `type:"structure" payload:"AccessControlPolicy"`
+
+	// The canned ACL to apply to the bucket.
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+	AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Allows grantee the read, write, read ACP, and write ACP permissions on the
+	// bucket.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to list the objects in the bucket.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the bucket ACL.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to create, overwrite, and delete any object in the bucket.
+	GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable bucket.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
 }

 // String returns the string representation
-func (s GetBucketReplicationInput) String() string {
+func (s PutBucketAclInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketReplicationInput) GoString() string {
+func (s PutBucketAclInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketReplicationInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"}
+func (s *PutBucketAclInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
+	if s.AccessControlPolicy != nil {
+		if err := s.AccessControlPolicy.Validate(); err != nil {
+			invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+		}
+	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -5184,92 +16851,121 @@ func (s *GetBucketReplicationInput) Validate() error {
 	return nil
 }

-type GetBucketReplicationOutput struct {
-	_ struct{} `type:"structure" payload:"ReplicationConfiguration"`
-
-	// Container for replication rules. You can add as many as 1,000 rules. Total
-	// replication configuration size can be up to 2 MB.
-	ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
+// SetACL sets the ACL field's value.
+func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput {
+	s.ACL = &v
+	return s
 }

-// String returns the string representation
-func (s GetBucketReplicationOutput) String() string {
-	return awsutil.Prettify(s)
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput {
+	s.AccessControlPolicy = v
+	return s
 }

-// GoString returns the string representation
-func (s GetBucketReplicationOutput) GoString() string {
-	return s.String()
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput {
+	s.Bucket = &v
+	return s
 }

-type GetBucketRequestPaymentInput struct {
-	_ struct{} `type:"structure"`
+func (s *PutBucketAclInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput {
+	s.GrantFullControl = &v
+	return s
 }

-// String returns the string representation
-func (s GetBucketRequestPaymentInput) String() string {
-	return awsutil.Prettify(s)
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput {
+	s.GrantRead = &v
+	return s
 }

-// GoString returns the string representation
-func (s GetBucketRequestPaymentInput) GoString() string {
-	return s.String()
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput {
+	s.GrantReadACP = &v
+	return s
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketRequestPaymentInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput {
+	s.GrantWrite = &v
+	return s
+}

-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput {
+	s.GrantWriteACP = &v
+	return s
 }

-type GetBucketRequestPaymentOutput struct {
+type PutBucketAclOutput struct {
 	_ struct{} `type:"structure"`
-
-	// Specifies who pays for the download and request fees.
-	Payer *string `type:"string" enum:"Payer"`
 }

 // String returns the string representation
-func (s GetBucketRequestPaymentOutput) String() string {
+func (s PutBucketAclOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketRequestPaymentOutput) GoString() string {
+func (s PutBucketAclOutput) GoString() string {
 	return s.String()
 }

-type GetBucketTaggingInput struct {
-	_ struct{} `type:"structure"`
+type PutBucketAnalyticsConfigurationInput struct {
+	_ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
+
+	// The configuration and any analyses for the analytics filter.
+	//
+	// AnalyticsConfiguration is a required field
+	AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// The name of the bucket where the analytics configuration is stored.
+	//
+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The identifier used to represent an analytics configuration.
+	//
+	// Id is a required field
+	Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
 }

 // String returns the string representation
-func (s GetBucketTaggingInput) String() string {
+func (s PutBucketAnalyticsConfigurationInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketTaggingInput) GoString() string {
+func (s PutBucketAnalyticsConfigurationInput) GoString() string {
 	return s.String()
 }
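+
+// Example (editorial sketch, hypothetical usage; not part of the generated
+// file): the generated setters return their receiver, so inputs such as
+// PutBucketAclInput above can be built fluently and checked with Validate
+// before the request is sent. The bucket name is a placeholder and svc is
+// an assumed *S3 client:
+//
+//	input := (&PutBucketAclInput{}).
+//		SetBucket("example-bucket").
+//		SetACL("private")
+//	if err := input.Validate(); err != nil {
+//		return err
+//	}
+//	_, err := svc.PutBucketAcl(input)
+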
 // Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketTaggingInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"}
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"}
+	if s.AnalyticsConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration"))
+	}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+	if s.AnalyticsConfiguration != nil {
+		if err := s.AnalyticsConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams))
		}
+	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -5277,44 +16973,79 @@ func (s *GetBucketTaggingInput) Validate() error {
 	return nil
 }

-type GetBucketTaggingOutput struct {
-	_ struct{} `type:"structure"`
+// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput {
+	s.AnalyticsConfiguration = v
+	return s
+}

-	TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput {
+	s.Id = &v
+	return s
+}
+
+type PutBucketAnalyticsConfigurationOutput struct {
+	_ struct{} `type:"structure"`
 }

 // String returns the string representation
-func (s GetBucketTaggingOutput) String() string {
+func (s PutBucketAnalyticsConfigurationOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketTaggingOutput) GoString() string {
+func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
 	return s.String()
 }

-type GetBucketVersioningInput struct {
-	_ struct{} `type:"structure"`
+type PutBucketCorsInput struct {
+	_ struct{} `type:"structure" payload:"CORSConfiguration"`

+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// CORSConfiguration is a required field
+	CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }

 // String returns the string representation
-func (s GetBucketVersioningInput) String() string {
+func (s PutBucketCorsInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketVersioningInput) GoString() string {
+func (s PutBucketCorsInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketVersioningInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"}
+func (s *PutBucketCorsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
+	if s.CORSConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("CORSConfiguration"))
+	}
+	if s.CORSConfiguration != nil {
+		if err := s.CORSConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams))
+		}
+	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -5322,50 +17053,79 @@ func (s *GetBucketVersioningInput) Validate() error {
 	return nil
 }

-type GetBucketVersioningOutput struct {
-	_ struct{} `type:"structure"`
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput {
+	s.Bucket = &v
+	return s
+}

-	// Specifies whether MFA delete is enabled in the bucket versioning configuration.
-	// This element is only returned if the bucket has been configured with MFA
-	// delete. If the bucket has never been so configured, this element is not returned.
-	MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"`
+func (s *PutBucketCorsInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	// The versioning state of the bucket.
-	Status *string `type:"string" enum:"BucketVersioningStatus"`
+// SetCORSConfiguration sets the CORSConfiguration field's value.
+func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput {
+	s.CORSConfiguration = v
+	return s
+}
+
+type PutBucketCorsOutput struct {
+	_ struct{} `type:"structure"`
 }

 // String returns the string representation
-func (s GetBucketVersioningOutput) String() string {
+func (s PutBucketCorsOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketVersioningOutput) GoString() string {
+func (s PutBucketCorsOutput) GoString() string {
 	return s.String()
 }

-type GetBucketWebsiteInput struct {
-	_ struct{} `type:"structure"`
+type PutBucketEncryptionInput struct {
+	_ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`

+	// The name of the bucket for which the server-side encryption configuration
+	// is set.
+	//
+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Container for server-side encryption configuration rules. Currently S3 supports
+	// one rule only.
+	//
+	// ServerSideEncryptionConfiguration is a required field
+	ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }

 // String returns the string representation
-func (s GetBucketWebsiteInput) String() string {
+func (s PutBucketEncryptionInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketWebsiteInput) GoString() string {
+func (s PutBucketEncryptionInput) GoString() string {
 	return s.String()
 }
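+
+// Example (editorial sketch, hypothetical usage; not part of the generated
+// file): enabling default AES-256 server-side encryption on a bucket. The
+// bucket name is a placeholder and svc is an assumed *S3 client:
+//
+//	_, err := svc.PutBucketEncryption((&PutBucketEncryptionInput{}).
+//		SetBucket("example-bucket").
+//		SetServerSideEncryptionConfiguration(&ServerSideEncryptionConfiguration{
+//			Rules: []*ServerSideEncryptionRule{{
+//				ApplyServerSideEncryptionByDefault: &ServerSideEncryptionByDefault{
+//					SSEAlgorithm: aws.String(ServerSideEncryptionAes256),
+//				},
+//			}},
+//		}))
+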
 // Validate inspects the fields of the type to determine if they are valid.
-func (s *GetBucketWebsiteInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"}
+func (s *PutBucketEncryptionInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
+	if s.ServerSideEncryptionConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration"))
+	}
+	if s.ServerSideEncryptionConfiguration != nil {
+		if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams))
+		}
+	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -5373,66 +17133,84 @@ func (s *GetBucketWebsiteInput) Validate() error {
 	return nil
 }

-type GetBucketWebsiteOutput struct {
-	_ struct{} `type:"structure"`
-
-	ErrorDocument *ErrorDocument `type:"structure"`
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput {
+	s.Bucket = &v
+	return s
+}

-	IndexDocument *IndexDocument `type:"structure"`
+func (s *PutBucketEncryptionInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value.
+func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput {
+	s.ServerSideEncryptionConfiguration = v
+	return s
+}

-	RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+type PutBucketEncryptionOutput struct {
+	_ struct{} `type:"structure"`
 }

 // String returns the string representation
-func (s GetBucketWebsiteOutput) String() string {
+func (s PutBucketEncryptionOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetBucketWebsiteOutput) GoString() string {
+func (s PutBucketEncryptionOutput) GoString() string {
 	return s.String()
 }

-type GetObjectAclInput struct {
-	_ struct{} `type:"structure"`
+type PutBucketInventoryConfigurationInput struct {
+	_ struct{} `type:"structure" payload:"InventoryConfiguration"`

+	// The name of the bucket where the inventory configuration will be stored.
+	//
+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+	// The ID used to identify the inventory configuration.
+	//
+	// Id is a required field
+	Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`

-	// VersionId used to reference a specific version of the object.
-	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+	// Specifies the inventory configuration.
+	//
+	// InventoryConfiguration is a required field
+	InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }

 // String returns the string representation
-func (s GetObjectAclInput) String() string {
+func (s PutBucketInventoryConfigurationInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetObjectAclInput) GoString() string {
+func (s PutBucketInventoryConfigurationInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *GetObjectAclInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"}
+func (s *PutBucketInventoryConfigurationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
 	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	if s.InventoryConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration"))
+	}
+	if s.InventoryConfiguration != nil {
+		if err := s.InventoryConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams))
+		}
 	}

 	if invalidParams.Len() > 0 {
@@ -5441,120 +17219,74 @@ func (s *GetObjectAclInput) Validate() error {
 	return nil
 }

-type GetObjectAclOutput struct {
-	_ struct{} `type:"structure"`
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput {
+	s.Bucket = &v
+	return s
+}

-	// A list of grants.
-	Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	Owner *Owner `type:"structure"`
+// SetId sets the Id field's value.
+func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput {
+	s.Id = &v
+	return s
+}

-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+// SetInventoryConfiguration sets the InventoryConfiguration field's value.
+func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput {
+	s.InventoryConfiguration = v
+	return s
+}
+
+type PutBucketInventoryConfigurationOutput struct {
+	_ struct{} `type:"structure"`
 }

 // String returns the string representation
-func (s GetObjectAclOutput) String() string {
+func (s PutBucketInventoryConfigurationOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetObjectAclOutput) GoString() string {
+func (s PutBucketInventoryConfigurationOutput) GoString() string {
 	return s.String()
 }
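+
+// Example (editorial sketch, hypothetical usage; not part of the generated
+// file): Validate reports every missing required field at once, so a
+// half-built input fails before any request is made. The names below are
+// placeholders:
+//
+//	input := (&PutBucketInventoryConfigurationInput{}).
+//		SetBucket("example-bucket").
+//		SetId("example-id")
+//	if err := input.Validate(); err != nil {
+//		// err reports InventoryConfiguration as a missing required field
+//	}
+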
-type GetObjectInput struct {
-	_ struct{} `type:"structure"`
+type PutBucketLifecycleConfigurationInput struct {
+	_ struct{} `type:"structure" payload:"LifecycleConfiguration"`

+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

-	// Return the object only if its entity tag (ETag) is the same as the one specified,
-	// otherwise return a 412 (precondition failed).
-	IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
-
-	// Return the object only if it has been modified since the specified time,
-	// otherwise return a 304 (not modified).
-	IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Return the object only if its entity tag (ETag) is different from the one
-	// specified, otherwise return a 304 (not modified).
-	IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
-
-	// Return the object only if it has not been modified since the specified time,
-	// otherwise return a 412 (precondition failed).
-	IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
-
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// Downloads the specified range bytes of an object. For more information about
-	// the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-	Range *string `location:"header" locationName:"Range" type:"string"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	// Sets the Cache-Control header of the response.
-	ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
-
-	// Sets the Content-Disposition header of the response
-	ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
-
-	// Sets the Content-Encoding header of the response.
-	ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
-
-	// Sets the Content-Language header of the response.
-	ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
-
-	// Sets the Content-Type header of the response.
-	ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
-
-	// Sets the Expires header of the response.
-	ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
-
-	// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// VersionId used to reference a specific version of the object.
-	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+	LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }

 // String returns the string representation
-func (s GetObjectInput) String() string {
+func (s PutBucketLifecycleConfigurationInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetObjectInput) GoString() string {
+func (s PutBucketLifecycleConfigurationInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *GetObjectInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"}
+func (s *PutBucketLifecycleConfigurationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	if s.LifecycleConfiguration != nil {
+		if err := s.LifecycleConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+		}
 	}

 	if invalidParams.Len() > 0 {
@@ -5563,150 +17295,68 @@ func (s *GetObjectInput) Validate() error {
 	return nil
 }

-type GetObjectOutput struct {
-	_ struct{} `type:"structure" payload:"Body"`
-
-	AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
-
-	// Object data.
-	Body io.ReadCloser `type:"blob"`
-
-	// Specifies caching behavior along the request/reply chain.
-	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
-
-	// Specifies presentational information for the object.
-	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
-
-	// Specifies what content encodings have been applied to the object and thus
-	// what decoding mechanisms must be applied to obtain the media-type referenced
-	// by the Content-Type header field.
-	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
-
-	// The language the content is in.
-	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
-
-	// Size of the body in bytes.
-	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
-
-	// The portion of the object returned in the response.
-	ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
-
-	// A standard MIME type describing the format of the object data.
-	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
-
-	// Specifies whether the object retrieved was (true) or was not (false) a Delete
-	// Marker. If false, this response header does not appear in the response.
-	DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
-
-	// An ETag is an opaque identifier assigned by a web server to a specific version
-	// of a resource found at a URL
-	ETag *string `location:"header" locationName:"ETag" type:"string"`
-
-	// If the object expiration is configured (see PUT Bucket lifecycle), the response
-	// includes this header. It includes the expiry-date and rule-id key value pairs
-	// providing object expiration information. The value of the rule-id is URL
-	// encoded.
-	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
-
-	// The date and time at which the object is no longer cacheable.
-	Expires *string `location:"header" locationName:"Expires" type:"string"`
-
-	// Last modified date of the object
-	LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
-
-	// A map of metadata to store with the object in S3.
-	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
-	// This is set to the number of metadata entries not returned in x-amz-meta
-	// headers. This can happen if you create metadata using an API like SOAP that
-	// supports more flexible metadata than the REST API. For example, using SOAP,
-	// you can create metadata whose values are not legal HTTP headers.
-	MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
-
-	ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
-
-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-
-	// Provides information about object restoration operation and expiration time
-	// of the restored object copy.
-	Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
-
-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header confirming the encryption algorithm
-	// used.
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header to provide round trip message integrity
-	// verification of the customer-provided encryption key.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// If present, specifies the ID of the AWS Key Management Service (KMS) master
-	// encryption key that was used for the object.
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput {
+	s.Bucket = &v
+	return s
+}

-	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	// Version of the object.
-	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput {
+	s.LifecycleConfiguration = v
+	return s
+}

-	// If the bucket is configured as a website, redirects requests for this object
-	// to another object in the same bucket or to an external URL. Amazon S3 stores
-	// the value of this header in the object metadata.
-	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+type PutBucketLifecycleConfigurationOutput struct {
+	_ struct{} `type:"structure"`
 }

 // String returns the string representation
-func (s GetObjectOutput) String() string {
+func (s PutBucketLifecycleConfigurationOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetObjectOutput) GoString() string {
+func (s PutBucketLifecycleConfigurationOutput) GoString() string {
 	return s.String()
 }

-type GetObjectTorrentInput struct {
-	_ struct{} `type:"structure"`
+type PutBucketLifecycleInput struct {
+	_ struct{} `type:"structure" payload:"LifecycleConfiguration"`

+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+	LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }

 // String returns the string representation
-func (s GetObjectTorrentInput) String() string {
+func (s PutBucketLifecycleInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetObjectTorrentInput) GoString() string {
+func (s PutBucketLifecycleInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *GetObjectTorrentInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"}
+func (s *PutBucketLifecycleInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
-	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	if s.LifecycleConfiguration != nil {
+		if err := s.LifecycleConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+		}
 	}

 	if invalidParams.Len() > 0 {
@@ -5715,51 +17365,71 @@ func (s *GetObjectTorrentInput) Validate() error {
 	return nil
 }

-type GetObjectTorrentOutput struct {
-	_ struct{} `type:"structure" payload:"Body"`
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *PutBucketLifecycleInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	Body io.ReadCloser `type:"blob"`
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput {
+	s.LifecycleConfiguration = v
+	return s
+}

-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+type PutBucketLifecycleOutput struct {
+	_ struct{} `type:"structure"`
 }

 // String returns the string representation
-func (s GetObjectTorrentOutput) String() string {
+func (s PutBucketLifecycleOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s GetObjectTorrentOutput) GoString() string {
+func (s PutBucketLifecycleOutput) GoString() string {
 	return s.String()
 }

-type Grant struct {
-	_ struct{} `type:"structure"`
+type PutBucketLoggingInput struct {
+	_ struct{} `type:"structure" payload:"BucketLoggingStatus"`

-	Grantee *Grantee `type:"structure"`
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

-	// Specifies the permission given to the grantee.
-	Permission *string `type:"string" enum:"Permission"`
+	// BucketLoggingStatus is a required field
+	BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }

 // String returns the string representation
-func (s Grant) String() string {
+func (s PutBucketLoggingInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s Grant) GoString() string {
+func (s PutBucketLoggingInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *Grant) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "Grant"}
-	if s.Grantee != nil {
-		if err := s.Grantee.Validate(); err != nil {
-			invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+func (s *PutBucketLoggingInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.BucketLoggingStatus == nil {
+		invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus"))
+	}
+	if s.BucketLoggingStatus != nil {
+		if err := s.BucketLoggingStatus.Validate(); err != nil {
+			invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams))
 		}
 	}

@@ -5769,70 +17439,85 @@ func (s *Grant) Validate() error {
 	return nil
 }

-type Grantee struct {
-	_ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
-
-	// Screen name of the grantee.
-	DisplayName *string `type:"string"`
-
-	// Email address of the grantee.
-	EmailAddress *string `type:"string"`
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput {
+	s.Bucket = &v
+	return s
+}

-	// The canonical user ID of the grantee.
-	ID *string `type:"string"`
+func (s *PutBucketLoggingInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	// Type of grantee
-	Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"`
+// SetBucketLoggingStatus sets the BucketLoggingStatus field's value.
+func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput {
+	s.BucketLoggingStatus = v
+	return s
+}

-	// URI of the grantee group.
-	URI *string `type:"string"`
+type PutBucketLoggingOutput struct {
+	_ struct{} `type:"structure"`
 }

 // String returns the string representation
-func (s Grantee) String() string {
+func (s PutBucketLoggingOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s Grantee) GoString() string {
+func (s PutBucketLoggingOutput) GoString() string {
 	return s.String()
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Grantee) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "Grantee"}
-	if s.Type == nil {
-		invalidParams.Add(request.NewErrParamRequired("Type"))
-	}
+type PutBucketMetricsConfigurationInput struct {
+	_ struct{} `type:"structure" payload:"MetricsConfiguration"`

-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
-}
+	// The name of the bucket for which the metrics configuration is set.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

-type HeadBucketInput struct {
-	_ struct{} `type:"structure"`
+	// The ID used to identify the metrics configuration.
+	//
+	// Id is a required field
+	Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`

-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	// Specifies the metrics configuration.
+	//
+	// MetricsConfiguration is a required field
+	MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }

 // String returns the string representation
-func (s HeadBucketInput) String() string {
+func (s PutBucketMetricsConfigurationInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s HeadBucketInput) GoString() string {
+func (s PutBucketMetricsConfigurationInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *HeadBucketInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+func (s *PutBucketMetricsConfigurationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
+	if s.Id == nil {
+		invalidParams.Add(request.NewErrParamRequired("Id"))
+	}
+	if s.MetricsConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration"))
+	}
+	if s.MetricsConfiguration != nil {
+		if err := s.MetricsConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams))
		}
+	}

 	if invalidParams.Len() > 0 {
 		return invalidParams
@@ -5840,93 +17525,81 @@ func (s *HeadBucketInput) Validate() error {
 	return nil
 }

-type HeadBucketOutput struct {
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput {
+	s.Id = &v
+	return s
+}
+
+// SetMetricsConfiguration sets the MetricsConfiguration field's value.
+func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput {
+	s.MetricsConfiguration = v
+	return s
+}
+
+type PutBucketMetricsConfigurationOutput struct {
 	_ struct{} `type:"structure"`
 }

 // String returns the string representation
-func (s HeadBucketOutput) String() string {
+func (s PutBucketMetricsConfigurationOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s HeadBucketOutput) GoString() string {
+func (s PutBucketMetricsConfigurationOutput) GoString() string {
 	return s.String()
 }

-type HeadObjectInput struct {
-	_ struct{} `type:"structure"`
+type PutBucketNotificationConfigurationInput struct {
+	_ struct{} `type:"structure" payload:"NotificationConfiguration"`

+	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

-	// Return the object only if its entity tag (ETag) is the same as the one specified,
-	// otherwise return a 412 (precondition failed).
-	IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
-
-	// Return the object only if it has been modified since the specified time,
-	// otherwise return a 304 (not modified).
-	IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
-
-	// Return the object only if its entity tag (ETag) is different from the one
-	// specified, otherwise return a 304 (not modified).
-	IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
-
-	// Return the object only if it has not been modified since the specified time,
-	// otherwise return a 412 (precondition failed).
-	IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
-
-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
-
-	// Downloads the specified range bytes of an object. For more information about
-	// the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-	Range *string `location:"header" locationName:"Range" type:"string"`
-
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-
-	// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
-
-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// VersionId used to reference a specific version of the object.
-	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+	// Container for specifying the notification configuration of the bucket. If
+	// this element is empty, notifications are turned off on the bucket.
+	//
+	// NotificationConfiguration is a required field
+	NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }

 // String returns the string representation
-func (s HeadObjectInput) String() string {
+func (s PutBucketNotificationConfigurationInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s HeadObjectInput) GoString() string {
+func (s PutBucketNotificationConfigurationInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *HeadObjectInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+func (s *PutBucketNotificationConfigurationInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"}
 	if s.Bucket == nil {
 		invalidParams.Add(request.NewErrParamRequired("Bucket"))
 	}
-	if s.Key == nil {
-		invalidParams.Add(request.NewErrParamRequired("Key"))
+	if s.NotificationConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
 	}
-	if s.Key != nil && len(*s.Key) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	if s.NotificationConfiguration != nil {
+		if err := s.NotificationConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams))
		}
 	}

 	if invalidParams.Len() > 0 {
@@ -5935,134 +17608,67 @@ func (s *HeadObjectInput) Validate() error {
 	return nil
 }

-type HeadObjectOutput struct {
-	_ struct{} `type:"structure"`
-
-	AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
-
-	// Specifies caching behavior along the request/reply chain.
-	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
-
-	// Specifies presentational information for the object.
-	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
-
-	// Specifies what content encodings have been applied to the object and thus
-	// what decoding mechanisms must be applied to obtain the media-type referenced
-	// by the Content-Type header field.
-	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
-
-	// The language the content is in.
-	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
-
-	// Size of the body in bytes.
-	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
-
-	// A standard MIME type describing the format of the object data.
-	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
-
-	// Specifies whether the object retrieved was (true) or was not (false) a Delete
-	// Marker. If false, this response header does not appear in the response.
-	DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
-
-	// An ETag is an opaque identifier assigned by a web server to a specific version
-	// of a resource found at a URL
-	ETag *string `location:"header" locationName:"ETag" type:"string"`
-
-	// If the object expiration is configured (see PUT Bucket lifecycle), the response
-	// includes this header. It includes the expiry-date and rule-id key value pairs
-	// providing object expiration information. The value of the rule-id is URL
-	// encoded.
-	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
-
-	// The date and time at which the object is no longer cacheable.
-	Expires *string `location:"header" locationName:"Expires" type:"string"`
-
-	// Last modified date of the object
-	LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
-
-	// A map of metadata to store with the object in S3.
-	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
-
-	// This is set to the number of metadata entries not returned in x-amz-meta
-	// headers. This can happen if you create metadata using an API like SOAP that
-	// supports more flexible metadata than the REST API. For example, using SOAP,
-	// you can create metadata whose values are not legal HTTP headers.
-	MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
-
-	ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
-
-	// If present, indicates that the requester was successfully charged for the
-	// request.
-	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
-
-	// Provides information about object restoration operation and expiration time
-	// of the restored object copy.
-	Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
-
-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header confirming the encryption algorithm
-	// used.
-	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
-
-	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header to provide round trip message integrity
-	// verification of the customer-provided encryption key.
-	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-
-	// If present, specifies the ID of the AWS Key Management Service (KMS) master
-	// encryption key that was used for the object.
-	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
-
-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
-	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput {
+	s.Bucket = &v
+	return s
+}

-	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}

-	// Version of the object.
-	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+// SetNotificationConfiguration sets the NotificationConfiguration field's value.
+func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput {
+	s.NotificationConfiguration = v
+	return s
+}

-	// If the bucket is configured as a website, redirects requests for this object
-	// to another object in the same bucket or to an external URL. Amazon S3 stores
-	// the value of this header in the object metadata.
-	WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+type PutBucketNotificationConfigurationOutput struct {
+	_ struct{} `type:"structure"`
 }

 // String returns the string representation
-func (s HeadObjectOutput) String() string {
+func (s PutBucketNotificationConfigurationOutput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s HeadObjectOutput) GoString() string {
+func (s PutBucketNotificationConfigurationOutput) GoString() string {
 	return s.String()
 }

-type IndexDocument struct {
-	_ struct{} `type:"structure"`
+type PutBucketNotificationInput struct {
+	_ struct{} `type:"structure" payload:"NotificationConfiguration"`

-	// A suffix that is appended to a request that is for a directory on the website
-	// endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/
-	// the data that is returned will be for the object with the key name images/index.html)
-	// The suffix must not be empty and must not include a slash character.
-	Suffix *string `type:"string" required:"true"`
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// NotificationConfiguration is a required field
+	NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }

 // String returns the string representation
-func (s IndexDocument) String() string {
+func (s PutBucketNotificationInput) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s IndexDocument) GoString() string {
+func (s PutBucketNotificationInput) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
-func (s *IndexDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} - if s.Suffix == nil { - invalidParams.Add(request.NewErrParamRequired("Suffix")) +func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) } if invalidParams.Len() > 0 { @@ -6071,84 +17677,73 @@ func (s *IndexDocument) Validate() error { return nil } -type Initiator struct { - _ struct{} `type:"structure"` - - // Name of the Principal. - DisplayName *string `type:"string"` - - // If the principal is an AWS account, it provides the Canonical User ID. If - // the principal is an IAM User, it provides a user ARN value. - ID *string `type:"string"` +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v + return s } -// String returns the string representation -func (s Initiator) String() string { - return awsutil.Prettify(s) +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// GoString returns the string representation -func (s Initiator) GoString() string { - return s.String() +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v + return s } -// Container for object key name prefix and suffix filtering rules. -type KeyFilter struct { +type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` - - // A list of containers for key value pair that defines the criteria for the - // filter rule. - FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` } // String returns the string representation -func (s KeyFilter) String() string { +func (s PutBucketNotificationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s KeyFilter) GoString() string { +func (s PutBucketNotificationOutput) GoString() string { return s.String() } -// Container for specifying the AWS Lambda notification configuration. -type LambdaFunctionConfiguration struct { - _ struct{} `type:"structure"` - - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - Filter *NotificationConfigurationFilter `type:"structure"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. 
+ ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` - // Lambda cloud function ARN that Amazon S3 can invoke when it detects events - // of the specified type. - LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` + // The bucket policy as a JSON document. + // + // Policy is a required field + Policy *string `type:"string" required:"true"` } // String returns the string representation -func (s LambdaFunctionConfiguration) String() string { +func (s PutBucketPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LambdaFunctionConfiguration) GoString() string { +func (s PutBucketPolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *LambdaFunctionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.LambdaFunctionArn == nil { - invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) } if invalidParams.Len() > 0 { @@ -6157,122 +17752,81 @@ func (s *LambdaFunctionConfiguration) Validate() error { return nil } -type LifecycleConfiguration struct { - _ struct{} `type:"structure"` - - Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v + return s } -// String returns the string representation -func (s LifecycleConfiguration) String() string { - return awsutil.Prettify(s) +func (s *PutBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// GoString returns the string representation -func (s LifecycleConfiguration) GoString() string { - return s.String() +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *LifecycleConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetPolicy sets the Policy field's value. +func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v + return s } -type LifecycleExpiration struct { +type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` - - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. 
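// Usage sketch, not generated code: applying a minimal read-only bucket
// policy with the input type above. It assumes a configured *s3.S3 client
// and the SDK's aws package; the bucket name and resource ARN are
// illustrative.
//
//	func examplePutBucketPolicy(svc *s3.S3) error {
//		// A minimal policy document granting anonymous read access.
//		policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",` +
//			`"Principal":"*","Action":"s3:GetObject",` +
//			`"Resource":"arn:aws:s3:::example-bucket/*"}]}`
//		_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
//			Bucket: aws.String("example-bucket"),
//			Policy: aws.String(policy),
//		})
//		return err
//	}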
- Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int64 `type:"integer"` - - // Indicates whether Amazon S3 will remove a delete marker with no noncurrent - // versions. If set to true, the delete marker will be expired; if set to false - // the policy takes no action. This cannot be specified with Days or Date in - // a Lifecycle Expiration Policy. - ExpiredObjectDeleteMarker *bool `type:"boolean"` } // String returns the string representation -func (s LifecycleExpiration) String() string { +func (s PutBucketPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LifecycleExpiration) GoString() string { +func (s PutBucketPolicyOutput) GoString() string { return s.String() } -type LifecycleRule struct { - _ struct{} `type:"structure"` - - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - - Expiration *LifecycleExpiration `type:"structure"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - - NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` - - // Prefix identifying one or more objects to which the rule applies. - Prefix *string `type:"string" required:"true"` +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + // + // ReplicationConfiguration is a required field + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s LifecycleRule) String() string { +func (s PutBucketReplicationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LifecycleRule) GoString() string { +func (s PutBucketReplicationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *LifecycleRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) +func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -6281,89 +17835,73 @@ func (s *LifecycleRule) Validate() error { return nil } -type ListBucketsInput struct { - _ struct{} `type:"structure"` +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v + return s } -// String returns the string representation -func (s ListBucketsInput) String() string { - return awsutil.Prettify(s) +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// GoString returns the string representation -func (s ListBucketsInput) GoString() string { - return s.String() +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v + return s } -type ListBucketsOutput struct { +type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` - - Buckets []*Bucket `locationNameList:"Bucket" type:"list"` - - Owner *Owner `type:"structure"` } // String returns the string representation -func (s ListBucketsOutput) String() string { +func (s PutBucketReplicationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBucketsOutput) GoString() string { +func (s PutBucketReplicationOutput) GoString() string { return s.String() } -type ListMultipartUploadsInput struct { - _ struct{} `type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Together with upload-id-marker, this parameter specifies the multipart upload - // after which listing should begin. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of multipart uploads, from 1 to 1,000, to return - // in the response body. 1,000 is the maximum number of uploads that can be - // returned in a response. 
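// Usage sketch, not generated code: Validate above walks into the nested
// ReplicationConfiguration, so a request needs at least a role, one rule,
// and a destination before it is sent. Assumes a configured *s3.S3 client;
// the role and bucket ARNs are illustrative.
//
//	func exampleEnableReplication(svc *s3.S3) error {
//		_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
//			Bucket: aws.String("example-bucket"),
//			ReplicationConfiguration: &s3.ReplicationConfiguration{
//				Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
//				Rules: []*s3.ReplicationRule{{
//					// An empty prefix replicates every object.
//					Prefix: aws.String(""),
//					Status: aws.String("Enabled"),
//					Destination: &s3.Destination{
//						Bucket: aws.String("arn:aws:s3:::example-replica-bucket"),
//					},
//				}},
//			},
//		})
//		return err
//	}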
- MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` - // Lists in-progress uploads only for those keys that begin with the specified - // prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. - UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` + // RequestPaymentConfiguration is a required field + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s ListMultipartUploadsInput) String() string { +func (s PutBucketRequestPaymentInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListMultipartUploadsInput) GoString() string { +func (s PutBucketRequestPaymentInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListMultipartUploadsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6371,106 +17909,73 @@ func (s *ListMultipartUploadsInput) Validate() error { return nil } -type ListMultipartUploadsOutput struct { - _ struct{} `type:"structure"` - - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` - - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // Indicates whether the returned list of multipart uploads is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of multipart uploads exceeds the limit allowed or specified - // by max uploads. - IsTruncated *bool `type:"boolean"` - - // The key at or after which the listing began. - KeyMarker *string `type:"string"` - - // Maximum number of multipart uploads that could have been included in the - // response. - MaxUploads *int64 `type:"integer"` - - // When a list is truncated, this element specifies the value that should be - // used for the key-marker request parameter in a subsequent request. 
- NextKeyMarker *string `type:"string"` - - // When a list is truncated, this element specifies the value that should be - // used for the upload-id-marker request parameter in a subsequent request. - NextUploadIdMarker *string `type:"string"` +// SetBucket sets the Bucket field's value. +func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { + s.Bucket = &v + return s +} - // When a prefix is provided in the request, this field contains the specified - // prefix. The result contains only keys starting with the specified prefix. - Prefix *string `type:"string"` +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // Upload ID after which listing began. - UploadIdMarker *string `type:"string"` +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. +func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v + return s +} - Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s ListMultipartUploadsOutput) String() string { +func (s PutBucketRequestPaymentOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListMultipartUploadsOutput) GoString() string { +func (s PutBucketRequestPaymentOutput) GoString() string { return s.String() } -type ListObjectVersionsInput struct { - _ struct{} `type:"structure"` +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Specifies the key to start with when listing objects in a bucket. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Specifies the object version you want to start listing from. 
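// Usage sketch, not generated code: turning on requester-pays with the
// input type above. Assumes a configured *s3.S3 client; the bucket name is
// illustrative, and Payer accepts "Requester" or "BucketOwner".
//
//	func exampleEnableRequesterPays(svc *s3.S3) error {
//		_, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
//			Bucket: aws.String("example-bucket"), // illustrative
//			RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
//				Payer: aws.String("Requester"),
//			},
//		})
//		return err
//	}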
- VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s ListObjectVersionsInput) String() string { +func (s PutBucketTaggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectVersionsInput) GoString() string { +func (s PutBucketTaggingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} +func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6478,98 +17983,72 @@ func (s *ListObjectVersionsInput) Validate() error { return nil } -type ListObjectVersionsOutput struct { - _ struct{} `type:"structure"` - - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. If your results were truncated, you can - // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker - // response parameters as a starting place in another request to return the - // rest of the results. - IsTruncated *bool `type:"boolean"` - - // Marks the last Key returned in a truncated response. - KeyMarker *string `type:"string"` - - MaxKeys *int64 `type:"integer"` - - Name *string `type:"string"` - - // Use this value for the key marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` - - // Use this value for the next version id marker parameter in a subsequent request. - NextVersionIdMarker *string `type:"string"` +// SetBucket sets the Bucket field's value. +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { + s.Bucket = &v + return s +} - Prefix *string `type:"string"` +func (s *PutBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - VersionIdMarker *string `type:"string"` +// SetTagging sets the Tagging field's value. 
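// Usage sketch, not generated code: replacing a bucket's tag set with the
// input type above. Assumes a configured *s3.S3 client; the names and
// values are illustrative. Note that PutBucketTagging overwrites the whole
// tag set rather than merging with it.
//
//	func exampleTagBucket(svc *s3.S3) error {
//		_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
//			Bucket: aws.String("example-bucket"),
//			Tagging: &s3.Tagging{
//				TagSet: []*s3.Tag{{
//					Key:   aws.String("environment"), // illustrative tag
//					Value: aws.String("staging"),
//				}},
//			},
//		})
//		return err
//	}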
+func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v + return s +} - Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s ListObjectVersionsOutput) String() string { +func (s PutBucketTaggingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectVersionsOutput) GoString() string { +func (s PutBucketTaggingOutput) GoString() string { return s.String() } -type ListObjectsInput struct { - _ struct{} `type:"structure"` +type PutBucketVersioningInput struct { + _ struct{} `type:"structure" payload:"VersioningConfiguration"` + // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Specifies the key to start with when listing objects in a bucket. - Marker *string `location:"querystring" locationName:"marker" type:"string"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + // VersioningConfiguration is a required field + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s ListObjectsInput) String() string { +func (s PutBucketVersioningInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectsInput) GoString() string { +func (s PutBucketVersioningInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListObjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} +func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } if invalidParams.Len() > 0 { return invalidParams @@ -6577,100 +18056,79 @@ func (s *ListObjectsInput) Validate() error { return nil } -type ListObjectsOutput struct { - _ struct{} `type:"structure"` - - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - Contents []*Object `type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. - IsTruncated *bool `type:"boolean"` - - Marker *string `type:"string"` - - MaxKeys *int64 `type:"integer"` - - Name *string `type:"string"` - - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. Amazon S3 lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMaker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys. - NextMarker *string `type:"string"` +// SetBucket sets the Bucket field's value. +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { + s.Bucket = &v + return s +} - Prefix *string `type:"string"` +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// String returns the string representation -func (s ListObjectsOutput) String() string { - return awsutil.Prettify(s) +// SetMFA sets the MFA field's value. +func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v + return s } -// GoString returns the string representation -func (s ListObjectsOutput) GoString() string { - return s.String() +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s } -type ListObjectsV2Input struct { +type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` +} - // Name of the bucket to list. - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key - ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. 
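// Usage sketch, not generated code: enabling versioning with the input
// type above. Assumes a configured *s3.S3 client; the bucket name is
// illustrative. The MFA header is only needed when the configuration
// involves MFA delete, so it is omitted here.
//
//	func exampleEnableVersioning(svc *s3.S3) error {
//		_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
//			Bucket: aws.String("example-bucket"),
//			VersioningConfiguration: &s3.VersioningConfiguration{
//				Status: aws.String("Enabled"), // or "Suspended"
//			},
//		})
//		return err
//	}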
- EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // The owner field is not present in listV2 by default, if you want to return - // owner field with each key in the result then set the fetch owner field to - // true - FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket - StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s ListObjectsV2Input) String() string { +func (s PutBucketWebsiteInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectsV2Input) GoString() string { +func (s PutBucketWebsiteInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListObjectsV2Input) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6678,80 +18136,68 @@ func (s *ListObjectsV2Input) Validate() error { return nil } -type ListObjectsV2Output struct { - _ struct{} `type:"structure"` - - // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by delimiter - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - // Metadata about each object returned. - Contents []*Object `type:"list" flattened:"true"` - - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key - ContinuationToken *string `type:"string"` - - // A delimiter is a character you use to group keys. 
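// Usage sketch, not generated code: the nested WebsiteConfiguration is
// validated above; a typical static-site setup supplies an index document
// suffix. Assumes a configured *s3.S3 client; names are illustrative.
//
//	func exampleEnableWebsite(svc *s3.S3) error {
//		_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
//			Bucket: aws.String("example-bucket"),
//			WebsiteConfiguration: &s3.WebsiteConfiguration{
//				IndexDocument: &s3.IndexDocument{
//					Suffix: aws.String("index.html"),
//				},
//			},
//		})
//		return err
//	}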
- Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. - IsTruncated *bool `type:"boolean"` - - // KeyCount is the number of keys returned with this request. KeyCount will - // always be less than equals to MaxKeys field. Say you ask for 50 keys, your - // result will include less than equals 50 keys - KeyCount *int64 `type:"integer"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `type:"integer"` - - // Name of the bucket to list. - Name *string `type:"string"` +// SetBucket sets the Bucket field's value. +func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} - // NextContinuationToken is sent when isTruncated is true which means there - // are more keys in the bucket that can be listed. The next list requests to - // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken - // is obfuscated and is not a real key - NextContinuationToken *string `type:"string"` +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // Limits the response to keys that begin with the specified prefix. - Prefix *string `type:"string"` +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket - StartAfter *string `type:"string"` +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s ListObjectsV2Output) String() string { +func (s PutBucketWebsiteOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListObjectsV2Output) GoString() string { +func (s PutBucketWebsiteOutput) GoString() string { return s.String() } -type ListPartsInput struct { - _ struct{} `type:"structure"` +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - // Sets the maximum number of parts to return. - MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - // Specifies the part after which listing should begin. 
Only parts with higher - // part numbers will be listed. - PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. @@ -6759,23 +18205,23 @@ type ListPartsInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Upload ID identifying the multipart upload whose parts are being listed. - UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s ListPartsInput) String() string { +func (s PutObjectAclInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListPartsInput) GoString() string { +func (s PutObjectAclInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListPartsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} +func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } @@ -6785,8 +18231,10 @@ func (s *ListPartsInput) Validate() error { if s.Key != nil && len(*s.Key) < 1 { invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } - if s.UploadId == nil { - invalidParams.Add(request.NewErrParamRequired("UploadId")) + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -6795,104 +18243,228 @@ func (s *ListPartsInput) Validate() error { return nil } -type ListPartsOutput struct { - _ struct{} `type:"structure"` +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} - // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. 
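// Usage sketch, not generated code: granting public read on a single
// object through a canned ACL. Assumes a configured *s3.S3 client; the
// bucket and key are illustrative. A canned ACL and an explicit
// AccessControlPolicy are alternative ways to express the grants.
//
//	func exampleMakeObjectPublic(svc *s3.S3) error {
//		_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
//			Bucket: aws.String("example-bucket"),
//			Key:    aws.String("reports/summary.txt"), // illustrative key
//			ACL:    aws.String("public-read"),
//		})
//		return err
//	}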
- AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` +// SetBucket sets the Bucket field's value. +func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} - // Indicates whether the returned list of parts is truncated. - IsTruncated *bool `type:"boolean"` +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} - // Object key for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} - // Maximum number of parts that were allowed in the response. - MaxParts *int64 `type:"integer"` +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. - NextPartNumberMarker *int64 `type:"integer"` +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} - Owner *Owner `type:"structure"` +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} - // Part number after which listing begins. - PartNumberMarker *int64 `type:"integer"` +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} - Parts []*Part `locationName:"Part" type:"list" flattened:"true"` +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. 
+ Body io.ReadSeeker `type:"blob"` + + // Name of the bucket to which the PUT operation was initiated. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the PUT operation was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` + // Specifies the algorithm to use when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - // Upload ID identifying the multipart upload whose parts are being listed.
- UploadId *string `type:"string"` -} + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` -// String returns the string representation -func (s ListPartsOutput) String() string { - return awsutil.Prettify(s) -} + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` -// GoString returns the string representation -func (s ListPartsOutput) GoString() string { - return s.String() -} + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` -type LoggingEnabled struct { - _ struct{} `type:"structure"` + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // Specifies the bucket where you want Amazon S3 to store server access logs. - // You can have your logs delivered to any bucket that you own, including the - // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case you should - // choose a different TargetPrefix for each source bucket so that the delivered - // log files can be distinguished by key. - TargetBucket *string `type:"string"` + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + // The tag-set for the object. The tag-set must be encoded as URL query parameters. + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` - // This element lets you specify a prefix for the keys that the log files will - // be stored under. - TargetPrefix *string `type:"string"` + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } // String returns the string representation -func (s LoggingEnabled) String() string { +func (s PutObjectInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LoggingEnabled) GoString() string { +func (s PutObjectInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *LoggingEnabled) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} - if s.TargetGrants != nil { - for i, v := range s.TargetGrants { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) - } - } +func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -6901,250 +18473,315 @@ func (s *LoggingEnabled) Validate() error { return nil } -type MultipartUpload struct { - _ struct{} `type:"structure"` - - // Date and time at which the multipart upload was initiated. - Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` - - // Key of the object for which the multipart upload was initiated. - Key *string `min:"1" type:"string"` - - Owner *Owner `type:"structure"` +// SetACL sets the ACL field's value. +func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v + return s +} - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` +// SetBody sets the Body field's value. +func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v + return s +} - // Upload ID that identifies the multipart upload. - UploadId *string `type:"string"` +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s } -// String returns the string representation -func (s MultipartUpload) String() string { - return awsutil.Prettify(s) +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// GoString returns the string representation -func (s MultipartUpload) GoString() string { - return s.String() +// SetCacheControl sets the CacheControl field's value. +func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v + return s } -// Specifies when noncurrent object versions expire. Upon expiration, Amazon -// S3 permanently deletes the noncurrent object versions. You set this lifecycle -// configuration action on a bucket that has versioning enabled (or suspended) -// to request that Amazon S3 delete noncurrent object versions at a specific -// period in the object's lifetime. -type NoncurrentVersionExpiration struct { - _ struct{} `type:"structure"` +// SetContentDisposition sets the ContentDisposition field's value. 
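// Usage sketch, not generated code: a basic upload with the input type
// above. Assumes a configured *s3.S3 client and the standard "bytes"
// package; the bucket and key are illustrative. *bytes.Reader satisfies
// the io.ReadSeeker required by Body.
//
//	func exampleUploadObject(svc *s3.S3) error {
//		body := bytes.NewReader([]byte("hello, world\n"))
//		_, err := svc.PutObject(&s3.PutObjectInput{
//			Bucket:      aws.String("example-bucket"),
//			Key:         aws.String("greetings/hello.txt"),
//			Body:        body,
//			ContentType: aws.String("text/plain"),
//		})
//		return err
//	}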
+func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in - // the Amazon Simple Storage Service Developer Guide. - NoncurrentDays *int64 `type:"integer"` +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s } -// String returns the string representation -func (s NoncurrentVersionExpiration) String() string { - return awsutil.Prettify(s) +// SetContentLanguage sets the ContentLanguage field's value. +func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s } -// GoString returns the string representation -func (s NoncurrentVersionExpiration) GoString() string { - return s.String() +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s } -// Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA or GLACIER storage class. If your bucket is -// versioning-enabled (or versioning is suspended), you can set this action -// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA -// or GLACIER storage class at a specific period in the object's lifetime. -type NoncurrentVersionTransition struct { - _ struct{} `type:"structure"` +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in - // the Amazon Simple Storage Service Developer Guide. - NoncurrentDays *int64 `type:"integer"` +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"TransitionStorageClass"` +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s } -// String returns the string representation -func (s NoncurrentVersionTransition) String() string { - return awsutil.Prettify(s) +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s } -// GoString returns the string representation -func (s NoncurrentVersionTransition) GoString() string { - return s.String() +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s } -// Container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off on the bucket. 
-type NotificationConfiguration struct { - _ struct{} `type:"structure"` +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} - LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} - QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} - TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s } -// String returns the string representation -func (s NotificationConfiguration) String() string { - return awsutil.Prettify(s) +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s } -// GoString returns the string representation -func (s NotificationConfiguration) GoString() string { - return s.String() +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *NotificationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} - if s.LambdaFunctionConfigurations != nil { - for i, v := range s.LambdaFunctionConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.QueueConfigurations != nil { - for i, v := range s.QueueConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } - if s.TopicConfigurations != nil { - for i, v := range s.TopicConfigurations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) - } - } - } +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v } - return nil + return *s.SSECustomerKey } -type NotificationConfigurationDeprecated struct { - _ struct{} `type:"structure"` +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} - CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
+func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} - QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} - TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s } -// String returns the string representation -func (s NotificationConfigurationDeprecated) String() string { - return awsutil.Prettify(s) +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s } -// GoString returns the string representation -func (s NotificationConfigurationDeprecated) GoString() string { - return s.String() +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s } -// Container for object key name filtering rules. For information about key -// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon Simple Storage Service Developer Guide. -type NotificationConfigurationFilter struct { +type PutObjectOutput struct { _ struct{} `type:"structure"` - // Container for object key name prefix and suffix filtering rules. - Key *KeyFilter `locationName:"S3Key" type:"structure"` + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. 
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } // String returns the string representation -func (s NotificationConfigurationFilter) String() string { +func (s PutObjectOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NotificationConfigurationFilter) GoString() string { +func (s PutObjectOutput) GoString() string { return s.String() } -type Object struct { - _ struct{} `type:"structure"` - - ETag *string `type:"string"` +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} - Key *string `min:"1" type:"string"` +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} - Owner *Owner `type:"structure"` +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} - Size *int64 `type:"integer"` +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectStorageClass"` +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s } -// String returns the string representation -func (s Object) String() string { - return awsutil.Prettify(s) +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { + s.ServerSideEncryption = &v + return s } -// GoString returns the string representation -func (s Object) GoString() string { - return s.String() +// SetVersionId sets the VersionId field's value. +func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { + s.VersionId = &v + return s } -type ObjectIdentifier struct { - _ struct{} `type:"structure"` +type PutObjectTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` - // Key name of the object to delete. - Key *string `min:"1" type:"string" required:"true"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // VersionId for the specific version of the object to delete. 
- VersionId *string `type:"string"` + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s ObjectIdentifier) String() string { +func (s PutObjectTaggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ObjectIdentifier) GoString() string { +func (s PutObjectTaggingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ObjectIdentifier) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} +func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } if s.Key != nil && len(*s.Key) < 1 { invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -7152,116 +18789,101 @@ func (s *ObjectIdentifier) Validate() error { return nil } -type ObjectVersion struct { - _ struct{} `type:"structure"` - - ETag *string `type:"string"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `min:"1" type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Owner *Owner `type:"structure"` - - // Size in bytes of the object. - Size *int64 `type:"integer"` +// SetBucket sets the Bucket field's value. +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { + s.Bucket = &v + return s +} - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // Version ID of an object. - VersionId *string `type:"string"` +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s } -// String returns the string representation -func (s ObjectVersion) String() string { - return awsutil.Prettify(s) +// SetTagging sets the Tagging field's value. +func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { + s.Tagging = v + return s } -// GoString returns the string representation -func (s ObjectVersion) GoString() string { - return s.String() +// SetVersionId sets the VersionId field's value. 
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
+	s.VersionId = &v
+	return s
 }
 
-type Owner struct {
+type PutObjectTaggingOutput struct {
 	_ struct{} `type:"structure"`
 
-	DisplayName *string `type:"string"`
-
-	ID *string `type:"string"`
+	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
 }
 
 // String returns the string representation
-func (s Owner) String() string {
+func (s PutObjectTaggingOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s Owner) GoString() string {
+func (s PutObjectTaggingOutput) GoString() string {
 	return s.String()
 }
 
-type Part struct {
-	_ struct{} `type:"structure"`
-
-	// Entity tag returned when the part was uploaded.
-	ETag *string `type:"string"`
-
-	// Date and time at which the part was uploaded.
-	LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
-
-	// Part number identifying the part. This is a positive integer between 1 and
-	// 10,000.
-	PartNumber *int64 `type:"integer"`
-
-	// Size of the uploaded part data.
-	Size *int64 `type:"integer"`
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+	s.VersionId = &v
+	return s
 }
 
-// String returns the string representation
-func (s Part) String() string {
-	return awsutil.Prettify(s)
-}
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+type QueueConfiguration struct {
+	_ struct{} `type:"structure"`
 
-// GoString returns the string representation
-func (s Part) GoString() string {
-	return s.String()
-}
+	// Events is a required field
+	Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
 
-type PutBucketAccelerateConfigurationInput struct {
-	_ struct{} `type:"structure" payload:"AccelerateConfiguration"`
+	// Container for object key name filtering rules. For information about key
+	// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	Filter *NotificationConfigurationFilter `type:"structure"`
 
-	// Specifies the Accelerate Configuration you want to set for the bucket.
-	AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"`
+	// Optional unique identifier for configurations in a notification configuration.
+	// If you don't provide one, Amazon S3 will assign an ID.
+	Id *string `type:"string"`
 
-	// Name of the bucket for which the accelerate configuration is set.
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	// Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
+	// events of the specified type.
+	//
+	// QueueArn is a required field
+	QueueArn *string `locationName:"Queue" type:"string" required:"true"`
 }
 
 // String returns the string representation
-func (s PutBucketAccelerateConfigurationInput) String() string {
+func (s QueueConfiguration) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutBucketAccelerateConfigurationInput) GoString() string {
+func (s QueueConfiguration) GoString() string {
 	return s.String()
 }
 
 // Validate inspects the fields of the type to determine if they are valid.
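// Illustrative usage sketch; not part of the vendored patch. It shows how the
// generated QueueConfiguration setters and Validate compose when wiring S3
// event notifications to SQS. The queue ARN and event name are assumed
// example values.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	q := &s3.QueueConfiguration{}
	q.SetQueueArn("arn:aws:sqs:us-east-1:123456789012:example-queue") // assumed ARN
	q.SetEvents([]*string{aws.String("s3:ObjectCreated:*")})

	// Validate enforces the required Events and QueueArn fields, mirroring the
	// generated Validate method above.
	if err := q.Validate(); err != nil {
		fmt.Println("invalid configuration:", err)
		return
	}
	fmt.Println(q.String())
}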
-func (s *PutBucketAccelerateConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} - if s.AccelerateConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) +func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) } - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) } if invalidParams.Len() > 0 { @@ -7270,170 +18892,216 @@ func (s *PutBucketAccelerateConfigurationInput) Validate() error { return nil } -type PutBucketAccelerateConfigurationOutput struct { - _ struct{} `type:"structure"` +// SetEvents sets the Events field's value. +func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { + s.Events = v + return s } -// String returns the string representation -func (s PutBucketAccelerateConfigurationOutput) String() string { - return awsutil.Prettify(s) +// SetFilter sets the Filter field's value. +func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { + s.Filter = v + return s } -// GoString returns the string representation -func (s PutBucketAccelerateConfigurationOutput) GoString() string { - return s.String() +// SetId sets the Id field's value. +func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { + s.Id = &v + return s } -type PutBucketAclInput struct { - _ struct{} `type:"structure" payload:"AccessControlPolicy"` - - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetQueueArn sets the QueueArn field's value. +func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { + s.QueueArn = &v + return s +} - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + // Bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + Events []*string `locationName:"Event" type:"list" flattened:"true"` - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` - // Allows grantee to write the ACL for the applicable bucket. 
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+	Queue *string `type:"string"`
 }
 
 // String returns the string representation
-func (s PutBucketAclInput) String() string {
+func (s QueueConfigurationDeprecated) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutBucketAclInput) GoString() string {
+func (s QueueConfigurationDeprecated) GoString() string {
 	return s.String()
 }
 
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutBucketAclInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.AccessControlPolicy != nil {
-		if err := s.AccessControlPolicy.Validate(); err != nil {
-			invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
-		}
-	}
+// SetEvent sets the Event field's value.
+func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
+	s.Event = &v
+	return s
+}
 
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
+// SetEvents sets the Events field's value.
+func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
+	s.Events = v
+	return s
 }
 
-type PutBucketAclOutput struct {
-	_ struct{} `type:"structure"`
+// SetId sets the Id field's value.
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+	s.Id = &v
+	return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+	s.Queue = &v
+	return s
+}
+
+type RecordsEvent struct {
+	_ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
+
+	// The byte array of partial, one or more result records.
+	//
+	// Payload is automatically base64 encoded/decoded by the SDK.
+	Payload []byte `type:"blob"`
 }
 
 // String returns the string representation
-func (s PutBucketAclOutput) String() string {
+func (s RecordsEvent) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutBucketAclOutput) GoString() string {
+func (s RecordsEvent) GoString() string {
 	return s.String()
 }
 
-type PutBucketCorsInput struct {
-	_ struct{} `type:"structure" payload:"CORSConfiguration"`
+// SetPayload sets the Payload field's value.
+func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent {
+	s.Payload = v
+	return s
+}
 
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+// The RecordsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *RecordsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	s.Payload = make([]byte, len(msg.Payload))
+	copy(s.Payload, msg.Payload)
+	return nil
+}
+
+type Redirect struct {
+	_ struct{} `type:"structure"`
+
+	// The host name to use in the redirect request.
+	HostName *string `type:"string"`
+
+	// The HTTP redirect code to use on the response. Not required if one of the
+	// siblings is present.
+	HttpRedirectCode *string `type:"string"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+
+	// The object key prefix to use in the redirect request. For example, to redirect
+	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
+	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
+	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+	// if one of the siblings is present. Can be present only if ReplaceKeyWith
+	// is not provided.
+	ReplaceKeyPrefixWith *string `type:"string"`
 
-	CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"`
+	// The specific object key to use in the redirect request. For example, redirect
+	// the request to error.html. Not required if one of the siblings is present.
+	// Can be present only if ReplaceKeyPrefixWith is not provided.
+	ReplaceKeyWith *string `type:"string"`
 }
 
 // String returns the string representation
-func (s PutBucketCorsInput) String() string {
+func (s Redirect) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutBucketCorsInput) GoString() string {
+func (s Redirect) GoString() string {
 	return s.String()
 }
 
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutBucketCorsInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.CORSConfiguration == nil {
-		invalidParams.Add(request.NewErrParamRequired("CORSConfiguration"))
-	}
-	if s.CORSConfiguration != nil {
-		if err := s.CORSConfiguration.Validate(); err != nil {
-			invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams))
-		}
-	}
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+	s.HostName = &v
+	return s
+}
 
-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
+	s.HttpRedirectCode = &v
+	return s
 }
 
-type PutBucketCorsOutput struct {
-	_ struct{} `type:"structure"`
+// SetProtocol sets the Protocol field's value.
+func (s *Redirect) SetProtocol(v string) *Redirect {
+	s.Protocol = &v
+	return s
 }
 
-// String returns the string representation
-func (s PutBucketCorsOutput) String() string {
-	return awsutil.Prettify(s)
+// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
+func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
+	s.ReplaceKeyPrefixWith = &v
+	return s
 }
 
-// GoString returns the string representation
-func (s PutBucketCorsOutput) GoString() string {
-	return s.String()
+// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
+func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
+	s.ReplaceKeyWith = &v
+	return s
 }
 
-type PutBucketLifecycleConfigurationInput struct {
-	_ struct{} `type:"structure" payload:"LifecycleConfiguration"`
+type RedirectAllRequestsTo struct {
+	_ struct{} `type:"structure"`
 
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	// Name of the host where requests will be redirected.
+ // + // HostName is a required field + HostName *string `type:"string" required:"true"` - LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + // Protocol to use (http, https) when redirecting requests. The default is the + // protocol that is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` } // String returns the string representation -func (s PutBucketLifecycleConfigurationInput) String() string { +func (s RedirectAllRequestsTo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketLifecycleConfigurationInput) GoString() string { +func (s RedirectAllRequestsTo) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketLifecycleConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.LifecycleConfiguration != nil { - if err := s.LifecycleConfiguration.Validate(); err != nil { - invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) - } +func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) } if invalidParams.Len() > 0 { @@ -7442,47 +19110,63 @@ func (s *PutBucketLifecycleConfigurationInput) Validate() error { return nil } -type PutBucketLifecycleConfigurationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleConfigurationOutput) String() string { - return awsutil.Prettify(s) +// SetHostName sets the HostName field's value. +func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { + s.HostName = &v + return s } -// GoString returns the string representation -func (s PutBucketLifecycleConfigurationOutput) GoString() string { - return s.String() +// SetProtocol sets the Protocol field's value. +func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { + s.Protocol = &v + return s } -type PutBucketLifecycleInput struct { - _ struct{} `type:"structure" payload:"LifecycleConfiguration"` +// Container for replication rules. You can add as many as 1,000 rules. Total +// replication configuration size can be up to 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating + // the objects. + // + // Role is a required field + Role *string `type:"string" required:"true"` - LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + // Container for one or more replication rules. Replication configuration must + // have at least one rule and can contain up to 1,000 rules. 
+	//
+	// Rules is a required field
+	Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
 }
 
 // String returns the string representation
-func (s PutBucketLifecycleInput) String() string {
+func (s ReplicationConfiguration) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutBucketLifecycleInput) GoString() string {
+func (s ReplicationConfiguration) GoString() string {
 	return s.String()
 }
 
 // Validate inspects the fields of the type to determine if they are valid.
-func (s *PutBucketLifecycleInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+func (s *ReplicationConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
+	if s.Role == nil {
+		invalidParams.Add(request.NewErrParamRequired("Role"))
 	}
-	if s.LifecycleConfiguration != nil {
-		if err := s.LifecycleConfiguration.Validate(); err != nil {
-			invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+	if s.Rules == nil {
+		invalidParams.Add(request.NewErrParamRequired("Rules"))
+	}
+	if s.Rules != nil {
+		for i, v := range s.Rules {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+			}
 		}
 	}
 
@@ -7492,50 +19176,106 @@ func (s *PutBucketLifecycleInput) Validate() error {
 	return nil
 }
 
-type PutBucketLifecycleOutput struct {
-	_ struct{} `type:"structure"`
+// SetRole sets the Role field's value.
+func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration {
+	s.Role = &v
+	return s
 }
 
-// String returns the string representation
-func (s PutBucketLifecycleOutput) String() string {
-	return awsutil.Prettify(s)
+// SetRules sets the Rules field's value.
+func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
+	s.Rules = v
+	return s
 }
 
-// GoString returns the string representation
-func (s PutBucketLifecycleOutput) GoString() string {
-	return s.String()
-}
+// Container for information about a particular replication rule.
+type ReplicationRule struct {
+	_ struct{} `type:"structure"`
 
-type PutBucketLoggingInput struct {
-	_ struct{} `type:"structure" payload:"BucketLoggingStatus"`
+	// Specifies whether Amazon S3 should replicate delete markers.
+	DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"`
 
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	// Container for replication destination information.
+	//
+	// Destination is a required field
+	Destination *Destination `type:"structure" required:"true"`
+
+	// Filter that identifies a subset of objects to which the replication rule
+	// applies. A Filter must specify exactly one Prefix, Tag, or an And child element.
+	Filter *ReplicationRuleFilter `type:"structure"`
+
+	// Unique identifier for the rule. The value cannot be longer than 255 characters.
+	ID *string `type:"string"`
+
+	// Object key name prefix identifying one or more objects to which the rule
+	// applies. Maximum prefix length can be up to 1,024 characters.
+	//
+	// Deprecated: Prefix has been deprecated
+	Prefix *string `deprecated:"true" type:"string"`
+
+	// The priority associated with the rule. If you specify multiple rules in a
+	// replication configuration, then Amazon S3 applies rule priority in the event
+	// there are conflicts (two or more rules identify the same object based on
+	// the filter specified). The rule with the higher priority takes precedence.
+	// For example, two or more rules may identify the same object:
+	//
+	//    * With prefix-based filter criteria, if the prefixes you specified in
+	//    multiple rules overlap.
+	//
+	//    * With tag-based filter criteria, if the same tag is specified in multiple
+	//    rules.
+	//
+	// For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+	// in the Amazon S3 Developer Guide.
+	Priority *int64 `type:"integer"`
+
+	// Container that describes additional filters in identifying source objects
+	// that you want to replicate. Currently, Amazon S3 supports only the filter
+	// that you can specify for objects created with server-side encryption using
+	// an AWS KMS-managed key. You can choose to enable or disable replication of
+	// these objects.
+	SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"`
 
-	BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"`
+	// The rule is ignored if status is not Enabled.
+	//
+	// Status is a required field
+	Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
 }
 
 // String returns the string representation
-func (s PutBucketLoggingInput) String() string {
+func (s ReplicationRule) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutBucketLoggingInput) GoString() string {
+func (s ReplicationRule) GoString() string {
 	return s.String()
 }
 
 // Validate inspects the fields of the type to determine if they are valid.
-func (s *PutBucketLoggingInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+func (s *ReplicationRule) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"}
+	if s.Destination == nil {
+		invalidParams.Add(request.NewErrParamRequired("Destination"))
 	}
-	if s.BucketLoggingStatus == nil {
-		invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus"))
+	if s.Status == nil {
+		invalidParams.Add(request.NewErrParamRequired("Status"))
 	}
-	if s.BucketLoggingStatus != nil {
-		if err := s.BucketLoggingStatus.Validate(); err != nil {
-			invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams))
+	if s.Destination != nil {
+		if err := s.Destination.Validate(); err != nil {
+			invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Filter != nil {
+		if err := s.Filter.Validate(); err != nil {
+			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.SourceSelectionCriteria != nil {
+		if err := s.SourceSelectionCriteria.Validate(); err != nil {
+			invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams))
 		}
 	}
 
@@ -7545,52 +19285,83 @@ func (s *PutBucketLoggingInput) Validate() error {
 	return nil
 }
 
-type PutBucketLoggingOutput struct {
-	_ struct{} `type:"structure"`
+// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value.
+func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { + s.DeleteMarkerReplication = v + return s } -// String returns the string representation -func (s PutBucketLoggingOutput) String() string { - return awsutil.Prettify(s) +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s } -// GoString returns the string representation -func (s PutBucketLoggingOutput) GoString() string { - return s.String() +// SetFilter sets the Filter field's value. +func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { + s.Filter = v + return s } -type PutBucketNotificationConfigurationInput struct { - _ struct{} `type:"structure" payload:"NotificationConfiguration"` +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} - // Container for specifying the notification configuration of the bucket. If - // this element is empty, notifications are turned off on the bucket. - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` +// SetPriority sets the Priority field's value. +func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { + s.Priority = &v + return s +} + +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +type ReplicationRuleAndOperator struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` + + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` } // String returns the string representation -func (s PutBucketNotificationConfigurationInput) String() string { +func (s ReplicationRuleAndOperator) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketNotificationConfigurationInput) GoString() string { +func (s ReplicationRuleAndOperator) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketNotificationConfigurationInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.NotificationConfiguration == nil {
-		invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
-	}
-	if s.NotificationConfiguration != nil {
-		if err := s.NotificationConfiguration.Validate(); err != nil {
-			invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams))
+func (s *ReplicationRuleAndOperator) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"}
+	if s.Tags != nil {
+		for i, v := range s.Tags {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+			}
 		}
 	}
 
@@ -7600,46 +19371,66 @@ func (s *PutBucketNotificationConfigurationInput) Validate() error {
 	return nil
 }
 
-type PutBucketNotificationConfigurationOutput struct {
-	_ struct{} `type:"structure"`
-}
-
-// String returns the string representation
-func (s PutBucketNotificationConfigurationOutput) String() string {
-	return awsutil.Prettify(s)
+// SetPrefix sets the Prefix field's value.
+func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator {
+	s.Prefix = &v
+	return s
 }
 
-// GoString returns the string representation
-func (s PutBucketNotificationConfigurationOutput) GoString() string {
-	return s.String()
+// SetTags sets the Tags field's value.
+func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator {
+	s.Tags = v
+	return s
 }
 
-type PutBucketNotificationInput struct {
-	_ struct{} `type:"structure" payload:"NotificationConfiguration"`
+// Filter that identifies a subset of objects to which the replication rule applies.
+// A Filter must specify exactly one Prefix, Tag, or an And child element.
+type ReplicationRuleFilter struct {
+	_ struct{} `type:"structure"`
 
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	// Container for specifying rule filters. These filters determine the subset
+	// of objects to which the rule applies. The element is required only if you
+	// specify more than one filter. For example:
+	//
+	//    * You specify both a Prefix and a Tag filter. Then you wrap these in
+	//    an And tag.
+	//
+	//    * You specify a filter based on multiple tags. Then you wrap the Tag
+	//    elements in an And tag.
+	And *ReplicationRuleAndOperator `type:"structure"`
+
+	// Object key name prefix that identifies the subset of objects to which the
+	// rule applies.
+	Prefix *string `type:"string"`
 
-	NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+	// Container for specifying a tag key and value.
+	//
+	// The rule applies only to objects that have the tag in their tag set.
+	Tag *Tag `type:"structure"`
 }
 
 // String returns the string representation
-func (s PutBucketNotificationInput) String() string {
+func (s ReplicationRuleFilter) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutBucketNotificationInput) GoString() string {
+func (s ReplicationRuleFilter) GoString() string {
 	return s.String()
 }
 
 // Validate inspects the fields of the type to determine if they are valid.
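// Illustrative usage sketch; not part of the vendored patch. Per the
// ReplicationRuleFilter comments above, a filter that combines a Prefix and a
// Tag must be wrapped in an And element; the prefix and tag values are
// assumed example values.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	filter := &s3.ReplicationRuleFilter{}
	filter.SetAnd(&s3.ReplicationRuleAndOperator{
		Prefix: aws.String("logs/"), // assumed example prefix
		Tags: []*s3.Tag{
			{Key: aws.String("replicate"), Value: aws.String("true")},
		},
	})

	// Validate recurses into the nested And operator and its Tags, as in the
	// generated Validate methods shown here.
	if err := filter.Validate(); err != nil {
		fmt.Println("invalid filter:", err)
		return
	}
	fmt.Println(filter.String())
}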
-func (s *PutBucketNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *ReplicationRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -7648,47 +19439,48 @@ func (s *PutBucketNotificationInput) Validate() error { return nil } -type PutBucketNotificationOutput struct { - _ struct{} `type:"structure"` +// SetAnd sets the And field's value. +func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { + s.And = v + return s } -// String returns the string representation -func (s PutBucketNotificationOutput) String() string { - return awsutil.Prettify(s) +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { + s.Prefix = &v + return s } -// GoString returns the string representation -func (s PutBucketNotificationOutput) GoString() string { - return s.String() +// SetTag sets the Tag field's value. +func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { + s.Tag = v + return s } -type PutBucketPolicyInput struct { - _ struct{} `type:"structure" payload:"Policy"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` - // The bucket policy as a JSON document. - Policy *string `type:"string" required:"true"` + // Specifies who pays for the download and request fees. + // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` } // String returns the string representation -func (s PutBucketPolicyInput) String() string { +func (s RequestPaymentConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketPolicyInput) GoString() string { +func (s RequestPaymentConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) } if invalidParams.Len() > 0 { @@ -7697,52 +19489,82 @@ func (s *PutBucketPolicyInput) Validate() error { return nil } -type PutBucketPolicyOutput struct { +// SetPayer sets the Payer field's value. +func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v + return s +} + +type RequestProgress struct { _ struct{} `type:"structure"` + + // Specifies whether periodic QueryProgress frames should be sent. 
Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool `type:"boolean"` } // String returns the string representation -func (s PutBucketPolicyOutput) String() string { +func (s RequestProgress) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketPolicyOutput) GoString() string { +func (s RequestProgress) GoString() string { return s.String() } -type PutBucketReplicationInput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` +// SetEnabled sets the Enabled field's value. +func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { + s.Enabled = &v + return s +} + +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for restore job parameters. + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. - ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s PutBucketReplicationInput) String() string { +func (s RestoreObjectInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketReplicationInput) GoString() string { +func (s RestoreObjectInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
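// Illustrative usage sketch; not part of the vendored patch. It builds a
// RestoreObjectInput that asks S3 to keep a temporary copy of an archived
// object for seven days; the RestoreRequest and GlacierJobParameters types it
// references are defined later in this file. Bucket, key, and tier are
// assumed example values.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	input := &s3.RestoreObjectInput{}
	input.SetBucket("example-bucket") // assumed bucket name
	input.SetKey("archive/report.csv")
	input.SetRestoreRequest(&s3.RestoreRequest{
		Days: aws.Int64(7),
		GlacierJobParameters: &s3.GlacierJobParameters{
			Tier: aws.String(s3.TierStandard), // standard Glacier retrieval tier
		},
	})

	// Validate enforces the required Bucket and Key fields and recurses into
	// the nested RestoreRequest, mirroring the generated Validate methods.
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Println(input.String())
}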
-func (s *PutBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.ReplicationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) } - if s.ReplicationConfiguration != nil { - if err := s.ReplicationConfiguration.Validate(); err != nil { - invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) } } @@ -7752,50 +19574,131 @@ func (s *PutBucketReplicationInput) Validate() error { return nil } -type PutBucketReplicationOutput struct { +// SetBucket sets the Bucket field's value. +func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { + s.Bucket = &v + return s +} + +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +type RestoreObjectOutput struct { _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. + RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` } // String returns the string representation -func (s PutBucketReplicationOutput) String() string { +func (s RestoreObjectOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketReplicationOutput) GoString() string { +func (s RestoreObjectOutput) GoString() string { return s.String() } -type PutBucketRequestPaymentInput struct { - _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` +// SetRequestCharged sets the RequestCharged field's value. +func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { + s.RequestCharged = &v + return s +} - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. 
+type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + Days *int64 `type:"integer"` + + // The optional description for the job. + Description *string `type:"string"` - RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` + // Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. + Type *string `type:"string" enum:"RestoreRequestType"` } // String returns the string representation -func (s PutBucketRequestPaymentInput) String() string { +func (s RestoreRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketRequestPaymentInput) GoString() string { +func (s RestoreRequest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketRequestPaymentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) +func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } } - if s.RequestPaymentConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } } - if s.RequestPaymentConfiguration != nil { - if err := s.RequestPaymentConfiguration.Validate(); err != nil { - invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) } } @@ -7805,51 +19708,80 @@ func (s *PutBucketRequestPaymentInput) Validate() error { return nil } -type PutBucketRequestPaymentOutput struct { - _ struct{} `type:"structure"` +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s } -// String returns the string representation -func (s PutBucketRequestPaymentOutput) String() string { - return awsutil.Prettify(s) +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s } -// GoString returns the string representation -func (s PutBucketRequestPaymentOutput) GoString() string { - return s.String() +// SetGlacierJobParameters sets the GlacierJobParameters field's value. 
+func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
+	s.GlacierJobParameters = v
+	return s
 }
 
-type PutBucketTaggingInput struct {
-	_ struct{} `type:"structure" payload:"Tagging"`
+// SetOutputLocation sets the OutputLocation field's value.
+func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest {
+	s.OutputLocation = v
+	return s
+}
 
-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+// SetSelectParameters sets the SelectParameters field's value.
+func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest {
+	s.SelectParameters = v
+	return s
+}
+
+// SetTier sets the Tier field's value.
+func (s *RestoreRequest) SetTier(v string) *RestoreRequest {
+	s.Tier = &v
+	return s
+}
+
+// SetType sets the Type field's value.
+func (s *RestoreRequest) SetType(v string) *RestoreRequest {
+	s.Type = &v
+	return s
+}
+
+type RoutingRule struct {
+	_ struct{} `type:"structure"`
+
+	// A container for describing a condition that must be met for the specified
+	// redirect to apply. For example, 1. If request is for pages in the /docs folder,
+	// redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+	// redirect request to another host where you might process the error.
+	Condition *Condition `type:"structure"`
 
-	Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+	// Container for redirect information. You can redirect requests to another
+	// host, to another page, or with another protocol. In the event of an error,
+	// you can specify a different error code to return.
+	//
+	// Redirect is a required field
+	Redirect *Redirect `type:"structure" required:"true"`
 }
 
 // String returns the string representation
-func (s PutBucketTaggingInput) String() string {
+func (s RoutingRule) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s PutBucketTaggingInput) GoString() string {
+func (s RoutingRule) GoString() string {
 	return s.String()
 }
 
 // Validate inspects the fields of the type to determine if they are valid.
-func (s *PutBucketTaggingInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.Tagging == nil {
-		invalidParams.Add(request.NewErrParamRequired("Tagging"))
-	}
-	if s.Tagging != nil {
-		if err := s.Tagging.Validate(); err != nil {
-			invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
-		}
+func (s *RoutingRule) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+	if s.Redirect == nil {
+		invalidParams.Add(request.NewErrParamRequired("Redirect"))
 	}
 
 	if invalidParams.Len() > 0 {
@@ -7858,50 +19790,77 @@ func (s *PutBucketTaggingInput) Validate() error {
 	return nil
 }
 
-type PutBucketTaggingOutput struct {
-	_ struct{} `type:"structure"`
+// SetCondition sets the Condition field's value.
+func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule {
+	s.Condition = v
+	return s
 }
 
-// String returns the string representation
-func (s PutBucketTaggingOutput) String() string {
-	return awsutil.Prettify(s)
+// SetRedirect sets the Redirect field's value.
+func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s } -// GoString returns the string representation -func (s PutBucketTaggingOutput) GoString() string { - return s.String() -} +type Rule struct { + _ struct{} `type:"structure"` -type PutBucketVersioningInput struct { - _ struct{} `type:"structure" payload:"VersioningConfiguration"` + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + Expiration *LifecycleExpiration `type:"structure"` - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA, ONEZONE_IA or GLACIER storage class. If your + // bucket is versioning-enabled (or versioning is suspended), you can set this + // action to request that Amazon S3 transition noncurrent object versions to + // the STANDARD_IA, ONEZONE_IA or GLACIER storage class at a specific period + // in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` + // Prefix identifying one or more objects to which the rule applies. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` } // String returns the string representation -func (s PutBucketVersioningInput) String() string { +func (s Rule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketVersioningInput) GoString() string { +func (s Rule) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
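A small sketch of the RoutingRule type above in use, mirroring the worded /docs-to-/documents example from its comments (it uses only types this patch vendors; the prefixes are illustrative):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Requests whose key starts with docs/ are redirected to the same
	// key under documents/; Redirect is the one required field.
	rule := (&s3.RoutingRule{}).
		SetCondition((&s3.Condition{}).SetKeyPrefixEquals("docs/")).
		SetRedirect((&s3.Redirect{}).SetReplaceKeyPrefixWith("documents/"))

	if err := rule.Validate(); err != nil {
		log.Fatal(err)
	}
	log.Println(rule)
}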
-func (s *PutBucketVersioningInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+func (s *Rule) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "Rule"}
+	if s.Prefix == nil {
+		invalidParams.Add(request.NewErrParamRequired("Prefix"))
 	}
-	if s.VersioningConfiguration == nil {
-		invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration"))
+	if s.Status == nil {
+		invalidParams.Add(request.NewErrParamRequired("Status"))
 	}

 	if invalidParams.Len() > 0 {
@@ -7910,51 +19869,80 @@ func (s *PutBucketVersioningInput) Validate() error {
 	return nil
 }

-type PutBucketVersioningOutput struct {
-	_ struct{} `type:"structure"`
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule {
+	s.AbortIncompleteMultipartUpload = v
+	return s
 }

-// String returns the string representation
-func (s PutBucketVersioningOutput) String() string {
-	return awsutil.Prettify(s)
+// SetExpiration sets the Expiration field's value.
+func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule {
+	s.Expiration = v
+	return s
 }

-// GoString returns the string representation
-func (s PutBucketVersioningOutput) GoString() string {
-	return s.String()
+// SetID sets the ID field's value.
+func (s *Rule) SetID(v string) *Rule {
+	s.ID = &v
+	return s
 }

-type PutBucketWebsiteInput struct {
-	_ struct{} `type:"structure" payload:"WebsiteConfiguration"`
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule {
+	s.NoncurrentVersionExpiration = v
+	return s
+}

-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value.
+func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule {
+	s.NoncurrentVersionTransition = v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *Rule) SetPrefix(v string) *Rule {
+	s.Prefix = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *Rule) SetStatus(v string) *Rule {
+	s.Status = &v
+	return s
+}
+
+// SetTransition sets the Transition field's value.
+func (s *Rule) SetTransition(v *Transition) *Rule {
+	s.Transition = v
+	return s
+}
+
+// Specifies the use of SSE-KMS to encrypt delivered Inventory reports.
+type SSEKMS struct {
+	_ struct{} `locationName:"SSE-KMS" type:"structure"`

-	WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"`
+	// Specifies the ID of the AWS Key Management Service (KMS) master encryption
+	// key to use for encrypting Inventory reports.
+	//
+	// KeyId is a required field
+	KeyId *string `type:"string" required:"true"`
 }

 // String returns the string representation
-func (s PutBucketWebsiteInput) String() string {
+func (s SSEKMS) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s PutBucketWebsiteInput) GoString() string {
+func (s SSEKMS) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
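Likewise, a minimal sketch of the lifecycle Rule setters above; the prefix, rule ID, and day count are arbitrary illustrations:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Prefix and Status are the two required fields that Rule.Validate
	// enforces; everything else on the rule is optional.
	rule := (&s3.Rule{}).
		SetID("expire-noncurrent").
		SetPrefix("logs/").
		SetStatus(s3.ExpirationStatusEnabled).
		SetNoncurrentVersionExpiration(&s3.NoncurrentVersionExpiration{
			NoncurrentDays: aws.Int64(30),
		})

	if err := rule.Validate(); err != nil {
		log.Fatal(err)
	}
	log.Println(rule)
}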
-func (s *PutBucketWebsiteInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"}
-	if s.Bucket == nil {
-		invalidParams.Add(request.NewErrParamRequired("Bucket"))
-	}
-	if s.WebsiteConfiguration == nil {
-		invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration"))
-	}
-	if s.WebsiteConfiguration != nil {
-		if err := s.WebsiteConfiguration.Validate(); err != nil {
-			invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams))
-		}
+func (s *SSEKMS) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SSEKMS"}
+	if s.KeyId == nil {
+		invalidParams.Add(request.NewErrParamRequired("KeyId"))
 	}

 	if invalidParams.Len() > 0 {
@@ -7963,226 +19951,337 @@ func (s *PutBucketWebsiteInput) Validate() error {
 	return nil
 }

-type PutBucketWebsiteOutput struct {
-	_ struct{} `type:"structure"`
+// SetKeyId sets the KeyId field's value.
+func (s *SSEKMS) SetKeyId(v string) *SSEKMS {
+	s.KeyId = &v
+	return s
+}
+
+// Specifies the use of SSE-S3 to encrypt delivered Inventory reports.
+type SSES3 struct {
+	_ struct{} `locationName:"SSE-S3" type:"structure"`
 }

 // String returns the string representation
-func (s PutBucketWebsiteOutput) String() string {
+func (s SSES3) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s PutBucketWebsiteOutput) GoString() string {
+func (s SSES3) GoString() string {
 	return s.String()
 }

-type PutObjectAclInput struct {
-	_ struct{} `type:"structure" payload:"AccessControlPolicy"`
-
-	// The canned ACL to apply to the object.
-	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
-
-	AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+// SelectObjectContentEventStream provides handling of EventStreams for
+// the SelectObjectContent API.
+//
+// Use this type to receive SelectObjectContentEventStream events. The events
+// can be read from the Events channel member.
+//
+// The events that can be received are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+type SelectObjectContentEventStream struct {
+	// Reader is the EventStream reader for the SelectObjectContentEventStream
+	// events. This value is automatically set by the SDK when the API call is made.
+	// Use this member when unit testing your code with the SDK to mock out the
+	// EventStream Reader.
+	//
+	// Must not be nil.
+	Reader SelectObjectContentEventStreamReader

-	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+	// StreamCloser is the io.Closer for the EventStream connection. For HTTP
+	// EventStream this is the response Body. The stream will be closed when
+	// the Close method of the EventStream is called.
+	StreamCloser io.Closer
+}

-	// Allows grantee the read, write, read ACP, and write ACP permissions on the
-	// bucket.
-	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+// Close closes the EventStream. This will also cause the Events channel to be
+// closed. You can use the closing of the Events channel to terminate your
+// application's read from the API's EventStream.
+//
+// Will close the underlying EventStream reader. For EventStream over HTTP
+// connection this will also close the HTTP connection.
+//
+// Close must be called when done using the EventStream API. Not calling Close
+// may result in resource leaks.
+func (es *SelectObjectContentEventStream) Close() (err error) {
+	es.Reader.Close()
+	return es.Err()
+}

-	// Allows grantee to list the objects in the bucket.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+// Err returns any error that occurred while reading EventStream Events from
+// the service API's response. Returns nil if there were no errors.
+func (es *SelectObjectContentEventStream) Err() error {
+	if err := es.Reader.Err(); err != nil {
+		return err
+	}
+	es.StreamCloser.Close()

-	// Allows grantee to read the bucket ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+	return nil
+}

-	// Allows grantee to create, overwrite, and delete any object in the bucket.
-	GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+// Events returns a channel to read EventStream Events from the
+// SelectObjectContent API.
+//
+// These events are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
+	return es.Reader.Events()
+}

-	// Allows grantee to write the ACL for the applicable bucket.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+// SelectObjectContentEventStreamEvent groups together all EventStream
+// events read from the SelectObjectContent API.
+//
+// These events are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+type SelectObjectContentEventStreamEvent interface {
+	eventSelectObjectContentEventStream()
+}

-	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+// SelectObjectContentEventStreamReader provides the interface for reading EventStream
+// Events from the SelectObjectContent API. The
+// default implementation for this interface will be SelectObjectContentEventStream.
+//
+// The reader's Close method must allow multiple concurrent calls.
+//
+// These events are:
+//
+// * ContinuationEvent
+// * EndEvent
+// * ProgressEvent
+// * RecordsEvent
+// * StatsEvent
+type SelectObjectContentEventStreamReader interface {
+	// Returns a channel of events as they are read from the event stream.
+	Events() <-chan SelectObjectContentEventStreamEvent

-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+	// Close will close the underlying event stream reader. For event stream over
+	// HTTP this will also close the HTTP connection.
+	Close() error

-	// VersionId used to reference a specific version of the object.
-	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+	// Returns any error that has occurred while reading from the event stream.
+ Err() error } -// String returns the string representation -func (s PutObjectAclInput) String() string { - return awsutil.Prettify(s) -} +type readSelectObjectContentEventStream struct { + eventReader *eventstreamapi.EventReader + stream chan SelectObjectContentEventStreamEvent + errVal atomic.Value -// GoString returns the string representation -func (s PutObjectAclInput) GoString() string { - return s.String() + done chan struct{} + closeOnce sync.Once } -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectAclInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.AccessControlPolicy != nil { - if err := s.AccessControlPolicy.Validate(); err != nil { - invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) - } +func newReadSelectObjectContentEventStream( + reader io.ReadCloser, + unmarshalers request.HandlerList, + logger aws.Logger, + logLevel aws.LogLevelType, +) *readSelectObjectContentEventStream { + r := &readSelectObjectContentEventStream{ + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), } - if invalidParams.Len() > 0 { - return invalidParams - } - return nil + r.eventReader = eventstreamapi.NewEventReader( + reader, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: unmarshalers, + }, + r.unmarshalerForEventType, + ) + r.eventReader.UseLogger(logger, logLevel) + + return r } -type PutObjectAclOutput struct { - _ struct{} `type:"structure"` +// Close will close the underlying event stream reader. For EventStream over +// HTTP this will also close the HTTP connection. +func (r *readSelectObjectContentEventStream) Close() error { + r.closeOnce.Do(r.safeClose) - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + return r.Err() } -// String returns the string representation -func (s PutObjectAclOutput) String() string { - return awsutil.Prettify(s) +func (r *readSelectObjectContentEventStream) safeClose() { + close(r.done) + err := r.eventReader.Close() + if err != nil { + r.errVal.Store(err) + } } -// GoString returns the string representation -func (s PutObjectAclOutput) GoString() string { - return s.String() -} +func (r *readSelectObjectContentEventStream) Err() error { + if v := r.errVal.Load(); v != nil { + return v.(error) + } -type PutObjectInput struct { - _ struct{} `type:"structure" payload:"Body"` + return nil +} - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` +func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return r.stream +} - // Object data. - Body io.ReadSeeker `type:"blob"` +func (r *readSelectObjectContentEventStream) readEventStream() { + defer close(r.stream) - // Name of the bucket to which the PUT operation was initiated. 
- Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + r.errVal.Store(err) + return + } - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + select { + case r.stream <- event.(SelectObjectContentEventStreamEvent): + case <-r.done: + return + } + } +} - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` +func (r *readSelectObjectContentEventStream) unmarshalerForEventType( + eventType string, +) (eventstreamapi.Unmarshaler, error) { + switch eventType { + case "Cont": + return &ContinuationEvent{}, nil - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + case "End": + return &EndEvent{}, nil - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + case "Progress": + return &ProgressEvent{}, nil - // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + case "Records": + return &RecordsEvent{}, nil - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + case "Stats": + return &StatsEvent{}, nil + default: + return nil, awserr.New( + request.ErrCodeSerialization, + fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType), + nil, + ) + } +} - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` +// Request to filter the contents of an Amazon S3 object based on a simple Structured +// Query Language (SQL) statement. In the request, along with the SQL expression, +// you must also specify a data serialization format (JSON or CSV) of the object. +// Amazon S3 uses this to parse object data into records, and returns only records +// that match the specified SQL expression. You must also specify the data serialization +// format for the response. For more information, go to S3Select API Documentation +// (http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +type SelectObjectContentInput struct { + _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + // The S3 Bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + // The expression that is used to query the object. 
+	//
+	// Expression is a required field
+	Expression *string `type:"string" required:"true"`

-	// Allows grantee to read the object data and its metadata.
-	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+	// The type of the provided expression (e.g., SQL).
+	//
+	// ExpressionType is a required field
+	ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`

-	// Allows grantee to read the object ACL.
-	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+	// Describes the format of the data in the object that is being queried.
+	//
+	// InputSerialization is a required field
+	InputSerialization *InputSerialization `type:"structure" required:"true"`

-	// Allows grantee to write the ACL for the applicable object.
-	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+	// The Object Key.
+	//
+	// Key is a required field
 	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

-	// A map of metadata to store with the object in S3.
-	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+	// Describes the format of the data that you want Amazon S3 to return in response.
+	//
+	// OutputSerialization is a required field
+	OutputSerialization *OutputSerialization `type:"structure" required:"true"`

-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
-	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+	// Specifies if periodic request progress information should be enabled.
+	RequestProgress *RequestProgress `type:"structure"`

-	// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+	// The SSE Algorithm used to encrypt the object. For more information, go to
+	// Server-Side Encryption (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
 	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

-	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
-	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
-	// header.
+	// The SSE Customer Key. For more information, go to Server-Side Encryption
+	// (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
 	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`

-	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
+	// The SSE Customer Key MD5. For more information, go to Server-Side Encryption
+	// (Using Customer-Provided Encryption Keys) (http://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } // String returns the string representation -func (s PutObjectInput) String() string { +func (s SelectObjectContentInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectInput) GoString() string { +func (s SelectObjectContentInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} +func (s *SelectObjectContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } if s.Key != nil && len(*s.Key) < 1 { invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } if invalidParams.Len() > 0 { return invalidParams @@ -8190,188 +20289,234 @@ func (s *PutObjectInput) Validate() error { return nil } -type PutObjectOutput struct { - _ struct{} `type:"structure"` - - // Entity tag for the uploaded object. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. 
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +// SetBucket sets the Bucket field's value. +func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput { + s.Bucket = &v + return s } -// String returns the string representation -func (s PutObjectOutput) String() string { - return awsutil.Prettify(s) +func (s *SelectObjectContentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// GoString returns the string representation -func (s PutObjectOutput) GoString() string { - return s.String() +// SetExpression sets the Expression field's value. +func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { + s.Expression = &v + return s } -// Container for specifying an configuration when you want Amazon S3 to publish -// events to an Amazon Simple Queue Service (Amazon SQS) queue. -type QueueConfiguration struct { - _ struct{} `type:"structure"` - - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput { + s.ExpressionType = &v + return s +} - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. - Filter *NotificationConfigurationFilter `type:"structure"` +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { + s.InputSerialization = v + return s +} - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` +// SetKey sets the Key field's value. +func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { + s.Key = &v + return s +} - // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects - // events of specified type. 
- QueueArn *string `locationName:"Queue" type:"string" required:"true"` +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v + return s } -// String returns the string representation -func (s QueueConfiguration) String() string { - return awsutil.Prettify(s) +// SetRequestProgress sets the RequestProgress field's value. +func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s } -// GoString returns the string representation -func (s QueueConfiguration) GoString() string { - return s.String() +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *QueueConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.QueueArn == nil { - invalidParams.Add(request.NewErrParamRequired("QueueArn")) - } +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v } - return nil + return *s.SSECustomerKey } -type QueueConfigurationDeprecated struct { - _ struct{} `type:"structure"` - - // Bucket event for which to send notifications. - Event *string `deprecated:"true" type:"string" enum:"Event"` - - Events []*string `locationName:"Event" type:"list" flattened:"true"` +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v + return s +} - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` - Queue *string `type:"string"` + // Use EventStream to use the API's stream. + EventStream *SelectObjectContentEventStream `type:"structure"` } // String returns the string representation -func (s QueueConfigurationDeprecated) String() string { +func (s SelectObjectContentOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s QueueConfigurationDeprecated) GoString() string { +func (s SelectObjectContentOutput) GoString() string { return s.String() } -type Redirect struct { - _ struct{} `type:"structure"` +// SetEventStream sets the EventStream field's value. +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v + return s +} - // The host name to use in the redirect request. 
- HostName *string `type:"string"` +func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) { + if r.Error != nil { + return + } + reader := newReadSelectObjectContentEventStream( + r.HTTPResponse.Body, + r.Handlers.UnmarshalStream, + r.Config.Logger, + r.Config.LogLevel.Value(), + ) + go reader.readEventStream() + + eventStream := &SelectObjectContentEventStream{ + StreamCloser: r.HTTPResponse.Body, + Reader: reader, + } + s.EventStream = eventStream +} + +// Describes the parameters for Select job types. +type SelectParameters struct { + _ struct{} `type:"structure"` - // The HTTP redirect code to use on the response. Not required if one of the - // siblings is present. - HttpRedirectCode *string `type:"string"` + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` + // The type of the provided expression (e.g., SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` - // The object key prefix to use in the redirect request. For example, to redirect - // requests for all pages with prefix docs/ (objects in the docs/ folder) to - // documents/, you can set a condition block with KeyPrefixEquals set to docs/ - // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required - // if one of the siblings is present. Can be present only if ReplaceKeyWith - // is not provided. - ReplaceKeyPrefixWith *string `type:"string"` + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` - // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the sibling is present. Can - // be present only if ReplaceKeyPrefixWith is not provided. - ReplaceKeyWith *string `type:"string"` + // Describes how the results of the Select job are serialized. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` } // String returns the string representation -func (s Redirect) String() string { +func (s SelectParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Redirect) GoString() string { +func (s SelectParameters) GoString() string { return s.String() } -type RedirectAllRequestsTo struct { +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpression sets the Expression field's value. 
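Pulling the pieces above together, a minimal end-to-end sketch of SelectObjectContent: the input carries the SQL expression and serialization formats, and the returned EventStream must be drained and closed per its documented contract. The bucket, key, and CSV layout are assumptions for illustration only:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"), // placeholder
		Key:            aws.String("data.csv"),       // placeholder
		Expression:     aws.String("SELECT * FROM S3Object s"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Close must be called when done, as the comments above warn.
	defer out.EventStream.Close()

	// Records events carry the result bytes; other event types report progress.
	for ev := range out.EventStream.Events() {
		if rec, ok := ev.(*s3.RecordsEvent); ok {
			fmt.Print(string(rec.Payload))
		}
	}
	if err := out.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}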
+func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v + return s +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If Put Object request does not specify any server-side encryption, +// this default encryption will be applied. +type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` - // Name of the host where requests will be redirected. - HostName *string `type:"string" required:"true"` + // KMS master key ID to use for the default encryption. This parameter is allowed + // if SSEAlgorithm is aws:kms. + KMSMasterKeyID *string `type:"string"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` + // Server-side encryption algorithm to use for the default encryption. + // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` } // String returns the string representation -func (s RedirectAllRequestsTo) String() string { +func (s ServerSideEncryptionByDefault) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RedirectAllRequestsTo) GoString() string { +func (s ServerSideEncryptionByDefault) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RedirectAllRequestsTo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} - if s.HostName == nil { - invalidParams.Add(request.NewErrParamRequired("HostName")) +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) } if invalidParams.Len() > 0 { @@ -8380,36 +20525,43 @@ func (s *RedirectAllRequestsTo) Validate() error { return nil } -// Container for replication rules. You can add as many as 1,000 rules. Total -// replication configuration size can be up to 2 MB. -type ReplicationConfiguration struct { - _ struct{} `type:"structure"` +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v + return s +} - // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating - // the objects. - Role *string `type:"string" required:"true"` +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} - // Container for information about a particular replication rule. Replication - // configuration must have at least one rule and can contain up to 1,000 rules. 
- Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +// Container for server-side encryption configuration rules. Currently S3 supports +// one rule only. +type ServerSideEncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Container for information about a particular server-side encryption configuration + // rule. + // + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } // String returns the string representation -func (s ReplicationConfiguration) String() string { +func (s ServerSideEncryptionConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ReplicationConfiguration) GoString() string { +func (s ServerSideEncryptionConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) - } +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} if s.Rules == nil { invalidParams.Add(request.NewErrParamRequired("Rules")) } @@ -8430,48 +20582,39 @@ func (s *ReplicationConfiguration) Validate() error { return nil } -type ReplicationRule struct { - _ struct{} `type:"structure"` - - Destination *Destination `type:"structure" required:"true"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` +// SetRules sets the Rules field's value. +func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v + return s +} - // Object keyname prefix identifying one or more objects to which the rule applies. - // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes - // are not supported. - Prefix *string `type:"string" required:"true"` +// Container for information about a particular server-side encryption configuration +// rule. +type ServerSideEncryptionRule struct { + _ struct{} `type:"structure"` - // The rule is ignored if status is not Enabled. - Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` + // Describes the default server-side encryption to apply to new objects in the + // bucket. If Put Object request does not specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` } // String returns the string representation -func (s ReplicationRule) String() string { +func (s ServerSideEncryptionRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ReplicationRule) GoString() string { +func (s ServerSideEncryptionRule) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
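A short sketch of the ServerSideEncryptionConfiguration types above applied through PutBucketEncryption; the bucket name is a placeholder, and, per the comment, the configuration carries exactly one rule:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Default every new object to SSE-S3 (AES256) unless the PUT says otherwise.
	cfg := (&s3.ServerSideEncryptionConfiguration{}).SetRules([]*s3.ServerSideEncryptionRule{
		(&s3.ServerSideEncryptionRule{}).SetApplyServerSideEncryptionByDefault(
			(&s3.ServerSideEncryptionByDefault{}).SetSSEAlgorithm(s3.ServerSideEncryptionAes256),
		),
	})

	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket:                            aws.String("example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
}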
-func (s *ReplicationRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) - } - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) - } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - if s.Destination != nil { - if err := s.Destination.Validate(); err != nil { - invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) +func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) } } @@ -8481,28 +20624,39 @@ func (s *ReplicationRule) Validate() error { return nil } -type RequestPaymentConfiguration struct { +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v + return s +} + +// Container for filters that define which source objects should be replicated. +type SourceSelectionCriteria struct { _ struct{} `type:"structure"` - // Specifies who pays for the download and request fees. - Payer *string `type:"string" required:"true" enum:"Payer"` + // Container for filter information of selection of KMS Encrypted S3 objects. + // The element is required if you include SourceSelectionCriteria in the replication + // configuration. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` } // String returns the string representation -func (s RequestPaymentConfiguration) String() string { +func (s SourceSelectionCriteria) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RequestPaymentConfiguration) GoString() string { +func (s SourceSelectionCriteria) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RequestPaymentConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} - if s.Payer == nil { - invalidParams.Add(request.NewErrParamRequired("Payer")) +func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -8511,50 +20665,38 @@ func (s *RequestPaymentConfiguration) Validate() error { return nil } -type RestoreObjectInput struct { - _ struct{} `type:"structure" payload:"RestoreRequest"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. 
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} - RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` +// Container for filter information of selection of KMS Encrypted S3 objects. +type SseKmsEncryptedObjects struct { + _ struct{} `type:"structure"` - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` + // The replication for KMS encrypted S3 objects is disabled if status is not + // Enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` } // String returns the string representation -func (s RestoreObjectInput) String() string { +func (s SseKmsEncryptedObjects) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RestoreObjectInput) GoString() string { +func (s SseKmsEncryptedObjects) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RestoreObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.RestoreRequest != nil { - if err := s.RestoreRequest.Validate(); err != nil { - invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) - } +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) } if invalidParams.Len() > 0 { @@ -8563,84 +20705,118 @@ func (s *RestoreObjectInput) Validate() error { return nil } -type RestoreObjectOutput struct { +// SetStatus sets the Status field's value. +func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v + return s +} + +type Stats struct { _ struct{} `type:"structure"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Total number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // Total number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // Total number of object bytes scanned. + BytesScanned *int64 `type:"long"` } // String returns the string representation -func (s RestoreObjectOutput) String() string { +func (s Stats) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RestoreObjectOutput) GoString() string { +func (s Stats) GoString() string { return s.String() } -type RestoreRequest struct { - _ struct{} `type:"structure"` +// SetBytesProcessed sets the BytesProcessed field's value. 
+func (s *Stats) SetBytesProcessed(v int64) *Stats {
+	s.BytesProcessed = &v
+	return s
+}
+
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Stats) SetBytesReturned(v int64) *Stats {
+	s.BytesReturned = &v
+	return s
+}

-	// Lifetime of the active copy in days
-	Days *int64 `type:"integer" required:"true"`
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Stats) SetBytesScanned(v int64) *Stats {
+	s.BytesScanned = &v
+	return s
+}
+
+type StatsEvent struct {
+	_ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
+
+	// The Stats event details.
+	Details *Stats `locationName:"Details" type:"structure"`
 }

 // String returns the string representation
-func (s RestoreRequest) String() string {
+func (s StatsEvent) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s RestoreRequest) GoString() string {
+func (s StatsEvent) GoString() string {
 	return s.String()
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RestoreRequest) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
-	if s.Days == nil {
-		invalidParams.Add(request.NewErrParamRequired("Days"))
-	}
+// SetDetails sets the Details field's value.
+func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent {
+	s.Details = v
+	return s
+}

-	if invalidParams.Len() > 0 {
-		return invalidParams
+// The StatsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *StatsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *StatsEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	if err := payloadUnmarshaler.UnmarshalPayload(
+		bytes.NewReader(msg.Payload), s,
+	); err != nil {
+		return err
 	}
 	return nil
 }

-type RoutingRule struct {
+type StorageClassAnalysis struct {
 	_ struct{} `type:"structure"`

-	// A container for describing a condition that must be met for the specified
-	// redirect to apply. For example, 1. If request is for pages in the /docs folder,
-	// redirect to the /documents folder. 2. If request results in HTTP error 4xx,
-	// redirect request to another host where you might process the error.
-	Condition *Condition `type:"structure"`
-
-	// Container for redirect information. You can redirect requests to another
-	// host, to another page, or with another protocol. In the event of an error,
-	// you can can specify a different error code to return.
-	Redirect *Redirect `type:"structure" required:"true"`
+	// A container used to describe how data related to the storage class analysis
+	// should be exported.
+	DataExport *StorageClassAnalysisDataExport `type:"structure"`
 }

 // String returns the string representation
-func (s RoutingRule) String() string {
+func (s StorageClassAnalysis) String() string {
 	return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s RoutingRule) GoString() string {
+func (s StorageClassAnalysis) GoString() string {
 	return s.String()
 }

 // Validate inspects the fields of the type to determine if they are valid.
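As a companion to the event loop sketched earlier, a hypothetical helper showing how a StatsEvent pulled off the Events channel might be inspected; printStats is illustrative, not part of the SDK:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// printStats reports the byte counters carried by a StatsEvent, if that is
// what the stream delivered.
func printStats(ev s3.SelectObjectContentEventStreamEvent) {
	if st, ok := ev.(*s3.StatsEvent); ok && st.Details != nil {
		fmt.Printf("scanned=%d processed=%d returned=%d\n",
			aws.Int64Value(st.Details.BytesScanned),
			aws.Int64Value(st.Details.BytesProcessed),
			aws.Int64Value(st.Details.BytesReturned))
	}
}

func main() {
	// Demonstrate with a locally constructed event.
	printStats((&s3.StatsEvent{}).SetDetails((&s3.Stats{}).SetBytesScanned(1024)))
}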
-func (s *RoutingRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} - if s.Redirect == nil { - invalidParams.Add(request.NewErrParamRequired("Redirect")) +func (s *StorageClassAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} + if s.DataExport != nil { + if err := s.DataExport.Validate(); err != nil { + invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -8649,60 +20825,49 @@ func (s *RoutingRule) Validate() error { return nil } -type Rule struct { - _ struct{} `type:"structure"` - - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - - Expiration *LifecycleExpiration `type:"structure"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - - // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA or GLACIER storage class. If your bucket is - // versioning-enabled (or versioning is suspended), you can set this action - // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA - // or GLACIER storage class at a specific period in the object's lifetime. - NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` +// SetDataExport sets the DataExport field's value. +func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { + s.DataExport = v + return s +} - // Prefix identifying one or more objects to which the rule applies. - Prefix *string `type:"string" required:"true"` +type StorageClassAnalysisDataExport struct { + _ struct{} `type:"structure"` - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // The place to store the data for an analysis. + // + // Destination is a required field + Destination *AnalyticsExportDestination `type:"structure" required:"true"` - Transition *Transition `type:"structure"` + // The version of the output schema to use when exporting data. Must be V_1. + // + // OutputSchemaVersion is a required field + OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` } // String returns the string representation -func (s Rule) String() string { +func (s StorageClassAnalysisDataExport) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Rule) GoString() string { +func (s StorageClassAnalysisDataExport) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *Rule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Rule"} - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) +func (s *StorageClassAnalysisDataExport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) + if s.OutputSchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -8711,13 +20876,29 @@ func (s *Rule) Validate() error { return nil } +// SetDestination sets the Destination field's value. +func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { + s.Destination = v + return s +} + +// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. +func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { + s.OutputSchemaVersion = &v + return s +} + type Tag struct { _ struct{} `type:"structure"` // Name of the tag. + // + // Key is a required field Key *string `min:"1" type:"string" required:"true"` // Value of the tag. + // + // Value is a required field Value *string `type:"string" required:"true"` } @@ -8750,9 +20931,22 @@ func (s *Tag) Validate() error { return nil } +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + type Tagging struct { _ struct{} `type:"structure"` + // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } @@ -8789,10 +20983,16 @@ func (s *Tagging) Validate() error { return nil } +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + type TargetGrant struct { _ struct{} `type:"structure"` - Grantee *Grantee `type:"structure"` + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Logging permissions assigned to the Grantee for the bucket. Permission *string `type:"string" enum:"BucketLogsPermission"` @@ -8823,11 +21023,24 @@ func (s *TargetGrant) Validate() error { return nil } +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *TargetGrant) SetPermission(v string) *TargetGrant { + s.Permission = &v + return s +} + // Container for specifying the configuration when you want Amazon S3 to publish // events to an Amazon Simple Notification Service (Amazon SNS) topic. type TopicConfiguration struct { _ struct{} `type:"structure"` + // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` // Container for object key name filtering rules. For information about key @@ -8841,6 +21054,8 @@ type TopicConfiguration struct { // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects // events of specified type. 
+ // + // TopicArn is a required field TopicArn *string `locationName:"Topic" type:"string" required:"true"` } @@ -8870,10 +21085,36 @@ func (s *TopicConfiguration) Validate() error { return nil } +// SetEvents sets the Events field's value. +func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { + s.Id = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { + s.TopicArn = &v + return s +} + type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` // Bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` Events []*string `locationName:"Event" type:"list" flattened:"true"` @@ -8897,6 +21138,30 @@ func (s TopicConfigurationDeprecated) GoString() string { return s.String() } +// SetEvent sets the Event field's value. +func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { + s.Id = &v + return s +} + +// SetTopic sets the Topic field's value. +func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { + s.Topic = &v + return s +} + type Transition struct { _ struct{} `type:"structure"` @@ -8922,27 +21187,48 @@ func (s Transition) GoString() string { return s.String() } +// SetDate sets the Date field's value. +func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + type UploadPartCopyInput struct { _ struct{} `type:"structure"` + // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // The name of the source bucket and key name of the source object, separated // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` // Copies the object if its entity tag (ETag) matches the specified tag. CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. 
CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // The range of bytes to copy from the source object. The range value must use // the form bytes=first-last, where the first and last are the zero-based byte @@ -8964,10 +21250,13 @@ type UploadPartCopyInput struct { // key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Part number of part being copied. This is a positive integer between 1 and // 10,000. + // + // PartNumber is a required field PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` // Confirms that the requester knows that she or he will be charged for the @@ -8993,6 +21282,8 @@ type UploadPartCopyInput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being copied. + // + // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -9034,6 +21325,129 @@ func (s *UploadPartCopyInput) Validate() error { return nil } +// SetBucket sets the Bucket field's value. +func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { + s.Bucket = &v + return s +} + +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCopySource sets the CopySource field's value. +func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceRange sets the CopySourceRange field's value. +func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { + s.CopySourceRange = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. 
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` @@ -9076,6 +21490,48 @@ func (s UploadPartCopyOutput) GoString() string { return s.String() } +// SetCopyPartResult sets the CopyPartResult field's value. +func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { + s.CopySourceVersionId = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
+func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { + s.ServerSideEncryption = &v + return s +} + type UploadPartInput struct { _ struct{} `type:"structure" payload:"Body"` @@ -9083,17 +21539,26 @@ type UploadPartInput struct { Body io.ReadSeeker `type:"blob"` // Name of the bucket to which the multipart upload was initiated. + // + // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Size of the body in bytes. This parameter is useful when the size of the // body cannot be determined automatically. ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + // The base64-encoded 128-bit MD5 digest of the part data. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // Object key for which the multipart upload was initiated. + // + // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // Part number of part being uploaded. This is a positive integer between 1 // and 10,000. + // + // PartNumber is a required field PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` // Confirms that the requester knows that she or he will be charged for the @@ -9119,6 +21584,8 @@ type UploadPartInput struct { SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being uploaded. + // + // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -9157,6 +21624,86 @@ func (s *UploadPartInput) Validate() error { return nil } +// SetBody sets the Body field's value. +func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { + s.Bucket = &v + return s +} + +func (s *UploadPartInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContentLength sets the ContentLength field's value. +func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. 
+func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -9196,6 +21743,42 @@ func (s UploadPartOutput) GoString() string { return s.String() } +// SetETag sets the ETag field's value. +func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { + s.ETag = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { + s.ServerSideEncryption = &v + return s +} + type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -9218,6 +21801,18 @@ func (s VersioningConfiguration) GoString() string { return s.String() } +// SetMFADelete sets the MFADelete field's value. +func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { + s.Status = &v + return s +} + type WebsiteConfiguration struct { _ struct{} `type:"structure"` @@ -9275,65 +21870,130 @@ func (s *WebsiteConfiguration) Validate() error { return nil } +// SetErrorDocument sets the ErrorDocument field's value. +func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. 
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration { + s.RoutingRules = v + return s +} + +const ( + // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value + AnalyticsS3ExportFileFormatCsv = "CSV" +) + const ( - // @enum BucketAccelerateStatus + // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value BucketAccelerateStatusEnabled = "Enabled" - // @enum BucketAccelerateStatus + + // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value BucketAccelerateStatusSuspended = "Suspended" ) const ( - // @enum BucketCannedACL + // BucketCannedACLPrivate is a BucketCannedACL enum value BucketCannedACLPrivate = "private" - // @enum BucketCannedACL + + // BucketCannedACLPublicRead is a BucketCannedACL enum value BucketCannedACLPublicRead = "public-read" - // @enum BucketCannedACL + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value BucketCannedACLPublicReadWrite = "public-read-write" - // @enum BucketCannedACL + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value BucketCannedACLAuthenticatedRead = "authenticated-read" ) const ( - // @enum BucketLocationConstraint + // BucketLocationConstraintEu is a BucketLocationConstraint enum value BucketLocationConstraintEu = "EU" - // @enum BucketLocationConstraint + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value BucketLocationConstraintEuWest1 = "eu-west-1" - // @enum BucketLocationConstraint + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value BucketLocationConstraintUsWest1 = "us-west-1" - // @enum BucketLocationConstraint + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value BucketLocationConstraintUsWest2 = "us-west-2" - // @enum BucketLocationConstraint + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value BucketLocationConstraintApSouth1 = "ap-south-1" - // @enum BucketLocationConstraint + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value BucketLocationConstraintApSoutheast1 = "ap-southeast-1" - // @enum BucketLocationConstraint + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value BucketLocationConstraintApSoutheast2 = "ap-southeast-2" - // @enum BucketLocationConstraint + + // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value BucketLocationConstraintApNortheast1 = "ap-northeast-1" - // @enum BucketLocationConstraint + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value BucketLocationConstraintSaEast1 = "sa-east-1" - // @enum BucketLocationConstraint + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value BucketLocationConstraintCnNorth1 = "cn-north-1" - // @enum BucketLocationConstraint + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value BucketLocationConstraintEuCentral1 = "eu-central-1" ) const ( - // @enum BucketLogsPermission + // BucketLogsPermissionFullControl is a BucketLogsPermission enum value BucketLogsPermissionFullControl = "FULL_CONTROL" - // @enum BucketLogsPermission + + // BucketLogsPermissionRead is a BucketLogsPermission enum value BucketLogsPermissionRead = "READ" - // @enum BucketLogsPermission + + // BucketLogsPermissionWrite is a BucketLogsPermission enum value BucketLogsPermissionWrite = "WRITE" ) const ( - // @enum BucketVersioningStatus + // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value 
BucketVersioningStatusEnabled = "Enabled" - // @enum BucketVersioningStatus + + // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value BucketVersioningStatusSuspended = "Suspended" ) +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "NONE" + + // CompressionTypeGzip is a CompressionType enum value + CompressionTypeGzip = "GZIP" + + // CompressionTypeBzip2 is a CompressionType enum value + CompressionTypeBzip2 = "BZIP2" +) + +const ( + // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusEnabled = "Enabled" + + // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusDisabled = "Disabled" +) + // Requests Amazon S3 to encode the object keys in the response and specifies // the encoding method to use. An object key may contain any Unicode character; // however, XML 1.0 parser cannot parse some characters, such as characters @@ -9341,147 +22001,268 @@ const ( // XML 1.0, you can add this parameter to request that Amazon S3 encode the // keys in the response. const ( - // @enum EncodingType + // EncodingTypeUrl is a EncodingType enum value EncodingTypeUrl = "url" ) // Bucket event for which to send notifications. const ( - // @enum Event + // EventS3ReducedRedundancyLostObject is a Event enum value EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" - // @enum Event + + // EventS3ObjectCreated is a Event enum value EventS3ObjectCreated = "s3:ObjectCreated:*" - // @enum Event + + // EventS3ObjectCreatedPut is a Event enum value EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" - // @enum Event + + // EventS3ObjectCreatedPost is a Event enum value EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" - // @enum Event + + // EventS3ObjectCreatedCopy is a Event enum value EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" - // @enum Event + + // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - // @enum Event + + // EventS3ObjectRemoved is a Event enum value EventS3ObjectRemoved = "s3:ObjectRemoved:*" - // @enum Event + + // EventS3ObjectRemovedDelete is a Event enum value EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - // @enum Event + + // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" ) const ( - // @enum ExpirationStatus + // ExpirationStatusEnabled is a ExpirationStatus enum value ExpirationStatusEnabled = "Enabled" - // @enum ExpirationStatus + + // ExpirationStatusDisabled is a ExpirationStatus enum value ExpirationStatusDisabled = "Disabled" ) const ( - // @enum FilterRuleName + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + +const ( + // FilterRuleNamePrefix is a FilterRuleName enum value FilterRuleNamePrefix = "prefix" - // @enum FilterRuleName + + // FilterRuleNameSuffix is a FilterRuleName enum value FilterRuleNameSuffix = "suffix" ) const ( - // @enum MFADelete + // InventoryFormatCsv is a InventoryFormat enum value + InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a 
InventoryFormat enum value + InventoryFormatOrc = "ORC" +) + +const ( + // InventoryFrequencyDaily is a InventoryFrequency enum value + InventoryFrequencyDaily = "Daily" + + // InventoryFrequencyWeekly is a InventoryFrequency enum value + InventoryFrequencyWeekly = "Weekly" +) + +const ( + // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsAll = "All" + + // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsCurrent = "Current" +) + +const ( + // InventoryOptionalFieldSize is a InventoryOptionalField enum value + InventoryOptionalFieldSize = "Size" + + // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value + InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" + + // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value + InventoryOptionalFieldStorageClass = "StorageClass" + + // InventoryOptionalFieldEtag is a InventoryOptionalField enum value + InventoryOptionalFieldEtag = "ETag" + + // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value + InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" + + // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value + InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" +) + +const ( + // JSONTypeDocument is a JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a JSONType enum value + JSONTypeLines = "LINES" +) + +const ( + // MFADeleteEnabled is a MFADelete enum value MFADeleteEnabled = "Enabled" - // @enum MFADelete + + // MFADeleteDisabled is a MFADelete enum value MFADeleteDisabled = "Disabled" ) const ( - // @enum MFADeleteStatus + // MFADeleteStatusEnabled is a MFADeleteStatus enum value MFADeleteStatusEnabled = "Enabled" - // @enum MFADeleteStatus + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value MFADeleteStatusDisabled = "Disabled" ) const ( - // @enum MetadataDirective + // MetadataDirectiveCopy is a MetadataDirective enum value MetadataDirectiveCopy = "COPY" - // @enum MetadataDirective + + // MetadataDirectiveReplace is a MetadataDirective enum value MetadataDirectiveReplace = "REPLACE" ) const ( - // @enum ObjectCannedACL + // ObjectCannedACLPrivate is a ObjectCannedACL enum value ObjectCannedACLPrivate = "private" - // @enum ObjectCannedACL + + // ObjectCannedACLPublicRead is a ObjectCannedACL enum value ObjectCannedACLPublicRead = "public-read" - // @enum ObjectCannedACL + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value ObjectCannedACLPublicReadWrite = "public-read-write" - // @enum ObjectCannedACL + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value ObjectCannedACLAuthenticatedRead = "authenticated-read" - // @enum ObjectCannedACL + + // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value ObjectCannedACLAwsExecRead = "aws-exec-read" - // @enum ObjectCannedACL + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value ObjectCannedACLBucketOwnerRead = "bucket-owner-read" - // @enum ObjectCannedACL + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" ) const ( - // @enum ObjectStorageClass + // ObjectStorageClassStandard is a ObjectStorageClass enum 
value ObjectStorageClassStandard = "STANDARD" - // @enum ObjectStorageClass + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - // @enum ObjectStorageClass + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" ) const ( - // @enum ObjectVersionStorageClass + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value ObjectVersionStorageClassStandard = "STANDARD" ) const ( - // @enum Payer + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + +const ( + // PayerRequester is a Payer enum value PayerRequester = "Requester" - // @enum Payer + + // PayerBucketOwner is a Payer enum value PayerBucketOwner = "BucketOwner" ) const ( - // @enum Permission + // PermissionFullControl is a Permission enum value PermissionFullControl = "FULL_CONTROL" - // @enum Permission + + // PermissionWrite is a Permission enum value PermissionWrite = "WRITE" - // @enum Permission + + // PermissionWriteAcp is a Permission enum value PermissionWriteAcp = "WRITE_ACP" - // @enum Permission + + // PermissionRead is a Permission enum value PermissionRead = "READ" - // @enum Permission + + // PermissionReadAcp is a Permission enum value PermissionReadAcp = "READ_ACP" ) const ( - // @enum Protocol + // ProtocolHttp is a Protocol enum value ProtocolHttp = "http" - // @enum Protocol + + // ProtocolHttps is a Protocol enum value ProtocolHttps = "https" ) const ( - // @enum ReplicationRuleStatus + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value + QuoteFieldsAsneeded = "ASNEEDED" +) + +const ( + // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value ReplicationRuleStatusEnabled = "Enabled" - // @enum ReplicationRuleStatus + + // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value ReplicationRuleStatusDisabled = "Disabled" ) const ( - // @enum ReplicationStatus + // ReplicationStatusComplete is a ReplicationStatus enum value ReplicationStatusComplete = "COMPLETE" - // @enum ReplicationStatus + + // ReplicationStatusPending is a ReplicationStatus enum value ReplicationStatusPending = "PENDING" - // @enum ReplicationStatus + + // ReplicationStatusFailed is a ReplicationStatus enum value ReplicationStatusFailed = "FAILED" - // @enum ReplicationStatus + + // ReplicationStatusReplica is a ReplicationStatus enum value ReplicationStatusReplica = "REPLICA" ) // If present, indicates that the requester was successfully charged for the // request. 
const ( - // @enum RequestCharged + // RequestChargedRequester is a RequestCharged enum value RequestChargedRequester = "requester" ) @@ -9490,38 +22271,87 @@ const ( // Documentation on downloading objects from requester pays buckets can be found // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html const ( - // @enum RequestPayer + // RequestPayerRequester is a RequestPayer enum value RequestPayerRequester = "requester" ) const ( - // @enum ServerSideEncryption + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value ServerSideEncryptionAes256 = "AES256" - // @enum ServerSideEncryption + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value ServerSideEncryptionAwsKms = "aws:kms" ) const ( - // @enum StorageClass + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + +const ( + // StorageClassStandard is a StorageClass enum value StorageClassStandard = "STANDARD" - // @enum StorageClass + + // StorageClassReducedRedundancy is a StorageClass enum value StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - // @enum StorageClass + + // StorageClassStandardIa is a StorageClass enum value StorageClassStandardIa = "STANDARD_IA" + + // StorageClassOnezoneIa is a StorageClass enum value + StorageClassOnezoneIa = "ONEZONE_IA" ) const ( - // @enum TransitionStorageClass + // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value + StorageClassAnalysisSchemaVersionV1 = "V_1" +) + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +const ( + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value TransitionStorageClassGlacier = "GLACIER" - // @enum TransitionStorageClass + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" ) const ( - // @enum Type + // TypeCanonicalUser is a Type enum value TypeCanonicalUser = "CanonicalUser" - // @enum Type + + // TypeAmazonCustomerByEmail is a Type enum value TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" - // @enum Type + + // TypeGroup is a Type enum value TypeGroup = "Group" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go new file mode 100644 index 00000000..5c8ce5cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -0,0 +1,249 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + 
contentMD5Header = "Content-Md5" + contentSha256Header = "X-Amz-Content-Sha256" + amzTeHeader = "X-Amz-Te" + amzTxEncodingHeader = "X-Amz-Transfer-Encoding" + + appendMD5TxEncoding = "append-md5" +) + +// contentMD5 computes and sets the HTTP Content-MD5 header for requests that +// require it. +func contentMD5(r *request.Request) { + h := md5.New() + + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + if _, err := copySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. + v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} + +// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the +// request. If the body is not seekable or S3DisableContentMD5Validation is set, +// this handler will be ignored. +func computeBodyHashes(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + if r.Error != nil || !aws.IsReaderSeekable(r.Body) { + return + } + + var md5Hash, sha256Hash hash.Hash + hashers := make([]io.Writer, 0, 2) + + // Determine upfront which hashes can be set without overriding user + // provided header data. + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 { + md5Hash = md5.New() + hashers = append(hashers, md5Hash) + } + + if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 { + sha256Hash = sha256.New() + hashers = append(hashers, sha256Hash) + } + + // Create the destination writer based on the hashes that are not already + // provided by the user. + var dst io.Writer + switch len(hashers) { + case 0: + return + case 1: + dst = hashers[0] + default: + dst = io.MultiWriter(hashers...) + } + + if _, err := copySeekableBody(dst, r.Body); err != nil { + r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) + return + } + + // For the hashes created, set the associated headers that the user did not + // already provide. + if md5Hash != nil { + sum := make([]byte, md5.Size) + encoded := make([]byte, md5Base64EncLen) + + base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)} + } + + if sha256Hash != nil { + encoded := make([]byte, sha256HexEncLen) + sum := make([]byte, sha256.Size) + + hex.Encode(encoded, sha256Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)} + } +} + +const ( + md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen + sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen +) + +func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // hash the body. seek back to the first position after reading to reset + // the body for transmission. copy errors may be assumed to be from the + // body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} + +// Adds the x-amz-te: append-md5 header to the request. This requests that the service +// respond with a trailing MD5 checksum.
+// +// Will not ask for append MD5 if disabled, the request is presigned, +// or the API operation does not support content MD5 validation. +func askForTxEncodingAppendMD5(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding) +} + +func useMD5ValidationReader(r *request.Request) { + if r.Error != nil { + return + } + + if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding { + return + } + + var bodyReader *io.ReadCloser + var contentLen int64 + switch tv := r.Data.(type) { + case *GetObjectOutput: + bodyReader = &tv.Body + contentLen = aws.Int64Value(tv.ContentLength) + // Update ContentLength to hide the trailing MD5 checksum. + tv.ContentLength = aws.Int64(contentLen - md5.Size) + tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) + default: + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("%s: %s header received on unsupported API, %s", + amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, + ), nil) + return + } + + if contentLen < md5.Size { + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("invalid Content-Length %d for %s %s", + contentLen, appendMD5TxEncoding, amzTxEncodingHeader, + ), nil) + return + } + + // Wrap and swap the response body reader with the validation reader. + *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go index c3a2702d..bc68a46a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -12,6 +12,69 @@ import ( var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with the GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned.
+func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + func buildGetBucketLocation(r *request.Request) { if r.DataFilled() { out := r.Data.(*GetBucketLocationOutput) @@ -24,7 +87,7 @@ func buildGetBucketLocation(r *request.Request) { match := reBucketLocation.FindSubmatch(b) if len(match) > 1 { loc := string(match[1]) - out.LocationConstraint = &loc + out.LocationConstraint = aws.String(loc) } } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go deleted file mode 100644 index 9fc5df94..00000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go +++ /dev/null @@ -1,36 +0,0 @@ -package s3 - -import ( - "crypto/md5" - "encoding/base64" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - _, err := io.Copy(h, r.Body) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to read body", err) - return - } - _, err = r.Body.Seek(0, 0) - if err != nil { - r.Error = awserr.New("ContentMD5", "failed to seek body", err) - return - } - - // encode the md5 checksum in base64 and set the request header. 
- sum := h.Sum(nil) - sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum))) - base64.StdEncoding.Encode(sum64, sum) - r.HTTPRequest.Header.Set("Content-MD5", string(sum64)) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 84633472..6f560a40 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -3,6 +3,7 @@ package s3 import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3err" ) func init() { @@ -21,6 +22,7 @@ func defaultInitClientFn(c *client.Client) { // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.Clear() c.Handlers.UnmarshalError.PushBack(unmarshalError) + c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) } func defaultInitRequestFn(r *request.Request) { @@ -42,5 +44,30 @@ func defaultInitRequestFn(r *request.Request) { r.Handlers.Validate.PushFront(populateLocationConstraint) case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) + case opPutObject, opUploadPart: + r.Handlers.Build.PushBack(computeBodyHashes) + // Disabled until #1837 root issue is resolved. + // case opGetObject: + // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) + // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) } } + +// bucketGetter is an accessor interface to grab the "Bucket" field from +// an S3 type. +type bucketGetter interface { + getBucket() string +} + +// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" +// field from an S3 type. +type sseCustomerKeyGetter interface { + getSSECustomerKey() string +} + +// copySourceSSECustomerKeyGetter is an accessor interface to grab the +// "CopySourceSSECustomerKey" field from an S3 type. +type copySourceSSECustomerKeyGetter interface { + getCopySourceSSECustomerKey() string +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go new file mode 100644 index 00000000..0def0225 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go @@ -0,0 +1,26 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package s3 provides the client and types for making API +// requests to Amazon Simple Storage Service. +// +// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service. +// +// See s3 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/ +// +// Using the Client +// +// To contact Amazon Simple Storage Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Simple Storage Service client S3 for more +// information on creating client for this service. 
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go new file mode 100644 index 00000000..39b912c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -0,0 +1,109 @@ +// Upload Managers +// +// The s3manager package's Uploader provides concurrent upload of content to S3 +// by taking advantage of S3's Multipart APIs. The Uploader also supports +// io.Reader for streaming uploads, and will take advantage of io.ReadSeeker +// for optimizations if the Body satisfies that type. Once the Uploader instance +// is created you can call Upload concurrently from multiple goroutines safely. +// +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// f, err := os.Open(filename) +// if err != nil { +// return fmt.Errorf("failed to open file %q, %v", filename, err) +// } +// +// // Upload the file to S3. +// result, err := uploader.Upload(&s3manager.UploadInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// Body: f, +// }) +// if err != nil { +// return fmt.Errorf("failed to upload file, %v", err) +// } +// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location)) +// +// See the s3manager package's Uploader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader +// +// Download Manager +// +// The s3manager package's Downloader provides concurrent downloading of Objects +// from S3. The Downloader will write S3 Object content with an io.WriterAt. +// Once the Downloader instance is created you can call Download concurrently from +// multiple goroutines safely. +// +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a file to write the S3 Object contents to. +// f, err := os.Create(filename) +// if err != nil { +// return fmt.Errorf("failed to create file %q, %v", filename, err) +// } +// +// // Write the contents of S3 Object to the file +// n, err := downloader.Download(f, &s3.GetObjectInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// }) +// if err != nil { +// return fmt.Errorf("failed to download file, %v", err) +// } +// fmt.Printf("file downloaded, %d bytes\n", n) +// +// See the s3manager package's Downloader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader +// +// Get Bucket Region +// +// GetBucketRegion will attempt to get the region for a bucket using a region +// hint to determine which AWS partition to perform the query on. Use this utility +// to determine the region a bucket is in.
+// +// sess := session.Must(session.NewSession()) +// +// bucket := "my-bucket" +// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2") +// if err != nil { +// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { +// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket) +// } +// return err +// } +// fmt.Printf("Bucket %s is in %s region\n", bucket, region) +// +// See the s3manager package's GetBucketRegion function documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion +// +// S3 Crypto Client +// +// The s3crypto package provides the tools to upload and download encrypted +// content from S3. The Encryption and Decryption clients can be used concurrently +// once the client is created. +// +// sess := session.Must(session.NewSession()) +// +// // Create the decryption client. +// svc := s3crypto.NewDecryptionClient(sess) +// +// // The object will be downloaded from S3 and decrypted locally. Metadata +// // about the object's encryption will instruct the decryption client how to +// // decrypt the content of the object. By default KMS is used for keys. +// result, err := svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myKey), +// }) +// +// See the s3crypto package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ +// +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go new file mode 100644 index 00000000..931cb17b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -0,0 +1,48 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +const ( + + // ErrCodeBucketAlreadyExists for service response error code + // "BucketAlreadyExists". + // + // The requested bucket name is not available. The bucket namespace is shared + // by all users of the system. Please select a different name and try again. + ErrCodeBucketAlreadyExists = "BucketAlreadyExists" + + // ErrCodeBucketAlreadyOwnedByYou for service response error code + // "BucketAlreadyOwnedByYou". + ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + + // ErrCodeNoSuchBucket for service response error code + // "NoSuchBucket". + // + // The specified bucket does not exist. + ErrCodeNoSuchBucket = "NoSuchBucket" + + // ErrCodeNoSuchKey for service response error code + // "NoSuchKey". + // + // The specified key does not exist. + ErrCodeNoSuchKey = "NoSuchKey" + + // ErrCodeNoSuchUpload for service response error code + // "NoSuchUpload". + // + // The specified multipart upload does not exist. + ErrCodeNoSuchUpload = "NoSuchUpload" + + // ErrCodeObjectAlreadyInActiveTierError for service response error code + // "ObjectAlreadyInActiveTierError". + // + // This operation is not allowed against this storage tier. + ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" + + // ErrCodeObjectNotInActiveTierError for service response error code + // "ObjectNotInActiveTierError". + // + // The source object of the COPY operation is not in the active tier and is + // only stored in Amazon Glacier.
+ ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go index ccbf5cc1..a7fbc2de 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -8,7 +8,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) @@ -37,14 +36,6 @@ var accelerateOpBlacklist = operationBlacklist{ func updateEndpointForS3Config(r *request.Request) { forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) accelerate := aws.BoolValue(r.Config.S3UseAccelerate) - useDualStack := aws.BoolValue(r.Config.UseDualStack) - - if useDualStack && accelerate { - r.Error = awserr.New("InvalidParameterException", - fmt.Sprintf("configuration aws.Config.UseDualStack is not compatible with aws.Config.Accelerate"), - nil) - return - } if accelerate && accelerateOpBlacklist.Continue(r) { if forceHostStyle { @@ -75,6 +66,10 @@ func updateEndpointForHostStyle(r *request.Request) { moveBucketToHost(r.HTTPRequest.URL, bucket) } +var ( + accelElem = []byte("s3-accelerate.dualstack.") +) + func updateEndpointForAccelerate(r *request.Request) { bucket, ok := bucketNameFromReqParams(r.Params) if !ok { @@ -86,28 +81,40 @@ func updateEndpointForAccelerate(r *request.Request) { if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { r.Error = awserr.New("InvalidParameterException", - fmt.Sprintf("bucket name %s is not compatibile with S3 Accelerate", bucket), + fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket), nil) return } - // Change endpoint from s3(-[a-z0-1-])?.amazonaws.com to s3-accelerate.amazonaws.com - r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate") + parts := strings.Split(r.HTTPRequest.URL.Host, ".") + if len(parts) < 3 { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", + r.HTTPRequest.URL.Host), nil) + return + } + + if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { + parts[0] = "s3-accelerate" + } + for i := 1; i+1 < len(parts); i++ { + if parts[i] == aws.StringValue(r.Config.Region) { + parts = append(parts[:i], parts[i+1:]...) + break + } + } + + r.HTTPRequest.URL.Host = strings.Join(parts, ".") + moveBucketToHost(r.HTTPRequest.URL, bucket) } // Attempts to retrieve the bucket name from the request input parameters. // If no bucket is found, or the field is empty "", false will be returned. func bucketNameFromReqParams(params interface{}) (string, bool) { - b, _ := awsutil.ValuesAtPath(params, "Bucket") - if len(b) == 0 { - return "", false - } - - if bucket, ok := b[0].(*string); ok { - if bucketStr := aws.StringValue(bucket); bucketStr != "" { - return bucketStr, true - } + if iface, ok := params.(bucketGetter); ok { + b := iface.getBucket() + return b, len(b) > 0 } return "", false @@ -146,28 +153,3 @@ func moveBucketToHost(u *url.URL, bucket string) { u.Path = "/" } } - -const s3HostPrefix = "s3" - -// replaceHostRegion replaces the S3 region string in the host with the -// value provided. If v is empty the host prefix returned will be s3.
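The error constants in errors.go above are plain strings compared against the service response; a minimal sketch of branching on them with the usual awserr type assertion (bucket and key names are placeholders):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/awserr"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	sess := session.Must(session.NewSession())
    	svc := s3.New(sess)

    	_, err := svc.GetObject(&s3.GetObjectInput{
    		Bucket: aws.String("my-bucket"), // placeholder
    		Key:    aws.String("my-key"),    // placeholder
    	})
    	if aerr, ok := err.(awserr.Error); ok {
    		switch aerr.Code() {
    		case s3.ErrCodeNoSuchBucket, s3.ErrCodeNoSuchKey:
    			// Treat a missing bucket or key as "not found" rather than fatal.
    			fmt.Println("object not found:", aerr.Message())
    		default:
    			log.Fatal(aerr)
    		}
    	}
    }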
-func replaceHostRegion(host, v string) string { - if !strings.HasPrefix(host, s3HostPrefix) { - return host - } - - suffix := host[len(s3HostPrefix):] - for i := len(s3HostPrefix); i < len(host); i++ { - if host[i] == '.' { - // Trim until '.' leave the it in place. - suffix = host[i:] - break - } - } - - if len(v) == 0 { - return fmt.Sprintf("s3%s", suffix) - } - - return fmt.Sprintf("s3-%s%s", v, suffix) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index 5833952a..d17dcc9d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package s3 @@ -11,9 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restxml" ) -// S3 is a client for Amazon S3. -//The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. +// S3 provides the API operation methods for making requests to +// Amazon Simple Storage Service. See this package's package overview docs +// for details on the service. +// +// S3 methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. type S3 struct { *client.Client } @@ -24,8 +27,12 @@ var initClient func(*client.Client) // Used for custom request initialization logic var initRequest func(*request.Request) -// A ServiceName is the name of the service the client will make API calls to. -const ServiceName = "s3" +// Service information constants +const ( + ServiceName = "s3" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "S3" // ServiceID is a unique identifier of a specific service. +) // New creates a new instance of the S3 client with a session. // If additional configuration is needed for the client instance use the optional @@ -38,17 +45,19 @@ const ServiceName = "s3" // // Create a S3 client with additional configuration // svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { - c := p.ClientConfig(ServiceName, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 { +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { svc := &S3{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, APIVersion: "2006-03-01", @@ -58,12 +67,16 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) { + s.DisableURIPathEscaping = true + })) svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) + // Run custom client initialization if present if initClient != nil { initClient(svc.Client) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go index 268ea2fb..8010c4fa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -5,17 +5,27 @@ import ( "encoding/base64" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) func validateSSERequiresSSL(r *request.Request) { - if r.HTTPRequest.URL.Scheme != "https" { - p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") - if len(p) > 0 { + if r.HTTPRequest.URL.Scheme == "https" { + return + } + + if iface, ok := r.Params.(sseCustomerKeyGetter); ok { + if len(iface.getSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } + + if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + if len(iface.getCopySourceSSECustomerKey()) > 0 { r.Error = errSSERequiresSSL + return } } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index ce65fcda..fde3050f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -5,20 +5,24 @@ import ( "io/ioutil" "net/http" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" ) func copyMultipartStatusOKUnmarhsalError(r *request.Request) { b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "unable to read response body", err) + r.Error = awserr.NewRequestFailure( + awserr.New("SerializationError", "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } body := bytes.NewReader(b) - r.HTTPResponse.Body = aws.ReadSeekCloser(body) - defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0) + r.HTTPResponse.Body = ioutil.NopCloser(body) + defer body.Seek(0, sdkio.SeekStart) if body.Len() == 0 { // If there is no body don't attempt to parse the body. 
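As a hedged illustration of the validateSSERequiresSSL handler above: sending SSE-C parameters through a client forced onto plain HTTP fails client-side, before any request is sent. A minimal sketch, with placeholder bucket, key, and customer-key values:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	sess := session.Must(session.NewSession())

    	// DisableSSL forces an http:// endpoint, so SSE-C parameters are rejected.
    	svc := s3.New(sess, aws.NewConfig().WithDisableSSL(true))

    	_, err := svc.PutObject(&s3.PutObjectInput{
    		Bucket:               aws.String("my-bucket"), // placeholder
    		Key:                  aws.String("my-key"),    // placeholder
    		Body:                 strings.NewReader("data"),
    		SSECustomerAlgorithm: aws.String("AES256"),
    		SSECustomerKey:       aws.String("32-byte-customer-key-goes-here."), // placeholder
    	})

    	// Expect the "ConfigError: cannot send SSE keys over HTTP." error from above.
    	fmt.Println(err)
    }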
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index ed91c587..12c0612c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -48,6 +48,7 @@ func unmarshalError(r *request.Request) { } else { errCode = resp.Code errMsg = resp.Message + err = nil } // Fallback to status code converted to message if still no error code @@ -58,8 +59,19 @@ func unmarshalError(r *request.Request) { } r.Error = awserr.NewRequestFailure( - awserr.New(errCode, errMsg, nil), + awserr.New(errCode, errMsg, err), r.HTTPResponse.StatusCode, r.RequestID, ) } + +// A RequestFailure provides access to the S3 Request ID and Host ID values +// returned from API operation errors. Getting the error as a string will +// return the formatted error with the same information as awserr.RequestFailure, +// while also adding the HostID value from the response. +type RequestFailure interface { + awserr.RequestFailure + + // Host ID is the S3 Host ID needed for debugging and contacting support + HostID() string +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go index cbd3d311..2596c694 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -1,123 +1,214 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package s3 import ( - "github.com/aws/aws-sdk-go/private/waiter" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" ) +// WaitUntilBucketExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadBucket", - Delay: 5, + return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts.
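The RequestFailure interface added in unmarshal_error.go above exposes the S3 Host ID on top of awserr.RequestFailure. A minimal, hedged sketch of pulling the diagnostics out of an operation error (err is assumed to come from any S3 call):

    package main

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/service/s3"
    )

    // logS3Failure reports the extra S3 diagnostics when err is an S3 RequestFailure.
    func logS3Failure(err error) {
    	if reqErr, ok := err.(s3.RequestFailure); ok {
    		// Request ID and Host ID together are what AWS support asks for.
    		log.Printf("S3 request failed: code=%s status=%d request-id=%s host-id=%s",
    			reqErr.Code(), reqErr.StatusCode(), reqErr.RequestID(), reqErr.HostID())
    	}
    }

    func main() {} // placeholder entry point; logS3Failure is called from real request code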
+func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 200, }, { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 301, }, { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 403, }, { - State: "retry", - Matcher: "status", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } +// WaitUntilBucketNotExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadBucket", - Delay: 5, + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } +// WaitUntilObjectExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. 
func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadObject", - Delay: 5, + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 200, }, { - State: "retry", - Matcher: "status", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } +// WaitUntilObjectNotExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadObject", - Delay: 5, + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) 
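A hedged usage sketch for the rewritten waiters above: a context deadline now bounds the whole attempt loop, and waiter options can override the generated defaults. The bucket name is a placeholder:

    package main

    import (
    	"context"
    	"log"
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/request"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
    	sess := session.Must(session.NewSession())
    	svc := s3.New(sess)

    	// The context cancels the waiter even mid-delay; the option overrides
    	// the generated default of 20 attempts.
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
    	defer cancel()

    	err := svc.WaitUntilBucketExistsWithContext(ctx, &s3.HeadBucketInput{
    		Bucket: aws.String("my-bucket"), // placeholder
    	}, request.WithWaiterMaxAttempts(10))
    	if err != nil {
    		log.Fatal(err)
    	}
    }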
- w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index f11e8675..ee908f91 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -1,11 +1,11 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package sts provides a client for AWS Security Token Service. package sts import ( "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) @@ -14,17 +14,18 @@ const opAssumeRole = "AssumeRole" // AssumeRoleRequest generates a "aws/request.Request" representing the // client's request for the AssumeRole operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRole method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleRequest method. // req, resp := client.AssumeRoleRequest(params) @@ -34,6 +35,7 @@ const opAssumeRole = "AssumeRole" // fmt.Println(resp) // } // +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { op := &request.Operation{ Name: opAssumeRole, @@ -45,12 +47,13 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o input = &AssumeRoleInput{} } - req = c.newRequest(op, input, output) output = &AssumeRoleOutput{} - req.Data = output + req = c.newRequest(op, input, output) return } +// AssumeRole API operation for AWS Security Token Service. +// // Returns a set of temporary security credentials (consisting of an access // key ID, a secret access key, and a security token) that you can use to access // AWS resources that you might not normally have access to. Typically, you @@ -60,7 +63,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// Important: You cannot call AssumeRole by using AWS root account credentials; +// Important: You cannot call AssumeRole by using AWS root account credentials; // access is denied. 
You must use credentials for an IAM user or an IAM role // to call AssumeRole. // @@ -85,22 +88,31 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) // in the IAM User Guide. // -// The temporary security credentials are valid for the duration that you specified -// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a -// maximum of 3600 seconds (1 hour). The default is 1 hour. +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // -// The temporary security credentials created by AssumeRole can be used to -// make API calls to any AWS service with the following exception: you cannot -// call the STS service's GetFederationToken or GetSessionToken APIs. +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: you cannot call +// the STS service's GetFederationToken or GetSessionToken APIs. // -// Optionally, you can pass an IAM access policy to this operation. If you -// choose not to pass a policy, the temporary security credentials that are -// returned by the operation have the permissions that are defined in the access -// policy of the role that is being assumed. If you pass a policy to this operation, +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, // the temporary security credentials that are returned by the operation have // the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to -// further restrict the permissions for the resulting temporary security credentials. +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. // You cannot use the passed policy to grant permissions that are in excess // of those allowed by the access policy of the role that is being assumed. // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, @@ -118,9 +130,14 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // the user to call AssumeRole on the ARN of the role in the other account. 
// If the user is in the same account as the role, then you can either attach // a policy to the user (identical to the previous different account user), -// or you can add the user as a principal directly in the role's trust policy +// or you can add the user as a principal directly in the role's trust policy. +// In this case, the trust policy acts as the only resource-based policy in +// IAM, and users in the same account as the role do not need explicit permission +// to assume the role. For more information about trust policies and resource-based +// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. // -// Using MFA with AssumeRole +// Using MFA with AssumeRole // // You can optionally include multi-factor authentication (MFA) information // when you call AssumeRole. This is useful for cross-account scenarios in which @@ -131,7 +148,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // denied. The condition in a trust policy that tests for MFA authentication // might look like the following example. // -// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} // // For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) // in the IAM User Guide guide. @@ -140,27 +157,69 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // parameters. The SerialNumber value identifies the user's hardware or virtual // MFA device. The TokenCode is the time-based one-time password (TOTP) that // the MFA devices produces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { req, out := c.AssumeRoleRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRole for details on how to use this API operation. 
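A hedged sketch of the MFA-protected AssumeRole call described above, passing SerialNumber and TokenCode alongside DurationSeconds; the role ARN, session name, MFA device ARN, and TOTP code are placeholders:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
    	sess := session.Must(session.NewSession())
    	svc := sts.New(sess)

    	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
    		RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"), // placeholder
    		RoleSessionName: aws.String("demo-session"),                        // placeholder
    		DurationSeconds: aws.Int64(900),                                    // 15 minutes, the minimum
    		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),  // placeholder MFA device
    		TokenCode:       aws.String("123456"),                              // placeholder TOTP code
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
    }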
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithSAML operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRoleWithSAML method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleWithSAMLRequest method. // req, resp := client.AssumeRoleWithSAMLRequest(params) @@ -170,6 +229,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // fmt.Println(resp) // } // +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { op := &request.Operation{ Name: opAssumeRoleWithSAML, @@ -181,12 +241,13 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re input = &AssumeRoleWithSAMLInput{} } - req = c.newRequest(op, input, output) output = &AssumeRoleWithSAMLOutput{} - req.Data = output + req = c.newRequest(op, input, output) return } +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// // Returns a set of temporary security credentials for users who have been authenticated // via a SAML authentication response. This operation provides a mechanism for // tying an enterprise identity store or directory to role-based AWS access @@ -200,23 +261,32 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // an access key ID, a secret access key, and a security token. Applications // can use these temporary security credentials to sign calls to AWS services. // -// The temporary security credentials are valid for the duration that you specified -// when calling AssumeRole, or until the time specified in the SAML authentication -// response's SessionNotOnOrAfter value, whichever is shorter. The duration -// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). 
-// The default is 1 hour. +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // -// The temporary security credentials created by AssumeRoleWithSAML can be -// used to make API calls to any AWS service with the following exception: you -// cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS service's GetFederationToken or GetSessionToken APIs. // -// Optionally, you can pass an IAM access policy to this operation. If you -// choose not to pass a policy, the temporary security credentials that are -// returned by the operation have the permissions that are defined in the access -// policy of the role that is being assumed. If you pass a policy to this operation, +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, // the temporary security credentials that are returned by the operation have // the permissions that are allowed by the intersection of both the access policy -// of the role that is being assumed, and the policy that you pass. This means +// of the role that is being assumed, and the policy that you pass. This means // that both policies must grant the permission for the action to be allowed. // This gives you a way to further restrict the permissions for the resulting // temporary security credentials. You cannot use the passed policy to grant @@ -225,8 +295,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) // in the IAM User Guide. // -// Before your application can call AssumeRoleWithSAML, you must configure -// your SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. 
Additionally, // you must use AWS Identity and Access Management (IAM) to create a SAML provider // entity in your AWS account that represents your identity provider, and create // an IAM role that specifies this SAML provider in its trust policy. @@ -235,46 +305,103 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // The identity of the caller is validated by using keys in the metadata document // that is uploaded for the SAML provider entity for your identity provider. // -// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail // logs. The entry includes the value in the NameID element of the SAML assertion. // We recommend that you use a NameIDType that is not associated with any personally // identifiable information (PII). For example, you could instead use the Persistent // Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). // -// For more information, see the following resources: +// For more information, see the following resources: // -// About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. +// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. // -// Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. +// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. // -// Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. +// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. // -// Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. +// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. 
+// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { req, out := c.AssumeRoleWithSAMLRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRoleWithWebIdentity method directly -// instead. +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 
// req, resp := client.AssumeRoleWithWebIdentityRequest(params) @@ -284,6 +411,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // fmt.Println(resp) // } // +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { op := &request.Operation{ Name: opAssumeRoleWithWebIdentity, @@ -295,19 +423,20 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI input = &AssumeRoleWithWebIdentityInput{} } - req = c.newRequest(op, input, output) output = &AssumeRoleWithWebIdentityOutput{} - req.Data = output + req = c.newRequest(op, input, output) return } +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// // Returns a set of temporary security credentials for users who have been authenticated // in a mobile or web application with a web identity provider, such as Amazon // Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible // identity provider. // -// For mobile applications, we recommend that you use Amazon Cognito. You -// can use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) // and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely // identify a user and supply the user with a consistent identity throughout // the lifetime of an application. @@ -317,7 +446,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) // in the AWS SDK for iOS Developer Guide. // -// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security // credentials. Therefore, you can distribute an application (for example, on // mobile devices) that requests temporary security credentials without including // long-term AWS credentials in the application, and without deploying server-based @@ -332,22 +461,31 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // key ID, a secret access key, and a security token. Applications can use these // temporary security credentials to sign calls to AWS service APIs. // -// The credentials are valid for the duration that you specified when calling -// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to -// a maximum of 3600 seconds (1 hour). The default is 1 hour. +// By default, the temporary security credentials created by AssumeRoleWithWebIdentity +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. 
The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI operations but +// does not apply when you use those operations to create a console URL. For +// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. // -// The temporary security credentials created by AssumeRoleWithWebIdentity -// can be used to make API calls to any AWS service with the following exception: +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: // you cannot call the STS service's GetFederationToken or GetSessionToken APIs. // -// Optionally, you can pass an IAM access policy to this operation. If you -// choose not to pass a policy, the temporary security credentials that are -// returned by the operation have the permissions that are defined in the access -// policy of the role that is being assumed. If you pass a policy to this operation, +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, // the temporary security credentials that are returned by the operation have // the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to -// further restrict the permissions for the resulting temporary security credentials. +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. // You cannot use the passed policy to grant permissions that are in excess // of those allowed by the access policy of the role that is being assumed. // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, @@ -360,53 +498,121 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // the identity provider that is associated with the identity token. In other // words, the identity provider must be specified in the role's trust policy. // -// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail // logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) // of the provided Web Identity Token. We recommend that you avoid using any // personally identifiable information (PII) in this field. For example, you // could instead use a GUID or a pairwise identifier, as suggested in the OIDC // specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). // -// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity // API, see the following resources: // -// Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual) -// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). 
+// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// // -// Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). -// This interactive website lets you walk through the process of authenticating -// via Login with Amazon, Facebook, or Google, getting temporary security credentials, -// and then using those credentials to make a request to AWS. +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security +// credentials, and then using those credentials to make a request to AWS. // -// AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android -// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample apps -// that show how to invoke the identity providers, and then how to use the information -// from these providers to get and use temporary security credentials. // -// Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). -// This article discusses web identity federation and shows an example of how -// to use web identity federation to get access to content in Amazon S3. +// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample +// apps that show how to invoke the identity providers, and then how to use +// the information from these providers to get and use temporary security +// credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. 
+// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the non-AWS identity provider +// (IDP) that was asked to verify the incoming identity token could not be reached. +// This is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the non-AWS identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { req, out := c.AssumeRoleWithWebIdentityRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the // client's request for the DecodeAuthorizationMessage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DecodeAuthorizationMessage method directly -// instead. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+//
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the DecodeAuthorizationMessageRequest method.
//    req, resp := client.DecodeAuthorizationMessageRequest(params)
@@ -416,6 +622,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
//    fmt.Println(resp)
//    }
//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
	op := &request.Operation{
		Name: opDecodeAuthorizationMessage,
@@ -427,12 +634,13 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
		input = &DecodeAuthorizationMessageInput{}
	}

-	req = c.newRequest(op, input, output)
	output = &DecodeAuthorizationMessageOutput{}
-	req.Data = output
+	req = c.newRequest(op, input, output)
	return
}

+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
// Decodes additional information about the authorization status of a request
// from an encoded message returned in response to an AWS request.
//
@@ -441,51 +649,82 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
// (an HTTP 403 response). Some AWS actions additionally return an encoded message
// that can provide details about this authorization failure.
//
-// Only certain AWS actions return an encoded authorization message. The documentation
+// Only certain AWS actions return an encoded authorization message. The documentation
// for an individual action indicates whether that action returns an encoded
// message in addition to returning an HTTP code.
//
-// The message is encoded because the details of the authorization status
-// can constitute privileged information that the user who requested the action
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the action
// should not see. To decode an authorization status message, a user must be
// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
// (sts:DecodeAuthorizationMessage) action.
//
// The decoded message includes the following type of information:
//
-// Whether the request was denied due to an explicit deny or due to the absence
-// of an explicit allow. For more information, see Determining Whether a Request
-// is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
-// in the IAM User Guide.
+// * Whether the request was denied due to an explicit deny or due to the
+// absence of an explicit allow. For more information, see Determining Whether
+// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the IAM User Guide.
+//
+// * The principal who made the request.
+//
+// * The requested action.
+//
+// * The requested resource.
//
-// The principal who made the request.
+// * The values of condition keys in the context of the user's request.
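+//
+// As a hedged illustration (not part of the generated file), decoding a
+// message returned alongside an access-denied error might look like this,
+// assuming client is an initialized *sts.STS and encodedMsg holds the
+// encoded message from the failed request:
+//
+//    out, err := client.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encodedMsg),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.DecodedMessage)
+//    }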
//
-// The requested action.
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
//
-// The requested resource.
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
//
-// The values of condition keys in the context of the user's request.
+// Returned Error Codes:
+// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+// The error returned if the message passed to DecodeAuthorizationMessage was
+// invalid. This can happen if the token contains invalid characters, such as
+// linebreaks.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
	req, out := c.DecodeAuthorizationMessageRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
}

const opGetCallerIdentity = "GetCallerIdentity"

// GetCallerIdentityRequest generates a "aws/request.Request" representing the
// client's request for the GetCallerIdentity operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetCallerIdentity method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the GetCallerIdentityRequest method.
//    req, resp := client.GetCallerIdentityRequest(params)
@@ -495,6 +734,7 @@ const opGetCallerIdentity = "GetCallerIdentity"
//    fmt.Println(resp)
//    }
//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
	op := &request.Operation{
		Name: opGetCallerIdentity,
@@ -506,35 +746,60 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ
		input = &GetCallerIdentityInput{}
	}

-	req = c.newRequest(op, input, output)
	output = &GetCallerIdentityOutput{}
-	req.Data = output
+	req = c.newRequest(op, input, output)
	return
}

+// GetCallerIdentity API operation for AWS Security Token Service.
+//
// Returns details about the IAM identity whose credentials are used to call
// the API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
	req, out := c.GetCallerIdentityRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
}

const opGetFederationToken = "GetFederationToken"

// GetFederationTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetFederationToken operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetFederationToken method directly
-// instead.
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the GetFederationTokenRequest method.
//    req, resp := client.GetFederationTokenRequest(params)
@@ -544,6 +809,7 @@ const opGetFederationToken = "GetFederationToken"
//    fmt.Println(resp)
//    }
//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
	op := &request.Operation{
		Name: opGetFederationToken,
@@ -555,12 +821,13 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
		input = &GetFederationTokenInput{}
	}

-	req = c.newRequest(op, input, output)
	output = &GetFederationTokenOutput{}
-	req.Data = output
+	req = c.newRequest(op, input, output)
	return
}

+// GetFederationToken API operation for AWS Security Token Service.
+//
// Returns a set of temporary security credentials (consisting of an access
// key ID, a secret access key, and a security token) for a federated user.
// A typical use is in a proxy application that gets temporary security credentials
@@ -573,20 +840,20 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
-// If you are creating a mobile-based or browser-based app that can authenticate
+// If you are creating a mobile-based or browser-based app that can authenticate
// users using a web identity provider like Login with Amazon, Facebook, Google,
// or an OpenID Connect-compatible identity provider, we recommend that you
// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
// For more information, see Federation Through a Web-based Identity Provider
// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
//
-// The GetFederationToken action must be called by using the long-term AWS
-// security credentials of an IAM user. You can also call GetFederationToken
-// using the security credentials of an AWS root account, but we do not recommended
-// it. Instead, we recommend that you create an IAM user for the purpose of
-// the proxy application and then attach a policy to the IAM user that limits
-// federated users to only the actions and resources that they need access to.
-// For more information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
// in the IAM User Guide.
//
// The temporary security credentials that are obtained by using the long-term
@@ -595,30 +862,30 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// is 43200 seconds (12 hours).
Temporary credentials that are obtained by using // AWS root account credentials have a maximum duration of 3600 seconds (1 hour). // -// The temporary security credentials created by GetFederationToken can be -// used to make API calls to any AWS service with the following exceptions: +// The temporary security credentials created by GetFederationToken can be used +// to make API calls to any AWS service with the following exceptions: // -// You cannot use these credentials to call any IAM APIs. +// * You cannot use these credentials to call any IAM APIs. // -// You cannot call any STS APIs. +// * You cannot call any STS APIs except GetCallerIdentity. // -// Permissions +// Permissions // // The permissions for the temporary security credentials returned by GetFederationToken // are determined by a combination of the following: // -// The policy or policies that are attached to the IAM user whose credentials -// are used to call GetFederationToken. +// * The policy or policies that are attached to the IAM user whose credentials +// are used to call GetFederationToken. // -// The policy that is passed as a parameter in the call. +// * The policy that is passed as a parameter in the call. // -// The passed policy is attached to the temporary security credentials that +// The passed policy is attached to the temporary security credentials that // result from the GetFederationToken API call--that is, to the federated user. // When the federated user makes an AWS request, AWS evaluates the policy attached // to the federated user in combination with the policy or policies attached // to the IAM user whose credentials were used to call GetFederationToken. AWS -// allows the federated user's request only when both the federated user and -// the IAM user are explicitly allowed to perform the requested action. The +// allows the federated user's request only when both the federated user and +// the IAM user are explicitly allowed to perform the requested action. The // passed policy cannot grant more permissions than those that are defined in // the IAM user policy. // @@ -639,27 +906,69 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // For information about using GetFederationToken to create temporary security // credentials, see GetFederationToken—Federation Through a Custom Identity // Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. 
The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
	req, out := c.GetFederationTokenRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
}

const opGetSessionToken = "GetSessionToken"

// GetSessionTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetSessionToken operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetSessionToken method directly
-// instead.
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
//
//    // Example sending a request using the GetSessionTokenRequest method.
//    req, resp := client.GetSessionTokenRequest(params)
@@ -669,6 +978,7 @@ const opGetSessionToken = "GetSessionToken"
//    fmt.Println(resp)
//    }
//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
	op := &request.Operation{
		Name: opGetSessionToken,
@@ -680,12 +990,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
		input = &GetSessionTokenInput{}
	}

-	req = c.newRequest(op, input, output)
	output = &GetSessionTokenOutput{}
-	req.Data = output
+	req = c.newRequest(op, input, output)
	return
}

+// GetSessionToken API operation for AWS Security Token Service.
+//
// Returns a set of temporary credentials for an AWS account or IAM user. The
// credentials consist of an access key ID, a secret access key, and a security
// token. Typically, you use GetSessionToken if you want to use MFA to protect
@@ -711,17 +1022,17 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// The temporary security credentials created by GetSessionToken can be used
// to make API calls to any AWS service with the following exceptions:
//
-// You cannot call any IAM APIs unless MFA authentication information is
-// included in the request.
+// * You cannot call any IAM APIs unless MFA authentication information is
+// included in the request.
//
-// You cannot call any STS API except AssumeRole.
+// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
//
-// We recommend that you do not call GetSessionToken with root account credentials.
+// We recommend that you do not call GetSessionToken with root account credentials.
// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
// by creating one or more IAM users, giving them the necessary permissions,
// and using IAM users for everyday interaction with AWS.
//
-// The permissions associated with the temporary security credentials returned
+// The permissions associated with the temporary security credentials returned
// by GetSessionToken are based on the permissions associated with account or
// IAM user whose credentials are used to call the action. If GetSessionToken
// is called using root account credentials, the temporary credentials have
@@ -732,25 +1043,65 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// For more information about using GetSessionToken to create temporary credentials,
// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
	req, out := c.GetSessionTokenRequest(input)
-	err := req.Send()
-	return out, err
+	return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur.
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
}

type AssumeRoleInput struct {
	_ struct{} `type:"structure"`

	// The duration, in seconds, of the role session. The value can range from 900
-	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
-	// to 3600 seconds.
+	// seconds (15 minutes) up to the maximum session duration setting for the role.
+	// This setting can have a value from 1 hour to 12 hours. If you specify a value
+	// higher than this setting, the operation fails. For example, if you specify
+	// a session duration of 12 hours, but your administrator set the maximum session
+	// duration to 6 hours, your operation fails. To learn how to view the maximum
+	// value for your role, see View the Maximum Session Duration Setting for a
+	// Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+	// in the IAM User Guide.
+	//
+	// By default, the value is set to 3600 seconds.
	//
-	// This is separate from the duration of a console session that you might
-	// request using the returned credentials. The request to the federation endpoint
-	// for a console sign-in token takes a SessionDuration parameter that specifies
-	// the maximum length of the console session, separately from the DurationSeconds
-	// parameter on this API. For more information, see Creating a URL that Enables
-	// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// The DurationSeconds parameter is separate from the duration of a console
+	// session that you might request using the returned credentials. The request
+	// to the federation endpoint for a console sign-in token takes a SessionDuration
+	// parameter that specifies the maximum length of the console session. For more
+	// information, see Creating a URL that Enables Federated Users to Access the
+	// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
	// in the IAM User Guide.
	DurationSeconds *int64 `min:"900" type:"integer"`

@@ -764,10 +1115,9 @@ type AssumeRoleInput struct {
	// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
	// in the IAM User Guide.
	//
-	// The format for this parameter, as described by its regex pattern, is a string
-	// of characters consisting of upper- and lower-case alphanumeric characters
-	// with no spaces. You can also include underscores or any of the following
-	// characters: =,.@:\/-
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@:/-
	ExternalId *string `min:"2" type:"string"`

	// An IAM policy in JSON format.
@@ -789,7 +1139,7 @@ type AssumeRoleInput struct {
	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
	// and carriage return (\u000D) characters.
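+	//
+	// As a hedged illustration only (this example is not part of the generated
+	// file), a caller might scope the assumed session down with a policy like:
+	//
+	//    input := &sts.AssumeRoleInput{
+	//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
+	//        RoleSessionName: aws.String("example-session"),
+	//        Policy:          aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
+	//    }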
// - // The policy plain text must be 2048 bytes or shorter. However, an internal + // The policy plain text must be 2048 bytes or shorter. However, an internal // conversion compresses it into a packed binary format with a separate limit. // The PackedPolicySize response element indicates by percentage how close to // the upper size limit the policy is, with 100% equaling the maximum allowed @@ -797,6 +1147,8 @@ type AssumeRoleInput struct { Policy *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` // An identifier for the assumed role session. @@ -809,10 +1161,11 @@ type AssumeRoleInput struct { // requests using the temporary security credentials will expose the role session // name to the external account in their CloudTrail logs. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include underscores or any of the following - // characters: =,.@- + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field RoleSessionName *string `min:"2" type:"string" required:"true"` // The identification number of the MFA device that is associated with the user @@ -821,10 +1174,9 @@ type AssumeRoleInput struct { // The value is either the serial number for a hardware device (such as GAHT12345678) // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). // - // The format for this parameter, as described by its regex pattern, is a string - // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include underscores or any of the following - // characters: =,.@- + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- SerialNumber *string `min:"9" type:"string"` // The value provided by the MFA device, if the trust policy of the role being @@ -884,6 +1236,48 @@ func (s *AssumeRoleInput) Validate() error { return nil } +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. 
+func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. type AssumeRoleOutput struct { @@ -899,10 +1293,10 @@ type AssumeRoleOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - // As of this writing, the typical size is less than 4096 bytes, but that can - // vary. Also, future updates to AWS might require larger sizes. + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. Credentials *Credentials `type:"structure"` // A percentage value that indicates the size of the policy in packed form. @@ -921,21 +1315,48 @@ func (s AssumeRoleOutput) GoString() string { return s.String() } +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + type AssumeRoleWithSAMLInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. An expiration can also be specified in the SAML authentication - // response's SessionNotOnOrAfter value. The actual expiration time is whichever - // value is shorter. + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. // - // This is separate from the duration of a console session that you might - // request using the returned credentials. 
The request to the federation endpoint - // for a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Enabling SAML 2.0 Federated - // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -944,8 +1365,8 @@ type AssumeRoleWithSAMLInput struct { // The policy parameter is optional. If you pass a policy, the temporary security // credentials that are returned by the operation have the permissions that // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict - // the permissions for the resulting temporary security credentials. You cannot + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot // use the passed policy to grant permissions that are in excess of those allowed // by the access policy of the role that is being assumed. For more information, // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity @@ -958,7 +1379,7 @@ type AssumeRoleWithSAMLInput struct { // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal + // The policy plain text must be 2048 bytes or shorter. However, an internal // conversion compresses it into a packed binary format with a separate limit. // The PackedPolicySize response element indicates by percentage how close to // the upper size limit the policy is, with 100% equaling the maximum allowed @@ -967,16 +1388,21 @@ type AssumeRoleWithSAMLInput struct { // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes // the IdP. + // + // PrincipalArn is a required field PrincipalArn *string `min:"20" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` // The base-64 encoded SAML authentication response provided by the IdP. // - // For more information, see Configuring a Relying Party and Adding Claims - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) // in the Using IAM guide. 
+ // + // SAMLAssertion is a required field SAMLAssertion *string `min:"4" type:"string" required:"true"` } @@ -1024,6 +1450,36 @@ func (s *AssumeRoleWithSAMLInput) Validate() error { return nil } +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + // Contains the response to a successful AssumeRoleWithSAML request, including // temporary AWS credentials that can be used to make AWS requests. type AssumeRoleWithSAMLOutput struct { @@ -1040,10 +1496,10 @@ type AssumeRoleWithSAMLOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - // As of this writing, the typical size is less than 4096 bytes, but that can - // vary. Also, future updates to AWS might require larger sizes. + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. Credentials *Credentials `type:"structure"` // The value of the Issuer element of the SAML assertion. @@ -1056,7 +1512,7 @@ type AssumeRoleWithSAMLOutput struct { // // The following pseudocode shows how the hash value is calculated: // - // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" // ) ) NameQualifier *string `type:"string"` @@ -1072,7 +1528,7 @@ type AssumeRoleWithSAMLOutput struct { // element of the SAML assertion. Typical examples of the format are transient // or persistent. // - // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient // is returned as transient. If the format includes any other prefix, the format // is returned with no modifications. @@ -1089,19 +1545,75 @@ func (s AssumeRoleWithSAMLOutput) GoString() string { return s.String() } +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. 
+func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + type AssumeRoleWithWebIdentityInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. // - // This is separate from the duration of a console session that you might - // request using the returned credentials. The request to the federation endpoint - // for a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1110,8 +1622,8 @@ type AssumeRoleWithWebIdentityInput struct { // The policy parameter is optional. 
If you pass a policy, the temporary security // credentials that are returned by the operation have the permissions that // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict - // the permissions for the resulting temporary security credentials. You cannot + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot // use the passed policy to grant permissions that are in excess of those allowed // by the access policy of the role that is being assumed. For more information, // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) @@ -1123,7 +1635,7 @@ type AssumeRoleWithWebIdentityInput struct { // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal + // The policy plain text must be 2048 bytes or shorter. However, an internal // conversion compresses it into a packed binary format with a separate limit. // The PackedPolicySize response element indicates by percentage how close to // the upper size limit the policy is, with 100% equaling the maximum allowed @@ -1140,6 +1652,8 @@ type AssumeRoleWithWebIdentityInput struct { ProviderId *string `min:"4" type:"string"` // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` // An identifier for the assumed role session. Typically, you pass the name @@ -1148,16 +1662,19 @@ type AssumeRoleWithWebIdentityInput struct { // are associated with that user. This session name is included as part of the // ARN and assumed role ID in the AssumedRoleUser response element. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include underscores or any of the following - // characters: =,.@- + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field RoleSessionName *string `min:"2" type:"string" required:"true"` // The OAuth 2.0 access token or OpenID Connect ID token that is provided by // the identity provider. Your application must get this token by authenticating // the user who is using your application with a web identity provider before // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field WebIdentityToken *string `min:"4" type:"string" required:"true"` } @@ -1208,6 +1725,42 @@ func (s *AssumeRoleWithWebIdentityInput) Validate() error { return nil } +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetProviderId sets the ProviderId field's value. 
+func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + // Contains the response to a successful AssumeRoleWithWebIdentity request, // including temporary AWS credentials that can be used to make AWS requests. type AssumeRoleWithWebIdentityOutput struct { @@ -1228,10 +1781,10 @@ type AssumeRoleWithWebIdentityOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security token. // - // Note: The size of the security token that STS APIs return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - // As of this writing, the typical size is less than 4096 bytes, but that can - // vary. Also, future updates to AWS might require larger sizes. + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. Credentials *Credentials `type:"structure"` // A percentage value that indicates the size of the policy in packed form. @@ -1264,6 +1817,42 @@ func (s AssumeRoleWithWebIdentityOutput) GoString() string { return s.String() } +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + // The identifiers for the temporary security credentials that the operation // returns. type AssumedRoleUser struct { @@ -1273,11 +1862,15 @@ type AssumedRoleUser struct { // AssumeRole action. 
For more information about ARNs and how to use them in // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) // in Using IAM. + // + // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` // A unique identifier that contains the role ID and the role session name of // the role that is being assumed. The role ID is generated by AWS when the // role is created. + // + // AssumedRoleId is a required field AssumedRoleId *string `min:"2" type:"string" required:"true"` } @@ -1291,20 +1884,40 @@ func (s AssumedRoleUser) GoString() string { return s.String() } +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + // AWS credentials for API authentication. type Credentials struct { _ struct{} `type:"structure"` // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field AccessKeyId *string `min:"16" type:"string" required:"true"` // The date on which the current credentials expire. - Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field SecretAccessKey *string `type:"string" required:"true"` // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field SessionToken *string `type:"string" required:"true"` } @@ -1318,10 +1931,36 @@ func (s Credentials) GoString() string { return s.String() } +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + type DecodeAuthorizationMessageInput struct { _ struct{} `type:"structure"` // The encoded message that was returned with the response. + // + // EncodedMessage is a required field EncodedMessage *string `min:"1" type:"string" required:"true"` } @@ -1351,6 +1990,12 @@ func (s *DecodeAuthorizationMessageInput) Validate() error { return nil } +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + // A document that contains additional information about the authorization status // of a request from an encoded message that is returned in response to an AWS // request. @@ -1371,6 +2016,12 @@ func (s DecodeAuthorizationMessageOutput) GoString() string { return s.String() } +// SetDecodedMessage sets the DecodedMessage field's value. 
+func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + // Identifiers for the federated user that is associated with the credentials. type FederatedUser struct { _ struct{} `type:"structure"` @@ -1379,10 +2030,14 @@ type FederatedUser struct { // For more information about ARNs and how to use them in policies, see IAM // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) // in Using IAM. + // + // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` // The string that identifies the federated user associated with the credentials, // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field FederatedUserId *string `min:"2" type:"string" required:"true"` } @@ -1396,6 +2051,18 @@ func (s FederatedUser) GoString() string { return s.String() } +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -1439,6 +2106,24 @@ func (s GetCallerIdentityOutput) GoString() string { return s.String() } +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + type GetFederationTokenInput struct { _ struct{} `type:"structure"` @@ -1456,10 +2141,11 @@ type GetFederationTokenInput struct { // the federated user name in a resource-based policy, such as in an Amazon // S3 bucket policy. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include underscores or any of the following - // characters: =,.@- + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field Name *string `min:"2" type:"string" required:"true"` // An IAM policy in JSON format that is passed with the GetFederationToken call @@ -1483,13 +2169,13 @@ type GetFederationTokenInput struct { // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal + // The policy plain text must be 2048 bytes or shorter. However, an internal // conversion compresses it into a packed binary format with a separate limit. // The PackedPolicySize response element indicates by percentage how close to // the upper size limit the policy is, with 100% equaling the maximum allowed // size. 
// - // For more information about how permissions work, see Permissions for GetFederationToken + // For more information about how permissions work, see Permissions for GetFederationToken + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). Policy *string `min:"1" type:"string"` } @@ -1526,6 +2212,24 @@ func (s *GetFederationTokenInput) Validate() error { return nil } +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. type GetFederationTokenOutput struct { @@ -1534,10 +2238,10 @@ type GetFederationTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - // As of this writing, the typical size is less than 4096 bytes, but that can - // vary. Also, future updates to AWS might require larger sizes. + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. Credentials *Credentials `type:"structure"` // Identifiers for the federated user associated with the credentials (such @@ -1562,6 +2266,24 @@ func (s GetFederationTokenOutput) GoString() string { return s.String() } +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + type GetSessionTokenInput struct { _ struct{} `type:"structure"` @@ -1581,10 +2303,9 @@ type GetSessionTokenInput struct { // You can find the device for an IAM user by going to the AWS Management Console // and viewing the user's security credentials. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include underscores or any of the following - // characters: =,.@- + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces.
You can + // also include underscores or any of the following characters: =,.@:/- SerialNumber *string `min:"9" type:"string"` // The value provided by the MFA device, if MFA is required. If any policy requires @@ -1627,6 +2348,24 @@ func (s *GetSessionTokenInput) Validate() error { return nil } +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + // Contains the response to a successful GetSessionToken request, including // temporary AWS credentials that can be used to make AWS requests. type GetSessionTokenOutput struct { @@ -1635,10 +2374,10 @@ type GetSessionTokenOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. - // We strongly recommend that you make no assumptions about the maximum size. - // As of this writing, the typical size is less than 4096 bytes, but that can - // vary. Also, future updates to AWS might require larger sizes. + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. Credentials *Credentials `type:"structure"` } @@ -1651,3 +2390,9 @@ func (s GetSessionTokenOutput) String() string { func (s GetSessionTokenOutput) GoString() string { return s.String() } + +// SetCredentials sets the Credentials field's value. +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 00000000..ef681ab0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,72 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. 
For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com +// that maps to the US East (N. Virginia) region. Additional regions are available +// and are activated by default. For more information, see Activating and Deactivating +// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) +// in the AWS General Reference. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. To learn more about CloudTrail, including how to turn it on and find +// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 00000000..e24884ef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,73 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
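Editorial aside, not part of the vendored files: a minimal sketch of the "Using the Client" flow from the package docs above. It assumes default credentials are resolvable by the session; the region choice is arbitrary, and GetCallerIdentity is used because it takes no parameters and needs no extra IAM permissions, which makes it a cheap credential check.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// session.Must panics if the session cannot be created; fine for a sketch.
	sess := session.Must(session.NewSession())

	// New creates the STS service client; clients are safe for concurrent use.
	svc := sts.New(sess, aws.NewConfig().WithRegion("us-east-1"))

	// GetCallerIdentity reports which account and principal the credentials
	// resolve to.
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		fmt.Println("GetCallerIdentity failed:", err)
		return
	}
	fmt.Printf("account=%s arn=%s\n", aws.StringValue(out.Account), aws.StringValue(out.Arn))
}
```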
+ +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the non-AWS identity provider + // (IDP) that was asked to verify the incoming identity token could not be reached. + // This is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the non-AWS identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the policy document was too large. The error + // message describes how big the policy document is, in packed form, as a percentage + // of what the API allows. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". + // + // STS is not activated in the requested region for the account that is being + // asked to generate credentials. The account administrator must use the IAM + // console to activate STS in that region. For more information, see Activating + // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. 
+ ErrCodeRegionDisabledException = "RegionDisabledException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index c938e6ca..185c914d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package sts @@ -11,53 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/query" ) -// The AWS Security Token Service (STS) is a web service that enables you to -// request temporary, limited-privilege credentials for AWS Identity and Access -// Management (IAM) users or for users that you authenticate (federated users). -// This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// STS provides the API operation methods for making requests to +// AWS Security Token Service. See this package's package overview docs +// for details on the service. // -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient -// way to create programmatic access to STS. For example, the SDKs take care -// of cryptographically signing requests, managing errors, and retrying requests -// automatically. For information about the AWS SDKs, including how to download -// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). -// -// For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) -// in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) -// in the IAM User Guide. -// -// If you're new to AWS and need additional technical information about a specific -// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ -// (http://aws.amazon.com/documentation/). -// -// Endpoints -// -// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com -// that maps to the US East (N. Virginia) region. Additional regions are available -// and are activated by default. For more information, see Activating and Deactivating -// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) -// in the AWS General Reference. -// -// Recording API requests -// -// STS supports AWS CloudTrail, which is a service that records AWS calls for -// your AWS account and delivers log files to an Amazon S3 bucket. 
By using -// information collected by CloudTrail, you can determine what requests were -// successfully made to STS, who made the request, when it was made, and so -// on. To learn more about CloudTrail, including how to turn it on and find -// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). -//The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. +// STS methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. type STS struct { *client.Client } @@ -68,8 +27,12 @@ var initClient func(*client.Client) // Used for custom request initialization logic var initRequest func(*request.Request) -// A ServiceName is the name of the service the client will make API calls to. -const ServiceName = "sts" +// Service information constants +const ( + ServiceName = "sts" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "STS" // ServiceID is a unique identifer of a specific service. +) // New creates a new instance of the STS client with a session. // If additional configuration is needed for the client instance use the optional @@ -82,17 +45,19 @@ const ServiceName = "sts" // // Create a STS client with additional configuration // svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { - c := p.ClientConfig(ServiceName, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS { +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { svc := &STS{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, APIVersion: "2011-06-15", diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore new file mode 100644 index 00000000..9e131146 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/.gitignore @@ -0,0 +1,2 @@ +example/example +example/example.exe diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE b/vendor/github.com/bgentry/speakeasy/LICENSE new file mode 100644 index 00000000..37d60fc3 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/LICENSE @@ -0,0 +1,24 @@ +MIT License + +Copyright (c) 2017 Blake Gentry + +This license applies to the non-Windows portions of this library. The Windows +portion maintains its own Apache 2.0 license. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS new file mode 100644 index 00000000..ff177f61 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [2013] [the CloudFoundry Authors] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md new file mode 100644 index 00000000..fceda751 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/Readme.md @@ -0,0 +1,30 @@ +# Speakeasy + +This package provides cross-platform Go (#golang) helpers for taking user input +from the terminal while not echoing the input back (similar to `getpasswd`). The +package uses syscalls to avoid any dependence on cgo, and is therefore +compatible with cross-compiling. + +[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] + +## Unicode + +Multi-byte unicode characters work successfully on Mac OS X. On Windows, +however, this may be problematic (as is UTF in general on Windows). Other +platforms have not been tested. + +## License + +The code herein was not written by me, but was compiled from two separate open +source packages. Unix portions were imported from [gopass][gopass], while +Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s +[Windows terminal helpers][cf-ui-windows]. + +The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly +from the source (though I attempted to fill in the correct owner in the +boilerplate copyright notice). + +[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" +[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" +[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" +[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go new file mode 100644 index 00000000..71c1dd1b --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy.go @@ -0,0 +1,49 @@ +package speakeasy + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Ask the user to enter a password with input hidden. prompt is a string to +// display before the user's input. Returns the provided password, or an error +// if the command failed. +func Ask(prompt string) (password string, err error) { + return FAsk(os.Stdout, prompt) +} + +// FAsk is the same as Ask, except it is possible to specify the file to write +// the prompt to. If 'nil' is passed as the writer, no prompt will be written. 
+func FAsk(wr io.Writer, prompt string) (password string, err error) { + if wr != nil && prompt != "" { + fmt.Fprint(wr, prompt) // Display the prompt. + } + password, err = getPassword() + + // Carriage return after the user input. + if wr != nil { + fmt.Fprintln(wr, "") + } + return +} + +func readline() (value string, err error) { + var valb []byte + var n int + b := make([]byte, 1) + for { + // read one byte at a time so we don't accidentally read extra bytes + n, err = os.Stdin.Read(b) + if err != nil && err != io.EOF { + return "", err + } + if n == 0 || b[0] == '\n' { + break + } + valb = append(valb, b[0]) + } + + return strings.TrimSuffix(string(valb), "\r"), nil +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go new file mode 100644 index 00000000..d99fda19 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go @@ -0,0 +1,93 @@ +// based on https://code.google.com/p/gopass +// Author: johnsiilver@gmail.com (John Doak) +// +// Original code is based on code by RogerV in the golang-nuts thread: +// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package speakeasy + +import ( + "fmt" + "os" + "os/signal" + "strings" + "syscall" +) + +const sttyArg0 = "/bin/stty" + +var ( + sttyArgvEOff = []string{"stty", "-echo"} + sttyArgvEOn = []string{"stty", "echo"} +) + +// getPassword gets input hidden from the terminal from a user. This is +// accomplished by turning off terminal echo, reading input from the user and +// finally turning on terminal echo. +func getPassword() (password string, err error) { + sig := make(chan os.Signal, 10) + brk := make(chan bool) + + // File descriptors for stdin, stdout, and stderr. + fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} + + // Setup notifications of termination signals to channel sig, create a process to + // watch for these signals so we can turn back on echo if need be. + signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, + syscall.SIGTERM) + go catchSignal(fd, sig, brk) + + // Turn off the terminal echo. + pid, err := echoOff(fd) + if err != nil { + return "", err + } + + // Turn on the terminal echo and stop listening for signals. + defer signal.Stop(sig) + defer close(brk) + defer echoOn(fd) + + syscall.Wait4(pid, nil, 0, nil) + + line, err := readline() + if err == nil { + password = strings.TrimSpace(line) + } else { + err = fmt.Errorf("failed during password entry: %s", err) + } + + return password, err +} + +// echoOff turns off the terminal echo. +func echoOff(fd []uintptr) (int, error) { + pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) + if err != nil { + return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) + } + return pid, nil +} + +// echoOn turns back on the terminal echo. +func echoOn(fd []uintptr) { + // Turn on the terminal echo. + pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) + if e == nil { + syscall.Wait4(pid, nil, 0, nil) + } +} + +// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn +// terminal echo back on before the program ends. Otherwise the user is left +// with echo off on their terminal. 
+func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { + select { + case <-sig: + echoOn(fd) + os.Exit(-1) + case <-brk: + } +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go new file mode 100644 index 00000000..c2093a80 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package speakeasy + +import ( + "syscall" +) + +// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx +const ENABLE_ECHO_INPUT = 0x0004 + +func getPassword() (password string, err error) { + var oldMode uint32 + + err = syscall.GetConsoleMode(syscall.Stdin, &oldMode) + if err != nil { + return + } + + var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) + + err = setConsoleMode(syscall.Stdin, newMode) + defer setConsoleMode(syscall.Stdin, oldMode) + if err != nil { + return + } + + return readline() +} + +func setConsoleMode(console syscall.Handle, mode uint32) (err error) { + dll := syscall.MustLoadDLL("kernel32") + proc := dll.MustFindProc("SetConsoleMode") + r, _, err := proc.Call(uintptr(console), uintptr(mode)) + + if r == 0 { + return err + } + return nil +} diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml new file mode 100644 index 00000000..102fb9a6 --- /dev/null +++ b/vendor/github.com/blang/semver/.travis.yml @@ -0,0 +1,21 @@ +language: go +matrix: + include: + - go: 1.4.3 + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 + - go: tip + allow_failures: + - go: tip +install: +- go get golang.org/x/tools/cmd/cover +- go get github.com/mattn/goveralls +script: +- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci + -repotoken $COVERALLS_TOKEN +- echo "Build examples" ; cd examples && go build +- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) +env: + global: + secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE new file mode 100644 index 00000000..5ba5c86f --- /dev/null +++ b/vendor/github.com/blang/semver/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
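Editorial aside before the semver files continue: the speakeasy package vendored above reads a line with echo disabled, by shelling out to stty on Unix (fork/exec with shared stdio) and clearing ENABLE_ECHO_INPUT via SetConsoleMode on Windows. A minimal usage sketch, with an arbitrary prompt string:

```go
package main

import (
	"fmt"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Ask writes the prompt to stdout and reads input with echo disabled;
	// FAsk(nil, "") would read without writing any prompt at all.
	password, err := speakeasy.Ask("Enter your password: ")
	if err != nil {
		fmt.Println("password entry failed:", err)
		return
	}
	fmt.Printf("read %d characters\n", len(password))
}
```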
+ + diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md new file mode 100644 index 00000000..08b2e4a3 --- /dev/null +++ b/vendor/github.com/blang/semver/README.md @@ -0,0 +1,194 @@ +semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) +====== + +semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. + +Usage +----- +```bash +$ go get github.com/blang/semver +``` +Note: Always vendor your dependencies or fix on a specific version tag. + +```go +import "github.com/blang/semver" +v1, err := semver.Make("1.0.0-beta") +v2, err := semver.Make("2.0.0-beta") +v1.Compare(v2) +``` + +Also check the [GoDocs](http://godoc.org/github.com/blang/semver). + +Why should I use this lib? +----- + +- Fully spec compatible +- No reflection +- No regex +- Fully tested (Coverage >99%) +- Readable parsing/validation errors +- Fast (See [Benchmarks](#benchmarks)) +- Only Stdlib +- Uses values instead of pointers +- Many features, see below + + +Features +----- + +- Parsing and validation at all levels +- Comparator-like comparisons +- Compare Helper Methods +- InPlace manipulation +- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` +- Wildcards `>=1.x`, `<=2.5.x` +- Sortable (implements sort.Interface) +- database/sql compatible (sql.Scanner/Valuer) +- encoding/json compatible (json.Marshaler/Unmarshaler) + +Ranges +------ + +A `Range` is a set of conditions which specify which versions satisfy the range. + +A condition is composed of an operator and a version. The supported operators are: + +- `<1.0.0` Less than `1.0.0` +- `<=1.0.0` Less than or equal to `1.0.0` +- `>1.0.0` Greater than `1.0.0` +- `>=1.0.0` Greater than or equal to `1.0.0` +- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` +- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. + +Note that spaces between the operator and the version will be gracefully tolerated. + +A `Range` can consist of multiple ranges separated by space: + +Ranges can be linked by logical AND: + + - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` + - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` + +Ranges can also be linked by logical OR: + + - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` + +AND has a higher precedence than OR. It's not possible to use brackets.
+ +Ranges can be combined by both AND and OR: + + - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` + +Range usage: + +```go +v, err := semver.Parse("1.2.3") +r, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") +if r(v) { + // valid +} +``` + +Example +----- + +Have a look at full examples in [examples/main.go](examples/main.go) + +```go +import "github.com/blang/semver" + +v, err := semver.Make("0.0.1-alpha.preview+123.github") +fmt.Printf("Major: %d\n", v.Major) +fmt.Printf("Minor: %d\n", v.Minor) +fmt.Printf("Patch: %d\n", v.Patch) +fmt.Printf("Pre: %s\n", v.Pre) +fmt.Printf("Build: %s\n", v.Build) + +// Prerelease versions array +if len(v.Pre) > 0 { + fmt.Println("Prerelease versions:") + for i, pre := range v.Pre { + fmt.Printf("%d: %q\n", i, pre) + } +} + +// Build meta data array +if len(v.Build) > 0 { + fmt.Println("Build meta data:") + for i, build := range v.Build { + fmt.Printf("%d: %q\n", i, build) + } +} + +v001, err := semver.Make("0.0.1") +// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE +v001.GT(v) == true +v.LT(v001) == true +v.GTE(v) == true +v.LTE(v) == true + +// Or use v.Compare(v2) for comparisons (-1, 0, 1): +v001.Compare(v) == 1 +v.Compare(v001) == -1 +v.Compare(v) == 0 + +// Manipulate Version in place: +v.Pre[0], err = semver.NewPRVersion("beta") +if err != nil { + fmt.Printf("Error parsing pre release version: %q", err) +} + +fmt.Println("\nValidate versions:") +v.Build[0] = "?" + +err = v.Validate() +if err != nil { + fmt.Printf("Validation failed: %s\n", err) +} +``` + + +Benchmarks +----- + + BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op + BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op + BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op + BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op + BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op + BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op + BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op + BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op + BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op + BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op + BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op + BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op + BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op + BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op + BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op + BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op + BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op + BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op + BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op + BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op + +See benchmark cases at [semver_test.go](semver_test.go) + + +Motivation +----- + +I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like. + + +Contribution +----- + +Feel free to make a pull request. For bigger changes, create an issue first to discuss it. + + +License +----- + +See [LICENSE](LICENSE) file.
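Editor's addendum to the README above: a runnable sketch of the range behavior it describes. Illustrative only; the version strings are arbitrary, and wildcard expansion follows the rules documented in range.go below (e.g. `>=1.x` becomes `>=1.0.0` and `<=2.5.x` becomes `<2.6.0`).

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// AND binds tighter than OR; brackets are not supported.
	r := semver.MustParseRange(">=1.0.0 <2.0.0 || >=3.0.0")
	fmt.Println(r(semver.MustParse("1.1.1"))) // true
	fmt.Println(r(semver.MustParse("2.0.0"))) // false

	// Wildcards: ">=1.x <=2.5.x" expands to ">=1.0.0 <2.6.0".
	w := semver.MustParseRange(">=1.x <=2.5.x")
	fmt.Println(w(semver.MustParse("2.5.9"))) // true
	fmt.Println(w(semver.MustParse("2.6.0"))) // false
}
```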
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go new file mode 100644 index 00000000..a74bf7c4 --- /dev/null +++ b/vendor/github.com/blang/semver/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json new file mode 100644 index 00000000..45246f69 --- /dev/null +++ b/vendor/github.com/blang/semver/package.json @@ -0,0 +1,17 @@ +{ + "author": "blang", + "bugs": { + "URL": "https://github.com/blang/semver/issues", + "url": "https://github.com/blang/semver/issues" + }, + "gx": { + "dvcsimport": "github.com/blang/semver" + }, + "gxVersion": "0.10.0", + "language": "go", + "license": "MIT", + "name": "semver", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "3.5.0" +} + diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go new file mode 100644 index 00000000..fca406d4 --- /dev/null +++ b/vendor/github.com/blang/semver/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// r, err := semver.ParseRange(">1.0.0 <2.0.0") +// r(semver.MustParse("1.1.1")) // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. +func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned.
+// +// Valid ranges are: +// - "<1.0.0" +// - "<=1.0.0" +// - ">1.0.0" +// - ">=1.0.0" +// - "1.0.0", "=1.0.0", "==1.0.0" +// - "!1.0.0", "!=1.0.0" +// +// A Range can consist of multiple ranges separated by space: +// Ranges can be linked by logical AND: +// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" +// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 +// +// Ranges can also be linked by logical OR: +// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" +// +// AND has a higher precedence than OR. It's not possible to use brackets. +// +// Ranges can be combined by both AND and OR +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// Checks for invalid positions of the operator and returns an +// error if found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes a slice of 2: operator and version +// and builds a versionRange, otherwise an error. 
+func buildVersionRange(opStr, vStr string) (*versionRange, error) { + c := parseComparator(opStr) + if c == nil { + return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) + } + v, err := Parse(vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) + } + + return &versionRange{ + v: v, + c: c, + }, nil + +} + +// inArray checks if a byte is contained in an array of bytes +func inArray(s byte, list []byte) bool { + for _, el := range list { + if el == s { + return true + } + } + return false +} + +// splitAndTrim splits a range string by spaces and cleans whitespaces +func splitAndTrim(s string) (result []string) { + last := 0 + var lastChar byte + excludeFromSplit := []byte{'>', '<', '='} + for i := 0; i < len(s); i++ { + if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { + if last < i-1 { + result = append(result, s[last:i]) + } + last = i + 1 + } else if s[i] != ' ' { + lastChar = s[i] + } + } + if last < len(s)-1 { + result = append(result, s[last:]) + } + + for i, v := range result { + result[i] = strings.Replace(v, " ", "", -1) + } + + // parts := strings.Split(s, " ") + // for _, x := range parts { + // if s := strings.TrimSpace(x); len(s) != 0 { + // result = append(result, s) + // } + // } + return +} + +// splitComparatorVersion splits the comparator from the version. +// Input must be free of leading or trailing spaces. +func splitComparatorVersion(s string) (string, string, error) { + i := strings.IndexFunc(s, unicode.IsDigit) + if i == -1 { + return "", "", fmt.Errorf("Could not get version from string: %q", s) + } + return strings.TrimSpace(s[0:i]), s[i:], nil +} + +// getWildcardType will return the type of wildcard that the +// passed version contains +func getWildcardType(vStr string) wildcardType { + parts := strings.Split(vStr, ".") + nparts := len(parts) + wildcard := parts[nparts-1] + + possibleWildcardType := wildcardTypefromInt(nparts) + if wildcard == "x" { + return possibleWildcardType + } + + return noneWildcard +} + +// createVersionFromWildcard will convert a wildcard version +// into a regular version, replacing 'x's with '0's, handling +// special cases like '1.x.x' and '1.x' +func createVersionFromWildcard(vStr string) string { + // handle 1.x.x + vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) + vStr2 = strings.Replace(vStr2, ".x", ".0", 1) + parts := strings.Split(vStr2, ".") + + // handle 1.x + if len(parts) == 2 { + return vStr2 + ".0" + } + + return vStr2 +} + +// incrementMajorVersion will increment the major version +// of the passed version +func incrementMajorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[0]) + if err != nil { + return "", err + } + parts[0] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// incrementMinorVersion will increment the minor version +// of the passed version +func incrementMinorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[1]) + if err != nil { + return "", err + } + parts[1] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// expandWildcardVersion will expand wildcards inside versions +// following these rules: +// +// * when dealing with patch wildcards: +// >= 1.2.x will become >= 1.2.0 +// <= 1.2.x will become < 1.3.0 +// > 1.2.x will become >= 1.3.0 +// < 1.2.x will become < 1.2.0 +// != 1.2.x will become < 1.2.0 >=
1.3.0 + // + // * when dealing with minor wildcards: + // >= 1.x will become >= 1.0.0 + // <= 1.x will become < 2.0.0 + // > 1.x will become >= 2.0.0 + // < 1.x will become < 1.0.0 + // != 1.x will become < 1.0.0 >= 2.0.0 + // + // * when dealing with wildcards without + // version operator: + // 1.2.x will become >= 1.2.0 < 1.3.0 + // 1.x will become >= 1.0.0 < 2.0.0 +func expandWildcardVersion(parts [][]string) ([][]string, error) { + var expandedParts [][]string + for _, p := range parts { + var newParts []string + for _, ap := range p { + if strings.Index(ap, "x") != -1 { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + + versionWildcardType := getWildcardType(vStr) + flatVersion := createVersionFromWildcard(vStr) + + var resultOperator string + var shouldIncrementVersion bool + switch opStr { + case ">": + resultOperator = ">=" + shouldIncrementVersion = true + case ">=": + resultOperator = ">=" + case "<": + resultOperator = "<" + case "<=": + resultOperator = "<" + shouldIncrementVersion = true + case "", "=", "==": + newParts = append(newParts, ">="+flatVersion) + resultOperator = "<" + shouldIncrementVersion = true + case "!=", "!": + newParts = append(newParts, "<"+flatVersion) + resultOperator = ">=" + shouldIncrementVersion = true + } + + var resultVersion string + if shouldIncrementVersion { + switch versionWildcardType { + case patchWildcard: + resultVersion, _ = incrementMinorVersion(flatVersion) + case minorWildcard: + resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go new file mode 100644 index 00000000..8ee0842e --- /dev/null +++ b/vendor/github.com/blang/semver/semver.go @@ -0,0 +1,418 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string // No Precedence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) + + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...)
+		}
+	}
+
+	if len(v.Build) > 0 {
+		b = append(b, '+')
+		b = append(b, v.Build[0]...)
+
+		for _, build := range v.Build[1:] {
+			b = append(b, '.')
+			b = append(b, build...)
+		}
+	}
+
+	return string(b)
+}
+
+// Equals checks if v is equal to o.
+func (v Version) Equals(o Version) bool {
+	return (v.Compare(o) == 0)
+}
+
+// EQ checks if v is equal to o.
+func (v Version) EQ(o Version) bool {
+	return (v.Compare(o) == 0)
+}
+
+// NE checks if v is not equal to o.
+func (v Version) NE(o Version) bool {
+	return (v.Compare(o) != 0)
+}
+
+// GT checks if v is greater than o.
+func (v Version) GT(o Version) bool {
+	return (v.Compare(o) == 1)
+}
+
+// GTE checks if v is greater than or equal to o.
+func (v Version) GTE(o Version) bool {
+	return (v.Compare(o) >= 0)
+}
+
+// GE checks if v is greater than or equal to o.
+func (v Version) GE(o Version) bool {
+	return (v.Compare(o) >= 0)
+}
+
+// LT checks if v is less than o.
+func (v Version) LT(o Version) bool {
+	return (v.Compare(o) == -1)
+}
+
+// LTE checks if v is less than or equal to o.
+func (v Version) LTE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// LE checks if v is less than or equal to o.
+func (v Version) LE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// Compare compares Versions v to o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v Version) Compare(o Version) int {
+	if v.Major != o.Major {
+		if v.Major > o.Major {
+			return 1
+		}
+		return -1
+	}
+	if v.Minor != o.Minor {
+		if v.Minor > o.Minor {
+			return 1
+		}
+		return -1
+	}
+	if v.Patch != o.Patch {
+		if v.Patch > o.Patch {
+			return 1
+		}
+		return -1
+	}
+
+	// Quick comparison if a version has no prerelease versions
+	if len(v.Pre) == 0 && len(o.Pre) == 0 {
+		return 0
+	} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
+		return 1
+	} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
+		return -1
+	}
+
+	i := 0
+	for ; i < len(v.Pre) && i < len(o.Pre); i++ {
+		if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
+			continue
+		} else if comp == 1 {
+			return 1
+		} else {
+			return -1
+		}
+	}
+
+	// If all compared prerelease versions are equal, the version with
+	// additional prerelease versions has higher precedence
+	if i == len(v.Pre) && i == len(o.Pre) {
+		return 0
+	} else if i == len(v.Pre) && i < len(o.Pre) {
+		return -1
+	} else {
+		return 1
+	}
+}
+
+// Validate validates v and returns an error in case of an invalid version
+func (v Version) Validate() error {
+	// Major, Minor, Patch already validated using uint64
+
+	for _, pre := range v.Pre {
+		if !pre.IsNum { // Numeric prerelease versions already uint64
+			if len(pre.VersionStr) == 0 {
+				return fmt.Errorf("Prerelease cannot be empty %q", pre.VersionStr)
+			}
+			if !containsOnly(pre.VersionStr, alphanum) {
+				return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
+			}
+		}
+	}
+
+	for _, build := range v.Build {
+		if len(build) == 0 {
+			return fmt.Errorf("Build meta data cannot be empty %q", build)
+		}
+		if !containsOnly(build, alphanum) {
+			return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
+		}
+	}
+
+	return nil
+}
+
+// New is an alias for Parse that returns a pointer: it parses the version
+// string and returns a pointer to a validated Version, or an error
+func New(s string) (vp *Version, err error) {
+	v, err := Parse(s)
+	vp = &v
+	return
+}
+
+// Make is an alias for Parse: it parses the version string and returns a
+// validated Version, or an error
+func Make(s string) (Version, error) {
+	return Parse(s)
+}
+
+// ParseTolerant allows certain version specifications that do not strictly
+// adhere to semver specs to be parsed by this library. It does so by
+// normalizing versions before passing them to Parse(). It currently trims
+// spaces, removes a "v" prefix, and adds a 0 patch number to versions with
+// only major and minor components specified.
+func ParseTolerant(s string) (Version, error) {
+	s = strings.TrimSpace(s)
+	s = strings.TrimPrefix(s, "v")
+
+	// Split into major.minor.(patch+pr+meta)
+	parts := strings.SplitN(s, ".", 3)
+	if len(parts) < 3 {
+		if strings.ContainsAny(parts[len(parts)-1], "+-") {
+			return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data")
+		}
+		for len(parts) < 3 {
+			parts = append(parts, "0")
+		}
+		s = strings.Join(parts, ".")
+	}
+
+	return Parse(s)
+}
+
+// Parse parses a version string and returns a validated Version or an error
+func Parse(s string) (Version, error) {
+	if len(s) == 0 {
+		return Version{}, errors.New("Version string empty")
+	}
+
+	// Split into major.minor.(patch+pr+meta)
+	parts := strings.SplitN(s, ".", 3)
+	if len(parts) != 3 {
+		return Version{}, errors.New("No Major.Minor.Patch elements found")
+	}
+
+	// Major
+	if !containsOnly(parts[0], numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
+	}
+	if hasLeadingZeroes(parts[0]) {
+		return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
+	}
+	major, err := strconv.ParseUint(parts[0], 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	// Minor
+	if !containsOnly(parts[1], numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
+	}
+	if hasLeadingZeroes(parts[1]) {
+		return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
+	}
+	minor, err := strconv.ParseUint(parts[1], 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	v := Version{}
+	v.Major = major
+	v.Minor = minor
+
+	var build, prerelease []string
+	patchStr := parts[2]
+
+	if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
+		build = strings.Split(patchStr[buildIndex+1:], ".")
+		patchStr = patchStr[:buildIndex]
+	}
+
+	if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
+		prerelease = strings.Split(patchStr[preIndex+1:], ".")
+		patchStr = patchStr[:preIndex]
+	}
+
+	if !containsOnly(patchStr, numbers) {
+		return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
+	}
+	if hasLeadingZeroes(patchStr) {
+		return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
+	}
+	patch, err := strconv.ParseUint(patchStr, 10, 64)
+	if err != nil {
+		return Version{}, err
+	}
+
+	v.Patch = patch
+
+	// Prerelease
+	for _, prstr := range prerelease {
+		parsedPR, err := NewPRVersion(prstr)
+		if err != nil {
+			return Version{}, err
+		}
+		v.Pre = append(v.Pre, parsedPR)
+	}
+
+	// Build meta data
+	for _, str := range build {
+		if len(str) == 0 {
+			return Version{}, errors.New("Build meta data is empty")
+		}
+		if !containsOnly(str, alphanum) {
+			return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
+		}
+		v.Build = append(v.Build, str)
+	}
+
+	return v, nil
+}
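A brief sketch of the practical difference between Parse and ParseTolerant as defined above. The inputs are illustrative; the expected outputs follow the normalization rules documented in the comments:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// Strict parsing requires a full Major.Minor.Patch version.
	v, err := semver.Parse("1.2.3-alpha.1+build.11")
	fmt.Println(v, err) // 1.2.3-alpha.1+build.11 <nil>

	// Tolerant parsing accepts a leading "v" and a missing patch number.
	v, err = semver.ParseTolerant("v1.2")
	fmt.Println(v, err) // 1.2.0 <nil>

	// Strict Parse rejects the same input.
	_, err = semver.Parse("v1.2")
	fmt.Println(err != nil) // true
}
```

+// MustParse is like Parse but panics if the version cannot be parsed.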
+func MustParse(s string) Version {
+	v, err := Parse(s)
+	if err != nil {
+		panic(`semver: Parse(` + s + `): ` + err.Error())
+	}
+	return v
+}
+
+// PRVersion represents a PreRelease Version
+type PRVersion struct {
+	VersionStr string
+	VersionNum uint64
+	IsNum      bool
+}
+
+// NewPRVersion creates a new valid prerelease version
+func NewPRVersion(s string) (PRVersion, error) {
+	if len(s) == 0 {
+		return PRVersion{}, errors.New("Prerelease is empty")
+	}
+	v := PRVersion{}
+	if containsOnly(s, numbers) {
+		if hasLeadingZeroes(s) {
+			return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
+		}
+		num, err := strconv.ParseUint(s, 10, 64)
+
+		// Might never be hit, but just in case
+		if err != nil {
+			return PRVersion{}, err
+		}
+		v.VersionNum = num
+		v.IsNum = true
+	} else if containsOnly(s, alphanum) {
+		v.VersionStr = s
+		v.IsNum = false
+	} else {
+		return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
+	}
+	return v, nil
+}
+
+// IsNumeric checks if the prerelease version is numeric
+func (v PRVersion) IsNumeric() bool {
+	return v.IsNum
+}
+
+// Compare compares two PreRelease Versions v and o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v PRVersion) Compare(o PRVersion) int {
+	if v.IsNum && !o.IsNum {
+		return -1
+	} else if !v.IsNum && o.IsNum {
+		return 1
+	} else if v.IsNum && o.IsNum {
+		if v.VersionNum == o.VersionNum {
+			return 0
+		} else if v.VersionNum > o.VersionNum {
+			return 1
+		} else {
+			return -1
+		}
+	} else { // both are alphanumeric
+		if v.VersionStr == o.VersionStr {
+			return 0
+		} else if v.VersionStr > o.VersionStr {
+			return 1
+		} else {
+			return -1
+		}
+	}
+}
+
+// String returns the string representation of the prerelease version
+func (v PRVersion) String() string {
+	if v.IsNum {
+		return strconv.FormatUint(v.VersionNum, 10)
+	}
+	return v.VersionStr
+}
+
+func containsOnly(s string, set string) bool {
+	return strings.IndexFunc(s, func(r rune) bool {
+		return !strings.ContainsRune(set, r)
+	}) == -1
+}
+
+func hasLeadingZeroes(s string) bool {
+	return len(s) > 1 && s[0] == '0'
+}
+
+// NewBuildVersion creates a new valid build version
+func NewBuildVersion(s string) (string, error) {
+	if len(s) == 0 {
+		return "", errors.New("Build version is empty")
+	}
+	if !containsOnly(s, alphanum) {
+		return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
+	}
+	return s, nil
+}
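The precedence rules implemented by Version.Compare and PRVersion.Compare above can be summarized with a short sketch. It assumes only the API shown in this file; the expected results follow SemVer 2.0.0 precedence (prereleases sort before releases, build metadata is ignored):

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	a := semver.MustParse("1.0.0-alpha")
	b := semver.MustParse("1.0.0-alpha.1")
	c := semver.MustParse("1.0.0")

	fmt.Println(a.LT(b))      // true: the shorter prerelease list compares lower
	fmt.Println(b.LT(c))      // true: any prerelease sorts before the release
	fmt.Println(a.Compare(c)) // -1

	// Build metadata has no precedence, so these compare equal.
	d := semver.MustParse("1.0.0+build.5")
	fmt.Println(c.EQ(d)) // true
}
```

diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go
new file mode 100644
index 00000000..e18f8808
--- /dev/null
+++ b/vendor/github.com/blang/semver/sort.go
@@ -0,0 +1,28 @@
+package semver
+
+import (
+	"sort"
+)
+
+// Versions represents multiple versions.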
+type Versions []Version
+
+// Len returns the number of versions in the collection
+func (s Versions) Len() int {
+	return len(s)
+}
+
+// Swap swaps two versions inside the collection by their indices
+func (s Versions) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Less checks if the version at index i is less than the version at index j
+func (s Versions) Less(i, j int) bool {
+	return s[i].LT(s[j])
+}
+
+// Sort sorts a slice of versions
+func Sort(versions []Version) {
+	sort.Sort(Versions(versions))
+}
diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go
new file mode 100644
index 00000000..eb4d8026
--- /dev/null
+++ b/vendor/github.com/blang/semver/sql.go
@@ -0,0 +1,30 @@
+package semver
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+// Scan implements the database/sql.Scanner interface.
+func (v *Version) Scan(src interface{}) (err error) {
+	var str string
+	switch src := src.(type) {
+	case string:
+		str = src
+	case []byte:
+		str = string(src)
+	default:
+		return fmt.Errorf("Version.Scan: cannot convert %T to string", src)
+	}
+
+	// Report parse failures to the caller instead of silently leaving v unchanged.
+	t, err := Parse(str)
+	if err != nil {
+		return err
+	}
+	*v = t
+
+	return nil
+}
+
+// Value implements the database/sql/driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+	return v.String(), nil
+}
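A minimal sketch of the Sort helper defined above and the Less ordering it delegates to LT; the version numbers are illustrative:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	vs := []semver.Version{
		semver.MustParse("1.10.0"),
		semver.MustParse("1.2.0"),
		semver.MustParse("1.2.0-rc.1"),
	}

	// Sort uses the sort.Interface implementation on Versions, so components
	// compare numerically (1.2 < 1.10) and prereleases sort before releases.
	semver.Sort(vs)
	fmt.Println(vs) // [1.2.0-rc.1 1.2.0 1.10.0]
}
```

diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE
deleted file mode 100644
index 37ec93a1..00000000
--- a/vendor/github.com/go-ini/ini/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship.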
For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index c52f63a4..00000000 --- a/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,590 +0,0 @@ -ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini) -=== - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read and write functionality in Go. - -[简体中文](README_ZH.md) - -## Feature - -- Load multiple data sources(`[]byte` or file) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. 
-- Keep sections and keys in order as you parse and save. - -## Installation - -To use a tagged revision: - - go get gopkg.in/ini.v1 - -To use with latest changes: - - go get github.com/go-ini/ini - -### Testing - -If you want to test on your machine, please apply `-t` flag: - - go get -t gopkg.in/ini.v1 - -## Getting Started - -### Loading from data sources - -A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error. - -```go -cfg, err := ini.Load([]byte("raw data"), "filename") -``` - -Or start with an empty object: - -```go -cfg := ini.Empty() -``` - -When you cannot decide how many data sources to load at the beginning, you still able to **Append()** them later. - -```go -err := cfg.Append("other file", []byte("other raw data")) -``` - -### Working with sections - -To get a section, you would need to: - -```go -section, err := cfg.GetSection("section name") -``` - -For a shortcut for default section, just give an empty string as name: - -```go -section, err := cfg.GetSection("") -``` - -When you're pretty sure the section exists, following code could make your life easier: - -```go -section := cfg.Section("") -``` - -What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. - -To create a new section: - -```go -err := cfg.NewSection("new section") -``` - -To get a list of sections or section names: - -```go -sections := cfg.Sections() -names := cfg.SectionStrings() -``` - -### Working with keys - -To get a key under a section: - -```go -key, err := cfg.Section("").GetKey("key name") -``` - -Same rule applies to key operations: - -```go -key := cfg.Section("").Key("key name") -``` - -To check if a key exists: - -```go -yes := cfg.Section("").HasKey("key name") -``` - -To create a new key: - -```go -err := cfg.Section("").NewKey("name", "value") -``` - -To get a list of keys or key names: - -```go -keys := cfg.Section("").Keys() -names := cfg.Section("").KeyStrings() -``` - -To get a clone hash of keys and corresponding values: - -```go -hash := cfg.GetSection("").KeysHash() -``` - -### Working with values - -To get a string value: - -```go -val := cfg.Section("").Key("key name").String() -``` - -To validate key value on the fly: - -```go -val := cfg.Section("").Key("key name").Validate(func(in string) string { - if len(in) == 0 { - return "default" - } - return in -}) -``` - -If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): - -```go -val := cfg.Section("").Key("key name").Value() -``` - -To check if raw value exists: - -```go -yes := cfg.Section("").HasValue("test value") -``` - -To get value with types: - -```go -// For boolean values: -// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On -// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off -v, err = cfg.Section("").Key("BOOL").Bool() -v, err = cfg.Section("").Key("FLOAT64").Float64() -v, err = cfg.Section("").Key("INT").Int() -v, err = cfg.Section("").Key("INT64").Int64() -v, err = cfg.Section("").Key("UINT").Uint() -v, err = cfg.Section("").Key("UINT64").Uint64() -v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) -v, err = cfg.Section("").Key("TIME").Time() // RFC3339 - -v = cfg.Section("").Key("BOOL").MustBool() -v = 
cfg.Section("").Key("FLOAT64").MustFloat64() -v = cfg.Section("").Key("INT").MustInt() -v = cfg.Section("").Key("INT64").MustInt64() -v = cfg.Section("").Key("UINT").MustUint() -v = cfg.Section("").Key("UINT64").MustUint64() -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) -v = cfg.Section("").Key("TIME").MustTime() // RFC3339 - -// Methods start with Must also accept one argument for default value -// when key not found or fail to parse value to given type. -// Except method MustString, which you have to pass a default value. - -v = cfg.Section("").Key("String").MustString("default") -v = cfg.Section("").Key("BOOL").MustBool(true) -v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) -v = cfg.Section("").Key("INT").MustInt(10) -v = cfg.Section("").Key("INT64").MustInt64(99) -v = cfg.Section("").Key("UINT").MustUint(3) -v = cfg.Section("").Key("UINT64").MustUint64(6) -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) -v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 -``` - -What if my value is three-line long? - -```ini -[advance] -ADDRESS = """404 road, -NotFound, State, 5000 -Earth""" -``` - -Not a problem! - -```go -cfg.Section("advance").Key("ADDRESS").String() - -/* --- start --- -404 road, -NotFound, State, 5000 -Earth ------- end --- */ -``` - -That's cool, how about continuation lines? - -```ini -[advance] -two_lines = how about \ - continuation lines? -lots_of_lines = 1 \ - 2 \ - 3 \ - 4 -``` - -Piece of cake! - -```go -cfg.Section("advance").Key("two_lines").String() // how about continuation lines? -cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 -``` - -Note that single quotes around values will be stripped: - -```ini -foo = "some value" // foo: some value -bar = 'some value' // bar: some value -``` - -That's all? Hmm, no. - -#### Helper methods of working with values - -To get value with given candidates: - -```go -v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) -v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) -v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) -v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) -v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) -v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) -v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) -v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 -``` - -Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. 
- -To validate value in a given range: - -```go -vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) -vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) -vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) -vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) -vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) -vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) -vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 -``` - -To auto-split value into slice: - -```go -vals = cfg.Section("").Key("STRINGS").Strings(",") -vals = cfg.Section("").Key("FLOAT64S").Float64s(",") -vals = cfg.Section("").Key("INTS").Ints(",") -vals = cfg.Section("").Key("INT64S").Int64s(",") -vals = cfg.Section("").Key("UINTS").Uints(",") -vals = cfg.Section("").Key("UINT64S").Uint64s(",") -vals = cfg.Section("").Key("TIMES").Times(",") -``` - -### Save your configuration - -Finally, it's time to save your configuration to somewhere. - -A typical way to save configuration is writing it to a file: - -```go -// ... -err = cfg.SaveTo("my.ini") -err = cfg.SaveToIndent("my.ini", "\t") -``` - -Another way to save is writing to a `io.Writer` interface: - -```go -// ... -cfg.WriteTo(writer) -cfg.WriteToIndent(writer, "\t") -``` - -## Advanced Usage - -### Recursive Values - -For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. - -```ini -NAME = ini - -[author] -NAME = Unknwon -GITHUB = https://github.com/%(NAME)s - -[package] -FULL_NAME = github.com/go-ini/%(NAME)s -``` - -```go -cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon -cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini -``` - -### Parent-child Sections - -You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. - -```ini -NAME = ini -VERSION = v1 -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s - -[package] -CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] -``` - -```go -cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 -``` - -### Auto-increment Key Names - -If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. - -```ini -[features] --: Support read/write comments of keys and sections --: Support auto-increment of key names --: Support load multiple files to overwrite key values -``` - -```go -cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} -``` - -### Map To Struct - -Want more objective way to play with INI? Cool. - -```ini -Name = Unknwon -age = 21 -Male = true -Born = 1993-01-01T20:17:05Z - -[Note] -Content = Hi is a good man! -Cities = HangZhou, Boston -``` - -```go -type Note struct { - Content string - Cities []string -} - -type Person struct { - Name string - Age int `ini:"age"` - Male bool - Born time.Time - Note - Created time.Time `ini:"-"` -} - -func main() { - cfg, err := ini.Load("path/to/ini") - // ... - p := new(Person) - err = cfg.MapTo(p) - // ... - - // Things can be simpler. - err = ini.MapTo(p, "path/to/ini") - // ... - - // Just map a section? Fine. 
- n := new(Note) - err = cfg.Section("Note").MapTo(n) - // ... -} -``` - -Can I have default value for field? Absolutely. - -Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. - -```go -// ... -p := &Person{ - Name: "Joe", -} -// ... -``` - -It's really cool, but what's the point if you can't give me my file back from struct? - -### Reflect From Struct - -Why not? - -```go -type Embeded struct { - Dates []time.Time `delim:"|"` - Places []string - None []int -} - -type Author struct { - Name string `ini:"NAME"` - Male bool - Age int - GPA float64 - NeverMind string `ini:"-"` - *Embeded -} - -func main() { - a := &Author{"Unknwon", true, 21, 2.8, "", - &Embeded{ - []time.Time{time.Now(), time.Now()}, - []string{"HangZhou", "Boston"}, - []int{}, - }} - cfg := ini.Empty() - err = ini.ReflectFrom(cfg, a) - // ... -} -``` - -So, what do I get? - -```ini -NAME = Unknwon -Male = true -Age = 21 -GPA = 2.8 - -[Embeded] -Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -Places = HangZhou,Boston -None = -``` - -#### Name Mapper - -To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. - -There are 2 built-in name mappers: - -- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. -- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. - -To use them: - -```go -type Info struct { - PackageName string -} - -func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) - // ... - - cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) - // ... - info := new(Info) - cfg.NameMapper = ini.AllCapsUnderscore - err = cfg.MapTo(info) - // ... -} -``` - -Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. - -#### Other Notes On Map/Reflect - -Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child -} - -type Config struct { - City string - Parent -} -``` - -Example configuration: - -```ini -City = Boston - -[Parent] -Name = Unknwon - -[Child] -Age = 21 -``` - -What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child `ini:"Parent"` -} - -type Config struct { - City string - Parent -} -``` - -Example configuration: - -```ini -City = Boston - -[Parent] -Name = Unknwon -Age = 21 -``` - -## Getting Help - -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) -- [File An Issue](https://github.com/go-ini/ini/issues/new) - -## FAQs - -### What does `BlockMode` field do? - -By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. - -### Why another INI library? - -Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. 
- -To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md deleted file mode 100644 index ce6d8eb4..00000000 --- a/vendor/github.com/go-ini/ini/README_ZH.md +++ /dev/null @@ -1,577 +0,0 @@ -本包提供了 Go 语言中读写 INI 文件的功能。 - -## 功能特性 - -- 支持覆盖加载多个数据源(`[]byte` 或文件) -- 支持递归读取键值 -- 支持读取父子分区 -- 支持读取自增键名 -- 支持读取多行的键值 -- 支持大量辅助方法 -- 支持在读取时直接转换为 Go 语言类型 -- 支持读取和 **写入** 分区和键的注释 -- 轻松操作分区、键值和注释 -- 在保存文件时分区和键值会保持原有的顺序 - -## 下载安装 - -使用一个特定版本: - - go get gopkg.in/ini.v1 - -使用最新版: - - go get github.com/go-ini/ini - -### 测试安装 - -如果您想要在自己的机器上运行测试,请使用 `-t` 标记: - - go get -t gopkg.in/ini.v1 - -## 开始使用 - -### 从数据源加载 - -一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 - -```go -cfg, err := ini.Load([]byte("raw data"), "filename") -``` - -或者从一个空白的文件开始: - -```go -cfg := ini.Empty() -``` - -当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 - -```go -err := cfg.Append("other file", []byte("other raw data")) -``` - -### 操作分区(Section) - -获取指定分区: - -```go -section, err := cfg.GetSection("section name") -``` - -如果您想要获取默认分区,则可以用空字符串代替分区名: - -```go -section, err := cfg.GetSection("") -``` - -当您非常确定某个分区是存在的,可以使用以下简便方法: - -```go -section := cfg.Section("") -``` - -如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 - -创建一个分区: - -```go -err := cfg.NewSection("new section") -``` - -获取所有分区对象或名称: - -```go -sections := cfg.Sections() -names := cfg.SectionStrings() -``` - -### 操作键(Key) - -获取某个分区下的键: - -```go -key, err := cfg.Section("").GetKey("key name") -``` - -和分区一样,您也可以直接获取键而忽略错误处理: - -```go -key := cfg.Section("").Key("key name") -``` - -判断某个键是否存在: - -```go -yes := cfg.Section("").HasKey("key name") -``` - -创建一个新的键: - -```go -err := cfg.Section("").NewKey("name", "value") -``` - -获取分区下的所有键或键名: - -```go -keys := cfg.Section("").Keys() -names := cfg.Section("").KeyStrings() -``` - -获取分区下的所有键值对的克隆: - -```go -hash := cfg.GetSection("").KeysHash() -``` - -### 操作键值(Value) - -获取一个类型为字符串(string)的值: - -```go -val := cfg.Section("").Key("key name").String() -``` - -获取值的同时通过自定义函数进行处理验证: - -```go -val := cfg.Section("").Key("key name").Validate(func(in string) string { - if len(in) == 0 { - return "default" - } - return in -}) -``` - -如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): - -```go -val := cfg.Section("").Key("key name").Value() -``` - -判断某个原值是否存在: - -```go -yes := cfg.Section("").HasValue("test value") -``` - -获取其它类型的值: - -```go -// 布尔值的规则: -// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On -// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off -v, err = cfg.Section("").Key("BOOL").Bool() -v, err = cfg.Section("").Key("FLOAT64").Float64() -v, err = cfg.Section("").Key("INT").Int() -v, err = cfg.Section("").Key("INT64").Int64() -v, err = cfg.Section("").Key("UINT").Uint() -v, err = cfg.Section("").Key("UINT64").Uint64() -v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) -v, err = cfg.Section("").Key("TIME").Time() // RFC3339 - -v = cfg.Section("").Key("BOOL").MustBool() -v = cfg.Section("").Key("FLOAT64").MustFloat64() -v = cfg.Section("").Key("INT").MustInt() -v = cfg.Section("").Key("INT64").MustInt64() -v = cfg.Section("").Key("UINT").MustUint() -v = cfg.Section("").Key("UINT64").MustUint64() -v 
= cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) -v = cfg.Section("").Key("TIME").MustTime() // RFC3339 - -// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, -// 当键不存在或者转换失败时,则会直接返回该默认值。 -// 但是,MustString 方法必须传递一个默认值。 - -v = cfg.Seciont("").Key("String").MustString("default") -v = cfg.Section("").Key("BOOL").MustBool(true) -v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) -v = cfg.Section("").Key("INT").MustInt(10) -v = cfg.Section("").Key("INT64").MustInt64(99) -v = cfg.Section("").Key("UINT").MustUint(3) -v = cfg.Section("").Key("UINT64").MustUint64(6) -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) -v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 -``` - -如果我的值有好多行怎么办? - -```ini -[advance] -ADDRESS = """404 road, -NotFound, State, 5000 -Earth""" -``` - -嗯哼?小 case! - -```go -cfg.Section("advance").Key("ADDRESS").String() - -/* --- start --- -404 road, -NotFound, State, 5000 -Earth ------- end --- */ -``` - -赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? - -```ini -[advance] -two_lines = how about \ - continuation lines? -lots_of_lines = 1 \ - 2 \ - 3 \ - 4 -``` - -简直是小菜一碟! - -```go -cfg.Section("advance").Key("two_lines").String() // how about continuation lines? -cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 -``` - -需要注意的是,值两侧的单引号会被自动剔除: - -```ini -foo = "some value" // foo: some value -bar = 'some value' // bar: some value -``` - -这就是全部了?哈哈,当然不是。 - -#### 操作键值的辅助方法 - -获取键值时设定候选值: - -```go -v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) -v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) -v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) -v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) -v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) -v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) -v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) -v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 -``` - -如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 - -验证获取的值是否在指定范围内: - -```go -vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) -vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) -vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) -vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) -vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) -vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) -vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 -``` - -自动分割键值为切片(slice): - -```go -vals = cfg.Section("").Key("STRINGS").Strings(",") -vals = cfg.Section("").Key("FLOAT64S").Float64s(",") -vals = cfg.Section("").Key("INTS").Ints(",") -vals = cfg.Section("").Key("INT64S").Int64s(",") -vals = cfg.Section("").Key("UINTS").Uints(",") -vals = cfg.Section("").Key("UINT64S").Uint64s(",") -vals = cfg.Section("").Key("TIMES").Times(",") -``` - -### 保存配置 - -终于到了这个时刻,是时候保存一下配置了。 - -比较原始的做法是输出配置到某个文件: - -```go -// ... -err = cfg.SaveTo("my.ini") -err = cfg.SaveToIndent("my.ini", "\t") -``` - -另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: - -```go -// ... 
-cfg.WriteTo(writer) -cfg.WriteToIndent(writer, "\t") -``` - -### 高级用法 - -#### 递归读取键值 - -在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 - -```ini -NAME = ini - -[author] -NAME = Unknwon -GITHUB = https://github.com/%(NAME)s - -[package] -FULL_NAME = github.com/go-ini/%(NAME)s -``` - -```go -cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon -cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini -``` - -#### 读取父子分区 - -您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 - -```ini -NAME = ini -VERSION = v1 -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s - -[package] -CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] -``` - -```go -cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 -``` - -#### 读取自增键名 - -如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 - -```ini -[features] --: Support read/write comments of keys and sections --: Support auto-increment of key names --: Support load multiple files to overwrite key values -``` - -```go -cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} -``` - -### 映射到结构 - -想要使用更加面向对象的方式玩转 INI 吗?好主意。 - -```ini -Name = Unknwon -age = 21 -Male = true -Born = 1993-01-01T20:17:05Z - -[Note] -Content = Hi is a good man! -Cities = HangZhou, Boston -``` - -```go -type Note struct { - Content string - Cities []string -} - -type Person struct { - Name string - Age int `ini:"age"` - Male bool - Born time.Time - Note - Created time.Time `ini:"-"` -} - -func main() { - cfg, err := ini.Load("path/to/ini") - // ... - p := new(Person) - err = cfg.MapTo(p) - // ... - - // 一切竟可以如此的简单。 - err = ini.MapTo(p, "path/to/ini") - // ... - - // 嗯哼?只需要映射一个分区吗? - n := new(Note) - err = cfg.Section("Note").MapTo(n) - // ... -} -``` - -结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 - -```go -// ... -p := &Person{ - Name: "Joe", -} -// ... -``` - -这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? - -### 从结构反射 - -可是,我有说不能吗? - -```go -type Embeded struct { - Dates []time.Time `delim:"|"` - Places []string - None []int -} - -type Author struct { - Name string `ini:"NAME"` - Male bool - Age int - GPA float64 - NeverMind string `ini:"-"` - *Embeded -} - -func main() { - a := &Author{"Unknwon", true, 21, 2.8, "", - &Embeded{ - []time.Time{time.Now(), time.Now()}, - []string{"HangZhou", "Boston"}, - []int{}, - }} - cfg := ini.Empty() - err = ini.ReflectFrom(cfg, a) - // ... -} -``` - -瞧瞧,奇迹发生了。 - -```ini -NAME = Unknwon -Male = true -Age = 21 -GPA = 2.8 - -[Embeded] -Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -Places = HangZhou,Boston -None = -``` - -#### 名称映射器(Name Mapper) - -为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 - -目前有 2 款内置的映射器: - -- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 -- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 - -使用方法: - -```go -type Info struct{ - PackageName string -} - -func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) - // ... - - cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) - // ... - info := new(Info) - cfg.NameMapper = ini.AllCapsUnderscore - err = cfg.MapTo(info) - // ... 
-} -``` - -使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 - -#### 映射/反射的其它说明 - -任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child -} - -type Config struct { - City string - Parent -} -``` - -示例配置文件: - -```ini -City = Boston - -[Parent] -Name = Unknwon - -[Child] -Age = 21 -``` - -很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child `ini:"Parent"` -} - -type Config struct { - City string - Parent -} -``` - -示例配置文件: - -```ini -City = Boston - -[Parent] -Name = Unknwon -Age = 21 -``` - -## 获取帮助 - -- [API 文档](https://gowalker.org/gopkg.in/ini.v1) -- [创建工单](https://github.com/go-ini/ini/issues/new) - -## 常见问题 - -### 字段 `BlockMode` 是什么? - -默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 - -### 为什么要写另一个 INI 解析库? - -许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 - -为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go deleted file mode 100644 index 1a27f068..00000000 --- a/vendor/github.com/go-ini/ini/ini.go +++ /dev/null @@ -1,1027 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "regexp" - "runtime" - "strconv" - "strings" - "sync" - "time" -) - -const ( - DEFAULT_SECTION = "DEFAULT" - // Maximum allowed depth when recursively substituing variable names. - _DEPTH_VALUES = 99 - - _VERSION = "1.8.6" -) - -func Version() string { - return _VERSION -} - -var ( - LineBreak = "\n" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) - - // Write spaces around "=" to look better. - PrettyFormat = true -) - -func init() { - if runtime.GOOS == "windows" { - LineBreak = "\r\n" - } -} - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} - -// dataSource is a interface that returns file content. -type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -type bytesReadCloser struct { - reader io.Reader -} - -func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { - return rc.reader.Read(p) -} - -func (rc *bytesReadCloser) Close() error { - return nil -} - -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return &bytesReadCloser{bytes.NewReader(s.data)}, nil -} - -// ____ __. -// | |/ _|____ ___.__. 
-// | <_/ __ < | | -// | | \ ___/\___ | -// |____|__ \___ > ____| -// \/ \/\/ - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncr bool -} - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// String returns string representation of value. -func (k *Key) String() string { - val := k.value - if strings.Index(val, "%") == -1 { - return val - } - - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. 
-func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - vals := strings.Split(str, delim) - for i := range vals { - vals[i] = strings.TrimSpace(vals[i]) - } - return vals -} - -// Float64s returns list of float64 divided by given delimiter. -func (k *Key) Float64s(delim string) []float64 { - strs := k.Strings(delim) - vals := make([]float64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseFloat(strs[i], 64) - } - return vals -} - -// Ints returns list of int divided by given delimiter. 
-func (k *Key) Ints(delim string) []int { - strs := k.Strings(delim) - vals := make([]int, len(strs)) - for i := range strs { - vals[i], _ = strconv.Atoi(strs[i]) - } - return vals -} - -// Int64s returns list of int64 divided by given delimiter. -func (k *Key) Int64s(delim string) []int64 { - strs := k.Strings(delim) - vals := make([]int64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseInt(strs[i], 10, 64) - } - return vals -} - -// Uints returns list of uint divided by given delimiter. -func (k *Key) Uints(delim string) []uint { - strs := k.Strings(delim) - vals := make([]uint, len(strs)) - for i := range strs { - u, _ := strconv.ParseUint(strs[i], 10, 0) - vals[i] = uint(u) - } - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. -func (k *Key) Uint64s(delim string) []uint64 { - strs := k.Strings(delim) - vals := make([]uint64, len(strs)) - for i := range strs { - vals[i], _ = strconv.ParseUint(strs[i], 10, 64) - } - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) TimesFormat(format, delim string) []time.Time { - strs := k.Strings(delim) - vals := make([]time.Time, len(strs)) - for i := range strs { - vals[i], _ = time.Parse(format, strs[i]) - } - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} - -// _________ __ .__ -// / _____/ ____ _____/ |_|__| ____ ____ -// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \ -// / \ ___/\ \___| | | ( <_> ) | \ -// /_______ /\___ >\___ >__| |__|\____/|___| / -// \/ \/ \/ \/ - -// Section represents a config section. -type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string -} - -func newSection(f *File, name string) *Section { - return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - s.keys[name].value = val - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = &Key{s, "", name, val, false} - s.keysHash[name] = val - return s.keys[name], nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. 
-		sname := s.name
-		for {
-			if i := strings.LastIndex(sname, "."); i > -1 {
-				sname = sname[:i]
-				sec, err := s.f.GetSection(sname)
-				if err != nil {
-					continue
-				}
-				return sec.GetKey(name)
-			} else {
-				break
-			}
-		}
-		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
-	}
-	return key, nil
-}
-
-// HasKey returns true if section contains a key with given name.
-func (s *Section) HasKey(name string) bool {
-	key, _ := s.GetKey(name)
-	return key != nil
-}
-
-// Haskey is a backwards-compatible name for HasKey.
-func (s *Section) Haskey(name string) bool {
-	return s.HasKey(name)
-}
-
-// HasValue returns true if section contains given raw value.
-func (s *Section) HasValue(value string) bool {
-	if s.f.BlockMode {
-		s.f.lock.RLock()
-		defer s.f.lock.RUnlock()
-	}
-
-	for _, k := range s.keys {
-		if value == k.value {
-			return true
-		}
-	}
-	return false
-}
-
-// Key assumes named Key exists in section and returns a zero-value when not.
-func (s *Section) Key(name string) *Key {
-	key, err := s.GetKey(name)
-	if err != nil {
-		// It's OK here because the only possible error is empty key name,
-		// but if it's empty, this piece of code won't be executed.
-		key, _ = s.NewKey(name, "")
-		return key
-	}
-	return key
-}
-
-// Keys returns list of keys of section.
-func (s *Section) Keys() []*Key {
-	keys := make([]*Key, len(s.keyList))
-	for i := range s.keyList {
-		keys[i] = s.Key(s.keyList[i])
-	}
-	return keys
-}
-
-// KeyStrings returns list of key names of section.
-func (s *Section) KeyStrings() []string {
-	list := make([]string, len(s.keyList))
-	copy(list, s.keyList)
-	return list
-}
-
-// KeysHash returns keys hash consisting of names and values.
-func (s *Section) KeysHash() map[string]string {
-	if s.f.BlockMode {
-		s.f.lock.RLock()
-		defer s.f.lock.RUnlock()
-	}
-
-	hash := map[string]string{}
-	for key, value := range s.keysHash {
-		hash[key] = value
-	}
-	return hash
-}
-
-// DeleteKey deletes a key from section.
-func (s *Section) DeleteKey(name string) {
-	if s.f.BlockMode {
-		s.f.lock.Lock()
-		defer s.f.lock.Unlock()
-	}
-
-	for i, k := range s.keyList {
-		if k == name {
-			s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
-			delete(s.keys, name)
-			return
-		}
-	}
-}
-
-// ___________.__.__
-// \_   _____/|__|  |   ____
-//  |    __)  |  |  |  _/ __ \
-//  |     \   |  |  |__\  ___/
-//  \___  /   |__|____/\___  >
-//      \/                 \/
-
-// File represents a combination of one or more INI files in memory.
-type File struct {
-	// Should make things safe, but sometimes doesn't matter.
-	BlockMode bool
-	// Make sure data is safe in multiple goroutines.
-	lock sync.RWMutex
-
-	// Allow combination of multiple data sources.
-	dataSources []dataSource
-	// Actual data is stored here.
-	sections map[string]*Section
-
-	// To keep data in order.
-	sectionList []string
-
-	NameMapper
-}
-
-// newFile initializes File object with given data sources.
-func newFile(dataSources []dataSource) *File {
-	return &File{
-		BlockMode:   true,
-		dataSources: dataSources,
-		sections:    make(map[string]*Section),
-		sectionList: make([]string, 0, 10),
-	}
-}
-
-func parseDataSource(source interface{}) (dataSource, error) {
-	switch s := source.(type) {
-	case string:
-		return sourceFile{s}, nil
-	case []byte:
-		return &sourceData{s}, nil
-	default:
-		return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
-	}
-}
-
-// Load loads and parses from INI data sources.
-// Arguments can be a mix of file names (string) and raw data ([]byte).
-func Load(source interface{}, others ...interface{}) (_ *File, err error) {
-	sources := make([]dataSource, len(others)+1)
-	sources[0], err = parseDataSource(source)
-	if err != nil {
-		return nil, err
-	}
-	for i := range others {
-		sources[i+1], err = parseDataSource(others[i])
-		if err != nil {
-			return nil, err
-		}
-	}
-	f := newFile(sources)
-	if err = f.Reload(); err != nil {
-		return nil, err
-	}
-	return f, nil
-}
-
-// Empty returns an empty file object.
-func Empty() *File {
-	// Ignore error here, we're sure our data is good.
-	f, _ := Load([]byte(""))
-	return f
-}
-
-// NewSection creates a new section.
-func (f *File) NewSection(name string) (*Section, error) {
-	if len(name) == 0 {
-		return nil, errors.New("error creating new section: empty section name")
-	}
-
-	if f.BlockMode {
-		f.lock.Lock()
-		defer f.lock.Unlock()
-	}
-
-	if inSlice(name, f.sectionList) {
-		return f.sections[name], nil
-	}
-
-	f.sectionList = append(f.sectionList, name)
-	f.sections[name] = newSection(f, name)
-	return f.sections[name], nil
-}
-
-// NewSections creates a list of sections.
-func (f *File) NewSections(names ...string) (err error) {
-	for _, name := range names {
-		if _, err = f.NewSection(name); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// GetSection returns section by given name.
-func (f *File) GetSection(name string) (*Section, error) {
-	if len(name) == 0 {
-		name = DEFAULT_SECTION
-	}
-
-	if f.BlockMode {
-		f.lock.RLock()
-		defer f.lock.RUnlock()
-	}
-
-	sec := f.sections[name]
-	if sec == nil {
-		return nil, fmt.Errorf("error when getting section: section '%s' not exists", name)
-	}
-	return sec, nil
-}
-
-// Section assumes named section exists and returns a zero-value when not.
-func (f *File) Section(name string) *Section {
-	sec, err := f.GetSection(name)
-	if err != nil {
-		// Note: It's OK here because the only possible error is empty section name,
-		// but if it's empty, this piece of code won't be executed.
-		sec, _ = f.NewSection(name)
-		return sec
-	}
-	return sec
-}
-
-// Sections returns list of Sections.
-func (f *File) Sections() []*Section {
-	sections := make([]*Section, len(f.sectionList))
-	for i := range f.sectionList {
-		sections[i] = f.Section(f.sectionList[i])
-	}
-	return sections
-}
-
-// SectionStrings returns list of section names.
-func (f *File) SectionStrings() []string {
-	list := make([]string, len(f.sectionList))
-	copy(list, f.sectionList)
-	return list
-}
-
-// DeleteSection deletes a section.
-func (f *File) DeleteSection(name string) {
-	if f.BlockMode {
-		f.lock.Lock()
-		defer f.lock.Unlock()
-	}
-
-	if len(name) == 0 {
-		name = DEFAULT_SECTION
-	}
-
-	for i, s := range f.sectionList {
-		if s == name {
-			f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
-			delete(f.sections, name)
-			return
-		}
-	}
-}
-
-func (f *File) reload(s dataSource) error {
-	r, err := s.ReadCloser()
-	if err != nil {
-		return err
-	}
-	defer r.Close()
-
-	return f.parse(r)
-}
-
-// Reload reloads and parses all data sources.
-func (f *File) Reload() (err error) {
-	for _, s := range f.dataSources {
-		if err = f.reload(s); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Append appends one or more data sources and reloads automatically.
-func (f *File) Append(source interface{}, others ...interface{}) error {
-	ds, err := parseDataSource(source)
-	if err != nil {
-		return err
-	}
-	f.dataSources = append(f.dataSources, ds)
-	for _, s := range others {
-		ds, err = parseDataSource(s)
-		if err != nil {
-			return err
-		}
-		f.dataSources = append(f.dataSources, ds)
-	}
-	return f.Reload()
-}
-
-// WriteToIndent writes file content into io.Writer with given value indentation.
-func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
-	equalSign := "="
-	if PrettyFormat {
-		equalSign = " = "
-	}
-
-	// Use buffer to make sure target is safe until encoding finishes.
-	buf := bytes.NewBuffer(nil)
-	for i, sname := range f.sectionList {
-		sec := f.Section(sname)
-		if len(sec.Comment) > 0 {
-			if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
-				sec.Comment = "; " + sec.Comment
-			}
-			if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
-				return 0, err
-			}
-		}
-
-		if i > 0 {
-			if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
-				return 0, err
-			}
-		} else {
-			// Write nothing if default section is empty.
-			if len(sec.keyList) == 0 {
-				continue
-			}
-		}
-
-		for _, kname := range sec.keyList {
-			key := sec.Key(kname)
-			if len(key.Comment) > 0 {
-				if len(indent) > 0 && sname != DEFAULT_SECTION {
-					buf.WriteString(indent)
-				}
-				if key.Comment[0] != '#' && key.Comment[0] != ';' {
-					key.Comment = "; " + key.Comment
-				}
-				if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
-					return 0, err
-				}
-			}
-
-			if len(indent) > 0 && sname != DEFAULT_SECTION {
-				buf.WriteString(indent)
-			}
-
-			switch {
-			case key.isAutoIncr:
-				kname = "-"
-			case strings.ContainsAny(kname, "\"=:"):
-				kname = "`" + kname + "`"
-			case strings.Contains(kname, "`"):
-				kname = `"""` + kname + `"""`
-			}
-
-			val := key.value
-			// In case key value contains "\n", "`", "\"", "#" or ";".
-			if strings.ContainsAny(val, "\n`") {
-				val = `"""` + val + `"""`
-			} else if strings.ContainsAny(val, "#;") {
-				val = "`" + val + "`"
-			}
-			if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil {
-				return 0, err
-			}
-		}
-
-		// Put a line between sections.
-		if _, err = buf.WriteString(LineBreak); err != nil {
-			return 0, err
-		}
-	}
-
-	return buf.WriteTo(w)
-}
-
-// WriteTo writes file content into io.Writer.
-func (f *File) WriteTo(w io.Writer) (int64, error) {
-	return f.WriteToIndent(w, "")
-}
-
-// SaveToIndent writes content to file system with given value indentation.
-func (f *File) SaveToIndent(filename, indent string) error {
-	// Note: Because os.Create truncates any existing file,
-	// it's safer to save to a temporary file location and rename after done.
-	tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
-	defer os.Remove(tmpPath)
-
-	fw, err := os.Create(tmpPath)
-	if err != nil {
-		return err
-	}
-
-	if _, err = f.WriteToIndent(fw, indent); err != nil {
-		fw.Close()
-		return err
-	}
-	fw.Close()
-
-	// Remove old file and rename the new one.
-	os.Remove(filename)
-	return os.Rename(tmpPath, filename)
-}
-
-// SaveTo writes content to file system.
-func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index 1c1bf91f..00000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - "unicode" -) - -type tokenType int - -const ( - _TOKEN_INVALID tokenType = iota - _TOKEN_COMMENT - _TOKEN_SECTION - _TOKEN_KEY -) - -type parser struct { - buf *bufio.Reader - isEOF bool - count int - comment *bytes.Buffer -} - -func newParser(r io.Reader) *parser { - return &parser{ - buf: bufio.NewReader(r), - count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of BOM-UTF8 format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { - p.buf.Read(mask) - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. 
- var keyQuote string - if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - endIdx := -1 - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], "=:") - if i < 0 { - return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, "=:") - if endIdx < 0 { - return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) - } - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. -// It returns false if any other parts also contain same kind of quotes. -func hasSurroundedQuote(in string, quote byte) bool { - return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte) (string, error) { - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - return "", nil - } - - var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - return line[startIdx : pos+startIdx], nil - } - - // Won't be able to reach here if value only contains whitespace. - line = strings.TrimSpace(line) - - // Check continuation lines - if line[len(line)-1] == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - i := strings.IndexAny(line, "#;") - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - - // Trim single quotes - if hasSurroundedQuote(line, '\'') || - hasSurroundedQuote(line, '"') { - line = line[1 : len(line)-1] - } - return line, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. 
-	section, _ := f.NewSection(DEFAULT_SECTION)
-
-	var line []byte
-	for !p.isEOF {
-		line, err = p.readUntil('\n')
-		if err != nil {
-			return err
-		}
-
-		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
-		if len(line) == 0 {
-			continue
-		}
-
-		// Comments
-		if line[0] == '#' || line[0] == ';' {
-			// Note: we do not care about the ending line break;
-			// it is needed for adding a second line,
-			// so just clean it once at the end when setting the value.
-			p.comment.Write(line)
-			continue
-		}
-
-		// Section
-		if line[0] == '[' {
-			// Read to the next ']' (TODO: support quoted strings)
-			closeIdx := bytes.IndexByte(line, ']')
-			if closeIdx == -1 {
-				return fmt.Errorf("unclosed section: %s", line)
-			}
-
-			section, err = f.NewSection(string(line[1:closeIdx]))
-			if err != nil {
-				return err
-			}
-
-			comment, has := cleanComment(line[closeIdx+1:])
-			if has {
-				p.comment.Write(comment)
-			}
-
-			section.Comment = strings.TrimSpace(p.comment.String())
-
-			// Reset auto-counter and comments
-			p.comment.Reset()
-			p.count = 1
-			continue
-		}
-
-		kname, offset, err := readKeyName(line)
-		if err != nil {
-			return err
-		}
-
-		// Auto increment.
-		isAutoIncr := false
-		if kname == "-" {
-			isAutoIncr = true
-			kname = "#" + strconv.Itoa(p.count)
-			p.count++
-		}
-
-		key, err := section.NewKey(kname, "")
-		if err != nil {
-			return err
-		}
-		key.isAutoIncr = isAutoIncr
-
-		value, err := p.readValue(line[offset:])
-		if err != nil {
-			return err
-		}
-		key.SetValue(value)
-		key.Comment = strings.TrimSpace(p.comment.String())
-		p.comment.Reset()
-	}
-	return nil
-}
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
deleted file mode 100644
index 3fb92c39..00000000
--- a/vendor/github.com/go-ini/ini/struct.go
+++ /dev/null
@@ -1,351 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"reflect"
-	"time"
-	"unicode"
-)
-
-// NameMapper represents an ini tag name mapper.
-type NameMapper func(string) string
-
-// Built-in name getters.
-var (
-	// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
-	AllCapsUnderscore NameMapper = func(raw string) string {
-		newstr := make([]rune, 0, len(raw))
-		for i, chr := range raw {
-			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
-				if i > 0 {
-					newstr = append(newstr, '_')
-				}
-			}
-			newstr = append(newstr, unicode.ToUpper(chr))
-		}
-		return string(newstr)
-	}
-	// TitleUnderscore converts to format title_underscore.
-	TitleUnderscore NameMapper = func(raw string) string {
-		newstr := make([]rune, 0, len(raw))
-		for i, chr := range raw {
-			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
-				if i > 0 {
-					newstr = append(newstr, '_')
-				}
-				chr -= ('A' - 'a')
-			}
-			newstr = append(newstr, chr)
-		}
-		return string(newstr)
-	}
-)
-
-func (s *Section) parseFieldName(raw, actual string) string {
-	if len(actual) > 0 {
-		return actual
-	}
-	if s.f.NameMapper != nil {
-		return s.f.NameMapper(raw)
-	}
-	return raw
-}
-
-func parseDelim(actual string) string {
-	if len(actual) > 0 {
-		return actual
-	}
-	return ","
-}
-
-var reflectTime = reflect.TypeOf(time.Now()).Kind()
-
-// setWithProperType sets proper value to field based on its type,
-// but it does not return an error on failed parsing,
-// because we want to use the default value that is already assigned to the struct.
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
-	switch t.Kind() {
-	case reflect.String:
-		if len(key.String()) == 0 {
-			return nil
-		}
-		field.SetString(key.String())
-	case reflect.Bool:
-		boolVal, err := key.Bool()
-		if err != nil {
-			return nil
-		}
-		field.SetBool(boolVal)
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		durationVal, err := key.Duration()
-		// Skip zero value
-		if err == nil && int(durationVal) > 0 {
-			field.Set(reflect.ValueOf(durationVal))
-			return nil
-		}
-
-		intVal, err := key.Int64()
-		if err != nil || intVal == 0 {
-			return nil
-		}
-		field.SetInt(intVal)
-	// byte is an alias for uint8, so supporting uint8 breaks support for byte
-	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		durationVal, err := key.Duration()
-		if err == nil {
-			field.Set(reflect.ValueOf(durationVal))
-			return nil
-		}
-
-		uintVal, err := key.Uint64()
-		if err != nil {
-			return nil
-		}
-		field.SetUint(uintVal)
-
-	case reflect.Float64:
-		floatVal, err := key.Float64()
-		if err != nil {
-			return nil
-		}
-		field.SetFloat(floatVal)
-	case reflectTime:
-		timeVal, err := key.Time()
-		if err != nil {
-			return nil
-		}
-		field.Set(reflect.ValueOf(timeVal))
-	case reflect.Slice:
-		vals := key.Strings(delim)
-		numVals := len(vals)
-		if numVals == 0 {
-			return nil
-		}
-
-		sliceOf := field.Type().Elem().Kind()
-
-		var times []time.Time
-		if sliceOf == reflectTime {
-			times = key.Times(delim)
-		}
-
-		slice := reflect.MakeSlice(field.Type(), numVals, numVals)
-		for i := 0; i < numVals; i++ {
-			switch sliceOf {
-			case reflectTime:
-				slice.Index(i).Set(reflect.ValueOf(times[i]))
-			default:
-				slice.Index(i).Set(reflect.ValueOf(vals[i]))
-			}
-		}
-		field.Set(slice)
-	default:
-		return fmt.Errorf("unsupported type '%s'", t)
-	}
-	return nil
-}
-
-func (s *Section) mapTo(val reflect.Value) error {
-	if val.Kind() == reflect.Ptr {
-		val = val.Elem()
-	}
-	typ := val.Type()
-
-	for i := 0; i < typ.NumField(); i++ {
-		field := val.Field(i)
-		tpField := typ.Field(i)
-
-		tag := tpField.Tag.Get("ini")
-		if tag == "-" {
-			continue
-		}
-
-		fieldName := s.parseFieldName(tpField.Name, tag)
-		if len(fieldName) == 0 || !field.CanSet() {
-			continue
-		}
-
-		isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
-		isStruct := tpField.Type.Kind() == reflect.Struct
-		if isAnonymous {
-			field.Set(reflect.New(tpField.Type.Elem()))
-		}
-
-		if isAnonymous || isStruct {
-			if sec, err := s.f.GetSection(fieldName); err == nil {
-				if err = sec.mapTo(field); err != nil {
-					return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
-				}
-				continue
-			}
-		}
-
-		if key, err :=
s.GetKey(fieldName); err == nil {
-			if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
-				return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
-			}
-		}
-	}
-	return nil
-}
-
-// MapTo maps section to given struct.
-func (s *Section) MapTo(v interface{}) error {
-	typ := reflect.TypeOf(v)
-	val := reflect.ValueOf(v)
-	if typ.Kind() == reflect.Ptr {
-		typ = typ.Elem()
-		val = val.Elem()
-	} else {
-		return errors.New("cannot map to non-pointer struct")
-	}
-
-	return s.mapTo(val)
-}
-
-// MapTo maps file to given struct.
-func (f *File) MapTo(v interface{}) error {
-	return f.Section("").MapTo(v)
-}
-
-// MapToWithMapper maps data sources to given struct with name mapper.
-func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
-	cfg, err := Load(source, others...)
-	if err != nil {
-		return err
-	}
-	cfg.NameMapper = mapper
-	return cfg.MapTo(v)
-}
-
-// MapTo maps data sources to given struct.
-func MapTo(v, source interface{}, others ...interface{}) error {
-	return MapToWithMapper(v, nil, source, others...)
-}
-
-// reflectWithProperType does the opposite of setWithProperType.
-func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
-	switch t.Kind() {
-	case reflect.String:
-		key.SetValue(field.String())
-	case reflect.Bool,
-		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
-		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
-		reflect.Float64,
-		reflectTime:
-		key.SetValue(fmt.Sprint(field))
-	case reflect.Slice:
-		vals := field.Slice(0, field.Len())
-		if field.Len() == 0 {
-			return nil
-		}
-
-		var buf bytes.Buffer
-		isTime := fmt.Sprint(field.Type()) == "[]time.Time"
-		for i := 0; i < field.Len(); i++ {
-			if isTime {
-				buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
-			} else {
-				buf.WriteString(fmt.Sprint(vals.Index(i)))
-			}
-			buf.WriteString(delim)
-		}
-		key.SetValue(buf.String()[:buf.Len()-1])
-	default:
-		return fmt.Errorf("unsupported type '%s'", t)
-	}
-	return nil
-}
-
-func (s *Section) reflectFrom(val reflect.Value) error {
-	if val.Kind() == reflect.Ptr {
-		val = val.Elem()
-	}
-	typ := val.Type()
-
-	for i := 0; i < typ.NumField(); i++ {
-		field := val.Field(i)
-		tpField := typ.Field(i)
-
-		tag := tpField.Tag.Get("ini")
-		if tag == "-" {
-			continue
-		}
-
-		fieldName := s.parseFieldName(tpField.Name, tag)
-		if len(fieldName) == 0 || !field.CanSet() {
-			continue
-		}
-
-		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
-			(tpField.Type.Kind() == reflect.Struct) {
-			// Note: The only error here is section doesn't exist.
-			sec, err := s.f.GetSection(fieldName)
-			if err != nil {
-				// Note: fieldName can never be empty here, ignore error.
-				sec, _ = s.f.NewSection(fieldName)
-			}
-			if err = sec.reflectFrom(field); err != nil {
-				return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
-			}
-			continue
-		}
-
-		// Note: Same reason as section.
-		key, err := s.GetKey(fieldName)
-		if err != nil {
-			key, _ = s.NewKey(fieldName, "")
-		}
-		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
-			return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
-		}
-
-	}
-	return nil
-}
-
-// ReflectFrom reflects section from given struct.
-func (s *Section) ReflectFrom(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot reflect from non-pointer struct") - } - - return s.reflectFrom(val) -} - -// ReflectFrom reflects file from given struct. -func (f *File) ReflectFrom(v interface{}) error { - return f.Section("").ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct with name mapper. -func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { - cfg.NameMapper = mapper - return cfg.ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct. -func ReflectFrom(cfg *File, v interface{}) error { - return ReflectFromWithMapper(cfg, v, nil) -} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
"Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. 
You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. 
The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md new file mode 100644 index 00000000..036e5313 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md @@ -0,0 +1,30 @@ +# cleanhttp + +Functions for accessing "clean" Go http.Client values + +------------- + +The Go standard library contains a default `http.Client` called +`http.DefaultClient`. It is a common idiom in Go code to start with +`http.DefaultClient` and tweak it as necessary, and in fact, this is +encouraged; from the `http` package documentation: + +> The Client's Transport typically has internal state (cached TCP connections), +so Clients should be reused instead of created as needed. 
Clients are safe for +concurrent use by multiple goroutines. + +Unfortunately, this is a shared value, and it is not uncommon for libraries to +assume that they are free to modify it at will. With enough dependencies, it +can be very easy to encounter strange problems and race conditions due to +manipulation of this shared value across libraries and goroutines (clients are +safe for concurrent use, but writing values to the client struct itself is not +protected). + +Making things worse is the fact that a bare `http.Client` will use a default +`http.Transport` called `http.DefaultTransport`, which is another global value +that behaves the same way. So it is not simply enough to replace +`http.DefaultClient` with `&http.Client{}`. + +This repository provides some simple functions to get a "clean" `http.Client` +-- one that uses the same default values as the Go standard library, but +returns a client that does not share any state with other clients. diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 00000000..8d306bf5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,57 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). +func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 00000000..05841092 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. 
This can have detrimental effects, especially in TLS contexts,
+// where client or root certificates set to talk to multiple endpoints can end
+// up displacing each other, leading to hard-to-debug issues. This package
+// provides non-shared http.Client and http.Transport structs to ensure that
+// the configuration will not be overwritten by other parts of the application
+// or dependencies.
+//
+// The DefaultClient and DefaultTransport functions disable idle connections
+// and keepalives. Without ensuring that idle connections are closed before
+// garbage collection, short-term clients/transports can leak file descriptors,
+// eventually leading to "too many open files" errors. If you will be
+// connecting to the same hosts repeatedly from the same client, you can use
+// DefaultPooledClient to receive a client that has connection pooling
+// semantics similar to http.DefaultClient.
+//
+package cleanhttp
diff --git a/vendor/github.com/hashicorp/go-getter/.travis.yml b/vendor/github.com/hashicorp/go-getter/.travis.yml
new file mode 100644
index 00000000..da804c29
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/.travis.yml
@@ -0,0 +1,23 @@
+sudo: false
+
+addons:
+  apt:
+    sources:
+      - sourceline: 'ppa:git-core/ppa'
+    packages:
+      - git
+
+language: go
+
+go:
+  - 1.8.x
+  - 1.9.x
+  - master
+
+branches:
+  only:
+    - master
+
+matrix:
+  allow_failures:
+    - go: master
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
index 30aed323..40ace74d 100644
--- a/vendor/github.com/hashicorp/go-getter/README.md
+++ b/vendor/github.com/hashicorp/go-getter/README.md
@@ -21,8 +21,7 @@ URLs. For example: "github.com/hashicorp/go-getter" would turn into a Git URL.
 Or "./foo" would turn into a file URL. These are extensible.
 
 This library is used by [Terraform](https://terraform.io) for
-downloading modules, [Otto](https://ottoproject.io) for dependencies and
-Appfile imports, and [Nomad](https://nomadproject.io) for downloading
+downloading modules and [Nomad](https://nomadproject.io) for downloading
 binaries.
 
 ## Installation and Usage
@@ -119,6 +118,37 @@ The protocol-specific options are documented below the URL format
 section. But because they are part of the URL, we point it out here so
 you know they exist.
 
+### Subdirectories
+
+If you want to download only a specific subdirectory from a downloaded
+directory, you can specify a subdirectory after a double-slash `//`.
+go-getter will first download the URL specified _before_ the double-slash
+(as if you didn't specify a double-slash), but will then copy the
+path after the double slash into the target directory.
+
+For example, if you're downloading this GitHub repository, but you only
+want to download the `test-fixtures` directory, you can do the following:
+
+```
+https://github.com/hashicorp/go-getter.git//test-fixtures
+```
+
+If you downloaded this to the `/tmp` directory, then the file
+`/tmp/archive.gz` would exist. Notice that this file is in the `test-fixtures`
+directory in this repository, but because we specified a subdirectory,
+go-getter automatically copied only that directory's contents.
+
+Subdirectory paths may also use filesystem glob patterns.
+The path must match _exactly one_ entry or go-getter will return an error.
+This is useful if you're not sure of the exact directory name but it follows
+a predictable naming structure.
+
+For example, the following URL would also work:
+
+```
+https://github.com/hashicorp/go-getter.git//test-*
+```
+
 ### Checksumming
 
 For file downloads of any protocol, go-getter can automatically verify
@@ -154,9 +184,11 @@ The following archive formats are supported:
 
   * `tar.gz` and `tgz`
   * `tar.bz2` and `tbz2`
+  * `tar.xz` and `txz`
   * `zip`
   * `gz`
   * `bz2`
+  * `xz`
 
 For example, an example URL is shown below:
 
@@ -200,6 +232,9 @@ The options below are available to all protocols:
   * `checksum` - Checksum to verify the downloaded file or archive. See
     the entire section on checksumming above for format and more details.
 
+  * `filename` - When in file download mode, allows specifying the name of the
+    downloaded file on disk. Has no effect in directory mode.
+
 ### Local Files (`file`)
 
 None
@@ -210,19 +245,29 @@ None
   a commit SHA, a branch name, etc. If it is a named ref such as a branch
   name, go-getter will update it to the latest on each get.
 
+  * `sshkey` - An SSH private key to use during clones. The provided key must
+    be a base64-encoded string. For example, to generate a suitable `sshkey`
+    from a private key file on disk, you would run `base64 -w0 <file>`.
+
+    **Note**: Git 2.3+ is required to use this feature.
+
 ### Mercurial (`hg`)
 
   * `rev` - The Mercurial revision to checkout.
 
 ### HTTP (`http`)
 
-None
+#### Basic Authentication
+
+To use HTTP basic authentication with go-getter, simply prepend `username:password@` to the
+hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special
+characters, including the username and password, must be URL encoded.
 
 ### S3 (`s3`)
 
 S3 takes various access configurations in the URL. Note that it will also
-read these from standard AWS environment variables if they're set. If
-the query parameters are present, these take priority.
+read these from standard AWS environment variables if they're set. S3 compliant servers like Minio
+are also supported. If the query parameters are present, these take priority.
 
   * `aws_access_key_id` - AWS access key.
   * `aws_access_key_secret` - AWS access key secret.
@@ -234,6 +279,14 @@ If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
 using credentials, then just omit these and the profile, if available
 will be used automatically.
 
+### Using S3 with Minio
+ If you use go-getter for Minio support, you must consider the following:
+
+ * `aws_access_key_id` (required) - Minio access key.
+ * `aws_access_key_secret` (required) - Minio access key secret.
+ * `region` (optional - defaults to us-east-1) - Region identifier to use.
+ * `version` (optional - defaults to Minio default) - Configuration file format.
+
 #### S3 Bucket Examples
 
 S3 has several addressing schemes used to reference your bucket.
These are @@ -244,4 +297,5 @@ Some examples for these addressing schemes: - s3::https://s3-eu-west-1.amazonaws.com/bucket/foo - bucket.s3.amazonaws.com/foo - bucket.s3-eu-west-1.amazonaws.com/foo/bar +- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY®ion=us-east-2" diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml index 159dad4d..ec48d45e 100644 --- a/vendor/github.com/hashicorp/go-getter/appveyor.yml +++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml @@ -1,5 +1,5 @@ version: "build-{branch}-{build}" -image: Visual Studio 2015 +image: Visual Studio 2017 clone_folder: c:\gopath\github.com\hashicorp\go-getter environment: GOPATH: c:\gopath diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go index 5a039dba..300301c2 100644 --- a/vendor/github.com/hashicorp/go-getter/client.go +++ b/vendor/github.com/hashicorp/go-getter/client.go @@ -17,6 +17,7 @@ import ( "strings" urlhelper "github.com/hashicorp/go-getter/helper/url" + "github.com/hashicorp/go-safetemp" ) // Client is a client for downloading things. @@ -100,17 +101,14 @@ func (c *Client) Get() error { dst := c.Dst src, subDir := SourceDirSubdir(src) if subDir != "" { - tmpDir, err := ioutil.TempDir("", "tf") + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(tmpDir); err != nil { - return err - } - defer os.RemoveAll(tmpDir) + defer tdcloser.Close() realDst = dst - dst = tmpDir + dst = td } u, err := urlhelper.Parse(src) @@ -153,7 +151,7 @@ func (c *Client) Get() error { // We don't appear to... but is it part of the filename? matchingLen := 0 for k, _ := range decompressors { - if strings.HasSuffix(u.Path, k) && len(k) > matchingLen { + if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen { archiveV = k matchingLen = len(k) } @@ -222,13 +220,29 @@ func (c *Client) Get() error { checksumValue = b } - // For now, any means file. In the future, we'll ask the getter - // what it thinks it is. if mode == ClientModeAny { - mode = ClientModeFile + // Ask the getter which client mode to use + mode, err = g.ClientMode(u) + if err != nil { + return err + } + + // Destination is the base name of the URL path in "any" mode when + // a file source is detected. + if mode == ClientModeFile { + filename := filepath.Base(u.Path) - // Destination is the base name of the URL path - dst = filepath.Join(dst, filepath.Base(u.Path)) + // Determine if we have a custom file name + if v := q.Get("filename"); v != "" { + // Delete the query parameter if we have it. 
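+				// Otherwise the filename parameter would be passed through
+				// to the underlying getter as part of the source URL.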
+ q.Del("filename") + u.RawQuery = q.Encode() + + filename = v + } + + dst = filepath.Join(dst, filename) + } } // If we're not downloading a directory, then just download the file @@ -300,7 +314,13 @@ func (c *Client) Get() error { return err } - return copyDir(realDst, filepath.Join(dst, subDir), false) + // Process any globs + subDir, err := SubdirGlob(dst, subDir) + if err != nil { + return err + } + + return copyDir(realDst, subDir, false) } return nil diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go index d18174cc..198bb0ed 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress.go +++ b/vendor/github.com/hashicorp/go-getter/decompress.go @@ -1,7 +1,15 @@ package getter +import ( + "strings" +) + // Decompressor defines the interface that must be implemented to add // support for decompressing a type. +// +// Important: if you're implementing a decompressor, please use the +// containsDotDot helper in this file to ensure that files can't be +// decompressed outside of the specified directory. type Decompressor interface { // Decompress should decompress src to dst. dir specifies whether dst // is a directory or single file. src is guaranteed to be a single file @@ -16,14 +24,35 @@ var Decompressors map[string]Decompressor func init() { tbzDecompressor := new(TarBzip2Decompressor) tgzDecompressor := new(TarGzipDecompressor) + txzDecompressor := new(TarXzDecompressor) Decompressors = map[string]Decompressor{ "bz2": new(Bzip2Decompressor), "gz": new(GzipDecompressor), + "xz": new(XzDecompressor), "tar.bz2": tbzDecompressor, "tar.gz": tgzDecompressor, + "tar.xz": txzDecompressor, "tbz2": tbzDecompressor, "tgz": tgzDecompressor, + "txz": txzDecompressor, "zip": new(ZipDecompressor), } } + +// containsDotDot checks if the filepath value v contains a ".." entry. +// This will check filepath components by splitting along / or \. This +// function is copied directly from the Go net/http implementation. +func containsDotDot(v string) bool { + if !strings.Contains(v, "..") { + return false + } + for _, ent := range strings.FieldsFunc(v, isSlashRune) { + if ent == ".." { + return true + } + } + return false +} + +func isSlashRune(r rune) bool { return r == '/' || r == '\\' } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go index 20010540..5ebf709b 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go @@ -9,7 +9,7 @@ import ( ) // GzipDecompressor is an implementation of Decompressor that can -// decompress bz2 files. +// decompress gzip files. type GzipDecompressor struct{} func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error { diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go new file mode 100644 index 00000000..39cb392e --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go @@ -0,0 +1,138 @@ +package getter + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" +) + +// untar is a shared helper for untarring an archive. The reader should provide +// an uncompressed view of the tar archive. 
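+// Callers such as the tar.gz, tar.bz2, and tar.xz decompressors below are
+// expected to wrap src in the matching decompression reader before
+// delegating to untar.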
+func untar(input io.Reader, dst, src string, dir bool) error { + tarR := tar.NewReader(input) + done := false + dirHdrs := []*tar.Header{} + for { + hdr, err := tarR.Next() + if err == io.EOF { + if !done { + // Empty archive + return fmt.Errorf("empty archive: %s", src) + } + + break + } + if err != nil { + return err + } + + if hdr.Typeflag == tar.TypeXGlobalHeader || hdr.Typeflag == tar.TypeXHeader { + // don't unpack extended headers as files + continue + } + + path := dst + if dir { + // Disallow parent traversal + if containsDotDot(hdr.Name) { + return fmt.Errorf("entry contains '..': %s", hdr.Name) + } + + path = filepath.Join(path, hdr.Name) + } + + if hdr.FileInfo().IsDir() { + if !dir { + return fmt.Errorf("expected a single file: %s", src) + } + + // A directory, just make the directory and continue unarchiving... + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + // Record the directory information so that we may set its attributes + // after all files have been extracted + dirHdrs = append(dirHdrs, hdr) + + continue + } else { + // There is no ordering guarantee that a file in a directory is + // listed before the directory + dstPath := filepath.Dir(path) + + // Check that the directory exists, otherwise create it + if _, err := os.Stat(dstPath); os.IsNotExist(err) { + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + } + } + + // We have a file. If we already decoded, then it is an error + if !dir && done { + return fmt.Errorf("expected a single file, got multiple: %s", src) + } + + // Mark that we're done so future in single file mode errors + done = true + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + return err + } + _, err = io.Copy(dstF, tarR) + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { + return err + } + + // Set the access and modification time + if err := os.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + + // Adding a file or subdirectory changes the mtime of a directory + // We therefore wait until we've extracted everything and then set the mtime and atime attributes + for _, dirHdr := range dirHdrs { + path := filepath.Join(dst, dirHdr.Name) + if err := os.Chtimes(path, dirHdr.AccessTime, dirHdr.ModTime); err != nil { + return err + } + } + + return nil +} + +// tarDecompressor is an implementation of Decompressor that can +// unpack tar files. 
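+// Note that in this revision it is not registered in the Decompressors
+// map above; the compressed tar decompressors call untar directly.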
+type tarDecompressor struct{} + +func (d *tarDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + return untar(f, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go index c46ed445..5391b5c8 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go @@ -1,10 +1,7 @@ package getter import ( - "archive/tar" "compress/bzip2" - "fmt" - "io" "os" "path/filepath" ) @@ -32,64 +29,5 @@ func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error { // Bzip2 compression is second bzipR := bzip2.NewReader(f) - - // Once bzip decompressed we have a tar format - tarR := tar.NewReader(bzipR) - done := false - for { - hdr, err := tarR.Next() - if err == io.EOF { - if !done { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - - return nil - } - if err != nil { - return err - } - - path := dst - if dir { - path = filepath.Join(path, hdr.Name) - } - - if hdr.FileInfo().IsDir() { - if dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - - continue - } - - // We have a file. If we already decoded, then it is an error - if !dir && done { - return fmt.Errorf("expected a single file, got multiple: %s", src) - } - - // Mark that we're done so future in single file mode errors - done = true - - // Open the file for writing - dstF, err := os.Create(path) - if err != nil { - return err - } - _, err = io.Copy(dstF, tarR) - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { - return err - } - } + return untar(bzipR, dst, src, dir) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go index 686d6c2b..91cf33d9 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_testing.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go @@ -11,7 +11,9 @@ import ( "runtime" "sort" "strings" - "testing" + "time" + + "github.com/mitchellh/go-testing-interface" ) // TestDecompressCase is a single test case for testing decompressors @@ -21,10 +23,11 @@ type TestDecompressCase struct { Err bool // Err is whether we expect an error or not DirList []string // DirList is the list of files for Dir mode FileMD5 string // FileMD5 is the expected MD5 for a single file + Mtime *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode) } // TestDecompressor is a helper function for testing generic decompressors. 
-func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) { +func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) { for _, tc := range cases { t.Logf("Testing: %s", tc.Input) @@ -67,6 +70,14 @@ func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) } } + if tc.Mtime != nil { + actual := fi.ModTime() + expected := *tc.Mtime + if actual != expected { + t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String()) + } + } + return } @@ -83,11 +94,26 @@ func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected) } + // Check for correct atime/mtime + for _, dir := range actual { + path := filepath.Join(dst, dir) + if tc.Mtime != nil { + fi, err := os.Stat(path) + if err != nil { + t.Fatalf("err: %s", err) + } + actual := fi.ModTime() + expected := *tc.Mtime + if actual != expected { + t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String()) + } + } + } }() } } -func testListDir(t *testing.T, path string) []string { +func testListDir(t testing.T, path string) []string { var result []string err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error { if err != nil { @@ -102,7 +128,7 @@ func testListDir(t *testing.T, path string) []string { // If it is a dir, add trailing sep if info.IsDir() { - sub += "/" + sub += string(os.PathSeparator) } result = append(result, sub) @@ -116,7 +142,7 @@ func testListDir(t *testing.T, path string) []string { return result } -func testMD5(t *testing.T, path string) string { +func testMD5(t testing.T, path string) string { f, err := os.Open(path) if err != nil { t.Fatalf("err: %s", err) diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go index e8b1c31c..65eb70dd 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go @@ -1,10 +1,8 @@ package getter import ( - "archive/tar" "compress/gzip" "fmt" - "io" "os" "path/filepath" ) @@ -37,63 +35,5 @@ func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error { } defer gzipR.Close() - // Once gzip decompressed we have a tar format - tarR := tar.NewReader(gzipR) - done := false - for { - hdr, err := tarR.Next() - if err == io.EOF { - if !done { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - - return nil - } - if err != nil { - return err - } - - path := dst - if dir { - path = filepath.Join(path, hdr.Name) - } - - if hdr.FileInfo().IsDir() { - if !dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - - continue - } - - // We have a file. 
If we already decoded, then it is an error - if !dir && done { - return fmt.Errorf("expected a single file, got multiple: %s", src) - } - - // Mark that we're done so future in single file mode errors - done = true - - // Open the file for writing - dstF, err := os.Create(path) - if err != nil { - return err - } - _, err = io.Copy(dstF, tarR) - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { - return err - } - } + return untar(gzipR, dst, src, dir) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_txz.go b/vendor/github.com/hashicorp/go-getter/decompress_txz.go new file mode 100644 index 00000000..5e151c12 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_txz.go @@ -0,0 +1,39 @@ +package getter + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/ulikunitz/xz" +) + +// TarXzDecompressor is an implementation of Decompressor that can +// decompress tar.xz files. +type TarXzDecompressor struct{} + +func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // xz compression is second + txzR, err := xz.NewReader(f) + if err != nil { + return fmt.Errorf("Error opening an xz reader for %s: %s", src, err) + } + + return untar(txzR, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_xz.go b/vendor/github.com/hashicorp/go-getter/decompress_xz.go new file mode 100644 index 00000000..4e37abab --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_xz.go @@ -0,0 +1,49 @@ +package getter + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/ulikunitz/xz" +) + +// XzDecompressor is an implementation of Decompressor that can +// decompress xz files. 
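+// Because raw xz compresses a single stream rather than an archive,
+// Decompress rejects dir mode; use tar.xz/txz for directories.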
+type XzDecompressor struct{} + +func (d *XzDecompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("xz-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // xz compression is second + xzR, err := xz.NewReader(f) + if err != nil { + return err + } + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, xzR) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go index a065c076..b0e70cac 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_zip.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go @@ -42,6 +42,11 @@ func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { for _, f := range zipR.File { path := dst if dir { + // Disallow parent traversal + if containsDotDot(f.Name) { + return fmt.Errorf("entry contains '..': %s", f.Name) + } + path = filepath.Join(path, f.Name) } diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go index 481b737c..c3695510 100644 --- a/vendor/github.com/hashicorp/go-getter/detect.go +++ b/vendor/github.com/hashicorp/go-getter/detect.go @@ -72,12 +72,18 @@ func Detect(src string, pwd string, ds []Detector) (string, error) { subDir = detectSubdir } } + if subDir != "" { u, err := url.Parse(result) if err != nil { return "", fmt.Errorf("Error parsing URL: %s", err) } u.Path += "//" + subDir + + // a subdir may contain wildcards, but in order to support them we + // have to ensure the path isn't escaped. + u.RawPath = u.Path + result = u.String() } diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go index 756ea43f..4ef41ea7 100644 --- a/vendor/github.com/hashicorp/go-getter/detect_file.go +++ b/vendor/github.com/hashicorp/go-getter/detect_file.go @@ -32,7 +32,7 @@ func (d *FileDetector) Detect(src, pwd string) (string, bool, error) { return "", true, err } if fi.Mode()&os.ModeSymlink != 0 { - pwd, err = os.Readlink(pwd) + pwd, err = filepath.EvalSymlinks(pwd) if err != nil { return "", true, err } diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go index cd9a411b..e6053d93 100644 --- a/vendor/github.com/hashicorp/go-getter/get.go +++ b/vendor/github.com/hashicorp/go-getter/get.go @@ -18,6 +18,8 @@ import ( "os/exec" "regexp" "syscall" + + cleanhttp "github.com/hashicorp/go-cleanhttp" ) // Getter defines the interface that schemes must implement to download @@ -35,6 +37,10 @@ type Getter interface { // reference a single file. If possible, the Getter should check if // the remote end contains the same file and no-op this operation. GetFile(string, *url.URL) error + + // ClientMode returns the mode based on the given URL. This is used to + // allow clients to let the getters decide which mode to use. 
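+	// For example, the file getter below stats the path and reports
+	// ClientModeDir for directories, while the git and hg getters
+	// always report ClientModeDir.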
+ ClientMode(*url.URL) (ClientMode, error) } // Getters is the mapping of scheme to the Getter implementation that will @@ -45,8 +51,13 @@ var Getters map[string]Getter // syntax is schema::url, example: git::https://foo.com var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) +// httpClient is the default client to be used by HttpGetters. +var httpClient = cleanhttp.DefaultClient() + func init() { - httpGetter := &HttpGetter{Netrc: true} + httpGetter := &HttpGetter{ + Netrc: true, + } Getters = map[string]Getter{ "file": new(FileGetter), diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go index 341cd0ed..e5d2d61d 100644 --- a/vendor/github.com/hashicorp/go-getter/get_file.go +++ b/vendor/github.com/hashicorp/go-getter/get_file.go @@ -1,8 +1,32 @@ package getter +import ( + "net/url" + "os" +) + // FileGetter is a Getter implementation that will download a module from // a file scheme. type FileGetter struct { // Copy, if set to true, will copy data instead of using a symlink Copy bool } + +func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) { + path := u.Path + if u.RawPath != "" { + path = u.RawPath + } + + fi, err := os.Stat(path) + if err != nil { + return 0, err + } + + // Check if the source is a directory. + if fi.IsDir() { + return ClientModeDir, nil + } + + return ClientModeFile, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go index 693680ac..c89a2d5a 100644 --- a/vendor/github.com/hashicorp/go-getter/get_file_unix.go +++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go @@ -55,7 +55,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { path = u.RawPath } - // The source path must exist and be a directory to be usable. + // The source path must exist and be a file to be usable. if fi, err := os.Stat(path); err != nil { return fmt.Errorf("source path error: %s", err) } else if fi.IsDir() { diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go index 1bf0dc7e..cb1d0294 100644 --- a/vendor/github.com/hashicorp/go-getter/get_git.go +++ b/vendor/github.com/hashicorp/go-getter/get_git.go @@ -1,70 +1,116 @@ package getter import ( + "encoding/base64" "fmt" "io/ioutil" "net/url" "os" "os/exec" "path/filepath" + "strings" urlhelper "github.com/hashicorp/go-getter/helper/url" + "github.com/hashicorp/go-safetemp" + "github.com/hashicorp/go-version" ) // GitGetter is a Getter implementation that will download a module from // a git repository. type GitGetter struct{} +func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { + return ClientModeDir, nil +} + func (g *GitGetter) Get(dst string, u *url.URL) error { if _, err := exec.LookPath("git"); err != nil { return fmt.Errorf("git must be available and on the PATH") } // Extract some query parameters we use - var ref string + var ref, sshKey string q := u.Query() if len(q) > 0 { ref = q.Get("ref") q.Del("ref") + sshKey = q.Get("sshkey") + q.Del("sshkey") + // Copy the URL var newU url.URL = *u u = &newU u.RawQuery = q.Encode() } - // First: clone or update the repository + var sshKeyFile string + if sshKey != "" { + // Check that the git version is sufficiently new. + if err := checkGitVersion("2.3"); err != nil { + return fmt.Errorf("Error using ssh key: %v", err) + } + + // We have an SSH key - decode it. 
+ raw, err := base64.StdEncoding.DecodeString(sshKey) + if err != nil { + return err + } + + // Create a temp file for the key and ensure it is removed. + fh, err := ioutil.TempFile("", "go-getter") + if err != nil { + return err + } + sshKeyFile = fh.Name() + defer os.Remove(sshKeyFile) + + // Set the permissions prior to writing the key material. + if err := os.Chmod(sshKeyFile, 0600); err != nil { + return err + } + + // Write the raw key into the temp file. + _, err = fh.Write(raw) + fh.Close() + if err != nil { + return err + } + } + + // Clone or update the repository _, err := os.Stat(dst) if err != nil && !os.IsNotExist(err) { return err } if err == nil { - err = g.update(dst, ref) + err = g.update(dst, sshKeyFile, ref) } else { - err = g.clone(dst, u) + err = g.clone(dst, sshKeyFile, u) } if err != nil { return err } // Next: check out the proper tag/branch if it is specified, and checkout - if ref == "" { - return nil + if ref != "" { + if err := g.checkout(dst, ref); err != nil { + return err + } } - return g.checkout(dst, ref) + // Lastly, download any/all submodules. + return g.fetchSubmodules(dst, sshKeyFile) } // GetFile for Git doesn't support updating at this time. It will download // the file every time. func (g *GitGetter) GetFile(dst string, u *url.URL) error { - td, err := ioutil.TempDir("", "getter-git") + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(td); err != nil { - return err - } + defer tdcloser.Close() // Get the filename, and strip the filename from the URL so we can // just get the repository directly. @@ -92,16 +138,18 @@ func (g *GitGetter) checkout(dst string, ref string) error { return getRunCommand(cmd) } -func (g *GitGetter) clone(dst string, u *url.URL) error { +func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error { cmd := exec.Command("git", "clone", u.String(), dst) + setupGitEnv(cmd, sshKeyFile) return getRunCommand(cmd) } -func (g *GitGetter) update(dst string, ref string) error { +func (g *GitGetter) update(dst, sshKeyFile, ref string) error { // Determine if we're a branch. If we're NOT a branch, then we just // switch to master prior to checking out cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref) cmd.Dir = dst + if getRunCommand(cmd) != nil { // Not a branch, switch to master. This will also catch non-existent // branches, in which case we want to switch to master and then @@ -116,5 +164,78 @@ func (g *GitGetter) update(dst string, ref string) error { cmd = exec.Command("git", "pull", "--ff-only") cmd.Dir = dst + setupGitEnv(cmd, sshKeyFile) return getRunCommand(cmd) } + +// fetchSubmodules downloads any configured submodules recursively. +func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error { + cmd := exec.Command("git", "submodule", "update", "--init", "--recursive") + cmd.Dir = dst + setupGitEnv(cmd, sshKeyFile) + return getRunCommand(cmd) +} + +// setupGitEnv sets up the environment for the given command. This is used to +// pass configuration data to git and ssh and enables advanced cloning methods. +func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { + const gitSSHCommand = "GIT_SSH_COMMAND=" + var sshCmd []string + + // If we have an existing GIT_SSH_COMMAND, we need to append our options. + // We will also remove our old entry to make sure the behavior is the same + // with versions of Go < 1.9. 
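+	// (Go 1.9 began deduplicating cmd.Env entries in favor of the last
+	// occurrence, so removing the old entry keeps older and newer
+	// runtimes consistent.)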
+ env := os.Environ() + for i, v := range env { + if strings.HasPrefix(v, gitSSHCommand) { + sshCmd = []string{v} + + env[i], env[len(env)-1] = env[len(env)-1], env[i] + env = env[:len(env)-1] + break + } + } + + if len(sshCmd) == 0 { + sshCmd = []string{gitSSHCommand + "ssh"} + } + + if sshKeyFile != "" { + // We have an SSH key temp file configured, tell ssh about this. + sshCmd = append(sshCmd, "-i", sshKeyFile) + } + + env = append(env, strings.Join(sshCmd, " ")) + cmd.Env = env +} + +// checkGitVersion is used to check the version of git installed on the system +// against a known minimum version. Returns an error if the installed version +// is older than the given minimum. +func checkGitVersion(min string) error { + want, err := version.NewVersion(min) + if err != nil { + return err + } + + out, err := exec.Command("git", "version").Output() + if err != nil { + return err + } + + fields := strings.Fields(string(out)) + if len(fields) != 3 { + return fmt.Errorf("Unexpected 'git version' output: %q", string(out)) + } + + have, err := version.NewVersion(fields[2]) + if err != nil { + return err + } + + if have.LessThan(want) { + return fmt.Errorf("Required git version = %s, have %s", want, have) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go index 542bef1f..f3869227 100644 --- a/vendor/github.com/hashicorp/go-getter/get_hg.go +++ b/vendor/github.com/hashicorp/go-getter/get_hg.go @@ -2,7 +2,6 @@ package getter import ( "fmt" - "io/ioutil" "net/url" "os" "os/exec" @@ -10,12 +9,17 @@ import ( "runtime" urlhelper "github.com/hashicorp/go-getter/helper/url" + "github.com/hashicorp/go-safetemp" ) // HgGetter is a Getter implementation that will download a module from // a Mercurial repository. type HgGetter struct{} +func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) { + return ClientModeDir, nil +} + func (g *HgGetter) Get(dst string, u *url.URL) error { if _, err := exec.LookPath("hg"); err != nil { return fmt.Errorf("hg must be available and on the PATH") @@ -60,13 +64,13 @@ func (g *HgGetter) Get(dst string, u *url.URL) error { // GetFile for Hg doesn't support updating at this time. It will download // the file every time. func (g *HgGetter) GetFile(dst string, u *url.URL) error { - td, err := ioutil.TempDir("", "getter-hg") + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(td); err != nil { - return err - } + defer tdcloser.Close() // Get the filename, and strip the filename from the URL so we can // just get the repository directly. diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go index d64b2383..d2e28796 100644 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ b/vendor/github.com/hashicorp/go-getter/get_http.go @@ -4,12 +4,13 @@ import ( "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" "path/filepath" "strings" + + "github.com/hashicorp/go-safetemp" ) // HttpGetter is a Getter implementation that will download from an HTTP @@ -36,6 +37,17 @@ type HttpGetter struct { // Netrc, if true, will lookup and use auth information found // in the user's netrc file if available. Netrc bool + + // Client is the http.Client to use for Get requests. + // This defaults to a cleanhttp.DefaultClient if left unset. 
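+	// Overriding it allows custom timeouts, proxies, or TLS settings
+	// to be injected into all HTTP requests made by this getter.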
+ Client *http.Client +} + +func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { + if strings.HasSuffix(u.Path, "/") { + return ClientModeDir, nil + } + return ClientModeFile, nil } func (g *HttpGetter) Get(dst string, u *url.URL) error { @@ -50,13 +62,17 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { } } + if g.Client == nil { + g.Client = httpClient + } + // Add terraform-get to the parameter. q := u.Query() q.Add("terraform-get", "1") u.RawQuery = q.Encode() // Get the URL - resp, err := http.Get(u.String()) + resp, err := g.Client.Get(u.String()) if err != nil { return err } @@ -91,7 +107,18 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { } func (g *HttpGetter) GetFile(dst string, u *url.URL) error { - resp, err := http.Get(u.String()) + if g.Netrc { + // Add auth from netrc if we can + if err := addAuthFromNetrc(u); err != nil { + return err + } + } + + if g.Client == nil { + g.Client = httpClient + } + + resp, err := g.Client.Get(u.String()) if err != nil { return err } @@ -109,29 +136,40 @@ func (g *HttpGetter) GetFile(dst string, u *url.URL) error { if err != nil { return err } - defer f.Close() - _, err = io.Copy(f, resp.Body) + n, err := io.Copy(f, resp.Body) + if err == nil && n < resp.ContentLength { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } return err } // getSubdir downloads the source into the destination, but with // the proper subdir. func (g *HttpGetter) getSubdir(dst, source, subDir string) error { - // Create a temporary directory to store the full source - td, err := ioutil.TempDir("", "tf") + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - defer os.RemoveAll(td) + defer tdcloser.Close() // Download that into the given directory if err := Get(td, source); err != nil { return err } + // Process any globbing + sourcePath, err := SubdirGlob(td, subDir) + if err != nil { + return err + } + // Make sure the subdir path actually exists - sourcePath := filepath.Join(td, subDir) if _, err := os.Stat(sourcePath); err != nil { return fmt.Errorf( "Error downloading %s: %s", source, err) diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go index a7d3d305..882e694d 100644 --- a/vendor/github.com/hashicorp/go-getter/get_mock.go +++ b/vendor/github.com/hashicorp/go-getter/get_mock.go @@ -43,3 +43,10 @@ func (g *MockGetter) GetFile(dst string, u *url.URL) error { } return g.GetFileErr } + +func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) { + if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" { + return ClientModeDir, nil + } + return ClientModeFile, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go index bcfcbfc9..ebb32174 100644 --- a/vendor/github.com/hashicorp/go-getter/get_s3.go +++ b/vendor/github.com/hashicorp/go-getter/get_s3.go @@ -20,6 +20,45 @@ import ( // a S3 bucket. 
type S3Getter struct{}

+func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
+ // Parse URL
+ region, bucket, path, _, creds, err := g.parseUrl(u)
+ if err != nil {
+ return 0, err
+ }
+
+ // Create client config
+ config := g.getAWSConfig(region, u, creds)
+ sess := session.New(config)
+ client := s3.New(sess)
+
+ // List the object(s) at the given prefix
+ req := &s3.ListObjectsInput{
+ Bucket: aws.String(bucket),
+ Prefix: aws.String(path),
+ }
+ resp, err := client.ListObjects(req)
+ if err != nil {
+ return 0, err
+ }
+
+ for _, o := range resp.Contents {
+ // Use file mode on exact match.
+ if *o.Key == path {
+ return ClientModeFile, nil
+ }
+
+ // Use dir mode if child keys are found.
+ if strings.HasPrefix(*o.Key, path+"/") {
+ return ClientModeDir, nil
+ }
+ }
+
+ // There was no match, so just return file mode. The download is going
+ // to fail but we will let S3 return the proper error later.
+ return ClientModeFile, nil
+}
+
 func (g *S3Getter) Get(dst string, u *url.URL) error { // Parse URL region, bucket, path, _, creds, err := g.parseUrl(u)
@@ -45,7 +84,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { return err }
- config := g.getAWSConfig(region, creds)
+ config := g.getAWSConfig(region, u, creds)
 sess := session.New(config) client := s3.New(sess)
@@ -100,7 +139,7 @@ func (g *S3Getter) GetFile(dst string, u *url.URL) error { return err }
- config := g.getAWSConfig(region, creds)
+ config := g.getAWSConfig(region, u, creds)
 sess := session.New(config) client := s3.New(sess) return g.getObject(client, dst, bucket, path, version)
@@ -135,7 +174,7 @@ func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) er return err }
-func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config {
+func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config {
 conf := &aws.Config{} if creds == nil { // Grab the metadata URL
@@ -156,6 +195,14 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) * }) }
+ if creds != nil {
+ conf.Endpoint = &url.Host
+ conf.S3ForcePathStyle = aws.Bool(true)
+ if url.Scheme == "http" {
+ conf.DisableSSL = aws.Bool(true)
+ }
+ }
+
 conf.Credentials = creds if region != "" { conf.Region = aws.String(region)
@@ -165,29 +212,48 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) * }
 func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) {
- // Expected host style: s3.amazonaws.com. They always have 3 parts,
- // although the first may differ if we're accessing a specific region.
- hostParts := strings.Split(u.Host, ".")
- if len(hostParts) != 3 {
- err = fmt.Errorf("URL is not a valid S3 URL")
- return
- }
+ // This just checks whether we are dealing with S3 or
+ // any other S3 compliant service. S3 has a predictable
+ // URL layout, while other services do not
+ if strings.Contains(u.Host, "amazonaws.com") {
+ // Expected host style: s3.amazonaws.com. They always have 3 parts,
+ // although the first may differ if we're accessing a specific region.
+ hostParts := strings.Split(u.Host, ".")
+ if len(hostParts) != 3 {
+ err = fmt.Errorf("URL is not a valid S3 URL")
+ return
+ }

- // Parse the region out of the first part of the host
- region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3")
- if region == "" {
- region = "us-east-1"
- }
+ // Parse the region out of the first part of the host
+ region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3")
+ if region == "" {
+ region = "us-east-1"
+ }

- pathParts := strings.SplitN(u.Path, "/", 3)
- if len(pathParts) != 3 {
- err = fmt.Errorf("URL is not a valid S3 URL")
- return
- }
+ pathParts := strings.SplitN(u.Path, "/", 3)
+ if len(pathParts) != 3 {
+ err = fmt.Errorf("URL is not a valid S3 URL")
+ return
+ }

- bucket = pathParts[1]
- path = pathParts[2]
- version = u.Query().Get("version")
+ bucket = pathParts[1]
+ path = pathParts[2]
+ version = u.Query().Get("version")
+
+ } else {
+ pathParts := strings.SplitN(u.Path, "/", 3)
+ if len(pathParts) != 3 {
+ err = fmt.Errorf("URL is not a valid S3 compliant URL")
+ return
+ }
+ bucket = pathParts[1]
+ path = pathParts[2]
+ version = u.Query().Get("version")
+ region = u.Query().Get("region")
+ if region == "" {
+ region = "us-east-1"
+ }
+ }
 _, hasAwsId := u.Query()["aws_access_key_id"] _, hasAwsSecret := u.Query()["aws_access_key_secret"]
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go index 4d5ee3cc..c63f2bba 100644 --- a/vendor/github.com/hashicorp/go-getter/source.go +++ b/vendor/github.com/hashicorp/go-getter/source.go @@ -1,6 +1,8 @@ package getter
 import (
+ "fmt"
+ "path/filepath"
 "strings" )
@@ -34,3 +36,27 @@ func SourceDirSubdir(src string) (string, string) { return src, subdir }
+
+// SubdirGlob returns the actual subdir with globbing processed.
+//
+// dst should be a destination directory that is already populated (the
+// download is complete) and subDir should be the set subDir. If subDir
+// is an empty string, this returns an empty string.
+//
+// The returned path is the full absolute path.
+func SubdirGlob(dst, subDir string) (string, error) {
+ matches, err := filepath.Glob(filepath.Join(dst, subDir))
+ if err != nil {
+ return "", err
+ }
+
+ if len(matches) == 0 {
+ return "", fmt.Errorf("subdir %q not found", subDir)
+ }
+
+ if len(matches) > 1 {
+ return "", fmt.Errorf("subdir %q matches multiple paths", subDir)
+ }
+
+ return matches[0], nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/test-fixtures/detect-file-symlink-pwd/syml/pwd b/vendor/github.com/hashicorp/go-getter/test-fixtures/detect-file-symlink-pwd/syml/pwd new file mode 120000 index 00000000..05b44e00 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/test-fixtures/detect-file-symlink-pwd/syml/pwd @@ -0,0 +1 @@ +../real \ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-safetemp/LICENSE b/vendor/github.com/hashicorp/go-safetemp/LICENSE new file mode 100644 index 00000000..be2cc4df --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-safetemp/README.md b/vendor/github.com/hashicorp/go-safetemp/README.md new file mode 100644 index 00000000..02ece331 --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/README.md @@ -0,0 +1,10 @@ +# go-safetemp +[![Godoc](https://godoc.org/github.com/hashcorp/go-safetemp?status.svg)](https://godoc.org/github.com/hashicorp/go-safetemp) + +Functions for safely working with temporary directories and files. + +## Why? + +The Go standard library provides the excellent `ioutil` package for +working with temporary directories and files. This library builds on top +of that to provide safe abstractions above that. diff --git a/vendor/github.com/hashicorp/go-safetemp/go.mod b/vendor/github.com/hashicorp/go-safetemp/go.mod new file mode 100644 index 00000000..02bc5f5b --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-safetemp diff --git a/vendor/github.com/hashicorp/go-safetemp/safetemp.go b/vendor/github.com/hashicorp/go-safetemp/safetemp.go new file mode 100644 index 00000000..c4ae72b7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/safetemp.go @@ -0,0 +1,40 @@ +package safetemp + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// Dir creates a new temporary directory that isn't yet created. This +// can be used with calls that expect a non-existent directory. +// +// The directory is created as a child of a temporary directory created +// within the directory dir starting with prefix. The temporary directory +// returned is always named "temp". The parent directory has the specified +// prefix. +// +// The returned io.Closer should be used to clean up the returned directory. +// This will properly remove the returned directory and any other temporary +// files created. +// +// If an error is returned, the Closer does not need to be called (and will +// be nil). 
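+//
+// Typical usage, as in the getters above:
+//
+//	td, closer, err := safetemp.Dir("", "getter")
+//	if err != nil {
+//		return err
+//	}
+//	defer closer.Close()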
+func Dir(dir, prefix string) (string, io.Closer, error) { + // Create the temporary directory + td, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", nil, err + } + + return filepath.Join(td, "temp"), pathCloser(td), nil +} + +// pathCloser implements io.Closer to remove the given path on Close. +type pathCloser string + +// Close deletes this path. +func (p pathCloser) Close() error { + return os.RemoveAll(string(p)) +} diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml new file mode 100644 index 00000000..3f45b1e8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.9 + +script: + - go test diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 1d50070f..6f3a15ce 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,5 +1,5 @@ # Versioning Library for Go -[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) +[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) go-version is a library for parsing versions and version constraints, and verifying versions against a set of constraints. go-version diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index 091cfab3..8c73df06 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -37,7 +37,7 @@ func init() { } ops := make([]string, 0, len(constraintOperators)) - for k, _ := range constraintOperators { + for k := range constraintOperators { ops = append(ops, regexp.QuoteMeta(k)) } @@ -142,15 +142,37 @@ func constraintLessThanEqual(v, c *Version) bool { } func constraintPessimistic(v, c *Version) bool { + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint if v.LessThan(c) { return false } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. for i := 0; i < c.si-1; i++ { if v.segments[i] != c.segments[i] { return false } } + // Check the last part of the segment in the constraint. 
If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid return true } diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index d0e0b0c8..bee527eb 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -14,16 +14,16 @@ var versionRegexp *regexp.Regexp // The raw regular expression string used for testing the validity // of a version. -const VersionRegexpRaw string = `([0-9]+(\.[0-9]+){0,2})` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-?([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + `?` // Version represents a single version. type Version struct { metadata string pre string - segments []int + segments []int64 si int } @@ -38,20 +38,23 @@ func NewVersion(v string) (*Version, error) { if matches == nil { return nil, fmt.Errorf("Malformed version: %s", v) } - segmentsStr := strings.Split(matches[1], ".") - segments := make([]int, len(segmentsStr), 3) + segments := make([]int64, len(segmentsStr)) si := 0 for i, str := range segmentsStr { - val, err := strconv.ParseInt(str, 10, 32) + val, err := strconv.ParseInt(str, 10, 64) if err != nil { return nil, fmt.Errorf( "Error parsing version: %s", err) } - segments[i] = int(val) - si += 1 + segments[i] = int64(val) + si++ } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. 
This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum for i := len(segments); i < 3; i++ { segments = append(segments, 0) } @@ -86,8 +89,8 @@ func (v *Version) Compare(other *Version) int { return 0 } - segmentsSelf := v.Segments() - segmentsOther := other.Segments() + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() // If the segments are the same, we must compare on prerelease info if reflect.DeepEqual(segmentsSelf, segmentsOther) { @@ -106,21 +109,56 @@ func (v *Version) Compare(other *Version) int { return comparePrereleases(preSelf, preOther) } + // Get the highest specificity (hS), or if they're equal, just use segmentSelf length + lenSelf := len(segmentsSelf) + lenOther := len(segmentsOther) + hS := lenSelf + if lenSelf < lenOther { + hS = lenOther + } // Compare the segments - for i := 0; i < len(segmentsSelf); i++ { + // Because a constraint could have more/less specificity than the version it's + // checking, we need to account for a lopsided or jagged comparison + for i := 0; i < hS; i++ { + if i > lenSelf-1 { + // This means Self had the lower specificity + // Check to see if the remaining segments in Other are all zeros + if !allZero(segmentsOther[i:]) { + // if not, it means that Other has to be greater than Self + return -1 + } + break + } else if i > lenOther-1 { + // this means Other had the lower specificity + // Check to see if the remaining segments in Self are all zeros - + if !allZero(segmentsSelf[i:]) { + //if not, it means that Self has to be greater than Other + return 1 + } + break + } lhs := segmentsSelf[i] rhs := segmentsOther[i] - if lhs == rhs { continue } else if lhs < rhs { return -1 - } else { - return 1 } + // Otherwis, rhs was > lhs, they're not equal + return 1 } - panic("should not be reached") + // if we got this far, they're equal + return 0 +} + +func allZero(segs []int64) bool { + for _, s := range segs { + if s != 0 { + return false + } + } + return true } func comparePart(preSelf string, preOther string) int { @@ -128,24 +166,42 @@ func comparePart(preSelf string, preOther string) int { return 0 } + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + // if a part is empty, we use the other to decide if preSelf == "" { - _, notIsNumeric := strconv.ParseInt(preOther, 10, 64) - if notIsNumeric == nil { + if otherNumeric { return -1 } return 1 } if preOther == "" { - _, notIsNumeric := strconv.ParseInt(preSelf, 10, 64) - if notIsNumeric == nil { + if selfNumeric { return 1 } return -1 } - if preSelf > preOther { + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { return 1 } @@ -226,12 +282,25 @@ func (v *Version) Prerelease() string { return v.pre } -// Segments returns the numeric segments of the version as a slice. +// Segments returns the numeric segments of the version as a slice of ints. // // This excludes any metadata or pre-release information. For example, // for a version "1.2.3-beta", segments will return a slice of // 1, 2, 3. 
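+//
+// A hypothetical sketch (illustrative only):
+//
+//	v, _ := version.NewVersion("1.2.3-beta")
+//	v.Segments() // []int{1, 2, 3}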
func (v *Version) Segments() []int { + segmentSlice := make([]int, len(v.segments)) + for i, v := range v.segments { + segmentSlice[i] = int(v) + } + return segmentSlice +} + +// Segments64 returns the numeric segments of the version as a slice of int64s. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments64() []int64 { return v.segments } @@ -239,7 +308,13 @@ func (v *Version) Segments() []int { // and metadata information. func (v *Version) String() string { var buf bytes.Buffer - fmt.Fprintf(&buf, "%d.%d.%d", v.segments[0], v.segments[1], v.segments[2]) + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) if v.pre != "" { fmt.Fprintf(&buf, "-%s", v.pre) } diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore new file mode 100644 index 00000000..15586a2b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.gitignore @@ -0,0 +1,9 @@ +y.output + +# ignore intellij files +.idea +*.iml +*.ipr +*.iws + +*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml new file mode 100644 index 00000000..3f83d902 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/.travis.yml @@ -0,0 +1,12 @@ +sudo: false + +language: go + +go: + - 1.8 + +branches: + only: + - master + +script: make test diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml index 3c8cdf8e..4db0b711 100644 --- a/vendor/github.com/hashicorp/hcl/appveyor.yml +++ b/vendor/github.com/hashicorp/hcl/appveyor.yml @@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\hashicorp\hcl environment: GOPATH: c:\gopath init: - - git config --global core.autocrlf true + - git config --global core.autocrlf false install: - cmd: >- echo %Path% diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go index c8a077d4..0b39c1b9 100644 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ b/vendor/github.com/hashicorp/hcl/decoder.go @@ -91,7 +91,7 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error return d.decodeBool(name, node, result) case reflect.Float64: return d.decodeFloat(name, node, result) - case reflect.Int: + case reflect.Int, reflect.Int32, reflect.Int64: return d.decodeInt(name, node, result) case reflect.Interface: // When we see an interface, we make our own thing @@ -164,7 +164,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er return err } - result.Set(reflect.ValueOf(int(v))) + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } return nil case token.STRING: v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) @@ -172,7 +176,11 @@ func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) er return err } - result.Set(reflect.ValueOf(int(v))) + if result.Kind() == reflect.Interface { + result.Set(reflect.ValueOf(int(v))) + } else { + result.SetInt(v) + } return nil } } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go index ea3734f0..6e5ef654 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ 
b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go @@ -156,7 +156,8 @@ func (o *ObjectKey) Pos() token.Pos { type LiteralType struct { Token token.Token - // associated line comment, only when used in a list + // comment types, only used when in a list + LeadComment *CommentGroup LineComment *CommentGroup }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go index f46ed4cc..b4881806 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -3,6 +3,7 @@ package parser import ( + "bytes" "errors" "fmt" "strings" @@ -36,6 +37,11 @@ func newParser(src []byte) *Parser { // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { + // normalize all line endings + // since the scanner and output only work with "\n" line endings, we may + // end up with dangling "\r" characters in the parsed data. + src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) + p := newParser(src) return p.Parse() } @@ -50,7 +56,7 @@ func (p *Parser) Parse() (*ast.File, error) { scerr = &PosError{Pos: pos, Err: errors.New(msg)} } - f.Node, err = p.objectList() + f.Node, err = p.objectList(false) if scerr != nil { return nil, scerr } @@ -62,11 +68,23 @@ func (p *Parser) Parse() (*ast.File, error) { return f, nil } -func (p *Parser) objectList() (*ast.ObjectList, error) { +// objectList parses a list of items within an object (generally k/v pairs). +// The parameter "obj" tells us whether we are within an object (braces: +// '{', '}') or just at the top level. If we're within an object, we end +// at an RBRACE. +func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { defer un(trace(p, "ParseObjectList")) node := &ast.ObjectList{} for { + if obj { + tok := p.scan() + p.unscan() + if tok.Type == token.RBRACE { + break + } + } + n, err := p.objectItem() if err == errEofToken { break // we are finished } @@ -244,7 +262,10 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { keyCount++ keys = append(keys, &ast.ObjectKey{Token: p.tok}) case token.ILLEGAL: - fmt.Println("illegal") + return keys, &PosError{ + Pos: p.tok.Pos, + Err: fmt.Errorf("illegal character"), + } default: return keys, &PosError{ Pos: p.tok.Pos, @@ -288,7 +309,7 @@ func (p *Parser) objectType() (*ast.ObjectType, error) { Lbrace: p.tok.Pos, } - l, err := p.objectList() + l, err := p.objectList(true) // if we hit RBRACE, we are good to go (means we parsed all Items), if it's // not a RBRACE, it's a syntax error and we just return it.
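The decoder.go hunk above widens integer decoding from reflect.Int alone to reflect.Int32 and reflect.Int64 targets, using result.SetInt for concrete integer kinds. A minimal sketch of what that enables, assuming the HCL v1 top-level hcl.Decode API and a hypothetical Limits struct:

	package main

	import (
		"fmt"

		"github.com/hashicorp/hcl"
	)

	type Limits struct {
		// An int64 target; before this change decodeInt could only
		// populate plain int (or interface) destinations.
		MaxBytes int64 `hcl:"max_bytes"`
	}

	func main() {
		var l Limits
		if err := hcl.Decode(&l, `max_bytes = 5368709120`); err != nil {
			panic(err)
		}
		fmt.Println(l.MaxBytes) // 5368709120
	}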
@@ -296,9 +317,9 @@ func (p *Parser) objectType() (*ast.ObjectType, error) { return nil, err } - // If there is no error, we should be at a RBRACE to end the object - if p.tok.Type != token.RBRACE { - return nil, fmt.Errorf("object expected closing RBRACE got: %s", p.tok.Type) + // No error, scan and expect the ending to be a brace + if tok := p.scan(); tok.Type != token.RBRACE { + return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type) } o.List = l @@ -331,12 +352,18 @@ func (p *Parser) listType() (*ast.ListType, error) { } } switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: + case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: node, err := p.literalType() if err != nil { return nil, err } + // If there is a lead comment, apply it + if p.leadComment != nil { + node.LeadComment = p.leadComment + p.leadComment = nil + } + l.Add(node) needComma = true case token.COMMA: @@ -367,12 +394,16 @@ func (p *Parser) listType() (*ast.ListType, error) { } l.Add(node) needComma = true - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) + node, err := p.listType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse list within list: %s", err), + } + } + l.Add(node) case token.RBRACK: // finished l.Rbrack = p.tok.Pos diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go index 0735d95e..69662367 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -95,6 +95,12 @@ func (s *Scanner) next() rune { s.srcPos.Column = 0 } + // If we see a null character with data left, then that is an error + if ch == '\x00' && s.buf.Len() > 0 { + s.err("unexpected null character (0x00)") + return eof + } + // debug // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) return ch @@ -474,7 +480,7 @@ func (s *Scanner) scanString() { // read character after quote ch := s.next() - if ch < 0 || ch == eof { + if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { s.err("literal not terminated") return } diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go index 956c8991..5f981eaa 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -27,6 +27,9 @@ func Unquote(s string) (t string, err error) { if quote != '"' { return "", ErrSyntax } + if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { + return "", ErrSyntax + } // Is it trivial? Avoid allocation. if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { @@ -46,7 +49,7 @@ func Unquote(s string) (t string, err error) { for len(s) > 0 { // If we're starting a '${}' then let it through un-unquoted. // Specifically: we don't unquote any characters within the `${}` - // section, except for escaped backslashes, which we handle specifically. + // section. 
if s[0] == '$' && len(s) > 1 && s[1] == '{' { buf = append(buf, '$', '{') s = s[2:] @@ -61,16 +64,6 @@ func Unquote(s string) (t string, err error) { s = s[size:] - // We special case escaped backslashes in interpolations, converting - // them to their unescaped equivalents. - if r == '\\' { - q, _ := utf8.DecodeRuneInString(s) - switch q { - case '\\': - continue - } - } - n := utf8.EncodeRune(runeTmp[:], r) buf = append(buf, runeTmp[:n]...) @@ -94,6 +87,10 @@ func Unquote(s string) (t string, err error) { } } + if s[0] == '\n' { + return "", ErrSyntax + } + c, multibyte, ss, err := unquoteChar(s, quote) if err != nil { return "", err diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go index 6eb14a25..f652d6fe 100644 --- a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go +++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go @@ -48,6 +48,12 @@ func flattenListType( item *ast.ObjectItem, items []*ast.ObjectItem, frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { + // If the list is empty, keep the original list + if len(ot.List) == 0 { + items = append(items, item) + return items, frontier + } + // All the elements of this object must also be objects! for _, subitem := range ot.List { if _, ok := subitem.(*ast.ObjectType); !ok { diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go index 3a62ec3f..125a5f07 100644 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/hashicorp/hcl/hcl/ast" + hcltoken "github.com/hashicorp/hcl/hcl/token" "github.com/hashicorp/hcl/json/scanner" "github.com/hashicorp/hcl/json/token" ) @@ -85,6 +86,7 @@ func (p *Parser) objectList() (*ast.ObjectList, error) { break } } + return node, nil } @@ -103,6 +105,14 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) { switch p.tok.Type { case token.COLON: + pos := p.tok.Pos + o.Assign = hcltoken.Pos{ + Filename: pos.Filename, + Offset: pos.Offset, + Line: pos.Line, + Column: pos.Column, + } + o.Val, err = p.objectValue() if err != nil { return nil, err @@ -137,7 +147,7 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { // Done return keys, nil case token.ILLEGAL: - fmt.Println("illegal") + return nil, errors.New("illegal") default: return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) } diff --git a/vendor/github.com/hashicorp/hcl2/LICENSE b/vendor/github.com/hashicorp/hcl2/LICENSE new file mode 100644 index 00000000..82b4de97 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl2/gohcl/decode.go new file mode 100644 index 00000000..3a149a8c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/decode.go @@ -0,0 +1,304 @@ +package gohcl + +import ( + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" +) + +// DecodeBody extracts the configuration within the given body into the given +// value. This value must be a non-nil pointer to either a struct or +// a map, where in the former case the configuration will be decoded using +// struct tags and in the latter case only attributes are allowed and their +// values are decoded into the map. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. 
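+//
+// A hypothetical sketch (the struct, field names, and body value are
+// illustrative, not part of this package):
+//
+//	type Network struct {
+//		CIDR    string  `hcl:"cidr"`
+//		Gateway *string `hcl:"gateway"` // pointer => optional
+//	}
+//
+//	var net Network
+//	diags := gohcl.DecodeBody(file.Body, nil, &net)
+//	if diags.HasErrors() {
+//		// report diags to the user
+//	}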
+func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + rv := reflect.ValueOf(val) + if rv.Kind() != reflect.Ptr { + panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String())) + } + + return decodeBodyToValue(body, ctx, rv.Elem()) +} + +func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { + et := val.Type() + switch et.Kind() { + case reflect.Struct: + return decodeBodyToStruct(body, ctx, val) + case reflect.Map: + return decodeBodyToMap(body, ctx, val) + default: + panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String())) + } +} + +func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { + schema, partial := ImpliedBodySchema(val.Interface()) + + var content *hcl.BodyContent + var leftovers hcl.Body + var diags hcl.Diagnostics + if partial { + content, leftovers, diags = body.PartialContent(schema) + } else { + content, diags = body.Content(schema) + } + if content == nil { + return diags + } + + tags := getFieldTags(val.Type()) + + if tags.Remain != nil { + fieldIdx := *tags.Remain + field := val.Type().Field(fieldIdx) + fieldV := val.Field(fieldIdx) + switch { + case bodyType.AssignableTo(field.Type): + fieldV.Set(reflect.ValueOf(leftovers)) + case attrsType.AssignableTo(field.Type): + attrs, attrsDiags := leftovers.JustAttributes() + if len(attrsDiags) > 0 { + diags = append(diags, attrsDiags...) + } + fieldV.Set(reflect.ValueOf(attrs)) + default: + diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...) + } + } + + for name, fieldIdx := range tags.Attributes { + attr := content.Attributes[name] + field := val.Type().Field(fieldIdx) + fieldV := val.Field(fieldIdx) + + if attr == nil { + if !exprType.AssignableTo(field.Type) { + continue + } + + // As a special case, if the target is of type hcl.Expression then + // we'll assign an actual expression that evalues to a cty null, + // so the caller can deal with it within the cty realm rather + // than within the Go realm. + synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange()) + fieldV.Set(reflect.ValueOf(synthExpr)) + continue + } + + switch { + case attrType.AssignableTo(field.Type): + fieldV.Set(reflect.ValueOf(attr)) + case exprType.AssignableTo(field.Type): + fieldV.Set(reflect.ValueOf(attr.Expr)) + default: + diags = append(diags, DecodeExpression( + attr.Expr, ctx, fieldV.Addr().Interface(), + )...) + } + } + + blocksByType := content.Blocks.ByType() + + for typeName, fieldIdx := range tags.Blocks { + blocks := blocksByType[typeName] + field := val.Type().Field(fieldIdx) + + ty := field.Type + isSlice := false + isPtr := false + if ty.Kind() == reflect.Slice { + isSlice = true + ty = ty.Elem() + } + if ty.Kind() == reflect.Ptr { + isPtr = true + ty = ty.Elem() + } + + if len(blocks) > 1 && !isSlice { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", typeName), + Detail: fmt.Sprintf( + "Only one %s block is allowed. 
Another was defined at %s.", + typeName, blocks[0].DefRange.String(), + ), + Subject: &blocks[1].DefRange, + }) + continue + } + + if len(blocks) == 0 { + if isSlice || isPtr { + val.Field(fieldIdx).Set(reflect.Zero(field.Type)) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", typeName), + Detail: fmt.Sprintf("A %s block is required.", typeName), + Subject: body.MissingItemRange().Ptr(), + }) + } + continue + } + + switch { + + case isSlice: + elemType := ty + if isPtr { + elemType = reflect.PtrTo(ty) + } + sli := reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks)) + + for i, block := range blocks { + if isPtr { + v := reflect.New(ty) + diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + sli.Index(i).Set(v) + } else { + diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...) + } + } + + val.Field(fieldIdx).Set(sli) + + default: + block := blocks[0] + if isPtr { + v := reflect.New(ty) + diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + val.Field(fieldIdx).Set(v) + } else { + diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) + } + + } + + } + + return diags +} + +func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + attrs, diags := body.JustAttributes() + if attrs == nil { + return diags + } + + mv := reflect.MakeMap(v.Type()) + + for k, attr := range attrs { + switch { + case attrType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr)) + case exprType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr)) + default: + ev := reflect.New(v.Type().Elem()) + diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...) + mv.SetMapIndex(reflect.ValueOf(k), ev.Elem()) + } + } + + v.Set(mv) + + return diags +} + +func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + var diags hcl.Diagnostics + + ty := v.Type() + + switch { + case blockType.AssignableTo(ty): + v.Elem().Set(reflect.ValueOf(block)) + case bodyType.AssignableTo(ty): + v.Elem().Set(reflect.ValueOf(block.Body)) + case attrsType.AssignableTo(ty): + attrs, attrsDiags := block.Body.JustAttributes() + if len(attrsDiags) > 0 { + diags = append(diags, attrsDiags...) + } + v.Elem().Set(reflect.ValueOf(attrs)) + default: + diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...) + + if len(block.Labels) > 0 { + blockTags := getFieldTags(ty) + for li, lv := range block.Labels { + lfieldIdx := blockTags.Labels[li].FieldIndex + v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) + } + } + + } + + return diags +} + +// DecodeExpression extracts the value of the given expression into the given +// value. This value must be something that gocty is able to decode into, +// since the final decoding is delegated to that package. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. 
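+//
+// A hypothetical sketch (illustrative only): decoding a single attribute's
+// expression into a Go string:
+//
+//	var region string
+//	diags := gohcl.DecodeExpression(attr.Expr, ctx, &region)
+//	if diags.HasErrors() {
+//		// the expression was not convertible to string
+//	}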
+func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + srcVal, diags := expr.Value(ctx) + + convTy, err := gocty.ImpliedType(val) + if err != nil { + panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err)) + } + + srcVal, err = convert.Convert(srcVal, convTy) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsuitable value type", + Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), + Subject: expr.StartRange().Ptr(), + Context: expr.Range().Ptr(), + }) + return diags + } + + err = gocty.FromCtyValue(srcVal, val) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsuitable value type", + Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), + Subject: expr.StartRange().Ptr(), + Context: expr.Range().Ptr(), + }) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go new file mode 100644 index 00000000..8500214b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go @@ -0,0 +1,49 @@ +// Package gohcl allows decoding HCL configurations into Go data structures. +// +// It provides a convenient and concise way of describing the schema for +// configuration and then accessing the resulting data via native Go +// types. +// +// A struct field tag scheme is used, similar to other decoding and +// unmarshalling libraries. The tags are formatted as in the following example: +// +// ThingType string `hcl:"thing_type,attr"` +// +// Within each tag there are two comma-separated tokens. The first is the +// name of the corresponding construct in configuration, while the second +// is a keyword giving the kind of construct expected. The following +// kind keywords are supported: +// +// attr (the default) indicates that the value is to be populated from an attribute +// block indicates that the value is to populated from a block +// label indicates that the value is to populated from a block label +// remain indicates that the value is to be populated from the remaining body after populating other fields +// +// "attr" fields may either be of type *hcl.Expression, in which case the raw +// expression is assigned, or of any type accepted by gocty, in which case +// gocty will be used to assign the value to a native Go type. +// +// "block" fields may be of type *hcl.Block or hcl.Body, in which case the +// corresponding raw value is assigned, or may be a struct that recursively +// uses the same tags. Block fields may also be slices of any of these types, +// in which case multiple blocks of the corresponding type are decoded into +// the slice. +// +// "label" fields are considered only in a struct used as the type of a field +// marked as "block", and are used sequentially to capture the labels of +// the blocks being decoded. In this case, the name token is used only as +// an identifier for the label in diagnostic messages. +// +// "remain" can be placed on a single field that may be either of type +// hcl.Body or hcl.Attributes, in which case any remaining body content is +// placed into this field for delayed processing. If no "remain" field is +// present then any attributes or blocks not matched by another valid tag +// will cause an error diagnostic. +// +// Broadly-speaking this package deals with two types of error. 
The first is +// errors in the configuration itself, which are returned as diagnostics +// written with the configuration author as the target audience. The second +// is bugs in the calling program, such as invalid struct tags, which are +// surfaced via panics since there can be no useful runtime handling of such +// errors and they should certainly not be returned to the user as diagnostics. +package gohcl diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl2/gohcl/schema.go new file mode 100644 index 00000000..88164cb0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/schema.go @@ -0,0 +1,174 @@ +package gohcl + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/hcl2/hcl" +) + +// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the +// given value, which must be a struct value or a pointer to one. If an +// inappropriate value is passed, this function will panic. +// +// The second return argument indicates whether the given struct includes +// a "remain" field, and thus the returned schema is non-exhaustive. +// +// This uses the tags on the fields of the struct to discover how each +// field's value should be expressed within configuration. If an invalid +// mapping is attempted, this function will panic. +func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) { + ty := reflect.TypeOf(val) + + if ty.Kind() == reflect.Ptr { + ty = ty.Elem() + } + + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("given value must be struct, not %T", val)) + } + + var attrSchemas []hcl.AttributeSchema + var blockSchemas []hcl.BlockHeaderSchema + + tags := getFieldTags(ty) + + attrNames := make([]string, 0, len(tags.Attributes)) + for n := range tags.Attributes { + attrNames = append(attrNames, n) + } + sort.Strings(attrNames) + for _, n := range attrNames { + idx := tags.Attributes[n] + optional := tags.Optional[n] + field := ty.Field(idx) + + var required bool + + switch { + case field.Type.AssignableTo(exprType): + // If we're decoding to hcl.Expression then absense can be + // indicated via a null value, so we don't specify that + // the field is required during decoding. 
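+ // (DecodeBody pairs with this case by assigning a synthetic null
+ // expression when the attribute is absent, so callers can detect
+ // absence in the cty realm.)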
+ required = false + case field.Type.Kind() != reflect.Ptr && !optional: + required = true + default: + required = false + } + + attrSchemas = append(attrSchemas, hcl.AttributeSchema{ + Name: n, + Required: required, + }) + } + + blockNames := make([]string, 0, len(tags.Blocks)) + for n := range tags.Blocks { + blockNames = append(blockNames, n) + } + sort.Strings(blockNames) + for _, n := range blockNames { + idx := tags.Blocks[n] + field := ty.Field(idx) + fty := field.Type + if fty.Kind() == reflect.Slice { + fty = fty.Elem() + } + if fty.Kind() == reflect.Ptr { + fty = fty.Elem() + } + if fty.Kind() != reflect.Struct { + panic(fmt.Sprintf( + "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, + )) + } + ftags := getFieldTags(fty) + var labelNames []string + if len(ftags.Labels) > 0 { + labelNames = make([]string, len(ftags.Labels)) + for i, l := range ftags.Labels { + labelNames[i] = l.Name + } + } + + blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ + Type: n, + LabelNames: labelNames, + }) + } + + partial = tags.Remain != nil + schema = &hcl.BodySchema{ + Attributes: attrSchemas, + Blocks: blockSchemas, + } + return schema, partial +} + +type fieldTags struct { + Attributes map[string]int + Blocks map[string]int + Labels []labelField + Remain *int + Optional map[string]bool +} + +type labelField struct { + FieldIndex int + Name string +} + +func getFieldTags(ty reflect.Type) *fieldTags { + ret := &fieldTags{ + Attributes: map[string]int{}, + Blocks: map[string]int{}, + Optional: map[string]bool{}, + } + + ct := ty.NumField() + for i := 0; i < ct; i++ { + field := ty.Field(i) + tag := field.Tag.Get("hcl") + if tag == "" { + continue + } + + comma := strings.Index(tag, ",") + var name, kind string + if comma != -1 { + name = tag[:comma] + kind = tag[comma+1:] + } else { + name = tag + kind = "attr" + } + + switch kind { + case "attr": + ret.Attributes[name] = i + case "block": + ret.Blocks[name] = i + case "label": + ret.Labels = append(ret.Labels, labelField{ + FieldIndex: i, + Name: name, + }) + case "remain": + if ret.Remain != nil { + panic("only one 'remain' tag is permitted") + } + idx := i // copy, because this loop will continue assigning to i + ret.Remain = &idx + case "optional": + ret.Attributes[name] = i + ret.Optional[name] = true + default: + panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/types.go b/vendor/github.com/hashicorp/hcl2/gohcl/types.go new file mode 100644 index 00000000..a94f275a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/types.go @@ -0,0 +1,16 @@ +package gohcl + +import ( + "reflect" + + "github.com/hashicorp/hcl2/hcl" +) + +var victimExpr hcl.Expression +var victimBody hcl.Body + +var exprType = reflect.TypeOf(&victimExpr).Elem() +var bodyType = reflect.TypeOf(&victimBody).Elem() +var blockType = reflect.TypeOf((*hcl.Block)(nil)) +var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) +var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go new file mode 100644 index 00000000..6ecf7447 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go @@ -0,0 +1,103 @@ +package hcl + +import ( + "fmt" +) + +// DiagnosticSeverity represents the severity of a diagnostic. 
+type DiagnosticSeverity int + +const ( + // DiagInvalid is the invalid zero value of DiagnosticSeverity + DiagInvalid DiagnosticSeverity = iota + + // DiagError indicates that the problem reported by a diagnostic prevents + // further progress in parsing and/or evaluating the subject. + DiagError + + // DiagWarning indicates that the problem reported by a diagnostic warrants + // user attention but does not prevent further progress. It is most + // commonly used for showing deprecation notices. + DiagWarning +) + +// Diagnostic represents information to be presented to a user about an +// error or anomaly in parsing or evaluating configuration. +type Diagnostic struct { + Severity DiagnosticSeverity + + // Summary and detail contain the English-language description of the + // problem. Summary is a terse description of the general problem and + // detail is a more elaborate, often multi-sentence description of + // the problem and what might be done to solve it. + Summary string + Detail string + Subject *Range + Context *Range +} + +// Diagnostics is a list of Diagnostic instances. +type Diagnostics []*Diagnostic + +// error implementation, so that diagnostics can be returned via APIs +// that normally deal in vanilla Go errors. +// +// This presents only minimal context about the error, for compatibility +// with usual expectations about how errors will present as strings. +func (d *Diagnostic) Error() string { + return fmt.Sprintf("%s: %s; %s", d.Subject, d.Summary, d.Detail) +} + +// error implementation, so that sets of diagnostics can be returned via +// APIs that normally deal in vanilla Go errors. +func (d Diagnostics) Error() string { + count := len(d) + switch { + case count == 0: + return "no diagnostics" + case count == 1: + return d[0].Error() + default: + return fmt.Sprintf("%s, and %d other diagnostic(s)", d[0].Error(), count-1) + } +} + +// Append appends a new error to a Diagnostics and returns the whole Diagnostics. +// +// This is provided as a convenience for returning from a function that +// collects and then returns a set of diagnostics: +// +// return nil, diags.Append(&hcl.Diagnostic{ ... }) +// +// Note that this modifies the array underlying the diagnostics slice, so +// must be used carefully within a single codepath. It is incorrect (and rude) +// to extend a diagnostics created by a different subsystem. +func (d Diagnostics) Append(diag *Diagnostic) Diagnostics { + return append(d, diag) +} + +// Extend concatenates the given Diagnostics with the receiver and returns +// the whole new Diagnostics. +// +// This is similar to Append but accepts multiple diagnostics to add. It has +// all the same caveats and constraints. +func (d Diagnostics) Extend(diags Diagnostics) Diagnostics { + return append(d, diags...) +} + +// HasErrors returns true if the receiver contains any diagnostics of +// severity DiagError. +func (d Diagnostics) HasErrors() bool { + for _, diag := range d { + if diag.Severity == DiagError { + return true + } + } + return false +} + +// A DiagnosticWriter emits diagnostics somehow.
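+//
+// A hypothetical sketch (illustrative only; files is a map[string]*File the
+// caller has built up while parsing):
+//
+//	wr := hcl.NewDiagnosticTextWriter(os.Stderr, files, 78, true)
+//	if err := wr.WriteDiagnostics(diags); err != nil {
+//		// the writer itself failed
+//	}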
+type DiagnosticWriter interface { + WriteDiagnostic(*Diagnostic) error + WriteDiagnostics(Diagnostics) error +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go new file mode 100644 index 00000000..dfa473ad --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go @@ -0,0 +1,168 @@ +package hcl + +import ( + "bufio" + "errors" + "fmt" + "io" + + wordwrap "github.com/mitchellh/go-wordwrap" +) + +type diagnosticTextWriter struct { + files map[string]*File + wr io.Writer + width uint + color bool +} + +// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics +// to the given writer as formatted text. +// +// It is designed to produce text appropriate to print in a monospaced font +// in a terminal of a particular width, or optionally with no width limit. +// +// The given width may be zero to disable word-wrapping of the detail text +// and truncation of source code snippets. +// +// If color is set to true, the output will include VT100 escape sequences to +// color-code the severity indicators. It is suggested to turn this off if +// the target writer is not a terminal. +func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter { + return &diagnosticTextWriter{ + files: files, + wr: wr, + width: width, + color: color, + } +} + +func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { + if diag == nil { + return errors.New("nil diagnostic") + } + + var colorCode, highlightCode, resetCode string + if w.color { + switch diag.Severity { + case DiagError: + colorCode = "\x1b[31m" + case DiagWarning: + colorCode = "\x1b[33m" + } + resetCode = "\x1b[0m" + highlightCode = "\x1b[1;4m" + } + + var severityStr string + switch diag.Severity { + case DiagError: + severityStr = "Error" + case DiagWarning: + severityStr = "Warning" + default: + // should never happen + severityStr = "???????" + } + + fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary) + + if diag.Subject != nil { + snipRange := *diag.Subject + highlightRange := snipRange + if diag.Context != nil { + // Show enough of the source code to include both the subject + // and context ranges, which overlap in all reasonable + // situations. + snipRange = RangeOver(snipRange, *diag.Context) + } + // We can't illustrate an empty range, so we'll turn such ranges into + // single-character ranges, which might not be totally valid (may point + // off the end of a line, or off the end of the file) but are good + // enough for the bounds checks we do below. 
+ if snipRange.Empty() { + snipRange.End.Byte++ + snipRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + file := w.files[diag.Subject.Filename] + if file == nil || file.Bytes == nil { + fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line) + } else { + + var contextLine string + if diag.Subject != nil { + contextLine = contextString(file, diag.Subject.Start.Byte) + if contextLine != "" { + contextLine = ", in " + contextLine + } + } + + fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine) + + src := file.Bytes + sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines) + + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snipRange) { + continue + } + + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + if highlightedRange.Empty() { + fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes()) + } else { + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + w.wr, "%4d: %s%s%s%s%s\n", + lineRange.Start.Line, + before, + highlightCode, highlighted, resetCode, + after, + ) + } + + } + + w.wr.Write([]byte{'\n'}) + } + } + + if diag.Detail != "" { + detail := diag.Detail + if w.width != 0 { + detail = wordwrap.WrapString(detail, w.width) + } + fmt.Fprintf(w.wr, "%s\n\n", detail) + } + + return nil +} + +func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error { + for _, diag := range diags { + err := w.WriteDiagnostic(diag) + if err != nil { + return err + } + } + return nil +} + +func contextString(file *File, offset int) string { + type contextStringer interface { + ContextString(offset int) string + } + + if cser, ok := file.Nav.(contextStringer); ok { + return cser.ContextString(offset) + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go new file mode 100644 index 00000000..c1283344 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go @@ -0,0 +1,24 @@ +package hcl + +import ( + "github.com/agext/levenshtein" +) + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. 
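+//
+// For example (hypothetical values), nameSuggestion("regon",
+// []string{"region", "zone"}) returns "region" because its Levenshtein
+// distance is 1, while a name at distance 3 or more yields "".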
+func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/doc.go new file mode 100644 index 00000000..01318c96 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/doc.go @@ -0,0 +1 @@ +package hcl diff --git a/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go b/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go new file mode 100644 index 00000000..915910ad --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go @@ -0,0 +1,25 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// An EvalContext provides the variables and functions that should be used +// to evaluate an expression. +type EvalContext struct { + Variables map[string]cty.Value + Functions map[string]function.Function + parent *EvalContext +} + +// NewChild returns a new EvalContext that is a child of the receiver. +func (ctx *EvalContext) NewChild() *EvalContext { + return &EvalContext{parent: ctx} +} + +// Parent returns the parent of the receiver, or nil if the receiver has +// no parent. +func (ctx *EvalContext) Parent() *EvalContext { + return ctx.parent +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go new file mode 100644 index 00000000..6963fbae --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go @@ -0,0 +1,46 @@ +package hcl + +// ExprCall tests if the given expression is a function call and, +// if so, extracts the function name and the expressions that represent +// the arguments. If the given expression is not statically a function call, +// error diagnostics are returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprCall that takes no arguments and returns +// *StaticCall. This method should return nil if a static call cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprCall(expr Expression) (*StaticCall, Diagnostics) { + type exprCall interface { + ExprCall() *StaticCall + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprCall) + return supported + }) + + if exC, supported := physExpr.(exprCall); supported { + if call := exC.ExprCall(); call != nil { + return call, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static function call is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// StaticCall represents a function call that was extracted statically from +// an expression using ExprCall. +type StaticCall struct { + Name string + NameRange Range + Arguments []Expression + ArgsRange Range +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go new file mode 100644 index 00000000..d05cca0b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go @@ -0,0 +1,37 @@ +package hcl + +// ExprList tests if the given expression is a static list construct and, +// if so, extracts the expressions that represent the list elements. 
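+// For example, an expression written as ["a", "b"] in HCL native syntax
+// can yield its two element expressions this way (illustrative).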
+// If the given expression is not a static list, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprList that takes no arguments and returns +// []Expression. This method should return nil if a static list cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprList(expr Expression) ([]Expression, Diagnostics) { + type exprList interface { + ExprList() []Expression + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprList) + return supported + }) + + if exL, supported := physExpr.(exprList); supported { + if list := exL.ExprList(); list != nil { + return list, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static list expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go new file mode 100644 index 00000000..96d1ce4b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go @@ -0,0 +1,44 @@ +package hcl + +// ExprMap tests if the given expression is a static map construct and, +// if so, extracts the expressions that represent the map elements. +// If the given expression is not a static map, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprMap that takes no arguments and returns +// []KeyValuePair. This method should return nil if a static map cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprMap(expr Expression) ([]KeyValuePair, Diagnostics) { + type exprMap interface { + ExprMap() []KeyValuePair + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprMap) + return supported + }) + + if exM, supported := physExpr.(exprMap); supported { + if pairs := exM.ExprMap(); pairs != nil { + return pairs, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static map expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// KeyValuePair represents a pair of expressions that serve as a single item +// within a map or object definition construct. +type KeyValuePair struct { + Key Expression + Value Expression +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go new file mode 100644 index 00000000..6d5d205c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go @@ -0,0 +1,68 @@ +package hcl + +type unwrapExpression interface { + UnwrapExpression() Expression +} + +// UnwrapExpression removes any "wrapper" expressions from the given expression, +// to recover the representation of the physical expression given in source +// code. +// +// Sometimes wrapping expressions are used to modify expression behavior, e.g. +// in extensions that need to make some local variables available to certain +// sub-trees of the configuration. This can make it difficult to reliably +// type-assert on the physical AST types used by the underlying syntax. 
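+//
+// For example, a caller that needs the physical AST node could use a
+// sketch like the following (assuming the expression came from the
+// native-syntax parser in the companion hclsyntax package):
+//
+//	if tce, ok := UnwrapExpression(expr).(*hclsyntax.TupleConsExpr); ok {
+//		// work with the underlying tuple constructor directly
+//	}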
+//
+// Unwrapping an expression may modify its behavior by stripping away any
+// additional constraints or capabilities being applied to the Value and
+// Variables methods, so this function should generally only be used prior
+// to operations that concern themselves with the static syntax of the input
+// configuration, and not with the effective value of the expression.
+//
+// Wrapper expression types must support unwrapping by implementing a method
+// called UnwrapExpression that takes no arguments and returns the embedded
+// Expression. Implementations of this method should peel away only one level
+// of wrapping, if multiple are present. This method may return nil to
+// indicate _dynamically_ that no wrapped expression is available, for
+// expression types that might only behave as wrappers in certain cases.
+func UnwrapExpression(expr Expression) Expression {
+	for {
+		unwrap, wrapped := expr.(unwrapExpression)
+		if !wrapped {
+			return expr
+		}
+		innerExpr := unwrap.UnwrapExpression()
+		if innerExpr == nil {
+			return expr
+		}
+		expr = innerExpr
+	}
+}
+
+// UnwrapExpressionUntil is similar to UnwrapExpression except it gives the
+// caller an opportunity to test each level of unwrapping to see whether a
+// particular expression is accepted.
+//
+// This could be used, for example, to unwrap until a particular other
+// interface is satisfied, regardless of what wrapping level it is satisfied
+// at.
+//
+// The given callback function must return false to continue unwrapping, or
+// true to accept and return the proposed expression given. If the callback
+// function rejects even the final, physical expression then the result of
+// this function is nil.
+func UnwrapExpressionUntil(expr Expression, until func(Expression) bool) Expression {
+	for {
+		if until(expr) {
+			return expr
+		}
+		unwrap, wrapped := expr.(unwrapExpression)
+		if !wrapped {
+			return nil
+		}
+		expr = unwrap.UnwrapExpression()
+		if expr == nil {
+			return nil
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go
new file mode 100644
index 00000000..ccc1c0ae
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go
@@ -0,0 +1,24 @@
+package hclsyntax
+
+import (
+	"github.com/agext/levenshtein"
+)
+
+// nameSuggestion tries to find a name from the given slice of suggested names
+// that is close to the given name and returns it if found. If no suggestion
+// is close enough, returns the empty string.
+//
+// The suggestions are tried in order, so earlier suggestions take precedence
+// if the given string is similar to two or more suggestions.
+//
+// This function is intended to be used with a relatively-small number of
+// suggestions. It's not optimized for hundreds or thousands of them.
+func nameSuggestion(given string, suggestions []string) string {
+	for _, suggestion := range suggestions {
+		dist := levenshtein.Distance(given, suggestion, nil)
+		if dist < 3 { // threshold determined experimentally
+			return suggestion
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go
new file mode 100644
index 00000000..617bc29d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go
@@ -0,0 +1,7 @@
+// Package hclsyntax contains the parser, AST, etc for HCL's native language,
+// as opposed to the JSON variant.
+//
+// In normal use applications should rarely depend on this package directly,
+// instead preferring the higher-level interface of the main hcl package and
+// its companion package hclparse.
+package hclsyntax
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
new file mode 100644
index 00000000..cfc7cd92
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
@@ -0,0 +1,1275 @@
+package hclsyntax
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// Expression is the abstract type for nodes that behave as HCL expressions.
+type Expression interface {
+	Node
+
+	// The hcl.Expression methods are duplicated here, rather than simply
+	// embedded, because both Node and hcl.Expression have a Range method
+	// and so they conflict.
+
+	Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
+	Variables() []hcl.Traversal
+	StartRange() hcl.Range
+}
+
+// Assert that Expression implements hcl.Expression
+var assertExprImplExpr hcl.Expression = Expression(nil)
+
+// LiteralValueExpr is an expression that just always returns a given value.
+type LiteralValueExpr struct {
+	Val      cty.Value
+	SrcRange hcl.Range
+}
+
+func (e *LiteralValueExpr) walkChildNodes(w internalWalkFunc) {
+	// Literal values have no child nodes
+}
+
+func (e *LiteralValueExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	return e.Val, nil
+}
+
+func (e *LiteralValueExpr) Range() hcl.Range {
+	return e.SrcRange
+}
+
+func (e *LiteralValueExpr) StartRange() hcl.Range {
+	return e.SrcRange
+}
+
+// Implementation for hcl.AbsTraversalForExpr.
+func (e *LiteralValueExpr) AsTraversal() hcl.Traversal {
+	// This one's a little weird: the contract for AsTraversal is to interpret
+	// an expression as if it were traversal syntax, and traversal syntax
+	// doesn't have the special keywords "null", "true", and "false" so these
+	// are expected to be treated like variables in that case.
+	// Since our parser already turned them into LiteralValueExpr by the time
+	// we get here, we need to undo this and infer the name that would've
+	// originally led to our value.
+	// We don't do anything for any other values, since they don't overlap
+	// with traversal roots.
+
+	if e.Val.IsNull() {
+		// In practice the parser only generates null values of the dynamic
+		// pseudo-type for literals, so we can safely assume that any null
+		// was originally the keyword "null".
+		return hcl.Traversal{
+			hcl.TraverseRoot{
+				Name:     "null",
+				SrcRange: e.SrcRange,
+			},
+		}
+	}
+
+	switch e.Val {
+	case cty.True:
+		return hcl.Traversal{
+			hcl.TraverseRoot{
+				Name:     "true",
+				SrcRange: e.SrcRange,
+			},
+		}
+	case cty.False:
+		return hcl.Traversal{
+			hcl.TraverseRoot{
+				Name:     "false",
+				SrcRange: e.SrcRange,
+			},
+		}
+	default:
+		// No traversal is possible for any other value.
+		return nil
+	}
+}
+
+// ScopeTraversalExpr is an Expression that retrieves a value from the scope
+// using a traversal.
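+//
+// For example, a reference like foo.bar[0] (with a literal index) is
+// typically represented as a single ScopeTraversalExpr whose traversal has
+// three steps: a root, an attribute, and an index (illustrative).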
+type ScopeTraversalExpr struct { + Traversal hcl.Traversal + SrcRange hcl.Range +} + +func (e *ScopeTraversalExpr) walkChildNodes(w internalWalkFunc) { + // Scope traversals have no child nodes +} + +func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Traversal.TraverseAbs(ctx) +} + +func (e *ScopeTraversalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ScopeTraversalExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *ScopeTraversalExpr) AsTraversal() hcl.Traversal { + return e.Traversal +} + +// RelativeTraversalExpr is an Expression that retrieves a value from another +// value using a _relative_ traversal. +type RelativeTraversalExpr struct { + Source Expression + Traversal hcl.Traversal + SrcRange hcl.Range +} + +func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) { + // Scope traversals have no child nodes +} + +func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + src, diags := e.Source.Value(ctx) + ret, travDiags := e.Traversal.TraverseRel(src) + diags = append(diags, travDiags...) + return ret, diags +} + +func (e *RelativeTraversalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *RelativeTraversalExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *RelativeTraversalExpr) AsTraversal() hcl.Traversal { + // We can produce a traversal only if our source can. + st, diags := hcl.AbsTraversalForExpr(e.Source) + if diags.HasErrors() { + return nil + } + + ret := make(hcl.Traversal, len(st)+len(e.Traversal)) + copy(ret, st) + copy(ret[len(st):], e.Traversal) + return ret +} + +// FunctionCallExpr is an Expression that calls a function from the EvalContext +// and returns its result. +type FunctionCallExpr struct { + Name string + Args []Expression + + // If true, the final argument should be a tuple, list or set which will + // expand to be one argument per element. 
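+	// For example, a call written as min(nums...) passes each element
+	// of nums as a separate argument (illustrative source syntax).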
+ ExpandFinal bool + + NameRange hcl.Range + OpenParenRange hcl.Range + CloseParenRange hcl.Range +} + +func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) { + for i, arg := range e.Args { + e.Args[i] = w(arg).(Expression) + } +} + +func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var f function.Function + exists := false + hasNonNilMap := false + thisCtx := ctx + for thisCtx != nil { + if thisCtx.Functions == nil { + thisCtx = thisCtx.Parent() + continue + } + hasNonNilMap = true + f, exists = thisCtx.Functions[e.Name] + if exists { + break + } + thisCtx = thisCtx.Parent() + } + + if !exists { + if !hasNonNilMap { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Function calls not allowed", + Detail: "Functions may not be called here.", + Subject: e.Range().Ptr(), + }, + } + } + + avail := make([]string, 0, len(ctx.Functions)) + for name := range ctx.Functions { + avail = append(avail, name) + } + suggestion := nameSuggestion(e.Name, avail) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Call to unknown function", + Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion), + Subject: &e.NameRange, + Context: e.Range().Ptr(), + }, + } + } + + params := f.Params() + varParam := f.VarParam() + + args := e.Args + if e.ExpandFinal { + if len(args) < 1 { + // should never happen if the parser is behaving + panic("ExpandFinal set on function call with no arguments") + } + expandExpr := args[len(args)-1] + expandVal, expandDiags := expandExpr.Value(ctx) + diags = append(diags, expandDiags...) + if expandDiags.HasErrors() { + return cty.DynamicVal, diags + } + + switch { + case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): + if expandVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must not be null.", + Context: expandExpr.Range().Ptr(), + Subject: e.Range().Ptr(), + }) + return cty.DynamicVal, diags + } + if !expandVal.IsKnown() { + return cty.DynamicVal, diags + } + + newArgs := make([]Expression, 0, (len(args)-1)+expandVal.LengthInt()) + newArgs = append(newArgs, args[:len(args)-1]...) + it := expandVal.ElementIterator() + for it.Next() { + _, val := it.Element() + newArgs = append(newArgs, &LiteralValueExpr{ + Val: val, + SrcRange: expandExpr.Range(), + }) + } + args = newArgs + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.", + Context: expandExpr.Range().Ptr(), + Subject: e.Range().Ptr(), + }) + return cty.DynamicVal, diags + } + } + + if len(args) < len(params) { + missing := params[len(args)] + qual := "" + if varParam != nil { + qual = " at least" + } + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Not enough function arguments", + Detail: fmt.Sprintf( + "Function %q expects%s %d argument(s). 
Missing value for %q.", + e.Name, qual, len(params), missing.Name, + ), + Subject: &e.CloseParenRange, + Context: e.Range().Ptr(), + }, + } + } + + if varParam == nil && len(args) > len(params) { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Too many function arguments", + Detail: fmt.Sprintf( + "Function %q expects only %d argument(s).", + e.Name, len(params), + ), + Subject: args[len(params)].StartRange().Ptr(), + Context: e.Range().Ptr(), + }, + } + } + + argVals := make([]cty.Value, len(args)) + + for i, argExpr := range args { + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + + val, argDiags := argExpr.Value(ctx) + if len(argDiags) > 0 { + diags = append(diags, argDiags...) + } + + // Try to convert our value to the parameter type + val, err := convert.Convert(val, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + }) + } + + argVals[i] = val + } + + if diags.HasErrors() { + // Don't try to execute the function if we already have errors with + // the arguments, because the result will probably be a confusing + // error message. + return cty.DynamicVal, diags + } + + resultVal, err := f.Call(argVals) + if err != nil { + switch terr := err.(type) { + case function.ArgError: + i := terr.Index + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + argExpr := e.Args[i] + + // TODO: we should also unpick a PathError here and show the + // path to the deep value where the error was detected. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + }) + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Error in function call", + Detail: fmt.Sprintf( + "Call to function %q failed: %s.", + e.Name, err, + ), + Subject: e.StartRange().Ptr(), + Context: e.Range().Ptr(), + }) + } + + return cty.DynamicVal, diags + } + + return resultVal, diags +} + +func (e *FunctionCallExpr) Range() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.CloseParenRange) +} + +func (e *FunctionCallExpr) StartRange() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.OpenParenRange) +} + +// Implementation for hcl.ExprCall. +func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall { + ret := &hcl.StaticCall{ + Name: e.Name, + NameRange: e.NameRange, + Arguments: make([]hcl.Expression, len(e.Args)), + ArgsRange: hcl.RangeBetween(e.OpenParenRange, e.CloseParenRange), + } + // Need to convert our own Expression objects into hcl.Expression. 
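+	// (Go slices are not covariant, so a []Expression cannot be used
+	// directly where a []hcl.Expression is expected.)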
+ for i, arg := range e.Args { + ret.Arguments[i] = arg + } + return ret +} + +type ConditionalExpr struct { + Condition Expression + TrueResult Expression + FalseResult Expression + + SrcRange hcl.Range +} + +func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) { + e.Condition = w(e.Condition).(Expression) + e.TrueResult = w(e.TrueResult).(Expression) + e.FalseResult = w(e.FalseResult).(Expression) +} + +func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + trueResult, trueDiags := e.TrueResult.Value(ctx) + falseResult, falseDiags := e.FalseResult.Value(ctx) + var diags hcl.Diagnostics + + // Try to find a type that both results can be converted to. + resultType, convs := convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()}) + if resultType == cty.NilType { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + // FIXME: Need a helper function for showing natural-language type diffs, + // since this will generate some useless messages in some cases, like + // "These expressions are object and object respectively" if the + // object types don't exactly match. + "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", + trueResult.Type(), falseResult.Type(), + ), + Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), + Context: &e.SrcRange, + }, + } + } + + condResult, condDiags := e.Condition.Value(ctx) + diags = append(diags, condDiags...) + if condResult.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null condition", + Detail: "The condition value is null. Conditions must either be true or false.", + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.UnknownVal(resultType), diags + } + if !condResult.IsKnown() { + return cty.UnknownVal(resultType), diags + } + condResult, err := convert.Convert(condResult, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect condition type", + Detail: fmt.Sprintf("The condition expression must be of type bool."), + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.UnknownVal(resultType), diags + } + + if condResult.True() { + diags = append(diags, trueDiags...) + if convs[0] != nil { + var err error + trueResult, err = convs[0](trueResult) + if err != nil { + // Unsafe conversion failed with the concrete result value + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + "The true result value has the wrong type: %s.", + err.Error(), + ), + Subject: e.TrueResult.Range().Ptr(), + Context: &e.SrcRange, + }) + trueResult = cty.UnknownVal(resultType) + } + } + return trueResult, diags + } else { + diags = append(diags, falseDiags...) 
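+		// This mirrors the true-result case above, applying the unifying
+		// conversion (if any) to the false result instead.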
+		if convs[1] != nil {
+			var err error
+			falseResult, err = convs[1](falseResult)
+			if err != nil {
+				// Unsafe conversion failed with the concrete result value
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Inconsistent conditional result types",
+					Detail: fmt.Sprintf(
+						"The false result value has the wrong type: %s.",
+						err.Error(),
+					),
+					Subject: e.FalseResult.Range().Ptr(),
+					Context: &e.SrcRange,
+				})
+				falseResult = cty.UnknownVal(resultType)
+			}
+		}
+		return falseResult, diags
+	}
+}
+
+func (e *ConditionalExpr) Range() hcl.Range {
+	return e.SrcRange
+}
+
+func (e *ConditionalExpr) StartRange() hcl.Range {
+	return e.Condition.StartRange()
+}
+
+type IndexExpr struct {
+	Collection Expression
+	Key        Expression
+
+	SrcRange  hcl.Range
+	OpenRange hcl.Range
+}
+
+func (e *IndexExpr) walkChildNodes(w internalWalkFunc) {
+	e.Collection = w(e.Collection).(Expression)
+	e.Key = w(e.Key).(Expression)
+}
+
+func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+	coll, collDiags := e.Collection.Value(ctx)
+	key, keyDiags := e.Key.Value(ctx)
+	diags = append(diags, collDiags...)
+	diags = append(diags, keyDiags...)
+
+	return hcl.Index(coll, key, &e.SrcRange)
+}
+
+func (e *IndexExpr) Range() hcl.Range {
+	return e.SrcRange
+}
+
+func (e *IndexExpr) StartRange() hcl.Range {
+	return e.OpenRange
+}
+
+type TupleConsExpr struct {
+	Exprs []Expression
+
+	SrcRange  hcl.Range
+	OpenRange hcl.Range
+}
+
+func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) {
+	for i, expr := range e.Exprs {
+		e.Exprs[i] = w(expr).(Expression)
+	}
+}
+
+func (e *TupleConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var vals []cty.Value
+	var diags hcl.Diagnostics
+
+	vals = make([]cty.Value, len(e.Exprs))
+	for i, expr := range e.Exprs {
+		val, valDiags := expr.Value(ctx)
+		vals[i] = val
+		diags = append(diags, valDiags...)
+	}
+
+	return cty.TupleVal(vals), diags
+}
+
+func (e *TupleConsExpr) Range() hcl.Range {
+	return e.SrcRange
+}
+
+func (e *TupleConsExpr) StartRange() hcl.Range {
+	return e.OpenRange
+}
+
+// Implementation for hcl.ExprList
+func (e *TupleConsExpr) ExprList() []hcl.Expression {
+	ret := make([]hcl.Expression, len(e.Exprs))
+	for i, expr := range e.Exprs {
+		ret[i] = expr
+	}
+	return ret
+}
+
+type ObjectConsExpr struct {
+	Items []ObjectConsItem
+
+	SrcRange  hcl.Range
+	OpenRange hcl.Range
+}
+
+type ObjectConsItem struct {
+	KeyExpr   Expression
+	ValueExpr Expression
+}
+
+func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) {
+	for i, item := range e.Items {
+		e.Items[i].KeyExpr = w(item.KeyExpr).(Expression)
+		e.Items[i].ValueExpr = w(item.ValueExpr).(Expression)
+	}
+}
+
+func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var vals map[string]cty.Value
+	var diags hcl.Diagnostics
+
+	// This will get set to false if we fail to produce any of our keys,
+	// either because they are actually unknown or if the evaluation produces
+	// errors. In all of these cases we must return DynamicPseudoType because
+	// we're unable to know the full set of keys our object has, and thus
+	// we can't produce a complete value of the intended type.
+	//
+	// We still evaluate all of the item keys and values to make sure that we
+	// get as complete as possible a set of diagnostics.
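+	//
+	// For example, if one key expression refers to a variable whose value
+	// is not yet known, the whole result must be cty.DynamicVal even if
+	// every other key evaluated successfully (illustrative).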
+ known := true + + vals = make(map[string]cty.Value, len(e.Items)) + for _, item := range e.Items { + key, keyDiags := item.KeyExpr.Value(ctx) + diags = append(diags, keyDiags...) + + val, valDiags := item.ValueExpr.Value(ctx) + diags = append(diags, valDiags...) + + if keyDiags.HasErrors() { + known = false + continue + } + + if key.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null value as key", + Detail: "Can't use a null value as a key.", + Subject: item.ValueExpr.Range().Ptr(), + }) + known = false + continue + } + + var err error + key, err = convert.Convert(key, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect key type", + Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()), + Subject: item.ValueExpr.Range().Ptr(), + }) + known = false + continue + } + + if !key.IsKnown() { + known = false + continue + } + + keyStr := key.AsString() + + vals[keyStr] = val + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.ObjectVal(vals), diags +} + +func (e *ObjectConsExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ObjectConsExpr) StartRange() hcl.Range { + return e.OpenRange +} + +// Implementation for hcl.ExprMap +func (e *ObjectConsExpr) ExprMap() []hcl.KeyValuePair { + ret := make([]hcl.KeyValuePair, len(e.Items)) + for i, item := range e.Items { + ret[i] = hcl.KeyValuePair{ + Key: item.KeyExpr, + Value: item.ValueExpr, + } + } + return ret +} + +// ObjectConsKeyExpr is a special wrapper used only for ObjectConsExpr keys, +// which deals with the special case that a naked identifier in that position +// must be interpreted as a literal string rather than evaluated directly. +type ObjectConsKeyExpr struct { + Wrapped Expression +} + +func (e *ObjectConsKeyExpr) literalName() string { + // This is our logic for deciding whether to behave like a literal string. + // We lean on our AbsTraversalForExpr implementation here, which already + // deals with some awkward cases like the expression being the result + // of the keywords "null", "true" and "false" which we'd want to interpret + // as keys here too. + return hcl.ExprAsKeyword(e.Wrapped) +} + +func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) { + // We only treat our wrapped expression as a real expression if we're + // not going to interpret it as a literal. + if e.literalName() == "" { + e.Wrapped = w(e.Wrapped).(Expression) + } +} + +func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + if ln := e.literalName(); ln != "" { + return cty.StringVal(ln), nil + } + return e.Wrapped.Value(ctx) +} + +func (e *ObjectConsKeyExpr) Range() hcl.Range { + return e.Wrapped.Range() +} + +func (e *ObjectConsKeyExpr) StartRange() hcl.Range { + return e.Wrapped.StartRange() +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *ObjectConsKeyExpr) AsTraversal() hcl.Traversal { + // We can produce a traversal only if our wrappee can. 
+ st, diags := hcl.AbsTraversalForExpr(e.Wrapped) + if diags.HasErrors() { + return nil + } + + return st +} + +func (e *ObjectConsKeyExpr) UnwrapExpression() Expression { + return e.Wrapped +} + +// ForExpr represents iteration constructs: +// +// tuple = [for i, v in list: upper(v) if i > 2] +// object = {for k, v in map: k => upper(v)} +// object_of_tuples = {for v in list: v.key: v...} +type ForExpr struct { + KeyVar string // empty if ignoring the key + ValVar string + + CollExpr Expression + + KeyExpr Expression // nil when producing a tuple + ValExpr Expression + CondExpr Expression // null if no "if" clause is present + + Group bool // set if the ellipsis is used on the value in an object for + + SrcRange hcl.Range + OpenRange hcl.Range + CloseRange hcl.Range +} + +func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + collVal, collDiags := e.CollExpr.Value(ctx) + diags = append(diags, collDiags...) + + if collVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over null value", + Detail: "A null value cannot be used as the collection in a 'for' expression.", + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + if collVal.Type() == cty.DynamicPseudoType { + return cty.DynamicVal, diags + } + if !collVal.CanIterateElements() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over non-iterable value", + Detail: fmt.Sprintf( + "A value of type %s cannot be used as the collection in a 'for' expression.", + collVal.Type().FriendlyName(), + ), + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + if !collVal.IsKnown() { + return cty.DynamicVal, diags + } + + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + + // Before we start we'll do an early check to see if any CondExpr we've + // been given is of the wrong type. This isn't 100% reliable (it may + // be DynamicVal until real values are given) but it should catch some + // straightforward cases and prevent a barrage of repeated errors. + if e.CondExpr != nil { + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = cty.DynamicVal + } + childCtx.Variables[e.ValVar] = cty.DynamicVal + + result, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) 
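+		// A null or non-boolean result here is an error in its own right,
+		// even though the placeholder values above may leave the result
+		// unknown rather than definitively true or false.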
+ if result.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + _, err := convert.Convert(result, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + return cty.DynamicVal, diags + } + if condDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + + if e.KeyExpr != nil { + // Producing an object + var vals map[string]cty.Value + var groupVals map[string][]cty.Value + if e.Group { + groupVals = map[string][]cty.Value{} + } else { + vals = map[string]cty.Value{} + } + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + if !include.IsKnown() { + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + keyRaw, keyDiags := e.KeyExpr.Value(childCtx) + diags = append(diags, keyDiags...) + if keyRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: "Key expression in 'for' expression must not produce a null value.", + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + if !keyRaw.IsKnown() { + known = false + continue + } + + key, err := convert.Convert(keyRaw, cty.String) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) + + if e.Group { + k := key.AsString() + groupVals[k] = append(groupVals[k], val) + } else { + k := key.AsString() + if _, exists := vals[k]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object key", + Detail: fmt.Sprintf( + "Two different items produced the key %q in this for expression. If duplicates are expected, use the ellipsis (...) 
after the value expression to enable grouping by key.", + k, + ), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } else { + vals[key.AsString()] = val + } + } + } + + if !known { + return cty.DynamicVal, diags + } + + if e.Group { + vals = map[string]cty.Value{} + for k, gvs := range groupVals { + vals[k] = cty.TupleVal(gvs) + } + } + + return cty.ObjectVal(vals), diags + + } else { + // Producing a tuple + vals := []cty.Value{} + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + if !includeRaw.IsKnown() { + // We will eventually return DynamicVal, but we'll continue + // iterating in case there are other diagnostics to gather + // for later elements. + known = false + continue + } + + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + }) + } + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) + vals = append(vals, val) + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.TupleVal(vals), diags + } +} + +func (e *ForExpr) walkChildNodes(w internalWalkFunc) { + e.CollExpr = w(e.CollExpr).(Expression) + + scopeNames := map[string]struct{}{} + if e.KeyVar != "" { + scopeNames[e.KeyVar] = struct{}{} + } + if e.ValVar != "" { + scopeNames[e.ValVar] = struct{}{} + } + + if e.KeyExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: &e.KeyExpr, + }) + } + w(ChildScope{ + LocalNames: scopeNames, + Expr: &e.ValExpr, + }) + if e.CondExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: &e.CondExpr, + }) + } +} + +func (e *ForExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ForExpr) StartRange() hcl.Range { + return e.OpenRange +} + +type SplatExpr struct { + Source Expression + Each Expression + Item *AnonSymbolExpr + + SrcRange hcl.Range + MarkerRange hcl.Range +} + +func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + sourceVal, diags := e.Source.Value(ctx) + if diags.HasErrors() { + // We'll evaluate our "Each" expression here just to see if it + // produces any more diagnostics we can report. Since we're not + // assigning a value to our AnonSymbolExpr here it will return + // DynamicVal, which should short-circuit any use of it. + _, itemDiags := e.Item.Value(ctx) + diags = append(diags, itemDiags...) 
+ return cty.DynamicVal, diags + } + + if sourceVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Splat of null value", + Detail: "Splat expressions (with the * symbol) cannot be applied to null values.", + Subject: e.Source.Range().Ptr(), + Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(), + }) + return cty.DynamicVal, diags + } + if !sourceVal.IsKnown() { + return cty.DynamicVal, diags + } + + // A "special power" of splat expressions is that they can be applied + // both to tuples/lists and to other values, and in the latter case + // the value will be treated as an implicit single-value list. We'll + // deal with that here first. + if !(sourceVal.Type().IsTupleType() || sourceVal.Type().IsListType()) { + sourceVal = cty.ListVal([]cty.Value{sourceVal}) + } + + vals := make([]cty.Value, 0, sourceVal.LengthInt()) + it := sourceVal.ElementIterator() + if ctx == nil { + // we need a context to use our AnonSymbolExpr, so we'll just + // make an empty one here to use as a placeholder. + ctx = ctx.NewChild() + } + isKnown := true + for it.Next() { + _, sourceItem := it.Element() + e.Item.setValue(ctx, sourceItem) + newItem, itemDiags := e.Each.Value(ctx) + diags = append(diags, itemDiags...) + if itemDiags.HasErrors() { + isKnown = false + } + vals = append(vals, newItem) + } + e.Item.clearValue(ctx) // clean up our temporary value + + if !isKnown { + return cty.DynamicVal, diags + } + + return cty.TupleVal(vals), diags +} + +func (e *SplatExpr) walkChildNodes(w internalWalkFunc) { + e.Source = w(e.Source).(Expression) + e.Each = w(e.Each).(Expression) +} + +func (e *SplatExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *SplatExpr) StartRange() hcl.Range { + return e.MarkerRange +} + +// AnonSymbolExpr is used as a placeholder for a value in an expression that +// can be applied dynamically to any value at runtime. +// +// This is a rather odd, synthetic expression. It is used as part of the +// representation of splat expressions as a placeholder for the current item +// being visited in the splat evaluation. +// +// AnonSymbolExpr cannot be evaluated in isolation. If its Value is called +// directly then cty.DynamicVal will be returned. Instead, it is evaluated +// in terms of another node (i.e. a splat expression) which temporarily +// assigns it a value. +type AnonSymbolExpr struct { + SrcRange hcl.Range + values map[*hcl.EvalContext]cty.Value +} + +func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + if ctx == nil { + return cty.DynamicVal, nil + } + val, exists := e.values[ctx] + if !exists { + return cty.DynamicVal, nil + } + return val, nil +} + +// setValue sets a temporary local value for the expression when evaluated +// in the given context, which must be non-nil. 
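+//
+// Callers are expected to pair each setValue with a matching clearValue
+// for the same context, as SplatExpr.Value does above.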
+func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) { + if e.values == nil { + e.values = make(map[*hcl.EvalContext]cty.Value) + } + if ctx == nil { + panic("can't setValue for a nil EvalContext") + } + e.values[ctx] = val +} + +func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) { + if e.values == nil { + return + } + if ctx == nil { + panic("can't clearValue for a nil EvalContext") + } + delete(e.values, ctx) +} + +func (e *AnonSymbolExpr) walkChildNodes(w internalWalkFunc) { + // AnonSymbolExpr is a leaf node in the tree +} + +func (e *AnonSymbolExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *AnonSymbolExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go new file mode 100644 index 00000000..9a5da043 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go @@ -0,0 +1,258 @@ +package hclsyntax + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +type Operation struct { + Impl function.Function + Type cty.Type +} + +var ( + OpLogicalOr = &Operation{ + Impl: stdlib.OrFunc, + Type: cty.Bool, + } + OpLogicalAnd = &Operation{ + Impl: stdlib.AndFunc, + Type: cty.Bool, + } + OpLogicalNot = &Operation{ + Impl: stdlib.NotFunc, + Type: cty.Bool, + } + + OpEqual = &Operation{ + Impl: stdlib.EqualFunc, + Type: cty.Bool, + } + OpNotEqual = &Operation{ + Impl: stdlib.NotEqualFunc, + Type: cty.Bool, + } + + OpGreaterThan = &Operation{ + Impl: stdlib.GreaterThanFunc, + Type: cty.Bool, + } + OpGreaterThanOrEqual = &Operation{ + Impl: stdlib.GreaterThanOrEqualToFunc, + Type: cty.Bool, + } + OpLessThan = &Operation{ + Impl: stdlib.LessThanFunc, + Type: cty.Bool, + } + OpLessThanOrEqual = &Operation{ + Impl: stdlib.LessThanOrEqualToFunc, + Type: cty.Bool, + } + + OpAdd = &Operation{ + Impl: stdlib.AddFunc, + Type: cty.Number, + } + OpSubtract = &Operation{ + Impl: stdlib.SubtractFunc, + Type: cty.Number, + } + OpMultiply = &Operation{ + Impl: stdlib.MultiplyFunc, + Type: cty.Number, + } + OpDivide = &Operation{ + Impl: stdlib.DivideFunc, + Type: cty.Number, + } + OpModulo = &Operation{ + Impl: stdlib.ModuloFunc, + Type: cty.Number, + } + OpNegate = &Operation{ + Impl: stdlib.NegateFunc, + Type: cty.Number, + } +) + +var binaryOps []map[TokenType]*Operation + +func init() { + // This operation table maps from the operator's token type + // to the AST operation type. All expressions produced from + // binary operators are BinaryOp nodes. + // + // Binary operator groups are listed in order of precedence, with + // the *lowest* precedence first. Operators within the same group + // have left-to-right associativity. 
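+	//
+	// For example, a || b && c groups as a || (b && c), and 1 + 2 * 3
+	// groups as 1 + (2 * 3), because TokenAnd and TokenStar appear in
+	// later (higher-precedence) groups than TokenOr and TokenPlus.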
+ binaryOps = []map[TokenType]*Operation{ + { + TokenOr: OpLogicalOr, + }, + { + TokenAnd: OpLogicalAnd, + }, + { + TokenEqualOp: OpEqual, + TokenNotEqual: OpNotEqual, + }, + { + TokenGreaterThan: OpGreaterThan, + TokenGreaterThanEq: OpGreaterThanOrEqual, + TokenLessThan: OpLessThan, + TokenLessThanEq: OpLessThanOrEqual, + }, + { + TokenPlus: OpAdd, + TokenMinus: OpSubtract, + }, + { + TokenStar: OpMultiply, + TokenSlash: OpDivide, + TokenPercent: OpModulo, + }, + } +} + +type BinaryOpExpr struct { + LHS Expression + Op *Operation + RHS Expression + + SrcRange hcl.Range +} + +func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) { + e.LHS = w(e.LHS).(Expression) + e.RHS = w(e.RHS).(Expression) +} + +func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + impl := e.Op.Impl // assumed to be a function taking exactly two arguments + params := impl.Params() + lhsParam := params[0] + rhsParam := params[1] + + var diags hcl.Diagnostics + + givenLHSVal, lhsDiags := e.LHS.Value(ctx) + givenRHSVal, rhsDiags := e.RHS.Value(ctx) + diags = append(diags, lhsDiags...) + diags = append(diags, rhsDiags...) + + lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err), + Subject: e.LHS.Range().Ptr(), + Context: &e.SrcRange, + }) + } + rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err), + Subject: e.RHS.Range().Ptr(), + Context: &e.SrcRange, + }) + } + + if diags.HasErrors() { + // Don't actually try the call if we have errors already, since the + // this will probably just produce a confusing duplicative diagnostic. + return cty.UnknownVal(e.Op.Type), diags + } + + args := []cty.Value{lhsVal, rhsVal} + result, err := impl.Call(args) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + // FIXME: This diagnostic is useless. + Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + }) + return cty.UnknownVal(e.Op.Type), diags + } + + return result, diags +} + +func (e *BinaryOpExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *BinaryOpExpr) StartRange() hcl.Range { + return e.LHS.StartRange() +} + +type UnaryOpExpr struct { + Op *Operation + Val Expression + + SrcRange hcl.Range + SymbolRange hcl.Range +} + +func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) { + e.Val = w(e.Val).(Expression) +} + +func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + impl := e.Op.Impl // assumed to be a function taking exactly one argument + params := impl.Params() + param := params[0] + + givenVal, diags := e.Val.Value(ctx) + + val, err := convert.Convert(givenVal, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err), + Subject: e.Val.Range().Ptr(), + Context: &e.SrcRange, + }) + } + + if diags.HasErrors() { + // Don't actually try the call if we have errors already, since the + // this will probably just produce a confusing duplicative diagnostic. 
+ return cty.UnknownVal(e.Op.Type), diags + } + + args := []cty.Value{val} + result, err := impl.Call(args) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + // FIXME: This diagnostic is useless. + Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + }) + return cty.UnknownVal(e.Op.Type), diags + } + + return result, diags +} + +func (e *UnaryOpExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *UnaryOpExpr) StartRange() hcl.Range { + return e.SymbolRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go new file mode 100644 index 00000000..a1c47275 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go @@ -0,0 +1,192 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type TemplateExpr struct { + Parts []Expression + + SrcRange hcl.Range +} + +func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) { + for i, part := range e.Parts { + e.Parts[i] = w(part).(Expression) + } +} + +func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + buf := &bytes.Buffer{} + var diags hcl.Diagnostics + isKnown := true + + for _, part := range e.Parts { + partVal, partDiags := part.Value(ctx) + diags = append(diags, partDiags...) + + if partVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "The expression result is null. Cannot include a null value in a string template.", + ), + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + }) + continue + } + + if !partVal.IsKnown() { + // If any part is unknown then the result as a whole must be + // unknown too. We'll keep on processing the rest of the parts + // anyway, because we want to still emit any diagnostics resulting + // from evaluating those. + isKnown = false + continue + } + + strVal, err := convert.Convert(partVal, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include the given value in a string template: %s.", + err.Error(), + ), + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + }) + continue + } + + buf.WriteString(strVal.AsString()) + } + + if !isKnown { + return cty.UnknownVal(cty.String), diags + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateExpr) StartRange() hcl.Range { + return e.Parts[0].StartRange() +} + +// TemplateJoinExpr is used to convert tuples of strings produced by template +// constructs (i.e. for loops) into flat strings, by converting the values +// tos strings and joining them. This AST node is not used directly; it's +// produced as part of the AST of a "for" loop in a template. +type TemplateJoinExpr struct { + Tuple Expression +} + +func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) { + e.Tuple = w(e.Tuple).(Expression) +} + +func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + tuple, diags := e.Tuple.Value(ctx) + + if tuple.IsNull() { + // This indicates a bug in the code that constructed the AST. 
+ panic("TemplateJoinExpr got null tuple") + } + if tuple.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + if !tuple.Type().IsTupleType() { + // This indicates a bug in the code that constructed the AST. + panic("TemplateJoinExpr got non-tuple tuple") + } + if !tuple.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf := &bytes.Buffer{} + it := tuple.ElementIterator() + for it.Next() { + _, val := it.Element() + + if val.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "An iteration result is null. Cannot include a null value in a string template.", + ), + Subject: e.Range().Ptr(), + }) + continue + } + if val.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + strVal, err := convert.Convert(val, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include one of the interpolation results into the string template: %s.", + err.Error(), + ), + Subject: e.Range().Ptr(), + }) + continue + } + if !val.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf.WriteString(strVal.AsString()) + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateJoinExpr) Range() hcl.Range { + return e.Tuple.Range() +} + +func (e *TemplateJoinExpr) StartRange() hcl.Range { + return e.Tuple.StartRange() +} + +// TemplateWrapExpr is used instead of a TemplateExpr when a template +// consists _only_ of a single interpolation sequence. In that case, the +// template's result is the single interpolation's result, verbatim with +// no type conversions. +type TemplateWrapExpr struct { + Wrapped Expression + + SrcRange hcl.Range +} + +func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) { + e.Wrapped = w(e.Wrapped).(Expression) +} + +func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Wrapped.Value(ctx) +} + +func (e *TemplateWrapExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateWrapExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go new file mode 100755 index 00000000..9177092c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go @@ -0,0 +1,76 @@ +package hclsyntax + +// Generated by expression_vars_get.go. DO NOT EDIT. +// Run 'go generate' on this package to update the set of functions here. 
+ +import ( + "github.com/hashicorp/hcl2/hcl" +) + +func (e *AnonSymbolExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *BinaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ConditionalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ForExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *FunctionCallExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *IndexExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *LiteralValueExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsKeyExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *RelativeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ScopeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *SplatExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateJoinExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateWrapExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TupleConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *UnaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go new file mode 100644 index 00000000..88f19800 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars_gen.go @@ -0,0 +1,99 @@ +// This is a 'go generate'-oriented program for producing the "Variables" +// method on every Expression implementation found within this package. +// All expressions share the same implementation for this method, which +// just wraps the package-level function "Variables" and uses an AST walk +// to do its work. + +// +build ignore + +package main + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "os" + "sort" +) + +func main() { + fs := token.NewFileSet() + pkgs, err := parser.ParseDir(fs, ".", nil, 0) + if err != nil { + fmt.Fprintf(os.Stderr, "error while parsing: %s\n", err) + os.Exit(1) + } + pkg := pkgs["hclsyntax"] + + // Walk all the files and collect the receivers of any "Value" methods + // that look like they are trying to implement Expression. + var recvs []string + for _, f := range pkg.Files { + for _, decl := range f.Decls { + fd, ok := decl.(*ast.FuncDecl) + if !ok { + continue + } + if fd.Name.Name != "Value" { + continue + } + results := fd.Type.Results.List + if len(results) != 2 { + continue + } + valResult := fd.Type.Results.List[0].Type.(*ast.SelectorExpr).X.(*ast.Ident) + diagsResult := fd.Type.Results.List[1].Type.(*ast.SelectorExpr).X.(*ast.Ident) + + if valResult.Name != "cty" && diagsResult.Name != "hcl" { + continue + } + + // If we have a method called Value and it returns something in + // "cty" followed by something in "hcl" then that's specific enough + // for now, even though this is not 100% exact as a correct + // implementation of Value. 
+ + recvTy := fd.Recv.List[0].Type + + switch rtt := recvTy.(type) { + case *ast.StarExpr: + name := rtt.X.(*ast.Ident).Name + recvs = append(recvs, fmt.Sprintf("*%s", name)) + default: + fmt.Fprintf(os.Stderr, "don't know what to do with a %T receiver\n", recvTy) + } + + } + } + + sort.Strings(recvs) + + of, err := os.OpenFile("expression_vars.go", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to open output file: %s\n", err) + os.Exit(1) + } + + fmt.Fprint(of, outputPreamble) + for _, recv := range recvs { + fmt.Fprintf(of, outputMethodFmt, recv) + } + fmt.Fprint(of, "\n") + +} + +const outputPreamble = `package hclsyntax + +// Generated by expression_vars_get.go. DO NOT EDIT. +// Run 'go generate' on this package to update the set of functions here. + +import ( + "github.com/hashicorp/hcl2/hcl" +)` + +const outputMethodFmt = ` + +func (e %s) Variables() []hcl.Traversal { + return Variables(e) +}` diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go new file mode 100644 index 00000000..490c0255 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go @@ -0,0 +1,20 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// File is the top-level object resulting from parsing a configuration file. +type File struct { + Body *Body + Bytes []byte +} + +func (f *File) AsHCLFile() *hcl.File { + return &hcl.File{ + Body: f.Body, + Bytes: f.Bytes, + + // TODO: The Nav object, once we have an implementation of it + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go new file mode 100644 index 00000000..841656a6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go @@ -0,0 +1,9 @@ +package hclsyntax + +//go:generate go run expression_vars_gen.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt -m UnicodeDerived -p ID_Start,ID_Continue -o unicode_derived.rl +//go:generate ragel -Z scan_tokens.rl +//go:generate gofmt -w scan_tokens.go +//go:generate ragel -Z scan_string_lit.rl +//go:generate gofmt -w scan_string_lit.go +//go:generate stringer -type TokenType -output token_type_string.go diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go new file mode 100644 index 00000000..eef8b962 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go @@ -0,0 +1,21 @@ +package hclsyntax + +import ( + "bytes" +) + +type Keyword []byte + +var forKeyword = Keyword([]byte{'f', 'o', 'r'}) +var inKeyword = Keyword([]byte{'i', 'n'}) +var ifKeyword = Keyword([]byte{'i', 'f'}) +var elseKeyword = Keyword([]byte{'e', 'l', 's', 'e'}) +var endifKeyword = Keyword([]byte{'e', 'n', 'd', 'i', 'f'}) +var endforKeyword = Keyword([]byte{'e', 'n', 'd', 'f', 'o', 'r'}) + +func (kw Keyword) TokenMatches(token Token) bool { + if token.Type != TokenIdent { + return false + } + return bytes.Equal([]byte(kw), token.Bytes) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go new file mode 100644 index 00000000..4d41b6b6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go @@ -0,0 +1,41 @@ +package hclsyntax + +import ( + "bytes" + "fmt" +) + +type navigation struct { + root *Body +} + +// 
Implementation of hcled.ContextString +func (n navigation) ContextString(offset int) string { + // We will walk our top-level blocks until we find one that contains + // the given offset, and then construct a representation of the header + // of the block. + + var block *Block + for _, candidate := range n.root.Blocks { + if candidate.Range().ContainsOffset(offset) { + block = candidate + break + } + } + + if block == nil { + return "" + } + + if len(block.Labels) == 0 { + // Easy case! + return block.Type + } + + buf := &bytes.Buffer{} + buf.WriteString(block.Type) + for _, label := range block.Labels { + fmt.Fprintf(buf, " %q", label) + } + return buf.String() +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go new file mode 100644 index 00000000..fd426d4a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go @@ -0,0 +1,22 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Node is the abstract type that every AST node implements. +// +// This is a closed interface, so it cannot be implemented from outside of +// this package. +type Node interface { + // This is the mechanism by which the public-facing walk functions + // are implemented. Implementations should call the given function + // for each child node and then replace that node with its return value. + // The return value might just be the same node, for non-transforming + // walks. + walkChildNodes(w internalWalkFunc) + + Range() hcl.Range +} + +type internalWalkFunc func(Node) Node diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go new file mode 100644 index 00000000..002858f4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go @@ -0,0 +1,1836 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + "strconv" + "unicode/utf8" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type parser struct { + *peeker + + // set to true if any recovery is attempted. The parser can use this + // to attempt to reduce error noise by suppressing "bad token" errors + // in recovery mode, assuming that the recovery heuristics have failed + // in this case and left the peeker in a wrong place. + recovery bool +} + +func (p *parser) ParseBody(end TokenType) (*Body, hcl.Diagnostics) { + attrs := Attributes{} + blocks := Blocks{} + var diags hcl.Diagnostics + + startRange := p.PrevRange() + var endRange hcl.Range + +Token: + for { + next := p.Peek() + if next.Type == end { + endRange = p.NextRange() + p.Read() + break Token + } + + switch next.Type { + case TokenNewline: + p.Read() + continue + case TokenIdent: + item, itemDiags := p.ParseBodyItem() + diags = append(diags, itemDiags...) + switch titem := item.(type) { + case *Block: + blocks = append(blocks, titem) + case *Attribute: + if existing, exists := attrs[titem.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute redefined", + Detail: fmt.Sprintf( + "The attribute %q was already defined at %s. 
Each attribute may be defined only once.", + titem.Name, existing.NameRange.String(), + ), + Subject: &titem.NameRange, + }) + } else { + attrs[titem.Name] = titem + } + default: + // This should never happen for valid input, but may if a + // syntax error was detected in ParseBodyItem that prevented + // it from even producing a partially-broken item. In that + // case, it would've left at least one error in the diagnostics + // slice we already dealt with above. + // + // We'll assume ParseBodyItem attempted recovery to leave + // us in a reasonable position to try parsing the next item. + continue + } + default: + bad := p.Read() + if !p.recovery { + if bad.Type == TokenOQuote { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "Attribute names must not be quoted.", + Subject: &bad.Range, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute or block definition required", + Detail: "An attribute or block definition is required here.", + Subject: &bad.Range, + }) + } + } + endRange = p.PrevRange() // arbitrary, but somewhere inside the body means better diagnostics + + p.recover(end) // attempt to recover to the token after the end of this body + break Token + } + } + + return &Body{ + Attributes: attrs, + Blocks: blocks, + + SrcRange: hcl.RangeBetween(startRange, endRange), + EndRange: hcl.Range{ + Filename: endRange.Filename, + Start: endRange.End, + End: endRange.End, + }, + }, diags +} + +func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { + ident := p.Read() + if ident.Type != TokenIdent { + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Attribute or block definition required", + Detail: "An attribute or block definition is required here.", + Subject: &ident.Range, + }, + } + } + + next := p.Peek() + + switch next.Type { + case TokenEqual: + return p.finishParsingBodyAttribute(ident) + case TokenOQuote, TokenOBrace, TokenIdent: + return p.finishParsingBodyBlock(ident) + default: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Attribute or block definition required", + Detail: "An attribute or block definition is required here. To define an attribute, use the equals sign \"=\" to introduce the attribute value.", + Subject: &ident.Range, + }, + } + } + + return nil, nil +} + +func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics) { + eqTok := p.Read() // eat equals token + if eqTok.Type != TokenEqual { + // should never happen if caller behaves + panic("finishParsingBodyAttribute called with next not equals") + } + + var endRange hcl.Range + + expr, diags := p.ParseExpression() + if p.recovery && diags.HasErrors() { + // recovery within expressions tends to be tricky, so we've probably + // landed somewhere weird. We'll try to reset to the start of a body + // item so parsing can continue. 
+ endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + end := p.Peek() + if end.Type != TokenNewline && end.Type != TokenEOF { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after attribute definition", + Detail: "An attribute definition must end with a newline.", + Subject: &end.Range, + Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(), + }) + } + endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + endRange = p.PrevRange() + p.Read() // eat newline + } + } + + return &Attribute{ + Name: string(ident.Bytes), + Expr: expr, + + SrcRange: hcl.RangeBetween(ident.Range, endRange), + NameRange: ident.Range, + EqualsRange: eqTok.Range, + }, diags +} + +func (p *parser) finishParsingBodyBlock(ident Token) (Node, hcl.Diagnostics) { + var blockType = string(ident.Bytes) + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + + var oBrace Token + +Token: + for { + tok := p.Peek() + + switch tok.Type { + + case TokenOBrace: + oBrace = p.Read() + break Token + + case TokenOQuote: + label, labelRange, labelDiags := p.parseQuotedStringLiteral() + diags = append(diags, labelDiags...) + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + if labelDiags.HasErrors() { + p.recoverAfterBodyItem() + return &Block{ + Type: blockType, + Labels: labels, + Body: nil, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: ident.Range, // placeholder + CloseBraceRange: ident.Range, // placeholder + }, diags + } + + case TokenIdent: + tok = p.Read() // eat token + label, labelRange := string(tok.Bytes), tok.Range + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + + default: + switch tok.Type { + case TokenEqual: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "The equals sign \"=\" indicates an attribute definition, and must not be used when defining a block.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + case TokenNewline: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "A block definition must have block content delimited by \"{\" and \"}\", starting on the same line as the block header.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "Either a quoted string block label or an opening brace (\"{\") is expected here.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + } + } + + p.recoverAfterBodyItem() + + return &Block{ + Type: blockType, + Labels: labels, + Body: nil, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: ident.Range, // placeholder + CloseBraceRange: ident.Range, // placeholder + }, diags + } + } + + // Once we fall out here, the peeker is pointed just after our opening + // brace, so we can begin our nested body parsing. + body, bodyDiags := p.ParseBody(TokenCBrace) + diags = append(diags, bodyDiags...) 
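+ // For example, for a block like `resource "example" { ... }` the nested
+ // ParseBody call above has consumed everything up to and including the
+ // closing brace, so PrevRange below is that brace's range.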
+ cBraceRange := p.PrevRange() + + eol := p.Peek() + if eol.Type == TokenNewline || eol.Type == TokenEOF { + p.Read() // eat newline + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after block definition", + Detail: "A block definition must end with a newline.", + Subject: &eol.Range, + Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(), + }) + } + p.recoverAfterBodyItem() + } + + return &Block{ + Type: blockType, + Labels: labels, + Body: body, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: oBrace.Range, + CloseBraceRange: cBraceRange, + }, diags +} + +func (p *parser) ParseExpression() (Expression, hcl.Diagnostics) { + return p.parseTernaryConditional() +} + +func (p *parser) parseTernaryConditional() (Expression, hcl.Diagnostics) { + // The ternary conditional operator (.. ? .. : ..) behaves somewhat + // like a binary operator except that the "symbol" is itself + // an expression enclosed in two punctuation characters. + // The middle expression is parsed as if the ? and : symbols + // were parentheses. The "rhs" (the "false expression") is then + // treated right-associatively so it behaves similarly to the + // middle in terms of precedence. + + startRange := p.NextRange() + var condExpr, trueExpr, falseExpr Expression + var diags hcl.Diagnostics + + condExpr, condDiags := p.parseBinaryOps(binaryOps) + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + return condExpr, diags + } + + questionMark := p.Peek() + if questionMark.Type != TokenQuestion { + return condExpr, diags + } + + p.Read() // eat question mark + + trueExpr, trueDiags := p.ParseExpression() + diags = append(diags, trueDiags...) + if p.recovery && trueDiags.HasErrors() { + return condExpr, diags + } + + colon := p.Peek() + if colon.Type != TokenColon { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing false expression in conditional", + Detail: "The conditional operator (...?...:...) requires a false expression, delimited by a colon.", + Subject: &colon.Range, + Context: hcl.RangeBetween(startRange, colon.Range).Ptr(), + }) + return condExpr, diags + } + + p.Read() // eat colon + + falseExpr, falseDiags := p.ParseExpression() + diags = append(diags, falseDiags...) + if p.recovery && falseDiags.HasErrors() { + return condExpr, diags + } + + return &ConditionalExpr{ + Condition: condExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(startRange, falseExpr.Range()), + }, diags +} + +// parseBinaryOps calls itself recursively to work through all of the +// operator precedence groups, and then eventually calls parseExpressionTerm +// for each operand. +func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl.Diagnostics) { + if len(ops) == 0 { + // We've run out of operators, so now we'll just try to parse a term. + return p.parseExpressionWithTraversals() + } + + thisLevel := ops[0] + remaining := ops[1:] + + var lhs, rhs Expression + var operation *Operation + var diags hcl.Diagnostics + + // Parse a term that might be the first operand of a binary + // operation or it might just be a standalone term. + // We won't know until we've parsed it and can look ahead + // to see if there's an operator token for this level. + lhs, lhsDiags := p.parseBinaryOps(remaining) + diags = append(diags, lhsDiags...) 
+ if p.recovery && lhsDiags.HasErrors() { + return lhs, diags + } + + // We'll keep eating up operators until we run out, so that operators + // with the same precedence will combine in a left-associative manner: + // a+b+c => (a+b)+c, not a+(b+c) + // + // Should we later want to have right-associative operators, a way + // to achieve that would be to call back up to ParseExpression here + // instead of iteratively parsing only the remaining operators. + for { + next := p.Peek() + var newOp *Operation + var ok bool + if newOp, ok = thisLevel[next.Type]; !ok { + break + } + + // Are we extending an expression started on the previous iteration? + if operation != nil { + lhs = &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + } + } + + operation = newOp + p.Read() // eat operator token + var rhsDiags hcl.Diagnostics + rhs, rhsDiags = p.parseBinaryOps(remaining) + diags = append(diags, rhsDiags...) + if p.recovery && rhsDiags.HasErrors() { + return lhs, diags + } + } + + if operation == nil { + return lhs, diags + } + + return &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + }, diags +} + +func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) { + term, diags := p.parseExpressionTerm() + ret := term + +Traversal: + for { + next := p.Peek() + + switch next.Type { + case TokenDot: + // Attribute access or splat + dot := p.Read() + attrTok := p.Peek() + + switch attrTok.Type { + case TokenIdent: + attrTok = p.Read() // eat token + name := string(attrTok.Bytes) + rng := hcl.RangeBetween(dot.Range, attrTok.Range) + step := hcl.TraverseAttr{ + Name: name, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenNumberLit: + // This is a weird form we inherited from HIL, allowing numbers + // to be used as attributes as a weird way of writing [n]. + // This was never actually a first-class thing in HIL, but + // HIL tolerated sequences like .0. in its variable names and + // calling applications like Terraform exploited that to + // introduce indexing syntax where none existed. + numTok := p.Read() // eat token + attrTok = numTok + + // This syntax is ambiguous if multiple indices are used in + // succession, like foo.0.1.baz: that actually parses as + // a fractional number 0.1. Since we're only supporting this + // syntax for compatibility with legacy Terraform + // configurations, and Terraform does not tend to have lists + // of lists, we'll choose to reject that here with a helpful + // error message, rather than failing later because the index + // isn't a whole number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + break + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) 
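+ // For example, the legacy form foo.0 produces the same TraverseIndex
+ // step as the canonical index form foo[0].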
+ + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: numVal, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenStar: + // "Attribute-only" splat expression. + // (This is a kinda weird construct inherited from HIL, which + // behaves a bit like a [*] splat except that it is only able + // to do attribute traversals into each of its elements, + // whereas foo[*] can support _any_ traversal. + marker := p.Read() // eat star + trav := make(hcl.Traversal, 0, 1) + var firstRange, lastRange hcl.Range + firstRange = p.NextRange() + for p.Peek().Type == TokenDot { + dot := p.Read() + + if p.Peek().Type == TokenNumberLit { + // Continuing the "weird stuff inherited from HIL" + // theme, we also allow numbers as attribute names + // inside splats and interpret them as indexing + // into a list, for expressions like: + // foo.bar.*.baz.0.foo + numTok := p.Read() + + // Weird special case if the user writes something + // like foo.bar.*.baz.0.0.foo, where 0.0 parses + // as a number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax with a full splat expression [*] instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + trav = append(trav, hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) 
+ trav = append(trav, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + if p.Peek().Type != TokenIdent { + if !p.recovery { + if p.Peek().Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Nested splat expression not allowed", + Detail: "A splat expression (*) cannot be used inside another attribute-only splat expression.", + Subject: p.Peek().Range.Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + } + } + p.setRecovery() + continue Traversal + } + + attrTok := p.Read() + trav = append(trav, hcl.TraverseAttr{ + Name: string(attrTok.Bytes), + SrcRange: hcl.RangeBetween(dot.Range, attrTok.Range), + }) + lastRange = attrTok.Range + } + + itemExpr := &AnonSymbolExpr{ + SrcRange: hcl.RangeBetween(dot.Range, marker.Range), + } + var travExpr Expression + if len(trav) == 0 { + travExpr = itemExpr + } else { + travExpr = &RelativeTraversalExpr{ + Source: itemExpr, + Traversal: trav, + SrcRange: hcl.RangeBetween(firstRange, lastRange), + } + } + + ret = &SplatExpr{ + Source: ret, + Each: travExpr, + Item: itemExpr, + + SrcRange: hcl.RangeBetween(dot.Range, lastRange), + MarkerRange: hcl.RangeBetween(dot.Range, marker.Range), + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + // This leaves the peeker in a bad place, so following items + // will probably be misparsed until we hit something that + // allows us to re-sync. + // + // We will probably need to do something better here eventually + // in order to support autocomplete triggered by typing a + // period. + p.setRecovery() + } + + case TokenOBrack: + // Indexing of a collection. + // This may or may not be a hcl.Traverser, depending on whether + // the key value is something constant. + + open := p.Read() + // TODO: If we have a TokenStar inside our brackets, parse as + // a Splat expression: foo[*].baz[0]. + var close Token + p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets + keyExpr, keyDiags := p.ParseExpression() + diags = append(diags, keyDiags...) + if p.recovery && keyDiags.HasErrors() { + close = p.recover(TokenCBrack) + } else { + close = p.Read() + if close.Type != TokenCBrack && !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing close bracket on index", + Detail: "The index operator must end with a closing bracket (\"]\").", + Subject: &close.Range, + }) + close = p.recover(TokenCBrack) + } + } + p.PopIncludeNewlines() + + if lit, isLit := keyExpr.(*LiteralValueExpr); isLit { + litKey, _ := lit.Value(nil) + rng := hcl.RangeBetween(open.Range, close.Range) + step := hcl.TraverseIndex{ + Key: litKey, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + } else { + rng := hcl.RangeBetween(open.Range, close.Range) + ret = &IndexExpr{ + Collection: ret, + Key: keyExpr, + + SrcRange: rng, + OpenRange: open.Range, + } + } + + default: + break Traversal + } + } + + return ret, diags +} + +// makeRelativeTraversal takes an expression and a traverser and returns +// a traversal expression that combines the two. 
If the given expression +// is already a traversal, it is extended in place (mutating it) and +// returned. If it isn't, a new RelativeTraversalExpr is created and returned. +func makeRelativeTraversal(expr Expression, next hcl.Traverser, rng hcl.Range) Expression { + switch texpr := expr.(type) { + case *ScopeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + case *RelativeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + default: + return &RelativeTraversalExpr{ + Source: expr, + Traversal: hcl.Traversal{next}, + SrcRange: rng, + } + } +} + +func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { + start := p.Peek() + + switch start.Type { + case TokenOParen: + p.Read() // eat open paren + + p.PushIncludeNewlines(false) + + expr, diags := p.ParseExpression() + if diags.HasErrors() { + // attempt to place the peeker after our closing paren + // before we return, so that the next parser has some + // chance of finding a valid expression. + p.recover(TokenCParen) + p.PopIncludeNewlines() + return expr, diags + } + + close := p.Peek() + if close.Type != TokenCParen { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unbalanced parentheses", + Detail: "Expected a closing parenthesis to terminate the expression.", + Subject: &close.Range, + Context: hcl.RangeBetween(start.Range, close.Range).Ptr(), + }) + p.setRecovery() + } + + p.Read() // eat closing paren + p.PopIncludeNewlines() + + return expr, diags + + case TokenNumberLit: + tok := p.Read() // eat number token + + numVal, diags := p.numberLitValue(tok) + return &LiteralValueExpr{ + Val: numVal, + SrcRange: tok.Range, + }, diags + + case TokenIdent: + tok := p.Read() // eat identifier token + + if p.Peek().Type == TokenOParen { + return p.finishParsingFunctionCall(tok) + } + + name := string(tok.Bytes) + switch name { + case "true": + return &LiteralValueExpr{ + Val: cty.True, + SrcRange: tok.Range, + }, nil + case "false": + return &LiteralValueExpr{ + Val: cty.False, + SrcRange: tok.Range, + }, nil + case "null": + return &LiteralValueExpr{ + Val: cty.NullVal(cty.DynamicPseudoType), + SrcRange: tok.Range, + }, nil + default: + return &ScopeTraversalExpr{ + Traversal: hcl.Traversal{ + hcl.TraverseRoot{ + Name: name, + SrcRange: tok.Range, + }, + }, + SrcRange: tok.Range, + }, nil + } + + case TokenOQuote, TokenOHeredoc: + open := p.Read() // eat opening marker + closer := p.oppositeBracket(open.Type) + exprs, passthru, _, diags := p.parseTemplateInner(closer) + + closeRange := p.PrevRange() + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + + case TokenMinus: + tok := p.Read() // eat minus token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + // e.g. 
-46+5 should parse as (-46)+5, not -(46+5) + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpNegate, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenBang: + tok := p.Read() // eat bang token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpLogicalNot, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenOBrack: + return p.parseTupleCons() + + case TokenOBrace: + return p.parseObjectCons() + + default: + var diags hcl.Diagnostics + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expression", + Detail: "Expected the start of an expression, but found an invalid expression token.", + Subject: &start.Range, + }) + } + p.setRecovery() + + // Return a placeholder so that the AST is still structurally sound + // even in the presence of parse errors. + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: start.Range, + }, diags + } +} + +func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) { + // We'll lean on the cty converter to do the conversion, to ensure that + // the behavior is the same as what would happen if converting a + // non-literal string to a number. + numStrVal := cty.StringVal(string(tok.Bytes)) + numVal, err := convert.Convert(numStrVal, cty.Number) + if err != nil { + ret := cty.UnknownVal(cty.Number) + return ret, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid number literal", + // FIXME: not a very good error message, but convert only + // gives us "a number is required", so not much help either. + Detail: "Failed to recognize the value of this number literal.", + Subject: &tok.Range, + }, + } + } + return numVal, nil +} + +// finishParsingFunctionCall parses a function call assuming that the function +// name was already read, and so the peeker should be pointing at the opening +// parenthesis after the name. +func (p *parser) finishParsingFunctionCall(name Token) (Expression, hcl.Diagnostics) { + openTok := p.Read() + if openTok.Type != TokenOParen { + // should never happen if callers behave + panic("finishParsingFunctionCall called with non-parenthesis as next token") + } + + var args []Expression + var diags hcl.Diagnostics + var expandFinal bool + var closeTok Token + + // Arbitrary newlines are allowed inside the function call parentheses. + p.PushIncludeNewlines(false) + +Token: + for { + tok := p.Peek() + + if tok.Type == TokenCParen { + closeTok = p.Read() // eat closing paren + break Token + } + + arg, argDiags := p.ParseExpression() + args = append(args, arg) + diags = append(diags, argDiags...) + if p.recovery && argDiags.HasErrors() { + // if there was a parse error in the argument then we've + // probably been left in a weird place in the token stream, + // so we'll bail out with a partial argument list. 
+ p.recover(TokenCParen) + break Token + } + + sep := p.Read() + if sep.Type == TokenCParen { + closeTok = sep + break Token + } + + if sep.Type == TokenEllipsis { + expandFinal = true + + if p.Peek().Type != TokenCParen { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing closing parenthesis", + Detail: "An expanded function argument (with ...) must be immediately followed by closing parentheses.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + } + closeTok = p.recover(TokenCParen) + } else { + closeTok = p.Read() // eat closing paren + } + break Token + } + + if sep.Type != TokenComma { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing argument separator", + Detail: "A comma is required to separate each function argument from the next.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + closeTok = p.recover(TokenCParen) + break Token + } + + if p.Peek().Type == TokenCParen { + // A trailing comma after the last argument gets us in here. + closeTok = p.Read() // eat closing paren + break Token + } + + } + + p.PopIncludeNewlines() + + return &FunctionCallExpr{ + Name: string(name.Bytes), + Args: args, + + ExpandFinal: expandFinal, + + NameRange: name.Range, + OpenParenRange: openTok.Range, + CloseParenRange: closeTok.Range, + }, diags +} + +func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) { + open := p.Read() + if open.Type != TokenOBrack { + // Should never happen if callers are behaving + panic("parseTupleCons called without peeker pointing to open bracket") + } + + p.PushIncludeNewlines(false) + defer p.PopIncludeNewlines() + + if forKeyword.TokenMatches(p.Peek()) { + return p.finishParsingForExpr(open) + } + + var close Token + + var diags hcl.Diagnostics + var exprs []Expression + + for { + next := p.Peek() + if next.Type == TokenCBrack { + close = p.Read() // eat closer + break + } + + expr, exprDiags := p.ParseExpression() + exprs = append(exprs, expr) + diags = append(diags, exprDiags...) + + if p.recovery && exprDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing bracket to allow parsing to continue. 
+ close = p.recover(TokenCBrack) + break + } + + next = p.Peek() + if next.Type == TokenCBrack { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item separator", + Detail: "Expected a comma to mark the beginning of the next item.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrack) + break + } + + p.Read() // eat comma + + } + + return &TupleConsExpr{ + Exprs: exprs, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { + open := p.Read() + if open.Type != TokenOBrace { + // Should never happen if callers are behaving + panic("parseObjectCons called without peeker pointing to open brace") + } + + p.PushIncludeNewlines(true) + defer p.PopIncludeNewlines() + + if forKeyword.TokenMatches(p.Peek()) { + return p.finishParsingForExpr(open) + } + + var close Token + + var diags hcl.Diagnostics + var items []ObjectConsItem + + for { + next := p.Peek() + if next.Type == TokenNewline { + p.Read() // eat newline + continue + } + + if next.Type == TokenCBrace { + close = p.Read() // eat closer + break + } + + var key Expression + var keyDiags hcl.Diagnostics + key, keyDiags = p.ParseExpression() + diags = append(diags, keyDiags...) + + if p.recovery && keyDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing brace to allow parsing to continue. + close = p.recover(TokenCBrace) + break + } + + // We wrap up the key expression in a special wrapper that deals + // with our special case that naked identifiers as object keys + // are interpreted as literal strings. + key = &ObjectConsKeyExpr{Wrapped: key} + + next = p.Peek() + if next.Type != TokenEqual && next.Type != TokenColon { + if !p.recovery { + if next.Type == TokenNewline || next.Type == TokenComma { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item value", + Detail: "Expected an item value, introduced by an equals sign (\"=\").", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing key/value separator", + Detail: "Expected an equals sign (\"=\") to mark the beginning of the item value.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + } + close = p.recover(TokenCBrace) + break + } + + p.Read() // eat equals sign or colon + + value, valueDiags := p.ParseExpression() + diags = append(diags, valueDiags...) + + if p.recovery && valueDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing brace to allow parsing to continue. 
+ close = p.recover(TokenCBrace) + break + } + + items = append(items, ObjectConsItem{ + KeyExpr: key, + ValueExpr: value, + }) + + next = p.Peek() + if next.Type == TokenCBrace { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma && next.Type != TokenNewline { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item separator", + Detail: "Expected a newline or comma to mark the beginning of the next item.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrace) + break + } + + p.Read() // eat comma or newline + + } + + return &ObjectConsExpr{ + Items: items, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) { + introducer := p.Read() + if !forKeyword.TokenMatches(introducer) { + // Should never happen if callers are behaving + panic("finishParsingForExpr called without peeker pointing to 'for' identifier") + } + + var makeObj bool + var closeType TokenType + switch open.Type { + case TokenOBrace: + makeObj = true + closeType = TokenCBrace + case TokenOBrack: + makeObj = false // making a tuple + closeType = TokenCBrack + default: + // Should never happen if callers are behaving + panic("finishParsingForExpr called with invalid open token") + } + + var diags hcl.Diagnostics + var keyName, valName string + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires 'in' keyword after names.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
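+ // We have now consumed e.g. `for s in src` from an expression such as
+ // [for s in src : upper(s)]; a colon must follow the collection
+ // expression before the result expression.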
+ if p.recovery && collDiags.HasErrors() { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + if p.Peek().Type != TokenColon { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires colon after collection expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat colon + + var keyExpr, valExpr Expression + var keyDiags, valDiags hcl.Diagnostics + valExpr, valDiags = p.ParseExpression() + if p.Peek().Type == TokenFatArrow { + // What we just parsed was actually keyExpr + p.Read() // eat the fat arrow + keyExpr, keyDiags = valExpr, valDiags + + valExpr, valDiags = p.ParseExpression() + } + diags = append(diags, keyDiags...) + diags = append(diags, valDiags...) + if p.recovery && (keyDiags.HasErrors() || valDiags.HasErrors()) { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + group := false + var ellipsis Token + if p.Peek().Type == TokenEllipsis { + ellipsis = p.Read() + group = true + } + + var condExpr Expression + var condDiags hcl.Diagnostics + if ifKeyword.TokenMatches(p.Peek()) { + p.Read() // eat "if" + condExpr, condDiags = p.ParseExpression() + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + close := p.recover(p.oppositeBracket(open.Type)) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + } + + var close Token + if p.Peek().Type == closeType { + close = p.Read() + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Extra characters after the end of the 'for' expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close = p.recover(closeType) + } + + if !makeObj { + if keyExpr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is not valid when building a tuple.", + Subject: keyExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + if group { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Grouping ellipsis (...) 
cannot be used when building a tuple.", + Subject: &ellipsis.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } else { + if keyExpr == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is required when building an object.", + Subject: valExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } + + return &ForExpr{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + KeyExpr: keyExpr, + ValExpr: valExpr, + CondExpr: condExpr, + Group: group, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + CloseRange: close.Range, + }, diags +} + +// parseQuotedStringLiteral is a helper for parsing quoted strings that +// aren't allowed to contain any interpolations, such as block labels. +func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) { + oQuote := p.Read() + if oQuote.Type != TokenOQuote { + return "", oQuote.Range, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "A quoted string is required here.", + Subject: &oQuote.Range, + }, + } + } + + var diags hcl.Diagnostics + ret := &bytes.Buffer{} + var cQuote Token + +Token: + for { + tok := p.Read() + switch tok.Type { + + case TokenCQuote: + cQuote = tok + break Token + + case TokenQuotedLit: + s, sDiags := p.decodeStringLit(tok) + diags = append(diags, sDiags...) + ret.WriteString(s) + + case TokenTemplateControl, TokenTemplateInterp: + which := "$" + if tok.Type == TokenTemplateControl { + which = "!" + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: fmt.Sprintf( + "Template sequences are not allowed in this string. To include a literal %q, double it (as \"%s%s\") to escape it.", + which, which, which, + ), + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + p.recover(TokenTemplateSeqEnd) + + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated string literal", + Detail: "Unable to find the closing quote mark before the end of the file.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + break Token + + default: + // Should never happen, as long as the scanner is behaving itself + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "This item is not valid in a string literal.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + p.recover(TokenOQuote) + break Token + + } + + } + + return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags +} + +// decodeStringLit processes the given token, which must be either a +// TokenQuotedLit or a TokenStringLit, returning the string resulting from +// resolving any escape sequences. +// +// If any error diagnostics are returned, the returned string may be incomplete +// or otherwise invalid. 
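+//
+// For example, in quoted mode the bytes a\nb decode to "a", newline, "b",
+// and the escape sequences $${ and %%{ decode to the literal sequences ${
+// and %{ respectively.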
+func (p *parser) decodeStringLit(tok Token) (string, hcl.Diagnostics) { + var quoted bool + switch tok.Type { + case TokenQuotedLit: + quoted = true + case TokenStringLit: + quoted = false + default: + panic("decodeQuotedLit can only be used with TokenStringLit and TokenQuotedLit tokens") + } + var diags hcl.Diagnostics + + ret := make([]byte, 0, len(tok.Bytes)) + slices := scanStringLit(tok.Bytes, quoted) + + // We will mutate rng constantly as we walk through our token slices below. + // Any diagnostics must take a copy of this rng rather than simply pointing + // to it, e.g. by using rng.Ptr() rather than &rng. + rng := tok.Range + rng.End = rng.Start + +Slices: + for _, slice := range slices { + if len(slice) == 0 { + continue + } + + // Advance the start of our range to where the previous token ended + rng.Start = rng.End + + // Advance the end of our range to after our token. + b := slice + for len(b) > 0 { + adv, ch, _ := textseg.ScanGraphemeClusters(b, true) + rng.End.Byte += adv + switch ch[0] { + case '\r', '\n': + rng.End.Line++ + rng.End.Column = 1 + default: + rng.End.Column++ + } + b = b[adv:] + } + + TokenType: + switch slice[0] { + case '\\': + if !quoted { + // If we're not in quoted mode then just treat this token as + // normal. (Slices can still start with backslash even if we're + // not specifically looking for backslash sequences.) + break TokenType + } + if len(slice) < 2 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "Backslash must be followed by an escape sequence selector character.", + Subject: rng.Ptr(), + }) + break TokenType + } + + switch slice[1] { + + case 'n': + ret = append(ret, '\n') + continue Slices + case 'r': + ret = append(ret, '\r') + continue Slices + case 't': + ret = append(ret, '\t') + continue Slices + case '"': + ret = append(ret, '"') + continue Slices + case '\\': + ret = append(ret, '\\') + continue Slices + case 'u', 'U': + if slice[1] == 'u' && len(slice) != 6 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "The \\u escape sequence must be followed by four hexadecimal digits.", + Subject: rng.Ptr(), + }) + break TokenType + } else if slice[1] == 'U' && len(slice) != 10 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "The \\U escape sequence must be followed by eight hexadecimal digits.", + Subject: rng.Ptr(), + }) + break TokenType + } + + numHex := string(slice[2:]) + num, err := strconv.ParseUint(numHex, 16, 32) + if err != nil { + // Should never happen because the scanner won't match + // a sequence of digits that isn't valid. + panic(err) + } + + r := rune(num) + l := utf8.RuneLen(r) + if l == -1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: fmt.Sprintf("Cannot encode character U+%04x in UTF-8.", num), + Subject: rng.Ptr(), + }) + break TokenType + } + for i := 0; i < l; i++ { + ret = append(ret, 0) + } + rb := ret[len(ret)-l:] + utf8.EncodeRune(rb, r) + + continue Slices + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: fmt.Sprintf("The symbol %q is not a valid escape sequence selector.", slice[1:]), + Subject: rng.Ptr(), + }) + ret = append(ret, slice[1:]...) 
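+ // Keep the character(s) after the backslash so the decoded result
+ // remains mostly intact despite the error.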
+ continue Slices + } + + case '$', '%': + if len(slice) != 3 { + // Not long enough to be our escape sequence, so it's literal. + break TokenType + } + + if slice[1] == slice[0] && slice[2] == '{' { + ret = append(ret, slice[0]) + ret = append(ret, '{') + continue Slices + } + + break TokenType + } + + // If we fall out here or break out of here from the switch above + // then this slice is just a literal. + ret = append(ret, slice...) + } + + return string(ret), diags +} + +// setRecovery turns on recovery mode without actually doing any recovery. +// This can be used when a parser knowingly leaves the peeker in a useless +// place and wants to suppress errors that might result from that decision. +func (p *parser) setRecovery() { + p.recovery = true +} + +// recover seeks forward in the token stream until it finds TokenType "end", +// then returns with the peeker pointed at the following token. +// +// If the given token type is a bracketer, this function will additionally +// count nested instances of the brackets to try to leave the peeker at +// the end of the _current_ instance of that bracketer, skipping over any +// nested instances. This is a best-effort operation and may have +// unpredictable results on input with bad bracketer nesting. +func (p *parser) recover(end TokenType) Token { + start := p.oppositeBracket(end) + p.recovery = true + + nest := 0 + for { + tok := p.Read() + ty := tok.Type + if end == TokenTemplateSeqEnd && ty == TokenTemplateControl { + // normalize so that our matching behavior can work, since + // TokenTemplateControl/TokenTemplateInterp are asymmetrical + // with TokenTemplateSeqEnd and thus we need to count both + // openers if that's the closer we're looking for. + ty = TokenTemplateInterp + } + + switch ty { + case start: + nest++ + case end: + if nest < 1 { + return tok + } + + nest-- + case TokenEOF: + return tok + } + } +} + +// recoverOver seeks forward in the token stream until it finds a block +// starting with TokenType "start", then finds the corresponding end token, +// leaving the peeker pointed at the token after that end token. +// +// The given token type _must_ be a bracketer. For example, if the given +// start token is TokenOBrace then the parser will be left at the _end_ of +// the next brace-delimited block encountered, or at EOF if no such block +// is found or it is unclosed. +func (p *parser) recoverOver(start TokenType) { + end := p.oppositeBracket(start) + + // find the opening bracket first +Token: + for { + tok := p.Read() + switch tok.Type { + case start, TokenEOF: + break Token + } + } + + // Now use our existing recover function to locate the _end_ of the + // container we've found. 
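+ // For example, recoverOver(TokenOBrace) leaves the peeker just after
+ // the "}" that closes the next "{ ... }" block, skipping nested braces.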
+ p.recover(end) +} + +func (p *parser) recoverAfterBodyItem() { + p.recovery = true + var open []TokenType + +Token: + for { + tok := p.Read() + + switch tok.Type { + + case TokenNewline: + if len(open) == 0 { + break Token + } + + case TokenEOF: + break Token + + case TokenOBrace, TokenOBrack, TokenOParen, TokenOQuote, TokenOHeredoc, TokenTemplateInterp, TokenTemplateControl: + open = append(open, tok.Type) + + case TokenCBrace, TokenCBrack, TokenCParen, TokenCQuote, TokenCHeredoc: + opener := p.oppositeBracket(tok.Type) + for len(open) > 0 && open[len(open)-1] != opener { + open = open[:len(open)-1] + } + if len(open) > 0 { + open = open[:len(open)-1] + } + + case TokenTemplateSeqEnd: + for len(open) > 0 && open[len(open)-1] != TokenTemplateInterp && open[len(open)-1] != TokenTemplateControl { + open = open[:len(open)-1] + } + if len(open) > 0 { + open = open[:len(open)-1] + } + + } + } +} + +// oppositeBracket finds the bracket that opposes the given bracketer, or +// NilToken if the given token isn't a bracketer. +// +// "Bracketer", for the sake of this function, is one end of a matching +// open/close set of tokens that establish a bracketing context. +func (p *parser) oppositeBracket(ty TokenType) TokenType { + switch ty { + + case TokenOBrace: + return TokenCBrace + case TokenOBrack: + return TokenCBrack + case TokenOParen: + return TokenCParen + case TokenOQuote: + return TokenCQuote + case TokenOHeredoc: + return TokenCHeredoc + + case TokenCBrace: + return TokenOBrace + case TokenCBrack: + return TokenOBrack + case TokenCParen: + return TokenOParen + case TokenCQuote: + return TokenOQuote + case TokenCHeredoc: + return TokenOHeredoc + + case TokenTemplateControl: + return TokenTemplateSeqEnd + case TokenTemplateInterp: + return TokenTemplateSeqEnd + case TokenTemplateSeqEnd: + // This is ambigous, but we return Interp here because that's + // what's assumed by the "recover" method. + return TokenTemplateInterp + + default: + return TokenNil + } +} + +func errPlaceholderExpr(rng hcl.Range) Expression { + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: rng, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go new file mode 100644 index 00000000..3711067e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go @@ -0,0 +1,728 @@ +package hclsyntax + +import ( + "fmt" + "strings" + "unicode" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) { + return p.parseTemplate(TokenEOF) +} + +func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) { + exprs, passthru, rng, diags := p.parseTemplateInner(end) + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: rng, + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: rng, + }, diags +} + +func (p *parser) parseTemplateInner(end TokenType) ([]Expression, bool, hcl.Range, hcl.Diagnostics) { + parts, diags := p.parseTemplateParts(end) + tp := templateParser{ + Tokens: parts.Tokens, + SrcRange: parts.SrcRange, + } + exprs, exprsDiags := tp.parseRoot() + diags = append(diags, exprsDiags...) 
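+ // A template that is only a single interpolation, such as "${foo}",
+ // produces exactly one real token plus the synthetic end token and is
+ // flagged as passthrough below, so its result is foo's value verbatim.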
+ + passthru := false + if len(parts.Tokens) == 2 { // one real token and one synthetic "end" token + if _, isInterp := parts.Tokens[0].(*templateInterpToken); isInterp { + passthru = true + } + } + + return exprs, passthru, parts.SrcRange, diags +} + +type templateParser struct { + Tokens []templateToken + SrcRange hcl.Range + + pos int +} + +func (p *templateParser) parseRoot() ([]Expression, hcl.Diagnostics) { + var exprs []Expression + var diags hcl.Diagnostics + + for { + next := p.Peek() + if _, isEnd := next.(*templateEndToken); isEnd { + break + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) + exprs = append(exprs, expr) + } + + return exprs, diags +} + +func (p *templateParser) parseExpr() (Expression, hcl.Diagnostics) { + next := p.Peek() + switch tok := next.(type) { + + case *templateLiteralToken: + p.Read() // eat literal + return &LiteralValueExpr{ + Val: cty.StringVal(tok.Val), + SrcRange: tok.SrcRange, + }, nil + + case *templateInterpToken: + p.Read() // eat interp + return tok.Expr, nil + + case *templateIfToken: + return p.parseIf() + + case *templateForToken: + return p.parseFor() + + case *templateEndToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + // This is a particularly unhelpful diagnostic, so callers + // should attempt to pre-empt it and produce a more helpful + // diagnostic that is context-aware. + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + case *templateEndCtrlToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", tok.Name()), + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + default: + // should never happen, because above should be exhaustive + panic(fmt.Sprintf("unhandled template token type %T", next)) + } +} + +func (p *templateParser) parseIf() (Expression, hcl.Diagnostics) { + open := p.Read() + openIf, isIf := open.(*templateIfToken) + if !isIf { + // should never happen if caller is behaving + panic("parseIf called with peeker not pointing at if token") + } + + var ifExprs, elseExprs []Expression + var diags hcl.Diagnostics + var endifRange hcl.Range + + currentExprs := &ifExprs +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The if directive at %s is missing its corresponding endif directive.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + if currentExprs == &ifExprs { + currentExprs = &elseExprs + continue Token + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: fmt.Sprintf( + "Already in the else clause for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + + case templateEndIf: + endifRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + 
Detail: fmt.Sprintf( + "Expecting an endif directive for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) + *currentExprs = append(*currentExprs, expr) + } + + if len(ifExprs) == 0 { + ifExprs = append(ifExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openIf.SrcRange.Filename, + Start: openIf.SrcRange.End, + End: openIf.SrcRange.End, + }, + }) + } + if len(elseExprs) == 0 { + elseExprs = append(elseExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: endifRange.Filename, + Start: endifRange.Start, + End: endifRange.Start, + }, + }) + } + + trueExpr := &TemplateExpr{ + Parts: ifExprs, + SrcRange: hcl.RangeBetween(ifExprs[0].Range(), ifExprs[len(ifExprs)-1].Range()), + } + falseExpr := &TemplateExpr{ + Parts: elseExprs, + SrcRange: hcl.RangeBetween(elseExprs[0].Range(), elseExprs[len(elseExprs)-1].Range()), + } + + return &ConditionalExpr{ + Condition: openIf.CondExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(openIf.SrcRange, endifRange), + }, diags +} + +func (p *templateParser) parseFor() (Expression, hcl.Diagnostics) { + open := p.Read() + openFor, isFor := open.(*templateForToken) + if !isFor { + // should never happen if caller is behaving + panic("parseFor called with peeker not pointing at for token") + } + + var contentExprs []Expression + var diags hcl.Diagnostics + var endforRange hcl.Range + +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The for directive at %s is missing its corresponding endfor directive.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: "An else clause is not expected for a for directive.", + Subject: &end.SrcRange, + }) + + case templateEndFor: + endforRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + Detail: fmt.Sprintf( + "Expecting an endfor directive corresponding to the for directive at %s.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) 
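+		// Anything that isn't a control directive becomes part of the
+		// for directive's body.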
+ contentExprs = append(contentExprs, expr) + } + + if len(contentExprs) == 0 { + contentExprs = append(contentExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openFor.SrcRange.Filename, + Start: openFor.SrcRange.End, + End: openFor.SrcRange.End, + }, + }) + } + + contentExpr := &TemplateExpr{ + Parts: contentExprs, + SrcRange: hcl.RangeBetween(contentExprs[0].Range(), contentExprs[len(contentExprs)-1].Range()), + } + + forExpr := &ForExpr{ + KeyVar: openFor.KeyVar, + ValVar: openFor.ValVar, + + CollExpr: openFor.CollExpr, + ValExpr: contentExpr, + + SrcRange: hcl.RangeBetween(openFor.SrcRange, endforRange), + OpenRange: openFor.SrcRange, + CloseRange: endforRange, + } + + return &TemplateJoinExpr{ + Tuple: forExpr, + }, diags +} + +func (p *templateParser) Peek() templateToken { + return p.Tokens[p.pos] +} + +func (p *templateParser) Read() templateToken { + ret := p.Peek() + if _, end := ret.(*templateEndToken); !end { + p.pos++ + } + return ret +} + +// parseTemplateParts produces a flat sequence of "template tokens", which are +// either literal values (with any "trimming" already applied), interpolation +// sequences, or control flow markers. +// +// A further pass is required on the result to turn it into an AST. +func (p *parser) parseTemplateParts(end TokenType) (*templateParts, hcl.Diagnostics) { + var parts []templateToken + var diags hcl.Diagnostics + + startRange := p.NextRange() + ltrimNext := false + nextCanTrimPrev := false + var endRange hcl.Range + +Token: + for { + next := p.Read() + if next.Type == end { + // all done! + endRange = next.Range + break + } + + ltrim := ltrimNext + ltrimNext = false + canTrimPrev := nextCanTrimPrev + nextCanTrimPrev = false + + switch next.Type { + case TokenStringLit, TokenQuotedLit: + str, strDiags := p.decodeStringLit(next) + diags = append(diags, strDiags...) + + if ltrim { + str = strings.TrimLeftFunc(str, unicode.IsSpace) + } + + parts = append(parts, &templateLiteralToken{ + Val: str, + SrcRange: next.Range, + }) + nextCanTrimPrev = true + + case TokenTemplateInterp: + // if the opener is ${~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + + p.PushIncludeNewlines(false) + expr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. 
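+				// For example, in "${ x ~}   tail" the trimmed literal
+				// begins at "tail".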
+ if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + parts = append(parts, &templateInterpToken{ + Expr: expr, + SrcRange: hcl.RangeBetween(next.Range, close.Range), + }) + + case TokenTemplateControl: + // if the opener is %{~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + p.PushIncludeNewlines(false) + + kw := p.Peek() + if kw.Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template directive", + Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a %{ sequence.", + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat keyword token + + switch { + + case ifKeyword.TokenMatches(kw): + condExpr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + parts = append(parts, &templateIfToken{ + CondExpr: condExpr, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case elseKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateElse, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endifKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndIf, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case forKeyword.TokenMatches(kw): + var keyName, valName string + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires 'in' keyword after names.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
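+
+			// Record the for directive itself; parseFor will later collect
+			// its body and matching endfor from the raised token sequence.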
+ parts = append(parts, &templateForToken{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endforKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndFor, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + default: + if !p.recovery { + suggestions := []string{"if", "for", "else", "endif", "endfor"} + given := string(kw.Bytes) + suggestion := nameSuggestion(given, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template control keyword", + Detail: fmt.Sprintf("%q is not a valid template control keyword.%s", given, suggestion), + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + + } + + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extra characters in %s marker", kw.Bytes), + Detail: "Expected a closing brace to end the sequence, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. + if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated template string", + Detail: "No closing marker was found for the string.", + Subject: &next.Range, + Context: hcl.RangeBetween(startRange, next.Range).Ptr(), + }) + } + final := p.recover(end) + endRange = final.Range + break Token + } + } + + if len(parts) == 0 { + // If a sequence has no content, we'll treat it as if it had an + // empty string in it because that's what the user probably means + // if they write "" in configuration. + parts = append(parts, &templateLiteralToken{ + Val: "", + SrcRange: hcl.Range{ + // Range is the zero-character span immediately after the + // opening quote. + Filename: startRange.Filename, + Start: startRange.End, + End: startRange.End, + }, + }) + } + + // Always end with an end token, so the parser can produce diagnostics + // about unclosed items with proper position information. + parts = append(parts, &templateEndToken{ + SrcRange: endRange, + }) + + ret := &templateParts{ + Tokens: parts, + SrcRange: hcl.RangeBetween(startRange, endRange), + } + + return ret, diags +} + +type templateParts struct { + Tokens []templateToken + SrcRange hcl.Range +} + +// templateToken is a higher-level token that represents a single atom within +// the template language. Our template parsing first raises the raw token +// stream to a sequence of templateToken, and then transforms the result into +// an expression tree. 
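+//
+// For example, the template "a${b}%{ if c }d%{ endif }" is first raised to
+// the token sequence: literal "a", interp b, if c, literal "d", endif, end.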
+type templateToken interface { + templateToken() templateToken +} + +type templateLiteralToken struct { + Val string + SrcRange hcl.Range + isTemplateToken +} + +type templateInterpToken struct { + Expr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateIfToken struct { + CondExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateForToken struct { + KeyVar string // empty if ignoring key + ValVar string + CollExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateEndCtrlType int + +const ( + templateEndIf templateEndCtrlType = iota + templateElse + templateEndFor +) + +type templateEndCtrlToken struct { + Type templateEndCtrlType + SrcRange hcl.Range + isTemplateToken +} + +func (t *templateEndCtrlToken) Name() string { + switch t.Type { + case templateEndIf: + return "endif" + case templateElse: + return "else" + case templateEndFor: + return "endfor" + default: + // should never happen + panic("invalid templateEndCtrlType") + } +} + +type templateEndToken struct { + SrcRange hcl.Range + isTemplateToken +} + +type isTemplateToken [0]int + +func (t isTemplateToken) templateToken() templateToken { + return t +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go new file mode 100644 index 00000000..2ff3ed6c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go @@ -0,0 +1,159 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// ParseTraversalAbs parses an absolute traversal that is assumed to consume +// all of the remaining tokens in the peeker. The usual parser recovery +// behavior is not supported here because traversals are not expected to +// be parsed as part of a larger program. 
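+//
+// For example, "aws_instance.foo[0]" parses as a root variable name followed
+// by one attribute access and one numeric index step.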
+func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) { + var ret hcl.Traversal + var diags hcl.Diagnostics + + // Absolute traversal must always begin with a variable name + varTok := p.Read() + if varTok.Type != TokenIdent { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Variable name required", + Detail: "Must begin with a variable name.", + Subject: &varTok.Range, + }) + return ret, diags + } + + varName := string(varTok.Bytes) + ret = append(ret, hcl.TraverseRoot{ + Name: varName, + SrcRange: varTok.Range, + }) + + for { + next := p.Peek() + + if next.Type == TokenEOF { + return ret, diags + } + + switch next.Type { + case TokenDot: + // Attribute access + dot := p.Read() // eat dot + nameTok := p.Read() + if nameTok.Type != TokenIdent { + if nameTok.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions (.*) may not be used here.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Dot must be followed by attribute name.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } + return ret, diags + } + + attrName := string(nameTok.Bytes) + ret = append(ret, hcl.TraverseAttr{ + Name: attrName, + SrcRange: hcl.RangeBetween(dot.Range, nameTok.Range), + }) + case TokenOBrack: + // Index + open := p.Read() // eat open bracket + next := p.Peek() + + switch next.Type { + case TokenNumberLit: + tok := p.Read() // eat number + numVal, numDiags := p.numberLitValue(tok) + diags = append(diags, numDiags...) + + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + case TokenOQuote: + str, _, strDiags := p.parseQuotedStringLiteral() + diags = append(diags, strDiags...) 
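+				// The parsed string becomes a string-valued index step,
+				// as in foo["bar"].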
+ + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: cty.StringVal(str), + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + default: + if next.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions ([*]) may not be used here.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Index value required", + Detail: "Index brackets must contain either a literal number or a literal string.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } + return ret, diags + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid character", + Detail: "Expected an attribute access or an index operator.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + return ret, diags + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go new file mode 100644 index 00000000..5a4b50e2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go @@ -0,0 +1,212 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/hashicorp/hcl2/hcl" +) + +// This is set to true at init() time in tests, to enable more useful output +// if a stack discipline error is detected. It should not be enabled in +// normal mode since there is a performance penalty from accessing the +// runtime stack to produce the traces, but could be temporarily set to +// true for debugging if desired. +var tracePeekerNewlinesStack = false + +type peeker struct { + Tokens Tokens + NextIndex int + + IncludeComments bool + IncludeNewlinesStack []bool + + // used only when tracePeekerNewlinesStack is set + newlineStackChanges []peekerNewlineStackChange +} + +// for use in debugging the stack usage only +type peekerNewlineStackChange struct { + Pushing bool // if false, then popping + Frame runtime.Frame + Include bool +} + +func newPeeker(tokens Tokens, includeComments bool) *peeker { + return &peeker{ + Tokens: tokens, + IncludeComments: includeComments, + + IncludeNewlinesStack: []bool{true}, + } +} + +func (p *peeker) Peek() Token { + ret, _ := p.nextToken() + return ret +} + +func (p *peeker) Read() Token { + ret, nextIdx := p.nextToken() + p.NextIndex = nextIdx + return ret +} + +func (p *peeker) NextRange() hcl.Range { + return p.Peek().Range +} + +func (p *peeker) PrevRange() hcl.Range { + if p.NextIndex == 0 { + return p.NextRange() + } + + return p.Tokens[p.NextIndex-1].Range +} + +func (p *peeker) nextToken() (Token, int) { + for i := p.NextIndex; i < len(p.Tokens); i++ { + tok := p.Tokens[i] + switch tok.Type { + case TokenComment: + if !p.IncludeComments { + // Single-line comment tokens, starting with # or //, absorb + // the trailing newline that terminates them as part of their + // bytes. 
When we're filtering out comments, we must as a
+				// special case transform these to newline tokens in order
+				// to properly parse newline-terminated block items.
+
+				if p.includingNewlines() {
+					if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
+						fakeNewline := Token{
+							Type:  TokenNewline,
+							Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)],
+
+							// We use the whole token range as the newline
+							// range, even though that's a little... weird,
+							// because otherwise we'd need to go count
+							// characters again in order to figure out the
+							// column of the newline, and that complexity
+							// isn't justified when ranges of newlines are
+							// so rarely printed anyway.
+							Range: tok.Range,
+						}
+						return fakeNewline, i + 1
+					}
+				}
+
+				continue
+			}
+		case TokenNewline:
+			if !p.includingNewlines() {
+				continue
+			}
+		}
+
+		return tok, i + 1
+	}
+
+	// if we fall out here then we'll return the EOF token, and leave
+	// our index pointed off the end of the array so we'll keep
+	// returning EOF in future too.
+	return p.Tokens[len(p.Tokens)-1], len(p.Tokens)
+}
+
+func (p *peeker) includingNewlines() bool {
+	return p.IncludeNewlinesStack[len(p.IncludeNewlinesStack)-1]
+}
+
+func (p *peeker) PushIncludeNewlines(include bool) {
+	if tracePeekerNewlinesStack {
+		// Record who called us so that we can more easily track down any
+		// mismanagement of the stack in the parser.
+		callers := []uintptr{0}
+		runtime.Callers(2, callers)
+		frames := runtime.CallersFrames(callers)
+		frame, _ := frames.Next()
+		p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
+			true, frame, include,
+		})
+	}
+
+	p.IncludeNewlinesStack = append(p.IncludeNewlinesStack, include)
+}
+
+func (p *peeker) PopIncludeNewlines() bool {
+	stack := p.IncludeNewlinesStack
+	remain, ret := stack[:len(stack)-1], stack[len(stack)-1]
+	p.IncludeNewlinesStack = remain
+
+	if tracePeekerNewlinesStack {
+		// Record who called us so that we can more easily track down any
+		// mismanagement of the stack in the parser.
+		callers := []uintptr{0}
+		runtime.Callers(2, callers)
+		frames := runtime.CallersFrames(callers)
+		frame, _ := frames.Next()
+		p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
+			false, frame, ret,
+		})
+	}
+
+	return ret
+}
+
+// AssertEmptyIncludeNewlinesStack checks that the IncludeNewlinesStack is
+// empty, panicking if it is not. This can be used to catch stack
+// mismanagement that might otherwise just cause confusing downstream errors.
+//
+// This function is a no-op if the stack is empty when called.
+//
+// If newlines stack tracing is enabled by setting the global variable
+// tracePeekerNewlinesStack at init time, a full log of all of the push/pop
+// calls will be produced to help identify which caller in the parser is
+// misbehaving.
+func (p *peeker) AssertEmptyIncludeNewlinesStack() {
+	if len(p.IncludeNewlinesStack) != 1 {
+		// Should never happen; indicates mismanagement of the stack inside
+		// the parser.
+		if p.newlineStackChanges != nil { // only if tracePeekerNewlinesStack is enabled above
+			panic(fmt.Errorf(
+				"non-empty IncludeNewlinesStack after parse with %d calls unaccounted for:\n%s",
+				len(p.IncludeNewlinesStack)-1,
+				formatPeekerNewlineStackChanges(p.newlineStackChanges),
+			))
+		} else {
+			panic(fmt.Errorf("non-empty IncludeNewlinesStack after parse: %#v", p.IncludeNewlinesStack))
+		}
+	}
+}
+
+func formatPeekerNewlineStackChanges(changes []peekerNewlineStackChange) string {
+	indent := 0
+	var buf bytes.Buffer
+	for _, change := range changes {
+		funcName := change.Frame.Function
+		if idx := strings.LastIndexByte(funcName, '.'); idx != -1 {
+			funcName = funcName[idx+1:]
+		}
+		filename := change.Frame.File
+		if idx := strings.LastIndexByte(filename, filepath.Separator); idx != -1 {
+			filename = filename[idx+1:]
+		}
+
+		switch change.Pushing {
+
+		case true:
+			buf.WriteString(strings.Repeat("    ", indent))
+			fmt.Fprintf(&buf, "PUSH %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
+			indent++
+
+		case false:
+			indent--
+			buf.WriteString(strings.Repeat("    ", indent))
+			fmt.Fprintf(&buf, "POP %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
+
+		}
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
new file mode 100644
index 00000000..cf0ee297
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
@@ -0,0 +1,171 @@
+package hclsyntax
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// ParseConfig parses the given buffer as a whole HCL config file, returning
+// a *hcl.File representing its contents. If calling HasErrors on the returned
+// diagnostics returns true, the returned body is likely to be incomplete
+// and should therefore be used with care.
+//
+// The body in the returned file has dynamic type *hclsyntax.Body, so callers
+// may freely type-assert this to get access to the full hclsyntax API in
+// situations where detailed access is required. However, most common use-cases
+// should be served using the hcl.Body interface to ensure compatibility with
+// other configuration syntaxes, such as JSON.
+func ParseConfig(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) {
+	tokens, diags := LexConfig(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+	body, parseDiags := parser.ParseBody(TokenEOF)
+	diags = append(diags, parseDiags...)
+
+	// Panic if the parser uses incorrect stack discipline with the peeker's
+	// newlines stack, since otherwise it will produce confusing downstream
+	// errors.
+	peeker.AssertEmptyIncludeNewlinesStack()
+
+	return &hcl.File{
+		Body:  body,
+		Bytes: src,
+
+		Nav: navigation{
+			root: body,
+		},
+	}, diags
+}
+
+// ParseExpression parses the given buffer as a standalone HCL expression,
+// returning it as an instance of Expression.
+func ParseExpression(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
+	tokens, diags := LexExpression(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+
+	// Bare expressions are always parsed in "ignore newlines" mode, as if
+	// they were wrapped in parentheses.
+	parser.PushIncludeNewlines(false)
+
+	expr, parseDiags := parser.ParseExpression()
+	diags = append(diags, parseDiags...)
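+
+	// A standalone expression must consume the whole buffer, so anything
+	// left over other than EOF is reported as extra trailing characters.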
+	next := parser.Peek()
+	if next.Type != TokenEOF && !parser.recovery {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Extra characters after expression",
+			Detail:   "An expression was successfully parsed, but extra characters were found after it.",
+			Subject:  &next.Range,
+		})
+	}
+
+	parser.PopIncludeNewlines()
+
+	// Panic if the parser uses incorrect stack discipline with the peeker's
+	// newlines stack, since otherwise it will produce confusing downstream
+	// errors.
+	peeker.AssertEmptyIncludeNewlinesStack()
+
+	return expr, diags
+}
+
+// ParseTemplate parses the given buffer as a standalone HCL template,
+// returning it as an instance of Expression.
+func ParseTemplate(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
+	tokens, diags := LexTemplate(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+	expr, parseDiags := parser.ParseTemplate()
+	diags = append(diags, parseDiags...)
+
+	// Panic if the parser uses incorrect stack discipline with the peeker's
+	// newlines stack, since otherwise it will produce confusing downstream
+	// errors.
+	peeker.AssertEmptyIncludeNewlinesStack()
+
+	return expr, diags
+}
+
+// ParseTraversalAbs parses the given buffer as a standalone absolute traversal.
+//
+// Parsing as a traversal is more limited than parsing as an expression since
+// it allows only attribute and indexing operations on variables. Traversals
+// are useful as a syntax for referring to objects without necessarily
+// evaluating them.
+func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) {
+	tokens, diags := LexExpression(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+
+	// Bare traversals are always parsed in "ignore newlines" mode, as if
+	// they were wrapped in parentheses.
+	parser.PushIncludeNewlines(false)
+
+	expr, parseDiags := parser.ParseTraversalAbs()
+	diags = append(diags, parseDiags...)
+
+	parser.PopIncludeNewlines()
+
+	// Panic if the parser uses incorrect stack discipline with the peeker's
+	// newlines stack, since otherwise it will produce confusing downstream
+	// errors.
+	peeker.AssertEmptyIncludeNewlinesStack()
+
+	return expr, diags
+}
+
+// LexConfig performs lexical analysis on the given buffer, treating it as a
+// whole HCL config file, and returns the resulting tokens.
+//
+// Only minimal validation is done during lexical analysis, so the returned
+// diagnostics may include errors about lexical issues such as bad character
+// encodings or unrecognized characters, but full parsing is required to
+// detect _all_ syntax errors.
+func LexConfig(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
+	tokens := scanTokens(src, filename, start, scanNormal)
+	diags := checkInvalidTokens(tokens)
+	return tokens, diags
+}
+
+// LexExpression performs lexical analysis on the given buffer, treating it as
+// a standalone HCL expression, and returns the resulting tokens.
+//
+// Only minimal validation is done during lexical analysis, so the returned
+// diagnostics may include errors about lexical issues such as bad character
+// encodings or unrecognized characters, but full parsing is required to
+// detect _all_ syntax errors.
+func LexExpression(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
+	// This is actually just the same thing as LexConfig, since configs
+	// and expressions lex in the same way.
+ tokens := scanTokens(src, filename, start, scanNormal) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// LexTemplate performs lexical analysis on the given buffer, treating it as a +// standalone HCL template, and returns the resulting tokens. +// +// Only minimal validation is done during lexical analysis, so the returned +// diagnostics may include errors about lexical issues such as bad character +// encodings or unrecognized characters, but full parsing is required to +// detect _all_ syntax errors. +func LexTemplate(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) { + tokens := scanTokens(src, filename, start, scanTemplate) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// ValidIdentifier tests if the given string could be a valid identifier in +// a native syntax expression. +// +// This is useful when accepting names from the user that will be used as +// variable or attribute names in the scope, to ensure that any name chosen +// will be traversable using the variable or attribute traversal syntax. +func ValidIdentifier(s string) bool { + // This is a kinda-expensive way to do something pretty simple, but it + // is easiest to do with our existing scanner-related infrastructure here + // and nobody should be validating identifiers in a tight loop. + tokens := scanTokens([]byte(s), "", hcl.Pos{}, scanIdentOnly) + return len(tokens) == 2 && tokens[0].Type == TokenIdent && tokens[1].Type == TokenEOF +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go new file mode 100644 index 00000000..de1f524c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go @@ -0,0 +1,301 @@ +// line 1 "scan_string_lit.rl" + +package hclsyntax + +// This file is generated from scan_string_lit.rl. DO NOT EDIT. 
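+
+// The scanner below exposes scanStringLit(data []byte, quoted bool) [][]byte,
+// which splits the body of a string literal into segments at escape and
+// template sequences. A minimal usage sketch, with illustrative input only:
+//
+//	for _, seg := range scanStringLit([]byte(`a$${b}\n`), true) {
+//		// seg is either plain literal text or a marker such as "\n",
+//		// "$${" or "%%{" for the caller to interpret
+//	}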
+ +// line 9 "scan_string_lit.go" +var _hclstrtok_actions []byte = []byte{ + 0, 1, 0, 1, 1, 2, 1, 0, +} + +var _hclstrtok_key_offsets []byte = []byte{ + 0, 0, 2, 4, 6, 10, 14, 18, + 22, 27, 31, 36, 41, 46, 51, 57, + 62, 74, 85, 96, 107, 118, 129, 140, + 151, +} + +var _hclstrtok_trans_keys []byte = []byte{ + 128, 191, 128, 191, 128, 191, 10, 13, + 36, 37, 10, 13, 36, 37, 10, 13, + 36, 37, 10, 13, 36, 37, 10, 13, + 36, 37, 123, 10, 13, 36, 37, 10, + 13, 36, 37, 92, 10, 13, 36, 37, + 92, 10, 13, 36, 37, 92, 10, 13, + 36, 37, 92, 10, 13, 36, 37, 92, + 123, 10, 13, 36, 37, 92, 85, 117, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 36, 37, 92, 48, + 57, 65, 70, 97, 102, 10, 13, 36, + 37, 92, 48, 57, 65, 70, 97, 102, + 10, 13, 36, 37, 92, 48, 57, 65, + 70, 97, 102, 10, 13, 36, 37, 92, + 48, 57, 65, 70, 97, 102, 10, 13, + 36, 37, 92, 48, 57, 65, 70, 97, + 102, 10, 13, 36, 37, 92, 48, 57, + 65, 70, 97, 102, 10, 13, 36, 37, + 92, 48, 57, 65, 70, 97, 102, 10, + 13, 36, 37, 92, 48, 57, 65, 70, + 97, 102, +} + +var _hclstrtok_single_lengths []byte = []byte{ + 0, 0, 0, 0, 4, 4, 4, 4, + 5, 4, 5, 5, 5, 5, 6, 5, + 2, 5, 5, 5, 5, 5, 5, 5, + 5, +} + +var _hclstrtok_range_lengths []byte = []byte{ + 0, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 5, 3, 3, 3, 3, 3, 3, 3, + 3, +} + +var _hclstrtok_index_offsets []byte = []byte{ + 0, 0, 2, 4, 6, 11, 16, 21, + 26, 32, 37, 43, 49, 55, 61, 68, + 74, 82, 91, 100, 109, 118, 127, 136, + 145, +} + +var _hclstrtok_indicies []byte = []byte{ + 0, 1, 2, 1, 3, 1, 5, 6, + 7, 8, 4, 10, 11, 12, 13, 9, + 14, 11, 12, 13, 9, 10, 11, 15, + 13, 9, 10, 11, 12, 13, 14, 9, + 10, 11, 12, 15, 9, 17, 18, 19, + 20, 21, 16, 23, 24, 25, 26, 27, + 22, 0, 24, 25, 26, 27, 22, 23, + 24, 28, 26, 27, 22, 23, 24, 25, + 26, 27, 0, 22, 23, 24, 25, 28, + 27, 22, 29, 30, 22, 2, 3, 31, + 22, 0, 23, 24, 25, 26, 27, 32, + 32, 32, 22, 23, 24, 25, 26, 27, + 33, 33, 33, 22, 23, 24, 25, 26, + 27, 34, 34, 34, 22, 23, 24, 25, + 26, 27, 30, 30, 30, 22, 23, 24, + 25, 26, 27, 35, 35, 35, 22, 23, + 24, 25, 26, 27, 36, 36, 36, 22, + 23, 24, 25, 26, 27, 37, 37, 37, + 22, 23, 24, 25, 26, 27, 0, 0, + 0, 22, +} + +var _hclstrtok_trans_targs []byte = []byte{ + 11, 0, 1, 2, 4, 5, 6, 7, + 9, 4, 5, 6, 7, 9, 5, 8, + 10, 11, 12, 13, 15, 16, 10, 11, + 12, 13, 15, 16, 14, 17, 21, 3, + 18, 19, 20, 22, 23, 24, +} + +var _hclstrtok_trans_actions []byte = []byte{ + 0, 0, 0, 0, 0, 1, 1, 1, + 1, 3, 5, 5, 5, 5, 0, 0, + 0, 1, 1, 1, 1, 1, 3, 5, + 5, 5, 5, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hclstrtok_eof_actions []byte = []byte{ + 0, 0, 0, 0, 0, 3, 3, 3, + 3, 3, 0, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, +} + +const hclstrtok_start int = 4 +const hclstrtok_first_final int = 4 +const hclstrtok_error int = 0 + +const hclstrtok_en_quoted int = 10 +const hclstrtok_en_unquoted int = 4 + +// line 10 "scan_string_lit.rl" + +func scanStringLit(data []byte, quoted bool) [][]byte { + var ret [][]byte + + // line 61 "scan_string_lit.rl" + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + eof := pe + + var cs int // current state + switch { + case quoted: + cs = hclstrtok_en_quoted + default: + cs = hclstrtok_en_unquoted + } + + // Make Go compiler happy + _ = ts + _ = eof + + /*token := func () { + ret = append(ret, data[ts:te]) + }*/ + + // line 154 "scan_string_lit.go" + { + } + + // line 158 "scan_string_lit.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 
{ + goto _out + } + _resume: + _keys = int(_hclstrtok_key_offsets[cs]) + _trans = int(_hclstrtok_index_offsets[cs]) + + _klen = int(_hclstrtok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _hclstrtok_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _hclstrtok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_hclstrtok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _hclstrtok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _hclstrtok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_hclstrtok_indicies[_trans]) + cs = int(_hclstrtok_trans_targs[_trans]) + + if _hclstrtok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_hclstrtok_trans_actions[_trans]) + _nacts = uint(_hclstrtok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hclstrtok_actions[_acts-1] { + case 0: + // line 40 "scan_string_lit.rl" + + // If te is behind p then we've skipped over some literal + // characters which we must now return. + if te < p { + ret = append(ret, data[te:p]) + } + ts = p + + case 1: + // line 48 "scan_string_lit.rl" + + te = p + ret = append(ret, data[ts:te]) + + // line 255 "scan_string_lit.go" + } + } + + _again: + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + __acts := _hclstrtok_eof_actions[cs] + __nacts := uint(_hclstrtok_actions[__acts]) + __acts++ + for ; __nacts > 0; __nacts-- { + __acts++ + switch _hclstrtok_actions[__acts-1] { + case 1: + // line 48 "scan_string_lit.rl" + + te = p + ret = append(ret, data[ts:te]) + + // line 281 "scan_string_lit.go" + } + } + } + + _out: + { + } + } + + // line 89 "scan_string_lit.rl" + + if te < p { + // Collect any leftover literal characters at the end of the input + ret = append(ret, data[te:p]) + } + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // tolerate it and let the caller deal with it. + if cs < hclstrtok_first_final { + ret = append(ret, data[p:len(data)]) + } + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl new file mode 100644 index 00000000..f8ac1175 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl @@ -0,0 +1,105 @@ + +package hclsyntax + +// This file is generated from scan_string_lit.rl. DO NOT EDIT. +%%{ + # (except you are actually in scan_string_lit.rl here, so edit away!) + + machine hclstrtok; + write data; +}%% + +func scanStringLit(data []byte, quoted bool) [][]byte { + var ret [][]byte + + %%{ + include UnicodeDerived "unicode_derived.rl"; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . 
UTF8Cont
+		);
+		BadUTF8 = any - AnyUTF8;
+
+		Hex = ('0'..'9' | 'a'..'f' | 'A'..'F');
+
+		# Our goal with these patterns is to capture user intent as best as
+		# possible, even if the input is invalid. The caller will then verify
+		# whether each token is valid and generate suitable error messages
+		# if not.
+		UnicodeEscapeShort = "\\u" . Hex{0,4};
+		UnicodeEscapeLong = "\\U" . Hex{0,8};
+		UnicodeEscape = (UnicodeEscapeShort | UnicodeEscapeLong);
+		SimpleEscape = "\\" . (AnyUTF8 - ('U'|'u'))?;
+		TemplateEscape = ("$" . ("$" . ("{"?))?) | ("%" . ("%" . ("{"?))?);
+		Newline = ("\r\n" | "\r" | "\n");
+
+		action Begin {
+			// If te is behind p then we've skipped over some literal
+			// characters which we must now return.
+			if te < p {
+				ret = append(ret, data[te:p])
+			}
+			ts = p;
+		}
+		action End {
+			te = p;
+			ret = append(ret, data[ts:te]);
+		}
+
+		QuotedToken = (UnicodeEscape | SimpleEscape | TemplateEscape | Newline) >Begin %End;
+		UnquotedToken = (TemplateEscape | Newline) >Begin %End;
+		QuotedLiteral = (any - ("\\" | "$" | "%" | "\r" | "\n"));
+		UnquotedLiteral = (any - ("$" | "%" | "\r" | "\n"));
+
+		quoted := (QuotedToken | QuotedLiteral)**;
+		unquoted := (UnquotedToken | UnquotedLiteral)**;
+
+	}%%
+
+	// Ragel state
+	p := 0          // "Pointer" into data
+	pe := len(data) // End-of-data "pointer"
+	ts := 0
+	te := 0
+	eof := pe
+
+	var cs int // current state
+	switch {
+	case quoted:
+		cs = hclstrtok_en_quoted
+	default:
+		cs = hclstrtok_en_unquoted
+	}
+
+	// Make Go compiler happy
+	_ = ts
+	_ = eof
+
+	/*token := func () {
+		ret = append(ret, data[ts:te])
+	}*/
+
+	%%{
+		write init nocs;
+		write exec;
+	}%%
+
+	if te < p {
+		// Collect any leftover literal characters at the end of the input
+		ret = append(ret, data[te:p])
+	}
+
+	// If we fall out here without being in a final state then we've
+	// encountered something that the scanner can't match, which should
+	// be impossible (the scanner matches all bytes _somehow_) but we'll
+	// tolerate it and let the caller deal with it.
+	if cs < hclstrtok_first_final {
+		ret = append(ret, data[p:len(data)])
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
new file mode 100644
index 00000000..395e9c1c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
@@ -0,0 +1,5443 @@
+// line 1 "scan_tokens.rl"
+
+package hclsyntax
+
+import (
+	"bytes"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// This file is generated from scan_tokens.rl. DO NOT EDIT.
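+
+// The tables that follow drive the generated lexer using the same layout as
+// the string-literal scanner above: the key offset and transition key tables
+// describe, per state, which input bytes match, while the transition target
+// and action tables give the next state and any token-emitting actions to
+// run for each match.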
+ +// line 15 "scan_tokens.go" +var _hcltok_actions []byte = []byte{ + 0, 1, 0, 1, 1, 1, 2, 1, 3, + 1, 4, 1, 6, 1, 7, 1, 8, + 1, 9, 1, 10, 1, 11, 1, 12, + 1, 13, 1, 14, 1, 15, 1, 16, + 1, 17, 1, 18, 1, 19, 1, 22, + 1, 23, 1, 24, 1, 25, 1, 26, + 1, 27, 1, 28, 1, 29, 1, 30, + 1, 31, 1, 34, 1, 35, 1, 36, + 1, 37, 1, 38, 1, 39, 1, 40, + 1, 41, 1, 42, 1, 43, 1, 46, + 1, 47, 1, 48, 1, 49, 1, 50, + 1, 51, 1, 52, 1, 58, 1, 59, + 1, 60, 1, 61, 1, 62, 1, 63, + 1, 64, 1, 65, 1, 66, 1, 67, + 1, 68, 1, 69, 1, 70, 1, 71, + 1, 72, 1, 73, 1, 74, 1, 75, + 1, 76, 1, 77, 1, 78, 1, 79, + 1, 80, 1, 81, 1, 82, 1, 83, + 1, 84, 1, 85, 1, 86, 1, 87, + 2, 0, 15, 2, 1, 15, 2, 2, + 24, 2, 2, 28, 2, 3, 24, 2, + 3, 28, 2, 4, 5, 2, 7, 0, + 2, 7, 1, 2, 7, 20, 2, 7, + 21, 2, 7, 32, 2, 7, 33, 2, + 7, 44, 2, 7, 45, 2, 7, 53, + 2, 7, 54, 2, 7, 55, 2, 7, + 56, 2, 7, 57, 3, 7, 2, 20, + 3, 7, 3, 20, +} + +var _hcltok_key_offsets []int16 = []int16{ + 0, 0, 1, 2, 3, 5, 10, 14, + 16, 58, 99, 145, 146, 150, 156, 156, + 158, 160, 169, 175, 182, 183, 186, 187, + 191, 196, 205, 209, 213, 221, 223, 225, + 227, 230, 262, 264, 266, 270, 274, 277, + 288, 301, 320, 333, 349, 361, 377, 392, + 413, 423, 435, 446, 460, 475, 485, 497, + 506, 518, 520, 524, 545, 554, 564, 570, + 576, 577, 626, 628, 632, 634, 640, 647, + 655, 662, 665, 671, 675, 679, 681, 685, + 689, 693, 699, 707, 715, 721, 723, 727, + 729, 735, 739, 743, 747, 751, 756, 763, + 769, 771, 773, 777, 779, 785, 789, 793, + 803, 808, 822, 837, 839, 847, 849, 854, + 868, 873, 875, 879, 880, 884, 890, 896, + 906, 916, 927, 935, 938, 941, 945, 949, + 951, 954, 954, 957, 959, 989, 991, 993, + 997, 1002, 1006, 1011, 1013, 1015, 1017, 1026, + 1030, 1034, 1040, 1042, 1050, 1058, 1070, 1073, + 1079, 1083, 1085, 1089, 1109, 1111, 1113, 1124, + 1130, 1132, 1134, 1136, 1140, 1146, 1152, 1154, + 1159, 1163, 1165, 1173, 1191, 1231, 1241, 1245, + 1247, 1249, 1250, 1254, 1258, 1262, 1266, 1270, + 1275, 1279, 1283, 1287, 1289, 1291, 1295, 1305, + 1309, 1311, 1315, 1319, 1323, 1336, 1338, 1340, + 1344, 1346, 1350, 1352, 1354, 1384, 1388, 1392, + 1396, 1399, 1406, 1411, 1422, 1426, 1442, 1456, + 1460, 1465, 1469, 1473, 1479, 1481, 1487, 1489, + 1493, 1495, 1501, 1506, 1511, 1521, 1523, 1525, + 1529, 1533, 1535, 1548, 1550, 1554, 1558, 1566, + 1568, 1572, 1574, 1575, 1578, 1583, 1585, 1587, + 1591, 1593, 1597, 1603, 1623, 1629, 1635, 1637, + 1638, 1648, 1649, 1657, 1664, 1666, 1669, 1671, + 1673, 1675, 1680, 1684, 1688, 1693, 1703, 1713, + 1717, 1721, 1735, 1761, 1771, 1773, 1775, 1778, + 1780, 1783, 1785, 1789, 1791, 1792, 1796, 1798, + 1801, 1808, 1816, 1818, 1820, 1824, 1826, 1832, + 1843, 1846, 1848, 1852, 1857, 1887, 1892, 1894, + 1897, 1902, 1916, 1923, 1937, 1942, 1955, 1959, + 1972, 1977, 1995, 1996, 2005, 2009, 2021, 2026, + 2033, 2040, 2047, 2049, 2053, 2075, 2080, 2081, + 2085, 2087, 2137, 2140, 2151, 2155, 2157, 2163, + 2169, 2171, 2176, 2178, 2182, 2184, 2185, 2187, + 2189, 2195, 2197, 2199, 2203, 2209, 2222, 2224, + 2230, 2234, 2242, 2253, 2261, 2264, 2294, 2300, + 2303, 2308, 2310, 2314, 2318, 2322, 2324, 2331, + 2333, 2342, 2349, 2357, 2359, 2379, 2391, 2395, + 2397, 2415, 2454, 2456, 2460, 2462, 2469, 2473, + 2501, 2503, 2505, 2507, 2509, 2512, 2514, 2518, + 2522, 2524, 2527, 2529, 2531, 2534, 2536, 2538, + 2539, 2541, 2543, 2547, 2551, 2554, 2567, 2569, + 2575, 2579, 2581, 2585, 2589, 2603, 2606, 2615, + 2617, 2621, 2627, 2627, 2629, 2631, 2640, 2646, + 2653, 2654, 2657, 2658, 2662, 2667, 2676, 2680, + 2684, 2692, 2694, 2696, 2698, 2701, 2733, 2735, + 2737, 2741, 2745, 2748, 2759, 
2772, 2791, 2804, + 2820, 2832, 2848, 2863, 2884, 2894, 2906, 2917, + 2931, 2946, 2956, 2968, 2977, 2989, 2991, 2995, + 3016, 3025, 3035, 3041, 3047, 3048, 3097, 3099, + 3103, 3105, 3111, 3118, 3126, 3133, 3136, 3142, + 3146, 3150, 3152, 3156, 3160, 3164, 3170, 3178, + 3186, 3192, 3194, 3198, 3200, 3206, 3210, 3214, + 3218, 3222, 3227, 3234, 3240, 3242, 3244, 3248, + 3250, 3256, 3260, 3264, 3274, 3279, 3293, 3308, + 3310, 3318, 3320, 3325, 3339, 3344, 3346, 3350, + 3351, 3355, 3361, 3367, 3377, 3387, 3398, 3406, + 3409, 3412, 3416, 3420, 3422, 3425, 3425, 3428, + 3430, 3460, 3462, 3464, 3468, 3473, 3477, 3482, + 3484, 3486, 3488, 3497, 3501, 3505, 3511, 3513, + 3521, 3529, 3541, 3544, 3550, 3554, 3556, 3560, + 3580, 3582, 3584, 3595, 3601, 3603, 3605, 3607, + 3611, 3617, 3623, 3625, 3630, 3634, 3636, 3644, + 3662, 3702, 3712, 3716, 3718, 3720, 3721, 3725, + 3729, 3733, 3737, 3741, 3746, 3750, 3754, 3758, + 3760, 3762, 3766, 3776, 3780, 3782, 3786, 3790, + 3794, 3807, 3809, 3811, 3815, 3817, 3821, 3823, + 3825, 3855, 3859, 3863, 3867, 3870, 3877, 3882, + 3893, 3897, 3913, 3927, 3931, 3936, 3940, 3944, + 3950, 3952, 3958, 3960, 3964, 3966, 3972, 3977, + 3982, 3992, 3994, 3996, 4000, 4004, 4006, 4019, + 4021, 4025, 4029, 4037, 4039, 4043, 4045, 4046, + 4049, 4054, 4056, 4058, 4062, 4064, 4068, 4074, + 4094, 4100, 4106, 4108, 4109, 4119, 4120, 4128, + 4135, 4137, 4140, 4142, 4144, 4146, 4151, 4155, + 4159, 4164, 4174, 4184, 4188, 4192, 4206, 4232, + 4242, 4244, 4246, 4249, 4251, 4254, 4256, 4260, + 4262, 4263, 4267, 4269, 4271, 4278, 4282, 4289, + 4296, 4305, 4321, 4333, 4351, 4362, 4374, 4382, + 4400, 4408, 4438, 4441, 4451, 4461, 4473, 4484, + 4493, 4506, 4518, 4522, 4528, 4555, 4564, 4567, + 4572, 4578, 4583, 4604, 4608, 4614, 4614, 4621, + 4630, 4638, 4641, 4645, 4651, 4657, 4660, 4664, + 4671, 4677, 4686, 4695, 4699, 4703, 4707, 4711, + 4718, 4722, 4726, 4736, 4742, 4746, 4752, 4756, + 4759, 4765, 4771, 4783, 4787, 4791, 4801, 4805, + 4816, 4818, 4820, 4824, 4836, 4841, 4865, 4869, + 4875, 4897, 4906, 4910, 4913, 4914, 4922, 4930, + 4936, 4946, 4953, 4971, 4974, 4977, 4985, 4991, + 4995, 4999, 5003, 5009, 5017, 5022, 5028, 5032, + 5040, 5047, 5051, 5058, 5064, 5072, 5080, 5086, + 5092, 5103, 5107, 5119, 5128, 5145, 5162, 5165, + 5169, 5171, 5177, 5179, 5183, 5198, 5202, 5206, + 5210, 5214, 5218, 5220, 5226, 5231, 5235, 5241, + 5248, 5251, 5269, 5271, 5316, 5322, 5328, 5332, + 5336, 5342, 5346, 5352, 5358, 5365, 5367, 5373, + 5379, 5383, 5387, 5395, 5408, 5414, 5421, 5429, + 5435, 5444, 5450, 5454, 5459, 5463, 5471, 5475, + 5479, 5509, 5515, 5521, 5527, 5533, 5540, 5546, + 5553, 5558, 5568, 5572, 5579, 5585, 5589, 5596, + 5600, 5606, 5609, 5613, 5617, 5621, 5625, 5630, + 5635, 5639, 5650, 5654, 5658, 5664, 5672, 5676, + 5693, 5697, 5703, 5713, 5719, 5725, 5728, 5733, + 5742, 5746, 5750, 5756, 5760, 5766, 5774, 5792, + 5793, 5803, 5804, 5813, 5821, 5823, 5826, 5828, + 5830, 5832, 5837, 5850, 5854, 5869, 5898, 5909, + 5911, 5915, 5919, 5924, 5928, 5930, 5937, 5941, + 5949, 5953, 5954, 5955, 5957, 5959, 5961, 5963, + 5965, 5966, 5967, 5968, 5970, 5972, 5974, 5975, + 5976, 5977, 5978, 5980, 5982, 5984, 5985, 5986, + 5990, 5996, 5996, 5998, 6000, 6009, 6015, 6022, + 6023, 6026, 6027, 6031, 6036, 6045, 6049, 6053, + 6061, 6063, 6065, 6067, 6070, 6102, 6104, 6106, + 6110, 6114, 6117, 6128, 6141, 6160, 6173, 6189, + 6201, 6217, 6232, 6253, 6263, 6275, 6286, 6300, + 6315, 6325, 6337, 6346, 6358, 6360, 6364, 6385, + 6394, 6404, 6410, 6416, 6417, 6466, 6468, 6472, + 6474, 6480, 6487, 6495, 6502, 
6505, 6511, 6515, + 6519, 6521, 6525, 6529, 6533, 6539, 6547, 6555, + 6561, 6563, 6567, 6569, 6575, 6579, 6583, 6587, + 6591, 6596, 6603, 6609, 6611, 6613, 6617, 6619, + 6625, 6629, 6633, 6643, 6648, 6662, 6677, 6679, + 6687, 6689, 6694, 6708, 6713, 6715, 6719, 6720, + 6724, 6730, 6736, 6746, 6756, 6767, 6775, 6778, + 6781, 6785, 6789, 6791, 6794, 6794, 6797, 6799, + 6829, 6831, 6833, 6837, 6842, 6846, 6851, 6853, + 6855, 6857, 6866, 6870, 6874, 6880, 6882, 6890, + 6898, 6910, 6913, 6919, 6923, 6925, 6929, 6949, + 6951, 6953, 6964, 6970, 6972, 6974, 6976, 6980, + 6986, 6992, 6994, 6999, 7003, 7005, 7013, 7031, + 7071, 7081, 7085, 7087, 7089, 7090, 7094, 7098, + 7102, 7106, 7110, 7115, 7119, 7123, 7127, 7129, + 7131, 7135, 7145, 7149, 7151, 7155, 7159, 7163, + 7176, 7178, 7180, 7184, 7186, 7190, 7192, 7194, + 7224, 7228, 7232, 7236, 7239, 7246, 7251, 7262, + 7266, 7282, 7296, 7300, 7305, 7309, 7313, 7319, + 7321, 7327, 7329, 7333, 7335, 7341, 7346, 7351, + 7361, 7363, 7365, 7369, 7373, 7375, 7388, 7390, + 7394, 7398, 7406, 7408, 7412, 7414, 7415, 7418, + 7423, 7425, 7427, 7431, 7433, 7437, 7443, 7463, + 7469, 7475, 7477, 7478, 7488, 7489, 7497, 7504, + 7506, 7509, 7511, 7513, 7515, 7520, 7524, 7528, + 7533, 7543, 7553, 7557, 7561, 7575, 7601, 7611, + 7613, 7615, 7618, 7620, 7623, 7625, 7629, 7631, + 7632, 7636, 7638, 7640, 7647, 7651, 7658, 7665, + 7674, 7690, 7702, 7720, 7731, 7743, 7751, 7769, + 7777, 7807, 7810, 7820, 7830, 7842, 7853, 7862, + 7875, 7887, 7891, 7897, 7924, 7933, 7936, 7941, + 7947, 7952, 7973, 7977, 7983, 7983, 7990, 7999, + 8007, 8010, 8014, 8020, 8026, 8029, 8033, 8040, + 8046, 8055, 8064, 8068, 8072, 8076, 8080, 8087, + 8091, 8095, 8105, 8111, 8115, 8121, 8125, 8128, + 8134, 8140, 8152, 8156, 8160, 8170, 8174, 8185, + 8187, 8189, 8193, 8205, 8210, 8234, 8238, 8244, + 8266, 8275, 8279, 8282, 8283, 8291, 8299, 8305, + 8315, 8322, 8340, 8343, 8346, 8354, 8360, 8364, + 8368, 8372, 8378, 8386, 8391, 8397, 8401, 8409, + 8416, 8420, 8427, 8433, 8441, 8449, 8455, 8461, + 8472, 8476, 8488, 8497, 8514, 8531, 8534, 8538, + 8540, 8546, 8548, 8552, 8567, 8571, 8575, 8579, + 8583, 8587, 8589, 8595, 8600, 8604, 8610, 8617, + 8620, 8638, 8640, 8685, 8691, 8697, 8701, 8705, + 8711, 8715, 8721, 8727, 8734, 8736, 8742, 8748, + 8752, 8756, 8764, 8777, 8783, 8790, 8798, 8804, + 8813, 8819, 8823, 8828, 8832, 8840, 8844, 8848, + 8878, 8884, 8890, 8896, 8902, 8909, 8915, 8922, + 8927, 8937, 8941, 8948, 8954, 8958, 8965, 8969, + 8975, 8978, 8982, 8986, 8990, 8994, 8999, 9004, + 9008, 9019, 9023, 9027, 9033, 9041, 9045, 9062, + 9066, 9072, 9082, 9088, 9094, 9097, 9102, 9111, + 9115, 9119, 9125, 9129, 9135, 9143, 9161, 9162, + 9172, 9173, 9182, 9190, 9192, 9195, 9197, 9199, + 9201, 9206, 9219, 9223, 9238, 9267, 9278, 9280, + 9284, 9288, 9293, 9297, 9299, 9306, 9310, 9318, + 9322, 9398, 9400, 9401, 9402, 9403, 9404, 9405, + 9407, 9408, 9413, 9415, 9417, 9418, 9462, 9463, + 9464, 9466, 9471, 9475, 9475, 9477, 9479, 9490, + 9500, 9508, 9509, 9511, 9512, 9516, 9520, 9530, + 9534, 9541, 9552, 9559, 9563, 9569, 9580, 9612, + 9661, 9676, 9691, 9696, 9698, 9703, 9735, 9743, + 9745, 9767, 9789, 9791, 9807, 9823, 9839, 9855, + 9870, 9880, 9897, 9914, 9931, 9947, 9957, 9974, + 9990, 10006, 10022, 10038, 10054, 10070, 10086, 10087, + 10088, 10089, 10090, 10092, 10094, 10096, 10110, 10124, + 10138, 10152, 10153, 10154, 10156, 10158, 10160, 10174, + 10188, 10189, 10190, 10192, 10194, 10196, 10245, 10289, + 10291, 10296, 10300, 10300, 10302, 10304, 10315, 10325, + 10333, 10334, 10336, 10337, 10341, 10345, 
10355, 10359, + 10366, 10377, 10384, 10388, 10394, 10405, 10437, 10486, + 10501, 10516, 10521, 10523, 10528, 10560, 10568, 10570, + 10592, 10614, +} + +var _hcltok_trans_keys []byte = []byte{ + 10, 46, 42, 42, 47, 46, 69, 101, + 48, 57, 43, 45, 48, 57, 48, 57, + 45, 95, 194, 195, 198, 199, 203, 205, + 206, 207, 210, 212, 213, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 233, 234, 237, 239, 240, + 65, 90, 97, 122, 196, 202, 208, 218, + 229, 236, 95, 194, 195, 198, 199, 203, + 205, 206, 207, 210, 212, 213, 214, 215, + 216, 217, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 233, 234, 237, 239, + 240, 65, 90, 97, 122, 196, 202, 208, + 218, 229, 236, 10, 13, 45, 95, 194, + 195, 198, 199, 203, 204, 205, 206, 207, + 210, 212, 213, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 233, 234, 237, 239, 240, 243, 48, + 57, 65, 90, 97, 122, 196, 218, 229, + 236, 10, 170, 181, 183, 186, 128, 150, + 152, 182, 184, 255, 192, 255, 0, 127, + 173, 130, 133, 146, 159, 165, 171, 175, + 255, 181, 190, 184, 185, 192, 255, 140, + 134, 138, 142, 161, 163, 255, 182, 130, + 136, 137, 176, 151, 152, 154, 160, 190, + 136, 144, 192, 255, 135, 129, 130, 132, + 133, 144, 170, 176, 178, 144, 154, 160, + 191, 128, 169, 174, 255, 148, 169, 157, + 158, 189, 190, 192, 255, 144, 255, 139, + 140, 178, 255, 186, 128, 181, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 128, 173, + 128, 155, 160, 180, 182, 189, 148, 161, + 163, 255, 176, 164, 165, 132, 169, 177, + 141, 142, 145, 146, 179, 181, 186, 187, + 158, 133, 134, 137, 138, 143, 150, 152, + 155, 164, 165, 178, 255, 188, 129, 131, + 133, 138, 143, 144, 147, 168, 170, 176, + 178, 179, 181, 182, 184, 185, 190, 255, + 157, 131, 134, 137, 138, 142, 144, 146, + 152, 159, 165, 182, 255, 129, 131, 133, + 141, 143, 145, 147, 168, 170, 176, 178, + 179, 181, 185, 188, 255, 134, 138, 142, + 143, 145, 159, 164, 165, 176, 184, 186, + 255, 129, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 177, 128, 132, 135, 136, 139, 141, + 150, 151, 156, 157, 159, 163, 166, 175, + 156, 130, 131, 133, 138, 142, 144, 146, + 149, 153, 154, 158, 159, 163, 164, 168, + 170, 174, 185, 190, 191, 144, 151, 128, + 130, 134, 136, 138, 141, 166, 175, 128, + 131, 133, 140, 142, 144, 146, 168, 170, + 185, 189, 255, 133, 137, 151, 142, 148, + 155, 159, 164, 165, 176, 255, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 179, + 181, 185, 188, 191, 158, 128, 132, 134, + 136, 138, 141, 149, 150, 160, 163, 166, + 175, 177, 178, 129, 131, 133, 140, 142, + 144, 146, 186, 189, 255, 133, 137, 143, + 147, 152, 158, 164, 165, 176, 185, 192, + 255, 189, 130, 131, 133, 150, 154, 177, + 179, 187, 138, 150, 128, 134, 143, 148, + 152, 159, 166, 175, 178, 179, 129, 186, + 128, 142, 144, 153, 132, 138, 141, 165, + 167, 129, 130, 135, 136, 148, 151, 153, + 159, 161, 163, 170, 171, 173, 185, 187, + 189, 134, 128, 132, 136, 141, 144, 153, + 156, 159, 128, 181, 183, 185, 152, 153, + 160, 169, 190, 191, 128, 135, 137, 172, + 177, 191, 128, 132, 134, 151, 153, 188, + 134, 128, 129, 130, 131, 137, 138, 139, + 140, 141, 142, 143, 144, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 173, + 175, 176, 177, 178, 179, 181, 182, 183, + 188, 189, 190, 191, 132, 152, 172, 184, + 185, 187, 128, 191, 128, 137, 144, 255, + 158, 159, 134, 187, 136, 140, 142, 143, + 137, 151, 153, 142, 143, 158, 159, 
137, + 177, 142, 143, 182, 183, 191, 255, 128, + 130, 133, 136, 150, 152, 255, 145, 150, + 151, 155, 156, 160, 168, 178, 255, 128, + 143, 160, 255, 182, 183, 190, 255, 129, + 255, 173, 174, 192, 255, 129, 154, 160, + 255, 171, 173, 185, 255, 128, 140, 142, + 148, 160, 180, 128, 147, 160, 172, 174, + 176, 178, 179, 148, 150, 152, 155, 158, + 159, 170, 255, 139, 141, 144, 153, 160, + 255, 184, 255, 128, 170, 176, 255, 182, + 255, 128, 158, 160, 171, 176, 187, 134, + 173, 176, 180, 128, 171, 176, 255, 138, + 143, 155, 255, 128, 155, 160, 255, 159, + 189, 190, 192, 255, 167, 128, 137, 144, + 153, 176, 189, 140, 143, 154, 170, 180, + 255, 180, 255, 128, 183, 128, 137, 141, + 189, 128, 136, 144, 146, 148, 182, 184, + 185, 128, 181, 187, 191, 150, 151, 158, + 159, 152, 154, 156, 158, 134, 135, 142, + 143, 190, 255, 190, 128, 180, 182, 188, + 130, 132, 134, 140, 144, 147, 150, 155, + 160, 172, 178, 180, 182, 188, 128, 129, + 130, 131, 132, 133, 134, 176, 177, 178, + 179, 180, 181, 182, 183, 191, 255, 129, + 147, 149, 176, 178, 190, 192, 255, 144, + 156, 161, 144, 156, 165, 176, 130, 135, + 149, 164, 166, 168, 138, 147, 152, 157, + 170, 185, 188, 191, 142, 133, 137, 160, + 255, 137, 255, 128, 174, 176, 255, 159, + 165, 170, 180, 255, 167, 173, 128, 165, + 176, 255, 168, 174, 176, 190, 192, 255, + 128, 150, 160, 166, 168, 174, 176, 182, + 184, 190, 128, 134, 136, 142, 144, 150, + 152, 158, 160, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 145, 255, 133, + 135, 161, 175, 177, 181, 184, 188, 160, + 151, 152, 187, 192, 255, 133, 173, 177, + 255, 143, 159, 187, 255, 176, 191, 182, + 183, 184, 191, 192, 255, 150, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 255, 141, 255, 144, + 189, 141, 143, 172, 255, 191, 128, 175, + 180, 189, 151, 159, 162, 255, 175, 137, + 138, 184, 255, 183, 255, 168, 255, 128, + 179, 188, 134, 143, 154, 159, 184, 186, + 190, 255, 128, 173, 176, 255, 148, 159, + 189, 255, 129, 142, 154, 159, 191, 255, + 128, 182, 128, 141, 144, 153, 160, 182, + 186, 255, 128, 130, 155, 157, 160, 175, + 178, 182, 129, 134, 137, 142, 145, 150, + 160, 166, 168, 174, 176, 255, 155, 166, + 175, 128, 170, 172, 173, 176, 185, 158, + 159, 160, 255, 164, 175, 135, 138, 188, + 255, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 165, 186, 174, 175, 154, + 255, 190, 128, 134, 147, 151, 157, 168, + 170, 182, 184, 188, 128, 129, 131, 132, + 134, 255, 147, 255, 190, 255, 144, 145, + 136, 175, 188, 255, 128, 143, 160, 175, + 179, 180, 141, 143, 176, 180, 182, 255, + 189, 255, 191, 144, 153, 161, 186, 129, + 154, 166, 255, 191, 255, 130, 135, 138, + 143, 146, 151, 154, 156, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 161, 169, 128, + 129, 130, 131, 133, 135, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 152, 156, 157, 160, 161, 162, 163, + 164, 166, 168, 169, 170, 171, 172, 173, + 174, 176, 177, 153, 155, 178, 179, 128, + 139, 141, 166, 168, 186, 188, 189, 191, + 255, 142, 143, 158, 255, 187, 255, 128, + 180, 189, 128, 156, 160, 255, 145, 159, + 161, 255, 128, 159, 176, 255, 139, 143, + 187, 255, 128, 157, 160, 255, 144, 132, + 135, 150, 255, 158, 159, 170, 175, 148, + 151, 188, 255, 128, 167, 176, 255, 164, + 255, 183, 255, 128, 149, 160, 167, 136, + 188, 128, 133, 138, 181, 183, 184, 191, + 255, 150, 159, 183, 255, 128, 158, 160, + 178, 180, 181, 128, 149, 160, 185, 128, + 183, 190, 191, 191, 
128, 131, 133, 134, + 140, 147, 149, 151, 153, 179, 184, 186, + 160, 188, 128, 156, 128, 135, 137, 166, + 128, 181, 128, 149, 160, 178, 128, 145, + 128, 178, 129, 130, 131, 132, 133, 135, + 136, 138, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 155, 156, + 162, 163, 171, 176, 177, 178, 128, 134, + 135, 165, 176, 190, 144, 168, 176, 185, + 128, 180, 182, 191, 182, 144, 179, 155, + 133, 137, 141, 143, 157, 255, 190, 128, + 145, 147, 183, 136, 128, 134, 138, 141, + 143, 157, 159, 168, 176, 255, 171, 175, + 186, 255, 128, 131, 133, 140, 143, 144, + 147, 168, 170, 176, 178, 179, 181, 185, + 188, 191, 144, 151, 128, 132, 135, 136, + 139, 141, 157, 163, 166, 172, 176, 180, + 128, 138, 144, 153, 134, 136, 143, 154, + 255, 128, 181, 184, 255, 129, 151, 158, + 255, 129, 131, 133, 143, 154, 255, 128, + 137, 128, 153, 157, 171, 176, 185, 160, + 255, 170, 190, 192, 255, 128, 184, 128, + 136, 138, 182, 184, 191, 128, 144, 153, + 178, 255, 168, 144, 145, 183, 255, 128, + 142, 145, 149, 129, 141, 144, 146, 147, + 148, 175, 255, 132, 255, 128, 144, 129, + 143, 144, 153, 145, 152, 135, 255, 160, + 168, 169, 171, 172, 173, 174, 188, 189, + 190, 191, 161, 167, 185, 255, 128, 158, + 160, 169, 144, 173, 176, 180, 128, 131, + 144, 153, 163, 183, 189, 255, 144, 255, + 133, 143, 191, 255, 143, 159, 160, 128, + 129, 255, 159, 160, 171, 172, 255, 173, + 255, 179, 255, 128, 176, 177, 178, 128, + 129, 171, 175, 189, 255, 128, 136, 144, + 153, 157, 158, 133, 134, 137, 144, 145, + 146, 147, 148, 149, 154, 155, 156, 157, + 158, 159, 168, 169, 170, 150, 153, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 149, 157, 173, + 186, 188, 160, 161, 163, 164, 167, 168, + 132, 134, 149, 157, 186, 139, 140, 191, + 255, 134, 128, 132, 138, 144, 146, 255, + 166, 167, 129, 155, 187, 149, 181, 143, + 175, 137, 169, 131, 140, 141, 192, 255, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 160, 163, + 164, 165, 184, 185, 186, 161, 162, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 133, 143, 151, 255, 139, 143, 154, + 255, 164, 167, 185, 187, 128, 131, 133, + 159, 161, 162, 169, 178, 180, 183, 130, + 135, 137, 139, 148, 151, 153, 155, 157, + 159, 164, 190, 141, 143, 145, 146, 161, + 162, 167, 170, 172, 178, 180, 183, 185, + 188, 128, 137, 139, 155, 161, 163, 165, + 169, 171, 187, 155, 156, 151, 255, 156, + 157, 160, 181, 255, 186, 187, 255, 162, + 255, 160, 168, 161, 167, 158, 255, 160, + 132, 135, 133, 134, 176, 255, 170, 181, + 186, 191, 176, 180, 182, 183, 186, 189, + 134, 140, 136, 138, 142, 161, 163, 255, + 130, 137, 136, 255, 144, 170, 176, 178, + 160, 191, 128, 138, 174, 175, 177, 255, + 148, 150, 164, 167, 173, 176, 185, 189, + 190, 192, 255, 144, 146, 175, 141, 255, + 166, 176, 178, 255, 186, 138, 170, 180, + 181, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 184, + 186, 187, 188, 189, 190, 183, 185, 154, + 164, 168, 128, 149, 128, 152, 189, 132, + 185, 144, 152, 161, 177, 255, 169, 177, + 129, 132, 141, 142, 145, 146, 179, 181, + 186, 188, 190, 255, 142, 156, 157, 159, + 161, 176, 177, 133, 138, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 182, 184, + 185, 158, 153, 156, 178, 180, 189, 133, + 141, 143, 145, 147, 168, 170, 176, 178, + 179, 181, 185, 144, 185, 160, 161, 189, + 133, 140, 143, 144, 147, 168, 170, 176, + 178, 179, 181, 185, 177, 156, 157, 159, + 161, 131, 156, 133, 138, 142, 144, 146, + 149, 153, 154, 158, 159, 163, 164, 168, + 170, 174, 185, 144, 189, 133, 140, 142, + 144, 
146, 168, 170, 185, 152, 154, 160, + 161, 128, 189, 133, 140, 142, 144, 146, + 168, 170, 179, 181, 185, 158, 160, 161, + 177, 178, 189, 133, 140, 142, 144, 146, + 186, 142, 148, 150, 159, 161, 186, 191, + 189, 133, 150, 154, 177, 179, 187, 128, + 134, 129, 176, 178, 179, 132, 138, 141, + 165, 167, 189, 129, 130, 135, 136, 148, + 151, 153, 159, 161, 163, 170, 171, 173, + 176, 178, 179, 134, 128, 132, 156, 159, + 128, 128, 135, 137, 172, 136, 140, 128, + 129, 130, 131, 137, 138, 139, 140, 141, + 142, 143, 144, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 184, 188, 189, 190, 191, 132, 152, 185, + 187, 191, 128, 170, 161, 144, 149, 154, + 157, 165, 166, 174, 176, 181, 255, 130, + 141, 143, 159, 155, 255, 128, 140, 142, + 145, 160, 177, 128, 145, 160, 172, 174, + 176, 151, 156, 170, 128, 168, 176, 255, + 138, 255, 128, 150, 160, 255, 149, 255, + 167, 133, 179, 133, 139, 131, 160, 174, + 175, 186, 255, 166, 255, 128, 163, 141, + 143, 154, 189, 169, 172, 174, 177, 181, + 182, 129, 130, 132, 133, 134, 176, 177, + 178, 179, 180, 181, 182, 183, 177, 191, + 165, 170, 175, 177, 180, 255, 168, 174, + 176, 255, 128, 134, 136, 142, 144, 150, + 152, 158, 128, 129, 130, 131, 132, 133, + 134, 135, 144, 145, 255, 133, 135, 161, + 169, 177, 181, 184, 188, 160, 151, 154, + 128, 146, 147, 148, 152, 153, 154, 155, + 156, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 129, 255, 141, 143, + 160, 169, 172, 255, 191, 128, 174, 130, + 134, 139, 163, 255, 130, 179, 187, 189, + 178, 183, 138, 165, 176, 255, 135, 159, + 189, 255, 132, 178, 143, 160, 164, 166, + 175, 186, 190, 128, 168, 186, 128, 130, + 132, 139, 160, 182, 190, 255, 176, 178, + 180, 183, 184, 190, 255, 128, 130, 155, + 157, 160, 170, 178, 180, 128, 162, 164, + 169, 171, 172, 173, 174, 175, 180, 181, + 182, 183, 185, 186, 187, 188, 189, 190, + 191, 165, 179, 157, 190, 128, 134, 147, + 151, 159, 168, 170, 182, 184, 188, 176, + 180, 182, 255, 161, 186, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 161, 169, 128, + 129, 130, 131, 133, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 153, 155, 178, 179, 145, 255, + 139, 143, 182, 255, 158, 175, 128, 144, + 147, 149, 151, 153, 179, 128, 135, 137, + 164, 128, 130, 131, 132, 133, 134, 135, + 136, 138, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 156, 162, + 163, 171, 176, 177, 178, 131, 183, 131, + 175, 144, 168, 131, 166, 182, 144, 178, + 131, 178, 154, 156, 129, 132, 128, 145, + 147, 171, 159, 255, 144, 157, 161, 135, + 138, 128, 175, 135, 132, 133, 128, 174, + 152, 155, 132, 128, 170, 128, 153, 160, + 190, 192, 255, 128, 136, 138, 174, 128, + 178, 255, 160, 168, 169, 171, 172, 173, + 174, 188, 189, 190, 191, 161, 167, 144, + 173, 128, 131, 163, 183, 189, 255, 133, + 143, 145, 255, 147, 159, 128, 176, 177, + 178, 128, 136, 144, 153, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 150, 153, 131, 140, 255, 160, 163, + 164, 165, 184, 185, 186, 161, 162, 133, + 255, 170, 181, 183, 186, 128, 150, 152, + 182, 184, 255, 192, 255, 128, 255, 173, + 130, 133, 146, 159, 165, 171, 175, 255, + 181, 190, 184, 185, 192, 255, 140, 134, + 138, 142, 161, 163, 255, 182, 130, 136, + 137, 176, 151, 152, 154, 160, 190, 136, + 144, 192, 255, 135, 129, 130, 132, 133, + 144, 170, 176, 178, 144, 154, 
160, 191, + 128, 169, 174, 255, 148, 169, 157, 158, + 189, 190, 192, 255, 144, 255, 139, 140, + 178, 255, 186, 128, 181, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 128, 173, 128, + 155, 160, 180, 182, 189, 148, 161, 163, + 255, 176, 164, 165, 132, 169, 177, 141, + 142, 145, 146, 179, 181, 186, 187, 158, + 133, 134, 137, 138, 143, 150, 152, 155, + 164, 165, 178, 255, 188, 129, 131, 133, + 138, 143, 144, 147, 168, 170, 176, 178, + 179, 181, 182, 184, 185, 190, 255, 157, + 131, 134, 137, 138, 142, 144, 146, 152, + 159, 165, 182, 255, 129, 131, 133, 141, + 143, 145, 147, 168, 170, 176, 178, 179, + 181, 185, 188, 255, 134, 138, 142, 143, + 145, 159, 164, 165, 176, 184, 186, 255, + 129, 131, 133, 140, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 185, 188, 191, + 177, 128, 132, 135, 136, 139, 141, 150, + 151, 156, 157, 159, 163, 166, 175, 156, + 130, 131, 133, 138, 142, 144, 146, 149, + 153, 154, 158, 159, 163, 164, 168, 170, + 174, 185, 190, 191, 144, 151, 128, 130, + 134, 136, 138, 141, 166, 175, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 185, + 189, 255, 133, 137, 151, 142, 148, 155, + 159, 164, 165, 176, 255, 128, 131, 133, + 140, 142, 144, 146, 168, 170, 179, 181, + 185, 188, 191, 158, 128, 132, 134, 136, + 138, 141, 149, 150, 160, 163, 166, 175, + 177, 178, 129, 131, 133, 140, 142, 144, + 146, 186, 189, 255, 133, 137, 143, 147, + 152, 158, 164, 165, 176, 185, 192, 255, + 189, 130, 131, 133, 150, 154, 177, 179, + 187, 138, 150, 128, 134, 143, 148, 152, + 159, 166, 175, 178, 179, 129, 186, 128, + 142, 144, 153, 132, 138, 141, 165, 167, + 129, 130, 135, 136, 148, 151, 153, 159, + 161, 163, 170, 171, 173, 185, 187, 189, + 134, 128, 132, 136, 141, 144, 153, 156, + 159, 128, 181, 183, 185, 152, 153, 160, + 169, 190, 191, 128, 135, 137, 172, 177, + 191, 128, 132, 134, 151, 153, 188, 134, + 128, 129, 130, 131, 137, 138, 139, 140, + 141, 142, 143, 144, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 173, 175, + 176, 177, 178, 179, 181, 182, 183, 188, + 189, 190, 191, 132, 152, 172, 184, 185, + 187, 128, 191, 128, 137, 144, 255, 158, + 159, 134, 187, 136, 140, 142, 143, 137, + 151, 153, 142, 143, 158, 159, 137, 177, + 142, 143, 182, 183, 191, 255, 128, 130, + 133, 136, 150, 152, 255, 145, 150, 151, + 155, 156, 160, 168, 178, 255, 128, 143, + 160, 255, 182, 183, 190, 255, 129, 255, + 173, 174, 192, 255, 129, 154, 160, 255, + 171, 173, 185, 255, 128, 140, 142, 148, + 160, 180, 128, 147, 160, 172, 174, 176, + 178, 179, 148, 150, 152, 155, 158, 159, + 170, 255, 139, 141, 144, 153, 160, 255, + 184, 255, 128, 170, 176, 255, 182, 255, + 128, 158, 160, 171, 176, 187, 134, 173, + 176, 180, 128, 171, 176, 255, 138, 143, + 155, 255, 128, 155, 160, 255, 159, 189, + 190, 192, 255, 167, 128, 137, 144, 153, + 176, 189, 140, 143, 154, 170, 180, 255, + 180, 255, 128, 183, 128, 137, 141, 189, + 128, 136, 144, 146, 148, 182, 184, 185, + 128, 181, 187, 191, 150, 151, 158, 159, + 152, 154, 156, 158, 134, 135, 142, 143, + 190, 255, 190, 128, 180, 182, 188, 130, + 132, 134, 140, 144, 147, 150, 155, 160, + 172, 178, 180, 182, 188, 128, 129, 130, + 131, 132, 133, 134, 176, 177, 178, 179, + 180, 181, 182, 183, 191, 255, 129, 147, + 149, 176, 178, 190, 192, 255, 144, 156, + 161, 144, 156, 165, 176, 130, 135, 149, + 164, 166, 168, 138, 147, 152, 157, 170, + 185, 188, 191, 142, 133, 137, 160, 255, + 137, 255, 128, 174, 176, 255, 159, 165, + 170, 180, 255, 
167, 173, 128, 165, 176, + 255, 168, 174, 176, 190, 192, 255, 128, + 150, 160, 166, 168, 174, 176, 182, 184, + 190, 128, 134, 136, 142, 144, 150, 152, + 158, 160, 191, 128, 129, 130, 131, 132, + 133, 134, 135, 144, 145, 255, 133, 135, + 161, 175, 177, 181, 184, 188, 160, 151, + 152, 187, 192, 255, 133, 173, 177, 255, + 143, 159, 187, 255, 176, 191, 182, 183, + 184, 191, 192, 255, 150, 255, 128, 146, + 147, 148, 152, 153, 154, 155, 156, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 129, 255, 141, 255, 144, 189, + 141, 143, 172, 255, 191, 128, 175, 180, + 189, 151, 159, 162, 255, 175, 137, 138, + 184, 255, 183, 255, 168, 255, 128, 179, + 188, 134, 143, 154, 159, 184, 186, 190, + 255, 128, 173, 176, 255, 148, 159, 189, + 255, 129, 142, 154, 159, 191, 255, 128, + 182, 128, 141, 144, 153, 160, 182, 186, + 255, 128, 130, 155, 157, 160, 175, 178, + 182, 129, 134, 137, 142, 145, 150, 160, + 166, 168, 174, 176, 255, 155, 166, 175, + 128, 170, 172, 173, 176, 185, 158, 159, + 160, 255, 164, 175, 135, 138, 188, 255, + 164, 169, 171, 172, 173, 174, 175, 180, + 181, 182, 183, 184, 185, 187, 188, 189, + 190, 191, 165, 186, 174, 175, 154, 255, + 190, 128, 134, 147, 151, 157, 168, 170, + 182, 184, 188, 128, 129, 131, 132, 134, + 255, 147, 255, 190, 255, 144, 145, 136, + 175, 188, 255, 128, 143, 160, 175, 179, + 180, 141, 143, 176, 180, 182, 255, 189, + 255, 191, 144, 153, 161, 186, 129, 154, + 166, 255, 191, 255, 130, 135, 138, 143, + 146, 151, 154, 156, 144, 145, 146, 147, + 148, 150, 151, 152, 155, 157, 158, 160, + 170, 171, 172, 175, 161, 169, 128, 129, + 130, 131, 133, 135, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 153, 155, 178, 179, 128, 139, + 141, 166, 168, 186, 188, 189, 191, 255, + 142, 143, 158, 255, 187, 255, 128, 180, + 189, 128, 156, 160, 255, 145, 159, 161, + 255, 128, 159, 176, 255, 139, 143, 187, + 255, 128, 157, 160, 255, 144, 132, 135, + 150, 255, 158, 159, 170, 175, 148, 151, + 188, 255, 128, 167, 176, 255, 164, 255, + 183, 255, 128, 149, 160, 167, 136, 188, + 128, 133, 138, 181, 183, 184, 191, 255, + 150, 159, 183, 255, 128, 158, 160, 178, + 180, 181, 128, 149, 160, 185, 128, 183, + 190, 191, 191, 128, 131, 133, 134, 140, + 147, 149, 151, 153, 179, 184, 186, 160, + 188, 128, 156, 128, 135, 137, 166, 128, + 181, 128, 149, 160, 178, 128, 145, 128, + 178, 129, 130, 131, 132, 133, 135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 155, 156, 162, + 163, 171, 176, 177, 178, 128, 134, 135, + 165, 176, 190, 144, 168, 176, 185, 128, + 180, 182, 191, 182, 144, 179, 155, 133, + 137, 141, 143, 157, 255, 190, 128, 145, + 147, 183, 136, 128, 134, 138, 141, 143, + 157, 159, 168, 176, 255, 171, 175, 186, + 255, 128, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 144, 151, 128, 132, 135, 136, 139, + 141, 157, 163, 166, 172, 176, 180, 128, + 138, 144, 153, 134, 136, 143, 154, 255, + 128, 181, 184, 255, 129, 151, 158, 255, + 129, 131, 133, 143, 154, 255, 128, 137, + 128, 153, 157, 171, 176, 185, 160, 255, + 170, 190, 192, 255, 128, 184, 128, 136, + 138, 182, 184, 191, 128, 144, 153, 178, + 255, 168, 144, 145, 183, 255, 128, 142, + 145, 149, 129, 141, 144, 146, 147, 148, + 175, 255, 132, 255, 128, 144, 129, 143, + 144, 153, 145, 152, 135, 255, 160, 168, + 169, 171, 172, 173, 174, 188, 189, 190, + 191, 161, 167, 185, 255, 128, 158, 160, + 169, 144, 173, 176, 180, 128, 131, 144, + 
153, 163, 183, 189, 255, 144, 255, 133, + 143, 191, 255, 143, 159, 160, 128, 129, + 255, 159, 160, 171, 172, 255, 173, 255, + 179, 255, 128, 176, 177, 178, 128, 129, + 171, 175, 189, 255, 128, 136, 144, 153, + 157, 158, 133, 134, 137, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 168, 169, 170, 150, 153, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 149, 157, 173, 186, + 188, 160, 161, 163, 164, 167, 168, 132, + 134, 149, 157, 186, 139, 140, 191, 255, + 134, 128, 132, 138, 144, 146, 255, 166, + 167, 129, 155, 187, 149, 181, 143, 175, + 137, 169, 131, 140, 141, 192, 255, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 160, 163, 164, + 165, 184, 185, 186, 161, 162, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 133, 143, 151, 255, 139, 143, 154, 255, + 164, 167, 185, 187, 128, 131, 133, 159, + 161, 162, 169, 178, 180, 183, 130, 135, + 137, 139, 148, 151, 153, 155, 157, 159, + 164, 190, 141, 143, 145, 146, 161, 162, + 167, 170, 172, 178, 180, 183, 185, 188, + 128, 137, 139, 155, 161, 163, 165, 169, + 171, 187, 155, 156, 151, 255, 156, 157, + 160, 181, 255, 186, 187, 255, 162, 255, + 160, 168, 161, 167, 158, 255, 160, 132, + 135, 133, 134, 176, 255, 128, 191, 154, + 164, 168, 128, 149, 150, 191, 128, 152, + 153, 191, 181, 128, 159, 160, 189, 190, + 191, 189, 128, 131, 132, 185, 186, 191, + 144, 128, 151, 152, 161, 162, 176, 177, + 255, 169, 177, 129, 132, 141, 142, 145, + 146, 179, 181, 186, 188, 190, 191, 192, + 255, 142, 158, 128, 155, 156, 161, 162, + 175, 176, 177, 178, 191, 169, 177, 180, + 183, 128, 132, 133, 138, 139, 142, 143, + 144, 145, 146, 147, 185, 186, 191, 157, + 128, 152, 153, 158, 159, 177, 178, 180, + 181, 191, 142, 146, 169, 177, 180, 189, + 128, 132, 133, 185, 186, 191, 144, 185, + 128, 159, 160, 161, 162, 191, 169, 177, + 180, 189, 128, 132, 133, 140, 141, 142, + 143, 144, 145, 146, 147, 185, 186, 191, + 158, 177, 128, 155, 156, 161, 162, 191, + 131, 145, 155, 157, 128, 132, 133, 138, + 139, 141, 142, 149, 150, 152, 153, 159, + 160, 162, 163, 164, 165, 167, 168, 170, + 171, 173, 174, 185, 186, 191, 144, 128, + 191, 141, 145, 169, 189, 128, 132, 133, + 185, 186, 191, 128, 151, 152, 154, 155, + 159, 160, 161, 162, 191, 128, 141, 145, + 169, 180, 189, 129, 132, 133, 185, 186, + 191, 158, 128, 159, 160, 161, 162, 176, + 177, 178, 179, 191, 141, 145, 189, 128, + 132, 133, 186, 187, 191, 142, 128, 147, + 148, 150, 151, 158, 159, 161, 162, 185, + 186, 191, 178, 188, 128, 132, 133, 150, + 151, 153, 154, 189, 190, 191, 128, 134, + 135, 191, 128, 177, 129, 179, 180, 191, + 128, 131, 137, 141, 152, 160, 164, 166, + 172, 177, 189, 129, 132, 133, 134, 135, + 138, 139, 147, 148, 167, 168, 169, 170, + 179, 180, 191, 133, 128, 134, 135, 155, + 156, 159, 160, 191, 128, 129, 191, 136, + 128, 172, 173, 191, 128, 135, 136, 140, + 141, 191, 191, 128, 170, 171, 190, 161, + 128, 143, 144, 149, 150, 153, 154, 157, + 158, 164, 165, 166, 167, 173, 174, 176, + 177, 180, 181, 255, 130, 141, 143, 159, + 134, 187, 136, 140, 142, 143, 137, 151, + 153, 142, 143, 158, 159, 137, 177, 191, + 142, 143, 182, 183, 192, 255, 129, 151, + 128, 133, 134, 135, 136, 255, 145, 150, + 151, 155, 191, 192, 255, 128, 143, 144, + 159, 160, 255, 182, 183, 190, 191, 192, + 255, 128, 129, 255, 173, 174, 192, 255, + 128, 129, 154, 155, 159, 160, 255, 171, + 173, 185, 191, 192, 255, 141, 128, 145, + 146, 159, 160, 177, 178, 191, 173, 128, + 145, 146, 159, 160, 176, 177, 191, 128, + 179, 180, 191, 151, 156, 128, 191, 128, + 159, 160, 255, 184, 191, 
192, 255, 169, + 128, 170, 171, 175, 176, 255, 182, 191, + 192, 255, 128, 158, 159, 191, 128, 143, + 144, 173, 174, 175, 176, 180, 181, 191, + 128, 171, 172, 175, 176, 255, 138, 191, + 192, 255, 128, 150, 151, 159, 160, 255, + 149, 191, 192, 255, 167, 128, 191, 128, + 132, 133, 179, 180, 191, 128, 132, 133, + 139, 140, 191, 128, 130, 131, 160, 161, + 173, 174, 175, 176, 185, 186, 255, 166, + 191, 192, 255, 128, 163, 164, 191, 128, + 140, 141, 143, 144, 153, 154, 189, 190, + 191, 128, 136, 137, 191, 173, 128, 168, + 169, 177, 178, 180, 181, 182, 183, 191, + 0, 127, 192, 255, 150, 151, 158, 159, + 152, 154, 156, 158, 134, 135, 142, 143, + 190, 191, 192, 255, 181, 189, 191, 128, + 190, 133, 181, 128, 129, 130, 140, 141, + 143, 144, 147, 148, 149, 150, 155, 156, + 159, 160, 172, 173, 177, 178, 188, 189, + 191, 177, 191, 128, 190, 128, 143, 144, + 156, 157, 191, 130, 135, 148, 164, 166, + 168, 128, 137, 138, 149, 150, 151, 152, + 157, 158, 169, 170, 185, 186, 187, 188, + 191, 142, 128, 132, 133, 137, 138, 159, + 160, 255, 137, 191, 192, 255, 175, 128, + 255, 159, 165, 170, 175, 177, 180, 191, + 192, 255, 166, 173, 128, 167, 168, 175, + 176, 255, 168, 174, 176, 191, 192, 255, + 167, 175, 183, 191, 128, 150, 151, 159, + 160, 190, 135, 143, 151, 128, 158, 159, + 191, 128, 132, 133, 135, 136, 160, 161, + 169, 170, 176, 177, 181, 182, 183, 184, + 188, 189, 191, 160, 151, 154, 187, 192, + 255, 128, 132, 133, 173, 174, 176, 177, + 255, 143, 159, 187, 191, 192, 255, 128, + 175, 176, 191, 150, 191, 192, 255, 141, + 191, 192, 255, 128, 143, 144, 189, 190, + 191, 141, 143, 160, 169, 172, 191, 192, + 255, 191, 128, 174, 175, 190, 128, 157, + 158, 159, 160, 255, 176, 191, 192, 255, + 128, 150, 151, 159, 160, 161, 162, 255, + 175, 137, 138, 184, 191, 192, 255, 128, + 182, 183, 255, 130, 134, 139, 163, 191, + 192, 255, 128, 129, 130, 179, 180, 191, + 187, 189, 128, 177, 178, 183, 184, 191, + 128, 137, 138, 165, 166, 175, 176, 255, + 135, 159, 189, 191, 192, 255, 128, 131, + 132, 178, 179, 191, 143, 165, 191, 128, + 159, 160, 175, 176, 185, 186, 190, 128, + 168, 169, 191, 131, 186, 128, 139, 140, + 159, 160, 182, 183, 189, 190, 255, 176, + 178, 180, 183, 184, 190, 191, 192, 255, + 129, 128, 130, 131, 154, 155, 157, 158, + 159, 160, 170, 171, 177, 178, 180, 181, + 191, 128, 167, 175, 129, 134, 135, 136, + 137, 142, 143, 144, 145, 150, 151, 159, + 160, 255, 155, 166, 175, 128, 162, 163, + 191, 164, 175, 135, 138, 188, 191, 192, + 255, 174, 175, 154, 191, 192, 255, 157, + 169, 183, 189, 191, 128, 134, 135, 146, + 147, 151, 152, 158, 159, 190, 130, 133, + 128, 255, 178, 191, 192, 255, 128, 146, + 147, 255, 190, 191, 192, 255, 128, 143, + 144, 255, 144, 145, 136, 175, 188, 191, + 192, 255, 181, 128, 175, 176, 255, 189, + 191, 192, 255, 128, 160, 161, 186, 187, + 191, 128, 129, 154, 155, 165, 166, 255, + 191, 192, 255, 128, 129, 130, 135, 136, + 137, 138, 143, 144, 145, 146, 151, 152, + 153, 154, 156, 157, 191, 128, 191, 128, + 129, 130, 131, 133, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 132, 151, 153, 155, 158, 175, + 178, 179, 180, 191, 140, 167, 187, 190, + 128, 255, 142, 143, 158, 191, 192, 255, + 187, 191, 192, 255, 128, 180, 181, 191, + 128, 156, 157, 159, 160, 255, 145, 191, + 192, 255, 128, 159, 160, 175, 176, 255, + 139, 143, 182, 191, 192, 255, 144, 132, + 135, 150, 191, 192, 255, 158, 175, 148, + 151, 188, 191, 192, 255, 128, 167, 168, + 175, 176, 255, 164, 191, 192, 255, 183, + 191, 192, 
255, 128, 149, 150, 159, 160, + 167, 168, 191, 136, 182, 188, 128, 133, + 134, 137, 138, 184, 185, 190, 191, 255, + 150, 159, 183, 191, 192, 255, 179, 128, + 159, 160, 181, 182, 191, 128, 149, 150, + 159, 160, 185, 186, 191, 128, 183, 184, + 189, 190, 191, 128, 148, 152, 129, 143, + 144, 179, 180, 191, 128, 159, 160, 188, + 189, 191, 128, 156, 157, 191, 136, 128, + 164, 165, 191, 128, 181, 182, 191, 128, + 149, 150, 159, 160, 178, 179, 191, 128, + 145, 146, 191, 128, 178, 179, 191, 128, + 130, 131, 132, 133, 134, 135, 136, 138, + 139, 140, 141, 144, 145, 146, 147, 150, + 151, 152, 153, 154, 156, 162, 163, 171, + 176, 177, 178, 129, 191, 128, 130, 131, + 183, 184, 191, 128, 130, 131, 175, 176, + 191, 128, 143, 144, 168, 169, 191, 128, + 130, 131, 166, 167, 191, 182, 128, 143, + 144, 178, 179, 191, 128, 130, 131, 178, + 179, 191, 128, 154, 156, 129, 132, 133, + 191, 146, 128, 171, 172, 191, 135, 137, + 142, 158, 128, 168, 169, 175, 176, 255, + 159, 191, 192, 255, 144, 128, 156, 157, + 161, 162, 191, 128, 134, 135, 138, 139, + 191, 128, 175, 176, 191, 134, 128, 131, + 132, 135, 136, 191, 128, 174, 175, 191, + 128, 151, 152, 155, 156, 191, 132, 128, + 191, 128, 170, 171, 191, 128, 153, 154, + 191, 160, 190, 192, 255, 128, 184, 185, + 191, 137, 128, 174, 175, 191, 128, 129, + 177, 178, 255, 144, 191, 192, 255, 128, + 142, 143, 144, 145, 146, 149, 129, 148, + 150, 191, 175, 191, 192, 255, 132, 191, + 192, 255, 128, 144, 129, 143, 145, 191, + 144, 153, 128, 143, 145, 152, 154, 191, + 135, 191, 192, 255, 160, 168, 169, 171, + 172, 173, 174, 188, 189, 190, 191, 128, + 159, 161, 167, 170, 187, 185, 191, 192, + 255, 128, 143, 144, 173, 174, 191, 128, + 131, 132, 162, 163, 183, 184, 188, 189, + 255, 133, 143, 145, 191, 192, 255, 128, + 146, 147, 159, 160, 191, 160, 128, 191, + 128, 129, 191, 192, 255, 159, 160, 171, + 128, 170, 172, 191, 192, 255, 173, 191, + 192, 255, 179, 191, 192, 255, 128, 176, + 177, 178, 129, 191, 128, 129, 130, 191, + 171, 175, 189, 191, 192, 255, 128, 136, + 137, 143, 144, 153, 154, 191, 144, 145, + 146, 147, 148, 149, 154, 155, 156, 157, + 158, 159, 128, 143, 150, 153, 160, 191, + 149, 157, 173, 186, 188, 160, 161, 163, + 164, 167, 168, 132, 134, 149, 157, 186, + 191, 139, 140, 192, 255, 133, 145, 128, + 134, 135, 137, 138, 255, 166, 167, 129, + 155, 187, 149, 181, 143, 175, 137, 169, + 131, 140, 191, 192, 255, 160, 163, 164, + 165, 184, 185, 186, 128, 159, 161, 162, + 166, 191, 133, 191, 192, 255, 132, 160, + 163, 167, 179, 184, 186, 128, 164, 165, + 168, 169, 187, 188, 191, 130, 135, 137, + 139, 144, 147, 151, 153, 155, 157, 159, + 163, 171, 179, 184, 189, 191, 128, 140, + 141, 148, 149, 160, 161, 164, 165, 166, + 167, 190, 138, 164, 170, 128, 155, 156, + 160, 161, 187, 188, 191, 128, 191, 155, + 156, 128, 191, 151, 191, 192, 255, 156, + 157, 160, 128, 191, 181, 191, 192, 255, + 158, 159, 186, 128, 185, 187, 191, 192, + 255, 162, 191, 192, 255, 160, 168, 128, + 159, 161, 167, 169, 191, 158, 191, 192, + 255, 123, 123, 128, 191, 128, 191, 128, + 191, 128, 191, 128, 191, 10, 123, 123, + 128, 191, 128, 191, 128, 191, 123, 123, + 10, 123, 128, 191, 128, 191, 128, 191, + 123, 123, 170, 181, 183, 186, 128, 150, + 152, 182, 184, 255, 192, 255, 128, 255, + 173, 130, 133, 146, 159, 165, 171, 175, + 255, 181, 190, 184, 185, 192, 255, 140, + 134, 138, 142, 161, 163, 255, 182, 130, + 136, 137, 176, 151, 152, 154, 160, 190, + 136, 144, 192, 255, 135, 129, 130, 132, + 133, 144, 170, 176, 178, 144, 154, 160, + 191, 128, 169, 174, 255, 148, 169, 157, + 158, 189, 190, 192, 255, 144, 255, 
139, + 140, 178, 255, 186, 128, 181, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 128, 173, + 128, 155, 160, 180, 182, 189, 148, 161, + 163, 255, 176, 164, 165, 132, 169, 177, + 141, 142, 145, 146, 179, 181, 186, 187, + 158, 133, 134, 137, 138, 143, 150, 152, + 155, 164, 165, 178, 255, 188, 129, 131, + 133, 138, 143, 144, 147, 168, 170, 176, + 178, 179, 181, 182, 184, 185, 190, 255, + 157, 131, 134, 137, 138, 142, 144, 146, + 152, 159, 165, 182, 255, 129, 131, 133, + 141, 143, 145, 147, 168, 170, 176, 178, + 179, 181, 185, 188, 255, 134, 138, 142, + 143, 145, 159, 164, 165, 176, 184, 186, + 255, 129, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 177, 128, 132, 135, 136, 139, 141, + 150, 151, 156, 157, 159, 163, 166, 175, + 156, 130, 131, 133, 138, 142, 144, 146, + 149, 153, 154, 158, 159, 163, 164, 168, + 170, 174, 185, 190, 191, 144, 151, 128, + 130, 134, 136, 138, 141, 166, 175, 128, + 131, 133, 140, 142, 144, 146, 168, 170, + 185, 189, 255, 133, 137, 151, 142, 148, + 155, 159, 164, 165, 176, 255, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 179, + 181, 185, 188, 191, 158, 128, 132, 134, + 136, 138, 141, 149, 150, 160, 163, 166, + 175, 177, 178, 129, 131, 133, 140, 142, + 144, 146, 186, 189, 255, 133, 137, 143, + 147, 152, 158, 164, 165, 176, 185, 192, + 255, 189, 130, 131, 133, 150, 154, 177, + 179, 187, 138, 150, 128, 134, 143, 148, + 152, 159, 166, 175, 178, 179, 129, 186, + 128, 142, 144, 153, 132, 138, 141, 165, + 167, 129, 130, 135, 136, 148, 151, 153, + 159, 161, 163, 170, 171, 173, 185, 187, + 189, 134, 128, 132, 136, 141, 144, 153, + 156, 159, 128, 181, 183, 185, 152, 153, + 160, 169, 190, 191, 128, 135, 137, 172, + 177, 191, 128, 132, 134, 151, 153, 188, + 134, 128, 129, 130, 131, 137, 138, 139, + 140, 141, 142, 143, 144, 153, 154, 155, + 156, 157, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 173, + 175, 176, 177, 178, 179, 181, 182, 183, + 188, 189, 190, 191, 132, 152, 172, 184, + 185, 187, 128, 191, 128, 137, 144, 255, + 158, 159, 134, 187, 136, 140, 142, 143, + 137, 151, 153, 142, 143, 158, 159, 137, + 177, 142, 143, 182, 183, 191, 255, 128, + 130, 133, 136, 150, 152, 255, 145, 150, + 151, 155, 156, 160, 168, 178, 255, 128, + 143, 160, 255, 182, 183, 190, 255, 129, + 255, 173, 174, 192, 255, 129, 154, 160, + 255, 171, 173, 185, 255, 128, 140, 142, + 148, 160, 180, 128, 147, 160, 172, 174, + 176, 178, 179, 148, 150, 152, 155, 158, + 159, 170, 255, 139, 141, 144, 153, 160, + 255, 184, 255, 128, 170, 176, 255, 182, + 255, 128, 158, 160, 171, 176, 187, 134, + 173, 176, 180, 128, 171, 176, 255, 138, + 143, 155, 255, 128, 155, 160, 255, 159, + 189, 190, 192, 255, 167, 128, 137, 144, + 153, 176, 189, 140, 143, 154, 170, 180, + 255, 180, 255, 128, 183, 128, 137, 141, + 189, 128, 136, 144, 146, 148, 182, 184, + 185, 128, 181, 187, 191, 150, 151, 158, + 159, 152, 154, 156, 158, 134, 135, 142, + 143, 190, 255, 190, 128, 180, 182, 188, + 130, 132, 134, 140, 144, 147, 150, 155, + 160, 172, 178, 180, 182, 188, 128, 129, + 130, 131, 132, 133, 134, 176, 177, 178, + 179, 180, 181, 182, 183, 191, 255, 129, + 147, 149, 176, 178, 190, 192, 255, 144, + 156, 161, 144, 156, 165, 176, 130, 135, + 149, 164, 166, 168, 138, 147, 152, 157, + 170, 185, 188, 191, 142, 133, 137, 160, + 255, 137, 255, 128, 174, 176, 255, 159, + 165, 170, 180, 255, 167, 173, 128, 165, + 176, 255, 168, 174, 176, 190, 192, 255, + 128, 150, 160, 166, 
168, 174, 176, 182, + 184, 190, 128, 134, 136, 142, 144, 150, + 152, 158, 160, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 145, 255, 133, + 135, 161, 175, 177, 181, 184, 188, 160, + 151, 152, 187, 192, 255, 133, 173, 177, + 255, 143, 159, 187, 255, 176, 191, 182, + 183, 184, 191, 192, 255, 150, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 255, 141, 255, 144, + 189, 141, 143, 172, 255, 191, 128, 175, + 180, 189, 151, 159, 162, 255, 175, 137, + 138, 184, 255, 183, 255, 168, 255, 128, + 179, 188, 134, 143, 154, 159, 184, 186, + 190, 255, 128, 173, 176, 255, 148, 159, + 189, 255, 129, 142, 154, 159, 191, 255, + 128, 182, 128, 141, 144, 153, 160, 182, + 186, 255, 128, 130, 155, 157, 160, 175, + 178, 182, 129, 134, 137, 142, 145, 150, + 160, 166, 168, 174, 176, 255, 155, 166, + 175, 128, 170, 172, 173, 176, 185, 158, + 159, 160, 255, 164, 175, 135, 138, 188, + 255, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 165, 186, 174, 175, 154, + 255, 190, 128, 134, 147, 151, 157, 168, + 170, 182, 184, 188, 128, 129, 131, 132, + 134, 255, 147, 255, 190, 255, 144, 145, + 136, 175, 188, 255, 128, 143, 160, 175, + 179, 180, 141, 143, 176, 180, 182, 255, + 189, 255, 191, 144, 153, 161, 186, 129, + 154, 166, 255, 191, 255, 130, 135, 138, + 143, 146, 151, 154, 156, 144, 145, 146, + 147, 148, 150, 151, 152, 155, 157, 158, + 160, 170, 171, 172, 175, 161, 169, 128, + 129, 130, 131, 133, 135, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 152, 156, 157, 160, 161, 162, 163, + 164, 166, 168, 169, 170, 171, 172, 173, + 174, 176, 177, 153, 155, 178, 179, 128, + 139, 141, 166, 168, 186, 188, 189, 191, + 255, 142, 143, 158, 255, 187, 255, 128, + 180, 189, 128, 156, 160, 255, 145, 159, + 161, 255, 128, 159, 176, 255, 139, 143, + 187, 255, 128, 157, 160, 255, 144, 132, + 135, 150, 255, 158, 159, 170, 175, 148, + 151, 188, 255, 128, 167, 176, 255, 164, + 255, 183, 255, 128, 149, 160, 167, 136, + 188, 128, 133, 138, 181, 183, 184, 191, + 255, 150, 159, 183, 255, 128, 158, 160, + 178, 180, 181, 128, 149, 160, 185, 128, + 183, 190, 191, 191, 128, 131, 133, 134, + 140, 147, 149, 151, 153, 179, 184, 186, + 160, 188, 128, 156, 128, 135, 137, 166, + 128, 181, 128, 149, 160, 178, 128, 145, + 128, 178, 129, 130, 131, 132, 133, 135, + 136, 138, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 155, 156, + 162, 163, 171, 176, 177, 178, 128, 134, + 135, 165, 176, 190, 144, 168, 176, 185, + 128, 180, 182, 191, 182, 144, 179, 155, + 133, 137, 141, 143, 157, 255, 190, 128, + 145, 147, 183, 136, 128, 134, 138, 141, + 143, 157, 159, 168, 176, 255, 171, 175, + 186, 255, 128, 131, 133, 140, 143, 144, + 147, 168, 170, 176, 178, 179, 181, 185, + 188, 191, 144, 151, 128, 132, 135, 136, + 139, 141, 157, 163, 166, 172, 176, 180, + 128, 138, 144, 153, 134, 136, 143, 154, + 255, 128, 181, 184, 255, 129, 151, 158, + 255, 129, 131, 133, 143, 154, 255, 128, + 137, 128, 153, 157, 171, 176, 185, 160, + 255, 170, 190, 192, 255, 128, 184, 128, + 136, 138, 182, 184, 191, 128, 144, 153, + 178, 255, 168, 144, 145, 183, 255, 128, + 142, 145, 149, 129, 141, 144, 146, 147, + 148, 175, 255, 132, 255, 128, 144, 129, + 143, 144, 153, 145, 152, 135, 255, 160, + 168, 169, 171, 172, 173, 174, 188, 189, + 190, 191, 161, 167, 185, 255, 128, 158, + 160, 169, 144, 173, 176, 180, 128, 131, + 144, 153, 163, 183, 189, 255, 144, 255, + 133, 143, 191, 255, 143, 159, 160, 128, + 129, 
255, 159, 160, 171, 172, 255, 173, + 255, 179, 255, 128, 176, 177, 178, 128, + 129, 171, 175, 189, 255, 128, 136, 144, + 153, 157, 158, 133, 134, 137, 144, 145, + 146, 147, 148, 149, 154, 155, 156, 157, + 158, 159, 168, 169, 170, 150, 153, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 149, 157, 173, + 186, 188, 160, 161, 163, 164, 167, 168, + 132, 134, 149, 157, 186, 139, 140, 191, + 255, 134, 128, 132, 138, 144, 146, 255, + 166, 167, 129, 155, 187, 149, 181, 143, + 175, 137, 169, 131, 140, 141, 192, 255, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 160, 163, + 164, 165, 184, 185, 186, 161, 162, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 133, 143, 151, 255, 139, 143, 154, + 255, 164, 167, 185, 187, 128, 131, 133, + 159, 161, 162, 169, 178, 180, 183, 130, + 135, 137, 139, 148, 151, 153, 155, 157, + 159, 164, 190, 141, 143, 145, 146, 161, + 162, 167, 170, 172, 178, 180, 183, 185, + 188, 128, 137, 139, 155, 161, 163, 165, + 169, 171, 187, 155, 156, 151, 255, 156, + 157, 160, 181, 255, 186, 187, 255, 162, + 255, 160, 168, 161, 167, 158, 255, 160, + 132, 135, 133, 134, 176, 255, 128, 191, + 154, 164, 168, 128, 149, 150, 191, 128, + 152, 153, 191, 181, 128, 159, 160, 189, + 190, 191, 189, 128, 131, 132, 185, 186, + 191, 144, 128, 151, 152, 161, 162, 176, + 177, 255, 169, 177, 129, 132, 141, 142, + 145, 146, 179, 181, 186, 188, 190, 191, + 192, 255, 142, 158, 128, 155, 156, 161, + 162, 175, 176, 177, 178, 191, 169, 177, + 180, 183, 128, 132, 133, 138, 139, 142, + 143, 144, 145, 146, 147, 185, 186, 191, + 157, 128, 152, 153, 158, 159, 177, 178, + 180, 181, 191, 142, 146, 169, 177, 180, + 189, 128, 132, 133, 185, 186, 191, 144, + 185, 128, 159, 160, 161, 162, 191, 169, + 177, 180, 189, 128, 132, 133, 140, 141, + 142, 143, 144, 145, 146, 147, 185, 186, + 191, 158, 177, 128, 155, 156, 161, 162, + 191, 131, 145, 155, 157, 128, 132, 133, + 138, 139, 141, 142, 149, 150, 152, 153, + 159, 160, 162, 163, 164, 165, 167, 168, + 170, 171, 173, 174, 185, 186, 191, 144, + 128, 191, 141, 145, 169, 189, 128, 132, + 133, 185, 186, 191, 128, 151, 152, 154, + 155, 159, 160, 161, 162, 191, 128, 141, + 145, 169, 180, 189, 129, 132, 133, 185, + 186, 191, 158, 128, 159, 160, 161, 162, + 176, 177, 178, 179, 191, 141, 145, 189, + 128, 132, 133, 186, 187, 191, 142, 128, + 147, 148, 150, 151, 158, 159, 161, 162, + 185, 186, 191, 178, 188, 128, 132, 133, + 150, 151, 153, 154, 189, 190, 191, 128, + 134, 135, 191, 128, 177, 129, 179, 180, + 191, 128, 131, 137, 141, 152, 160, 164, + 166, 172, 177, 189, 129, 132, 133, 134, + 135, 138, 139, 147, 148, 167, 168, 169, + 170, 179, 180, 191, 133, 128, 134, 135, + 155, 156, 159, 160, 191, 128, 129, 191, + 136, 128, 172, 173, 191, 128, 135, 136, + 140, 141, 191, 191, 128, 170, 171, 190, + 161, 128, 143, 144, 149, 150, 153, 154, + 157, 158, 164, 165, 166, 167, 173, 174, + 176, 177, 180, 181, 255, 130, 141, 143, + 159, 134, 187, 136, 140, 142, 143, 137, + 151, 153, 142, 143, 158, 159, 137, 177, + 191, 142, 143, 182, 183, 192, 255, 129, + 151, 128, 133, 134, 135, 136, 255, 145, + 150, 151, 155, 191, 192, 255, 128, 143, + 144, 159, 160, 255, 182, 183, 190, 191, + 192, 255, 128, 129, 255, 173, 174, 192, + 255, 128, 129, 154, 155, 159, 160, 255, + 171, 173, 185, 191, 192, 255, 141, 128, + 145, 146, 159, 160, 177, 178, 191, 173, + 128, 145, 146, 159, 160, 176, 177, 191, + 128, 179, 180, 191, 151, 156, 128, 191, + 128, 159, 160, 255, 184, 191, 192, 255, + 169, 128, 170, 171, 175, 176, 255, 182, + 191, 192, 255, 128, 158, 159, 
191, 128, + 143, 144, 173, 174, 175, 176, 180, 181, + 191, 128, 171, 172, 175, 176, 255, 138, + 191, 192, 255, 128, 150, 151, 159, 160, + 255, 149, 191, 192, 255, 167, 128, 191, + 128, 132, 133, 179, 180, 191, 128, 132, + 133, 139, 140, 191, 128, 130, 131, 160, + 161, 173, 174, 175, 176, 185, 186, 255, + 166, 191, 192, 255, 128, 163, 164, 191, + 128, 140, 141, 143, 144, 153, 154, 189, + 190, 191, 128, 136, 137, 191, 173, 128, + 168, 169, 177, 178, 180, 181, 182, 183, + 191, 0, 127, 192, 255, 150, 151, 158, + 159, 152, 154, 156, 158, 134, 135, 142, + 143, 190, 191, 192, 255, 181, 189, 191, + 128, 190, 133, 181, 128, 129, 130, 140, + 141, 143, 144, 147, 148, 149, 150, 155, + 156, 159, 160, 172, 173, 177, 178, 188, + 189, 191, 177, 191, 128, 190, 128, 143, + 144, 156, 157, 191, 130, 135, 148, 164, + 166, 168, 128, 137, 138, 149, 150, 151, + 152, 157, 158, 169, 170, 185, 186, 187, + 188, 191, 142, 128, 132, 133, 137, 138, + 159, 160, 255, 137, 191, 192, 255, 175, + 128, 255, 159, 165, 170, 175, 177, 180, + 191, 192, 255, 166, 173, 128, 167, 168, + 175, 176, 255, 168, 174, 176, 191, 192, + 255, 167, 175, 183, 191, 128, 150, 151, + 159, 160, 190, 135, 143, 151, 128, 158, + 159, 191, 128, 132, 133, 135, 136, 160, + 161, 169, 170, 176, 177, 181, 182, 183, + 184, 188, 189, 191, 160, 151, 154, 187, + 192, 255, 128, 132, 133, 173, 174, 176, + 177, 255, 143, 159, 187, 191, 192, 255, + 128, 175, 176, 191, 150, 191, 192, 255, + 141, 191, 192, 255, 128, 143, 144, 189, + 190, 191, 141, 143, 160, 169, 172, 191, + 192, 255, 191, 128, 174, 175, 190, 128, + 157, 158, 159, 160, 255, 176, 191, 192, + 255, 128, 150, 151, 159, 160, 161, 162, + 255, 175, 137, 138, 184, 191, 192, 255, + 128, 182, 183, 255, 130, 134, 139, 163, + 191, 192, 255, 128, 129, 130, 179, 180, + 191, 187, 189, 128, 177, 178, 183, 184, + 191, 128, 137, 138, 165, 166, 175, 176, + 255, 135, 159, 189, 191, 192, 255, 128, + 131, 132, 178, 179, 191, 143, 165, 191, + 128, 159, 160, 175, 176, 185, 186, 190, + 128, 168, 169, 191, 131, 186, 128, 139, + 140, 159, 160, 182, 183, 189, 190, 255, + 176, 178, 180, 183, 184, 190, 191, 192, + 255, 129, 128, 130, 131, 154, 155, 157, + 158, 159, 160, 170, 171, 177, 178, 180, + 181, 191, 128, 167, 175, 129, 134, 135, + 136, 137, 142, 143, 144, 145, 150, 151, + 159, 160, 255, 155, 166, 175, 128, 162, + 163, 191, 164, 175, 135, 138, 188, 191, + 192, 255, 174, 175, 154, 191, 192, 255, + 157, 169, 183, 189, 191, 128, 134, 135, + 146, 147, 151, 152, 158, 159, 190, 130, + 133, 128, 255, 178, 191, 192, 255, 128, + 146, 147, 255, 190, 191, 192, 255, 128, + 143, 144, 255, 144, 145, 136, 175, 188, + 191, 192, 255, 181, 128, 175, 176, 255, + 189, 191, 192, 255, 128, 160, 161, 186, + 187, 191, 128, 129, 154, 155, 165, 166, + 255, 191, 192, 255, 128, 129, 130, 135, + 136, 137, 138, 143, 144, 145, 146, 151, + 152, 153, 154, 156, 157, 191, 128, 191, + 128, 129, 130, 131, 133, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 152, 156, 157, 160, 161, 162, 163, + 164, 166, 168, 169, 170, 171, 172, 173, + 174, 176, 177, 132, 151, 153, 155, 158, + 175, 178, 179, 180, 191, 140, 167, 187, + 190, 128, 255, 142, 143, 158, 191, 192, + 255, 187, 191, 192, 255, 128, 180, 181, + 191, 128, 156, 157, 159, 160, 255, 145, + 191, 192, 255, 128, 159, 160, 175, 176, + 255, 139, 143, 182, 191, 192, 255, 144, + 132, 135, 150, 191, 192, 255, 158, 175, + 148, 151, 188, 191, 192, 255, 128, 167, + 168, 175, 176, 255, 164, 191, 192, 255, + 183, 191, 192, 255, 128, 149, 150, 159, + 160, 167, 168, 191, 136, 182, 188, 128, + 133, 134, 137, 
138, 184, 185, 190, 191, + 255, 150, 159, 183, 191, 192, 255, 179, + 128, 159, 160, 181, 182, 191, 128, 149, + 150, 159, 160, 185, 186, 191, 128, 183, + 184, 189, 190, 191, 128, 148, 152, 129, + 143, 144, 179, 180, 191, 128, 159, 160, + 188, 189, 191, 128, 156, 157, 191, 136, + 128, 164, 165, 191, 128, 181, 182, 191, + 128, 149, 150, 159, 160, 178, 179, 191, + 128, 145, 146, 191, 128, 178, 179, 191, + 128, 130, 131, 132, 133, 134, 135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 162, 163, + 171, 176, 177, 178, 129, 191, 128, 130, + 131, 183, 184, 191, 128, 130, 131, 175, + 176, 191, 128, 143, 144, 168, 169, 191, + 128, 130, 131, 166, 167, 191, 182, 128, + 143, 144, 178, 179, 191, 128, 130, 131, + 178, 179, 191, 128, 154, 156, 129, 132, + 133, 191, 146, 128, 171, 172, 191, 135, + 137, 142, 158, 128, 168, 169, 175, 176, + 255, 159, 191, 192, 255, 144, 128, 156, + 157, 161, 162, 191, 128, 134, 135, 138, + 139, 191, 128, 175, 176, 191, 134, 128, + 131, 132, 135, 136, 191, 128, 174, 175, + 191, 128, 151, 152, 155, 156, 191, 132, + 128, 191, 128, 170, 171, 191, 128, 153, + 154, 191, 160, 190, 192, 255, 128, 184, + 185, 191, 137, 128, 174, 175, 191, 128, + 129, 177, 178, 255, 144, 191, 192, 255, + 128, 142, 143, 144, 145, 146, 149, 129, + 148, 150, 191, 175, 191, 192, 255, 132, + 191, 192, 255, 128, 144, 129, 143, 145, + 191, 144, 153, 128, 143, 145, 152, 154, + 191, 135, 191, 192, 255, 160, 168, 169, + 171, 172, 173, 174, 188, 189, 190, 191, + 128, 159, 161, 167, 170, 187, 185, 191, + 192, 255, 128, 143, 144, 173, 174, 191, + 128, 131, 132, 162, 163, 183, 184, 188, + 189, 255, 133, 143, 145, 191, 192, 255, + 128, 146, 147, 159, 160, 191, 160, 128, + 191, 128, 129, 191, 192, 255, 159, 160, + 171, 128, 170, 172, 191, 192, 255, 173, + 191, 192, 255, 179, 191, 192, 255, 128, + 176, 177, 178, 129, 191, 128, 129, 130, + 191, 171, 175, 189, 191, 192, 255, 128, + 136, 137, 143, 144, 153, 154, 191, 144, + 145, 146, 147, 148, 149, 154, 155, 156, + 157, 158, 159, 128, 143, 150, 153, 160, + 191, 149, 157, 173, 186, 188, 160, 161, + 163, 164, 167, 168, 132, 134, 149, 157, + 186, 191, 139, 140, 192, 255, 133, 145, + 128, 134, 135, 137, 138, 255, 166, 167, + 129, 155, 187, 149, 181, 143, 175, 137, + 169, 131, 140, 191, 192, 255, 160, 163, + 164, 165, 184, 185, 186, 128, 159, 161, + 162, 166, 191, 133, 191, 192, 255, 132, + 160, 163, 167, 179, 184, 186, 128, 164, + 165, 168, 169, 187, 188, 191, 130, 135, + 137, 139, 144, 147, 151, 153, 155, 157, + 159, 163, 171, 179, 184, 189, 191, 128, + 140, 141, 148, 149, 160, 161, 164, 165, + 166, 167, 190, 138, 164, 170, 128, 155, + 156, 160, 161, 187, 188, 191, 128, 191, + 155, 156, 128, 191, 151, 191, 192, 255, + 156, 157, 160, 128, 191, 181, 191, 192, + 255, 158, 159, 186, 128, 185, 187, 191, + 192, 255, 162, 191, 192, 255, 160, 168, + 128, 159, 161, 167, 169, 191, 158, 191, + 192, 255, 9, 10, 13, 32, 33, 34, + 35, 37, 38, 46, 47, 60, 61, 62, + 64, 92, 95, 123, 124, 125, 126, 127, + 194, 195, 198, 199, 203, 204, 205, 206, + 207, 210, 212, 213, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 233, 234, 237, 238, 239, 240, + 0, 39, 40, 45, 48, 57, 58, 63, + 65, 90, 91, 96, 97, 122, 192, 193, + 196, 218, 229, 236, 241, 247, 9, 32, + 10, 61, 10, 38, 46, 42, 47, 42, + 46, 69, 101, 48, 57, 60, 61, 61, + 62, 61, 45, 95, 194, 195, 198, 199, + 203, 204, 205, 206, 207, 210, 212, 213, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 233, 234, + 237, 239, 240, 243, 48, 57, 65, 90, + 97, 122, 196, 
218, 229, 236, 124, 125, + 128, 191, 170, 181, 186, 128, 191, 151, + 183, 128, 255, 192, 255, 0, 127, 173, + 130, 133, 146, 159, 165, 171, 175, 191, + 192, 255, 181, 190, 128, 175, 176, 183, + 184, 185, 186, 191, 134, 139, 141, 162, + 128, 135, 136, 255, 182, 130, 137, 176, + 151, 152, 154, 160, 136, 191, 192, 255, + 128, 143, 144, 170, 171, 175, 176, 178, + 179, 191, 128, 159, 160, 191, 176, 128, + 138, 139, 173, 174, 255, 148, 150, 164, + 167, 173, 176, 185, 189, 190, 192, 255, + 144, 128, 145, 146, 175, 176, 191, 128, + 140, 141, 255, 166, 176, 178, 191, 192, + 255, 186, 128, 137, 138, 170, 171, 179, + 180, 181, 182, 191, 160, 161, 162, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 128, 191, 128, 129, 130, 131, + 137, 138, 139, 140, 141, 142, 143, 144, + 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 182, 183, 184, 188, + 189, 190, 191, 132, 187, 129, 130, 132, + 133, 134, 176, 177, 178, 179, 180, 181, + 182, 183, 128, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 136, 143, 145, + 191, 192, 255, 182, 183, 184, 128, 191, + 128, 191, 191, 128, 190, 192, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 191, 192, 255, 158, + 159, 128, 157, 160, 191, 192, 255, 128, + 191, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 128, 163, 165, 186, 144, + 145, 146, 147, 148, 150, 151, 152, 155, + 157, 158, 160, 170, 171, 172, 175, 128, + 159, 161, 169, 173, 191, 128, 191, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 36, 37, + 92, 123, 192, 223, 224, 239, 240, 247, + 10, 13, 34, 36, 37, 92, 123, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 10, 13, 34, 36, 37, 92, 123, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 34, 36, 37, 92, + 123, 128, 191, 192, 223, 224, 239, 240, + 247, 248, 255, 10, 13, 34, 36, 37, + 92, 128, 191, 192, 223, 224, 239, 240, + 247, 248, 255, 36, 37, 92, 123, 192, + 223, 224, 239, 240, 247, 10, 13, 34, + 36, 37, 92, 123, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 34, 36, 37, 92, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 123, 126, + 123, 126, 128, 191, 128, 191, 128, 191, + 10, 13, 36, 37, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 36, 37, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 10, 13, 36, 37, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 36, 37, 128, 191, + 192, 223, 224, 239, 240, 247, 248, 255, + 126, 126, 128, 191, 128, 191, 128, 191, 
+ 10, 13, 36, 37, 128, 191, 192, 223, + 224, 239, 240, 247, 248, 255, 10, 13, + 36, 37, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 126, 126, 128, 191, + 128, 191, 128, 191, 95, 194, 195, 198, + 199, 203, 204, 205, 206, 207, 210, 212, + 213, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 228, 233, + 234, 237, 238, 239, 240, 65, 90, 97, + 122, 128, 191, 192, 193, 196, 218, 229, + 236, 241, 247, 248, 255, 45, 95, 194, + 195, 198, 199, 203, 204, 205, 206, 207, + 210, 212, 213, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 233, 234, 237, 239, 240, 243, 48, + 57, 65, 90, 97, 122, 196, 218, 229, + 236, 128, 191, 170, 181, 186, 128, 191, + 151, 183, 128, 255, 192, 255, 0, 127, + 173, 130, 133, 146, 159, 165, 171, 175, + 191, 192, 255, 181, 190, 128, 175, 176, + 183, 184, 185, 186, 191, 134, 139, 141, + 162, 128, 135, 136, 255, 182, 130, 137, + 176, 151, 152, 154, 160, 136, 191, 192, + 255, 128, 143, 144, 170, 171, 175, 176, + 178, 179, 191, 128, 159, 160, 191, 176, + 128, 138, 139, 173, 174, 255, 148, 150, + 164, 167, 173, 176, 185, 189, 190, 192, + 255, 144, 128, 145, 146, 175, 176, 191, + 128, 140, 141, 255, 166, 176, 178, 191, + 192, 255, 186, 128, 137, 138, 170, 171, + 179, 180, 181, 182, 191, 160, 161, 162, + 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 128, 191, 128, 129, 130, + 131, 137, 138, 139, 140, 141, 142, 143, + 144, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 182, 183, 184, + 188, 189, 190, 191, 132, 187, 129, 130, + 132, 133, 134, 176, 177, 178, 179, 180, + 181, 182, 183, 128, 191, 128, 129, 130, + 131, 132, 133, 134, 135, 144, 136, 143, + 145, 191, 192, 255, 182, 183, 184, 128, + 191, 128, 191, 191, 128, 190, 192, 255, + 128, 146, 147, 148, 152, 153, 154, 155, + 156, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 129, 191, 192, 255, + 158, 159, 128, 157, 160, 191, 192, 255, + 128, 191, 164, 169, 171, 172, 173, 174, + 175, 180, 181, 182, 183, 184, 185, 187, + 188, 189, 190, 191, 128, 163, 165, 186, + 144, 145, 146, 147, 148, 150, 151, 152, + 155, 157, 158, 160, 170, 171, 172, 175, + 128, 159, 161, 169, 173, 191, 128, 191, +} + +var _hcltok_single_lengths []byte = []byte{ + 0, 1, 1, 1, 2, 3, 2, 0, + 32, 31, 36, 1, 4, 0, 0, 0, + 0, 1, 2, 1, 1, 1, 1, 0, + 1, 1, 0, 0, 2, 0, 0, 0, + 1, 32, 0, 0, 0, 0, 1, 3, + 1, 1, 1, 0, 2, 0, 1, 1, + 2, 0, 3, 0, 1, 0, 2, 1, + 2, 0, 0, 5, 1, 4, 0, 0, + 1, 43, 0, 0, 0, 2, 3, 2, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 4, + 1, 0, 15, 0, 0, 0, 1, 6, + 1, 0, 0, 1, 0, 2, 0, 0, + 0, 9, 0, 1, 1, 0, 0, 0, + 3, 0, 1, 0, 28, 0, 0, 0, + 1, 0, 1, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 2, 0, 0, 18, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 16, 36, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 2, 0, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 28, 0, 0, 0, + 1, 1, 1, 1, 0, 0, 2, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 1, 4, 0, 0, 2, + 2, 0, 11, 0, 0, 0, 0, 0, + 0, 0, 1, 1, 3, 0, 0, 4, + 0, 0, 0, 18, 0, 0, 0, 1, + 4, 1, 4, 1, 0, 3, 2, 2, + 2, 1, 0, 0, 1, 8, 0, 0, + 0, 4, 12, 0, 2, 0, 3, 0, + 1, 0, 2, 0, 1, 2, 0, 3, + 1, 2, 0, 0, 0, 0, 0, 1, + 1, 0, 0, 1, 28, 3, 0, 1, + 1, 2, 1, 0, 1, 1, 2, 1, + 1, 2, 1, 1, 0, 2, 1, 1, + 1, 1, 0, 0, 6, 1, 1, 0, + 0, 46, 1, 1, 0, 0, 0, 0, + 2, 1, 0, 0, 0, 1, 0, 
0, + 0, 0, 0, 0, 0, 13, 2, 0, + 0, 0, 9, 0, 1, 28, 0, 1, + 3, 0, 2, 0, 0, 0, 1, 0, + 1, 1, 2, 0, 18, 2, 0, 0, + 16, 35, 0, 0, 0, 1, 0, 28, + 0, 0, 0, 0, 1, 0, 2, 0, + 0, 1, 0, 0, 1, 0, 0, 1, + 0, 0, 0, 0, 1, 11, 0, 0, + 0, 0, 4, 0, 12, 1, 7, 0, + 4, 0, 0, 0, 0, 1, 2, 1, + 1, 1, 1, 0, 1, 1, 0, 0, + 2, 0, 0, 0, 1, 32, 0, 0, + 0, 0, 1, 3, 1, 1, 1, 0, + 2, 0, 1, 1, 2, 0, 3, 0, + 1, 0, 2, 1, 2, 0, 0, 5, + 1, 4, 0, 0, 1, 43, 0, 0, + 0, 2, 3, 2, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 4, 1, 0, 15, 0, + 0, 0, 1, 6, 1, 0, 0, 1, + 0, 2, 0, 0, 0, 9, 0, 1, + 1, 0, 0, 0, 3, 0, 1, 0, + 28, 0, 0, 0, 1, 0, 1, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 2, 0, 0, 18, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 16, + 36, 0, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 2, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, + 28, 0, 0, 0, 1, 1, 1, 1, + 0, 0, 2, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, + 4, 0, 0, 2, 2, 0, 11, 0, + 0, 0, 0, 0, 0, 0, 1, 1, + 3, 0, 0, 4, 0, 0, 0, 18, + 0, 0, 0, 1, 4, 1, 4, 1, + 0, 3, 2, 2, 2, 1, 0, 0, + 1, 8, 0, 0, 0, 4, 12, 0, + 2, 0, 3, 0, 1, 0, 2, 0, + 1, 2, 0, 0, 3, 0, 1, 1, + 1, 2, 2, 4, 1, 6, 2, 4, + 2, 4, 1, 4, 0, 6, 1, 3, + 1, 2, 0, 2, 11, 1, 1, 1, + 0, 1, 1, 0, 2, 0, 3, 3, + 2, 1, 0, 0, 0, 1, 0, 1, + 0, 1, 1, 0, 2, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 4, 3, 2, 2, 0, + 6, 1, 0, 1, 1, 0, 2, 0, + 4, 3, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 0, + 1, 0, 3, 0, 2, 0, 0, 0, + 3, 0, 2, 1, 1, 3, 1, 0, + 0, 0, 0, 0, 5, 2, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 1, + 1, 0, 0, 35, 4, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 3, 0, 1, 0, 0, + 3, 0, 0, 1, 0, 0, 0, 0, + 28, 0, 0, 0, 0, 1, 0, 3, + 1, 4, 0, 1, 0, 0, 1, 0, + 0, 1, 0, 0, 0, 0, 1, 1, + 0, 7, 0, 0, 2, 2, 0, 11, + 0, 0, 0, 0, 0, 1, 1, 3, + 0, 0, 4, 0, 0, 0, 12, 1, + 4, 1, 5, 2, 0, 3, 2, 2, + 2, 1, 7, 0, 7, 17, 3, 0, + 2, 0, 3, 0, 0, 1, 0, 2, + 0, 1, 1, 0, 0, 0, 0, 0, + 1, 1, 1, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 1, 1, 4, + 0, 0, 0, 0, 1, 2, 1, 1, + 1, 1, 0, 1, 1, 0, 0, 2, + 0, 0, 0, 1, 32, 0, 0, 0, + 0, 1, 3, 1, 1, 1, 0, 2, + 0, 1, 1, 2, 0, 3, 0, 1, + 0, 2, 1, 2, 0, 0, 5, 1, + 4, 0, 0, 1, 43, 0, 0, 0, + 2, 3, 2, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 4, 1, 0, 15, 0, 0, + 0, 1, 6, 1, 0, 0, 1, 0, + 2, 0, 0, 0, 9, 0, 1, 1, + 0, 0, 0, 3, 0, 1, 0, 28, + 0, 0, 0, 1, 0, 1, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 2, 0, 0, 18, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 16, 36, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 2, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 28, + 0, 0, 0, 1, 1, 1, 1, 0, + 0, 2, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 4, + 0, 0, 2, 2, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 3, + 0, 0, 4, 0, 0, 0, 18, 0, + 0, 0, 1, 4, 1, 4, 1, 0, + 3, 2, 2, 2, 1, 0, 0, 1, + 8, 0, 0, 0, 4, 12, 0, 2, + 0, 3, 0, 1, 0, 2, 0, 1, + 2, 0, 0, 3, 0, 1, 1, 1, + 2, 2, 4, 1, 6, 2, 4, 2, + 4, 1, 4, 0, 6, 1, 3, 1, + 2, 0, 2, 11, 1, 1, 1, 0, + 1, 1, 0, 2, 0, 3, 3, 2, + 1, 0, 0, 0, 1, 0, 1, 0, + 1, 1, 0, 2, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 4, 3, 2, 2, 0, 6, + 1, 0, 1, 1, 0, 2, 0, 4, + 3, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 1, + 0, 3, 0, 2, 0, 0, 0, 3, + 0, 2, 1, 1, 3, 1, 0, 0, + 0, 0, 0, 5, 2, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 1, 1, + 0, 0, 35, 4, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 3, 0, 1, 0, 0, 3, + 0, 0, 1, 0, 0, 0, 0, 28, + 0, 0, 0, 0, 1, 0, 3, 1, + 4, 0, 1, 0, 0, 1, 0, 0, + 1, 0, 0, 0, 0, 1, 1, 0, + 7, 0, 0, 2, 2, 0, 11, 0, + 
0, 0, 0, 0, 1, 1, 3, 0, + 0, 4, 0, 0, 0, 12, 1, 4, + 1, 5, 2, 0, 3, 2, 2, 2, + 1, 7, 0, 7, 17, 3, 0, 2, + 0, 3, 0, 0, 1, 0, 2, 0, + 54, 2, 1, 1, 1, 1, 1, 2, + 1, 3, 2, 2, 1, 34, 1, 1, + 0, 3, 2, 0, 0, 0, 1, 2, + 4, 1, 0, 1, 0, 0, 0, 0, + 1, 1, 1, 0, 0, 1, 30, 47, + 13, 9, 3, 0, 1, 28, 2, 0, + 18, 16, 0, 6, 6, 6, 6, 5, + 4, 7, 7, 7, 6, 4, 7, 6, + 6, 6, 6, 6, 6, 6, 1, 1, + 1, 1, 0, 0, 0, 4, 4, 4, + 4, 1, 1, 0, 0, 0, 4, 2, + 1, 1, 0, 0, 0, 33, 34, 0, + 3, 2, 0, 0, 0, 1, 2, 4, + 1, 0, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 1, 30, 47, 13, + 9, 3, 0, 1, 28, 2, 0, 18, + 16, 0, +} + +var _hcltok_range_lengths []byte = []byte{ + 0, 0, 0, 0, 0, 1, 1, 1, + 5, 5, 5, 0, 0, 3, 0, 1, + 1, 4, 2, 3, 0, 1, 0, 2, + 2, 4, 2, 2, 3, 1, 1, 1, + 1, 0, 1, 1, 2, 2, 1, 4, + 6, 9, 6, 8, 5, 8, 7, 10, + 4, 6, 4, 7, 7, 5, 5, 4, + 5, 1, 2, 8, 4, 3, 3, 3, + 0, 3, 1, 2, 1, 2, 2, 3, + 3, 1, 3, 2, 2, 1, 2, 2, + 2, 3, 4, 4, 3, 1, 2, 1, + 3, 2, 2, 2, 2, 2, 3, 3, + 1, 1, 2, 1, 3, 2, 2, 3, + 2, 7, 0, 1, 4, 1, 2, 4, + 2, 1, 2, 0, 2, 2, 3, 5, + 5, 1, 4, 1, 1, 2, 2, 1, + 0, 0, 1, 1, 1, 1, 1, 2, + 2, 2, 2, 1, 1, 1, 4, 2, + 2, 3, 1, 4, 4, 6, 1, 3, + 1, 1, 2, 1, 1, 1, 5, 3, + 1, 1, 1, 2, 3, 3, 1, 2, + 2, 1, 4, 1, 2, 5, 2, 1, + 1, 0, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 1, 1, 2, 4, 2, + 1, 2, 2, 2, 6, 1, 1, 2, + 1, 2, 1, 1, 1, 2, 2, 2, + 1, 3, 2, 5, 2, 8, 6, 2, + 2, 2, 2, 3, 1, 3, 1, 2, + 1, 3, 2, 2, 3, 1, 1, 1, + 1, 1, 1, 1, 2, 2, 4, 1, + 2, 1, 0, 1, 1, 1, 1, 0, + 1, 2, 3, 1, 3, 3, 1, 0, + 3, 0, 2, 3, 1, 0, 0, 0, + 0, 2, 2, 2, 2, 1, 5, 2, + 2, 5, 7, 5, 0, 1, 0, 1, + 1, 1, 1, 1, 0, 1, 1, 0, + 3, 3, 1, 1, 2, 1, 3, 5, + 1, 1, 2, 2, 1, 1, 1, 1, + 2, 6, 3, 7, 2, 6, 1, 6, + 2, 8, 0, 4, 2, 5, 2, 3, + 3, 3, 1, 2, 8, 2, 0, 2, + 1, 2, 1, 5, 2, 1, 3, 3, + 0, 2, 1, 2, 1, 0, 1, 1, + 3, 1, 1, 2, 3, 0, 0, 3, + 2, 4, 1, 4, 1, 1, 3, 1, + 1, 1, 1, 2, 2, 1, 3, 1, + 4, 3, 3, 1, 1, 5, 2, 1, + 1, 2, 1, 2, 1, 3, 2, 0, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 1, 1, 1, 1, 1, 0, + 1, 1, 2, 2, 1, 1, 1, 3, + 2, 1, 0, 2, 1, 1, 1, 1, + 0, 3, 0, 1, 1, 4, 2, 3, + 0, 1, 0, 2, 2, 4, 2, 2, + 3, 1, 1, 1, 1, 0, 1, 1, + 2, 2, 1, 4, 6, 9, 6, 8, + 5, 8, 7, 10, 4, 6, 4, 7, + 7, 5, 5, 4, 5, 1, 2, 8, + 4, 3, 3, 3, 0, 3, 1, 2, + 1, 2, 2, 3, 3, 1, 3, 2, + 2, 1, 2, 2, 2, 3, 4, 4, + 3, 1, 2, 1, 3, 2, 2, 2, + 2, 2, 3, 3, 1, 1, 2, 1, + 3, 2, 2, 3, 2, 7, 0, 1, + 4, 1, 2, 4, 2, 1, 2, 0, + 2, 2, 3, 5, 5, 1, 4, 1, + 1, 2, 2, 1, 0, 0, 1, 1, + 1, 1, 1, 2, 2, 2, 2, 1, + 1, 1, 4, 2, 2, 3, 1, 4, + 4, 6, 1, 3, 1, 1, 2, 1, + 1, 1, 5, 3, 1, 1, 1, 2, + 3, 3, 1, 2, 2, 1, 4, 1, + 2, 5, 2, 1, 1, 0, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 1, + 1, 2, 4, 2, 1, 2, 2, 2, + 6, 1, 1, 2, 1, 2, 1, 1, + 1, 2, 2, 2, 1, 3, 2, 5, + 2, 8, 6, 2, 2, 2, 2, 3, + 1, 3, 1, 2, 1, 3, 2, 2, + 3, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 4, 1, 2, 1, 0, 1, + 1, 1, 1, 0, 1, 2, 3, 1, + 3, 3, 1, 0, 3, 0, 2, 3, + 1, 0, 0, 0, 0, 2, 2, 2, + 2, 1, 5, 2, 2, 5, 7, 5, + 0, 1, 0, 1, 1, 1, 1, 1, + 0, 1, 1, 1, 2, 2, 3, 3, + 4, 7, 5, 7, 5, 3, 3, 7, + 3, 13, 1, 3, 5, 3, 5, 3, + 6, 5, 2, 2, 8, 4, 1, 2, + 3, 2, 10, 2, 2, 0, 2, 3, + 3, 1, 2, 3, 3, 1, 2, 3, + 3, 4, 4, 2, 1, 2, 2, 3, + 2, 2, 5, 3, 2, 3, 2, 1, + 3, 3, 6, 2, 2, 5, 2, 5, + 1, 1, 2, 4, 1, 11, 1, 3, + 8, 4, 2, 1, 0, 4, 3, 3, + 3, 2, 9, 1, 1, 4, 3, 2, + 2, 2, 3, 4, 2, 3, 2, 4, + 3, 2, 2, 3, 3, 4, 3, 3, + 4, 2, 5, 4, 8, 7, 1, 2, + 1, 3, 1, 2, 5, 1, 2, 2, + 2, 2, 1, 3, 2, 2, 3, 3, + 1, 9, 1, 5, 1, 3, 2, 2, + 3, 2, 3, 3, 3, 1, 3, 3, + 2, 2, 4, 5, 3, 3, 4, 3, + 3, 3, 2, 2, 2, 4, 2, 2, + 1, 3, 3, 3, 3, 3, 3, 2, + 2, 3, 2, 3, 3, 2, 3, 2, + 3, 1, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 3, 2, 3, + 2, 3, 5, 3, 3, 1, 2, 3, + 2, 2, 1, 2, 3, 
4, 3, 0, + 3, 0, 2, 3, 1, 0, 0, 0, + 0, 2, 3, 2, 4, 6, 4, 1, + 1, 2, 1, 2, 1, 3, 2, 3, + 2, 0, 0, 1, 1, 1, 1, 1, + 0, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 0, + 3, 0, 1, 1, 4, 2, 3, 0, + 1, 0, 2, 2, 4, 2, 2, 3, + 1, 1, 1, 1, 0, 1, 1, 2, + 2, 1, 4, 6, 9, 6, 8, 5, + 8, 7, 10, 4, 6, 4, 7, 7, + 5, 5, 4, 5, 1, 2, 8, 4, + 3, 3, 3, 0, 3, 1, 2, 1, + 2, 2, 3, 3, 1, 3, 2, 2, + 1, 2, 2, 2, 3, 4, 4, 3, + 1, 2, 1, 3, 2, 2, 2, 2, + 2, 3, 3, 1, 1, 2, 1, 3, + 2, 2, 3, 2, 7, 0, 1, 4, + 1, 2, 4, 2, 1, 2, 0, 2, + 2, 3, 5, 5, 1, 4, 1, 1, + 2, 2, 1, 0, 0, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 1, 1, + 1, 4, 2, 2, 3, 1, 4, 4, + 6, 1, 3, 1, 1, 2, 1, 1, + 1, 5, 3, 1, 1, 1, 2, 3, + 3, 1, 2, 2, 1, 4, 1, 2, + 5, 2, 1, 1, 0, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 1, 1, + 2, 4, 2, 1, 2, 2, 2, 6, + 1, 1, 2, 1, 2, 1, 1, 1, + 2, 2, 2, 1, 3, 2, 5, 2, + 8, 6, 2, 2, 2, 2, 3, 1, + 3, 1, 2, 1, 3, 2, 2, 3, + 1, 1, 1, 1, 1, 1, 1, 2, + 2, 4, 1, 2, 1, 0, 1, 1, + 1, 1, 0, 1, 2, 3, 1, 3, + 3, 1, 0, 3, 0, 2, 3, 1, + 0, 0, 0, 0, 2, 2, 2, 2, + 1, 5, 2, 2, 5, 7, 5, 0, + 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 2, 3, 3, 4, + 7, 5, 7, 5, 3, 3, 7, 3, + 13, 1, 3, 5, 3, 5, 3, 6, + 5, 2, 2, 8, 4, 1, 2, 3, + 2, 10, 2, 2, 0, 2, 3, 3, + 1, 2, 3, 3, 1, 2, 3, 3, + 4, 4, 2, 1, 2, 2, 3, 2, + 2, 5, 3, 2, 3, 2, 1, 3, + 3, 6, 2, 2, 5, 2, 5, 1, + 1, 2, 4, 1, 11, 1, 3, 8, + 4, 2, 1, 0, 4, 3, 3, 3, + 2, 9, 1, 1, 4, 3, 2, 2, + 2, 3, 4, 2, 3, 2, 4, 3, + 2, 2, 3, 3, 4, 3, 3, 4, + 2, 5, 4, 8, 7, 1, 2, 1, + 3, 1, 2, 5, 1, 2, 2, 2, + 2, 1, 3, 2, 2, 3, 3, 1, + 9, 1, 5, 1, 3, 2, 2, 3, + 2, 3, 3, 3, 1, 3, 3, 2, + 2, 4, 5, 3, 3, 4, 3, 3, + 3, 2, 2, 2, 4, 2, 2, 1, + 3, 3, 3, 3, 3, 3, 2, 2, + 3, 2, 3, 3, 2, 3, 2, 3, + 1, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 3, 2, 3, 2, + 3, 5, 3, 3, 1, 2, 3, 2, + 2, 1, 2, 3, 4, 3, 0, 3, + 0, 2, 3, 1, 0, 0, 0, 0, + 2, 3, 2, 4, 6, 4, 1, 1, + 2, 1, 2, 1, 3, 2, 3, 2, + 11, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 5, 0, 0, + 1, 1, 1, 0, 1, 1, 5, 4, + 2, 0, 1, 0, 2, 2, 5, 2, + 3, 5, 3, 2, 3, 5, 1, 1, + 1, 3, 1, 1, 2, 2, 3, 1, + 2, 3, 1, 5, 5, 5, 5, 5, + 3, 5, 5, 5, 5, 3, 5, 5, + 5, 5, 5, 5, 5, 5, 0, 0, + 0, 0, 1, 1, 1, 5, 5, 5, + 5, 0, 0, 1, 1, 1, 5, 6, + 0, 0, 1, 1, 1, 8, 5, 1, + 1, 1, 0, 1, 1, 5, 4, 2, + 0, 1, 0, 2, 2, 5, 2, 3, + 5, 3, 2, 3, 5, 1, 1, 1, + 3, 1, 1, 2, 2, 3, 1, 2, + 3, 1, +} + +var _hcltok_index_offsets []int16 = []int16{ + 0, 0, 2, 4, 6, 9, 14, 18, + 20, 58, 95, 137, 139, 144, 148, 149, + 151, 153, 159, 164, 169, 171, 174, 176, + 179, 183, 189, 192, 195, 201, 203, 205, + 207, 210, 243, 245, 247, 250, 253, 256, + 264, 272, 283, 291, 300, 308, 317, 326, + 338, 345, 352, 360, 368, 377, 383, 391, + 397, 405, 407, 410, 424, 430, 438, 442, + 446, 448, 495, 497, 500, 502, 507, 513, + 519, 524, 527, 531, 534, 537, 539, 542, + 545, 548, 552, 557, 562, 566, 568, 571, + 573, 577, 580, 583, 586, 589, 593, 598, + 602, 604, 606, 609, 611, 615, 618, 621, + 629, 633, 641, 657, 659, 664, 666, 670, + 681, 685, 687, 690, 692, 695, 700, 704, + 710, 716, 727, 732, 735, 738, 741, 744, + 746, 750, 751, 754, 756, 786, 788, 790, + 793, 797, 800, 804, 806, 808, 810, 816, + 819, 822, 826, 828, 833, 838, 845, 848, + 852, 856, 858, 861, 881, 883, 885, 892, + 896, 898, 900, 902, 905, 909, 913, 915, + 919, 922, 924, 929, 947, 986, 992, 995, + 997, 999, 1001, 1004, 1007, 1010, 1013, 1016, + 1020, 1023, 1026, 1029, 1031, 1033, 1036, 1043, + 1046, 1048, 1051, 1054, 1057, 1065, 1067, 1069, + 1072, 1074, 1077, 1079, 1081, 1111, 1114, 1117, + 1120, 1123, 1128, 1132, 1139, 1142, 1151, 1160, + 1163, 1167, 1170, 1173, 1177, 1179, 1183, 1185, + 1188, 1190, 1194, 1198, 1202, 1210, 
1212, 1214, + 1218, 1222, 1224, 1237, 1239, 1242, 1245, 1250, + 1252, 1255, 1257, 1259, 1262, 1267, 1269, 1271, + 1276, 1278, 1281, 1285, 1305, 1309, 1313, 1315, + 1317, 1325, 1327, 1334, 1339, 1341, 1345, 1348, + 1351, 1354, 1358, 1361, 1364, 1368, 1378, 1384, + 1387, 1390, 1400, 1420, 1426, 1429, 1431, 1435, + 1437, 1440, 1442, 1446, 1448, 1450, 1454, 1456, + 1460, 1465, 1471, 1473, 1475, 1478, 1480, 1484, + 1491, 1494, 1496, 1499, 1503, 1533, 1538, 1540, + 1543, 1547, 1556, 1561, 1569, 1573, 1581, 1585, + 1593, 1597, 1608, 1610, 1616, 1619, 1627, 1631, + 1636, 1641, 1646, 1648, 1651, 1666, 1670, 1672, + 1675, 1677, 1726, 1729, 1736, 1739, 1741, 1745, + 1749, 1752, 1756, 1758, 1761, 1763, 1765, 1767, + 1769, 1773, 1775, 1777, 1780, 1784, 1798, 1801, + 1805, 1808, 1813, 1824, 1829, 1832, 1862, 1866, + 1869, 1874, 1876, 1880, 1883, 1886, 1888, 1893, + 1895, 1901, 1906, 1912, 1914, 1934, 1942, 1945, + 1947, 1965, 2003, 2005, 2008, 2010, 2015, 2018, + 2047, 2049, 2051, 2053, 2055, 2058, 2060, 2064, + 2067, 2069, 2072, 2074, 2076, 2079, 2081, 2083, + 2085, 2087, 2089, 2092, 2095, 2098, 2111, 2113, + 2117, 2120, 2122, 2127, 2130, 2144, 2147, 2156, + 2158, 2163, 2167, 2168, 2170, 2172, 2178, 2183, + 2188, 2190, 2193, 2195, 2198, 2202, 2208, 2211, + 2214, 2220, 2222, 2224, 2226, 2229, 2262, 2264, + 2266, 2269, 2272, 2275, 2283, 2291, 2302, 2310, + 2319, 2327, 2336, 2345, 2357, 2364, 2371, 2379, + 2387, 2396, 2402, 2410, 2416, 2424, 2426, 2429, + 2443, 2449, 2457, 2461, 2465, 2467, 2514, 2516, + 2519, 2521, 2526, 2532, 2538, 2543, 2546, 2550, + 2553, 2556, 2558, 2561, 2564, 2567, 2571, 2576, + 2581, 2585, 2587, 2590, 2592, 2596, 2599, 2602, + 2605, 2608, 2612, 2617, 2621, 2623, 2625, 2628, + 2630, 2634, 2637, 2640, 2648, 2652, 2660, 2676, + 2678, 2683, 2685, 2689, 2700, 2704, 2706, 2709, + 2711, 2714, 2719, 2723, 2729, 2735, 2746, 2751, + 2754, 2757, 2760, 2763, 2765, 2769, 2770, 2773, + 2775, 2805, 2807, 2809, 2812, 2816, 2819, 2823, + 2825, 2827, 2829, 2835, 2838, 2841, 2845, 2847, + 2852, 2857, 2864, 2867, 2871, 2875, 2877, 2880, + 2900, 2902, 2904, 2911, 2915, 2917, 2919, 2921, + 2924, 2928, 2932, 2934, 2938, 2941, 2943, 2948, + 2966, 3005, 3011, 3014, 3016, 3018, 3020, 3023, + 3026, 3029, 3032, 3035, 3039, 3042, 3045, 3048, + 3050, 3052, 3055, 3062, 3065, 3067, 3070, 3073, + 3076, 3084, 3086, 3088, 3091, 3093, 3096, 3098, + 3100, 3130, 3133, 3136, 3139, 3142, 3147, 3151, + 3158, 3161, 3170, 3179, 3182, 3186, 3189, 3192, + 3196, 3198, 3202, 3204, 3207, 3209, 3213, 3217, + 3221, 3229, 3231, 3233, 3237, 3241, 3243, 3256, + 3258, 3261, 3264, 3269, 3271, 3274, 3276, 3278, + 3281, 3286, 3288, 3290, 3295, 3297, 3300, 3304, + 3324, 3328, 3332, 3334, 3336, 3344, 3346, 3353, + 3358, 3360, 3364, 3367, 3370, 3373, 3377, 3380, + 3383, 3387, 3397, 3403, 3406, 3409, 3419, 3439, + 3445, 3448, 3450, 3454, 3456, 3459, 3461, 3465, + 3467, 3469, 3473, 3475, 3477, 3483, 3486, 3491, + 3496, 3502, 3512, 3520, 3532, 3539, 3549, 3555, + 3567, 3573, 3591, 3594, 3602, 3608, 3618, 3625, + 3632, 3640, 3648, 3651, 3656, 3676, 3682, 3685, + 3689, 3693, 3697, 3709, 3712, 3717, 3718, 3724, + 3731, 3737, 3740, 3743, 3747, 3751, 3754, 3757, + 3762, 3766, 3772, 3778, 3781, 3785, 3788, 3791, + 3796, 3799, 3802, 3808, 3812, 3815, 3819, 3822, + 3825, 3829, 3833, 3840, 3843, 3846, 3852, 3855, + 3862, 3864, 3866, 3869, 3878, 3883, 3897, 3901, + 3905, 3920, 3926, 3929, 3932, 3934, 3939, 3945, + 3949, 3957, 3963, 3973, 3976, 3979, 3984, 3988, + 3991, 3994, 3997, 4001, 4006, 4010, 4014, 4017, + 4022, 4027, 4030, 4036, 4040, 4046, 
4051, 4055, + 4059, 4067, 4070, 4078, 4084, 4094, 4105, 4108, + 4111, 4113, 4117, 4119, 4122, 4133, 4137, 4140, + 4143, 4146, 4149, 4151, 4155, 4159, 4162, 4166, + 4171, 4174, 4184, 4186, 4227, 4233, 4237, 4240, + 4243, 4247, 4250, 4254, 4258, 4263, 4265, 4269, + 4273, 4276, 4279, 4284, 4293, 4297, 4302, 4307, + 4311, 4318, 4322, 4325, 4329, 4332, 4337, 4340, + 4343, 4373, 4377, 4381, 4385, 4389, 4394, 4398, + 4404, 4408, 4416, 4419, 4424, 4428, 4431, 4436, + 4439, 4443, 4446, 4449, 4452, 4455, 4458, 4462, + 4466, 4469, 4479, 4482, 4485, 4490, 4496, 4499, + 4514, 4517, 4521, 4527, 4531, 4535, 4538, 4542, + 4549, 4552, 4555, 4561, 4564, 4568, 4573, 4589, + 4591, 4599, 4601, 4609, 4615, 4617, 4621, 4624, + 4627, 4630, 4634, 4645, 4648, 4660, 4684, 4692, + 4694, 4698, 4701, 4706, 4709, 4711, 4716, 4719, + 4725, 4728, 4730, 4732, 4734, 4736, 4738, 4740, + 4742, 4744, 4746, 4748, 4750, 4752, 4754, 4756, + 4758, 4760, 4762, 4764, 4766, 4768, 4770, 4772, + 4777, 4781, 4782, 4784, 4786, 4792, 4797, 4802, + 4804, 4807, 4809, 4812, 4816, 4822, 4825, 4828, + 4834, 4836, 4838, 4840, 4843, 4876, 4878, 4880, + 4883, 4886, 4889, 4897, 4905, 4916, 4924, 4933, + 4941, 4950, 4959, 4971, 4978, 4985, 4993, 5001, + 5010, 5016, 5024, 5030, 5038, 5040, 5043, 5057, + 5063, 5071, 5075, 5079, 5081, 5128, 5130, 5133, + 5135, 5140, 5146, 5152, 5157, 5160, 5164, 5167, + 5170, 5172, 5175, 5178, 5181, 5185, 5190, 5195, + 5199, 5201, 5204, 5206, 5210, 5213, 5216, 5219, + 5222, 5226, 5231, 5235, 5237, 5239, 5242, 5244, + 5248, 5251, 5254, 5262, 5266, 5274, 5290, 5292, + 5297, 5299, 5303, 5314, 5318, 5320, 5323, 5325, + 5328, 5333, 5337, 5343, 5349, 5360, 5365, 5368, + 5371, 5374, 5377, 5379, 5383, 5384, 5387, 5389, + 5419, 5421, 5423, 5426, 5430, 5433, 5437, 5439, + 5441, 5443, 5449, 5452, 5455, 5459, 5461, 5466, + 5471, 5478, 5481, 5485, 5489, 5491, 5494, 5514, + 5516, 5518, 5525, 5529, 5531, 5533, 5535, 5538, + 5542, 5546, 5548, 5552, 5555, 5557, 5562, 5580, + 5619, 5625, 5628, 5630, 5632, 5634, 5637, 5640, + 5643, 5646, 5649, 5653, 5656, 5659, 5662, 5664, + 5666, 5669, 5676, 5679, 5681, 5684, 5687, 5690, + 5698, 5700, 5702, 5705, 5707, 5710, 5712, 5714, + 5744, 5747, 5750, 5753, 5756, 5761, 5765, 5772, + 5775, 5784, 5793, 5796, 5800, 5803, 5806, 5810, + 5812, 5816, 5818, 5821, 5823, 5827, 5831, 5835, + 5843, 5845, 5847, 5851, 5855, 5857, 5870, 5872, + 5875, 5878, 5883, 5885, 5888, 5890, 5892, 5895, + 5900, 5902, 5904, 5909, 5911, 5914, 5918, 5938, + 5942, 5946, 5948, 5950, 5958, 5960, 5967, 5972, + 5974, 5978, 5981, 5984, 5987, 5991, 5994, 5997, + 6001, 6011, 6017, 6020, 6023, 6033, 6053, 6059, + 6062, 6064, 6068, 6070, 6073, 6075, 6079, 6081, + 6083, 6087, 6089, 6091, 6097, 6100, 6105, 6110, + 6116, 6126, 6134, 6146, 6153, 6163, 6169, 6181, + 6187, 6205, 6208, 6216, 6222, 6232, 6239, 6246, + 6254, 6262, 6265, 6270, 6290, 6296, 6299, 6303, + 6307, 6311, 6323, 6326, 6331, 6332, 6338, 6345, + 6351, 6354, 6357, 6361, 6365, 6368, 6371, 6376, + 6380, 6386, 6392, 6395, 6399, 6402, 6405, 6410, + 6413, 6416, 6422, 6426, 6429, 6433, 6436, 6439, + 6443, 6447, 6454, 6457, 6460, 6466, 6469, 6476, + 6478, 6480, 6483, 6492, 6497, 6511, 6515, 6519, + 6534, 6540, 6543, 6546, 6548, 6553, 6559, 6563, + 6571, 6577, 6587, 6590, 6593, 6598, 6602, 6605, + 6608, 6611, 6615, 6620, 6624, 6628, 6631, 6636, + 6641, 6644, 6650, 6654, 6660, 6665, 6669, 6673, + 6681, 6684, 6692, 6698, 6708, 6719, 6722, 6725, + 6727, 6731, 6733, 6736, 6747, 6751, 6754, 6757, + 6760, 6763, 6765, 6769, 6773, 6776, 6780, 6785, + 6788, 6798, 6800, 6841, 6847, 6851, 
6854, 6857, + 6861, 6864, 6868, 6872, 6877, 6879, 6883, 6887, + 6890, 6893, 6898, 6907, 6911, 6916, 6921, 6925, + 6932, 6936, 6939, 6943, 6946, 6951, 6954, 6957, + 6987, 6991, 6995, 6999, 7003, 7008, 7012, 7018, + 7022, 7030, 7033, 7038, 7042, 7045, 7050, 7053, + 7057, 7060, 7063, 7066, 7069, 7072, 7076, 7080, + 7083, 7093, 7096, 7099, 7104, 7110, 7113, 7128, + 7131, 7135, 7141, 7145, 7149, 7152, 7156, 7163, + 7166, 7169, 7175, 7178, 7182, 7187, 7203, 7205, + 7213, 7215, 7223, 7229, 7231, 7235, 7238, 7241, + 7244, 7248, 7259, 7262, 7274, 7298, 7306, 7308, + 7312, 7315, 7320, 7323, 7325, 7330, 7333, 7339, + 7342, 7408, 7411, 7413, 7415, 7417, 7419, 7421, + 7424, 7426, 7431, 7434, 7437, 7439, 7479, 7481, + 7483, 7485, 7490, 7494, 7495, 7497, 7499, 7506, + 7513, 7520, 7522, 7524, 7526, 7529, 7532, 7538, + 7541, 7546, 7553, 7558, 7561, 7565, 7572, 7604, + 7653, 7668, 7681, 7686, 7688, 7692, 7723, 7729, + 7731, 7752, 7772, 7774, 7786, 7798, 7810, 7822, + 7833, 7841, 7854, 7867, 7880, 7892, 7900, 7913, + 7925, 7937, 7949, 7961, 7973, 7985, 7997, 7999, + 8001, 8003, 8005, 8007, 8009, 8011, 8021, 8031, + 8041, 8051, 8053, 8055, 8057, 8059, 8061, 8071, + 8080, 8082, 8084, 8086, 8088, 8090, 8132, 8172, + 8174, 8179, 8183, 8184, 8186, 8188, 8195, 8202, + 8209, 8211, 8213, 8215, 8218, 8221, 8227, 8230, + 8235, 8242, 8247, 8250, 8254, 8261, 8293, 8342, + 8357, 8370, 8375, 8377, 8381, 8412, 8418, 8420, + 8441, 8461, +} + +var _hcltok_indicies []int16 = []int16{ + 2, 1, 4, 3, 6, 5, 6, 7, + 5, 9, 11, 11, 10, 8, 12, 12, + 10, 8, 10, 8, 13, 14, 15, 16, + 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, 42, + 43, 44, 45, 46, 14, 14, 17, 17, + 41, 3, 14, 15, 16, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 40, 42, 43, 44, 45, + 46, 14, 14, 17, 17, 41, 3, 47, + 48, 14, 14, 49, 16, 18, 19, 20, + 19, 50, 51, 23, 52, 25, 26, 53, + 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 40, 42, 66, 44, + 67, 68, 69, 14, 14, 14, 17, 41, + 3, 47, 3, 14, 14, 14, 14, 3, + 14, 14, 14, 3, 14, 3, 14, 3, + 14, 3, 3, 3, 3, 3, 14, 3, + 3, 3, 3, 14, 14, 14, 14, 14, + 3, 3, 14, 3, 3, 14, 3, 14, + 3, 3, 14, 3, 3, 3, 14, 14, + 14, 14, 14, 14, 3, 14, 14, 3, + 14, 14, 3, 3, 3, 3, 3, 3, + 14, 14, 3, 3, 14, 3, 14, 14, + 14, 3, 70, 71, 72, 73, 17, 74, + 75, 76, 77, 78, 79, 80, 81, 82, + 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, + 99, 100, 3, 14, 3, 14, 3, 14, + 14, 3, 14, 14, 3, 3, 3, 14, + 3, 3, 3, 3, 3, 3, 3, 14, + 3, 3, 3, 3, 3, 3, 3, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 3, 3, 3, 3, 3, 3, + 3, 3, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 3, 3, 3, 3, 3, + 3, 3, 3, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 3, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 3, + 3, 3, 3, 3, 3, 3, 3, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 3, 3, + 3, 3, 3, 3, 3, 3, 14, 14, + 14, 14, 14, 14, 3, 14, 14, 14, + 14, 14, 14, 14, 3, 14, 3, 14, + 14, 3, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 3, 14, 14, 14, 3, 14, 3, + 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, + 117, 19, 118, 119, 120, 121, 122, 123, + 124, 125, 126, 127, 128, 129, 130, 131, + 132, 133, 134, 135, 17, 18, 136, 137, + 138, 139, 140, 17, 19, 17, 3, 14, + 3, 14, 14, 3, 3, 14, 3, 3, + 3, 3, 14, 3, 3, 3, 3, 3, 
+ 14, 3, 3, 3, 3, 3, 14, 14, + 14, 14, 14, 3, 3, 3, 14, 3, + 3, 3, 14, 14, 14, 3, 3, 3, + 14, 14, 3, 3, 3, 14, 14, 14, + 3, 3, 3, 14, 14, 14, 14, 3, + 14, 14, 14, 14, 3, 3, 3, 3, + 3, 14, 14, 14, 14, 3, 3, 14, + 14, 14, 3, 3, 14, 14, 14, 14, + 3, 14, 14, 3, 14, 14, 3, 3, + 3, 14, 14, 14, 3, 3, 3, 3, + 14, 14, 14, 14, 14, 3, 3, 3, + 3, 14, 3, 14, 14, 3, 14, 14, + 3, 14, 3, 14, 14, 14, 3, 14, + 14, 3, 3, 3, 14, 3, 3, 3, + 3, 3, 3, 3, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 14, + 3, 141, 142, 143, 144, 145, 146, 147, + 148, 149, 17, 150, 151, 152, 153, 154, + 3, 14, 3, 3, 3, 3, 3, 14, + 14, 3, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 3, 3, 14, 14, + 14, 3, 3, 14, 3, 3, 14, 14, + 14, 14, 14, 3, 3, 3, 3, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 3, 155, 112, 156, 157, + 158, 17, 159, 160, 19, 17, 3, 14, + 14, 14, 14, 3, 3, 3, 14, 3, + 3, 14, 14, 14, 3, 3, 3, 14, + 14, 3, 122, 3, 19, 17, 17, 161, + 3, 17, 3, 14, 19, 162, 163, 19, + 164, 165, 19, 60, 166, 167, 168, 169, + 170, 19, 171, 172, 173, 19, 174, 175, + 176, 18, 177, 178, 179, 18, 180, 19, + 17, 3, 3, 14, 14, 3, 3, 3, + 14, 14, 14, 14, 3, 14, 14, 3, + 3, 3, 3, 14, 14, 3, 3, 14, + 14, 3, 3, 3, 3, 3, 3, 14, + 14, 14, 3, 3, 3, 14, 3, 3, + 3, 14, 14, 3, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 3, 14, 14, + 14, 14, 14, 14, 3, 3, 3, 14, + 14, 14, 14, 3, 181, 182, 3, 17, + 3, 14, 3, 3, 14, 19, 183, 184, + 185, 186, 60, 187, 188, 58, 189, 190, + 191, 192, 193, 194, 195, 196, 197, 17, + 3, 3, 14, 3, 14, 14, 14, 14, + 14, 14, 14, 3, 14, 14, 14, 3, + 14, 3, 3, 14, 3, 14, 3, 3, + 14, 14, 14, 14, 3, 14, 14, 14, + 3, 3, 14, 14, 14, 14, 3, 14, + 14, 3, 3, 14, 14, 14, 14, 14, + 3, 198, 199, 200, 201, 202, 203, 204, + 205, 206, 207, 208, 204, 209, 210, 211, + 212, 41, 3, 213, 214, 19, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 19, + 17, 224, 225, 226, 227, 19, 228, 229, + 230, 231, 232, 233, 234, 235, 236, 237, + 238, 239, 240, 241, 242, 19, 147, 17, + 243, 3, 14, 14, 14, 14, 14, 3, + 3, 3, 14, 3, 14, 14, 3, 14, + 3, 14, 14, 3, 3, 3, 14, 14, + 14, 3, 3, 3, 14, 14, 14, 3, + 3, 3, 3, 14, 3, 3, 14, 3, + 3, 14, 14, 14, 3, 3, 14, 3, + 14, 14, 14, 3, 14, 14, 14, 14, + 14, 14, 3, 3, 3, 14, 14, 3, + 14, 14, 3, 14, 14, 3, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 3, 14, 3, 14, 14, 3, + 14, 3, 14, 14, 3, 14, 3, 14, + 3, 244, 215, 245, 246, 247, 248, 249, + 250, 251, 252, 253, 101, 254, 19, 255, + 256, 257, 19, 258, 132, 259, 260, 261, + 262, 263, 264, 265, 266, 19, 3, 3, + 3, 14, 14, 14, 3, 14, 14, 3, + 14, 14, 3, 3, 3, 3, 3, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 14, 14, 3, 3, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 3, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 14, 3, 3, 3, 3, 14, 14, + 14, 3, 3, 3, 14, 3, 3, 3, + 14, 14, 3, 14, 14, 14, 3, 14, + 3, 3, 3, 14, 14, 3, 14, 14, + 14, 3, 14, 14, 14, 3, 3, 3, + 3, 14, 19, 184, 267, 268, 17, 19, + 17, 3, 3, 14, 3, 14, 19, 267, + 17, 3, 19, 269, 17, 3, 3, 14, + 19, 270, 271, 272, 175, 273, 274, 19, + 275, 276, 277, 17, 3, 3, 14, 14, + 14, 3, 14, 14, 3, 14, 14, 14, + 14, 3, 3, 14, 3, 3, 14, 14, + 3, 14, 3, 19, 17, 3, 278, 19, + 279, 3, 17, 3, 14, 3, 14, 280, + 19, 281, 282, 3, 14, 3, 3, 3, + 14, 14, 14, 14, 3, 283, 284, 285, + 19, 286, 287, 288, 289, 290, 291, 292, + 293, 294, 295, 296, 297, 298, 299, 17, + 3, 14, 14, 14, 3, 3, 3, 3, + 14, 14, 3, 3, 14, 3, 3, 3, + 3, 3, 3, 3, 14, 3, 14, 3, + 3, 3, 3, 3, 3, 14, 14, 14, + 14, 14, 3, 3, 14, 3, 3, 3, + 14, 3, 3, 14, 3, 3, 14, 3, + 3, 14, 3, 3, 3, 14, 14, 14, + 3, 3, 3, 14, 14, 14, 14, 3, + 300, 19, 301, 19, 302, 303, 304, 
305, + 17, 3, 14, 14, 14, 14, 14, 3, + 3, 3, 14, 3, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 14, 3, 306, 19, 17, 3, 14, 307, + 19, 103, 17, 3, 14, 308, 3, 17, + 3, 14, 19, 309, 17, 3, 3, 14, + 310, 3, 19, 311, 17, 3, 3, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 3, 3, + 14, 3, 14, 14, 14, 3, 14, 3, + 14, 14, 14, 3, 3, 3, 3, 3, + 3, 3, 14, 14, 14, 3, 14, 3, + 3, 3, 14, 14, 14, 14, 3, 312, + 313, 72, 314, 315, 316, 317, 318, 319, + 320, 321, 322, 323, 324, 325, 326, 327, + 328, 329, 330, 331, 332, 334, 335, 336, + 337, 338, 339, 333, 3, 14, 14, 14, + 14, 3, 14, 3, 14, 14, 3, 14, + 14, 14, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 3, 14, 14, 14, + 14, 14, 14, 14, 3, 14, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 14, + 3, 14, 14, 14, 3, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 3, + 14, 3, 14, 14, 14, 14, 14, 3, + 14, 14, 3, 14, 14, 14, 14, 14, + 14, 14, 3, 14, 14, 14, 3, 14, + 14, 14, 14, 3, 14, 14, 14, 14, + 3, 14, 14, 14, 14, 3, 14, 3, + 14, 14, 3, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 3, 14, 14, 14, 3, 14, 3, + 14, 14, 3, 14, 3, 340, 341, 342, + 104, 105, 106, 107, 108, 343, 110, 111, + 112, 113, 114, 115, 344, 345, 170, 346, + 261, 120, 347, 122, 232, 272, 125, 348, + 349, 350, 351, 352, 353, 354, 355, 356, + 357, 134, 358, 19, 17, 18, 19, 137, + 138, 139, 140, 17, 17, 3, 14, 14, + 3, 14, 14, 14, 14, 14, 14, 3, + 3, 3, 14, 3, 14, 14, 14, 14, + 3, 14, 14, 14, 3, 14, 14, 3, + 14, 14, 14, 3, 3, 14, 14, 14, + 3, 3, 14, 14, 3, 14, 3, 14, + 3, 14, 14, 14, 3, 3, 14, 14, + 3, 14, 14, 3, 14, 14, 14, 3, + 359, 143, 145, 146, 147, 148, 149, 17, + 360, 151, 361, 153, 362, 3, 14, 14, + 3, 3, 3, 3, 14, 3, 3, 14, + 14, 14, 14, 14, 3, 363, 112, 364, + 157, 158, 17, 159, 160, 19, 17, 3, + 14, 14, 14, 14, 3, 3, 3, 14, + 19, 162, 163, 19, 365, 366, 222, 311, + 166, 167, 168, 367, 170, 368, 369, 370, + 371, 372, 373, 374, 375, 376, 377, 178, + 179, 18, 378, 19, 17, 3, 3, 3, + 3, 14, 14, 14, 3, 3, 3, 3, + 3, 14, 14, 3, 14, 14, 14, 3, + 14, 14, 3, 3, 3, 14, 14, 3, + 14, 14, 14, 14, 3, 14, 3, 14, + 14, 14, 14, 14, 3, 3, 3, 3, + 3, 14, 14, 14, 14, 14, 14, 3, + 14, 3, 19, 183, 184, 379, 186, 60, + 187, 188, 58, 189, 190, 380, 17, 193, + 381, 195, 196, 197, 17, 3, 14, 14, + 14, 14, 14, 14, 14, 3, 14, 14, + 3, 14, 3, 382, 383, 200, 201, 202, + 384, 204, 205, 385, 386, 387, 204, 209, + 210, 211, 212, 41, 3, 213, 214, 19, + 215, 216, 218, 388, 220, 389, 222, 223, + 19, 17, 390, 225, 226, 227, 19, 228, + 229, 230, 231, 232, 233, 234, 235, 391, + 237, 238, 392, 240, 241, 242, 19, 147, + 17, 243, 3, 3, 14, 3, 3, 14, + 3, 14, 14, 14, 14, 14, 3, 14, + 14, 3, 393, 394, 395, 396, 397, 398, + 399, 400, 250, 401, 322, 402, 216, 403, + 404, 405, 406, 407, 404, 408, 409, 410, + 261, 411, 263, 412, 413, 274, 3, 14, + 3, 14, 3, 14, 3, 14, 3, 14, + 14, 3, 14, 3, 14, 14, 14, 3, + 14, 14, 3, 3, 14, 14, 14, 3, + 14, 3, 14, 3, 14, 14, 3, 14, + 3, 14, 3, 14, 3, 14, 3, 14, + 3, 3, 3, 14, 14, 14, 3, 14, + 14, 3, 19, 270, 232, 414, 404, 415, + 274, 19, 416, 417, 277, 17, 3, 14, + 3, 14, 14, 14, 3, 3, 3, 14, + 14, 3, 280, 19, 281, 418, 3, 14, + 14, 3, 19, 286, 287, 288, 289, 290, + 291, 292, 293, 294, 295, 419, 17, 3, + 3, 3, 14, 19, 420, 19, 268, 303, + 304, 305, 17, 3, 3, 14, 422, 422, + 422, 422, 421, 422, 422, 422, 421, 422, + 421, 422, 422, 421, 421, 421, 421, 421, + 421, 422, 421, 421, 421, 421, 422, 422, + 422, 422, 422, 421, 421, 
422, 421, 421, + 422, 421, 422, 421, 421, 422, 421, 421, + 421, 422, 422, 422, 422, 422, 422, 421, + 422, 422, 421, 422, 422, 421, 421, 421, + 421, 421, 421, 422, 422, 421, 421, 422, + 421, 422, 422, 422, 421, 423, 424, 425, + 426, 427, 428, 429, 430, 431, 432, 433, + 434, 435, 436, 437, 438, 439, 440, 441, + 442, 443, 444, 445, 446, 447, 448, 449, + 450, 451, 452, 453, 454, 421, 422, 421, + 422, 421, 422, 422, 421, 422, 422, 421, + 421, 421, 422, 421, 421, 421, 421, 421, + 421, 421, 422, 421, 421, 421, 421, 421, + 421, 421, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 421, 421, 421, + 421, 421, 421, 421, 421, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 421, 421, + 421, 421, 421, 421, 421, 421, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 421, + 422, 422, 422, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 421, 422, 422, 422, + 422, 422, 422, 421, 422, 422, 422, 422, + 422, 422, 421, 421, 421, 421, 421, 421, + 421, 421, 422, 422, 422, 422, 422, 422, + 422, 422, 421, 422, 422, 422, 422, 422, + 422, 422, 422, 421, 422, 422, 422, 422, + 422, 421, 421, 421, 421, 421, 421, 421, + 421, 422, 422, 422, 422, 422, 422, 421, + 422, 422, 422, 422, 422, 422, 422, 421, + 422, 421, 422, 422, 421, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 421, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 421, 422, 422, 422, + 421, 422, 421, 455, 456, 457, 458, 459, + 460, 461, 462, 463, 464, 465, 466, 467, + 468, 469, 470, 471, 472, 473, 474, 475, + 476, 477, 478, 479, 480, 481, 482, 483, + 484, 485, 486, 487, 488, 489, 490, 427, + 491, 492, 493, 494, 495, 496, 427, 472, + 427, 421, 422, 421, 422, 422, 421, 421, + 422, 421, 421, 421, 421, 422, 421, 421, + 421, 421, 421, 422, 421, 421, 421, 421, + 421, 422, 422, 422, 422, 422, 421, 421, + 421, 422, 421, 421, 421, 422, 422, 422, + 421, 421, 421, 422, 422, 421, 421, 421, + 422, 422, 422, 421, 421, 421, 422, 422, + 422, 422, 421, 422, 422, 422, 422, 421, + 421, 421, 421, 421, 422, 422, 422, 422, + 421, 421, 422, 422, 422, 421, 421, 422, + 422, 422, 422, 421, 422, 422, 421, 422, + 422, 421, 421, 421, 422, 422, 422, 421, + 421, 421, 421, 422, 422, 422, 422, 422, + 421, 421, 421, 421, 422, 421, 422, 422, + 421, 422, 422, 421, 422, 421, 422, 422, + 422, 421, 422, 422, 421, 421, 421, 422, + 421, 421, 421, 421, 421, 421, 421, 422, + 422, 422, 422, 421, 422, 422, 422, 422, + 422, 422, 422, 421, 497, 498, 499, 500, + 501, 502, 503, 504, 505, 427, 506, 507, + 508, 509, 510, 421, 422, 421, 421, 421, + 421, 421, 422, 422, 421, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 421, 422, 422, 422, 421, + 421, 422, 422, 422, 421, 421, 422, 421, + 421, 422, 422, 422, 422, 422, 421, 421, + 421, 421, 422, 422, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 421, 511, + 466, 512, 513, 514, 427, 515, 516, 472, + 427, 421, 422, 422, 422, 422, 421, 421, + 421, 422, 421, 421, 422, 422, 422, 421, + 421, 421, 422, 422, 421, 477, 421, 472, + 427, 427, 517, 421, 427, 421, 422, 472, + 518, 519, 472, 520, 521, 472, 522, 523, + 524, 525, 526, 527, 472, 528, 529, 530, + 472, 531, 532, 533, 491, 534, 535, 536, + 491, 537, 472, 427, 421, 421, 422, 422, + 421, 421, 421, 422, 422, 422, 422, 421, + 422, 422, 421, 421, 421, 421, 422, 422, + 421, 421, 422, 422, 421, 421, 421, 421, + 421, 421, 422, 422, 422, 421, 421, 421, + 422, 421, 421, 421, 422, 422, 421, 422, + 422, 422, 422, 421, 422, 422, 422, 422, + 421, 422, 422, 422, 422, 422, 422, 421, + 421, 421, 
422, 422, 422, 422, 421, 538, + 539, 421, 427, 421, 422, 421, 421, 422, + 472, 540, 541, 542, 543, 522, 544, 545, + 546, 547, 548, 549, 550, 551, 552, 553, + 554, 555, 427, 421, 421, 422, 421, 422, + 422, 422, 422, 422, 422, 422, 421, 422, + 422, 422, 421, 422, 421, 421, 422, 421, + 422, 421, 421, 422, 422, 422, 422, 421, + 422, 422, 422, 421, 421, 422, 422, 422, + 422, 421, 422, 422, 421, 421, 422, 422, + 422, 422, 422, 421, 556, 557, 558, 559, + 560, 561, 562, 563, 564, 565, 566, 562, + 568, 569, 570, 571, 567, 421, 572, 573, + 472, 574, 575, 576, 577, 578, 579, 580, + 581, 582, 472, 427, 583, 584, 585, 586, + 472, 587, 588, 589, 590, 591, 592, 593, + 594, 595, 596, 597, 598, 599, 600, 601, + 472, 503, 427, 602, 421, 422, 422, 422, + 422, 422, 421, 421, 421, 422, 421, 422, + 422, 421, 422, 421, 422, 422, 421, 421, + 421, 422, 422, 422, 421, 421, 421, 422, + 422, 422, 421, 421, 421, 421, 422, 421, + 421, 422, 421, 421, 422, 422, 422, 421, + 421, 422, 421, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 422, 421, 421, 421, + 422, 422, 421, 422, 422, 421, 422, 422, + 421, 422, 422, 421, 422, 422, 422, 422, + 422, 422, 422, 421, 422, 421, 422, 421, + 422, 422, 421, 422, 421, 422, 422, 421, + 422, 421, 422, 421, 603, 574, 604, 605, + 606, 607, 608, 609, 610, 611, 612, 455, + 613, 472, 614, 615, 616, 472, 617, 487, + 618, 619, 620, 621, 622, 623, 624, 625, + 472, 421, 421, 421, 422, 422, 422, 421, + 422, 422, 421, 422, 422, 421, 421, 421, + 421, 421, 422, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 422, 421, 421, 421, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 421, 422, 422, 422, 422, 422, 422, + 422, 422, 421, 422, 422, 421, 421, 421, + 421, 422, 422, 422, 421, 421, 421, 422, + 421, 421, 421, 422, 422, 421, 422, 422, + 422, 421, 422, 421, 421, 421, 422, 422, + 421, 422, 422, 422, 421, 422, 422, 422, + 421, 421, 421, 421, 422, 472, 541, 626, + 627, 427, 472, 427, 421, 421, 422, 421, + 422, 472, 626, 427, 421, 472, 628, 427, + 421, 421, 422, 472, 629, 630, 631, 532, + 632, 633, 472, 634, 635, 636, 427, 421, + 421, 422, 422, 422, 421, 422, 422, 421, + 422, 422, 422, 422, 421, 421, 422, 421, + 421, 422, 422, 421, 422, 421, 472, 427, + 421, 637, 472, 638, 421, 427, 421, 422, + 421, 422, 639, 472, 640, 641, 421, 422, + 421, 421, 421, 422, 422, 422, 422, 421, + 642, 643, 644, 472, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 427, 421, 422, 422, 422, 421, + 421, 421, 421, 422, 422, 421, 421, 422, + 421, 421, 421, 421, 421, 421, 421, 422, + 421, 422, 421, 421, 421, 421, 421, 421, + 422, 422, 422, 422, 422, 421, 421, 422, + 421, 421, 421, 422, 421, 421, 422, 421, + 421, 422, 421, 421, 422, 421, 421, 421, + 422, 422, 422, 421, 421, 421, 422, 422, + 422, 422, 421, 659, 472, 660, 472, 661, + 662, 663, 664, 427, 421, 422, 422, 422, + 422, 422, 421, 421, 421, 422, 421, 421, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 421, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 421, 422, + 422, 422, 422, 422, 421, 665, 472, 427, + 421, 422, 666, 472, 457, 427, 421, 422, + 667, 421, 427, 421, 422, 472, 668, 427, + 421, 421, 422, 669, 421, 472, 670, 427, + 421, 421, 422, 672, 671, 422, 422, 422, + 422, 672, 671, 422, 672, 671, 672, 672, + 422, 672, 671, 422, 672, 422, 672, 671, + 422, 672, 422, 672, 422, 671, 672, 672, + 672, 672, 672, 672, 672, 672, 671, 422, + 422, 672, 672, 422, 672, 422, 672, 671, + 672, 672, 672, 672, 672, 422, 672, 422, + 672, 422, 672, 671, 672, 672, 422, 672, + 422, 672, 671, 672, 672, 672, 672, 
672, + 422, 672, 422, 672, 671, 422, 422, 672, + 422, 672, 671, 672, 672, 672, 422, 672, + 422, 672, 422, 672, 422, 672, 671, 672, + 422, 672, 422, 672, 671, 422, 672, 672, + 672, 672, 422, 672, 422, 672, 422, 672, + 422, 672, 422, 672, 422, 672, 671, 422, + 672, 671, 672, 672, 672, 422, 672, 422, + 672, 671, 672, 422, 672, 422, 672, 671, + 422, 672, 672, 672, 672, 422, 672, 422, + 672, 671, 422, 672, 422, 672, 422, 672, + 671, 672, 672, 422, 672, 422, 672, 671, + 422, 672, 422, 672, 422, 672, 422, 671, + 672, 672, 672, 422, 672, 422, 672, 671, + 422, 672, 671, 672, 672, 422, 672, 671, + 672, 672, 672, 422, 672, 672, 672, 672, + 672, 672, 422, 422, 672, 422, 672, 422, + 672, 422, 672, 671, 672, 422, 672, 422, + 672, 671, 422, 672, 671, 672, 422, 672, + 671, 672, 422, 672, 671, 422, 422, 672, + 671, 422, 672, 422, 672, 422, 672, 422, + 672, 422, 672, 422, 671, 672, 672, 422, + 672, 672, 672, 672, 422, 422, 672, 672, + 672, 672, 672, 422, 672, 672, 672, 672, + 672, 671, 422, 672, 672, 422, 672, 422, + 671, 672, 672, 422, 672, 671, 422, 422, + 672, 422, 671, 672, 672, 671, 422, 672, + 422, 671, 672, 671, 422, 672, 422, 672, + 422, 671, 672, 672, 671, 422, 672, 422, + 672, 422, 672, 671, 672, 422, 672, 422, + 672, 671, 422, 672, 671, 422, 422, 672, + 671, 672, 422, 671, 672, 671, 422, 672, + 422, 672, 422, 671, 672, 671, 422, 422, + 672, 671, 672, 422, 672, 422, 672, 671, + 422, 672, 422, 671, 672, 671, 422, 422, + 672, 422, 671, 672, 671, 422, 422, 672, + 671, 672, 422, 672, 671, 672, 422, 672, + 671, 672, 422, 672, 422, 672, 422, 671, + 672, 671, 422, 422, 672, 671, 672, 422, + 672, 422, 672, 671, 422, 672, 671, 672, + 672, 422, 672, 422, 672, 671, 671, 422, + 671, 422, 672, 672, 422, 672, 672, 672, + 672, 672, 672, 672, 671, 422, 672, 672, + 672, 422, 671, 672, 672, 672, 422, 672, + 422, 672, 422, 672, 422, 672, 422, 672, + 671, 422, 422, 672, 671, 672, 422, 672, + 671, 422, 422, 672, 422, 422, 422, 672, + 422, 672, 422, 672, 422, 672, 422, 671, + 422, 672, 422, 672, 422, 671, 672, 671, + 422, 672, 422, 671, 672, 422, 672, 672, + 672, 671, 422, 672, 422, 422, 672, 422, + 671, 672, 672, 671, 422, 672, 672, 672, + 672, 422, 672, 422, 671, 672, 672, 672, + 422, 672, 671, 672, 422, 672, 422, 672, + 422, 672, 422, 672, 671, 672, 672, 422, + 672, 671, 422, 672, 422, 672, 422, 671, + 672, 672, 671, 422, 672, 422, 671, 672, + 671, 422, 672, 671, 422, 672, 422, 672, + 671, 672, 672, 672, 671, 422, 422, 422, + 672, 671, 422, 672, 422, 671, 672, 671, + 422, 672, 422, 672, 422, 671, 672, 672, + 672, 671, 422, 672, 422, 671, 672, 672, + 672, 672, 671, 422, 672, 422, 672, 671, + 422, 422, 672, 422, 672, 671, 672, 422, + 672, 422, 671, 672, 672, 671, 422, 672, + 422, 672, 671, 422, 672, 672, 672, 422, + 672, 422, 671, 422, 672, 671, 672, 422, + 422, 672, 422, 672, 422, 671, 672, 672, + 672, 672, 671, 422, 672, 422, 672, 422, + 672, 422, 672, 422, 672, 671, 672, 672, + 672, 422, 672, 422, 672, 422, 672, 422, + 671, 672, 672, 422, 422, 672, 671, 672, + 422, 672, 672, 671, 422, 672, 422, 672, + 671, 422, 422, 672, 672, 672, 672, 422, + 672, 422, 672, 422, 671, 672, 672, 422, + 671, 672, 671, 422, 672, 422, 671, 672, + 671, 422, 672, 422, 671, 672, 422, 672, + 672, 671, 422, 672, 672, 422, 671, 672, + 671, 422, 672, 422, 672, 671, 672, 422, + 672, 422, 671, 672, 671, 422, 672, 422, + 672, 422, 672, 422, 672, 422, 672, 671, + 673, 671, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 683, 684, 676, 685, 686, + 687, 688, 689, 676, 690, 691, 692, 693, + 694, 695, 696, 697, 698, 699, 700, 701, + 702, 703, 704, 676, 
705, 673, 685, 673, + 706, 673, 671, 672, 672, 672, 672, 422, + 671, 672, 672, 671, 422, 672, 671, 422, + 422, 672, 671, 422, 672, 422, 671, 672, + 671, 422, 422, 672, 422, 671, 672, 672, + 671, 422, 672, 672, 672, 671, 422, 672, + 422, 672, 672, 671, 422, 422, 672, 422, + 671, 672, 671, 422, 672, 671, 422, 422, + 672, 422, 672, 671, 422, 672, 422, 422, + 672, 422, 672, 422, 671, 672, 672, 671, + 422, 672, 672, 422, 672, 671, 422, 672, + 422, 672, 671, 422, 672, 422, 671, 422, + 672, 672, 672, 422, 672, 671, 672, 422, + 672, 671, 422, 672, 671, 672, 422, 672, + 671, 422, 672, 671, 422, 672, 422, 672, + 671, 422, 672, 671, 422, 672, 671, 707, + 708, 709, 710, 711, 712, 713, 714, 715, + 716, 717, 718, 678, 719, 720, 721, 722, + 723, 720, 724, 725, 726, 727, 728, 729, + 730, 731, 732, 673, 671, 672, 422, 672, + 671, 672, 422, 672, 671, 672, 422, 672, + 671, 672, 422, 672, 671, 422, 672, 422, + 672, 671, 672, 422, 672, 671, 672, 422, + 422, 422, 672, 671, 672, 422, 672, 671, + 672, 672, 672, 672, 422, 672, 422, 671, + 672, 671, 422, 422, 672, 422, 672, 671, + 672, 422, 672, 671, 422, 672, 671, 672, + 672, 422, 672, 671, 422, 672, 671, 672, + 422, 672, 671, 422, 672, 671, 422, 672, + 671, 422, 672, 671, 672, 671, 422, 422, + 672, 671, 672, 422, 672, 671, 422, 672, + 422, 671, 672, 671, 422, 676, 733, 673, + 676, 734, 676, 735, 685, 673, 671, 672, + 671, 422, 672, 671, 422, 676, 734, 685, + 673, 671, 676, 736, 673, 685, 673, 671, + 672, 671, 422, 676, 737, 694, 738, 720, + 739, 732, 676, 740, 741, 742, 673, 685, + 673, 671, 672, 671, 422, 672, 422, 672, + 671, 422, 672, 422, 672, 422, 671, 672, + 672, 671, 422, 672, 422, 672, 671, 422, + 672, 671, 676, 685, 427, 671, 743, 676, + 744, 685, 673, 671, 427, 672, 671, 422, + 672, 671, 422, 745, 676, 746, 747, 673, + 671, 422, 672, 671, 672, 672, 671, 422, + 422, 672, 422, 672, 671, 676, 748, 749, + 750, 751, 752, 753, 754, 755, 756, 757, + 758, 673, 685, 673, 671, 672, 422, 672, + 672, 672, 672, 672, 672, 672, 422, 672, + 422, 672, 672, 672, 672, 672, 672, 671, + 422, 672, 672, 422, 672, 422, 671, 672, + 422, 672, 672, 672, 422, 672, 672, 422, + 672, 672, 422, 672, 672, 422, 672, 672, + 671, 422, 676, 759, 676, 735, 760, 761, + 762, 673, 685, 673, 671, 672, 671, 422, + 672, 672, 672, 422, 672, 672, 672, 422, + 672, 422, 672, 671, 422, 422, 422, 422, + 672, 672, 422, 422, 422, 422, 422, 672, + 672, 672, 672, 672, 672, 672, 422, 672, + 422, 672, 422, 671, 672, 672, 672, 422, + 672, 422, 672, 671, 685, 427, 763, 676, + 685, 427, 672, 671, 422, 764, 676, 765, + 685, 427, 672, 671, 422, 672, 422, 766, + 685, 673, 671, 427, 672, 671, 422, 676, + 767, 673, 685, 673, 671, 672, 671, 422, + 768, 769, 768, 770, 771, 768, 772, 768, + 773, 768, 771, 774, 775, 774, 777, 776, + 778, 779, 778, 780, 781, 776, 782, 776, + 783, 778, 784, 779, 785, 780, 787, 786, + 788, 789, 789, 786, 790, 786, 791, 788, + 792, 789, 793, 789, 795, 795, 795, 795, + 794, 795, 795, 795, 794, 795, 794, 795, + 795, 794, 794, 794, 794, 794, 794, 795, + 794, 794, 794, 794, 795, 795, 795, 795, + 795, 794, 794, 795, 794, 794, 795, 794, + 795, 794, 794, 795, 794, 794, 794, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 794, + 794, 795, 795, 794, 794, 795, 794, 795, + 795, 795, 794, 797, 798, 799, 800, 801, + 802, 803, 804, 805, 806, 807, 808, 809, + 810, 811, 812, 813, 814, 815, 816, 817, + 818, 819, 820, 821, 822, 823, 824, 825, + 826, 827, 828, 794, 795, 794, 795, 794, + 795, 795, 794, 795, 795, 794, 794, 794, + 795, 794, 794, 794, 794, 794, 794, 794, + 795, 
794, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 794, 794, + 794, 794, 794, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 794, 794, 794, 794, + 794, 794, 794, 794, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 794, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 794, 794, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 794, + 794, 794, 794, 794, 794, 794, 794, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 795, 795, 795, 795, 795, 794, 795, 794, + 795, 795, 794, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 794, 795, 795, 795, 794, 795, + 794, 829, 830, 831, 832, 833, 834, 835, + 836, 837, 838, 839, 840, 841, 842, 843, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 855, 856, 857, 858, 859, + 860, 861, 862, 863, 864, 801, 865, 866, + 867, 868, 869, 870, 801, 846, 801, 794, + 795, 794, 795, 795, 794, 794, 795, 794, + 794, 794, 794, 795, 794, 794, 794, 794, + 794, 795, 794, 794, 794, 794, 794, 795, + 795, 795, 795, 795, 794, 794, 794, 795, + 794, 794, 794, 795, 795, 795, 794, 794, + 794, 795, 795, 794, 794, 794, 795, 795, + 795, 794, 794, 794, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 794, 794, 794, + 794, 794, 795, 795, 795, 795, 794, 794, + 795, 795, 795, 794, 794, 795, 795, 795, + 795, 794, 795, 795, 794, 795, 795, 794, + 794, 794, 795, 795, 795, 794, 794, 794, + 794, 795, 795, 795, 795, 795, 794, 794, + 794, 794, 795, 794, 795, 795, 794, 795, + 795, 794, 795, 794, 795, 795, 795, 794, + 795, 795, 794, 794, 794, 795, 794, 794, + 794, 794, 794, 794, 794, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 795, 794, 871, 872, 873, 874, 875, 876, + 877, 878, 879, 801, 880, 881, 882, 883, + 884, 794, 795, 794, 794, 794, 794, 794, + 795, 795, 794, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 794, 794, 795, + 795, 795, 794, 794, 795, 794, 794, 795, + 795, 795, 795, 795, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 794, 885, 840, 886, + 887, 888, 801, 889, 890, 846, 801, 794, + 795, 795, 795, 795, 794, 794, 794, 795, + 794, 794, 795, 795, 795, 794, 794, 794, + 795, 795, 794, 851, 794, 846, 801, 801, + 891, 794, 801, 794, 795, 846, 892, 893, + 846, 894, 895, 846, 896, 897, 898, 899, + 900, 901, 846, 902, 903, 904, 846, 905, + 906, 907, 865, 908, 909, 910, 865, 911, + 846, 801, 794, 794, 795, 795, 794, 794, + 794, 795, 795, 795, 795, 794, 795, 795, + 794, 794, 794, 794, 795, 795, 794, 794, + 795, 795, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 794, 794, 794, 795, 794, + 794, 794, 795, 795, 794, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 794, 794, 794, + 795, 795, 795, 795, 794, 912, 913, 794, + 801, 794, 795, 794, 794, 795, 846, 914, + 915, 916, 917, 896, 918, 919, 920, 921, + 922, 923, 924, 925, 926, 927, 928, 929, + 801, 794, 794, 795, 794, 795, 795, 795, + 795, 795, 795, 795, 794, 795, 795, 795, + 794, 795, 794, 794, 795, 794, 795, 794, + 794, 795, 795, 795, 795, 794, 795, 795, + 795, 794, 794, 795, 795, 795, 795, 794, + 795, 795, 794, 794, 795, 795, 795, 795, + 795, 794, 930, 931, 932, 933, 934, 935, + 936, 937, 938, 939, 940, 936, 
942, 943, + 944, 945, 941, 794, 946, 947, 846, 948, + 949, 950, 951, 952, 953, 954, 955, 956, + 846, 801, 957, 958, 959, 960, 846, 961, + 962, 963, 964, 965, 966, 967, 968, 969, + 970, 971, 972, 973, 974, 975, 846, 877, + 801, 976, 794, 795, 795, 795, 795, 795, + 794, 794, 794, 795, 794, 795, 795, 794, + 795, 794, 795, 795, 794, 794, 794, 795, + 795, 795, 794, 794, 794, 795, 795, 795, + 794, 794, 794, 794, 795, 794, 794, 795, + 794, 794, 795, 795, 795, 794, 794, 795, + 794, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 795, 795, + 794, 795, 795, 794, 795, 795, 794, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 794, 795, 794, 795, 795, + 794, 795, 794, 795, 795, 794, 795, 794, + 795, 794, 977, 948, 978, 979, 980, 981, + 982, 983, 984, 985, 986, 829, 987, 846, + 988, 989, 990, 846, 991, 861, 992, 993, + 994, 995, 996, 997, 998, 999, 846, 794, + 794, 794, 795, 795, 795, 794, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 795, + 795, 795, 794, 794, 794, 795, 794, 794, + 794, 795, 795, 794, 795, 795, 795, 794, + 795, 794, 794, 794, 795, 795, 794, 795, + 795, 795, 794, 795, 795, 795, 794, 794, + 794, 794, 795, 846, 915, 1000, 1001, 801, + 846, 801, 794, 794, 795, 794, 795, 846, + 1000, 801, 794, 846, 1002, 801, 794, 794, + 795, 846, 1003, 1004, 1005, 906, 1006, 1007, + 846, 1008, 1009, 1010, 801, 794, 794, 795, + 795, 795, 794, 795, 795, 794, 795, 795, + 795, 795, 794, 794, 795, 794, 794, 795, + 795, 794, 795, 794, 846, 801, 794, 1011, + 846, 1012, 794, 801, 794, 795, 794, 795, + 1013, 846, 1014, 1015, 794, 795, 794, 794, + 794, 795, 795, 795, 795, 794, 1016, 1017, + 1018, 846, 1019, 1020, 1021, 1022, 1023, 1024, + 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, + 801, 794, 795, 795, 795, 794, 794, 794, + 794, 795, 795, 794, 794, 795, 794, 794, + 794, 794, 794, 794, 794, 795, 794, 795, + 794, 794, 794, 794, 794, 794, 795, 795, + 795, 795, 795, 794, 794, 795, 794, 794, + 794, 795, 794, 794, 795, 794, 794, 795, + 794, 794, 795, 794, 794, 794, 795, 795, + 795, 794, 794, 794, 795, 795, 795, 795, + 794, 1033, 846, 1034, 846, 1035, 1036, 1037, + 1038, 801, 794, 795, 795, 795, 795, 795, + 794, 794, 794, 795, 794, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 794, 1039, 846, 801, 794, 795, + 1040, 846, 831, 801, 794, 795, 1041, 794, + 801, 794, 795, 846, 1042, 801, 794, 794, + 795, 1043, 794, 846, 1044, 801, 794, 794, + 795, 1046, 1045, 795, 795, 795, 795, 1046, + 1045, 795, 1046, 1045, 1046, 1046, 795, 1046, + 1045, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1045, 795, 795, 1046, + 1046, 795, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 1046, 1046, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 1046, 795, 1046, 795, 1046, + 1045, 1046, 1046, 1046, 1046, 1046, 795, 1046, + 795, 1046, 1045, 795, 795, 1046, 795, 1046, + 1045, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 1045, 1046, 795, 1046, + 795, 1046, 1045, 795, 1046, 1046, 1046, 1046, + 795, 1046, 795, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 1045, 795, 1046, 1045, + 1046, 1046, 1046, 795, 1046, 795, 1046, 1045, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 
1046, 1046, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 795, 1046, 795, 1046, 1045, 1046, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1046, 795, 1046, 795, 1045, 1046, 1046, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 1045, 1046, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 1046, 1046, 1046, + 795, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 795, 795, 1046, 1045, 795, + 1046, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 795, 1046, 1046, + 1046, 1046, 795, 795, 1046, 1046, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 1046, 1046, 1045, + 795, 1046, 1046, 795, 1046, 795, 1045, 1046, + 1046, 795, 1046, 1045, 795, 795, 1046, 795, + 1045, 1046, 1046, 1045, 795, 1046, 795, 1045, + 1046, 1045, 795, 1046, 795, 1046, 795, 1045, + 1046, 1046, 1045, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 1045, 795, 795, 1046, 1045, 1046, + 795, 1045, 1046, 1045, 795, 1046, 795, 1046, + 795, 1045, 1046, 1045, 795, 795, 1046, 1045, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1045, 1046, 1045, 795, 795, 1046, 795, + 1045, 1046, 1045, 795, 795, 1046, 1045, 1046, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 795, 1046, 795, 1045, 1046, 1045, + 795, 795, 1046, 1045, 1046, 795, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 1046, 795, + 1046, 795, 1046, 1045, 1045, 795, 1045, 795, + 1046, 1046, 795, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1045, 795, 1046, 1046, 1046, 795, + 1045, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 795, 1046, 1045, 795, + 795, 1046, 1045, 1046, 795, 1046, 1045, 795, + 795, 1046, 795, 795, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 795, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1045, 795, 1046, + 795, 1045, 1046, 795, 1046, 1046, 1046, 1045, + 795, 1046, 795, 795, 1046, 795, 1045, 1046, + 1046, 1045, 795, 1046, 1046, 1046, 1046, 795, + 1046, 795, 1045, 1046, 1046, 1046, 795, 1046, + 1045, 1046, 795, 1046, 795, 1046, 795, 1046, + 795, 1046, 1045, 1046, 1046, 795, 1046, 1045, + 795, 1046, 795, 1046, 795, 1045, 1046, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 1046, 1045, 795, 1046, 795, 1046, 1045, 1046, + 1046, 1046, 1045, 795, 795, 795, 1046, 1045, + 795, 1046, 795, 1045, 1046, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1045, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1045, 795, 1046, 795, 1046, 1045, 795, 795, + 1046, 795, 1046, 1045, 1046, 795, 1046, 795, + 1045, 1046, 1046, 1045, 795, 1046, 795, 1046, + 1045, 795, 1046, 1046, 1046, 795, 1046, 795, + 1045, 795, 1046, 1045, 1046, 795, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1045, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 795, 1046, 1045, 1046, 1046, 1046, 795, + 1046, 795, 1046, 795, 1046, 795, 1045, 1046, + 1046, 795, 795, 1046, 1045, 1046, 795, 1046, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 795, 1046, 1046, 1046, 1046, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 795, 1045, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 1046, 795, 1045, 1046, 795, 1046, 1046, 1045, + 795, 1046, 1046, 795, 1045, 1046, 1045, 795, + 1046, 795, 1046, 1045, 1046, 795, 1046, 795, + 1045, 1046, 1045, 795, 1046, 795, 1046, 795, + 1046, 795, 1046, 795, 1046, 1045, 1047, 1045, + 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, + 1056, 1057, 1058, 1050, 1059, 1060, 1061, 1062, + 1063, 1050, 1064, 1065, 1066, 1067, 1068, 1069, + 
1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1050, 1079, 1047, 1059, 1047, 1080, 1047, + 1045, 1046, 1046, 1046, 1046, 795, 1045, 1046, + 1046, 1045, 795, 1046, 1045, 795, 795, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 795, 1046, 795, 1045, 1046, 1046, 1045, 795, + 1046, 1046, 1046, 1045, 795, 1046, 795, 1046, + 1046, 1045, 795, 795, 1046, 795, 1045, 1046, + 1045, 795, 1046, 1045, 795, 795, 1046, 795, + 1046, 1045, 795, 1046, 795, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 1045, 795, 1046, + 1046, 795, 1046, 1045, 795, 1046, 795, 1046, + 1045, 795, 1046, 795, 1045, 795, 1046, 1046, + 1046, 795, 1046, 1045, 1046, 795, 1046, 1045, + 795, 1046, 1045, 1046, 795, 1046, 1045, 795, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 1046, 1045, 795, 1046, 1045, 1081, 1082, 1083, + 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, + 1092, 1052, 1093, 1094, 1095, 1096, 1097, 1094, + 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, + 1106, 1047, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 795, 1046, 795, 1046, 1045, + 1046, 795, 1046, 1045, 1046, 795, 795, 795, + 1046, 1045, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 1046, 795, 1046, 795, 1045, 1046, 1045, + 795, 795, 1046, 795, 1046, 1045, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 795, 1046, + 1045, 795, 1046, 1045, 795, 1046, 1045, 795, + 1046, 1045, 1046, 1045, 795, 795, 1046, 1045, + 1046, 795, 1046, 1045, 795, 1046, 795, 1045, + 1046, 1045, 795, 1050, 1107, 1047, 1050, 1108, + 1050, 1109, 1059, 1047, 1045, 1046, 1045, 795, + 1046, 1045, 795, 1050, 1108, 1059, 1047, 1045, + 1050, 1110, 1047, 1059, 1047, 1045, 1046, 1045, + 795, 1050, 1111, 1068, 1112, 1094, 1113, 1106, + 1050, 1114, 1115, 1116, 1047, 1059, 1047, 1045, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 1046, 795, 1046, 795, 1045, 1046, 1046, 1045, + 795, 1046, 795, 1046, 1045, 795, 1046, 1045, + 1050, 1059, 801, 1045, 1117, 1050, 1118, 1059, + 1047, 1045, 801, 1046, 1045, 795, 1046, 1045, + 795, 1119, 1050, 1120, 1121, 1047, 1045, 795, + 1046, 1045, 1046, 1046, 1045, 795, 795, 1046, + 795, 1046, 1045, 1050, 1122, 1123, 1124, 1125, + 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1047, + 1059, 1047, 1045, 1046, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 795, 1046, 795, 1046, + 1046, 1046, 1046, 1046, 1046, 1045, 795, 1046, + 1046, 795, 1046, 795, 1045, 1046, 795, 1046, + 1046, 1046, 795, 1046, 1046, 795, 1046, 1046, + 795, 1046, 1046, 795, 1046, 1046, 1045, 795, + 1050, 1133, 1050, 1109, 1134, 1135, 1136, 1047, + 1059, 1047, 1045, 1046, 1045, 795, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 795, 1046, 795, + 1046, 1045, 795, 795, 795, 795, 1046, 1046, + 795, 795, 795, 795, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1045, 1046, 1046, 1046, 795, 1046, 795, + 1046, 1045, 1059, 801, 1137, 1050, 1059, 801, + 1046, 1045, 795, 1138, 1050, 1139, 1059, 801, + 1046, 1045, 795, 1046, 795, 1140, 1059, 1047, + 1045, 801, 1046, 1045, 795, 1050, 1141, 1047, + 1059, 1047, 1045, 1046, 1045, 795, 1142, 1143, + 1144, 1142, 1145, 1146, 1147, 1148, 1149, 1150, + 1151, 1152, 1153, 1154, 672, 672, 422, 1155, + 1156, 1157, 1158, 672, 1161, 1162, 1164, 1165, + 1166, 1160, 1167, 1168, 1169, 1170, 1171, 1172, + 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, + 1181, 1182, 1183, 1184, 1185, 1186, 1188, 1189, + 1190, 1191, 1192, 1193, 672, 1148, 10, 1148, + 422, 1148, 422, 1160, 1163, 1187, 1194, 1159, + 1142, 1142, 1195, 1143, 1196, 1198, 1197, 
2, + 1, 1199, 1197, 1200, 1197, 5, 1, 1197, + 6, 5, 9, 11, 11, 10, 1202, 1203, + 1204, 1197, 1205, 1206, 1197, 1207, 1197, 422, + 422, 1209, 1210, 491, 472, 1211, 472, 1212, + 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, + 1221, 1222, 546, 1223, 522, 1224, 1225, 1226, + 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, + 1235, 422, 422, 422, 427, 567, 1208, 1236, + 1197, 1237, 1197, 672, 1238, 422, 422, 422, + 672, 1238, 672, 672, 422, 1238, 422, 1238, + 422, 1238, 422, 672, 672, 672, 672, 672, + 1238, 422, 672, 672, 672, 422, 672, 422, + 1238, 422, 672, 672, 672, 672, 422, 1238, + 672, 422, 672, 422, 672, 422, 672, 672, + 422, 672, 1238, 422, 672, 422, 672, 422, + 672, 1238, 672, 422, 1238, 672, 422, 672, + 422, 1238, 672, 672, 672, 672, 672, 1238, + 422, 422, 672, 422, 672, 1238, 672, 422, + 1238, 672, 672, 1238, 422, 422, 672, 422, + 672, 422, 672, 1238, 1239, 1240, 1241, 1242, + 1243, 1244, 1245, 1246, 1247, 1248, 1249, 717, + 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, + 1258, 1259, 1260, 1261, 1260, 1262, 1263, 1264, + 1265, 1266, 673, 1238, 1267, 1268, 1269, 1270, + 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, + 1279, 1280, 1281, 1282, 1283, 1284, 1285, 727, + 1286, 1287, 1288, 694, 1289, 1290, 1291, 1292, + 1293, 1294, 673, 1295, 1296, 1297, 1298, 1299, + 1300, 1301, 1302, 676, 1303, 673, 676, 1304, + 1305, 1306, 1307, 685, 1238, 1308, 1309, 1310, + 1311, 705, 1312, 1313, 685, 1314, 1315, 1316, + 1317, 1318, 673, 1238, 1319, 1278, 1320, 1321, + 1322, 685, 1323, 1324, 676, 673, 685, 427, + 1238, 1288, 673, 676, 685, 427, 685, 427, + 1325, 685, 1238, 427, 676, 1326, 1327, 676, + 1328, 1329, 683, 1330, 1331, 1332, 1333, 1334, + 1284, 1335, 1336, 1337, 1338, 1339, 1340, 1341, + 1342, 1343, 1344, 1345, 1346, 1303, 1347, 676, + 685, 427, 1238, 1348, 1349, 685, 673, 1238, + 427, 673, 1238, 676, 1350, 733, 1351, 1352, + 1353, 1354, 1355, 1356, 1357, 1358, 673, 1359, + 1360, 1361, 1362, 1363, 1364, 673, 685, 1238, + 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, + 1374, 1375, 1376, 1372, 1378, 1379, 1380, 1381, + 1365, 1377, 1365, 1238, 1365, 1238, 1382, 1382, + 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, + 1387, 771, 1391, 1391, 1391, 1392, 1393, 1386, + 1391, 772, 773, 1394, 1391, 771, 1395, 1395, + 1395, 1397, 1398, 1399, 1395, 1400, 1401, 1402, + 1395, 1396, 1403, 1403, 1403, 1405, 1406, 1407, + 1403, 1408, 1409, 1410, 1403, 1404, 1391, 1391, + 1411, 1412, 1386, 1391, 772, 773, 1394, 1391, + 771, 1413, 1414, 1415, 771, 1416, 1417, 1418, + 769, 769, 769, 769, 1420, 1421, 1422, 1396, + 769, 1423, 1424, 1425, 769, 1419, 770, 770, + 770, 1427, 1428, 1429, 1396, 770, 1430, 1431, + 1432, 770, 1426, 769, 769, 769, 1434, 1435, + 1436, 1404, 769, 1437, 1438, 1439, 769, 1433, + 1395, 1395, 771, 1440, 1441, 1399, 1395, 1400, + 1401, 1402, 1395, 1396, 1442, 1443, 1444, 771, + 1445, 1446, 1447, 770, 770, 770, 770, 1449, + 1450, 1451, 1404, 770, 1452, 1453, 1454, 770, + 1448, 1403, 1403, 771, 1455, 1456, 1407, 1403, + 1408, 1409, 1410, 1403, 1404, 1403, 1403, 1403, + 1405, 1406, 1407, 771, 1408, 1409, 1410, 1403, + 1404, 1403, 1403, 1403, 1405, 1406, 1407, 772, + 1408, 1409, 1410, 1403, 1404, 1403, 1403, 1403, + 1405, 1406, 1407, 773, 1408, 1409, 1410, 1403, + 1404, 1395, 1395, 1395, 1397, 1398, 1399, 771, + 1400, 1401, 1402, 1395, 1396, 1395, 1395, 1395, + 1397, 1398, 1399, 772, 1400, 1401, 1402, 1395, + 1396, 1395, 1395, 1395, 1397, 1398, 1399, 773, + 1400, 1401, 1402, 1395, 1396, 1458, 769, 1460, + 1459, 1461, 770, 1463, 1462, 771, 1464, 775, + 1464, 1465, 1464, 777, 1466, 1467, 1468, 
1469, + 1470, 1471, 1472, 1469, 781, 777, 1466, 1474, + 1475, 1473, 782, 783, 1476, 1473, 781, 1479, + 1480, 1481, 1482, 1477, 1483, 1484, 1485, 1477, + 1478, 1488, 1489, 1490, 1491, 1486, 1492, 1493, + 1494, 1486, 1487, 1496, 1495, 1498, 1497, 781, + 1499, 782, 1499, 783, 1499, 787, 1500, 1501, + 1502, 1503, 1504, 1505, 1506, 1503, 789, 787, + 1500, 1508, 1507, 790, 791, 1509, 1507, 789, + 1511, 1510, 1513, 1512, 789, 1514, 790, 1514, + 791, 1514, 795, 1517, 1518, 1520, 1521, 1522, + 1516, 1523, 1524, 1525, 1526, 1527, 1528, 1529, + 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, + 1538, 1539, 1540, 1541, 1542, 1544, 1545, 1546, + 1547, 1548, 1549, 795, 795, 1515, 1516, 1519, + 1543, 1550, 1515, 1046, 795, 795, 1552, 1553, + 865, 846, 1554, 846, 1555, 1556, 1557, 1558, + 1559, 1560, 1561, 1562, 1563, 1564, 1565, 920, + 1566, 896, 1567, 1568, 1569, 1570, 1571, 1572, + 1573, 1574, 1575, 1576, 1577, 1578, 795, 795, + 795, 801, 941, 1551, 1046, 1579, 795, 795, + 795, 1046, 1579, 1046, 1046, 795, 1579, 795, + 1579, 795, 1579, 795, 1046, 1046, 1046, 1046, + 1046, 1579, 795, 1046, 1046, 1046, 795, 1046, + 795, 1579, 795, 1046, 1046, 1046, 1046, 795, + 1579, 1046, 795, 1046, 795, 1046, 795, 1046, + 1046, 795, 1046, 1579, 795, 1046, 795, 1046, + 795, 1046, 1579, 1046, 795, 1579, 1046, 795, + 1046, 795, 1579, 1046, 1046, 1046, 1046, 1046, + 1579, 795, 795, 1046, 795, 1046, 1579, 1046, + 795, 1579, 1046, 1046, 1579, 795, 795, 1046, + 795, 1046, 795, 1046, 1579, 1580, 1581, 1582, + 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, + 1091, 1591, 1592, 1593, 1594, 1595, 1596, 1597, + 1598, 1599, 1600, 1601, 1602, 1601, 1603, 1604, + 1605, 1606, 1607, 1047, 1579, 1608, 1609, 1610, + 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, + 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626, + 1101, 1627, 1628, 1629, 1068, 1630, 1631, 1632, + 1633, 1634, 1635, 1047, 1636, 1637, 1638, 1639, + 1640, 1641, 1642, 1643, 1050, 1644, 1047, 1050, + 1645, 1646, 1647, 1648, 1059, 1579, 1649, 1650, + 1651, 1652, 1079, 1653, 1654, 1059, 1655, 1656, + 1657, 1658, 1659, 1047, 1579, 1660, 1619, 1661, + 1662, 1663, 1059, 1664, 1665, 1050, 1047, 1059, + 801, 1579, 1629, 1047, 1050, 1059, 801, 1059, + 801, 1666, 1059, 1579, 801, 1050, 1667, 1668, + 1050, 1669, 1670, 1057, 1671, 1672, 1673, 1674, + 1675, 1625, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1644, 1688, + 1050, 1059, 801, 1579, 1689, 1690, 1059, 1047, + 1579, 801, 1047, 1579, 1050, 1691, 1107, 1692, + 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1047, + 1700, 1701, 1702, 1703, 1704, 1705, 1047, 1059, + 1579, 1707, 1708, 1709, 1710, 1711, 1712, 1713, + 1714, 1715, 1716, 1717, 1713, 1719, 1720, 1721, + 1722, 1706, 1718, 1706, 1579, 1706, 1579, +} + +var _hcltok_trans_targs []int16 = []int16{ + 1464, 1, 1464, 1464, 1464, 3, 4, 1472, + 1464, 5, 1473, 6, 7, 9, 10, 287, + 13, 14, 15, 16, 17, 288, 289, 20, + 290, 22, 23, 291, 292, 293, 294, 295, + 296, 297, 298, 299, 300, 329, 349, 354, + 128, 129, 130, 357, 152, 372, 376, 1464, + 11, 12, 18, 19, 21, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 65, + 106, 121, 132, 155, 171, 284, 34, 35, + 36, 37, 38, 39, 40, 41, 42, 43, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, + 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, + 118, 119, 120, 122, 123, 124, 125, 126, + 127, 131, 133, 134, 135, 
136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 153, 154, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 172, 204, + 228, 231, 232, 234, 243, 244, 247, 251, + 269, 276, 278, 280, 282, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, + 200, 201, 202, 203, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 229, 230, 233, 235, 236, + 237, 238, 239, 240, 241, 242, 245, 246, + 248, 249, 250, 252, 253, 254, 255, 256, + 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 268, 270, 271, 272, 273, + 274, 275, 277, 279, 281, 283, 285, 286, + 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, + 317, 318, 319, 320, 321, 322, 323, 324, + 325, 326, 327, 328, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 342, 343, 344, 345, 346, 347, 348, 350, + 351, 352, 353, 355, 356, 358, 359, 360, + 361, 362, 363, 364, 365, 366, 367, 368, + 369, 370, 371, 373, 374, 375, 377, 383, + 405, 410, 412, 414, 378, 379, 380, 381, + 382, 384, 385, 386, 387, 388, 389, 390, + 391, 392, 393, 394, 395, 396, 397, 398, + 399, 400, 401, 402, 403, 404, 406, 407, + 408, 409, 411, 413, 415, 1464, 1477, 438, + 439, 440, 441, 418, 442, 443, 444, 445, + 446, 447, 448, 449, 450, 451, 452, 453, + 454, 455, 456, 457, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 470, + 471, 472, 473, 474, 475, 476, 477, 478, + 479, 480, 481, 482, 483, 484, 485, 486, + 420, 487, 488, 489, 490, 491, 492, 493, + 494, 495, 496, 497, 498, 499, 500, 501, + 502, 503, 504, 419, 505, 506, 507, 508, + 509, 511, 512, 513, 514, 515, 516, 517, + 518, 519, 520, 521, 522, 523, 524, 526, + 527, 528, 529, 530, 531, 535, 537, 538, + 539, 540, 435, 541, 542, 543, 544, 545, + 546, 547, 548, 549, 550, 551, 552, 553, + 554, 555, 557, 558, 560, 561, 562, 563, + 564, 565, 433, 566, 567, 568, 569, 570, + 571, 572, 573, 574, 576, 608, 632, 635, + 636, 638, 647, 648, 651, 655, 673, 533, + 680, 682, 684, 686, 577, 578, 579, 580, + 581, 582, 583, 584, 585, 586, 587, 588, + 589, 590, 591, 592, 593, 594, 595, 596, + 597, 598, 599, 600, 601, 602, 603, 604, + 605, 606, 607, 609, 610, 611, 612, 613, + 614, 615, 616, 617, 618, 619, 620, 621, + 622, 623, 624, 625, 626, 627, 628, 629, + 630, 631, 633, 634, 637, 639, 640, 641, + 642, 643, 644, 645, 646, 649, 650, 652, + 653, 654, 656, 657, 658, 659, 660, 661, + 662, 663, 664, 665, 666, 667, 668, 669, + 670, 671, 672, 674, 675, 676, 677, 678, + 679, 681, 683, 685, 687, 689, 690, 1464, + 1464, 691, 828, 829, 760, 830, 831, 832, + 833, 834, 835, 789, 836, 725, 837, 838, + 839, 840, 841, 842, 843, 844, 745, 845, + 846, 847, 848, 849, 850, 851, 852, 853, + 854, 770, 855, 857, 858, 859, 860, 861, + 862, 863, 864, 865, 866, 703, 867, 868, + 869, 870, 871, 872, 873, 874, 875, 741, + 876, 877, 878, 879, 880, 811, 882, 883, + 886, 888, 889, 890, 891, 892, 893, 896, + 897, 899, 900, 901, 903, 904, 905, 906, + 907, 908, 909, 910, 911, 912, 913, 915, + 916, 917, 918, 921, 923, 924, 926, 928, + 1515, 1517, 1518, 1516, 931, 932, 1515, 934, + 1541, 1541, 1541, 1543, 1544, 1542, 939, 940, + 1545, 1546, 1550, 1550, 1550, 1551, 946, 947, + 1552, 1553, 1557, 1558, 1557, 973, 974, 975, + 976, 953, 977, 978, 979, 980, 981, 982, + 983, 984, 985, 986, 987, 988, 989, 990, + 991, 992, 993, 994, 995, 996, 997, 998, + 999, 1000, 1001, 1002, 1003, 1005, 1006, 1007, + 1008, 1009, 1010, 
1011, 1012, 1013, 1014, 1015, + 1016, 1017, 1018, 1019, 1020, 1021, 955, 1022, + 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, + 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, + 1039, 954, 1040, 1041, 1042, 1043, 1044, 1046, + 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, + 1055, 1056, 1057, 1058, 1059, 1061, 1062, 1063, + 1064, 1065, 1066, 1070, 1072, 1073, 1074, 1075, + 970, 1076, 1077, 1078, 1079, 1080, 1081, 1082, + 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, + 1092, 1093, 1095, 1096, 1097, 1098, 1099, 1100, + 968, 1101, 1102, 1103, 1104, 1105, 1106, 1107, + 1108, 1109, 1111, 1143, 1167, 1170, 1171, 1173, + 1182, 1183, 1186, 1190, 1208, 1068, 1215, 1217, + 1219, 1221, 1112, 1113, 1114, 1115, 1116, 1117, + 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, + 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, + 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, + 1142, 1144, 1145, 1146, 1147, 1148, 1149, 1150, + 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, + 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, + 1168, 1169, 1172, 1174, 1175, 1176, 1177, 1178, + 1179, 1180, 1181, 1184, 1185, 1187, 1188, 1189, + 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, + 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, + 1207, 1209, 1210, 1211, 1212, 1213, 1214, 1216, + 1218, 1220, 1222, 1224, 1225, 1557, 1557, 1226, + 1363, 1364, 1295, 1365, 1366, 1367, 1368, 1369, + 1370, 1324, 1371, 1260, 1372, 1373, 1374, 1375, + 1376, 1377, 1378, 1379, 1280, 1380, 1381, 1382, + 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1305, + 1390, 1392, 1393, 1394, 1395, 1396, 1397, 1398, + 1399, 1400, 1401, 1238, 1402, 1403, 1404, 1405, + 1406, 1407, 1408, 1409, 1410, 1276, 1411, 1412, + 1413, 1414, 1415, 1346, 1417, 1418, 1421, 1423, + 1424, 1425, 1426, 1427, 1428, 1431, 1432, 1434, + 1435, 1436, 1438, 1439, 1440, 1441, 1442, 1443, + 1444, 1445, 1446, 1447, 1448, 1450, 1451, 1452, + 1453, 1456, 1458, 1459, 1461, 1463, 1465, 1464, + 1466, 1467, 1464, 1468, 1464, 1469, 1470, 1471, + 1474, 1475, 1476, 1464, 1478, 1464, 1479, 1464, + 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, + 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, + 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, + 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, + 1512, 1513, 1514, 1464, 1464, 1464, 1464, 1464, + 2, 1464, 1464, 8, 1464, 1464, 1464, 1464, + 1464, 416, 417, 421, 422, 423, 424, 425, + 426, 427, 428, 429, 430, 431, 432, 434, + 436, 437, 469, 510, 525, 532, 534, 536, + 556, 559, 575, 688, 1464, 1464, 1464, 692, + 693, 694, 695, 696, 697, 698, 699, 700, + 701, 702, 704, 705, 706, 707, 708, 709, + 710, 711, 712, 713, 714, 715, 716, 717, + 718, 719, 720, 721, 722, 723, 724, 726, + 727, 728, 729, 730, 731, 732, 733, 734, + 735, 736, 737, 738, 739, 740, 742, 743, + 744, 746, 747, 748, 749, 750, 751, 752, + 753, 754, 755, 756, 757, 758, 759, 761, + 762, 763, 764, 765, 766, 767, 768, 769, + 771, 772, 773, 774, 775, 776, 777, 778, + 779, 780, 781, 782, 783, 784, 785, 786, + 787, 788, 790, 791, 792, 793, 794, 795, + 796, 797, 798, 799, 800, 801, 802, 803, + 804, 805, 806, 807, 808, 809, 810, 812, + 813, 814, 815, 816, 817, 818, 819, 820, + 821, 822, 823, 824, 825, 826, 827, 856, + 881, 884, 885, 887, 894, 895, 898, 902, + 914, 919, 920, 922, 925, 927, 1515, 1515, + 1534, 1536, 1519, 1515, 1538, 1539, 1540, 1515, + 929, 930, 933, 1515, 1516, 929, 930, 1519, + 931, 932, 933, 1515, 1516, 929, 930, 1519, + 931, 932, 933, 1520, 1525, 1521, 1522, 1524, + 1531, 1532, 1533, 1517, 1521, 1522, 1524, 1531, + 1532, 1533, 1518, 1523, 1526, 1527, 1528, 1529, + 1530, 1517, 
1521, 1522, 1524, 1531, 1532, 1533, + 1520, 1525, 1523, 1526, 1527, 1528, 1529, 1530, + 1518, 1523, 1526, 1527, 1528, 1529, 1530, 1520, + 1525, 1515, 1535, 1515, 1515, 1537, 1515, 1515, + 1515, 935, 936, 942, 943, 1541, 1547, 1548, + 1549, 1541, 937, 938, 941, 1541, 1542, 1541, + 936, 937, 938, 939, 940, 941, 1541, 1542, + 1541, 936, 937, 938, 939, 940, 941, 1541, + 1541, 1541, 1541, 1541, 944, 949, 950, 1550, + 1554, 1555, 1556, 1550, 945, 948, 1550, 1550, + 1550, 1550, 1550, 1557, 1559, 1560, 1561, 1562, + 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, + 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578, + 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586, + 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1557, + 951, 952, 956, 957, 958, 959, 960, 961, + 962, 963, 964, 965, 966, 967, 969, 971, + 972, 1004, 1045, 1060, 1067, 1069, 1071, 1091, + 1094, 1110, 1223, 1557, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1239, + 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, + 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, + 1256, 1257, 1258, 1259, 1261, 1262, 1263, 1264, + 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, + 1273, 1274, 1275, 1277, 1278, 1279, 1281, 1282, + 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, + 1291, 1292, 1293, 1294, 1296, 1297, 1298, 1299, + 1300, 1301, 1302, 1303, 1304, 1306, 1307, 1308, + 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, + 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1325, + 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, + 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, + 1342, 1343, 1344, 1345, 1347, 1348, 1349, 1350, + 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, + 1359, 1360, 1361, 1362, 1391, 1416, 1419, 1420, + 1422, 1429, 1430, 1433, 1437, 1449, 1454, 1455, + 1457, 1460, 1462, +} + +var _hcltok_trans_actions []byte = []byte{ + 151, 0, 93, 147, 109, 0, 0, 201, + 143, 0, 13, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 123, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 145, 198, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 149, + 127, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 35, 13, 13, 13, 0, 0, 37, 0, + 57, 43, 55, 180, 180, 180, 0, 0, + 0, 0, 77, 63, 75, 186, 0, 0, + 0, 0, 87, 192, 91, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 89, 81, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 95, + 0, 0, 121, 210, 113, 0, 13, 204, + 13, 0, 0, 115, 0, 117, 0, 125, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 13, 13, + 13, 207, 207, 207, 207, 207, 207, 13, + 13, 207, 13, 129, 141, 137, 99, 105, + 0, 135, 131, 0, 103, 97, 111, 101, + 133, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 107, 119, 139, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 21, 19, + 0, 0, 13, 23, 0, 13, 13, 29, + 0, 0, 0, 153, 174, 1, 1, 174, + 1, 1, 1, 156, 177, 3, 3, 177, + 3, 3, 3, 0, 0, 0, 0, 13, + 13, 13, 13, 174, 1, 1, 174, 174, + 174, 174, 174, 1, 1, 174, 174, 174, + 174, 177, 3, 3, 177, 177, 177, 177, + 1, 1, 0, 0, 13, 13, 13, 13, + 177, 3, 3, 177, 177, 177, 177, 3, + 3, 31, 0, 25, 15, 0, 27, 17, + 33, 0, 0, 0, 0, 45, 0, 183, + 183, 51, 0, 0, 0, 162, 213, 159, + 5, 5, 5, 5, 5, 5, 168, 217, + 165, 7, 7, 7, 7, 7, 7, 47, + 39, 49, 41, 53, 0, 0, 0, 65, + 0, 189, 189, 71, 0, 0, 67, 59, + 69, 61, 73, 79, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 13, 13, 13, 195, 195, 195, + 195, 195, 195, 13, 13, 195, 13, 83, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 85, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, +} + +var _hcltok_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 9, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 171, 0, 0, + 0, 0, 0, 0, 0, 0, 171, 0, + 0, 0, 0, 0, 0, 9, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, +} + +var _hcltok_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 11, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 11, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, +} + +var _hcltok_eof_trans []int16 = []int16{ + 0, 1, 4, 1, 1, 9, 9, 9, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, + 422, 422, 1, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 422, 422, 422, 422, 422, + 422, 422, 422, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 
672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 672, 672, 672, 672, 672, 672, 672, + 672, 769, 769, 769, 769, 769, 775, 775, + 777, 779, 779, 777, 777, 779, 0, 0, + 787, 789, 787, 787, 789, 0, 0, 795, + 795, 797, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 
1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 0, 1196, 1197, 1198, 1197, 1198, 1198, 1198, + 1202, 1203, 1198, 1198, 1198, 1209, 1198, 1198, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 0, 1392, 1396, 1404, 1392, + 1392, 1396, 1396, 1404, 1396, 1392, 1404, 1404, + 1404, 1404, 1404, 1396, 1396, 1396, 1458, 1460, + 1458, 1463, 1465, 1465, 1465, 0, 1474, 1478, + 1487, 1496, 1498, 1500, 1500, 1500, 0, 1508, + 1511, 1513, 1515, 1515, 1515, 0, 1552, 1580, + 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, + 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, + 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, + 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, + 1580, 1580, +} + +const hcltok_start int = 1464 +const hcltok_first_final int = 1464 +const hcltok_error int = 0 + +const hcltok_en_stringTemplate int = 1515 +const hcltok_en_heredocTemplate int = 1541 +const hcltok_en_bareTemplate int = 1550 +const hcltok_en_identOnly int = 1557 +const hcltok_en_main int = 1464 + +// line 16 "scan_tokens.rl" + +func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + f := &tokenAccum{ + Filename: filename, + Bytes: data, + Pos: start, + } + + // line 294 "scan_tokens.rl" + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = hcltok_en_main + case scanTemplate: + cs = hcltok_en_bareTemplate + case scanIdentOnly: + cs = hcltok_en_identOnly + default: + panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + + // line 329 "scan_tokens.rl" + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func(ty TokenType) { + f.emitToken(ty, ts, te) + } + selfToken := func() { + b := data[ts:te] + if len(b) != 1 { + // should never happen + panic("selfToken only works for single-character tokens") + } + f.emitToken(TokenType(b[0]), ts, te) + } + + // line 4372 "scan_tokens.go" + { + top = 0 + ts = 0 + te = 0 + act = 0 + } + + // line 4380 "scan_tokens.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } + _resume: + _acts = int(_hcltok_from_state_actions[cs]) + _nacts = uint(_hcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 6: + // line 1 "NONE" + + ts = p + + // line 4404 "scan_tokens.go" + } + } + + _keys = int(_hcltok_key_offsets[cs]) + _trans = int(_hcltok_index_offsets[cs]) + + _klen = int(_hcltok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _hcltok_trans_keys[_mid]: + _upper 
= _mid - 1 + case data[p] > _hcltok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_hcltok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _hcltok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _hcltok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_hcltok_indicies[_trans]) + _eof_trans: + cs = int(_hcltok_trans_targs[_trans]) + + if _hcltok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_hcltok_trans_actions[_trans]) + _nacts = uint(_hcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 0: + // line 218 "scan_tokens.rl" + + p-- + + case 1: + // line 219 "scan_tokens.rl" + + p-- + + case 2: + // line 224 "scan_tokens.rl" + + p-- + + case 3: + // line 225 "scan_tokens.rl" + + p-- + + case 7: + // line 1 "NONE" + + te = p + 1 + + case 8: + // line 155 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 9: + // line 165 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 10: + // line 79 "scan_tokens.rl" + + te = p + 1 + { + token(TokenCQuote) + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + case 11: + // line 239 "scan_tokens.rl" + + te = p + 1 + { + token(TokenInvalid) + } + case 12: + // line 240 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 13: + // line 155 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 14: + // line 165 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 15: + // line 238 "scan_tokens.rl" + + te = p + p-- + { + token(TokenQuotedLit) + } + case 16: + // line 239 "scan_tokens.rl" + + te = p + p-- + { + token(TokenInvalid) + } + case 17: + // line 240 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 18: + // line 238 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenQuotedLit) + } + case 19: + // line 240 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 20: + // line 143 "scan_tokens.rl" + + act = 10 + case 21: + // line 248 "scan_tokens.rl" + + act = 11 + case 22: + // line 155 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + 
heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 23: + // line 165 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 24: + // line 106 "scan_tokens.rl" + + te = p + 1 + { + // This action is called specificially when a heredoc literal + // ends with a newline character. + + // This might actually be our end marker. + topdoc := &heredocs[len(heredocs)-1] + if topdoc.StartOfLine { + maybeMarker := bytes.TrimSpace(data[ts:te]) + if bytes.Equal(maybeMarker, topdoc.Marker) { + // We actually emit two tokens here: the end-of-heredoc + // marker first, and then separately the newline that + // follows it. This then avoids issues with the closing + // marker consuming a newline that would normally be used + // to mark the end of an attribute definition. + // We might have either a \n sequence or an \r\n sequence + // here, so we must handle both. + nls := te - 1 + nle := te + te-- + if data[te-1] == '\r' { + // back up one more byte + nls-- + te-- + } + token(TokenCHeredoc) + ts = nls + te = nle + token(TokenNewline) + heredocs = heredocs[:len(heredocs)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + } + + topdoc.StartOfLine = true + token(TokenStringLit) + } + case 25: + // line 248 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 26: + // line 155 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 27: + // line 165 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 28: + // line 143 "scan_tokens.rl" + + te = p + p-- + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 29: + // line 248 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 30: + // line 143 "scan_tokens.rl" + + p = (te) - 1 + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 31: + // line 1 "NONE" + + switch act { + case 0: + { + cs = 0 + goto _again + } + case 10: + { + p = (te) - 1 + + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. 
+ heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 11: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 32: + // line 151 "scan_tokens.rl" + + act = 14 + case 33: + // line 255 "scan_tokens.rl" + + act = 15 + case 34: + // line 155 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 35: + // line 165 "scan_tokens.rl" + + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 36: + // line 151 "scan_tokens.rl" + + te = p + 1 + { + token(TokenStringLit) + } + case 37: + // line 255 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 38: + // line 155 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 39: + // line 165 "scan_tokens.rl" + + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1464 + goto _again + } + } + case 40: + // line 151 "scan_tokens.rl" + + te = p + p-- + { + token(TokenStringLit) + } + case 41: + // line 255 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 42: + // line 151 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenStringLit) + } + case 43: + // line 1 "NONE" + + switch act { + case 0: + { + cs = 0 + goto _again + } + case 14: + { + p = (te) - 1 + + token(TokenStringLit) + } + case 15: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 44: + // line 259 "scan_tokens.rl" + + act = 16 + case 45: + // line 260 "scan_tokens.rl" + + act = 17 + case 46: + // line 260 "scan_tokens.rl" + + te = p + 1 + { + token(TokenBadUTF8) + } + case 47: + // line 261 "scan_tokens.rl" + + te = p + 1 + { + token(TokenInvalid) + } + case 48: + // line 259 "scan_tokens.rl" + + te = p + p-- + { + token(TokenIdent) + } + case 49: + // line 260 "scan_tokens.rl" + + te = p + p-- + { + token(TokenBadUTF8) + } + case 50: + // line 259 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenIdent) + } + case 51: + // line 260 "scan_tokens.rl" + + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 52: + // line 1 "NONE" + + switch act { + case 16: + { + p = (te) - 1 + token(TokenIdent) + } + case 17: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 53: + // line 267 "scan_tokens.rl" + + act = 21 + case 54: + // line 269 "scan_tokens.rl" + + act = 22 + case 55: + // line 280 "scan_tokens.rl" + + act = 32 + case 56: + // line 290 "scan_tokens.rl" + + act = 38 + case 57: + // line 291 "scan_tokens.rl" + + act = 39 + case 58: + // line 269 "scan_tokens.rl" + + te = p + 1 + { + token(TokenComment) + } + case 59: + // line 270 "scan_tokens.rl" + + te = p + 1 + { + token(TokenNewline) + } + case 60: + // line 272 "scan_tokens.rl" + + te = p + 1 + { + token(TokenEqualOp) + } + case 61: + // line 273 "scan_tokens.rl" + + te = p + 1 + { + 
token(TokenNotEqual) + } + case 62: + // line 274 "scan_tokens.rl" + + te = p + 1 + { + token(TokenGreaterThanEq) + } + case 63: + // line 275 "scan_tokens.rl" + + te = p + 1 + { + token(TokenLessThanEq) + } + case 64: + // line 276 "scan_tokens.rl" + + te = p + 1 + { + token(TokenAnd) + } + case 65: + // line 277 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOr) + } + case 66: + // line 278 "scan_tokens.rl" + + te = p + 1 + { + token(TokenEllipsis) + } + case 67: + // line 279 "scan_tokens.rl" + + te = p + 1 + { + token(TokenFatArrow) + } + case 68: + // line 280 "scan_tokens.rl" + + te = p + 1 + { + selfToken() + } + case 69: + // line 175 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOBrace) + braces++ + } + case 70: + // line 180 "scan_tokens.rl" + + te = p + 1 + { + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + token(TokenCBrace) + braces-- + } + } + case 71: + // line 192 "scan_tokens.rl" + + te = p + 1 + { + // Only consume from the retBraces stack and return if we are at + // a suitable brace nesting level, otherwise things will get + // confused. (Not entering this branch indicates a syntax error, + // which we will catch in the parser.) + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + // We intentionally generate a TokenTemplateSeqEnd here, + // even though the user apparently wanted a brace, because + // we want to allow the parser to catch the incorrect use + // of a ~} to balance a generic opening brace, rather than + // a template sequence. + token(TokenTemplateSeqEnd) + braces-- + } + } + case 72: + // line 74 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOQuote) + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1515 + goto _again + } + } + case 73: + // line 84 "scan_tokens.rl" + + te = p + 1 + { + token(TokenOHeredoc) + // the token is currently the whole heredoc introducer, like + // < 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 4: + // line 1 "NONE" + + ts = 0 + + case 5: + // line 1 "NONE" + + act = 0 + + // line 5252 "scan_tokens.go" + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + if _hcltok_eof_trans[cs] > 0 { + _trans = int(_hcltok_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: + { + } + } + + // line 352 "scan_tokens.rl" + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which we'll + // deal with as an invalid. + if cs < hcltok_first_final { + if mode == scanTemplate && len(stack) == 0 { + // If we're scanning a bare template then any straggling + // top-level stuff is actually literal string, rather than + // invalid. This handles the case where the template ends + // with a single "$" or "%", which trips us up because we + // want to see another character to decide if it's a sequence + // or an escape. + f.emitToken(TokenStringLit, ts, len(data)) + } else { + f.emitToken(TokenInvalid, ts, len(data)) + } + } + + // We always emit a synthetic EOF token at the end, since it gives the + // parser position information for an "unexpected EOF" diagnostic. 
+ f.emitToken(TokenEOF, len(data), len(data)) + + return f.Tokens +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl new file mode 100644 index 00000000..83ef65b4 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl @@ -0,0 +1,376 @@ + +package hclsyntax + +import ( + "bytes" + + "github.com/hashicorp/hcl2/hcl" +) + +// This file is generated from scan_tokens.rl. DO NOT EDIT. +%%{ + # (except you are actually in scan_tokens.rl here, so edit away!) + + machine hcltok; + write data; +}%% + +func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + f := &tokenAccum{ + Filename: filename, + Bytes: data, + Pos: start, + } + + %%{ + include UnicodeDerived "unicode_derived.rl"; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont + ); + BrokenUTF8 = any - AnyUTF8; + + NumberLitContinue = (digit|'.'|('e'|'E') ('+'|'-')? digit); + NumberLit = digit ("" | (NumberLitContinue - '.') | (NumberLitContinue* (NumberLitContinue - '.'))); + Ident = (ID_Start | '_') (ID_Continue | '-')*; + + # Symbols that just represent themselves are handled as a single rule. + SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`"; + + EqualOp = "=="; + NotEqual = "!="; + GreaterThanEqual = ">="; + LessThanEqual = "<="; + LogicalAnd = "&&"; + LogicalOr = "||"; + + Ellipsis = "..."; + FatArrow = "=>"; + + Newline = '\r' ? '\n'; + EndOfLine = Newline; + + BeginStringTmpl = '"'; + BeginHeredocTmpl = '<<' ('-')? Ident Newline; + + Comment = ( + ("#" (any - EndOfLine)* EndOfLine) | + ("//" (any - EndOfLine)* EndOfLine) | + ("/*" any* "*/") + ); + + # Note: hclwrite assumes that only ASCII spaces appear between tokens, + # and uses this assumption to recreate the spaces between tokens by + # looking at byte offset differences. This means it will produce + # incorrect results in the presence of tabs, but that's acceptable + # because the canonical style (which hclwrite itself can impose + # automatically is to never use tabs). + Spaces = (' ' | 0x09)+; + + action beginStringTemplate { + token(TokenOQuote); + fcall stringTemplate; + } + + action endStringTemplate { + token(TokenCQuote); + fret; + } + + action beginHeredocTemplate { + token(TokenOHeredoc); + // the token is currently the whole heredoc introducer, like + // < 0 { + heredocs[len(heredocs)-1].StartOfLine = false; + } + fcall main; + } + + action beginTemplateControl { + token(TokenTemplateControl); + braces++; + retBraces = append(retBraces, braces); + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false; + } + fcall main; + } + + action openBrace { + token(TokenOBrace); + braces++; + } + + action closeBrace { + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd); + braces--; + retBraces = retBraces[0:len(retBraces)-1] + fret; + } else { + token(TokenCBrace); + braces--; + } + } + + action closeTemplateSeqEatWhitespace { + // Only consume from the retBraces stack and return if we are at + // a suitable brace nesting level, otherwise things will get + // confused. (Not entering this branch indicates a syntax error, + // which we will catch in the parser.) 
+ if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd); + braces--; + retBraces = retBraces[0:len(retBraces)-1] + fret; + } else { + // We intentionally generate a TokenTemplateSeqEnd here, + // even though the user apparently wanted a brace, because + // we want to allow the parser to catch the incorrect use + // of a ~} to balance a generic opening brace, rather than + // a template sequence. + token(TokenTemplateSeqEnd); + braces--; + } + } + + TemplateInterp = "${" ("~")?; + TemplateControl = "%{" ("~")?; + EndStringTmpl = '"'; + StringLiteralChars = (AnyUTF8 - ("\r"|"\n")); + TemplateStringLiteral = ( + ('$' ^'{' %{ fhold; }) | + ('%' ^'{' %{ fhold; }) | + ('\\' StringLiteralChars) | + (StringLiteralChars - ("$" | '%' | '"')) + )+; + HeredocStringLiteral = ( + ('$' ^'{' %{ fhold; }) | + ('%' ^'{' %{ fhold; }) | + (StringLiteralChars - ("$" | '%')) + )*; + BareStringLiteral = ( + ('$' ^'{') | + ('%' ^'{') | + (StringLiteralChars - ("$" | '%')) + )* Newline?; + + stringTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + EndStringTmpl => endStringTemplate; + TemplateStringLiteral => { token(TokenQuotedLit); }; + AnyUTF8 => { token(TokenInvalid); }; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + heredocTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + HeredocStringLiteral EndOfLine => heredocLiteralEOL; + HeredocStringLiteral => heredocLiteralMidline; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + bareTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + BareStringLiteral => bareTemplateLiteral; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + identOnly := |* + Ident => { token(TokenIdent) }; + BrokenUTF8 => { token(TokenBadUTF8) }; + AnyUTF8 => { token(TokenInvalid) }; + *|; + + main := |* + Spaces => {}; + NumberLit => { token(TokenNumberLit) }; + Ident => { token(TokenIdent) }; + + Comment => { token(TokenComment) }; + Newline => { token(TokenNewline) }; + + EqualOp => { token(TokenEqualOp); }; + NotEqual => { token(TokenNotEqual); }; + GreaterThanEqual => { token(TokenGreaterThanEq); }; + LessThanEqual => { token(TokenLessThanEq); }; + LogicalAnd => { token(TokenAnd); }; + LogicalOr => { token(TokenOr); }; + Ellipsis => { token(TokenEllipsis); }; + FatArrow => { token(TokenFatArrow); }; + SelfToken => { selfToken() }; + + "{" => openBrace; + "}" => closeBrace; + + "~}" => closeTemplateSeqEatWhitespace; + + BeginStringTmpl => beginStringTemplate; + BeginHeredocTmpl => beginHeredocTemplate; + + BrokenUTF8 => { token(TokenBadUTF8) }; + AnyUTF8 => { token(TokenInvalid) }; + *|; + + }%% + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = hcltok_en_main + case scanTemplate: + cs = hcltok_en_bareTemplate + case scanIdentOnly: + cs = hcltok_en_identOnly + default: + panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + + %%{ + prepush { + stack = append(stack, 0); + } + postpop { + stack = stack[:len(stack)-1]; + } + }%% + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func (ty TokenType) { + f.emitToken(ty, ts, te) + } + 
selfToken := func () {
+        b := data[ts:te]
+        if len(b) != 1 {
+            // should never happen
+            panic("selfToken only works for single-character tokens")
+        }
+        f.emitToken(TokenType(b[0]), ts, te)
+    }
+
+    %%{
+        write init nocs;
+        write exec;
+    }%%
+
+    // If we fall out here without being in a final state then we've
+    // encountered something that the scanner can't match, which we'll
+    // deal with as an invalid.
+    if cs < hcltok_first_final {
+        if mode == scanTemplate && len(stack) == 0 {
+            // If we're scanning a bare template then any straggling
+            // top-level stuff is actually literal string, rather than
+            // invalid. This handles the case where the template ends
+            // with a single "$" or "%", which trips us up because we
+            // want to see another character to decide if it's a sequence
+            // or an escape.
+            f.emitToken(TokenStringLit, ts, len(data))
+        } else {
+            f.emitToken(TokenInvalid, ts, len(data))
+        }
+    }
+
+    // We always emit a synthetic EOF token at the end, since it gives the
+    // parser position information for an "unexpected EOF" diagnostic.
+    f.emitToken(TokenEOF, len(data), len(data))
+
+    return f.Tokens
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
new file mode 100644
index 00000000..49b9a3ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
@@ -0,0 +1,923 @@
+# HCL Native Syntax Specification
+
+This is the specification of the syntax and semantics of the native syntax
+for HCL. HCL is a system for defining configuration languages for applications.
+The HCL information model is designed to support multiple concrete syntaxes
+for configuration, but this native syntax is considered the primary format
+and is optimized for human authoring and maintenance, as opposed to machine
+generation of configuration.
+
+The language consists of three integrated sub-languages:
+
+* The _structural_ language defines the overall hierarchical configuration
+  structure, and is a serialization of HCL bodies, blocks and attributes.
+
+* The _expression_ language is used to express attribute values, either as
+  literals or as derivations of other values.
+
+* The _template_ language is used to compose values together into strings,
+  as one of several types of expression in the expression language.
+
+In normal use these three sub-languages are used together within configuration
+files to describe an overall configuration, with the structural language
+being used at the top level. The expression and template languages can also
+be used in isolation, to implement features such as REPLs, debuggers, and
+integration into more limited HCL syntaxes such as the JSON profile.
+
+## Syntax Notation
+
+Within this specification a semi-formal notation is used to illustrate the
+details of syntax. This notation is intended for human consumption rather
+than machine consumption, with the following conventions:
+
+* A naked name starting with an uppercase letter is a global production,
+  common to all of the syntax specifications in this document.
+* A naked name starting with a lowercase letter is a local production,
+  meaningful only within the specification where it is defined.
+* Double and single quotes (`"` and `'`) are used to mark literal character
+  sequences, which may be either punctuation markers or keywords.
+* The default operator for combining items, which has no punctuation,
+  is concatenation.
+* The symbol `|` indicates that any one of its left and right operands may
+  be present.
+* The `*` symbol indicates zero or more repetitions of the item to its left.
+* The `?` symbol indicates zero or one of the item to its left.
+* Parentheses (`(` and `)`) are used to group items together to apply
+  the `|`, `*` and `?` operators to them collectively.
+
+The grammar notation does not fully describe the language. The prose may
+augment or conflict with the illustrated grammar. In case of conflict, prose
+has priority.
+
+## Source Code Representation
+
+Source code is unicode text expressed in the UTF-8 encoding. The language
+itself does not perform unicode normalization, so syntax features such as
+identifiers are sequences of unicode code points and so e.g. a precombined
+accented character is distinct from a letter associated with a combining
+accent. (String literals have some special handling with regard to Unicode
+normalization which will be covered later in the relevant section.)
+
+UTF-8 encoded Unicode byte order marks are not permitted. Invalid or
+non-normalized UTF-8 encoding is always a parse error.
+
+## Lexical Elements
+
+### Comments and Whitespace
+
+Comments and whitespace are recognized as lexical elements but are ignored
+except as described below.
+
+Whitespace is defined as a sequence of zero or more space characters
+(U+0020). Newline sequences (either U+000A or U+000D followed by U+000A)
+are _not_ considered whitespace but are ignored as such in certain contexts.
+
+Horizontal tab characters (U+0009) are not considered to be whitespace and
+are not valid within HCL native syntax.
+
+Comments serve as program documentation and come in two forms:
+
+* _Line comments_ start with either the `//` or `#` sequences and end with
+  the next newline sequence. A line comment is considered equivalent to a
+  newline sequence.
+
+* _Inline comments_ start with the `/*` sequence and end with the `*/`
+  sequence, and may have any characters within except the ending sequence.
+  An inline comment is considered equivalent to a whitespace sequence.
+
+Comments and whitespace cannot begin within other comments, or within
+template literals except inside an interpolation sequence or template directive.
+
+### Identifiers
+
+Identifiers name entities such as blocks, attributes and expression variables.
+Identifiers are interpreted as per [UAX #31][UAX31] Section 2. Specifically,
+their syntax is defined in terms of the `ID_Start` and `ID_Continue`
+character properties as follows:
+
+```ebnf
+Identifier = ID_Start (ID_Continue | '-')*;
+```
+
+The Unicode specification provides the normative requirements for identifier
+parsing. Non-normatively, the spirit of this specification is that `ID_Start`
+consists of Unicode letters and certain unambiguous punctuation tokens, while
+`ID_Continue` augments that set with Unicode digits, combining marks, etc.
+
+The dash character `-` is additionally allowed in identifiers, even though
+that is not part of the unicode `ID_Continue` definition. This is to allow
+attribute names and block type names to contain dashes, although underscores
+as word separators are considered the idiomatic usage.
+
+[UAX31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
+
+### Keywords
+
+There are no globally-reserved words, but in some contexts certain identifiers
+are reserved to function as keywords. These are discussed further in the
+relevant documentation sections that follow. In such situations, the
+identifier's role as a keyword supersedes any other valid interpretation that
+may be possible. Outside of these specific situations, the keywords have no
+special meaning and are interpreted as regular identifiers.
+
+### Operators and Delimiters
+
+The following character sequences represent operators, delimiters, and other
+special tokens:
+
+```
++ && == < : { [ ( ${
+- || != > ? } ] ) %{
+* ! <= = .
+/ >= => ,
+% ...
+```
+
+### Numeric Literals
+
+A numeric literal is a decimal representation of a
+real number. It has an integer part, a fractional part,
+and an exponent part.
+
+```ebnf
+NumericLit = decimal+ ("." decimal+)? (expmark decimal+)?;
+decimal = '0' .. '9';
+expmark = ('e' | 'E') ("+" | "-")?;
+```
+
+## Structural Elements
+
+The structural language consists of syntax representing the following
+constructs:
+
+* _Attributes_, which assign a value to a specified name.
+* _Blocks_, which create a child body annotated by a type and optional labels.
+* _Body Content_, which consists of a collection of attributes and blocks.
+
+These constructs correspond to the similarly-named concepts in the
+language-agnostic HCL information model.
+
+```ebnf
+ConfigFile = Body;
+Body = (Attribute | Block)*;
+Attribute = Identifier "=" Expression Newline;
+Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
+```
+
+### Configuration Files
+
+A _configuration file_ is a sequence of characters whose top-level is
+interpreted as a Body.
+
+### Bodies
+
+A _body_ is a collection of associated attributes and blocks. The meaning of
+this association is defined by the calling application.
+
+### Attribute Definitions
+
+An _attribute definition_ assigns a value to a particular attribute name within
+a body. Each distinct attribute name may be defined no more than once within a
+single body.
+
+The attribute value is given as an expression, which is retained literally
+for later evaluation by the calling application.
+
+### Blocks
+
+A _block_ creates a child body that is annotated with a block _type_ and
+zero or more block _labels_. Blocks create a structural hierarchy which can be
+interpreted by the calling application.
+
+Block labels can either be quoted literal strings or naked identifiers.
+
+## Expressions
+
+The expression sub-language is used within attribute definitions to specify
+values.
+
+```ebnf
+Expression = (
+    ExprTerm |
+    Operation |
+    Conditional
+);
+```
+
+### Types
+
+The value types used within the expression language are those defined by the
+syntax-agnostic HCL information model. An expression may return any valid
+type, but only a subset of the available types have first-class syntax.
+A calling application may make other types available via _variables_ and
+_functions_.
+
+### Expression Terms
+
+Expression _terms_ are the operands for unary and binary expressions, as well
+as acting as expressions in their own right.
+
+```ebnf
+ExprTerm = (
+    LiteralValue |
+    CollectionValue |
+    TemplateExpr |
+    VariableExpr |
+    FunctionCall |
+    ForExpr |
+    ExprTerm Index |
+    ExprTerm GetAttr |
+    ExprTerm Splat |
+    "(" Expression ")"
+);
+```
+
+The productions for these different term types are given in their corresponding
+sections.
+
+Between the `(` and `)` characters denoting a sub-expression, newline
+characters are ignored as whitespace.
+
+### Literal Values
+
+A _literal value_ immediately represents a particular value of a primitive
+type.
+
+```ebnf
+LiteralValue = (
+    NumericLit |
+    "true" |
+    "false" |
+    "null"
+);
+```
+
+* Numeric literals represent values of type _number_.
+* The `true` and `false` keywords represent values of type _bool_.
+
+String literals are not directly available in the expression sub-language, but
+are available via the template sub-language, which can in turn be incorporated
+via _template expressions_.
+
+### Collection Values
+
+A _collection value_ combines zero or more other expressions to produce a
+new collection.
+
+```ebnf
+CollectionValue = tuple | object;
+tuple = "[" (
+    (Expression ("," Expression)* ","?)?
+) "]";
+object = "{" (
+    (objectelem ("," objectelem)* ","?)?
+) "}";
+objectelem = (Identifier | Expression) "=" Expression;
+```
+
+Only tuple and object values can be directly constructed via native syntax.
+Tuple and object values can in turn be converted to list, set and map values
+with other operations, which behave as defined by the syntax-agnostic HCL
+information model.
+
+When specifying an object element, an identifier is interpreted as a literal
+attribute name as opposed to a variable reference. To populate an item key
+from a variable, use parentheses to disambiguate:
+
+* `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
+* `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
+  from the variable named `foo`.
+
+Between the open and closing delimiters of these sequences, newline sequences
+are ignored as whitespace.
+
+There is a syntax ambiguity between _for expressions_ and collection values
+whose first element is a reference to a variable named `for`. The
+_for expression_ interpretation has priority, so to produce a tuple whose
+first element is the value of a variable named `for`, or an object with a
+key named `for`, use parentheses to disambiguate:
+
+* `[for, foo, baz]` is a syntax error.
+* `[(for), foo, baz]` is a tuple whose first element is the value of variable
+  `for`.
+* `{for: 1, baz: 2}` is a syntax error.
+* `{(for): 1, baz: 2}` is an object with an attribute literally named `for`.
+* `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the
+  ambiguity by reordering.
+
+### Template Expressions
+
+A _template expression_ embeds a program written in the template sub-language
+as an expression. Template expressions come in two forms:
+
+* A _quoted_ template expression is delimited by quote characters (`"`) and
+  defines a template as a single-line expression with escape characters.
+* A _heredoc_ template expression is introduced by a `<<` sequence and
+  defines a template via a multi-line sequence terminated by a user-chosen
+  delimiter.
+
+In both cases the template interpolation and directive syntax is available for
+use within the delimiters, and any text outside of these special sequences is
+interpreted as a literal string.
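+
+The following non-normative Go sketch (using the vendored `hclsyntax` and
+`cty` packages; variable and file names are arbitrary) shows the
+literal-versus-variable key rule for object elements described under
+Collection Values above:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	ctx := &hcl.EvalContext{
+		Variables: map[string]cty.Value{"foo": cty.StringVal("dynamic")},
+	}
+
+	for _, src := range []string{`{foo = "baz"}`, `{(foo) = "baz"}`} {
+		expr, diags := hclsyntax.ParseExpression([]byte(src), "obj.hcl", hcl.Pos{Line: 1, Column: 1})
+		if diags.HasErrors() {
+			panic(diags.Error())
+		}
+		val, _ := expr.Value(ctx)
+		// The first form has the key "foo"; the parenthesized form has
+		// the key "dynamic", taken from the variable.
+		fmt.Printf("%s -> %#v\n", src, val)
+	}
+}
+```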
+ +In _quoted_ template expressions any literal string sequences within the +template behave in a special way: literal newline sequences are not permitted +and instead _escape sequences_ can be included, starting with the +backslash `\`: + +``` + \n Unicode newline control character + \r Unicode carriage return control character + \t Unicode tab control character + \" Literal quote mark, used to prevent interpretation as end of string + \\ Literal backslash, used to prevent interpretation as escape sequence + \uNNNN Unicode character from Basic Multilingual Plane (NNNN is four hexadecimal digits) + \UNNNNNNNN Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits) +``` + +The _heredoc_ template expression type is introduced by either `<<` or `<<-`, +followed by an identifier. The template expression ends when the given +identifier subsequently appears again on a line of its own. + +If a heredoc template is introduced with the `<<-` symbol, any literal string +at the start of each line is analyzed to find the minimum number of leading +spaces, and then that number of prefix spaces is removed from all line-leading +literal strings. The final closing marker may also have an arbitrary number +of spaces preceding it on its line. + +```ebnf +TemplateExpr = quotedTemplate | heredocTemplate; +quotedTemplate = (as defined in prose above); +heredocTemplate = ( + ("<<" | "<<-") Identifier Newline + (content as defined in prose above) + Identifier Newline +); +``` + +A quoted template expression containing only a single literal string serves +as a syntax for defining literal string _expressions_. In certain contexts +the template syntax is restricted in this manner: + +```ebnf +StringLit = '"' (quoted literals as defined in prose above) '"'; +``` + +The `StringLit` production permits the escape sequences discussed for quoted +template expressions as above, but does _not_ permit template interpolation +or directive sequences. + +### Variables and Variable Expressions + +A _variable_ is a value that has been assigned a symbolic name. Variables are +made available for use in expressions by the calling application, by populating +the _global scope_ used for expression evaluation. + +Variables can also be created by expressions themselves, which always creates +a _child scope_ that incorporates the variables from its parent scope but +(re-)defines zero or more names with new values. + +The value of a variable is accessed using a _variable expression_, which is +a standalone `Identifier` whose name corresponds to a defined variable: + +```ebnf +VariableExpr = Identifier; +``` + +Variables in a particular scope are immutable, but child scopes may _hide_ +a variable from an ancestor scope by defining a new variable of the same name. +When looking up variables, the most locally-defined variable of the given name +is used, and ancestor-scoped variables of the same name cannot be accessed. + +No direct syntax is provided for declaring or assigning variables, but other +expression constructs implicitly create child scopes and define variables as +part of their evaluation. + +### Functions and Function Calls + +A _function_ is an operation that has been assigned a symbolic name. Functions +are made available for use in expressions by the calling application, by +populating the _function table_ used for expression evaluation. + +The namespace of functions is distinct from the namespace of variables. 
A
+function and a variable may share the same name with no implication that they
+are in any way related.
+
+A function can be executed via a _function call_ expression:
+
+```ebnf
+FunctionCall = Identifier "(" Arguments ")";
+Arguments = (
+    () |
+    (Expression ("," Expression)* ("," | "...")?)
+);
+```
+
+The definition of functions and the semantics of calling them are defined by
+the language-agnostic HCL information model. The given arguments are mapped
+onto the function's _parameters_ and the result of a function call expression
+is the return value of the named function when given those arguments.
+
+If the final argument expression is followed by the ellipsis symbol (`...`),
+the final argument expression must evaluate to either a list or tuple value.
+The elements of the value are each mapped to a single parameter of the
+named function, beginning at the first parameter remaining after all other
+argument expressions have been mapped.
+
+Within the parentheses that delimit the function arguments, newline sequences
+are ignored as whitespace.
+
+### For Expressions
+
+A _for expression_ is a construct for constructing a collection by projecting
+the items from another collection.
+
+```ebnf
+ForExpr = forTupleExpr | forObjectExpr;
+forTupleExpr  = "[" forIntro Expression forCond? "]";
+forObjectExpr = "{" forIntro Expression "=>" Expression "..."? forCond? "}";
+forIntro = "for" Identifier ("," Identifier)? "in" Expression ":";
+forCond  = "if" Expression;
+```
+
+The punctuation used to delimit a for expression decides whether it will produce
+a tuple value (`[` and `]`) or an object value (`{` and `}`).
+
+The "introduction" is equivalent in both cases: the keyword `for` followed by
+either one or two identifiers separated by a comma which define the temporary
+variable names used for iteration, followed by the keyword `in` and then
+an expression that must evaluate to a value that can be iterated. The
+introduction is then terminated by the colon (`:`) symbol.
+
+If only one identifier is provided, it is the name of a variable that will
+be temporarily assigned the value of each element during iteration. If both
+are provided, the first is the key and the second is the value.
+
+Tuple, object, list, map, and set types are iterable. The type of collection
+used defines how the key and value variables are populated:
+
+* For tuple and list types, the _key_ is the zero-based index into the
+  sequence for each element, and the _value_ is the element value. The
+  elements are visited in index order.
+* For object and map types, the _key_ is the string attribute name or element
+  key, and the _value_ is the attribute or element value. The elements are
+  visited in the order defined by a lexicographic sort of the attribute names
+  or keys.
+* For set types, the _key_ and _value_ are both the element value. The elements
+  are visited in an undefined but consistent order.
+
+The expression after the colon and (in the case of object `for`) the expression
+after the `=>` are both evaluated once for each element of the source
+collection, in a local scope that defines the key and value variable names
+specified.
+
+The results of evaluating these expressions for each input element are used
+to populate an element in the new collection. In the case of tuple `for`, the
+single expression becomes an element, appending values to the tuple in visit
+order. In the case of object `for`, the pair of expressions is used as an
+attribute name and value respectively, creating an element in the resulting
+object.
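+
+As a non-normative illustration, both projection forms can be evaluated with
+the vendored Go API (file name arbitrary; the iteration variables `i` and `v`
+are scoped locally, so no variables need to be supplied):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+func main() {
+	exprs := []string{
+		`[for v in ["a", "b"]: v]`,         // tuple "for": ["a", "b"]
+		`{for i, v in ["a", "b"]: v => i}`, // object "for": {a = 0, b = 1}
+	}
+	for _, src := range exprs {
+		expr, diags := hclsyntax.ParseExpression([]byte(src), "for.hcl", hcl.Pos{Line: 1, Column: 1})
+		if diags.HasErrors() {
+			panic(diags.Error())
+		}
+		val, _ := expr.Value(nil)
+		fmt.Printf("%s -> %#v\n", src, val)
+	}
+}
+```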
+
+In the case of object `for`, it is an error if two input elements produce
+the same result from the attribute name expression, since duplicate
+attributes are not possible. If the ellipsis symbol (`...`) appears
+immediately after the value expression, this activates the grouping mode in
+which each value in the resulting object is a _tuple_ of all of the values
+that were produced against each distinct key.
+
+* `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
+* `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
+* `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
+* `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute
+  `a` is defined twice.
+* `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
+
+If the `if` keyword is used after the element expression(s), it applies an
+additional predicate that can be used to conditionally filter elements from
+the source collection from consideration. The expression following `if` is
+evaluated once for each source element, in the same scope used for the
+element expression(s). It must evaluate to a boolean value; if `true`, the
+element will be evaluated as normal, while if `false` the element will be
+skipped.
+
+* `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
+
+If the collection value, element expression(s) or condition expression return
+unknown values that are otherwise type-valid, the result is a value of the
+dynamic pseudo-type.
+
+### Index Operator
+
+The _index_ operator returns the value of a single element of a collection
+value. It is a postfix operator and can be applied to any value that has
+a tuple, object, map, or list type.
+
+```ebnf
+Index = "[" Expression "]";
+```
+
+The expression delimited by the brackets is the _key_ by which an element
+will be looked up.
+
+If the index operator is applied to a value of tuple or list type, the
+key expression must be a non-negative integer number representing the
+zero-based element index to access. If applied to a value of object or map
+type, the key expression must be a string representing the attribute name
+or element key. If the given key value is not of the appropriate type, a
+conversion is attempted using the conversion rules from the HCL
+syntax-agnostic information model.
+
+An error is produced if the given key expression does not correspond to
+an element in the collection, either because it is of an unconvertible type,
+because it is outside the range of elements for a tuple or list, or because
+the given attribute or key does not exist.
+
+If either the collection or the key are an unknown value of an
+otherwise-suitable type, the return value is an unknown value whose type
+matches what type would be returned given known values, or a value of the
+dynamic pseudo-type if type information alone cannot determine a suitable
+return type.
+
+Within the brackets that delimit the index key, newline sequences are ignored
+as whitespace.
+
+### Attribute Access Operator
+
+The _attribute access_ operator returns the value of a single attribute in
+an object value. It is a postfix operator and can be applied to any value
+that has an object type.
+
+```ebnf
+GetAttr = "." Identifier;
+```
+
+The given identifier is interpreted as the name of the attribute to access.
+An error is produced if the object to which the operator is applied does not
+have an attribute with the given name.
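+
+A non-normative sketch of both postfix operators, using the vendored Go API
+and `cty` values supplied by a hypothetical calling application (all names
+here are arbitrary):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	ctx := &hcl.EvalContext{
+		Variables: map[string]cty.Value{
+			"ports": cty.TupleVal([]cty.Value{cty.NumberIntVal(80), cty.NumberIntVal(443)}),
+			"net":   cty.ObjectVal(map[string]cty.Value{"cidr": cty.StringVal("10.0.0.0/16")}),
+		},
+	}
+
+	for _, src := range []string{`ports[1]`, `net.cidr`} {
+		expr, diags := hclsyntax.ParseExpression([]byte(src), "ops.hcl", hcl.Pos{Line: 1, Column: 1})
+		if diags.HasErrors() {
+			panic(diags.Error())
+		}
+		val, _ := expr.Value(ctx)
+		fmt.Printf("%s -> %#v\n", src, val) // 443, then "10.0.0.0/16"
+	}
+}
+```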
+
+If the object is an unknown value of a type that has the attribute named, the
+result is an unknown value of the attribute's type.
+
+### Splat Operators
+
+The _splat operators_ allow convenient access to attributes or elements of
+elements in a tuple, list, or set value.
+
+There are two kinds of "splat" operator:
+
+* The _attribute-only_ splat operator supports only attribute lookups into
+  the elements from a list, but supports an arbitrary number of them.
+
+* The _full_ splat operator additionally supports indexing into the elements
+  from a list, and allows any combination of attribute access and index
+  operations.
+
+```ebnf
+Splat = attrSplat | fullSplat;
+attrSplat = "." "*" GetAttr*;
+fullSplat = "[" "*" "]" (GetAttr | Index)*;
+```
+
+The splat operators can be thought of as shorthands for common operations that
+could otherwise be performed using _for expressions_:
+
+* `tuple.*.foo.bar[0]` is approximately equivalent to
+  `[for v in tuple: v.foo.bar][0]`.
+* `tuple[*].foo.bar[0]` is approximately equivalent to
+  `[for v in tuple: v.foo.bar[0]]`.
+
+Note the difference in how the trailing index operator is interpreted in
+each case. This different interpretation is the key difference between the
+_attribute-only_ and _full_ splat operators.
+
+Splat operators have one additional behavior compared to the equivalent
+_for expressions_ shown above: if a splat operator is applied to a value that
+is _not_ of tuple, list, or set type, the value is coerced automatically into
+a single-value list of the value type:
+
+* `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object`
+  is a single object.
+* `any_number.*` is equivalent to `[any_number]`, assuming that `any_number`
+  is a single number.
+
+If the left operand of a splat operator is an unknown value of any type, the
+result is a value of the dynamic pseudo-type.
+
+### Operations
+
+Operations apply a particular operator to either one or two expression terms.
+
+```ebnf
+Operation = unaryOp | binaryOp;
+unaryOp = ("-" | "!") ExprTerm;
+binaryOp = ExprTerm binaryOperator ExprTerm;
+binaryOperator = compareOperator | arithmeticOperator | logicOperator;
+compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">=";
+arithmeticOperator = "+" | "-" | "*" | "/" | "%";
+logicOperator = "&&" | "||" | "!";
+```
+
+The unary operators have the highest precedence.
+
+The binary operators are grouped into the following precedence levels:
+
+```
+Level    Operators
+  6      * / %
+  5      + -
+  4      > >= < <=
+  3      == !=
+  2      &&
+  1      ||
+```
+
+Higher values of "level" bind tighter. Operators within the same precedence
+level have left-to-right associativity. For example, `x / y * z` is equivalent
+to `(x / y) * z`.
+
+### Comparison Operators
+
+Comparison operators always produce boolean values, as a result of testing
+the relationship between two values.
+
+The two equality operators apply to values of any type:
+
+```
+a == b   equal
+a != b   not equal
+```
+
+Two values are equal if they are of identical types and their values are
+equal as defined in the HCL syntax-agnostic information model. The equality
+operators are commutative and opposite, such that `(a == b) == !(a != b)`
+and `(a == b) == (b == a)` for all values `a` and `b`.
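+
+A non-normative demonstration of the precedence and comparison rules above,
+using the vendored Go API (constant expressions, so a `nil` context is
+sufficient):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+func main() {
+	for _, src := range []string{
+		"2 + 3 * 4", // 14: "*" binds tighter than "+"
+		"8 / 4 * 2", // 4: same level, left-to-right associativity
+		"2 < 3",     // true: numeric comparison produces a bool
+	} {
+		expr, diags := hclsyntax.ParseExpression([]byte(src), "op.hcl", hcl.Pos{Line: 1, Column: 1})
+		if diags.HasErrors() {
+			panic(diags.Error())
+		}
+		val, _ := expr.Value(nil)
+		fmt.Printf("%s -> %#v\n", src, val)
+	}
+}
+```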
+ +The four numeric comparison operators apply only to numbers: + +``` +a < b less than +a <= b less than or equal to +a > b greater than +a >= b greater than or equal to +``` + +If either operand of a comparison operator is a correctly-typed unknown value +or a value of the dynamic pseudo-type, the result is an unknown boolean. + +### Arithmetic Operators + +Arithmetic operators apply only to number values and always produce number +values as results. + +``` +a + b sum (addition) +a - b difference (subtraction) +a * b product (multiplication) +a / b quotient (division) +a % b remainder (modulo) +-a negation +``` + +Arithmetic operations are considered to be performed in an arbitrary-precision +number space. + +If either operand of an arithmetic operator is an unknown number or a value +of the dynamic pseudo-type, the result is an unknown number. + +### Logic Operators + +Logic operators apply only to boolean values and always produce boolean values +as results. + +``` +a && b logical AND +a || b logical OR +!a logical NOT +``` + +If either operand of a logic operator is an unknown bool value or a value +of the dynamic pseudo-type, the result is an unknown bool value. + +### Conditional Operator + +The conditional operator allows selecting from one of two expressions based on +the outcome of a boolean expression. + +```ebnf +Conditional = Expression "?" Expression ":" Expression; +``` + +The first expression is the _predicate_, which is evaluated and must produce +a boolean result. If the predicate value is `true`, the result of the second +expression is the result of the conditional. If the predicate value is +`false`, the result of the third expression is the result of the conditional. + +The second and third expressions must be of the same type or must be able to +unify into a common type using the type unification rules defined in the +HCL syntax-agnostic information model. This unified type is the result type +of the conditional, with both expressions converted as necessary to the +unified type. + +If the predicate is an unknown boolean value or a value of the dynamic +pseudo-type then the result is an unknown value of the unified type of the +other two expressions. + +If either the second or third expressions produce errors when evaluated, +these errors are passed through only if the erroneous expression is selected. +This allows for expressions such as +`length(some_list) > 0 ? some_list[0] : default` (given some suitable `length` +function) without producing an error when the predicate is `false`. + +## Templates + +The template sub-language is used within template expressions to concisely +combine strings and other values to produce other strings. It can also be +used in isolation as a standalone template language. + +```ebnf +Template = ( + TemplateLiteral | + TemplateInterpolation | + TemplateDirective +)* +TemplateDirective = TemplateIf | TemplateFor; +``` + +A template behaves like an expression that always returns a string value. +The different elements of the template are evaluated and combined into a +single string to return. If any of the elements produce an unknown string +or a value of the dynamic pseudo-type, the result is an unknown string. + +An important use-case for standalone templates is to enable the use of +expressions in alternative HCL syntaxes where a native expression grammar is +not available. For example, the HCL JSON profile treats the values of JSON +strings as standalone templates when attributes are evaluated in expression +mode. 
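+
+A minimal non-normative example of a standalone template, parsed with the
+`hclsyntax.ParseTemplate` entry point of the vendored package (file and
+variable names arbitrary):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	src := []byte("Hello, ${name}!")
+	expr, diags := hclsyntax.ParseTemplate(src, "greet.tmpl", hcl.Pos{Line: 1, Column: 1})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	ctx := &hcl.EvalContext{
+		Variables: map[string]cty.Value{"name": cty.StringVal("HCL")},
+	}
+	val, _ := expr.Value(ctx)
+	fmt.Println(val.AsString()) // Hello, HCL!
+}
+```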
+
+### Template Literals
+
+A template literal is a literal sequence of characters to include in the
+resulting string. When the template sub-language is used standalone, a
+template literal can contain any Unicode character, with the exception
+of the sequences that introduce interpolations and directives, and of the
+sequences that escape those introductions.
+
+The interpolation and directive introductions are escaped by doubling their
+leading characters. The `${` sequence is escaped as `$${` and the `%{`
+sequence is escaped as `%%{`.
+
+When the template sub-language is embedded in the expression language via
+_template expressions_, additional constraints and transforms are applied to
+template literals, as described in the definition of template expressions.
+
+The value of a template literal can be modified by _strip markers_ in any
+interpolations or directives that are adjacent to it. A strip marker is
+a tilde (`~`) placed immediately after the opening `{` or before the closing
+`}` of a template sequence:
+
+* `hello ${~ "world" }` produces `"helloworld"`.
+* `%{ if true ~} hello %{~ endif }` produces `"hello"`.
+
+When a strip marker is present, any spaces adjacent to it in the corresponding
+string literal (if any) are removed before producing the final value. Space
+characters are interpreted as per Unicode's definition.
+
+Stripping is done at syntax level rather than value level. Values returned
+by interpolations or directives are not subject to stripping:
+
+* `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
+  because the space is not in a template literal directly adjacent to the
+  strip marker.
+
+### Template Interpolations
+
+An _interpolation sequence_ evaluates an expression (written in the
+expression sub-language), converts the result to a string value, and
+replaces itself with the resulting string.
+
+```ebnf
+TemplateInterpolation = ("${" | "${~") Expression ("}" | "~}");
+```
+
+If the expression result cannot be converted to a string, an error is
+produced.
+
+### Template If Directive
+
+The template `if` directive is the template equivalent of the
+_conditional expression_, allowing selection of one of two sub-templates based
+on the value of a predicate expression.
+
+```ebnf
+TemplateIf = (
+    ("%{" | "%{~") "if" Expression ("}" | "~}")
+    Template
+    (
+        ("%{" | "%{~") "else" ("}" | "~}")
+        Template
+    )?
+    ("%{" | "%{~") "endif" ("}" | "~}")
+);
+```
+
+The evaluation of the `if` directive is equivalent to the conditional
+expression, with the following exceptions:
+
+* The two sub-templates always produce strings, and thus the result value is
+  also always a string.
+* The `else` clause may be omitted, in which case the conditional's third
+  expression result is implied to be the empty string.
+
+### Template For Directive
+
+The template `for` directive is the template equivalent of the _for expression_,
+producing zero or more copies of its sub-template based on the elements of
+a collection.
+
+```ebnf
+TemplateFor = (
+    ("%{" | "%{~") "for" Identifier ("," Identifier)? "in" Expression ("}" | "~}")
+    Template
+    ("%{" | "%{~") "endfor" ("}" | "~}")
+);
+```
+
+The evaluation of the `for` directive is equivalent to the _for expression_
+when producing a tuple, with the following exceptions:
+
+* The sub-template always produces a string.
+* There is no equivalent of the "if" clause on the for expression.
+* The elements of the resulting tuple are all converted to strings and
+  concatenated to produce a flat string result.
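+
+A non-normative sketch of the `for` directive in a standalone template,
+using the vendored Go API (all names arbitrary):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	src := []byte("%{ for s in subnets }${s}\n%{ endfor }")
+	expr, diags := hclsyntax.ParseTemplate(src, "subnets.tmpl", hcl.Pos{Line: 1, Column: 1})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	ctx := &hcl.EvalContext{
+		Variables: map[string]cty.Value{
+			"subnets": cty.TupleVal([]cty.Value{
+				cty.StringVal("10.0.1.0/24"),
+				cty.StringVal("10.0.2.0/24"),
+			}),
+		},
+	}
+	val, _ := expr.Value(ctx)
+	fmt.Println(val.AsString()) // one subnet per line
+}
+```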
+
+### Template Interpolation Unwrapping
+
+As a special case, a template that consists only of a single interpolation,
+with no surrounding literals, directives or other interpolations, is
+"unwrapped". In this case, the result of the interpolation expression is
+returned verbatim, without conversion to string.
+
+This special case exists primarily to enable the native template language
+to be used inside strings in alternative HCL syntaxes that lack a first-class
+template or expression syntax. Unwrapping allows arbitrary expressions to be
+used to populate attributes when strings in such languages are interpreted
+as templates.
+
+* `${true}` produces the boolean value `true`.
+* `${"${true}"}` produces the boolean value `true`, because both the inner
+  and outer interpolations are subject to unwrapping.
+* `hello ${true}` produces the string `"hello true"`.
+* `${""}${true}` produces the string `"true"` because there are two
+  interpolation sequences, even though one produces an empty result.
+* `%{ for v in [true] }${v}%{ endfor }` produces the string `"true"` because
+  the presence of the `for` directive circumvents the unwrapping even though
+  the final result is a single value.
+
+In some contexts this unwrapping behavior may be circumvented by the calling
+application, by converting the final template result to string. This is
+necessary, for example, if a standalone template is being used to produce
+the direct contents of a file, since the result in that case must always be a
+string.
+
+## Static Analysis
+
+The HCL static analysis operations are implemented for some expression types
+in the native syntax, as described in the following sections.
+
+A goal for static analysis of the native syntax is for the interpretation to
+be as consistent as possible with the dynamic evaluation interpretation of
+the given expression, though some deviations are intentionally made in order
+to maximize the potential for analysis.
+
+### Static List
+
+The tuple construction syntax can be interpreted as a static list. All of
+the expression elements given are returned as the static list elements,
+with no further interpretation.
+
+### Static Map
+
+The object construction syntax can be interpreted as a static map. All of the
+key/value pairs given are returned as the static pairs, with no further
+interpretation.
+
+The usual requirement that an attribute name be interpretable as a string
+does not apply to this static analysis, allowing callers to provide map-like
+constructs with different key types by building on the map syntax.
+
+### Static Call
+
+The function call syntax can be interpreted as a static call. The called
+function name is returned verbatim and the given argument expressions are
+returned as the static arguments, with no further interpretation.
+
+### Static Traversal
+
+A variable expression and any attached attribute access operations and
+constant index operations can be interpreted as a static traversal.
+
+The keywords `true`, `false` and `null` can also be interpreted as
+static traversals, behaving as if they were references to variables of those
+names, to allow callers to redefine the meaning of those keywords in certain
+contexts.
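+
+A non-normative sketch of static analysis, assuming the generic
+`hcl.ExprCall` and `hcl.ExprList` helpers are present in the vendored `hcl`
+package (names here are arbitrary, and no `concat` function needs to be
+defined because nothing is evaluated):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+func main() {
+	expr, diags := hclsyntax.ParseExpression([]byte(`concat(["a"], ["b"])`), "call.hcl", hcl.Pos{Line: 1, Column: 1})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// Static call: recover the function name and argument expressions
+	// without evaluating anything.
+	call, diags := hcl.ExprCall(expr)
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+	fmt.Println(call.Name) // concat
+
+	// Static list: each argument is itself a tuple constructor.
+	elems, _ := hcl.ExprList(call.Arguments[0])
+	fmt.Println(len(elems)) // 1
+}
+```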
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
new file mode 100644
index 00000000..d69f65b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
@@ -0,0 +1,379 @@
+package hclsyntax
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// AsHCLBlock returns the block data expressed as a *hcl.Block.
+func (b *Block) AsHCLBlock() *hcl.Block {
+	lastHeaderRange := b.TypeRange
+	if len(b.LabelRanges) > 0 {
+		lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1]
+	}
+
+	return &hcl.Block{
+		Type:   b.Type,
+		Labels: b.Labels,
+		Body:   b.Body,
+
+		DefRange:    hcl.RangeBetween(b.TypeRange, lastHeaderRange),
+		TypeRange:   b.TypeRange,
+		LabelRanges: b.LabelRanges,
+	}
+}
+
+// Body is the implementation of hcl.Body for the HCL native syntax.
+type Body struct {
+	Attributes Attributes
+	Blocks     Blocks
+
+	// These are used with PartialContent to produce a "remaining items"
+	// body to return. They are nil on all bodies fresh out of the parser.
+	hiddenAttrs  map[string]struct{}
+	hiddenBlocks map[string]struct{}
+
+	SrcRange hcl.Range
+	EndRange hcl.Range // Final token of the body, for reporting missing items
+}
+
+// Assert that *Body implements hcl.Body
+var assertBodyImplBody hcl.Body = &Body{}
+
+func (b *Body) walkChildNodes(w internalWalkFunc) {
+	b.Attributes = w(b.Attributes).(Attributes)
+	b.Blocks = w(b.Blocks).(Blocks)
+}
+
+func (b *Body) Range() hcl.Range {
+	return b.SrcRange
+}
+
+func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+	content, remainHCL, diags := b.PartialContent(schema)
+
+	// Now we'll see if anything actually remains, to produce errors about
+	// extraneous items.
+	remain := remainHCL.(*Body)
+
+	for name, attr := range b.Attributes {
+		if _, hidden := remain.hiddenAttrs[name]; !hidden {
+			var suggestions []string
+			for _, attrS := range schema.Attributes {
+				if _, defined := content.Attributes[attrS.Name]; defined {
+					continue
+				}
+				suggestions = append(suggestions, attrS.Name)
+			}
+			suggestion := nameSuggestion(name, suggestions)
+			if suggestion != "" {
+				suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+			} else {
+				// Is there a block of the same name?
+				for _, blockS := range schema.Blocks {
+					if blockS.Type == name {
+						suggestion = fmt.Sprintf(" Did you mean to define a block of type %q?", name)
+						break
+					}
+				}
+			}
+
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Unsupported attribute",
+				Detail:   fmt.Sprintf("An attribute named %q is not expected here.%s", name, suggestion),
+				Subject:  &attr.NameRange,
+			})
+		}
+	}
+
+	for _, block := range b.Blocks {
+		blockTy := block.Type
+		if _, hidden := remain.hiddenBlocks[blockTy]; !hidden {
+			var suggestions []string
+			for _, blockS := range schema.Blocks {
+				suggestions = append(suggestions, blockS.Type)
+			}
+			suggestion := nameSuggestion(blockTy, suggestions)
+			if suggestion != "" {
+				suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+			} else {
+				// Is there an attribute of the same name?
+ for _, attrS := range schema.Attributes { + if attrS.Name == blockTy { + suggestion = fmt.Sprintf(" Did you mean to define attribute %q?", blockTy) + break + } + } + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.%s", blockTy, suggestion), + Subject: &block.TypeRange, + }) + } + } + + return content, diags +} + +func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var blocks hcl.Blocks + var diags hcl.Diagnostics + hiddenAttrs := make(map[string]struct{}) + hiddenBlocks := make(map[string]struct{}) + + if b.hiddenAttrs != nil { + for k, v := range b.hiddenAttrs { + hiddenAttrs[k] = v + } + } + if b.hiddenBlocks != nil { + for k, v := range b.hiddenBlocks { + hiddenBlocks[k] = v + } + } + + for _, attrS := range schema.Attributes { + name := attrS.Name + attr, exists := b.Attributes[name] + _, hidden := hiddenAttrs[name] + if hidden || !exists { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + continue + } + + hiddenAttrs[name] = struct{}{} + attrs[name] = attr.AsHCLAttribute() + } + + blocksWanted := make(map[string]hcl.BlockHeaderSchema) + for _, blockS := range schema.Blocks { + blocksWanted[blockS.Type] = blockS + } + + for _, block := range b.Blocks { + if _, hidden := hiddenBlocks[block.Type]; hidden { + continue + } + blockS, wanted := blocksWanted[block.Type] + if !wanted { + continue + } + + if len(block.Labels) > len(blockS.LabelNames) { + name := block.Type + if len(blockS.LabelNames) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "No labels are expected for %s blocks.", name, + ), + Subject: block.LabelRanges[0].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "Only %d labels (%s) are expected for %s blocks.", + len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), name, + ), + Subject: block.LabelRanges[len(blockS.LabelNames)].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } + continue + } + + if len(block.Labels) < len(blockS.LabelNames) { + name := block.Type + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s for %s", blockS.LabelNames[len(block.Labels)], name), + Detail: fmt.Sprintf( + "All %s blocks must have %d labels (%s).", + name, len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), + ), + Subject: &block.OpenBraceRange, + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + continue + } + + blocks = append(blocks, block.AsHCLBlock()) + } + + // We hide blocks only after we've processed all of them, since otherwise + // we can't process more than one of the same type. 
+ for _, blockS := range schema.Blocks { + hiddenBlocks[blockS.Type] = struct{}{} + } + + remain := &Body{ + Attributes: b.Attributes, + Blocks: b.Blocks, + + hiddenAttrs: hiddenAttrs, + hiddenBlocks: hiddenBlocks, + + SrcRange: b.SrcRange, + EndRange: b.EndRange, + } + + return &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + + MissingItemRange: b.MissingItemRange(), + }, remain, diags +} + +func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var diags hcl.Diagnostics + + if len(b.Blocks) > 0 { + example := b.Blocks[0] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s block", example.Type), + Detail: "Blocks are not allowed here.", + Context: &example.TypeRange, + }) + // we will continue processing anyway, and return the attributes + // we are able to find so that certain analyses can still be done + // in the face of errors. + } + + if b.Attributes == nil { + return attrs, diags + } + + for name, attr := range b.Attributes { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + attrs[name] = attr.AsHCLAttribute() + } + + return attrs, diags +} + +func (b *Body) MissingItemRange() hcl.Range { + return b.EndRange +} + +// Attributes is the collection of attribute definitions within a body. +type Attributes map[string]*Attribute + +func (a Attributes) walkChildNodes(w internalWalkFunc) { + for k, attr := range a { + a[k] = w(attr).(*Attribute) + } +} + +// Range returns the range of some arbitrary point within the set of +// attributes, or an invalid range if there are no attributes. +// +// This is provided only to complete the Node interface, but has no practical +// use. +func (a Attributes) Range() hcl.Range { + // An attributes doesn't really have a useful range to report, since + // it's just a grouping construct. So we'll arbitrarily take the + // range of one of the attributes, or produce an invalid range if we have + // none. In practice, there's little reason to ask for the range of + // an Attributes. + for _, attr := range a { + return attr.Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Attribute represents a single attribute definition within a body. +type Attribute struct { + Name string + Expr Expression + + SrcRange hcl.Range + NameRange hcl.Range + EqualsRange hcl.Range +} + +func (a *Attribute) walkChildNodes(w internalWalkFunc) { + a.Expr = w(a.Expr).(Expression) +} + +func (a *Attribute) Range() hcl.Range { + return a.SrcRange +} + +// AsHCLAttribute returns the block data expressed as a *hcl.Attribute. +func (a *Attribute) AsHCLAttribute() *hcl.Attribute { + return &hcl.Attribute{ + Name: a.Name, + Expr: a.Expr, + + Range: a.SrcRange, + NameRange: a.NameRange, + } +} + +// Blocks is the list of nested blocks within a body. +type Blocks []*Block + +func (bs Blocks) walkChildNodes(w internalWalkFunc) { + for i, block := range bs { + bs[i] = w(block).(*Block) + } +} + +// Range returns the range of some arbitrary point within the list of +// blocks, or an invalid range if there are no blocks. +// +// This is provided only to complete the Node interface, but has no practical +// use. 
+func (bs Blocks) Range() hcl.Range { + if len(bs) > 0 { + return bs[0].Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Block represents a nested block structure +type Block struct { + Type string + Labels []string + Body *Body + + TypeRange hcl.Range + LabelRanges []hcl.Range + OpenBraceRange hcl.Range + CloseBraceRange hcl.Range +} + +func (b *Block) walkChildNodes(w internalWalkFunc) { + b.Body = w(b.Body).(*Body) +} + +func (b *Block) Range() hcl.Range { + return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go new file mode 100644 index 00000000..bcaa15f0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go @@ -0,0 +1,272 @@ +package hclsyntax + +import ( + "fmt" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" +) + +// Token represents a sequence of bytes from some HCL code that has been +// tagged with a type and its range within the source file. +type Token struct { + Type TokenType + Bytes []byte + Range hcl.Range +} + +// Tokens is a slice of Token. +type Tokens []Token + +// TokenType is an enumeration used for the Type field on Token. +type TokenType rune + +const ( + // Single-character tokens are represented by their own character, for + // convenience in producing these within the scanner. However, the values + // are otherwise arbitrary and just intended to be mnemonic for humans + // who might see them in debug output. + + TokenOBrace TokenType = '{' + TokenCBrace TokenType = '}' + TokenOBrack TokenType = '[' + TokenCBrack TokenType = ']' + TokenOParen TokenType = '(' + TokenCParen TokenType = ')' + TokenOQuote TokenType = '«' + TokenCQuote TokenType = '»' + TokenOHeredoc TokenType = 'H' + TokenCHeredoc TokenType = 'h' + + TokenStar TokenType = '*' + TokenSlash TokenType = '/' + TokenPlus TokenType = '+' + TokenMinus TokenType = '-' + TokenPercent TokenType = '%' + + TokenEqual TokenType = '=' + TokenEqualOp TokenType = '≔' + TokenNotEqual TokenType = '≠' + TokenLessThan TokenType = '<' + TokenLessThanEq TokenType = '≤' + TokenGreaterThan TokenType = '>' + TokenGreaterThanEq TokenType = '≥' + + TokenAnd TokenType = '∧' + TokenOr TokenType = '∨' + TokenBang TokenType = '!' + + TokenDot TokenType = '.' + TokenComma TokenType = ',' + + TokenEllipsis TokenType = '…' + TokenFatArrow TokenType = '⇒' + + TokenQuestion TokenType = '?' + TokenColon TokenType = ':' + + TokenTemplateInterp TokenType = '∫' + TokenTemplateControl TokenType = 'λ' + TokenTemplateSeqEnd TokenType = '∎' + + TokenQuotedLit TokenType = 'Q' // might contain backslash escapes + TokenStringLit TokenType = 'S' // cannot contain backslash escapes + TokenNumberLit TokenType = 'N' + TokenIdent TokenType = 'I' + + TokenComment TokenType = 'C' + + TokenNewline TokenType = '\n' + TokenEOF TokenType = '␄' + + // The rest are not used in the language but recognized by the scanner so + // we can generate good diagnostics in the parser when users try to write + // things that might work in other languages they are familiar with, or + // simply make incorrect assumptions about the HCL language. 
+
+	TokenBitwiseAnd TokenType = '&'
+	TokenBitwiseOr  TokenType = '|'
+	TokenBitwiseNot TokenType = '~'
+	TokenBitwiseXor TokenType = '^'
+	TokenStarStar   TokenType = '➚'
+	TokenBacktick   TokenType = '`'
+	TokenSemicolon  TokenType = ';'
+	TokenTabs       TokenType = '␉'
+	TokenInvalid    TokenType = '�'
+	TokenBadUTF8    TokenType = '💩'
+
+	// TokenNil is a placeholder for when a token is required but none is
+	// available, e.g. when reporting errors. The scanner will never produce
+	// this as part of a token stream.
+	TokenNil TokenType = '\x00'
+)
+
+func (t TokenType) GoString() string {
+	return fmt.Sprintf("hclsyntax.%s", t.String())
+}
+
+type scanMode int
+
+const (
+	scanNormal scanMode = iota
+	scanTemplate
+	scanIdentOnly
+)
+
+type tokenAccum struct {
+	Filename string
+	Bytes    []byte
+	Pos      hcl.Pos
+	Tokens   []Token
+}
+
+func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
+	// Walk through our buffer to figure out how much we need to adjust
+	// the start pos to get our end pos.
+
+	start := f.Pos
+	start.Column += startOfs - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
+	start.Byte = startOfs
+
+	end := start
+	end.Byte = endOfs
+	b := f.Bytes[startOfs:endOfs]
+	for len(b) > 0 {
+		advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
+		if (len(seq) == 1 && seq[0] == '\n') || (len(seq) == 2 && seq[0] == '\r' && seq[1] == '\n') {
+			end.Line++
+			end.Column = 1
+		} else {
+			end.Column++
+		}
+		b = b[advance:]
+	}
+
+	f.Pos = end
+
+	f.Tokens = append(f.Tokens, Token{
+		Type:  ty,
+		Bytes: f.Bytes[startOfs:endOfs],
+		Range: hcl.Range{
+			Filename: f.Filename,
+			Start:    start,
+			End:      end,
+		},
+	})
+}
+
+type heredocInProgress struct {
+	Marker      []byte
+	StartOfLine bool
+}
+
+// checkInvalidTokens does a simple pass across the given tokens and generates
+// diagnostics for tokens that should _never_ appear in HCL source. This
+// is intended to avoid the need for the parser to have special support
+// for them all over.
+//
+// Returns diagnostics with no errors if everything seems acceptable.
+// Otherwise, returns zero or more error diagnostics, though tries to limit
+// repetition of the same information.
+func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	toldBitwise := 0
+	toldExponent := 0
+	toldBacktick := 0
+	toldSemicolon := 0
+	toldTabs := 0
+	toldBadUTF8 := 0
+
+	for _, tok := range tokens {
+		switch tok.Type {
+		case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
+			if toldBitwise < 4 {
+				var suggestion string
+				switch tok.Type {
+				case TokenBitwiseAnd:
+					suggestion = " Did you mean boolean AND (\"&&\")?"
+				case TokenBitwiseOr:
+					suggestion = " Did you mean boolean OR (\"||\")?"
+				case TokenBitwiseNot:
+					suggestion = " Did you mean boolean NOT (\"!\")?"
+				}
+
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported operator",
+					Detail:   fmt.Sprintf("Bitwise operators are not supported.%s", suggestion),
+					Subject:  &tok.Range,
+				})
+				toldBitwise++
+			}
+		case TokenStarStar:
+			if toldExponent < 1 {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported operator",
+					Detail:   "\"**\" is not a supported operator. Exponentiation is not supported as an operator.",
+					Subject:  &tok.Range,
+				})
+
+				toldExponent++
+			}
+		case TokenBacktick:
+			// Only report for alternating (even) backticks, so we won't report both start and ends of the same
+			// backtick-quoted string.
+			if toldBacktick < 4 && (toldBacktick%2) == 0 {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid character",
+					Detail:   "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<
+#
+# This script uses the unicode spec to generate a Ragel state machine
+# that recognizes unicode alphanumeric characters. It generates 5
+# character classes: uupper, ulower, ualpha, udigit, and ualnum.
+# Currently supported encodings are UTF-8 [default] and UCS-4.
+#
+# Usage: unicode2ragel.rb [options]
+#        -e, --encoding [ucs4 | utf8]     Data encoding
+#        -h, --help                       Show this message
+#
+# This script was originally written as part of the Ferret search
+# engine library.
+#
+# Author: Rakan El-Khalil
+
+require 'optparse'
+require 'open-uri'
+
+ENCODINGS = [ :utf8, :ucs4 ]
+ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
+DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
+DEFAULT_MACHINE_NAME= "WChar"
+
+###
+# Display vars & default option
+
+TOTAL_WIDTH = 80
+RANGE_WIDTH = 23
+@encoding = :utf8
+@chart_url = DEFAULT_CHART_URL
+machine_name = DEFAULT_MACHINE_NAME
+properties = []
+@output = $stdout
+
+###
+# Option parsing
+
+cli_opts = OptionParser.new do |opts|
+  opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
+    @encoding = o.downcase.to_sym
+  end
+  opts.on("-h", "--help", "Show this message") do
+    puts opts
+    exit
+  end
+  opts.on("-u", "--url URL", "URL to process") do |o|
+    @chart_url = o
+  end
+  opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
+    machine_name = o
+  end
+  opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
+    properties = o
+  end
+  opts.on("-o", "--output FILE", "output file") do |o|
+    @output = File.new(o, "w+")
+  end
+end
+
+cli_opts.parse(ARGV)
+unless ENCODINGS.member? @encoding
+  puts "Invalid encoding: #{@encoding}"
+  puts cli_opts
+  exit
+end
+
+##
+# Downloads the document at url and yields every alpha line's hex
+# range and description.
+
+def each_alpha( url, property )
+  open( url ) do |file|
+    file.each_line do |line|
+      next if line =~ /^#/;
+      next if line !~ /; #{property} #/;
+
+      range, description = line.split(/;/)
+      range.strip!
+      description.gsub!(/.*#/, '').strip!
+
+      if range =~ /\.\./
+           start, stop = range.split '..'
+      else start = stop = range
+      end
+
+      yield start.hex .. stop.hex, description
+    end
+  end
+end
+
+###
+# Formats to hex at minimum width
+
+def to_hex( n )
+  r = "%0X" % n
+  r = "0#{r}" unless (r.length % 2).zero?
+  r
+end
+
+###
+# UCS4 is just a straight hex conversion of the unicode codepoint.
+ +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. + +def utf8_ranges( range ) + ranges = [] + UTF8_BOUNDARIES.each do |max| + if range.begin <= max + if range.end <= max + ranges << range + return ranges + end + + ranges << (range.begin .. max) + range = (max + 1) .. range.end + end + end + ranges +end + +def build_range( start, stop ) + size = start.size/2 + left = size - 1 + return [""] if size < 1 + + a = start[0..1] + b = stop[0..1] + + ### + # Shared prefix + + if a == b + return build_range(start[2..-1], stop[2..-1]).map do |elt| + "0x#{a} " + elt + end + end + + ### + # Unshared prefix, end of run + + return ["0x#{a}..0x#{b} "] if left.zero? + + ### + # Unshared prefix, not end of run + # Range can be 0x123456..0x56789A + # Which is equivalent to: + # 0x123456 .. 0x12FFFF + # 0x130000 .. 0x55FFFF + # 0x560000 .. 0x56789A + + ret = [] + ret << build_range(start, a + "FF" * left) + + ### + # Only generate middle range if need be. + + if a.hex+1 != b.hex + max = to_hex(b.hex - 1) + max = "FF" if b == "FF" + ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left + end + + ### + # Don't generate last range if it is covered by first range + + ret << build_range(b + "00" * left, stop) unless b == "FF" + ret.flatten! +end + +def to_utf8( range ) + utf8_ranges( range ).map do |r| + begin_enc = to_utf8_enc(r.begin) + end_enc = to_utf8_enc(r.end) + build_range begin_enc, end_enc + end.flatten! +end + +## +# Perform a 3-way comparison of the number of codepoints advertised by +# the unicode spec for the given range, the originally parsed range, +# and the resulting utf8 encoded range. 
+
+def count_codepoints( code )
+  code.split(' ').inject(1) do |acc, elt|
+    if elt =~ /0x(.+)\.\.0x(.+)/
+      if @encoding == :utf8
+        acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
+      else
+        acc * ($2.hex - $1.hex + 1)
+      end
+    else
+      acc
+    end
+  end
+end
+
+def is_valid?( range, desc, codes )
+  spec_count  = 1
+  spec_count  = $1.to_i if desc =~ /\[(\d+)\]/
+  range_count = range.end - range.begin + 1
+
+  sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
+  sum == spec_count and sum == range_count
+end
+
+##
+# Generate the state machine to stdout
+
+def generate_machine( name, property )
+  pipe = " "
+  @output.puts "    #{name} = "
+  each_alpha( @chart_url, property ) do |range, desc|
+
+    codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
+
+    #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
+    #      is_valid? range, desc, codes
+
+    range_width = codes.map { |a| a.size }.max
+    range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
+
+    desc_width  = TOTAL_WIDTH - RANGE_WIDTH - 11
+    desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
+
+    if desc.size > desc_width
+      desc = desc[0..desc_width - 4] + "..."
+    end
+
+    codes.each_with_index do |r, idx|
+      desc = "" unless idx.zero?
+      code = "%-#{range_width}s" % r
+      @output.puts "      #{pipe} #{code} ##{desc}"
+      pipe = "|"
+    end
+  end
+  @output.puts "      ;"
+  @output.puts ""
end
+
+@output.puts < 0 && ret[0] == '.' {
+		ret = ret[1:]
+	}
+	return ret
+}
+
+func navigationStepsRev(v node, offset int) []string {
+	switch tv := v.(type) {
+	case *objectVal:
+		// Do any of our properties have an object that contains the target
+		// offset?
+		for _, attr := range tv.Attrs {
+			k := attr.Name
+			av := attr.Value
+
+			switch av.(type) {
+			case *objectVal, *arrayVal:
+				// okay
+			default:
+				continue
+			}
+
+			if av.Range().ContainsOffset(offset) {
+				return append(navigationStepsRev(av, offset), "."+k)
+			}
+		}
+	case *arrayVal:
+		// Do any of our elements contain the target offset?
+ for i, elem := range tv.Values { + + switch elem.(type) { + case *objectVal, *arrayVal: + // okay + default: + continue + } + + if elem.Range().ContainsOffset(offset) { + return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i)) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go new file mode 100644 index 00000000..246fd1c3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go @@ -0,0 +1,491 @@ +package json + +import ( + "encoding/json" + "fmt" + "math/big" + + "github.com/hashicorp/hcl2/hcl" +) + +func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) { + tokens := scan(buf, pos{ + Filename: filename, + Pos: hcl.Pos{ + Byte: 0, + Line: 1, + Column: 1, + }, + }) + p := newPeeker(tokens) + node, diags := parseValue(p) + if len(diags) == 0 && p.Peek().Type != tokenEOF { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous data after value", + Detail: "Extra characters appear after the JSON value.", + Subject: p.Peek().Range.Ptr(), + }) + } + return node, diags +} + +func parseValue(p *peeker) (node, hcl.Diagnostics) { + tok := p.Peek() + + wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) { + if n != nil { + return n, diags + } + return invalidVal{tok.Range}, diags + } + + switch tok.Type { + case tokenBraceO: + return wrapInvalid(parseObject(p)) + case tokenBrackO: + return wrapInvalid(parseArray(p)) + case tokenNumber: + return wrapInvalid(parseNumber(p)) + case tokenString: + return wrapInvalid(parseString(p)) + case tokenKeyword: + return wrapInvalid(parseKeyword(p)) + case tokenBraceC: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing attribute value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + case tokenBrackC: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing array element value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + case tokenEOF: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing value", + Detail: "The JSON data ends prematurely.", + Subject: &tok.Range, + }, + }) + default: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid start of value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + } +} + +func tokenCanStartValue(tok token) bool { + switch tok.Type { + case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword: + return true + default: + return false + } +} + +func parseObject(p *peeker) (node, hcl.Diagnostics) { + var diags hcl.Diagnostics + + open := p.Read() + attrs := []*objectAttr{} + + // recover is used to shift the peeker to what seems to be the end of + // our object, so that when we encounter an error we leave the peeker + // at a reasonable point in the token stream to continue parsing. + recover := func(tok token) { + open := 1 + for { + switch tok.Type { + case tokenBraceO: + open++ + case tokenBraceC: + open-- + if open <= 1 { + return + } + case tokenEOF: + // Ran out of source before we were able to recover, + // so we'll bail here and let the caller deal with it. 
+ return + } + tok = p.Read() + } + } + +Token: + for { + if p.Peek().Type == tokenBraceC { + break Token + } + + keyNode, keyDiags := parseValue(p) + diags = diags.Extend(keyDiags) + if keyNode == nil { + return nil, diags + } + + keyStrNode, ok := keyNode.(*stringVal) + if !ok { + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object attribute name", + Detail: "A JSON object attribute name must be a string", + Subject: keyNode.StartRange().Ptr(), + }) + } + + key := keyStrNode.Value + + colon := p.Read() + if colon.Type != tokenColon { + recover(colon) + + if colon.Type == tokenBraceC || colon.Type == tokenComma { + // Catch common mistake of using braces instead of brackets + // for an object. + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing object value", + Detail: "A JSON object attribute must have a value, introduced by a colon.", + Subject: &colon.Range, + }) + } + + if colon.Type == tokenEquals { + // Possible confusion with native HCL syntax. + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute value colon", + Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.", + Subject: &colon.Range, + }) + } + + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute value colon", + Detail: "A colon must appear between an object attribute's name and its value.", + Subject: &colon.Range, + }) + } + + valNode, valDiags := parseValue(p) + diags = diags.Extend(valDiags) + if valNode == nil { + return nil, diags + } + + attrs = append(attrs, &objectAttr{ + Name: key, + Value: valNode, + NameRange: keyStrNode.SrcRange, + }) + + switch p.Peek().Type { + case tokenComma: + comma := p.Read() + if p.Peek().Type == tokenBraceC { + // Special error message for this common mistake + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Trailing comma in object", + Detail: "JSON does not permit a trailing comma after the final attribute in an object.", + Subject: &comma.Range, + }) + } + continue Token + case tokenEOF: + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed object", + Detail: "No closing brace was found for this JSON object.", + Subject: &open.Range, + }) + case tokenBrackC: + // Consume the bracket anyway, so that we don't return with the peeker + // at a strange place. + p.Read() + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Mismatched braces", + Detail: "A JSON object must be closed with a brace, not a bracket.", + Subject: p.Peek().Range.Ptr(), + }) + case tokenBraceC: + break Token + default: + recover(p.Read()) + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute seperator comma", + Detail: "A comma must appear between each attribute declaration in an object.", + Subject: p.Peek().Range.Ptr(), + }) + } + + } + + close := p.Read() + return &objectVal{ + Attrs: attrs, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + CloseRange: close.Range, + }, diags +} + +func parseArray(p *peeker) (node, hcl.Diagnostics) { + var diags hcl.Diagnostics + + open := p.Read() + vals := []node{} + + // recover is used to shift the peeker to what seems to be the end of + // our array, so that when we encounter an error we leave the peeker + // at a reasonable point in the token stream to continue parsing. 
+	recover := func(tok token) {
+		open := 1
+		for {
+			switch tok.Type {
+			case tokenBrackO:
+				open++
+			case tokenBrackC:
+				open--
+				if open <= 1 {
+					return
+				}
+			case tokenEOF:
+				// Ran out of source before we were able to recover,
+				// so we'll bail here and let the caller deal with it.
+				return
+			}
+			tok = p.Read()
+		}
+	}
+
+Token:
+	for {
+		if p.Peek().Type == tokenBrackC {
+			break Token
+		}
+
+		valNode, valDiags := parseValue(p)
+		diags = diags.Extend(valDiags)
+		if valNode == nil {
+			return nil, diags
+		}
+
+		vals = append(vals, valNode)
+
+		switch p.Peek().Type {
+		case tokenComma:
+			comma := p.Read()
+			if p.Peek().Type == tokenBrackC {
+				// Special error message for this common mistake
+				return nil, diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Trailing comma in array",
+					Detail:   "JSON does not permit a trailing comma after the final value in an array.",
+					Subject:  &comma.Range,
+				})
+			}
+			continue Token
+		case tokenColon:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid array value",
+				Detail:   "A colon is not used to introduce values in a JSON array.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		case tokenEOF:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Unclosed array",
+				Detail:   "No closing bracket was found for this JSON array.",
+				Subject:  &open.Range,
+			})
+		case tokenBraceC:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Mismatched brackets",
+				Detail:   "A JSON array must be closed with a bracket, not a brace.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		case tokenBrackC:
+			break Token
+		default:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Missing value separator comma",
+				Detail:   "A comma must appear between each value in an array.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		}
+
+	}
+
+	close := p.Read()
+	return &arrayVal{
+		Values:    vals,
+		SrcRange:  hcl.RangeBetween(open.Range, close.Range),
+		OpenRange: open.Range,
+	}, diags
+}
+
+func parseNumber(p *peeker) (node, hcl.Diagnostics) {
+	tok := p.Read()
+
+	// Use encoding/json to validate the number syntax.
+	// TODO: Do this more directly to produce better diagnostics.
+	var num json.Number
+	err := json.Unmarshal(tok.Bytes, &num)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON number",
+				Detail:   "There is a syntax error in the given JSON number.",
+				Subject:  &tok.Range,
+			},
+		}
+	}
+
+	f, _, err := big.ParseFloat(string(num), 10, 512, big.ToNearestEven)
+	if err != nil {
+		// Should never happen if above passed, since JSON numbers are a subset
+		// of what big.Float can parse...
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON number",
+				Detail:   "There is a syntax error in the given JSON number.",
+				Subject:  &tok.Range,
+			},
+		}
+	}
+
+	return &numberVal{
+		Value:    f,
+		SrcRange: tok.Range,
+	}, nil
+}
+
+func parseString(p *peeker) (node, hcl.Diagnostics) {
+	tok := p.Read()
+	var str string
+	err := json.Unmarshal(tok.Bytes, &str)
+
+	if err != nil {
+		var errRange hcl.Range
+		if serr, ok := err.(*json.SyntaxError); ok {
+			errOfs := serr.Offset
+			errPos := tok.Range.Start
+			errPos.Byte += int(errOfs)
+
+			// TODO: Use the byte offset to properly count unicode
+			// characters for the column, and mark the whole of the
+			// character that was wrong as part of our range.
+			errPos.Column += int(errOfs)
+
+			errEndPos := errPos
+			errEndPos.Byte++
+			errEndPos.Column++
+
+			errRange = hcl.Range{
+				Filename: tok.Range.Filename,
+				Start:    errPos,
+				End:      errEndPos,
+			}
+		} else {
+			errRange = tok.Range
+		}
+
+		var contextRange *hcl.Range
+		if errRange != tok.Range {
+			contextRange = &tok.Range
+		}
+
+		// FIXME: Eventually we should parse strings directly here so
+		// we can produce a more useful error message in the face of things
+		// such as invalid escapes, etc.
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON string",
+				Detail:   "There is a syntax error in the given JSON string.",
+				Subject:  &errRange,
+				Context:  contextRange,
+			},
+		}
+	}
+
+	return &stringVal{
+		Value:    str,
+		SrcRange: tok.Range,
+	}, nil
+}
+
+func parseKeyword(p *peeker) (node, hcl.Diagnostics) {
+	tok := p.Read()
+	s := string(tok.Bytes)
+
+	switch s {
+	case "true":
+		return &booleanVal{
+			Value:    true,
+			SrcRange: tok.Range,
+		}, nil
+	case "false":
+		return &booleanVal{
+			Value:    false,
+			SrcRange: tok.Range,
+		}, nil
+	case "null":
+		return &nullVal{
+			SrcRange: tok.Range,
+		}, nil
+	case "undefined", "NaN", "Infinity":
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON keyword",
+				Detail:   fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s),
+				Subject:  &tok.Range,
+			},
+		}
+	default:
+		var dym string
+		if suggest := keywordSuggestion(s); suggest != "" {
+			dym = fmt.Sprintf(" Did you mean %q?", suggest)
+		}
+
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON keyword",
+				Detail:   fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym),
+				Subject:  &tok.Range,
+			},
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go b/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go
new file mode 100644
index 00000000..fc7bbf58
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go
@@ -0,0 +1,25 @@
+package json
+
+type peeker struct {
+	tokens []token
+	pos    int
+}
+
+func newPeeker(tokens []token) *peeker {
+	return &peeker{
+		tokens: tokens,
+		pos:    0,
+	}
+}
+
+func (p *peeker) Peek() token {
+	return p.tokens[p.pos]
+}
+
+func (p *peeker) Read() token {
+	ret := p.tokens[p.pos]
+	if ret.Type != tokenEOF {
+		p.pos++
+	}
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/public.go b/vendor/github.com/hashicorp/hcl2/hcl/json/public.go
new file mode 100644
index 00000000..2728aa13
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/public.go
@@ -0,0 +1,94 @@
+package json
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// Parse attempts to parse the given buffer as JSON and, if successful, returns
+// a hcl.File for the HCL configuration represented by it.
+//
+// This is not a generic JSON parser. Instead, it deals only with the profile
+// of JSON used to express HCL configuration.
+//
+// The returned file is valid only if the returned diagnostics returns false
+// from its HasErrors method. If HasErrors returns true, the file represents
+// the subset of data that was able to be parsed, which may be none.
+func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
+	rootNode, diags := parseFileContent(src, filename)
+
+	switch rootNode.(type) {
+	case *objectVal, *arrayVal:
+		// okay
+	default:
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Root value must be object",
+			Detail:   "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.",
+			Subject:  rootNode.StartRange().Ptr(),
+		})
+
+		// Since we've already produced an error message for this being
+		// invalid, we'll return an empty placeholder here so that trying to
+		// extract content from our root body won't produce a redundant
+		// error saying the same thing again in more general terms.
+		fakePos := hcl.Pos{
+			Byte:   0,
+			Line:   1,
+			Column: 1,
+		}
+		fakeRange := hcl.Range{
+			Filename: filename,
+			Start:    fakePos,
+			End:      fakePos,
+		}
+		rootNode = &objectVal{
+			Attrs:     []*objectAttr{},
+			SrcRange:  fakeRange,
+			OpenRange: fakeRange,
+		}
+	}
+
+	file := &hcl.File{
+		Body: &body{
+			val: rootNode,
+		},
+		Bytes: src,
+		Nav:   navigation{rootNode},
+	}
+	return file, diags
+}
+
+// ParseFile is a convenience wrapper around Parse that first attempts to load
+// data from the given filename, passing the result to Parse if successful.
+//
+// If the file cannot be read, an error diagnostic with nil context is returned.
+func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Failed to open file",
+				Detail:   fmt.Sprintf("The file %q could not be opened.", filename),
+			},
+		}
+	}
+	defer f.Close()
+
+	src, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Failed to read file",
+				Detail:   fmt.Sprintf("The file %q was opened, but an error occurred while reading it.", filename),
+			},
+		}
+	}
+
+	return Parse(src, filename)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
new file mode 100644
index 00000000..0a8378b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
@@ -0,0 +1,293 @@
+package json
+
+import (
+	"fmt"
+
+	"github.com/apparentlymart/go-textseg/textseg"
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+//go:generate stringer -type tokenType scanner.go
+type tokenType rune
+
+const (
+	tokenBraceO  tokenType = '{'
+	tokenBraceC  tokenType = '}'
+	tokenBrackO  tokenType = '['
+	tokenBrackC  tokenType = ']'
+	tokenComma   tokenType = ','
+	tokenColon   tokenType = ':'
+	tokenKeyword tokenType = 'K'
+	tokenString  tokenType = 'S'
+	tokenNumber  tokenType = 'N'
+	tokenEOF     tokenType = '␄'
+	tokenInvalid tokenType = 0
+	tokenEquals  tokenType = '=' // used only for reminding the user of JSON syntax
+)
+
+type token struct {
+	Type  tokenType
+	Bytes []byte
+	Range hcl.Range
+}
+
+// scan returns the primary tokens for the given JSON buffer in sequence.
+//
+// The responsibility of this pass is to just mark the slices of the buffer
+// as being of various types.
+// It is lax in how it interprets the multi-byte token types keyword,
+// string and number, preferring to capture erroneous extra bytes that we
+// presume the user intended to be part of the token so that we can
+// generate more helpful diagnostics in the parser.
+func scan(buf []byte, start pos) []token {
+	var tokens []token
+	p := start
+	for {
+		if len(buf) == 0 {
+			tokens = append(tokens, token{
+				Type:  tokenEOF,
+				Bytes: nil,
+				Range: posRange(p, p),
+			})
+			return tokens
+		}
+
+		buf, p = skipWhitespace(buf, p)
+
+		if len(buf) == 0 {
+			tokens = append(tokens, token{
+				Type:  tokenEOF,
+				Bytes: nil,
+				Range: posRange(p, p),
+			})
+			return tokens
+		}
+
+		start = p
+
+		first := buf[0]
+		switch {
+		case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=':
+			p.Pos.Column++
+			p.Pos.Byte++
+			tokens = append(tokens, token{
+				Type:  tokenType(first),
+				Bytes: buf[0:1],
+				Range: posRange(start, p),
+			})
+			buf = buf[1:]
+		case first == '"':
+			var tokBuf []byte
+			tokBuf, buf, p = scanString(buf, p)
+			tokens = append(tokens, token{
+				Type:  tokenString,
+				Bytes: tokBuf,
+				Range: posRange(start, p),
+			})
+		case byteCanStartNumber(first):
+			var tokBuf []byte
+			tokBuf, buf, p = scanNumber(buf, p)
+			tokens = append(tokens, token{
+				Type:  tokenNumber,
+				Bytes: tokBuf,
+				Range: posRange(start, p),
+			})
+		case byteCanStartKeyword(first):
+			var tokBuf []byte
+			tokBuf, buf, p = scanKeyword(buf, p)
+			tokens = append(tokens, token{
+				Type:  tokenKeyword,
+				Bytes: tokBuf,
+				Range: posRange(start, p),
+			})
+		default:
+			tokens = append(tokens, token{
+				Type:  tokenInvalid,
+				Bytes: buf[:1],
+				Range: start.Range(1, 1),
+			})
+			// If we've encountered an invalid token then we might as well stop
+			// scanning since the parser won't proceed beyond this point.
+			return tokens
+		}
+	}
+}
+
+func byteCanStartNumber(b byte) bool {
+	switch b {
+	// We are slightly more tolerant than JSON requires here since we
+	// expect the parser will make a stricter interpretation of the
+	// number bytes, but we specifically don't allow 'e' or 'E' here
+	// since we want the scanner to treat that as the start of an
+	// invalid keyword instead, to produce more intelligible error messages.
+	case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		return true
+	default:
+		return false
+	}
+}
+
+func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) {
+	// The scanner doesn't check that the sequence of digit-ish bytes is
+	// in a valid order. The parser must do this when decoding a number
+	// token.
+	var i int
+	p := start
+Byte:
+	for i = 0; i < len(buf); i++ {
+		switch buf[i] {
+		case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			p.Pos.Byte++
+			p.Pos.Column++
+		default:
+			break Byte
+		}
+	}
+	return buf[:i], buf[i:], p
+}
+
+func byteCanStartKeyword(b byte) bool {
+	switch {
+	// We allow any sequence of alphabetical characters here, even though
+	// JSON is more constrained, so that we can collect what we presume
+	// the user intended to be a single keyword and then check its validity
+	// in the parser, where we can generate better diagnostics.
+	// So e.g. we want to be able to say:
+	//   unrecognized keyword "True". Did you mean "true"?
+	case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z'):
+		return true
+	default:
+		return false
+	}
+}
+
+func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) {
+	var i int
+	p := start
+Byte:
+	for i = 0; i < len(buf); i++ {
+		b := buf[i]
+		switch {
+		case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_':
+			p.Pos.Byte++
+			p.Pos.Column++
+		default:
+			break Byte
+		}
+	}
+	return buf[:i], buf[i:], p
+}
+
+func scanString(buf []byte, start pos) ([]byte, []byte, pos) {
+	// The scanner doesn't validate correct use of escapes, etc. It pays
+	// attention to escapes only for the purpose of identifying the closing
+	// quote character. It's the parser's responsibility to do proper
+	// validation.
+	//
+	// The scanner also doesn't specifically detect unterminated string
+	// literals, though they can be identified in the parser by checking if
+	// the final byte in a string token is the double-quote character.
+
+	// Skip the opening quote symbol
+	i := 1
+	p := start
+	p.Pos.Byte++
+	p.Pos.Column++
+	escaping := false
+Byte:
+	for i < len(buf) {
+		b := buf[i]
+
+		switch {
+		case b == '\\':
+			escaping = !escaping
+			p.Pos.Byte++
+			p.Pos.Column++
+			i++
+		case b == '"':
+			p.Pos.Byte++
+			p.Pos.Column++
+			i++
+			if !escaping {
+				break Byte
+			}
+			escaping = false
+		case b < 32:
+			break Byte
+		default:
+			// Advance by one grapheme cluster, so that we consider each
+			// grapheme to be a "column".
+			// Ignoring error because this scanner cannot produce errors.
+			advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true)
+
+			p.Pos.Byte += advance
+			p.Pos.Column++
+			i += advance
+
+			escaping = false
+		}
+	}
+	return buf[:i], buf[i:], p
+}
+
+func skipWhitespace(buf []byte, start pos) ([]byte, pos) {
+	var i int
+	p := start
+Byte:
+	for i = 0; i < len(buf); i++ {
+		switch buf[i] {
+		case ' ':
+			p.Pos.Byte++
+			p.Pos.Column++
+		case '\n':
+			p.Pos.Byte++
+			p.Pos.Column = 1
+			p.Pos.Line++
+		case '\r':
+			// For the purpose of line/column counting we consider a
+			// carriage return to take up no space, assuming that it will
+			// be paired up with a newline (on Windows, for example) that
+			// will account for both of them.
+			p.Pos.Byte++
+		case '\t':
+			// We arbitrarily count a tab as if it were two spaces, because
+			// we need to choose _some_ number here. This means any system
+			// that renders code on-screen with markers must itself treat
+			// tabs as a pair of spaces for rendering purposes, or instead
+			// use the byte offset and back into its own column position.
+			p.Pos.Byte++
+			p.Pos.Column += 2
+		default:
+			break Byte
+		}
+	}
+	return buf[i:], p
+}
+
+type pos struct {
+	Filename string
+	Pos      hcl.Pos
+}
+
+func (p *pos) Range(byteLen, charLen int) hcl.Range {
+	start := p.Pos
+	end := p.Pos
+	end.Byte += byteLen
+	end.Column += charLen
+	return hcl.Range{
+		Filename: p.Filename,
+		Start:    start,
+		End:      end,
+	}
+}
+
+func posRange(start, end pos) hcl.Range {
+	return hcl.Range{
+		Filename: start.Filename,
+		Start:    start.Pos,
+		End:      end.Pos,
+	}
+}
+
+func (t token) GoString() string {
+	return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
new file mode 100644
index 00000000..9b33c7f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
@@ -0,0 +1,405 @@
+# HCL JSON Syntax Specification
+
+This is the specification for the JSON serialization for HCL. HCL is a system
+for defining configuration languages for applications.
+The HCL information model is designed to support multiple concrete syntaxes
+for configuration, and this JSON-based format complements
+[the native syntax](../hclsyntax/spec.md) by being easy to machine-generate,
+whereas the native syntax is oriented towards human authoring and maintenance.
+
+This syntax is defined in terms of JSON as defined in
+[RFC7159](https://tools.ietf.org/html/rfc7159). As such it inherits the JSON
+grammar as-is, and merely defines a specific methodology for interpreting
+JSON constructs into HCL structural elements and expressions.
+
+This mapping is defined such that valid JSON-serialized HCL input can be
+_produced_ using standard JSON implementations in various programming languages.
+_Parsing_ such JSON has some additional constraints beyond what is normally
+supported by JSON parsers, so a specialized parser may be required that
+is able to:
+
+* Preserve the relative ordering of properties defined in an object.
+* Preserve multiple definitions of the same property name.
+* Preserve numeric values to the precision required by the number type
+  in [the HCL syntax-agnostic information model](../spec.md).
+* Retain source location information for parsed tokens/constructs in order
+  to produce good error messages.
+
+## Structural Elements
+
+[The HCL syntax-agnostic information model](../spec.md) defines a _body_ as an
+abstract container for attribute definitions and child blocks. A body is
+represented in JSON as either a single JSON object or a JSON array of objects.
+
+Body processing is in terms of JSON object properties, visited in the order
+they appear in the input. Where a body is represented by a single JSON object,
+the properties of that object are visited in order. Where a body is
+represented by a JSON array, each of its elements is visited in order and
+each element has its properties visited in order. If any element of the array
+is not a JSON object then the input is erroneous.
+
+When a body is being processed in the _dynamic attributes_ mode, the allowance
+of a JSON array in the previous paragraph does not apply and instead a single
+JSON object is always required.
+
+As defined in the language-agnostic model, body processing is in terms
+of a schema which provides context for interpreting the body's content. For
+JSON bodies, the schema is crucial to allow differentiation of attribute
+definitions and block definitions, both of which are represented via object
+properties.
+
+The special property name `"//"`, when used in an object representing a HCL
+body, is parsed and ignored. A property with this name can be used to
+include human-readable comments. (This special property name is _not_
+processed in this way for any _other_ HCL constructs that are represented as
+JSON objects.)
+
+### Attributes
+
+Where the given schema describes an attribute with a given name, the object
+property with the matching name — if present — serves as the attribute's
+definition.
+
+When a body is being processed in the _dynamic attributes_ mode, each object
+property serves as an attribute definition for the attribute whose name
+matches the property name.
+
+The value of an attribute definition property is interpreted as an _expression_,
+as described in a later section.
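+
+For illustration only (non-normative), the following sketch shows how a
+calling application might request such an attribute through the Go `hcl2`
+API that accompanies this vendored package; the file name and contents are
+invented for the example:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/json"
+)
+
+func main() {
+	f, diags := json.Parse([]byte(`{"foo": "bar baz"}`), "example.hcl.json")
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	schema := &hcl.BodySchema{
+		Attributes: []hcl.AttributeSchema{{Name: "foo", Required: true}},
+	}
+	content, diags := f.Body.Content(schema)
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// A nil evaluation context selects literal-only string interpretation.
+	val, _ := content.Attributes["foo"].Expr.Value(nil)
+	fmt.Println(val.AsString()) // prints: bar baz
+}
+```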
+
+Given a schema that calls for an attribute named "foo", a JSON object like
+the following provides a definition for that attribute:
+
+```json
+{
+  "foo": "bar baz"
+}
+```
+
+### Blocks
+
+Where the given schema describes a block with a given type name, each object
+property with the matching name serves as a definition of zero or more blocks
+of that type.
+
+Processing of child blocks is in terms of nested JSON objects and arrays.
+If the schema defines one or more _labels_ for the block type, a nested JSON
+object or JSON array of objects is required for each labelling level. These
+are flattened to a single ordered sequence of object properties using the
+same algorithm as for body content as defined above. Each object property
+serves as a label value at the corresponding level.
+
+After any labelling levels, the next nested value is either a JSON object
+representing a single block body, or a JSON array of JSON objects that each
+represent a single block body. Use of an array accommodates the definition
+of multiple blocks that have identical type and labels.
+
+Given a schema that calls for a block type named "foo" with no labels, the
+following JSON objects are all valid definitions of zero or more blocks of this
+type:
+
+```json
+{
+  "foo": {
+    "child_attr": "baz"
+  }
+}
+```
+
+```json
+{
+  "foo": [
+    {
+      "child_attr": "baz"
+    },
+    {
+      "child_attr": "boz"
+    }
+  ]
+}
+```
+
+```json
+{
+  "foo": []
+}
+```
+
+The first of these defines a single child block of type "foo". The second
+defines _two_ such blocks. The final example shows a degenerate definition
+of zero blocks, though generators should prefer to omit the property entirely
+in this scenario.
+
+Given a schema that calls for a block type named "foo" with _two_ labels, the
+extra label levels must be represented as objects or arrays of objects as in
+the following examples:
+
+```json
+{
+  "foo": {
+    "bar": {
+      "baz": {
+        "child_attr": "baz"
+      },
+      "boz": {
+        "child_attr": "baz"
+      }
+    },
+    "boz": {
+      "baz": {
+        "child_attr": "baz"
+      }
+    }
+  }
+}
+```
+
+```json
+{
+  "foo": {
+    "bar": {
+      "baz": {
+        "child_attr": "baz"
+      },
+      "boz": {
+        "child_attr": "baz"
+      }
+    },
+    "boz": {
+      "baz": [
+        {
+          "child_attr": "baz"
+        },
+        {
+          "child_attr": "boz"
+        }
+      ]
+    }
+  }
+}
```

```json
+{
+  "foo": [
+    {
+      "bar": {
+        "baz": {
+          "child_attr": "baz"
+        },
+        "boz": {
+          "child_attr": "baz"
+        }
+      }
+    },
+    {
+      "bar": {
+        "baz": [
+          {
+            "child_attr": "baz"
+          },
+          {
+            "child_attr": "boz"
+          }
+        ]
+      }
+    }
+  ]
+}
+```
+
+```json
+{
+  "foo": {
+    "bar": {
+      "baz": {
+        "child_attr": "baz"
+      },
+      "boz": {
+        "child_attr": "baz"
+      }
+    },
+    "bar": {
+      "baz": [
+        {
+          "child_attr": "baz"
+        },
+        {
+          "child_attr": "boz"
+        }
+      ]
+    }
+  }
+}
+```
+
+Arrays can be introduced at either the label definition or block body
+definition levels to define multiple definitions of the same block type
+or labels while preserving order.
+
+A JSON HCL parser _must_ support duplicate definitions of the same property
+name within a single object, preserving all of them and the relative ordering
+between them. The array-based forms are also required so that JSON HCL
+configurations can be produced with JSON producing libraries that are not
+able to preserve property definition order and multiple definitions of
+the same property.
+
+## Expressions
+
+JSON lacks a native expression syntax, so the HCL JSON syntax instead defines
+a mapping for each of the JSON value types, including a special mapping for
+strings that allows optional use of arbitrary expressions.
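+
+As a non-normative illustration of this mapping, the following sketch shows
+the value types produced when the vendored Go implementation evaluates each
+JSON value type; the attribute names are invented:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl/json"
+)
+
+func main() {
+	src := []byte(`{"nums": [1, 2], "flag": true, "obj": {"a": "x"}}`)
+	f, _ := json.Parse(src, "values.hcl.json")
+	attrs, _ := f.Body.JustAttributes()
+
+	nums, _ := attrs["nums"].Expr.Value(nil) // a tuple value
+	flag, _ := attrs["flag"].Expr.Value(nil) // a boolean value
+	obj, _ := attrs["obj"].Expr.Value(nil)   // an object value
+	fmt.Println(
+		nums.Type().FriendlyName(), // tuple
+		flag.Type().FriendlyName(), // bool
+		obj.Type().FriendlyName(),  // object
+	)
+}
+```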
+
+### Objects
+
+When interpreted as an expression, a JSON object represents a value of a HCL
+object type.
+
+Each property of the JSON object represents an attribute of the HCL object type.
+The property name string given in the JSON input is interpreted as a string
+expression as described below, and its result is converted to string as defined
+by the syntax-agnostic information model. If such a conversion is not possible,
+an error is produced and evaluation fails.
+
+An instance of the constructed object type is then created, whose values
+are interpreted by again recursively applying the mapping rules defined in
+this section to each of the property values.
+
+If any evaluated property name strings produce null values, an error is
+produced and evaluation fails. If any produce _unknown_ values, the _entire
+object's_ result is an unknown value of the dynamic pseudo-type, signalling
+that the type of the object cannot be determined.
+
+It is an error to define the same property name multiple times within a single
+JSON object interpreted as an expression. In full expression mode, this
+constraint applies to the name expression results after conversion to string,
+rather than the raw string that may contain interpolation expressions.
+
+### Arrays
+
+When interpreted as an expression, a JSON array represents a value of a HCL
+tuple type.
+
+Each element of the JSON array represents an element of the HCL tuple type.
+The tuple type is constructed by enumerating the JSON array elements, creating
+for each an element whose type is the result of recursively applying the
+expression mapping rules. Correspondence is preserved between the array element
+indices and the tuple element indices.
+
+An instance of the constructed tuple type is then created, whose values are
+interpreted by again recursively applying the mapping rules defined in this
+section.
+
+### Numbers
+
+When interpreted as an expression, a JSON number represents a HCL number value.
+
+HCL numbers are arbitrary-precision decimal values, so a JSON HCL parser must
+be able to translate exactly the value given to a number of corresponding
+precision, within the constraints set by the HCL syntax-agnostic information
+model.
+
+In practice, off-the-shelf JSON serializers often do not support customizing the
+processing of numbers, and instead force processing as 32-bit or 64-bit
+floating point values.
+
+A _producer_ of JSON HCL that uses such a serializer can provide numeric values
+as JSON strings where they have precision too great for representation in the
+serializer's chosen numeric type in situations where the result will be
+converted to number (using the standard conversion rules) by a calling
+application.
+
+Alternatively, for expressions that are evaluated in full expression mode an
+embedded template interpolation can be used to faithfully represent a number,
+such as `"${1e150}"`, which will then be evaluated by the underlying HCL native
+syntax expression evaluator.
+
+### Boolean Values
+
+The JSON boolean values `true` and `false`, when interpreted as expressions,
+represent the corresponding HCL boolean values.
+
+### The Null Value
+
+The JSON value `null`, when interpreted as an expression, represents a
+HCL null value of the dynamic pseudo-type.
+
+### Strings
+
+When interpreted as an expression, a JSON string may be interpreted in one of
+two ways depending on the evaluation mode.
+
+If evaluating in literal-only mode (as defined by the syntax-agnostic
+information model) the literal string is interpreted directly as a HCL string
+value, by directly using the exact sequence of unicode characters represented.
+Template interpolations and directives MUST NOT be processed in this mode,
+allowing any characters that appear as introduction sequences to pass through
+literally:
+
+```json
+"Hello world! Template sequences like ${ are not interpreted here."
+```
+
+When evaluating in full expression mode (again, as defined by the
+syntax-agnostic information model) the literal string is instead interpreted
+as a _standalone template_ in the HCL Native Syntax. The expression evaluation
+result is then the direct result of evaluating that template with the current
+variable scope and function table.
+
+```json
+"Hello, ${name}! Template sequences are interpreted in full expression mode."
+```
+
+In particular the _Template Interpolation Unwrapping_ requirement from the
+HCL native syntax specification must be implemented, allowing the use of
+single-interpolation templates to represent expressions that would not
+otherwise be representable in JSON, such as the following example where
+the result must be a number, rather than a string representation of a number:
+
+```json
+"${ a + b }"
+```
+
+## Static Analysis
+
+The HCL static analysis operations are implemented for JSON values that
+represent expressions, as described in the following sections.
+
+Due to the limited expressive power of the JSON syntax alone, use of these
+static analysis functions rather than normal expression evaluation provides
+additional context for how a JSON value is to be interpreted, which means
+that static analysis can result in a different interpretation of a given
+expression than normal evaluation.
+
+### Static List
+
+An expression interpreted as a static list must be a JSON array. Each of the
+values in the array is interpreted as an expression and returned.
+
+### Static Map
+
+An expression interpreted as a static map must be a JSON object. Each of the
+key/value pairs in the object is presented as a pair of expressions. Since
+object property names are always strings, evaluating the key expression with
+a non-`nil` evaluation context will evaluate any template sequences given
+in the property name.
+
+### Static Call
+
+An expression interpreted as a static call must be a string. The content of
+the string is interpreted as a native syntax expression (not a _template_,
+unlike normal evaluation) and then the static call analysis is delegated to
+that expression.
+
+If the original expression is not a string or its contents cannot be parsed
+as a native syntax expression then static call analysis is not supported.
+
+### Static Traversal
+
+An expression interpreted as a static traversal must be a string. The content
+of the string is interpreted as a native syntax expression (not a _template_,
+unlike normal evaluation) and then static traversal analysis is delegated
+to that expression.
+
+If the original expression is not a string or its contents cannot be parsed
+as a native syntax expression then static traversal analysis is not supported.
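+
+As a non-normative illustration of the two string evaluation modes described
+above, the following sketch uses the vendored Go packages: a nil evaluation
+context selects literal-only mode, while a non-nil context selects full
+expression mode. The file and variable names are invented:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/json"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	f, _ := json.Parse([]byte(`{"greeting": "Hello, ${name}!"}`), "tmpl.hcl.json")
+	attrs, _ := f.Body.JustAttributes()
+	expr := attrs["greeting"].Expr
+
+	// Literal-only mode: the template sequence passes through untouched.
+	lit, _ := expr.Value(nil)
+	fmt.Println(lit.AsString()) // Hello, ${name}!
+
+	// Full expression mode: the string is evaluated as a template.
+	ctx := &hcl.EvalContext{
+		Variables: map[string]cty.Value{"name": cty.StringVal("world")},
+	}
+	full, _ := expr.Value(ctx)
+	fmt.Println(full.AsString()) // Hello, world!
+}
+```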
+ diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go new file mode 100644 index 00000000..28dcf525 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go @@ -0,0 +1,616 @@ +package json + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// body is the implementation of "Body" used for files processed with the JSON +// parser. +type body struct { + val node + + // If non-nil, the keys of this map cause the corresponding attributes to + // be treated as non-existing. This is used when Body.PartialContent is + // called, to produce the "remaining content" Body. + hiddenAttrs map[string]struct{} +} + +// expression is the implementation of "Expression" used for files processed +// with the JSON parser. +type expression struct { + src node +} + +func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, newBody, diags := b.PartialContent(schema) + + hiddenAttrs := newBody.(*body).hiddenAttrs + + var nameSuggestions []string + for _, attrS := range schema.Attributes { + if _, ok := hiddenAttrs[attrS.Name]; !ok { + // Only suggest an attribute name if we didn't use it already. + nameSuggestions = append(nameSuggestions, attrS.Name) + } + } + for _, blockS := range schema.Blocks { + // Blocks can appear multiple times, so we'll suggest their type + // names regardless of whether they've already been used. + nameSuggestions = append(nameSuggestions, blockS.Type) + } + + jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) + diags = append(diags, attrDiags...) + + for _, attr := range jsonAttrs { + k := attr.Name + if k == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, ok := hiddenAttrs[k]; !ok { + suggestion := nameSuggestion(k, nameSuggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous JSON object property", + Detail: fmt.Sprintf("No attribute or block type is named %q.%s", k, suggestion), + Subject: &attr.NameRange, + Context: attr.Range().Ptr(), + }) + } + } + + return content, diags +} + +func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + + jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) + diags = append(diags, attrDiags...) + + usedNames := map[string]struct{}{} + if b.hiddenAttrs != nil { + for k := range b.hiddenAttrs { + usedNames[k] = struct{}{} + } + } + + content := &hcl.BodyContent{ + Attributes: map[string]*hcl.Attribute{}, + Blocks: nil, + + MissingItemRange: b.MissingItemRange(), + } + + // Create some more convenient data structures for our work below. 
+ attrSchemas := map[string]hcl.AttributeSchema{} + blockSchemas := map[string]hcl.BlockHeaderSchema{} + for _, attrS := range schema.Attributes { + attrSchemas[attrS.Name] = attrS + } + for _, blockS := range schema.Blocks { + blockSchemas[blockS.Type] = blockS + } + + for _, jsonAttr := range jsonAttrs { + attrName := jsonAttr.Name + if _, used := b.hiddenAttrs[attrName]; used { + continue + } + + if attrS, defined := attrSchemas[attrName]; defined { + if existing, exists := content.Attributes[attrName]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate attribute definition", + Detail: fmt.Sprintf("The attribute %q was already defined at %s.", attrName, existing.Range), + Subject: &jsonAttr.NameRange, + Context: jsonAttr.Range().Ptr(), + }) + continue + } + + content.Attributes[attrS.Name] = &hcl.Attribute{ + Name: attrS.Name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + usedNames[attrName] = struct{}{} + + } else if blockS, defined := blockSchemas[attrName]; defined { + bv := jsonAttr.Value + blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks) + diags = append(diags, blockDiags...) + usedNames[attrName] = struct{}{} + } + + // We ignore anything that isn't defined because that's the + // PartialContent contract. The Content method will catch leftovers. + } + + // Make sure we got all the required attributes. + for _, attrS := range schema.Attributes { + if !attrS.Required { + continue + } + if _, defined := content.Attributes[attrS.Name]; !defined { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + } + + unusedBody := &body{ + val: b.val, + hiddenAttrs: usedNames, + } + + return content, unusedBody, diags +} + +// JustAttributes for JSON bodies interprets all properties of the wrapped +// JSON object as attributes and returns them. +func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + var diags hcl.Diagnostics + attrs := make(map[string]*hcl.Attribute) + + obj, ok := b.val.(*objectVal) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "A JSON object is required here, defining the attributes for this block.", + Subject: b.val.StartRange().Ptr(), + }) + return attrs, diags + } + + for _, jsonAttr := range obj.Attrs { + name := jsonAttr.Name + if name == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + + if existing, exists := attrs[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate attribute definition", + Detail: fmt.Sprintf("The attribute %q was already defined at %s.", name, existing.Range), + Subject: &jsonAttr.NameRange, + }) + continue + } + + attrs[name] = &hcl.Attribute{ + Name: name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + } + + // No diagnostics possible here, since the parser already took care of + // finding duplicates and every JSON value can be a valid attribute value. 
+ return attrs, diags +} + +func (b *body) MissingItemRange() hcl.Range { + switch tv := b.val.(type) { + case *objectVal: + return tv.CloseRange + case *arrayVal: + return tv.OpenRange + default: + // Should not happen in correct operation, but might show up if the + // input is invalid and we are producing partial results. + return tv.StartRange() + } +} + +func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) { + if len(labelsLeft) > 0 { + labelName := labelsLeft[0] + jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName) + diags = append(diags, attrDiags...) + + if len(jsonAttrs) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing block label", + Detail: fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName), + Subject: v.StartRange().Ptr(), + }) + return + } + labelsUsed := append(labelsUsed, "") + labelRanges := append(labelRanges, hcl.Range{}) + for _, p := range jsonAttrs { + pk := p.Name + labelsUsed[len(labelsUsed)-1] = pk + labelRanges[len(labelRanges)-1] = p.NameRange + diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...) + } + return + } + + // By the time we get here, we've peeled off all the labels and we're ready + // to deal with the block's actual content. + + // need to copy the label slices because their underlying arrays will + // continue to be mutated after we return. + labels := make([]string, len(labelsUsed)) + copy(labels, labelsUsed) + labelR := make([]hcl.Range, len(labelRanges)) + copy(labelR, labelRanges) + + switch tv := v.(type) { + case *objectVal: + // Single instance of the block + *blocks = append(*blocks, &hcl.Block{ + Type: typeName, + Labels: labels, + Body: &body{ + val: tv, + }, + + DefRange: tv.OpenRange, + TypeRange: *typeRange, + LabelRanges: labelR, + }) + case *arrayVal: + // Multiple instances of the block + for _, av := range tv.Values { + *blocks = append(*blocks, &hcl.Block{ + Type: typeName, + Labels: labels, + Body: &body{ + val: av, // might be mistyped; we'll find out when content is requested for this body + }, + + DefRange: tv.OpenRange, + TypeRange: *typeRange, + LabelRanges: labelR, + }) + } + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName), + Subject: v.StartRange().Ptr(), + }) + } + return +} + +// collectDeepAttrs takes either a single object or an array of objects and +// flattens it into a list of object attributes, collecting attributes from +// all of the objects in a given array. +// +// Ordering is preserved, so a list of objects that each have one property +// will result in those properties being returned in the same order as the +// objects appeared in the array. +// +// This is appropriate for use only for objects representing bodies or labels +// within a block. +// +// The labelName argument, if non-null, is used to tailor returned error +// messages to refer to block labels rather than attributes and child blocks. +// It has no other effect. 
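+//
+// For example, the object value `{"a": 1, "b": 2}` and the array value
+// `[{"a": 1}, {"b": 2}]` both flatten to the same attribute sequence a, b.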
+func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+	var attrs []*objectAttr
+
+	switch tv := v.(type) {
+
+	case *objectVal:
+		attrs = append(attrs, tv.Attrs...)
+
+	case *arrayVal:
+		for _, ev := range tv.Values {
+			switch tev := ev.(type) {
+			case *objectVal:
+				attrs = append(attrs, tev.Attrs...)
+			default:
+				if labelName != nil {
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  "Incorrect JSON value type",
+						Detail:   fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName),
+						Subject:  ev.StartRange().Ptr(),
+					})
+				} else {
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  "Incorrect JSON value type",
+						Detail:   "A JSON object is required here, to define attributes and child blocks.",
+						Subject:  ev.StartRange().Ptr(),
+					})
+				}
+			}
+		}
+
+	default:
+		if labelName != nil {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Incorrect JSON value type",
+				Detail:   fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName),
+				Subject:  v.StartRange().Ptr(),
+			})
+		} else {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Incorrect JSON value type",
+				Detail:   "Either a JSON object or JSON array of objects is required here, to define attributes and child blocks.",
+				Subject:  v.StartRange().Ptr(),
+			})
+		}
+	}
+
+	return attrs, diags
+}
+
+func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	switch v := e.src.(type) {
+	case *stringVal:
+		if ctx != nil {
+			// Parse string contents as a HCL native language expression.
+			// We only do this if we have a context, so passing a nil context
+			// is how the caller specifies that interpolations are not allowed
+			// and that the string should just be returned verbatim.
+			templateSrc := v.Value
+			expr, diags := hclsyntax.ParseTemplate(
+				[]byte(templateSrc),
+				v.SrcRange.Filename,
+
+				// This won't produce _exactly_ the right result, since
+				// the hclsyntax parser can't "see" any escapes we removed
+				// while parsing JSON, but it's better than nothing.
+				hcl.Pos{
+					Line: v.SrcRange.Start.Line,
+
+					// skip over the opening quote mark
+					Byte:   v.SrcRange.Start.Byte + 1,
+					Column: v.SrcRange.Start.Column + 1,
+				},
+			)
+			if diags.HasErrors() {
+				return cty.DynamicVal, diags
+			}
+			val, evalDiags := expr.Value(ctx)
+			diags = append(diags, evalDiags...)
+			return val, diags
+		}
+
+		return cty.StringVal(v.Value), nil
+	case *numberVal:
+		return cty.NumberVal(v.Value), nil
+	case *booleanVal:
+		return cty.BoolVal(v.Value), nil
+	case *arrayVal:
+		vals := []cty.Value{}
+		for _, jsonVal := range v.Values {
+			val, _ := (&expression{src: jsonVal}).Value(ctx)
+			vals = append(vals, val)
+		}
+		return cty.TupleVal(vals), nil
+	case *objectVal:
+		var diags hcl.Diagnostics
+		attrs := map[string]cty.Value{}
+		attrRanges := map[string]hcl.Range{}
+		known := true
+		for _, jsonAttr := range v.Attrs {
+			// In this one context we allow keys to contain interpolation
+			// expressions too, assuming we're evaluating in interpolation
+			// mode. This achieves parity with the native syntax where
+			// object expressions can have dynamic keys, while block contents
+			// may not.
+ name, nameDiags := (&expression{src: &stringVal{ + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + }}).Value(ctx) + val, valDiags := (&expression{src: jsonAttr.Value}).Value(ctx) + diags = append(diags, nameDiags...) + diags = append(diags, valDiags...) + + var err error + name, err = convert.Convert(name, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key expression", + Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err), + Subject: &jsonAttr.NameRange, + }) + continue + } + if name.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key expression", + Detail: "Cannot use null value as an object key.", + Subject: &jsonAttr.NameRange, + }) + continue + } + if !name.IsKnown() { + // This is a bit of a weird case, since our usual rules require + // us to tolerate unknowns and just represent the result as + // best we can but if we don't know the key then we can't + // know the type of our object at all, and thus we must turn + // the whole thing into cty.DynamicVal. This is consistent with + // how this situation is handled in the native syntax. + // We'll keep iterating so we can collect other errors in + // subsequent attributes. + known = false + continue + } + nameStr := name.AsString() + if _, defined := attrs[nameStr]; defined { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object attribute", + Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]), + Subject: &jsonAttr.NameRange, + }) + continue + } + attrs[nameStr] = val + attrRanges[nameStr] = jsonAttr.NameRange + } + if !known { + // We encountered an unknown key somewhere along the way, so + // we can't know what our type will eventually be. + return cty.DynamicVal, diags + } + return cty.ObjectVal(attrs), diags + default: + // Default to DynamicVal so that ASTs containing invalid nodes can + // still be partially-evaluated. + return cty.DynamicVal, nil + } +} + +func (e *expression) Variables() []hcl.Traversal { + var vars []hcl.Traversal + + switch v := e.src.(type) { + case *stringVal: + templateSrc := v.Value + expr, diags := hclsyntax.ParseTemplate( + []byte(templateSrc), + v.SrcRange.Filename, + + // This won't produce _exactly_ the right result, since + // the hclsyntax parser can't "see" any escapes we removed + // while parsing JSON, but it's better than nothing. + hcl.Pos{ + Line: v.SrcRange.Start.Line, + + // skip over the opening quote mark + Byte: v.SrcRange.Start.Byte + 1, + Column: v.SrcRange.Start.Column + 1, + }, + ) + if diags.HasErrors() { + return vars + } + return expr.Variables() + + case *arrayVal: + for _, jsonVal := range v.Values { + vars = append(vars, (&expression{src: jsonVal}).Variables()...) + } + case *objectVal: + for _, jsonAttr := range v.Attrs { + vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) + } + } + + return vars +} + +func (e *expression) Range() hcl.Range { + return e.src.Range() +} + +func (e *expression) StartRange() hcl.Range { + return e.src.StartRange() +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *expression) AsTraversal() hcl.Traversal { + // In JSON-based syntax a traversal is given as a string containing + // traversal syntax as defined by hclsyntax.ParseTraversalAbs. 
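+	//
+	// For example, the JSON string "foo.bar[0]" describes a traversal
+	// into attribute "bar" of the object named "foo", and then into
+	// element zero of that result.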
+ + switch v := e.src.(type) { + case *stringVal: + traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + return traversal + default: + return nil + } +} + +// Implementation for hcl.ExprCall. +func (e *expression) ExprCall() *hcl.StaticCall { + // In JSON-based syntax a static call is given as a string containing + // an expression in the native syntax that also supports ExprCall. + + switch v := e.src.(type) { + case *stringVal: + expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + + call, diags := hcl.ExprCall(expr) + if diags.HasErrors() { + return nil + } + + return call + default: + return nil + } +} + +// Implementation for hcl.ExprList. +func (e *expression) ExprList() []hcl.Expression { + switch v := e.src.(type) { + case *arrayVal: + ret := make([]hcl.Expression, len(v.Values)) + for i, node := range v.Values { + ret[i] = &expression{src: node} + } + return ret + default: + return nil + } +} + +// Implementation for hcl.ExprMap. +func (e *expression) ExprMap() []hcl.KeyValuePair { + switch v := e.src.(type) { + case *objectVal: + ret := make([]hcl.KeyValuePair, len(v.Attrs)) + for i, jsonAttr := range v.Attrs { + ret[i] = hcl.KeyValuePair{ + Key: &expression{src: &stringVal{ + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + }}, + Value: &expression{src: jsonAttr.Value}, + } + } + return ret + default: + return nil + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go b/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go new file mode 100644 index 00000000..bbcce5b3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT. + +package json + +import "strconv" + +const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF" + +var _tokenType_map = map[tokenType]string{ + 0: _tokenType_name[0:12], + 44: _tokenType_name[12:22], + 58: _tokenType_name[22:32], + 61: _tokenType_name[32:43], + 75: _tokenType_name[43:55], + 78: _tokenType_name[55:66], + 83: _tokenType_name[66:77], + 91: _tokenType_name[77:88], + 93: _tokenType_name[88:99], + 123: _tokenType_name[99:110], + 125: _tokenType_name[110:121], + 9220: _tokenType_name[121:129], +} + +func (i tokenType) String() string { + if str, ok := _tokenType_map[i]; ok { + return str + } + return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/merged.go b/vendor/github.com/hashicorp/hcl2/hcl/merged.go new file mode 100644 index 00000000..ca2b728a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/merged.go @@ -0,0 +1,226 @@ +package hcl + +import ( + "fmt" +) + +// MergeFiles combines the given files to produce a single body that contains +// configuration from all of the given files. +// +// The ordering of the given files decides the order in which contained +// elements will be returned. If any top-level attributes are defined with +// the same name across multiple files, a diagnostic will be produced from +// the Content and PartialContent methods describing this error in a +// user-friendly way. 
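+//
+// For example (an illustrative sketch only; the file names are invented
+// and json here refers to this module's hcl/json package):
+//
+//     f1, _ := json.Parse([]byte(`{"a": 1}`), "one.hcl.json")
+//     f2, _ := json.Parse([]byte(`{"b": 2}`), "two.hcl.json")
+//     merged := hcl.MergeFiles([]*hcl.File{f1, f2})
+//     attrs, _ := merged.JustAttributes()
+//     // attrs now contains both "a" and "b"; defining "a" in both files
+//     // would instead produce a "Duplicate attribute" diagnostic.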
+func MergeFiles(files []*File) Body { + var bodies []Body + for _, file := range files { + bodies = append(bodies, file.Body) + } + return MergeBodies(bodies) +} + +// MergeBodies is like MergeFiles except it deals directly with bodies, rather +// than with entire files. +func MergeBodies(bodies []Body) Body { + if len(bodies) == 0 { + // Swap out for our singleton empty body, to reduce the number of + // empty slices we have hanging around. + return emptyBody + } + + // If any of the given bodies are already merged bodies, we'll unpack + // to flatten to a single mergedBodies, since that's conceptually simpler. + // This also, as a side-effect, eliminates any empty bodies, since + // empties are merged bodies with no inner bodies. + var newLen int + var flatten bool + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + newLen += len(children) + flatten = true + } else { + newLen++ + } + } + + if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside + return mergedBodies(bodies) + } + + if newLen == 0 { + // Don't allocate a new empty when we already have one + return emptyBody + } + + new := make([]Body, 0, newLen) + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + new = append(new, children...) + } else { + new = append(new, body) + } + } + return mergedBodies(new) +} + +var emptyBody = mergedBodies([]Body{}) + +// EmptyBody returns a body with no content. This body can be used as a +// placeholder when a body is required but no body content is available. +func EmptyBody() Body { + return emptyBody +} + +type mergedBodies []Body + +// Content returns the content produced by applying the given schema to all +// of the merged bodies and merging the result. +// +// Although required attributes _are_ supported, they should be used sparingly +// with merged bodies since in this case there is no contextual information +// with which to return good diagnostics. Applications working with merged +// bodies may wish to mark all attributes as optional and then check for +// required attributes afterwards, to produce better diagnostics. +func (mb mergedBodies) Content(schema *BodySchema) (*BodyContent, Diagnostics) { + // the returned body will always be empty in this case, because mergedContent + // will only ever call Content on the child bodies. + content, _, diags := mb.mergedContent(schema, false) + return content, diags +} + +func (mb mergedBodies) PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) { + return mb.mergedContent(schema, true) +} + +func (mb mergedBodies) JustAttributes() (Attributes, Diagnostics) { + attrs := make(map[string]*Attribute) + var diags Diagnostics + + for _, body := range mb { + thisAttrs, thisDiags := body.JustAttributes() + + if len(thisDiags) != 0 { + diags = append(diags, thisDiags...) + } + + if thisAttrs != nil { + for name, attr := range thisAttrs { + if existing := attrs[name]; existing != nil { + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Duplicate attribute", + Detail: fmt.Sprintf( + "Attribute %q was already assigned at %s", + name, existing.NameRange.String(), + ), + Subject: &attr.NameRange, + }) + continue + } + + attrs[name] = attr + } + } + } + + return attrs, diags +} + +func (mb mergedBodies) MissingItemRange() Range { + if len(mb) == 0 { + // Nothing useful to return here, so we'll return some garbage. 
+		return Range{
+			Filename: "",
+		}
+	}
+
+	// arbitrarily use the first body's missing item range
+	return mb[0].MissingItemRange()
+}
+
+func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyContent, Body, Diagnostics) {
+	// We need to produce a new schema with none of the attributes marked as
+	// required, since _any one_ of our bodies can contribute an attribute value.
+	// We'll separately check that all required attributes are present at
+	// the end.
+	mergedSchema := &BodySchema{
+		Blocks: schema.Blocks,
+	}
+	for _, attrS := range schema.Attributes {
+		mergedAttrS := attrS
+		mergedAttrS.Required = false
+		mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS)
+	}
+
+	var mergedLeftovers []Body
+	content := &BodyContent{
+		Attributes: map[string]*Attribute{},
+	}
+
+	var diags Diagnostics
+	for _, body := range mb {
+		var thisContent *BodyContent
+		var thisLeftovers Body
+		var thisDiags Diagnostics
+
+		if partial {
+			thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema)
+		} else {
+			thisContent, thisDiags = body.Content(mergedSchema)
+		}
+
+		if thisLeftovers != nil {
+			mergedLeftovers = append(mergedLeftovers, thisLeftovers)
+		}
+		if len(thisDiags) != 0 {
+			diags = append(diags, thisDiags...)
+		}
+
+		if thisContent.Attributes != nil {
+			for name, attr := range thisContent.Attributes {
+				if existing := content.Attributes[name]; existing != nil {
+					diags = diags.Append(&Diagnostic{
+						Severity: DiagError,
+						Summary:  "Duplicate attribute",
+						Detail: fmt.Sprintf(
+							"Attribute %q was already assigned at %s",
+							name, existing.NameRange.String(),
+						),
+						Subject: &attr.NameRange,
+					})
+					continue
+				}
+				content.Attributes[name] = attr
+			}
+		}
+
+		if len(thisContent.Blocks) != 0 {
+			content.Blocks = append(content.Blocks, thisContent.Blocks...)
+		}
+	}
+
+	// Finally, we check for required attributes.
+	for _, attrS := range schema.Attributes {
+		if !attrS.Required {
+			continue
+		}
+
+		if content.Attributes[attrS.Name] == nil {
+			// We don't have any context here to produce a good diagnostic,
+			// which is why we warn in the Content docstring to minimize the
+			// use of required attributes on merged bodies.
+			diags = diags.Append(&Diagnostic{
+				Severity: DiagError,
+				Summary:  "Missing required attribute",
+				Detail: fmt.Sprintf(
+					"The attribute %q is required, but was not assigned.",
+					attrS.Name,
+				),
+			})
+		}
+	}
+
+	leftoverBody := MergeBodies(mergedLeftovers)
+	return content, leftoverBody, diags
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/ops.go b/vendor/github.com/hashicorp/hcl2/hcl/ops.go
new file mode 100644
index 00000000..f4e30b09
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/ops.go
@@ -0,0 +1,147 @@
+package hcl
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// Index is a helper function that performs the same operation as the index
+// operator in the HCL expression language. That is, the result is the
+// same as it would be for collection[key] in a configuration expression.
+//
+// This is exported so that applications can perform indexing in a manner
+// consistent with how the language does it, including handling of null and
+// unknown values, etc.
+//
+// Diagnostics are produced if the given combination of values is not valid.
+// Therefore a pointer to a source range must be provided to use in diagnostics,
+// though nil can be provided if the calling application is going to
+// ignore the subject of the returned diagnostics anyway.
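+//
+// For example (an illustrative sketch only), indexing a cty list value:
+//
+//     coll := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})
+//     elem, diags := Index(coll, cty.NumberIntVal(1), nil)
+//     // elem is cty.StringVal("b") and diags contains no errors.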
+func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) { + if collection.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Attempt to index null value", + Detail: "This value is null, so it does not have any indices.", + Subject: srcRange, + }, + } + } + if key.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "Can't use a null value as an indexing key.", + Subject: srcRange, + }, + } + } + ty := collection.Type() + kty := key.Type() + if kty == cty.DynamicPseudoType || ty == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + switch { + + case ty.IsListType() || ty.IsTupleType() || ty.IsMapType(): + var wantType cty.Type + switch { + case ty.IsListType() || ty.IsTupleType(): + wantType = cty.Number + case ty.IsMapType(): + wantType = cty.String + default: + // should never happen + panic("don't know what key type we want") + } + + key, keyErr := convert.Convert(key, wantType) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + + has := collection.HasIndex(key) + if !has.IsKnown() { + if ty.IsTupleType() { + return cty.DynamicVal, nil + } else { + return cty.UnknownVal(ty.ElementType()), nil + } + } + if has.False() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.Index(key), nil + + case ty.IsObjectType(): + key, keyErr := convert.Convert(key, cty.String) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + if !collection.IsKnown() { + return cty.DynamicVal, nil + } + if !key.IsKnown() { + return cty.DynamicVal, nil + } + + attrName := key.AsString() + + if !ty.HasAttribute(attrName) { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.GetAttr(attrName), nil + + default: + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "This value does not have any indices.", + Subject: srcRange, + }, + } + } + +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos.go b/vendor/github.com/hashicorp/hcl2/hcl/pos.go new file mode 100644 index 00000000..1a4b329d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/pos.go @@ -0,0 +1,262 @@ +package hcl + +import "fmt" + +// Pos represents a single position in a source file, by addressing the +// start byte of a unicode character encoded in UTF-8. +// +// Pos is generally used only in the context of a Range, which then defines +// which source file the position is within. +type Pos struct { + // Line is the source code line where this position points. Lines are + // counted starting at 1 and incremented for each newline character + // encountered. 
+	Line int
+
+	// Column is the source code column where this position points, in
+	// unicode characters, with counting starting at 1.
+	//
+	// Column counts characters as they appear visually, so for example a
+	// latin letter with a combining diacritic mark counts as one character.
+	// This is intended for rendering visual markers against source code in
+	// contexts where these diacritics would be rendered in a single character
+	// cell. Technically speaking, Column is counting grapheme clusters as
+	// used in unicode normalization.
+	Column int
+
+	// Byte is the byte offset into the file where the indicated character
+	// begins. This is a zero-based offset to the first byte of the first
+	// UTF-8 codepoint sequence in the character, and thus gives a position
+	// that can be resolved _without_ awareness of Unicode characters.
+	Byte int
+}
+
+// Range represents a span of characters between two positions in a source
+// file.
+//
+// This struct is usually used by value in types that represent AST nodes,
+// but by pointer in types that refer to the positions of other objects,
+// such as in diagnostics.
+type Range struct {
+	// Filename is the name of the file into which this range's positions
+	// point.
+	Filename string
+
+	// Start and End represent the bounds of this range. Start is inclusive
+	// and End is exclusive.
+	Start, End Pos
+}
+
+// RangeBetween returns a new range that spans from the beginning of the
+// start range to the end of the end range.
+//
+// The result is meaningless if the two ranges do not belong to the same
+// source file or if the end range appears before the start range.
+func RangeBetween(start, end Range) Range {
+	return Range{
+		Filename: start.Filename,
+		Start:    start.Start,
+		End:      end.End,
+	}
+}
+
+// RangeOver returns a new range that covers both of the given ranges and
+// possibly additional content between them if the two ranges do not overlap.
+//
+// If either range is empty then it is ignored. The result is empty if both
+// given ranges are empty.
+//
+// The result is meaningless if the two ranges do not belong to the same
+// source file.
+func RangeOver(a, b Range) Range {
+	if a.Empty() {
+		return b
+	}
+	if b.Empty() {
+		return a
+	}
+
+	var start, end Pos
+	if a.Start.Byte < b.Start.Byte {
+		start = a.Start
+	} else {
+		start = b.Start
+	}
+	if a.End.Byte > b.End.Byte {
+		end = a.End
+	} else {
+		end = b.End
+	}
+	return Range{
+		Filename: a.Filename,
+		Start:    start,
+		End:      end,
+	}
+}
+
+// ContainsOffset returns true if and only if the given byte offset is within
+// the receiving Range.
+func (r Range) ContainsOffset(offset int) bool {
+	return offset >= r.Start.Byte && offset < r.End.Byte
+}
+
+// Ptr returns a pointer to a copy of the receiver. This is a convenience
+// for when using ranges in places where pointers are required, such as in
+// Diagnostic, but the range in question is returned from a method. Go would
+// otherwise not allow one to take the address of a function call.
+func (r Range) Ptr() *Range {
+	return &r
+}
+
+// String returns a compact string representation of the receiver.
+// Callers should generally prefer to present a range more visually,
+// e.g. via markers directly on the relevant portion of source code.
+func (r Range) String() string {
+	if r.Start.Line == r.End.Line {
+		return fmt.Sprintf(
+			"%s:%d,%d-%d",
+			r.Filename,
+			r.Start.Line, r.Start.Column,
+			r.End.Column,
+		)
+	} else {
+		return fmt.Sprintf(
+			"%s:%d,%d-%d,%d",
+			r.Filename,
+			r.Start.Line, r.Start.Column,
+			r.End.Line, r.End.Column,
+		)
+	}
+}
+
+// Empty returns true if and only if the range covers a zero-length span of
+// characters, which is the case when its start and end positions coincide.
+func (r Range) Empty() bool {
+	return r.Start.Byte == r.End.Byte
+}
+
+// CanSliceBytes returns true if SliceBytes could return an accurate
+// sub-slice of the given slice.
+//
+// This effectively tests whether the start and end offsets of the range
+// are within the bounds of the slice, and thus whether SliceBytes can be
+// trusted to produce an accurate start and end position within that slice.
+func (r Range) CanSliceBytes(b []byte) bool {
+	switch {
+	case r.Start.Byte < 0 || r.Start.Byte > len(b):
+		return false
+	case r.End.Byte < 0 || r.End.Byte > len(b):
+		return false
+	case r.End.Byte < r.Start.Byte:
+		return false
+	default:
+		return true
+	}
+}
+
+// SliceBytes returns a sub-slice of the given slice that is covered by the
+// receiving range, assuming that the given slice is the source code of the
+// file indicated by r.Filename.
+//
+// If the receiver refers to any byte offsets that are outside of the slice
+// then the result is constrained to the overlapping portion only, to avoid
+// a panic. Use CanSliceBytes to determine if the result is guaranteed to
+// be an accurate span of the requested range.
+func (r Range) SliceBytes(b []byte) []byte {
+	start := r.Start.Byte
+	end := r.End.Byte
+	if start < 0 {
+		start = 0
+	} else if start > len(b) {
+		start = len(b)
+	}
+	if end < 0 {
+		end = 0
+	} else if end > len(b) {
+		end = len(b)
+	}
+	if end < start {
+		end = start
+	}
+	return b[start:end]
+}
+
+// Overlaps returns true if the receiver and the other given range share any
+// characters in common.
+func (r Range) Overlaps(other Range) bool {
+	switch {
+	case r.Filename != other.Filename:
+		// If the ranges are in different files then they can't possibly overlap
+		return false
+	case r.Empty() || other.Empty():
+		// Empty ranges can never overlap
+		return false
+	case r.ContainsOffset(other.Start.Byte) || r.ContainsOffset(other.End.Byte):
+		return true
+	case other.ContainsOffset(r.Start.Byte) || other.ContainsOffset(r.End.Byte):
+		return true
+	default:
+		return false
+	}
+}
+
+// Overlap finds a range that is either identical to or a sub-range of both
+// the receiver and the other given range. It returns an empty range
+// within the receiver if there is no overlap between the two ranges.
+//
+// A non-empty result is either identical to or a subset of the receiver.
+func (r Range) Overlap(other Range) Range {
+	if !r.Overlaps(other) {
+		// Start == End indicates an empty range
+		return Range{
+			Filename: r.Filename,
+			Start:    r.Start,
+			End:      r.Start,
+		}
+	}
+
+	var start, end Pos
+	if r.Start.Byte > other.Start.Byte {
+		start = r.Start
+	} else {
+		start = other.Start
+	}
+	if r.End.Byte < other.End.Byte {
+		end = r.End
+	} else {
+		end = other.End
+	}
+
+	return Range{
+		Filename: r.Filename,
+		Start:    start,
+		End:      end,
+	}
+}
+
+// PartitionAround finds the portion of the given range that overlaps with
+// the receiver and returns three ranges: the portion of the receiver that
+// precedes the overlap, the overlap itself, and then the portion of the
+// receiver that comes after the overlap.
+//
+// If the two ranges do not overlap then all three returned ranges are empty.
+//
+// If the given range aligns with or extends beyond either extent of the
+// receiver then the corresponding outer range will be empty.
+func (r Range) PartitionAround(other Range) (before, overlap, after Range) {
+	overlap = r.Overlap(other)
+	if overlap.Empty() {
+		return overlap, overlap, overlap
+	}
+
+	before = Range{
+		Filename: r.Filename,
+		Start:    r.Start,
+		End:      overlap.Start,
+	}
+	after = Range{
+		Filename: r.Filename,
+		Start:    overlap.End,
+		End:      r.End,
+	}
+
+	return before, overlap, after
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
new file mode 100644
index 00000000..7c8f2dfa
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
@@ -0,0 +1,148 @@
+package hcl
+
+import (
+	"bufio"
+	"bytes"
+
+	"github.com/apparentlymart/go-textseg/textseg"
+)
+
+// RangeScanner is a helper that will scan over a buffer using a bufio.SplitFunc
+// and visit a source range for each token matched.
+//
+// For example, this can be used with bufio.ScanLines to find the source range
+// for each line in the file, skipping over the actual newline characters, which
+// may be useful when printing source code snippets as part of diagnostic
+// messages.
+//
+// The line and column information in the returned ranges is produced by
+// counting newline characters and grapheme clusters respectively, which
+// mimics the behavior we expect from a parser when producing ranges.
+type RangeScanner struct {
+	filename string
+	b        []byte
+	cb       bufio.SplitFunc
+
+	pos Pos    // position of next byte to process in b
+	cur Range  // latest range
+	tok []byte // slice of b that is covered by cur
+	err error  // error from last scan, if any
+}
+
+// NewRangeScanner creates a new RangeScanner for the given buffer, producing
+// ranges for the given filename.
+//
+// Since ranges have grapheme-cluster granularity rather than byte granularity,
+// the scanner will produce incorrect results if the given SplitFunc creates
+// tokens between grapheme cluster boundaries. In particular, it is incorrect
+// to use RangeScanner with bufio.ScanRunes because it will produce tokens
+// around individual UTF-8 sequences, which will split any multi-sequence
+// grapheme clusters.
+func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner {
+	return &RangeScanner{
+		filename: filename,
+		b:        b,
+		cb:       cb,
+		pos: Pos{
+			Byte:   0,
+			Line:   1,
+			Column: 1,
+		},
+	}
+}
+
+func (sc *RangeScanner) Scan() bool {
+	if sc.pos.Byte >= len(sc.b) || sc.err != nil {
+		// All done
+		return false
+	}
+
+	// Since we're operating on an in-memory buffer, we always pass the whole
+	// remainder of the buffer to our SplitFunc and set isEOF to let it know
+	// that it has the whole thing.
+	advance, token, err := sc.cb(sc.b[sc.pos.Byte:], true)
+
+	// Since we are setting isEOF to true this should never happen, but
+	// if it does we will just abort and assume the SplitFunc is misbehaving.
+	if advance == 0 && token == nil && err == nil {
+		return false
+	}
+
+	if err != nil {
+		sc.err = err
+		sc.cur = Range{
+			Filename: sc.filename,
+			Start:    sc.pos,
+			End:      sc.pos,
+		}
+		sc.tok = nil
+		return false
+	}
+
+	sc.tok = token
+	start := sc.pos
+	end := sc.pos
+	new := sc.pos
+
+	// adv is similar to token but it also includes any subsequent characters
+	// the SplitFunc asked us to skip over with its advance return value.
+	adv := sc.b[sc.pos.Byte : sc.pos.Byte+advance]
+
+	// We now need to scan over our token to count the grapheme clusters
+	// so we can correctly advance Column, and count the newlines so we
+	// can correctly advance Line.
+	advR := bytes.NewReader(adv)
+	gsc := bufio.NewScanner(advR)
+	advanced := 0
+	gsc.Split(textseg.ScanGraphemeClusters)
+	for gsc.Scan() {
+		gr := gsc.Bytes()
+		new.Byte += len(gr)
+		new.Column++
+
+		// We rely here on the fact that \r\n is considered a grapheme cluster
+		// and so we don't need to worry about miscounting additional lines
+		// on files with Windows-style line endings.
+		if len(gr) != 0 && (gr[0] == '\r' || gr[0] == '\n') {
+			new.Column = 1
+			new.Line++
+		}
+
+		if advanced < len(token) {
+			// If we've not yet found the end of our token then we'll
+			// also push our "end" marker along.
+			// (if advance > len(token) then we'll stop moving "end" early
+			// so that the caller only sees the range covered by token.)
+			end = new
+		}
+		advanced += len(gr)
+	}
+
+	sc.cur = Range{
+		Filename: sc.filename,
+		Start:    start,
+		End:      end,
+	}
+	sc.pos = new
+	return true
+}
+
+// Range returns a range that covers the latest token obtained after a call
+// to Scan returns true.
+func (sc *RangeScanner) Range() Range {
+	return sc.cur
+}
+
+// Bytes returns the slice of the input buffer that is covered by the range
+// that would be returned by Range.
+func (sc *RangeScanner) Bytes() []byte {
+	return sc.tok
+}
+
+// Err can be called after Scan returns false to determine if the latest read
+// resulted in an error, and obtain that error if so.
+func (sc *RangeScanner) Err() error {
+	return sc.err
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/schema.go b/vendor/github.com/hashicorp/hcl2/hcl/schema.go
new file mode 100644
index 00000000..891257ac
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/schema.go
@@ -0,0 +1,21 @@
+package hcl
+
+// BlockHeaderSchema represents the shape of a block header, and is
+// used for matching blocks within bodies.
+type BlockHeaderSchema struct {
+	Type       string
+	LabelNames []string
+}
+
+// AttributeSchema represents the requirements for an attribute, and is used
+// for matching attributes within bodies.
+type AttributeSchema struct {
+	Name     string
+	Required bool
+}
+
+// BodySchema represents the desired shallow structure of a body.
+type BodySchema struct {
+	Attributes []AttributeSchema
+	Blocks     []BlockHeaderSchema
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/spec.md
new file mode 100644
index 00000000..58257bfe
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/spec.md
@@ -0,0 +1,691 @@
+# HCL Syntax-Agnostic Information Model
+
+This is the specification for the general information model (abstract types and
+semantics) for HCL. HCL is a system for defining configuration languages for
+applications. The HCL information model is designed to support multiple
+concrete syntaxes for configuration, each with a mapping to the model defined
+in this specification.
+
+The two primary syntaxes intended for use in conjunction with this model are
+[the HCL native syntax](./hclsyntax/spec.md) and [the JSON syntax](./json/spec.md).
+In principle other syntaxes are possible as long as either their language model
+is sufficiently rich to express the concepts described in this specification
+or the language targets a well-defined subset of the specification.
+
+## Structural Elements
+
+The primary structural element is the _body_, which is a container representing
+a set of zero or more _attributes_ and a set of zero or more _blocks_.
+
+A _configuration file_ is the top-level object, and will usually be produced
+by reading a file from disk and parsing it as a particular syntax. A
+configuration file has its own _body_, representing the top-level attributes
+and blocks.
+
+An _attribute_ is a name and value pair associated with a body. Attribute names
+are unique within a given body. Attribute values are provided as _expressions_,
+which are discussed in detail in a later section.
+
+A _block_ is a nested structure that has a _type name_, zero or more string
+_labels_ (e.g. identifiers), and a nested body.
+
+Together the structural elements create a hierarchical data structure, with
+attributes intended to represent the direct properties of a particular object
+in the calling application, and blocks intended to represent child objects
+of a particular object.
+
+## Body Content
+
+To support the expression of the HCL concepts in languages whose information
+model is a subset of HCL's, such as JSON, a _body_ is an opaque container
+whose content can only be accessed by providing information on the expected
+structure of the content.
+
+The specification for each syntax must describe how its physical constructs
+are mapped on to body content given a schema. For syntaxes that have
+first-class syntax distinguishing attributes and blocks this can be relatively
+straightforward, while more detailed mapping rules may be required in syntaxes
+where the representation of attributes vs. blocks is ambiguous.
+
+### Schema-driven Processing
+
+Schema-driven processing is the primary way to access body content.
+A _body schema_ is a description of what is expected within a particular body,
+which can then be used to extract the _body content_, which then provides
+access to the specific attributes and blocks requested.
+
+A _body schema_ consists of a list of _attribute schemata_ and
+_block header schemata_:
+
+* An _attribute schema_ provides the name of an attribute and whether its
+  presence is required.
+
+* A _block header schema_ provides a block type name and the semantic names
+  assigned to each of the labels of that block type, if any.
+
+Within a schema, it is an error to request the same attribute name twice or
+to request a block type whose name is also an attribute name. While this can
+in principle be supported in some syntaxes, in other syntaxes the attribute
+and block namespaces are combined and so an attribute cannot coexist with
+a block whose type name is identical to the attribute name.
+
+The result of applying a body schema to a body is _body content_, which
+consists of an _attribute map_ and a _block sequence_:
+
+* The _attribute map_ is a map data structure whose keys are attribute names
+  and whose values are _expressions_ that represent the corresponding attribute
+  values.
+
+* The _block sequence_ is an ordered sequence of blocks, with each specifying
+  a block _type name_, the sequence of _labels_ specified for the block,
+  and the body object (not body _content_) representing the block's own body.
+
+After obtaining _body content_, the calling application may continue processing
+by evaluating attribute expressions and/or recursively applying further
+schema-driven processing to the child block bodies.
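+
+The following non-normative sketch illustrates schema-driven processing using
+the Go API defined alongside this specification (the `hcl` package in this
+repository). The `body` value is assumed to have come from some
+syntax-specific parser, and the "resource" block type and "description"
+attribute are hypothetical names chosen only for illustration:
+
+```go
+package example
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// decodeExample applies a schema to an already-parsed body and walks the
+// resulting body content.
+func decodeExample(body hcl.Body) hcl.Diagnostics {
+	schema := &hcl.BodySchema{
+		Attributes: []hcl.AttributeSchema{
+			{Name: "description", Required: false},
+		},
+		Blocks: []hcl.BlockHeaderSchema{
+			{Type: "resource", LabelNames: []string{"type", "name"}},
+		},
+	}
+
+	content, diags := body.Content(schema)
+	if content == nil {
+		return diags
+	}
+	for _, block := range content.Blocks.OfType("resource") {
+		fmt.Println(block.Labels)
+		// A real application would recursively apply further schemas
+		// to block.Body here.
+	}
+	return diags
+}
+```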
+ +**Note:** The _body schema_ is intentionally minimal, to reduce the set of +mapping rules that must be defined for each syntax. Higher-level utility +libraries may be provided to assist in the construction of a schema and +perform additional processing, such as automatically evaluating attribute +expressions and assigning their result values into a data structure, or +recursively applying a schema to child blocks. Such utilities are not part of +this core specification and will vary depending on the capabilities and idiom +of the implementation language. + +### _Dynamic Attributes_ Processing + +The _schema-driven_ processing model is useful when the expected structure +of a body is known a priori by the calling application. Some blocks are +instead more free-form, such as a user-provided set of arbitrary key/value +pairs. + +The alternative _dynamic attributes_ processing mode allows for this more +ad-hoc approach. Processing in this mode behaves as if a schema had been +constructed without any _block header schemata_ and with an attribute +schema for each distinct key provided within the physical representation +of the body. + +The means by which _distinct keys_ are identified is dependent on the +physical syntax; this processing mode assumes that the syntax has a way +to enumerate keys provided by the author and identify expressions that +correspond with those keys, but does not define the means by which this is +done. + +The result of _dynamic attributes_ processing is an _attribute map_ as +defined in the previous section. No _block sequence_ is produced in this +processing mode. + +### Partial Processing of Body Content + +Under _schema-driven processing_, by default the given schema is assumed +to be exhaustive, such that any attribute or block not matched by schema +elements is considered an error. This allows feedback about unsupported +attributes and blocks (such as typos) to be provided. + +An alternative is _partial processing_, where any additional elements within +the body are not considered an error. + +Under partial processing, the result is both body content as described +above _and_ a new body that represents any body elements that remain after +the schema has been processed. + +Specifically: + +* Any attribute whose name is specified in the schema is returned in body + content and elided from the new body. + +* Any block whose type is specified in the schema is returned in body content + and elided from the new body. + +* Any attribute or block _not_ meeting the above conditions is placed into + the new body, unmodified. + +The new body can then be recursively processed using any of the body +processing models. This facility allows different subsets of body content +to be processed by different parts of the calling application. + +Processing a body in two steps — first partial processing of a source body, +then exhaustive processing of the returned body — is equivalent to single-step +processing with a schema that is the union of the schemata used +across the two steps. + +## Expressions + +Attribute values are represented by _expressions_. Depending on the concrete +syntax in use, an expression may just be a literal value or it may describe +a computation in terms of literal values, variables, and functions. + +Each syntax defines its own representation of expressions. For syntaxes based +in languages that do not have any non-literal expression syntax, it is +recommended to embed the template language from +[the native syntax](./hclsyntax/spec.md) e.g. 
as a post-processing step on string literals.
+
+### Expression Evaluation
+
+In order to obtain a concrete value, each expression must be _evaluated_.
+Evaluation is performed in terms of an evaluation context, which
+consists of the following:
+
+* An _evaluation mode_, which is defined below.
+* A _variable scope_, which provides a set of named variables for use in
+  expressions.
+* A _function table_, which provides a set of named functions for use in
+  expressions.
+
+The _evaluation mode_ allows for two different interpretations of an
+expression:
+
+* In _literal-only mode_, variables and functions are not available and it
+  is assumed that the calling application's intent is to treat the attribute
+  value as a literal.
+
+* In _full expression mode_, variables and functions are defined and it is
+  assumed that the calling application wishes to provide a full expression
+  language for definition of the attribute value.
+
+The actual behavior of these two modes depends on the syntax in use. For
+languages with first-class expression syntax, these two modes may be considered
+equivalent, with _literal-only mode_ simply not defining any variables or
+functions. For languages that embed arbitrary expressions via string templates,
+_literal-only mode_ may disable such processing, allowing literal strings to
+pass through without interpretation as templates.
+
+Since literal-only mode does not support variables and functions, it is an
+error for the calling application to enable this mode and yet provide a
+variable scope and/or function table.
+
+## Values and Value Types
+
+The result of expression evaluation is a _value_. Each value has a _type_,
+which is dynamically determined during evaluation. The _variable scope_ in
+the evaluation context is a map from variable name to value, using the same
+definition of value.
+
+The type system for HCL values is intended to be at a level of abstraction
+suitable for configuration of various applications. A well-defined,
+implementation-language-agnostic type system is defined to allow for
+consistent processing of configuration across many implementation languages.
+Concrete implementations may provide additional functionality to lower
+HCL values and types to corresponding native language types, which may then
+impose additional constraints on the values outside of the scope of this
+specification.
+
+Two values are _equal_ if and only if they have identical types and their
+values are equal according to the rules of their shared type.
+
+### Primitive Types
+
+The primitive types are _string_, _bool_, and _number_.
+
+A _string_ is a sequence of unicode characters. Two strings are equal if
+NFC normalization ([UAX#15](http://unicode.org/reports/tr15/))
+of each string produces two identical sequences of characters.
+NFC normalization ensures that, for example, a precomposed combination of a
+latin letter and a diacritic compares equal with the letter followed by
+a combining diacritic.
+
+The _bool_ type has only two non-null values: _true_ and _false_. Two bool
+values are equal if and only if they are either both true or both false.
+
+A _number_ is an arbitrary-precision floating point value. An implementation
+_must_ make the full-precision values available to the calling application
+for interpretation into any suitable number representation. An implementation
+may in practice implement numbers with limited precision so long as the
+following constraints are met:
+
+* Integers are represented with at least 256 bits.
+* Non-integer numbers are represented as floating point values with a
+  mantissa of at least 256 bits and a signed binary exponent of at least
+  16 bits.
+* An error is produced if an integer value given in source cannot be
+  represented precisely.
+* An error is produced if a non-integer value cannot be represented due to
+  overflow.
+* A non-integer number is rounded to the nearest possible value when a
+  value is of too high a precision to be represented.
+
+The _number_ type also requires representation of both positive and negative
+infinity. A "not a number" (NaN) value is _not_ provided nor used.
+
+Two number values are equal if they are numerically equal to the precision
+associated with the number. Positive infinity and negative infinity are
+equal to themselves but not to each other. Positive infinity is greater than
+any other number value, and negative infinity is less than any other number
+value.
+
+Some syntaxes may be unable to represent numeric literals of arbitrary
+precision. This must be defined in the syntax specification as part of its
+description of mapping numeric literals to HCL values.
+
+### Structural Types
+
+_Structural types_ are types that are constructed by combining other types.
+Each distinct combination of other types is itself a distinct type. There
+are two structural type _kinds_:
+
+* _Object types_ are constructed of a set of named attributes, each of which
+  has a type. Attribute names are always strings. (_Object_ attributes are a
+  distinct idea from _body_ attributes, though calling applications
+  may choose to blur the distinction by use of common naming schemes.)
+* _Tuple types_ are constructed of a sequence of elements, each of which
+  has a type.
+
+Values of structural types are compared for equality in terms of their
+attributes or elements. A structural type value is equal to another if and
+only if all of the corresponding attributes or elements are equal.
+
+Two structural types are identical if they are of the same kind and
+have attributes or elements with identical types.
+
+### Collection Types
+
+_Collection types_ are types that combine together an arbitrary number of
+values of some other single type. There are three collection type _kinds_:
+
+* _List types_ represent ordered sequences of values of their element type.
+* _Map types_ represent values of their element type accessed via string keys.
+* _Set types_ represent unordered sets of distinct values of their element type.
+
+For each of these kinds and each distinct element type there is a distinct
+collection type. For example, "list of string" is a distinct type from
+"set of string", and "list of number" is a distinct type from "list of string".
+
+Values of collection types are compared for equality in terms of their
+elements. A collection type value is equal to another if and only if both
+have the same number of elements and their corresponding elements are equal.
+
+Two collection types are identical if they are of the same kind and have
+the same element type.
+
+### Null Values
+
+Each type has a null value. The null value of a type represents the absence
+of a value, but with type information retained to allow for type checking.
+
+Null values are used primarily to represent the conditional absence of a
+body attribute. In a syntax with a conditional operator, one of the result
+values of that conditional may be null to indicate that the attribute should be
+considered not present in that case.
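+
+As a concrete, non-normative illustration, the Go implementation in this
+repository represents values using the `cty` library, where a null value
+still carries its type:
+
+```go
+package example
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+func nullExample() {
+	// A null value retains its type, so type checking still works.
+	n := cty.NullVal(cty.String)
+	fmt.Println(n.IsNull())                // true
+	fmt.Println(n.Type().Equals(cty.String)) // true
+}
+```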
+
+Calling applications _should_ consider an attribute with a null value as
+equivalent to the value not being present at all.
+
+A null value of a particular type is equal to itself.
+
+### Unknown Values and the Dynamic Pseudo-type
+
+An _unknown value_ is a placeholder for a value that is not yet known.
+Operations on unknown values themselves return unknown values that have a
+type appropriate to the operation. For example, adding together two unknown
+numbers yields an unknown number, while comparing two unknown values of any
+type for equality yields an unknown bool.
+
+Each type has a distinct unknown value. For example, an unknown _number_ is
+a distinct value from an unknown _string_.
+
+_The dynamic pseudo-type_ is a placeholder for a type that is not yet known.
+The only values of this type are its null value and its unknown value. It is
+referred to as a _pseudo-type_ because it should not be considered a type in
+its own right, but rather as a placeholder for a type yet to be established.
+The unknown value of the dynamic pseudo-type is referred to as _the dynamic
+value_.
+
+Operations on values of the dynamic pseudo-type behave as if it is a value
+of the expected type, optimistically assuming that once the value and type
+are known they will be valid for the operation. For example, adding together
+a number and the dynamic value produces an unknown number.
+
+Unknown values and the dynamic pseudo-type can be used as a mechanism for
+partial type checking and semantic checking: by evaluating an expression with
+all variables set to an unknown value, the expression can be evaluated to
+produce an unknown value of a given type, or produce an error if any operation
+is provably invalid with only type information.
+
+Unknown values and the dynamic pseudo-type must never be returned from
+operations unless at least one operand is unknown or dynamic. Calling
+applications are guaranteed that unless the global scope includes unknown
+values, or the function table includes functions that return unknown values,
+no expression will evaluate to an unknown value. The calling application is
+thus in total control over the use and meaning of unknown values.
+
+The dynamic pseudo-type is identical only to itself.
+
+### Capsule Types
+
+A _capsule type_ is a custom type defined by the calling application. A value
+of a capsule type is considered opaque to HCL, but may be accepted
+by functions provided by the calling application.
+
+A particular capsule type is identical only to itself. The equality of two
+values of the same capsule type is defined by the calling application. No
+other operations are supported for values of capsule types.
+
+Support for capsule types in an HCL implementation is optional. Capsule types
+are intended to allow calling applications to pass through values that are
+not part of the standard type system. For example, an application that
+deals with raw binary data may define a capsule type representing a byte
+array, and provide functions that produce or operate on byte arrays.
+
+### Type Specifications
+
+In certain situations it is necessary to define expectations about the type
+of a value. Whereas two _types_ have a commutative _identity_ relationship,
+a type has a non-commutative _matches_ relationship with a _type specification_.
+A type specification is, in practice, just a different interpretation of a
+type such that:
+
+* Any type _matches_ any type that it is identical to.
+
+* Any type _matches_ the dynamic pseudo-type.
+
+For example, given a type specification "list of dynamic pseudo-type", the
+concrete types "list of string" and "list of map" match, but the
+type "set of string" does not.
+
+## Functions and Function Calls
+
+The evaluation context used to evaluate an expression includes a function
+table, which represents an application-defined set of named functions
+available for use in expressions.
+
+Each syntax defines whether function calls are supported and how they are
+physically represented in source code, but the semantics of function calls are
+defined here to ensure consistent results across syntaxes and to allow
+applications to provide functions that are interoperable with all syntaxes.
+
+A _function_ is defined from the following elements:
+
+* Zero or more _positional parameters_, each with a name used for documentation,
+  a type specification for expected argument values, and a flag for whether
+  each of null values, unknown values, and values of the dynamic pseudo-type
+  are accepted.
+
+* Zero or one _variadic parameter_, with the same structure as the _positional_
+  parameters, which if present collects any additional arguments provided at
+  the function call site.
+
+* A _result type definition_, which specifies the value type returned for each
+  valid sequence of argument values.
+
+* A _result value definition_, which specifies the value returned for each
+  valid sequence of argument values.
+
+A _function call_, regardless of source syntax, consists of a sequence of
+argument values. The argument values are each mapped to a corresponding
+parameter as follows:
+
+* For each of the function's positional parameters in sequence, take the next
+  argument. If there are no more arguments, the call is erroneous.
+
+* If the function has a variadic parameter, take all remaining arguments that
+  were not yet assigned to a positional parameter and collect them into
+  a sequence of variadic arguments that each correspond to the variadic
+  parameter.
+
+* If the function has _no_ variadic parameter, it is an error if any arguments
+  remain after taking one argument for each positional parameter.
+
+After mapping each argument to a parameter, semantic checking proceeds
+for each argument:
+
+* If the argument value corresponding to a parameter does not match the
+  parameter's type specification, the call is erroneous.
+
+* If the argument value corresponding to a parameter is null and the parameter
+  is not specified as accepting nulls, the call is erroneous.
+
+* If the argument value corresponding to a parameter is the dynamic value
+  and the parameter is not specified as accepting values of the dynamic
+  pseudo-type, the call is valid but its _result type_ is forced to be the
+  dynamic pseudo-type.
+
+* If neither of the above conditions holds for any argument, the call is
+  valid and the function's result type definition is used to determine the
+  call's _result type_. A function _may_ vary its result type depending on
+  the argument _values_ as well as the argument _types_; for example, a
+  function that decodes a JSON value will return a different result type
+  depending on the data structure described by the given JSON source code.
+
+If semantic checking succeeds without error, the call is _executed_:
+
+* For each argument, if its value is unknown and its corresponding parameter
+  is not specified as accepting unknowns, the _result value_ is forced to be an
+  unknown value of the result type.
+
+* If the previous condition does not apply, the function's result value
+  definition is used to determine the call's _result value_.
+
+The result of a function call expression is either an error, if one of the
+erroneous conditions above applies, or the _result value_.
+
+## Type Conversions and Unification
+
+Values given in configuration may not always match the expectations of the
+operations applied to them or to the calling application. In such situations,
+automatic type conversion is attempted as a convenience to the user.
+
+Along with conversions to a _specified_ type, it is sometimes necessary to
+ensure that a selection of values are all of the _same_ type, without any
+constraint on which type that is. This is the process of _type unification_,
+which attempts to find the most general type that all of the given types can
+be converted to.
+
+Both type conversions and unification are defined in the syntax-agnostic
+model to ensure consistency of behavior between syntaxes.
+
+Type conversions are broadly characterized into two categories: _safe_ and
+_unsafe_. A conversion is "safe" if any distinct value of the source type
+has a corresponding distinct value in the target type. A conversion is
+"unsafe" if either the target type values are _not_ distinct (information
+may be lost in conversion) or if some values of the source type do not have
+any corresponding value in the target type. An unsafe conversion may result
+in an error.
+
+A given type can always be converted to itself, which is a no-op.
+
+### Conversion of Null Values
+
+All null values are safely convertible to a null value of any other type,
+regardless of other type-specific rules specified in the sections below.
+
+### Conversion to and from the Dynamic Pseudo-type
+
+Conversion _from_ the dynamic pseudo-type _to_ any other type always succeeds,
+producing an unknown value of the target type.
+
+Conversion of any value _to_ the dynamic pseudo-type is a no-op. The result
+is the input value, verbatim. This is the only situation where the conversion
+result value is not of the given target type.
+
+### Primitive Type Conversions
+
+Bidirectional conversions are available between the string and number types,
+and between the string and boolean types.
+
+The bool value true corresponds to the string containing the characters "true",
+while the bool value false corresponds to the string containing the characters
+"false". Conversion from bool to string is safe, while the converse is
+unsafe. The strings "1" and "0" are alternative string representations
+of true and false respectively. It is an error to convert a string other than
+the four in this paragraph to type bool.
+
+A number value is converted to string by translating its integer portion
+into a sequence of decimal digits (`0` through `9`), and then if it has a
+non-zero fractional part, a period `.` followed by a sequence of decimal
+digits representing its fractional part. No exponent portion is included.
+The number is converted at its full precision. Conversion from number to
+string is safe.
+
+A string is converted to a number value by reversing the above mapping.
+No exponent portion is allowed. Conversion from string to number is unsafe.
+It is an error to convert a string that does not comply with the expected
+syntax to type number.
+
+No direct conversion is available between the bool and number types.
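+
+As a non-normative illustration, the Go implementation in this repository
+provides these conversions via the `cty/convert` package (also imported by
+`ops.go` in this patch). A minimal sketch:
+
+```go
+package example
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+func conversionExample() {
+	// number -> string is safe and always succeeds.
+	s, _ := convert.Convert(cty.NumberIntVal(5), cty.String)
+	fmt.Println(s.AsString()) // "5"
+
+	// string -> number is unsafe: it fails for non-numeric strings.
+	if _, err := convert.Convert(cty.StringVal("not a number"), cty.Number); err != nil {
+		fmt.Println("conversion error:", err)
+	}
+}
+```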
+
+### Collection and Structural Type Conversions
+
+Conversion from set types to list types is _safe_, as long as their
+element types are safely convertible. If the element types are _unsafely_
+convertible, then the collection conversion is also unsafe. Each set element
+becomes a corresponding list element, in an undefined order. Although no
+particular ordering is required, implementations _should_ produce list
+elements in a consistent order for a given input set, as a convenience
+to calling applications.
+
+Conversion from list types to set types is _unsafe_, as long as their element
+types are convertible. Each distinct list item becomes a distinct set item.
+If two list items are equal, one of the two is lost in the conversion.
+
+Conversion from tuple types to list types is permitted if all of the
+tuple element types are convertible to the target list element type.
+The safety of the conversion depends on the safety of each of the element
+conversions. Each element in turn is converted to the list element type,
+producing a list of identical length.
+
+Conversion from tuple types to set types is permitted, behaving as if the
+tuple type was first converted to a list of the same element type and then
+that list converted to the target set type.
+
+Conversion from object types to map types is permitted if all of the object
+attribute types are convertible to the target map element type. The safety
+of the conversion depends on the safety of each of the attribute conversions.
+Each attribute in turn is converted to the map element type, and map element
+keys are set to the name of each corresponding object attribute.
+
+Conversion from list and set types to tuple types is permitted, following
+the opposite steps as the converse conversions. Such conversions are _unsafe_.
+It is an error to convert a list or set to a tuple type whose number of
+elements does not match the list or set length.
+
+Conversion from map types to object types is permitted if each map key
+corresponds to an attribute in the target object type. It is an error to
+convert from a map value whose set of keys does not exactly match the target
+type's attributes. The conversion takes the opposite steps of the converse
+conversion.
+
+Conversion from one object type to another is permitted as long as the
+common attribute names have convertible types. Any attribute present in the
+target type but not in the source type is populated with a null value of
+the appropriate type.
+
+Conversion from one tuple type to another is permitted as long as the
+tuples have the same length and the elements have convertible types.
+
+### Type Unification
+
+Type unification is an operation that takes a list of types and attempts
+to find a single type to which they can all be converted. Since some
+type pairs have bidirectional conversions, preference is given to _safe_
+conversions. In technical terms, all possible types are arranged into
+a lattice, from which a most general supertype is selected where possible.
+
+The type resulting from type unification may be one of the input types, or
+it may be an entirely new type produced by combination of two or more
+input types.
+
+The following rules do not guarantee a valid result. In addition to these
+rules, unification fails if any of the given types are not convertible
+(per the above rules) to the selected result type.
+
+The following unification rules apply transitively. That is, if a rule is
+defined from A to B, and one from B to C, then A can unify to C.
+
+Number and bool types both unify with string by preferring string.
+
+Two collection types of the same kind unify according to the unification
+of their element types.
+
+List and set types unify by preferring the list type.
+
+Map and object types unify by preferring the object type.
+
+List, set and tuple types unify by preferring the tuple type.
+
+The dynamic pseudo-type unifies with any other type by selecting that other
+type. The dynamic pseudo-type is the result type only if _all_ input types
+are the dynamic pseudo-type.
+
+Two object types unify by constructing a new type whose attributes are
+the union of those of the two input types. Any common attributes themselves
+have their types unified.
+
+Two tuple types of the same length unify by constructing a new type of the
+same length whose elements are the unification of the corresponding elements
+in the two input types.
+
+## Static Analysis
+
+In most applications, full expression evaluation is sufficient for understanding
+the provided configuration. However, some specialized applications require more
+direct access to the physical structures in the expressions, which can for
+example allow the construction of new language constructs in terms of the
+existing syntax elements.
+
+Since static analysis analyses the physical structure of configuration, the
+details will vary depending on syntax. Each syntax must decide which of its
+physical structures corresponds to the following analyses, producing error
+diagnostics if they are applied to inappropriate expressions.
+
+The following are the required static analysis functions:
+
+* **Static List**: Require list/tuple construction syntax to be used and
+  return a list of expressions for each of the elements given.
+
+* **Static Map**: Require map/object construction syntax to be used and
+  return a list of key/value pairs -- both expressions -- for each of
+  the elements given. The usual constraint that a map key must be a string
+  must not apply to this analysis, thus allowing applications to interpret
+  arbitrary keys as they see fit.
+
+* **Static Call**: Require function call syntax to be used and return an
+  object describing the called function name and a list of expressions
+  representing each of the call arguments.
+
+* **Static Traversal**: Require a reference to a symbol in the variable
+  scope and return a description of the path from the root scope to the
+  accessed attribute or index.
+
+The intent of a calling application using these features is to require a more
+rigid interpretation of the configuration than in expression evaluation.
+Syntax implementations should make use of the extra contextual information
+provided in order to make an intuitive mapping onto the constructs of the
+underlying syntax, possibly interpreting the expression slightly differently
+than it would be interpreted in normal evaluation.
+
+Each syntax must define which of its expression elements each of the analyses
+above applies to, and how those analyses behave given those expression elements.
+
+## Implementation Considerations
+
+Implementations of this specification are free to adopt any strategy that
+produces behavior consistent with the specification. This non-normative
+section describes some possible implementation strategies that are consistent
+with the goals of this specification.
+
+### Language-agnosticism
+
+The language-agnosticism of this specification assumes that certain behaviors
+are implemented separately for each syntax:
+
+* Matching of a body schema with the physical elements of a body in the
+  source language, to determine correspondence between physical constructs
+  and schema elements.
+
+* Implementing the _dynamic attributes_ body processing mode by either
+  interpreting all physical constructs as attributes or producing an error
+  if non-attribute constructs are present.
+
+* Providing an evaluation function for all possible expressions that produces
+  a value given an evaluation context.
+
+* Providing the static analysis functionality described above in a manner that
+  makes sense within the convention of the syntax.
+
+The suggested implementation strategy is to use an implementation language's
+closest concept to an _abstract type_, _virtual type_ or _interface type_
+to represent both Body and Expression. Each language-specific implementation
+can then provide an implementation of each of these types wrapping AST nodes
+or other physical constructs from the language parser.
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go
new file mode 100644
index 00000000..98ada87b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go
@@ -0,0 +1,40 @@
+package hcl
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+type staticExpr struct {
+	val cty.Value
+	rng Range
+}
+
+// StaticExpr returns an Expression that always evaluates to the given value.
+//
+// This is useful to substitute default values for expressions that are
+// not explicitly given in configuration and thus would otherwise have no
+// Expression to return.
+//
+// Since expressions are expected to have a source range, the caller must
+// provide one. Ideally this should be a real source range, but it can
+// be a synthetic one (with an empty-string filename) if no suitable range
+// is available.
+func StaticExpr(val cty.Value, rng Range) Expression {
+	return staticExpr{val, rng}
+}
+
+func (e staticExpr) Value(ctx *EvalContext) (cty.Value, Diagnostics) {
+	return e.val, nil
+}
+
+func (e staticExpr) Variables() []Traversal {
+	return nil
+}
+
+func (e staticExpr) Range() Range {
+	return e.rng
+}
+
+func (e staticExpr) StartRange() Range {
+	return e.rng
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/structure.go
new file mode 100644
index 00000000..b336f300
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/structure.go
@@ -0,0 +1,151 @@
+package hcl
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// File is the top-level node that results from parsing an HCL file.
+type File struct {
+	Body  Body
+	Bytes []byte
+
+	// Nav is used to integrate with the "hcled" editor integration package,
+	// and with diagnostic information formatters. It is not for direct use
+	// by a calling application.
+	Nav interface{}
+}
+
+// Block represents a nested block within a Body.
+type Block struct {
+	Type   string
+	Labels []string
+	Body   Body
+
+	DefRange    Range   // Range that can be considered the "definition" for seeking in an editor
+	TypeRange   Range   // Range for the block type declaration specifically.
+	LabelRanges []Range // Ranges for the label values specifically.
+}
+
+// Blocks is a sequence of Block.
+type Blocks []*Block
+
+// Attributes is a set of attributes keyed by their names.
+type Attributes map[string]*Attribute
+
+// Body is a container for attributes and blocks. It serves as the primary
+// unit of hierarchical structure within configuration.
+//
+// The content of a body cannot be meaningfully interpreted without a schema,
+// so Body represents the raw body content and has methods that allow the
+// content to be extracted in terms of a given schema.
+type Body interface {
+	// Content verifies that the entire body content conforms to the given
+	// schema and then returns it, and/or returns diagnostics. The returned
+	// body content is valid if non-nil, regardless of whether Diagnostics
+	// are provided, but diagnostics should still be eventually shown to
+	// the user.
+	Content(schema *BodySchema) (*BodyContent, Diagnostics)
+
+	// PartialContent is like Content except that it permits the configuration
+	// to contain additional blocks or attributes not specified in the
+	// schema. If any are present, the returned Body is non-nil and contains
+	// the remaining items from the body that were not selected by the schema.
+	PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics)
+
+	// JustAttributes attempts to interpret all of the contents of the body
+	// as attributes, allowing for the contents to be accessed without a priori
+	// knowledge of the structure.
+	//
+	// The behavior of this method depends on the body's source language.
+	// Some languages, like JSON, can't distinguish between attributes and
+	// blocks without schema hints, but for languages that _can_, error
+	// diagnostics will be generated if any blocks are present in the body.
+	//
+	// Diagnostics may be produced for other reasons too, such as duplicate
+	// declarations of the same attribute.
+	JustAttributes() (Attributes, Diagnostics)
+
+	// MissingItemRange returns a range that represents where a missing item
+	// might hypothetically be inserted. This is used when producing
+	// diagnostics about missing required attributes or blocks. Not all bodies
+	// will have an obvious single insertion point, so the result here may
+	// be rather arbitrary.
+	MissingItemRange() Range
+}
+
+// BodyContent is the result of applying a BodySchema to a Body.
+type BodyContent struct {
+	Attributes Attributes
+	Blocks     Blocks
+
+	MissingItemRange Range
+}
+
+// Attribute represents an attribute from within a body.
+type Attribute struct {
+	Name string
+	Expr Expression
+
+	Range     Range
+	NameRange Range
+}
+
+// Expression is a literal value or an expression provided in the
+// configuration, which can be evaluated within a scope to produce a value.
+type Expression interface {
+	// Value returns the value resulting from evaluating the expression
+	// in the given evaluation context.
+	//
+	// The context may be nil, in which case the expression may contain
+	// only constants and diagnostics will be produced for any non-constant
+	// sub-expressions. (The exact definition of this depends on the source
+	// language.)
+	//
+	// The context may instead be set but have either its Variables or
+	// Functions maps set to nil, in which case only use of these features
+	// will return diagnostics.
+	//
+	// Different diagnostics are provided depending on whether the given
+	// context maps are nil or empty. In the former case, the message
+	// tells the user that variables/functions are not permitted at all,
+	// while in the latter case usage will produce a "not found" error for
+	// the specific symbol in question.
+ Value(ctx *EvalContext) (cty.Value, Diagnostics) + + // Variables returns a list of variables referenced in the receiving + // expression. These are expressed as absolute Traversals, so may include + // additional information about how the variable is used, such as + // attribute lookups, which the calling application can potentially use + // to only selectively populate the scope. + Variables() []Traversal + + Range() Range + StartRange() Range +} + +// OfType filters the receiving block sequence by block type name, +// returning a new block sequence including only the blocks of the +// requested type. +func (els Blocks) OfType(typeName string) Blocks { + ret := make(Blocks, 0) + for _, el := range els { + if el.Type == typeName { + ret = append(ret, el) + } + } + return ret +} + +// ByType transforms the receiving block sequence into a map from type +// name to block sequences of only that type. +func (els Blocks) ByType() map[string]Blocks { + ret := make(map[string]Blocks) + for _, el := range els { + ty := el.Type + if ret[ty] == nil { + ret[ty] = make(Blocks, 0, 1) + } + ret[ty] = append(ret[ty], el) + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go new file mode 100644 index 00000000..24f4c91b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go @@ -0,0 +1,352 @@ +package hcl + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// A Traversal is a description of traversing through a value through a +// series of operations such as attribute lookup, index lookup, etc. +// +// It is used to look up values in scopes, for example. +// +// The traversal operations are implementations of interface Traverser. +// This is a closed set of implementations, so the interface cannot be +// implemented from outside this package. +// +// A traversal can be absolute (its first value is a symbol name) or relative +// (starts from an existing value). +type Traversal []Traverser + +// TraversalJoin appends a relative traversal to an absolute traversal to +// produce a new absolute traversal. +func TraversalJoin(abs Traversal, rel Traversal) Traversal { + if abs.IsRelative() { + panic("first argument to TraversalJoin must be absolute") + } + if !rel.IsRelative() { + panic("second argument to TraversalJoin must be relative") + } + + ret := make(Traversal, len(abs)+len(rel)) + copy(ret, abs) + copy(ret[len(abs):], rel) + return ret +} + +// TraverseRel applies the receiving traversal to the given value, returning +// the resulting value. This is supported only for relative traversals, +// and will panic if applied to an absolute traversal. +func (t Traversal) TraverseRel(val cty.Value) (cty.Value, Diagnostics) { + if !t.IsRelative() { + panic("can't use TraverseRel on an absolute traversal") + } + + current := val + var diags Diagnostics + for _, tr := range t { + var newDiags Diagnostics + current, newDiags = tr.TraversalStep(current) + diags = append(diags, newDiags...) + if newDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + return current, diags +} + +// TraverseAbs applies the receiving traversal to the given eval context, +// returning the resulting value. This is supported only for absolute +// traversals, and will panic if applied to a relative traversal. 
+func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
+	if t.IsRelative() {
+		panic("can't use TraverseAbs on a relative traversal")
+	}
+
+	split := t.SimpleSplit()
+	root := split.Abs[0].(TraverseRoot)
+	name := root.Name
+
+	thisCtx := ctx
+	hasNonNil := false
+	for thisCtx != nil {
+		if thisCtx.Variables == nil {
+			thisCtx = thisCtx.parent
+			continue
+		}
+		hasNonNil = true
+		val, exists := thisCtx.Variables[name]
+		if exists {
+			return split.Rel.TraverseRel(val)
+		}
+		thisCtx = thisCtx.parent
+	}
+
+	if !hasNonNil {
+		return cty.DynamicVal, Diagnostics{
+			{
+				Severity: DiagError,
+				Summary:  "Variables not allowed",
+				Detail:   "Variables may not be used here.",
+				Subject:  &root.SrcRange,
+			},
+		}
+	}
+
+	suggestions := make([]string, 0, len(ctx.Variables))
+	thisCtx = ctx
+	for thisCtx != nil {
+		for k := range thisCtx.Variables {
+			suggestions = append(suggestions, k)
+		}
+		thisCtx = thisCtx.parent
+	}
+	suggestion := nameSuggestion(name, suggestions)
+	if suggestion != "" {
+		suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+	}
+
+	return cty.DynamicVal, Diagnostics{
+		{
+			Severity: DiagError,
+			Summary:  "Unknown variable",
+			Detail:   fmt.Sprintf("There is no variable named %q.%s", name, suggestion),
+			Subject:  &root.SrcRange,
+		},
+	}
+}
+
+// IsRelative returns true if the receiver is a relative traversal, or false
+// otherwise.
+func (t Traversal) IsRelative() bool {
+	if len(t) == 0 {
+		return true
+	}
+	if _, firstIsRoot := t[0].(TraverseRoot); firstIsRoot {
+		return false
+	}
+	return true
+}
+
+// SimpleSplit returns a TraversalSplit where the name lookup is the absolute
+// part and the remainder is the relative part. Supported only for
+// absolute traversals, and will panic if applied to a relative traversal.
+//
+// This can be used by applications that have a relatively-simple variable
+// namespace where only the top-level is directly populated in the scope, with
+// everything else handled by relative lookups from those initial values.
+func (t Traversal) SimpleSplit() TraversalSplit {
+	if t.IsRelative() {
+		panic("can't use SimpleSplit on a relative traversal")
+	}
+	return TraversalSplit{
+		Abs: t[0:1],
+		Rel: t[1:],
+	}
+}
+
+// RootName returns the root name for an absolute traversal. Will panic if
+// called on a relative traversal.
+func (t Traversal) RootName() string {
+	if t.IsRelative() {
+		panic("can't use RootName on a relative traversal")
+	}
+	return t[0].(TraverseRoot).Name
+}
+
+// SourceRange returns the source range for the traversal.
+func (t Traversal) SourceRange() Range {
+	if len(t) == 0 {
+		// Nothing useful to return here, but we'll return something
+		// that's correctly-typed at least.
+		return Range{}
+	}
+
+	return RangeBetween(t[0].SourceRange(), t[len(t)-1].SourceRange())
+}
+
+// TraversalSplit represents a pair of traversals, the first of which is
+// an absolute traversal and the second of which is relative to the first.
+//
+// This is used by calling applications that only populate prefixes of the
+// traversals in the scope, with Abs representing the part coming from the
+// scope and Rel representing the remaining steps once that part is
+// retrieved.
+type TraversalSplit struct {
+	Abs Traversal
+	Rel Traversal
+}
+
+// TraverseAbs traverses from a scope to the value resulting from the
+// absolute traversal.
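+//
+// Illustrative sketch (editorial addition, not part of the upstream
+// source), assuming "traversal" is an absolute hcl.Traversal and "ctx"
+// is an *hcl.EvalContext supplied by the caller:
+//
+//     split := traversal.SimpleSplit()
+//     rootVal, diags := split.TraverseAbs(ctx) // resolves split.RootName() in the scope
+//     if !diags.HasErrors() {
+//         result, moreDiags := split.TraverseRel(rootVal)
+//         // ... or, equivalently, use split.Traverse(ctx) to do both steps
+//     }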
+func (t TraversalSplit) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
+	return t.Abs.TraverseAbs(ctx)
+}
+
+// TraverseRel traverses from a given value, assumed to be the result of
+// TraverseAbs on some scope, to a final result for the entire split traversal.
+func (t TraversalSplit) TraverseRel(val cty.Value) (cty.Value, Diagnostics) {
+	return t.Rel.TraverseRel(val)
+}
+
+// Traverse is a convenience function to apply TraverseAbs followed by
+// TraverseRel.
+func (t TraversalSplit) Traverse(ctx *EvalContext) (cty.Value, Diagnostics) {
+	v1, diags := t.TraverseAbs(ctx)
+	if diags.HasErrors() {
+		return cty.DynamicVal, diags
+	}
+	v2, newDiags := t.TraverseRel(v1)
+	diags = append(diags, newDiags...)
+	return v2, diags
+}
+
+// Join concatenates together the Abs and Rel parts to produce a single
+// absolute traversal.
+func (t TraversalSplit) Join() Traversal {
+	return TraversalJoin(t.Abs, t.Rel)
+}
+
+// RootName returns the root name for the absolute part of the split.
+func (t TraversalSplit) RootName() string {
+	return t.Abs.RootName()
+}
+
+// A Traverser is a step within a Traversal.
+type Traverser interface {
+	TraversalStep(cty.Value) (cty.Value, Diagnostics)
+	SourceRange() Range
+	isTraverserSigil() isTraverser
+}
+
+// Embed this in a struct to declare it as a Traverser.
+type isTraverser struct {
+}
+
+func (tr isTraverser) isTraverserSigil() isTraverser {
+	return isTraverser{}
+}
+
+// TraverseRoot looks up a root name in a scope. It is used as the first step
+// of an absolute Traversal, and cannot itself be traversed directly.
+type TraverseRoot struct {
+	isTraverser
+	Name     string
+	SrcRange Range
+}
+
+// TraversalStep on a TraverseRoot immediately panics, because absolute
+// traversals cannot be directly traversed.
+func (tn TraverseRoot) TraversalStep(cty.Value) (cty.Value, Diagnostics) {
+	panic("Cannot traverse an absolute traversal")
+}
+
+func (tn TraverseRoot) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseAttr looks up an attribute in its initial value.
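+//
+// Illustrative sketch (editorial addition, not part of the upstream
+// source): a reference written as foo.bar[0] in HCL native syntax
+// corresponds to the traversal
+//
+//     hcl.Traversal{
+//         hcl.TraverseRoot{Name: "foo"},
+//         hcl.TraverseAttr{Name: "bar"},
+//         hcl.TraverseIndex{Key: cty.NumberIntVal(0)},
+//     }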
+type TraverseAttr struct {
+	isTraverser
+	Name     string
+	SrcRange Range
+}
+
+func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	if val.IsNull() {
+		return cty.DynamicVal, Diagnostics{
+			{
+				Severity: DiagError,
+				Summary:  "Attempt to get attribute from null value",
+				Detail:   "This value is null, so it does not have any attributes.",
+				Subject:  &tn.SrcRange,
+			},
+		}
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsObjectType():
+		if !ty.HasAttribute(tn.Name) {
+			return cty.DynamicVal, Diagnostics{
+				{
+					Severity: DiagError,
+					Summary:  "Unsupported attribute",
+					Detail:   fmt.Sprintf("This object does not have an attribute named %q.", tn.Name),
+					Subject:  &tn.SrcRange,
+				},
+			}
+		}
+
+		if !val.IsKnown() {
+			return cty.UnknownVal(ty.AttributeType(tn.Name)), nil
+		}
+
+		return val.GetAttr(tn.Name), nil
+	case ty.IsMapType():
+		if !val.IsKnown() {
+			return cty.UnknownVal(ty.ElementType()), nil
+		}
+
+		idx := cty.StringVal(tn.Name)
+		if val.HasIndex(idx).False() {
+			return cty.DynamicVal, Diagnostics{
+				{
+					Severity: DiagError,
+					Summary:  "Missing map element",
+					Detail:   fmt.Sprintf("This map does not have an element with the key %q.", tn.Name),
+					Subject:  &tn.SrcRange,
+				},
+			}
+		}
+
+		return val.Index(idx), nil
+	case ty == cty.DynamicPseudoType:
+		return cty.DynamicVal, nil
+	default:
+		return cty.DynamicVal, Diagnostics{
+			{
+				Severity: DiagError,
+				Summary:  "Unsupported attribute",
+				Detail:   "This value does not have any attributes.",
+				Subject:  &tn.SrcRange,
+			},
+		}
+	}
+}
+
+func (tn TraverseAttr) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseIndex applies the index operation to its initial value.
+type TraverseIndex struct {
+	isTraverser
+	Key      cty.Value
+	SrcRange Range
+}
+
+func (tn TraverseIndex) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	return Index(val, tn.Key, &tn.SrcRange)
+}
+
+func (tn TraverseIndex) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseSplat applies the splat operation to its initial value.
+type TraverseSplat struct {
+	isTraverser
+	Each     Traversal
+	SrcRange Range
+}
+
+func (tn TraverseSplat) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	panic("TraverseSplat not yet implemented")
+}
+
+func (tn TraverseSplat) SourceRange() Range {
+	return tn.SrcRange
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
new file mode 100644
index 00000000..5f529467
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
@@ -0,0 +1,121 @@
+package hcl
+
+// AbsTraversalForExpr attempts to interpret the given expression as
+// an absolute traversal, or returns error diagnostic(s) if that is
+// not possible for the given expression.
+//
+// A particular Expression implementation can support this function by
+// offering a method called AsTraversal that takes no arguments and
+// returns either a valid absolute traversal or nil to indicate that
+// no traversal is possible. Alternatively, an implementation can support
+// UnwrapExpression to delegate handling of this function to a wrapped
+// Expression object.
+//
+// In most cases the calling application is interested in the value
+// that results from an expression, but in rarer cases the application
+// needs to see the name of the variable and subsequent
+// attributes/indexes itself, for example to allow users to give references
+// to the variables themselves rather than to their values. An implementer
+// of this function should at least support attribute and index steps.
+func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
+	type asTraversal interface {
+		AsTraversal() Traversal
+	}
+
+	physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
+		_, supported := expr.(asTraversal)
+		return supported
+	})
+
+	if asT, supported := physExpr.(asTraversal); supported {
+		if traversal := asT.AsTraversal(); traversal != nil {
+			return traversal, nil
+		}
+	}
+	return nil, Diagnostics{
+		&Diagnostic{
+			Severity: DiagError,
+			Summary:  "Invalid expression",
+			Detail:   "A static variable reference is required.",
+			Subject:  expr.Range().Ptr(),
+		},
+	}
+}
+
+// RelTraversalForExpr is similar to AbsTraversalForExpr but it returns
+// a relative traversal instead. Due to the nature of HCL expressions, the
+// first element of the returned traversal is always a TraverseAttr, and
+// then it will be followed by zero or more further traversal steps.
+//
+// Any expression accepted by AbsTraversalForExpr is also accepted by
+// RelTraversalForExpr.
+func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
+	traversal, diags := AbsTraversalForExpr(expr)
+	if len(traversal) > 0 {
+		root := traversal[0].(TraverseRoot)
+		traversal[0] = TraverseAttr{
+			Name:     root.Name,
+			SrcRange: root.SrcRange,
+		}
+	}
+	return traversal, diags
+}
+
+// ExprAsKeyword attempts to interpret the given expression as a static keyword,
+// returning the keyword string if possible, and the empty string if not.
+//
+// A static keyword, for the sake of this function, is a single identifier.
+// For example, the following attribute has an expression that would produce
+// the keyword "foo":
+//
+//     example = foo
+//
+// This function is a variant of AbsTraversalForExpr, which uses the same
+// interface on the given expression. This helper constrains the result
+// further by requiring only a single root identifier.
+//
+// This function is intended to be used with the following idiom, to recognize
+// situations where one of a fixed set of keywords is required and arbitrary
+// expressions are not allowed:
+//
+//     switch hcl.ExprAsKeyword(expr) {
+//     case "allow":
+//         // (take suitable action for keyword "allow")
+//     case "deny":
+//         // (take suitable action for keyword "deny")
+//     default:
+//         diags = append(diags, &hcl.Diagnostic{
+//             // ... "invalid keyword" diagnostic message ...
+//         })
+//     }
+//
+// The above approach will generate the same message for both the use of an
+// unrecognized keyword and for not using a keyword at all, which is usually
+// reasonable if the message specifies that the given value must be a keyword
+// from that fixed list.
+//
+// Note that in the native syntax the keywords "true", "false", and "null" are
+// recognized as literal values during parsing and so these reserved words
+// cannot be accepted as keywords by this function.
+//
+// Since interpreting an expression as a keyword bypasses usual expression
+// evaluation, it should be used sparingly for situations where e.g. one of
+// a fixed set of keywords is used in a structural way in a special attribute
+// to affect the further processing of a block.
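+//
+// Illustrative sketch of the guard described above (editorial addition;
+// "expr" and "diags" are assumed to exist in the caller):
+//
+//     if kw := hcl.ExprAsKeyword(expr); kw == "" {
+//         // not a single identifier; append the "invalid keyword"
+//         // diagnostic to diags here
+//     }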
+func ExprAsKeyword(expr Expression) string { + type asTraversal interface { + AsTraversal() Traversal + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(asTraversal) + return supported + }) + + if asT, supported := physExpr.(asTraversal); supported { + if traversal := asT.AsTraversal(); len(traversal) == 1 { + return traversal.RootName() + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go b/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go new file mode 100644 index 00000000..7e652e9b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go @@ -0,0 +1,21 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +type blockLabel struct { + Value string + Range hcl.Range +} + +func labelsForBlock(block *hcl.Block) []blockLabel { + ret := make([]blockLabel, len(block.Labels)) + for i := range block.Labels { + ret[i] = blockLabel{ + Value: block.Labels[i], + Range: block.LabelRanges[i], + } + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/decode.go b/vendor/github.com/hashicorp/hcl2/hcldec/decode.go new file mode 100644 index 00000000..6cf93fed --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/decode.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) { + schema := ImpliedSchema(spec) + + var content *hcl.BodyContent + var diags hcl.Diagnostics + var leftovers hcl.Body + + if partial { + content, leftovers, diags = body.PartialContent(schema) + } else { + content, diags = body.Content(schema) + } + + val, valDiags := spec.decode(content, blockLabels, ctx) + diags = append(diags, valDiags...) + + return val, leftovers, diags +} + +func impliedType(spec Spec) cty.Type { + return spec.impliedType() +} + +func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range { + schema := ImpliedSchema(spec) + content, _, _ := body.PartialContent(schema) + + return spec.sourceRange(content, blockLabels) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/doc.go b/vendor/github.com/hashicorp/hcl2/hcldec/doc.go new file mode 100644 index 00000000..23bfe542 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/doc.go @@ -0,0 +1,12 @@ +// Package hcldec provides a higher-level API for unpacking the content of +// HCL bodies, implemented in terms of the low-level "Content" API exposed +// by the bodies themselves. +// +// It allows decoding an entire nested configuration in a single operation +// by providing a description of the intended structure. +// +// For some applications it may be more convenient to use the "gohcl" +// package, which has a similar purpose but decodes directly into native +// Go data types. hcldec instead targets the cty type system, and thus allows +// a cty-driven application to remain within that type system. 
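+//
+// The following end-to-end sketch is an editorial illustration, not part
+// of the upstream doc comment. It decodes a body expected to contain a
+// single string attribute named "name" (the attribute name and variable
+// names are hypothetical):
+//
+//     spec := hcldec.ObjectSpec{
+//         "name": &hcldec.AttrSpec{
+//             Name:     "name",
+//             Type:     cty.String,
+//             Required: true,
+//         },
+//     }
+//     val, diags := hcldec.Decode(file.Body, spec, nil) // nil ctx: constants only
+//     if !diags.HasErrors() {
+//         name := val.GetAttr("name") // a cty.String value
+//         _ = name
+//     }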
+package hcldec diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/gob.go b/vendor/github.com/hashicorp/hcl2/hcldec/gob.go new file mode 100644 index 00000000..e2027cfd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/gob.go @@ -0,0 +1,23 @@ +package hcldec + +import ( + "encoding/gob" +) + +func init() { + // Every Spec implementation should be registered with gob, so that + // specs can be sent over gob channels, such as using + // github.com/hashicorp/go-plugin with plugins that need to describe + // what shape of configuration they are expecting. + gob.Register(ObjectSpec(nil)) + gob.Register(TupleSpec(nil)) + gob.Register((*AttrSpec)(nil)) + gob.Register((*LiteralSpec)(nil)) + gob.Register((*ExprSpec)(nil)) + gob.Register((*BlockSpec)(nil)) + gob.Register((*BlockListSpec)(nil)) + gob.Register((*BlockSetSpec)(nil)) + gob.Register((*BlockMapSpec)(nil)) + gob.Register((*BlockLabelSpec)(nil)) + gob.Register((*DefaultSpec)(nil)) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/public.go b/vendor/github.com/hashicorp/hcl2/hcldec/public.go new file mode 100644 index 00000000..5d1f10a3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/public.go @@ -0,0 +1,78 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// Decode interprets the given body using the given specification and returns +// the resulting value. If the given body is not valid per the spec, error +// diagnostics are returned and the returned value is likely to be incomplete. +// +// The ctx argument may be nil, in which case any references to variables or +// functions will produce error diagnostics. +func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, _, diags := decode(body, nil, ctx, spec, false) + return val, diags +} + +// PartialDecode is like Decode except that it permits "leftover" items in +// the top-level body, which are returned as a new body to allow for +// further processing. +// +// Any descendent block bodies are _not_ decoded partially and thus must +// be fully described by the given specification. +func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) { + return decode(body, nil, ctx, spec, true) +} + +// ImpliedType returns the value type that should result from decoding the +// given spec. +func ImpliedType(spec Spec) cty.Type { + return impliedType(spec) +} + +// SourceRange interprets the given body using the given specification and +// then returns the source range of the value that would be used to +// fulfill the spec. +// +// This can be used if application-level validation detects value errors, to +// obtain a reasonable SourceRange to use for generated diagnostics. It works +// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...) +// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will +// be less useful the broader the specification, so e.g. a spec that returns +// the entirety of all of the blocks of a given type is likely to be +// _particularly_ arbitrary and useless. +// +// If the given body is not valid per the given spec, the result is best-effort +// and may not actually be something ideal. It's expected that an application +// will already have used Decode or PartialDecode earlier and thus had an +// opportunity to detect and report spec violations. 
+func SourceRange(body hcl.Body, spec Spec) hcl.Range {
+	return sourceRange(body, nil, spec)
+}
+
+// ChildBlockTypes returns a map of all of the child block types declared
+// by the given spec, with block type names as keys and the associated
+// nested body specs as values.
+func ChildBlockTypes(spec Spec) map[string]Spec {
+	ret := map[string]Spec{}
+
+	// visitSameBodyChildren walks through the spec structure, calling
+	// the given callback for each descendent spec encountered. We are
+	// interested in the specs that declare block types.
+	var visit visitFunc
+	visit = func(s Spec) {
+		if bs, ok := s.(blockSpec); ok {
+			for _, blockS := range bs.blockHeaderSchemata() {
+				ret[blockS.Type] = bs.nestedSpec()
+			}
+		}
+
+		s.visitSameBodyChildren(visit)
+	}
+
+	visit(spec)
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/schema.go b/vendor/github.com/hashicorp/hcl2/hcldec/schema.go
new file mode 100644
index 00000000..b57bd969
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/schema.go
@@ -0,0 +1,36 @@
+package hcldec
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// ImpliedSchema returns the *hcl.BodySchema implied by the given specification.
+// This is the schema that the Decode function will use internally to
+// access the content of a given body.
+func ImpliedSchema(spec Spec) *hcl.BodySchema {
+	var attrs []hcl.AttributeSchema
+	var blocks []hcl.BlockHeaderSchema
+
+	// visitSameBodyChildren walks through the spec structure, calling
+	// the given callback for each descendent spec encountered. We are
+	// interested in the specs that reference attributes and blocks.
+	var visit visitFunc
+	visit = func(s Spec) {
+		if as, ok := s.(attrSpec); ok {
+			attrs = append(attrs, as.attrSchemata()...)
+		}
+
+		if bs, ok := s.(blockSpec); ok {
+			blocks = append(blocks, bs.blockHeaderSchemata()...)
+		}
+
+		s.visitSameBodyChildren(visit)
+	}
+
+	visit(spec)
+
+	return &hcl.BodySchema{
+		Attributes: attrs,
+		Blocks:     blocks,
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go
new file mode 100644
index 00000000..25cafcd9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go
@@ -0,0 +1,998 @@
+package hcldec
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// A Spec is a description of how to decode a hcl.Body to a cty.Value.
+//
+// The various other types in this package whose names end in "Spec" are
+// the spec implementations. The most common top-level spec is ObjectSpec,
+// which decodes body content into a cty.Value of an object type.
+type Spec interface {
+	// Perform the decode operation on the given body, in the context of
+	// the given block (which might be null), using the given eval context.
+	//
+	// "block" is provided only by the nested calls performed by the spec
+	// types that work on block bodies.
+	decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
+
+	// Return the cty.Type that should be returned when decoding a body with
+	// this spec.
+	impliedType() cty.Type
+
+	// Call the given callback once for each of the nested specs that would
+	// get decoded with the same body and block as the receiver. This should
+	// not descend into the nested specs used when decoding blocks.
+ visitSameBodyChildren(cb visitFunc) + + // Determine the source range of the value that would be returned for the + // spec in the given content, in the context of the given block + // (which might be null). If the corresponding item is missing, return + // a place where it might be inserted. + sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range +} + +type visitFunc func(spec Spec) + +// An ObjectSpec is a Spec that produces a cty.Value of an object type whose +// attributes correspond to the keys of the spec map. +type ObjectSpec map[string]Spec + +// attrSpec is implemented by specs that require attributes from the body. +type attrSpec interface { + attrSchemata() []hcl.AttributeSchema +} + +// blockSpec is implemented by specs that require blocks from the body. +type blockSpec interface { + blockHeaderSchemata() []hcl.BlockHeaderSchema + nestedSpec() Spec +} + +// specNeedingVariables is implemented by specs that can use variables +// from the EvalContext, to declare which variables they need. +type specNeedingVariables interface { + variablesNeeded(content *hcl.BodyContent) []hcl.Traversal +} + +func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make(map[string]cty.Value, len(s)) + var diags hcl.Diagnostics + + for k, spec := range s { + var kd hcl.Diagnostics + vals[k], kd = spec.decode(content, blockLabels, ctx) + diags = append(diags, kd...) + } + + return cty.ObjectVal(vals), diags +} + +func (s ObjectSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyObject + } + + attrTypes := make(map[string]cty.Type) + for k, childSpec := range s { + attrTypes[k] = childSpec.impliedType() + } + return cty.Object(attrTypes) +} + +func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. + return content.MissingItemRange +} + +// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose +// elements correspond to the elements of the spec slice. +type TupleSpec []Spec + +func (s TupleSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make([]cty.Value, len(s)) + var diags hcl.Diagnostics + + for i, spec := range s { + var ed hcl.Diagnostics + vals[i], ed = spec.decode(content, blockLabels, ctx) + diags = append(diags, ed...) + } + + return cty.TupleVal(vals), diags +} + +func (s TupleSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyTuple + } + + attrTypes := make([]cty.Type, len(s)) + for i, childSpec := range s { + attrTypes[i] = childSpec.impliedType() + } + return cty.Tuple(attrTypes) +} + +func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. 
+ return content.MissingItemRange +} + +// An AttrSpec is a Spec that evaluates a particular attribute expression in +// the body and returns its resulting value converted to the requested type, +// or produces a diagnostic if the type is incorrect. +type AttrSpec struct { + Name string + Type cty.Type + Required bool +} + +func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + attr, exists := content.Attributes[s.Name] + if !exists { + return nil + } + + return attr.Expr.Variables() +} + +// attrSpec implementation +func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema { + return []hcl.AttributeSchema{ + { + Name: s.Name, + Required: s.Required, + }, + } +} + +func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + attr, exists := content.Attributes[s.Name] + if !exists { + return content.MissingItemRange + } + + return attr.Expr.Range() +} + +func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + attr, exists := content.Attributes[s.Name] + if !exists { + // We don't need to check required and emit a diagnostic here, because + // that would already have happened when building "content". + return cty.NullVal(s.Type), nil + } + + val, diags := attr.Expr.Value(ctx) + + convVal, err := convert.Convert(val, s.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect attribute value type", + Detail: fmt.Sprintf( + "Inappropriate value for attribute %q: %s.", + s.Name, err.Error(), + ), + Subject: attr.Expr.StartRange().Ptr(), + Context: hcl.RangeBetween(attr.NameRange, attr.Expr.StartRange()).Ptr(), + }) + // We'll return an unknown value of the _correct_ type so that the + // incomplete result can still be used for some analysis use-cases. + val = cty.UnknownVal(s.Type) + } else { + val = convVal + } + + return val, diags +} + +func (s *AttrSpec) impliedType() cty.Type { + return s.Type +} + +// A LiteralSpec is a Spec that produces the given literal value, ignoring +// the given body. +type LiteralSpec struct { + Value cty.Value +} + +func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Value, nil +} + +func (s *LiteralSpec) impliedType() cty.Type { + return s.Value.Type() +} + +func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // No sensible range to return for a literal, so the caller had better + // ensure it doesn't cause any diagnostics. + return hcl.Range{ + Filename: "", + } +} + +// An ExprSpec is a Spec that evaluates the given expression, ignoring the +// given body. 
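+//
+// Illustrative sketch (editorial addition, not part of the upstream
+// source); ExprSpec is useful for splicing an expression obtained
+// elsewhere into a larger decoded result, e.g. alongside other specs in
+// an ObjectSpec (the "countExpr" variable here is hypothetical):
+//
+//     spec := hcldec.ObjectSpec{
+//         "count": &hcldec.ExprSpec{Expr: countExpr}, // countExpr: an hcl.Expression
+//     }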
+type ExprSpec struct { + Expr hcl.Expression +} + +func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + return s.Expr.Variables() +} + +func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Expr.Value(ctx) +} + +func (s *ExprSpec) impliedType() cty.Type { + // We can't know the type of our expression until we evaluate it + return cty.DynamicPseudoType +} + +func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + return s.Expr.Range() +} + +// A BlockSpec is a Spec that produces a cty.Value by decoding the contents +// of a single nested block of a given type, using a nested spec. +// +// If the Required flag is not set, the nested block may be omitted, in which +// case a null value is produced. If it _is_ set, an error diagnostic is +// produced if there are no nested blocks of the given type. +type BlockSpec struct { + TypeName string + Nested Spec + Required bool +} + +func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return nil + } + + return Variables(childBlock.Body, s.Nested) +} + +func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + if childBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "Only one block of type %q is allowed. Previous definition was at %s.", + s.TypeName, childBlock.DefRange.String(), + ), + Subject: &candidate.DefRange, + }) + break + } + + childBlock = candidate + } + + if childBlock == nil { + if s.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block of type %q is required here.", s.TypeName, + ), + Subject: &content.MissingItemRange, + }) + } + return cty.NullVal(s.Nested.impliedType()), diags + } + + if s.Nested == nil { + panic("BlockSpec with no Nested Spec") + } + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) 
+ return val, diags +} + +func (s *BlockSpec) impliedType() cty.Type { + return s.Nested.impliedType() +} + +func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockListSpec is a Spec that produces a cty list of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +type BlockListSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockListSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockListSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) + elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.ListValEmpty(s.Nested.impliedType()) + } else { + ret = cty.ListVal(elems) + } + + return ret, diags +} + +func (s *BlockListSpec) impliedType() cty.Type { + return cty.List(s.Nested.impliedType()) +} + +func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. 
+ + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockSetSpec is a Spec that produces a cty set of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +type BlockSetSpec struct { + TypeName string + Nested Spec + MinItems int + MaxItems int +} + +func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockSetSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockSetSpec with no Nested Spec") + } + + var elems []cty.Value + var sourceRanges []hcl.Range + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) + elems = append(elems, val) + sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)) + } + + if len(elems) < s.MinItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName), + Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName), + Subject: &content.MissingItemRange, + }) + } else if s.MaxItems > 0 && len(elems) > s.MaxItems { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Too many %s blocks", s.TypeName), + Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName), + Subject: &sourceRanges[s.MaxItems], + }) + } + + var ret cty.Value + + if len(elems) == 0 { + ret = cty.SetValEmpty(s.Nested.impliedType()) + } else { + ret = cty.SetVal(elems) + } + + return ret, diags +} + +func (s *BlockSetSpec) impliedType() cty.Type { + return cty.Set(s.Nested.impliedType()) +} + +func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockMapSpec is a Spec that produces a cty map of the results of +// decoding all of the nested blocks of a given type, using a nested spec. 
+//
+// One level of map structure is created for each of the given label names.
+// There must be at least one given label name.
+type BlockMapSpec struct {
+	TypeName   string
+	LabelNames []string
+	Nested     Spec
+}
+
+func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node ("Nested" does not use the same body)
+}
+
+// blockSpec implementation
+func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
+	return []hcl.BlockHeaderSchema{
+		{
+			Type:       s.TypeName,
+			LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...),
+		},
+	}
+}
+
+// blockSpec implementation
+func (s *BlockMapSpec) nestedSpec() Spec {
+	return s.Nested
+}
+
+// specNeedingVariables implementation
+func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
+	var ret []hcl.Traversal
+
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		ret = append(ret, Variables(childBlock.Body, s.Nested)...)
+	}
+
+	return ret
+}
+
+func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	if s.Nested == nil {
+		panic("BlockMapSpec with no Nested Spec")
+	}
+
+	elems := map[string]interface{}{}
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		childLabels := labelsForBlock(childBlock)
+		val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false)
+		targetMap := elems
+		for _, key := range childBlock.Labels[:len(s.LabelNames)-1] {
+			if _, exists := targetMap[key]; !exists {
+				targetMap[key] = make(map[string]interface{})
+			}
+			targetMap = targetMap[key].(map[string]interface{})
+		}
+
+		diags = append(diags, childDiags...)
+
+		key := childBlock.Labels[len(s.LabelNames)-1]
+		if _, exists := targetMap[key]; exists {
+			labelsBuf := bytes.Buffer{}
+			for _, label := range childBlock.Labels {
+				fmt.Fprintf(&labelsBuf, " %q", label)
+			}
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Duplicate %s block", s.TypeName),
+				Detail: fmt.Sprintf(
+					"A block for %s%s was already defined. The %s labels must be unique.",
+					s.TypeName, labelsBuf.String(), s.TypeName,
+				),
+				Subject: &childBlock.DefRange,
+			})
+			continue
+		}
+
+		targetMap[key] = val
+	}
+
+	if len(elems) == 0 {
+		return cty.MapValEmpty(s.Nested.impliedType()), diags
+	}
+
+	var ctyMap func(map[string]interface{}, int) cty.Value
+	ctyMap = func(raw map[string]interface{}, depth int) cty.Value {
+		vals := make(map[string]cty.Value, len(raw))
+		if depth == 1 {
+			for k, v := range raw {
+				vals[k] = v.(cty.Value)
+			}
+		} else {
+			for k, v := range raw {
+				vals[k] = ctyMap(v.(map[string]interface{}), depth-1)
+			}
+		}
+		return cty.MapVal(vals)
+	}
+
+	return ctyMap(elems, len(s.LabelNames)), diags
+}
+
+func (s *BlockMapSpec) impliedType() cty.Type {
+	ret := s.Nested.impliedType()
+	for range s.LabelNames {
+		ret = cty.Map(ret)
+	}
+	return ret
+}
+
+func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We return the source range of the _first_ block of the given type,
+	// since they are not guaranteed to form a contiguous range.
+
+	var childBlock *hcl.Block
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+
+		childBlock = candidate
+		break
+	}
+
+	if childBlock == nil {
+		return content.MissingItemRange
+	}
+
+	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
+}
+
+// A BlockLabelSpec is a Spec that returns a cty.String representing the
+// label of the block its given body belongs to, if indeed its given body
+// belongs to a block. It is a programming error to use this in a non-block
+// context, so this spec will panic in that case.
+//
+// This spec only works in the nested spec within a BlockSpec, BlockListSpec,
+// BlockSetSpec or BlockMapSpec.
+//
+// The full set of label specs used against a particular block must have a
+// consecutive set of indices starting at zero. The maximum index found
+// defines how many labels the corresponding blocks must have in HCL source.
+type BlockLabelSpec struct {
+	Index int
+	Name  string
+}
+
+func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node
+}
+
+func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	if s.Index >= len(blockLabels) {
+		panic("BlockLabelSpec used in non-block context")
+	}
+
+	return cty.StringVal(blockLabels[s.Index].Value), nil
+}
+
+func (s *BlockLabelSpec) impliedType() cty.Type {
+	return cty.String // labels are always strings
+}
+
+func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	if s.Index >= len(blockLabels) {
+		panic("BlockLabelSpec used in non-block context")
+	}
+
+	return blockLabels[s.Index].Range
+}
+
+func findLabelSpecs(spec Spec) []string {
+	maxIdx := -1
+	var names map[int]string
+
+	var visit visitFunc
+	visit = func(s Spec) {
+		if ls, ok := s.(*BlockLabelSpec); ok {
+			if maxIdx < ls.Index {
+				maxIdx = ls.Index
+			}
+			if names == nil {
+				names = make(map[int]string)
+			}
+			names[ls.Index] = ls.Name
+		}
+		s.visitSameBodyChildren(visit)
+	}
+
+	visit(spec)
+
+	if maxIdx < 0 {
+		return nil // no labels at all
+	}
+
+	ret := make([]string, maxIdx+1)
+	for i := range ret {
+		name := names[i]
+		if name == "" {
+			// Should never happen if the spec is conformant, since we require
+			// consecutive indices starting at zero.
+			name = fmt.Sprintf("missing%02d", i)
+		}
+		ret[i] = name
+	}
+
+	return ret
+}
+
+// DefaultSpec is a spec that wraps two specs, evaluating the primary first
+// and then evaluating the default if the primary returns a null value.
+//
+// The two specifications must have the same implied result type for correct
+// operation. If not, the result is undefined.
+type DefaultSpec struct {
+	Primary Spec
+	Default Spec
+}
+
+func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) {
+	cb(s.Primary)
+	cb(s.Default)
+}
+
+func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	val, diags := s.Primary.decode(content, blockLabels, ctx)
+	if val.IsNull() {
+		var moreDiags hcl.Diagnostics
+		val, moreDiags = s.Default.decode(content, blockLabels, ctx)
+		diags = append(diags, moreDiags...)
+ } + return val, diags +} + +func (s *DefaultSpec) impliedType() cty.Type { + return s.Primary.impliedType() +} + +func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We can't tell from here which of the two specs will ultimately be used + // in our result, so we'll just assume the first. This is usually the right + // choice because the default is often a literal spec that doesn't have a + // reasonable source range to return anyway. + return s.Primary.sourceRange(content, blockLabels) +} + +// TransformExprSpec is a spec that wraps another and then evaluates a given +// hcl.Expression on the result. +// +// The implied type of this spec is determined by evaluating the expression +// with an unknown value of the nested spec's implied type, which may cause +// the result to be imprecise. This spec should not be used in situations where +// precise result type information is needed. +type TransformExprSpec struct { + Wrapped Spec + Expr hcl.Expression + TransformCtx *hcl.EvalContext + VarName string +} + +func (s *TransformExprSpec) visitSameBodyChildren(cb visitFunc) { + cb(s.Wrapped) +} + +func (s *TransformExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) + if diags.HasErrors() { + // We won't try to run our function in this case, because it'll probably + // generate confusing additional errors that will distract from the + // root cause. + return cty.UnknownVal(s.impliedType()), diags + } + + chiCtx := s.TransformCtx.NewChild() + chiCtx.Variables = map[string]cty.Value{ + s.VarName: wrappedVal, + } + resultVal, resultDiags := s.Expr.Value(chiCtx) + diags = append(diags, resultDiags...) + return resultVal, diags +} + +func (s *TransformExprSpec) impliedType() cty.Type { + wrappedTy := s.Wrapped.impliedType() + chiCtx := s.TransformCtx.NewChild() + chiCtx.Variables = map[string]cty.Value{ + s.VarName: cty.UnknownVal(wrappedTy), + } + resultVal, _ := s.Expr.Value(chiCtx) + return resultVal.Type() +} + +func (s *TransformExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We'll just pass through our wrapped range here, even though that's + // not super-accurate, because there's nothing better to return. + return s.Wrapped.sourceRange(content, blockLabels) +} + +// TransformFuncSpec is a spec that wraps another and then evaluates a given +// cty function with the result. The given function must expect exactly one +// argument, where the result of the wrapped spec will be passed. +// +// The implied type of this spec is determined by type-checking the function +// with an unknown value of the nested spec's implied type, which may cause +// the result to be imprecise. This spec should not be used in situations where +// precise result type information is needed. +// +// If the given function produces an error when run, this spec will produce +// a non-user-actionable diagnostic message. It's the caller's responsibility +// to ensure that the given function cannot fail for any non-error result +// of the wrapped spec. 
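+//
+// Illustrative sketch (editorial addition, not part of the upstream
+// source): a wrapped spec's result can be post-processed with a cty
+// function built via go-cty's function package. The normalization shown
+// here is hypothetical, and the sketch assumes the "strings" and
+// "go-cty/cty/function" imports:
+//
+//     upper := function.New(&function.Spec{
+//         Params: []function.Parameter{
+//             {Name: "str", Type: cty.String},
+//         },
+//         Type: function.StaticReturnType(cty.String),
+//         Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+//             return cty.StringVal(strings.ToUpper(args[0].AsString())), nil
+//         },
+//     })
+//     spec := &hcldec.TransformFuncSpec{
+//         Wrapped: &hcldec.AttrSpec{Name: "mode", Type: cty.String},
+//         Func:    upper,
+//     }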
+type TransformFuncSpec struct { + Wrapped Spec + Func function.Function +} + +func (s *TransformFuncSpec) visitSameBodyChildren(cb visitFunc) { + cb(s.Wrapped) +} + +func (s *TransformFuncSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx) + if diags.HasErrors() { + // We won't try to run our function in this case, because it'll probably + // generate confusing additional errors that will distract from the + // root cause. + return cty.UnknownVal(s.impliedType()), diags + } + + resultVal, err := s.Func.Call([]cty.Value{wrappedVal}) + if err != nil { + // This is not a good example of a diagnostic because it is reporting + // a programming error in the calling application, rather than something + // an end-user could act on. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Transform function failed", + Detail: fmt.Sprintf("Decoder transform returned an error: %s", err), + Subject: s.sourceRange(content, blockLabels).Ptr(), + }) + return cty.UnknownVal(s.impliedType()), diags + } + + return resultVal, diags +} + +func (s *TransformFuncSpec) impliedType() cty.Type { + wrappedTy := s.Wrapped.impliedType() + resultTy, err := s.Func.ReturnType([]cty.Type{wrappedTy}) + if err != nil { + // Should never happen with a correctly-configured spec + return cty.DynamicPseudoType + } + + return resultTy +} + +func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We'll just pass through our wrapped range here, even though that's + // not super-accurate, because there's nothing better to return. + return s.Wrapped.sourceRange(content, blockLabels) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go new file mode 100644 index 00000000..427b0d0e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go @@ -0,0 +1,34 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Variables processes the given body with the given spec and returns a +// list of the variable traversals that would be required to decode +// the same pairing of body and spec. +// +// This can be used to conditionally populate the variables in the EvalContext +// passed to Decode, for applications where a static scope is insufficient. +// +// If the given body is not compliant with the given schema, the result may +// be incomplete, but that's assumed to be okay because the eventual call +// to Decode will produce error diagnostics anyway. +func Variables(body hcl.Body, spec Spec) []hcl.Traversal { + schema := ImpliedSchema(spec) + + content, _, _ := body.PartialContent(schema) + + var vars []hcl.Traversal + + if vs, ok := spec.(specNeedingVariables); ok { + vars = append(vars, vs.variablesNeeded(content)...) + } + spec.visitSameBodyChildren(func(s Spec) { + if vs, ok := s.(specNeedingVariables); ok { + vars = append(vars, vs.variablesNeeded(content)...) 
+ } + }) + + return vars +} diff --git a/vendor/github.com/hashicorp/hcl2/hclparse/parser.go b/vendor/github.com/hashicorp/hcl2/hclparse/parser.go new file mode 100644 index 00000000..6d47f126 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hclparse/parser.go @@ -0,0 +1,123 @@ +package hclparse + +import ( + "fmt" + "io/ioutil" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/hcl2/hcl/json" +) + +// NOTE: This is the public interface for parsing. The actual parsers are +// in other packages alongside this one, with this package just wrapping them +// to provide a unified interface for the caller across all supported formats. + +// Parser is the main interface for parsing configuration files. As well as +// parsing files, a parser also retains a registry of all of the files it +// has parsed so that multiple attempts to parse the same file will return +// the same object and so the collected files can be used when printing +// diagnostics. +// +// Any diagnostics for parsing a file are only returned once on the first +// call to parse that file. Callers are expected to collect up diagnostics +// and present them together, so returning diagnostics for the same file +// multiple times would create a confusing result. +type Parser struct { + files map[string]*hcl.File +} + +// NewParser creates a new parser, ready to parse configuration files. +func NewParser() *Parser { + return &Parser{ + files: map[string]*hcl.File{}, + } +} + +// ParseHCL parses the given buffer (which is assumed to have been loaded from +// the given filename) as a native-syntax configuration file and returns the +// hcl.File object representing it. +func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1}) + p.files[filename] = file + return file, diags +} + +// ParseHCLFile reads the given filename and parses it as a native-syntax HCL +// configuration file. An error diagnostic is returned if the given file +// cannot be read. +func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + src, err := ioutil.ReadFile(filename) + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to read file", + Detail: fmt.Sprintf("The configuration file %q could not be read.", filename), + }, + } + } + + return p.ParseHCL(src, filename) +} + +// ParseJSON parses the given JSON buffer (which is assumed to have been loaded +// from the given filename) and returns the hcl.File object representing it. +func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + if existing := p.files[filename]; existing != nil { + return existing, nil + } + + file, diags := json.Parse(src, filename) + p.files[filename] = file + return file, diags +} + +// ParseJSONFile reads the given filename and parses it as JSON, similarly to +// ParseJSON. An error diagnostic is returned if the given file cannot be read. 
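+//
+// Illustrative sketch of typical parser usage (editorial addition, not
+// part of the upstream source; the filename is hypothetical):
+//
+//     p := hclparse.NewParser()
+//     f, diags := p.ParseHCLFile("main.tf")
+//     if diags.HasErrors() {
+//         // p.Files() and p.Sources() provide context for printing diagnostics
+//     }
+//     _ = f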
+func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) {
+	if existing := p.files[filename]; existing != nil {
+		return existing, nil
+	}
+
+	file, diags := json.ParseFile(filename)
+	p.files[filename] = file
+	return file, diags
+}
+
+// AddFile allows a caller to record in a parser a file that was parsed some
+// other way, thus allowing it to be included in the registry of sources.
+func (p *Parser) AddFile(filename string, file *hcl.File) {
+	p.files[filename] = file
+}
+
+// Sources returns a map from filenames to the raw source code that was
+// read from them. This is intended to be used, for example, to print
+// diagnostics with contextual information.
+//
+// The arrays underlying the returned slices should not be modified.
+func (p *Parser) Sources() map[string][]byte {
+	ret := make(map[string][]byte)
+	for fn, f := range p.files {
+		ret[fn] = f.Bytes
+	}
+	return ret
+}
+
+// Files returns a map from filenames to the File objects produced from them.
+// This is intended to be used, for example, to print diagnostics with
+// contextual information.
+//
+// The returned map and all of the objects it refers to directly or indirectly
+// must not be modified.
+func (p *Parser) Files() map[string]*hcl.File {
+	return p.files
+}
diff --git a/vendor/github.com/hashicorp/hil/.gitignore b/vendor/github.com/hashicorp/hil/.gitignore
new file mode 100644
index 00000000..9d6e5df3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/.gitignore
@@ -0,0 +1,3 @@
+.DS_Store
+.idea
+*.iml
diff --git a/vendor/github.com/hashicorp/hil/.travis.yml b/vendor/github.com/hashicorp/hil/.travis.yml
new file mode 100644
index 00000000..a7854442
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/.travis.yml
@@ -0,0 +1,3 @@
+sudo: false
+language: go
+go: 1.7
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
index e36dd42d..18880c60 100644
--- a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
+++ b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
@@ -5,9 +5,20 @@ type ArithmeticOp int
 
 const (
 	ArithmeticOpInvalid ArithmeticOp = 0
-	ArithmeticOpAdd     ArithmeticOp = iota
+
+	ArithmeticOpAdd ArithmeticOp = iota
 	ArithmeticOpSub
 	ArithmeticOpMul
 	ArithmeticOpDiv
 	ArithmeticOpMod
+
+	ArithmeticOpLogicalAnd
+	ArithmeticOpLogicalOr
+
+	ArithmeticOpEqual
+	ArithmeticOpNotEqual
+	ArithmeticOpLessThan
+	ArithmeticOpLessThanOrEqual
+	ArithmeticOpGreaterThan
+	ArithmeticOpGreaterThanOrEqual
 )
diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go
index 5d8d7555..c6350f8b 100644
--- a/vendor/github.com/hashicorp/hil/ast/ast.go
+++ b/vendor/github.com/hashicorp/hil/ast/ast.go
@@ -19,13 +19,22 @@ type Node interface {
 
 // Pos is the starting position of an AST node
 type Pos struct {
-	Column, Line int // Column/Line number, starting at 1
+	Column, Line int    // Column/Line number, starting at 1
+	Filename     string // Optional source filename, if known
 }
 
 func (p Pos) String() string {
-	return fmt.Sprintf("%d:%d", p.Line, p.Column)
+	if p.Filename == "" {
+		return fmt.Sprintf("%d:%d", p.Line, p.Column)
+	} else {
+		return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
+	}
 }
 
+// InitPos is an initial position value. This should be used as
+// the starting position (presets the column and line to 1).
+var InitPos = Pos{Column: 1, Line: 1}
+
 // Visitors are just implementations of this function.
 //
 // The function must return the Node to replace this node with.
"nil" is @@ -49,11 +58,19 @@ type Type uint32 const ( TypeInvalid Type = 0 TypeAny Type = 1 << iota + TypeBool TypeString TypeInt TypeFloat TypeList TypeMap + + // This is a special type used by Terraform to mark "unknown" values. + // It is impossible for this type to be introduced into your HIL programs + // unless you explicitly set a variable to this value. In that case, + // any operation including the variable will return "TypeUnknown" as the + // type. + TypeUnknown ) func (t Type) Printable() string { @@ -62,6 +79,8 @@ func (t Type) Printable() string { return "invalid type" case TypeAny: return "any type" + case TypeBool: + return "type bool" case TypeString: return "type string" case TypeInt: @@ -72,6 +91,8 @@ func (t Type) Printable() string { return "type list" case TypeMap: return "type map" + case TypeUnknown: + return "type unknown" default: return "unknown type" } diff --git a/vendor/github.com/hashicorp/hil/ast/conditional.go b/vendor/github.com/hashicorp/hil/ast/conditional.go new file mode 100644 index 00000000..be48f89d --- /dev/null +++ b/vendor/github.com/hashicorp/hil/ast/conditional.go @@ -0,0 +1,36 @@ +package ast + +import ( + "fmt" +) + +type Conditional struct { + CondExpr Node + TrueExpr Node + FalseExpr Node + Posx Pos +} + +// Accept passes the given visitor to the child nodes in this order: +// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor. +func (n *Conditional) Accept(v Visitor) Node { + n.CondExpr = n.CondExpr.Accept(v) + n.TrueExpr = n.TrueExpr.Accept(v) + n.FalseExpr = n.FalseExpr.Accept(v) + + return v(n) +} + +func (n *Conditional) Pos() Pos { + return n.Posx +} + +func (n *Conditional) Type(Scope) (Type, error) { + // This is not actually a useful value; the type checker ignores + // this function when analyzing conditionals, just as with Arithmetic. + return TypeInt, nil +} + +func (n *Conditional) GoString() string { + return fmt.Sprintf("*%#v", *n) +} diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go index 49a3b9c3..860c25fd 100644 --- a/vendor/github.com/hashicorp/hil/ast/index.go +++ b/vendor/github.com/hashicorp/hil/ast/index.go @@ -13,6 +13,8 @@ type Index struct { } func (n *Index) Accept(v Visitor) Node { + n.Target = n.Target.Accept(v) + n.Key = n.Key.Accept(v) return v(n) } diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go index 1714ff02..da6014fe 100644 --- a/vendor/github.com/hashicorp/hil/ast/literal.go +++ b/vendor/github.com/hashicorp/hil/ast/literal.go @@ -2,6 +2,7 @@ package ast import ( "fmt" + "reflect" ) // LiteralNode represents a single literal value, such as "foo" or @@ -12,6 +13,51 @@ type LiteralNode struct { Posx Pos } +// NewLiteralNode returns a new literal node representing the given +// literal Go value, which must correspond to one of the primitive types +// supported by HIL. Lists and maps cannot currently be constructed via +// this function. +// +// If an inappropriately-typed value is provided, this function will +// return an error. The main intended use of this function is to produce +// "synthetic" literals from constants in code, where the value type is +// well known at compile time. To easily store these in global variables, +// see also MustNewLiteralNode. 
+func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) {
+	goType := reflect.TypeOf(value)
+	var hilType Type
+
+	switch goType.Kind() {
+	case reflect.Bool:
+		hilType = TypeBool
+	case reflect.Int:
+		hilType = TypeInt
+	case reflect.Float64:
+		hilType = TypeFloat
+	case reflect.String:
+		hilType = TypeString
+	default:
+		return nil, fmt.Errorf("unsupported literal node type: %T", value)
+	}
+
+	return &LiteralNode{
+		Value: value,
+		Typex: hilType,
+		Posx:  pos,
+	}, nil
+}
+
+// MustNewLiteralNode wraps NewLiteralNode and panics if an error is
+// returned, thus allowing valid literal nodes to be easily assigned to
+// global variables.
+func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode {
+	node, err := NewLiteralNode(value, pos)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
 func (n *LiteralNode) Accept(v Visitor) Node {
 	return v(n)
 }
@@ -31,3 +77,12 @@ func (n *LiteralNode) String() string {
 func (n *LiteralNode) Type(Scope) (Type, error) {
 	return n.Typex, nil
 }
+
+// IsUnknown returns true either if the node's value is itself unknown
+// or if it is a collection containing any unknown elements, deeply.
+func (n *LiteralNode) IsUnknown() bool {
+	return IsUnknown(Variable{
+		Type:  n.Typex,
+		Value: n.Value,
+	})
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go
index 11793ea5..1f51a98d 100644
--- a/vendor/github.com/hashicorp/hil/ast/type_string.go
+++ b/vendor/github.com/hashicorp/hil/ast/type_string.go
@@ -7,21 +7,25 @@ import "fmt"
 const (
 	_Type_name_0 = "TypeInvalid"
 	_Type_name_1 = "TypeAny"
-	_Type_name_2 = "TypeString"
-	_Type_name_3 = "TypeInt"
-	_Type_name_4 = "TypeFloat"
-	_Type_name_5 = "TypeList"
-	_Type_name_6 = "TypeMap"
+	_Type_name_2 = "TypeBool"
+	_Type_name_3 = "TypeString"
+	_Type_name_4 = "TypeInt"
+	_Type_name_5 = "TypeFloat"
+	_Type_name_6 = "TypeList"
+	_Type_name_7 = "TypeMap"
+	_Type_name_8 = "TypeUnknown"
 )
 
 var (
 	_Type_index_0 = [...]uint8{0, 11}
 	_Type_index_1 = [...]uint8{0, 7}
-	_Type_index_2 = [...]uint8{0, 10}
-	_Type_index_3 = [...]uint8{0, 7}
-	_Type_index_4 = [...]uint8{0, 9}
-	_Type_index_5 = [...]uint8{0, 8}
-	_Type_index_6 = [...]uint8{0, 7}
+	_Type_index_2 = [...]uint8{0, 8}
+	_Type_index_3 = [...]uint8{0, 10}
+	_Type_index_4 = [...]uint8{0, 7}
+	_Type_index_5 = [...]uint8{0, 9}
+	_Type_index_6 = [...]uint8{0, 8}
+	_Type_index_7 = [...]uint8{0, 7}
+	_Type_index_8 = [...]uint8{0, 11}
 )
 
 func (i Type) String() string {
@@ -40,6 +44,10 @@ func (i Type) String() string {
 		return _Type_name_5
 	case i == 64:
 		return _Type_name_6
+	case i == 128:
+		return _Type_name_7
+	case i == 256:
+		return _Type_name_8
 	default:
 		return fmt.Sprintf("Type(%d)", i)
 	}
diff --git a/vendor/github.com/hashicorp/hil/ast/unknown.go b/vendor/github.com/hashicorp/hil/ast/unknown.go
new file mode 100644
index 00000000..d6ddaecc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/ast/unknown.go
@@ -0,0 +1,30 @@
+package ast
+
+// IsUnknown reports whether a variable is unknown or contains any value
+// that is unknown. This will recurse into lists and maps and so on.
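+//
+// For example (an illustrative sketch), a list is unknown as soon as any
+// of its elements is unknown:
+//
+//	v := Variable{Type: TypeList, Value: []Variable{
+//		{Type: TypeString, Value: "known"},
+//		{Type: TypeUnknown},
+//	}}
+//	IsUnknown(v) // true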
+func IsUnknown(v Variable) bool {
+	// If it is unknown itself, return true
+	if v.Type == TypeUnknown {
+		return true
+	}
+
+	// If it is a container type, check the values
+	switch v.Type {
+	case TypeList:
+		for _, el := range v.Value.([]Variable) {
+			if IsUnknown(el) {
+				return true
+			}
+		}
+	case TypeMap:
+		for _, el := range v.Value.(map[string]Variable) {
+			if IsUnknown(el) {
+				return true
+			}
+		}
+	default:
+	}
+
+	// Not a container type, or a container whose values all survived the
+	// checks above
+	return false
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
index 4b328419..06bd18de 100644
--- a/vendor/github.com/hashicorp/hil/ast/variables_helper.go
+++ b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
@@ -3,43 +3,61 @@ package ast
 import "fmt"
 
 func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) {
-	listTypes := make(map[Type]struct{})
+	if len(list) == 0 {
+		return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
+	}
+
+	elemType := TypeUnknown
 	for _, v := range list {
-		if _, ok := listTypes[v.Type]; ok {
+		if v.Type == TypeUnknown {
 			continue
 		}
-		listTypes[v.Type] = struct{}{}
-	}
 
-	if len(listTypes) != 1 && len(list) != 0 {
-		return TypeInvalid, fmt.Errorf("list %q does not have homogenous types. found %s", variableName, reportTypes(listTypes))
-	}
+		if elemType == TypeUnknown {
+			elemType = v.Type
+			continue
+		}
+
+		if v.Type != elemType {
+			return TypeInvalid, fmt.Errorf(
+				"list %q does not have homogenous types. found %s and then %s",
+				variableName,
+				elemType, v.Type,
+			)
+		}
 
-	if len(list) > 0 {
-		return list[0].Type, nil
+		elemType = v.Type
 	}
 
-	return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
+	return elemType, nil
 }
 
 func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) {
-	valueTypes := make(map[Type]struct{})
+	if len(vmap) == 0 {
+		return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName)
+	}
+
+	elemType := TypeUnknown
 	for _, v := range vmap {
-		if _, ok := valueTypes[v.Type]; ok {
+		if v.Type == TypeUnknown {
 			continue
 		}
-		valueTypes[v.Type] = struct{}{}
-	}
 
-	if len(valueTypes) != 1 && len(vmap) != 0 {
-		return TypeInvalid, fmt.Errorf("map %q does not have homogenous value types. found %s", variableName, reportTypes(valueTypes))
-	}
+		if elemType == TypeUnknown {
+			elemType = v.Type
+			continue
+		}
 
-	// For loop here is an easy way to get a single key, we return immediately.
-	for _, v := range vmap {
-		return v.Type, nil
+		if v.Type != elemType {
+			return TypeInvalid, fmt.Errorf(
+				"map %q does not have homogenous types. 
found %s and then %s", + variableName, + elemType, v.Type, + ) + } + + elemType = v.Type } - // This means the map is empty - return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName) + return elemType, nil } diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go index 444bab39..909c788a 100644 --- a/vendor/github.com/hashicorp/hil/builtins.go +++ b/vendor/github.com/hashicorp/hil/builtins.go @@ -18,16 +18,23 @@ func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope { } // Implicit conversions + scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString() scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt() scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString() scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat() scope.FuncMap["__builtin_IntToString"] = builtinIntToString() scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt() scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat() + scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool() // Math operations scope.FuncMap["__builtin_IntMath"] = builtinIntMath() scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath() + scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare() + scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare() + scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare() + scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare() + scope.FuncMap["__builtin_Logical"] = builtinLogical() return scope } @@ -97,6 +104,136 @@ func builtinIntMath() ast.Function { } } +func builtinBoolCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(bool) + rhs := args[2].(bool) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + default: + return nil, errors.New("invalid comparison operation") + } + }, + } +} + +func builtinFloatCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(float64) + rhs := args[2].(float64) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + case ast.ArithmeticOpLessThan: + return lhs < rhs, nil + case ast.ArithmeticOpLessThanOrEqual: + return lhs <= rhs, nil + case ast.ArithmeticOpGreaterThan: + return lhs > rhs, nil + case ast.ArithmeticOpGreaterThanOrEqual: + return lhs >= rhs, nil + default: + return nil, errors.New("invalid comparison operation") + } + }, + } +} + +func builtinIntCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(int) + rhs := args[2].(int) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + case ast.ArithmeticOpLessThan: + return lhs < rhs, nil + case ast.ArithmeticOpLessThanOrEqual: + return lhs <= rhs, nil + case 
ast.ArithmeticOpGreaterThan: + return lhs > rhs, nil + case ast.ArithmeticOpGreaterThanOrEqual: + return lhs >= rhs, nil + default: + return nil, errors.New("invalid comparison operation") + } + }, + } +} + +func builtinStringCompare() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString}, + Variadic: false, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + lhs := args[1].(string) + rhs := args[2].(string) + + switch op { + case ast.ArithmeticOpEqual: + return lhs == rhs, nil + case ast.ArithmeticOpNotEqual: + return lhs != rhs, nil + default: + return nil, errors.New("invalid comparison operation") + } + }, + } +} + +func builtinLogical() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeInt}, + Variadic: true, + VariadicType: ast.TypeBool, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + op := args[0].(ast.ArithmeticOp) + result := args[1].(bool) + for _, raw := range args[2:] { + arg := raw.(bool) + switch op { + case ast.ArithmeticOpLogicalOr: + result = result || arg + case ast.ArithmeticOpLogicalAnd: + result = result && arg + default: + return nil, errors.New("invalid logical operator") + } + } + + return result, nil + }, + } +} + func builtinFloatToInt() ast.Function { return ast.Function{ ArgTypes: []ast.Type{ast.TypeFloat}, @@ -167,3 +304,28 @@ func builtinStringToFloat() ast.Function { }, } } + +func builtinBoolToString() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeBool}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + return strconv.FormatBool(args[0].(bool)), nil + }, + } +} + +func builtinStringToBool() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeBool, + Callback: func(args []interface{}) (interface{}, error) { + v, err := strconv.ParseBool(args[0].(string)) + if err != nil { + return nil, err + } + + return v, nil + }, + } +} diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go index 4b35d114..f16da391 100644 --- a/vendor/github.com/hashicorp/hil/check_types.go +++ b/vendor/github.com/hashicorp/hil/check_types.go @@ -44,6 +44,12 @@ func (v *TypeCheck) Visit(root ast.Node) error { defer v.lock.Unlock() defer v.reset() root.Accept(v.visit) + + // If the resulting type is unknown, then just let the whole thing go. 
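+	// (errExitUnknown is the sentinel defined in eval.go; it signals an
+	// early exit rather than a real failure, so reaching it here just
+	// means the expression's overall type is unknown.)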
+ if v.err == errExitUnknown { + v.err = nil + } + return v.err } @@ -61,6 +67,9 @@ func (v *TypeCheck) visit(raw ast.Node) ast.Node { case *ast.Call: tc := &typeCheckCall{n} result, err = tc.TypeCheck(v) + case *ast.Conditional: + tc := &typeCheckConditional{n} + result, err = tc.TypeCheck(v) case *ast.Index: tc := &typeCheckIndex{n} result, err = tc.TypeCheck(v) @@ -103,6 +112,28 @@ func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) { exprs[len(tc.n.Exprs)-1-i] = v.StackPop() } + // If any operand is unknown then our result is automatically unknown + for _, ty := range exprs { + if ty == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + } + + switch tc.n.Op { + case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr: + return tc.checkLogical(v, exprs) + case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual, + ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan, + ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual: + return tc.checkComparison(v, exprs) + default: + return tc.checkNumeric(v, exprs) + } + +} + +func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { // Determine the resulting type we want. We do this by going over // every expression until we find one with a type we recognize. // We do this because the first expr might be a string ("var.foo") @@ -110,20 +141,11 @@ func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) { mathFunc := "__builtin_IntMath" mathType := ast.TypeInt for _, v := range exprs { - exit := true - switch v { - case ast.TypeInt: - mathFunc = "__builtin_IntMath" - mathType = v - case ast.TypeFloat: + // We assume int math but if we find ANY float, the entire + // expression turns into floating point math. + if v == ast.TypeFloat { mathFunc = "__builtin_FloatMath" mathType = v - default: - exit = false - } - - // We found the type, so leave - if exit { break } } @@ -167,6 +189,131 @@ func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) { }, nil } +func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { + if len(exprs) != 2 { + // This should never happen, because the parser never produces + // nodes that violate this. + return nil, fmt.Errorf( + "comparison operators must have exactly two operands", + ) + } + + // The first operand always dictates the type for a comparison. + compareFunc := "" + compareType := exprs[0] + switch compareType { + case ast.TypeBool: + compareFunc = "__builtin_BoolCompare" + case ast.TypeFloat: + compareFunc = "__builtin_FloatCompare" + case ast.TypeInt: + compareFunc = "__builtin_IntCompare" + case ast.TypeString: + compareFunc = "__builtin_StringCompare" + default: + return nil, fmt.Errorf( + "comparison operators apply only to bool, float, int, and string", + ) + } + + // For non-equality comparisons, we will do implicit conversions to + // integer types if possible. In this case, we need to go through and + // determine the type of comparison we're doing to enable the implicit + // conversion. 
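+	// (Sketch: in `1 < 1.5` the float operand switches the whole comparison
+	// to __builtin_FloatCompare, and the int operand is implicitly
+	// converted below.)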
+ if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual { + compareFunc = "__builtin_IntCompare" + compareType = ast.TypeInt + for _, expr := range exprs { + if expr == ast.TypeFloat { + compareFunc = "__builtin_FloatCompare" + compareType = ast.TypeFloat + break + } + } + } + + // Verify (and possibly, convert) the args + for i, arg := range exprs { + if arg != compareType { + cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i]) + if cn != nil { + tc.n.Exprs[i] = cn + continue + } + + return nil, fmt.Errorf( + "operand %d should be %s, got %s", + i+1, compareType, arg, + ) + } + } + + // Only ints and floats can have the <, >, <= and >= operators applied + switch tc.n.Op { + case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual: + // anything goes + default: + switch compareType { + case ast.TypeFloat, ast.TypeInt: + // fine + default: + return nil, fmt.Errorf( + "<, >, <= and >= may apply only to int and float values", + ) + } + } + + // Comparison operators always return bool + v.StackPush(ast.TypeBool) + + // Replace our node with a call to the proper function. This isn't + // type checked but we already verified types. + args := make([]ast.Node, len(tc.n.Exprs)+1) + args[0] = &ast.LiteralNode{ + Value: tc.n.Op, + Typex: ast.TypeInt, + Posx: tc.n.Pos(), + } + copy(args[1:], tc.n.Exprs) + return &ast.Call{ + Func: compareFunc, + Args: args, + Posx: tc.n.Pos(), + }, nil +} + +func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) { + for i, t := range exprs { + if t != ast.TypeBool { + cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i]) + if cn == nil { + return nil, fmt.Errorf( + "logical operators require boolean operands, not %s", + t, + ) + } + tc.n.Exprs[i] = cn + } + } + + // Return type is always boolean + v.StackPush(ast.TypeBool) + + // Arithmetic nodes are replaced with a call to a built-in function + args := make([]ast.Node, len(tc.n.Exprs)+1) + args[0] = &ast.LiteralNode{ + Value: tc.n.Op, + Typex: ast.TypeInt, + Posx: tc.n.Pos(), + } + copy(args[1:], tc.n.Exprs) + return &ast.Call{ + Func: "__builtin_Logical", + Args: args, + Posx: tc.n.Pos(), + }, nil +} + type typeCheckCall struct { n *ast.Call } @@ -190,6 +337,11 @@ func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { continue } + if args[i] == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + if args[i] != expected { cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i]) if cn != nil { @@ -207,6 +359,11 @@ func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { if function.Variadic && function.VariadicType != ast.TypeAny { args = args[len(function.ArgTypes):] for i, t := range args { + if t == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + if t != function.VariadicType { realI := i + len(function.ArgTypes) cn := v.ImplicitConversion( @@ -230,6 +387,90 @@ func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { return tc.n, nil } +type typeCheckConditional struct { + n *ast.Conditional +} + +func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) { + // On the stack we have the types of the condition, true and false + // expressions, but they are in reverse order. 
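+	// (For `${var.c ? "a" : "b"}` the pops below therefore yield the false
+	// branch's type first and the condition's type last.)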
+ falseType := v.StackPop() + trueType := v.StackPop() + condType := v.StackPop() + + if condType == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + + if condType != ast.TypeBool { + cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr) + if cn == nil { + return nil, fmt.Errorf( + "condition must be type bool, not %s", condType.Printable(), + ) + } + tc.n.CondExpr = cn + } + + // The types of the true and false expression must match + if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown { + + // Since passing around stringified versions of other types is + // common, we pragmatically allow the false expression to dictate + // the result type when the true expression is a string. + if trueType == ast.TypeString { + cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr) + if cn == nil { + return nil, fmt.Errorf( + "true and false expression types must match; have %s and %s", + trueType.Printable(), falseType.Printable(), + ) + } + tc.n.TrueExpr = cn + trueType = falseType + } else { + cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr) + if cn == nil { + return nil, fmt.Errorf( + "true and false expression types must match; have %s and %s", + trueType.Printable(), falseType.Printable(), + ) + } + tc.n.FalseExpr = cn + falseType = trueType + } + } + + // Currently list and map types cannot be used, because we cannot + // generally assert that their element types are consistent. + // Such support might be added later, either by improving the type + // system or restricting usage to only variable and literal expressions, + // but for now this is simply prohibited because it doesn't seem to + // be a common enough case to be worth the complexity. + switch trueType { + case ast.TypeList: + return nil, fmt.Errorf( + "conditional operator cannot be used with list values", + ) + case ast.TypeMap: + return nil, fmt.Errorf( + "conditional operator cannot be used with map values", + ) + } + + // Result type (guaranteed to also match falseType due to the above) + if trueType == ast.TypeUnknown { + // falseType may also be unknown, but that's okay because two + // unknowns means our result is unknown anyway. 
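+		// (Illustratively: unknown vs. string pushes string; unknown vs.
+		// unknown pushes unknown.)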
+ v.StackPush(falseType) + } else { + v.StackPush(trueType) + } + + return tc.n, nil +} + type typeCheckOutput struct { n *ast.Output } @@ -241,20 +482,33 @@ func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { types[len(n.Exprs)-1-i] = v.StackPop() } - // If there is only one argument and it is a list, we evaluate to a list - if len(types) == 1 && types[0] == ast.TypeList { - v.StackPush(ast.TypeList) - return n, nil + for _, ty := range types { + if ty == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } } - // If there is only one argument and it is a map, we evaluate to a map - if len(types) == 1 && types[0] == ast.TypeMap { - v.StackPush(ast.TypeMap) - return n, nil + // If there is only one argument and it is a list, we evaluate to a list + if len(types) == 1 { + switch t := types[0]; t { + case ast.TypeList: + fallthrough + case ast.TypeMap: + v.StackPush(t) + return n, nil + } } // Otherwise, all concat args must be strings, so validate that + resultType := ast.TypeString for i, t := range types { + + if t == ast.TypeUnknown { + resultType = ast.TypeUnknown + continue + } + if t != ast.TypeString { cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i]) if cn != nil { @@ -267,8 +521,8 @@ func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { } } - // This always results in type string - v.StackPush(ast.TypeString) + // This always results in type string, unless there are unknowns + v.StackPush(resultType) return n, nil } @@ -305,30 +559,40 @@ type typeCheckIndex struct { } func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) { + keyType := v.StackPop() + targetType := v.StackPop() + + if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + // Ensure we have a VariableAccess as the target varAccessNode, ok := tc.n.Target.(*ast.VariableAccess) if !ok { - return nil, fmt.Errorf("target of an index must be a VariableAccess node, was %T", tc.n.Target) + return nil, fmt.Errorf( + "target of an index must be a VariableAccess node, was %T", tc.n.Target) } // Get the variable variable, ok := v.Scope.LookupVar(varAccessNode.Name) if !ok { - return nil, fmt.Errorf("unknown variable accessed: %s", varAccessNode.Name) - } - - keyType, err := tc.n.Key.Type(v.Scope) - if err != nil { - return nil, err + return nil, fmt.Errorf( + "unknown variable accessed: %s", varAccessNode.Name) } - switch variable.Type { + switch targetType { case ast.TypeList: if keyType != ast.TypeInt { - return nil, fmt.Errorf("key of an index must be an int, was %s", keyType) + tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key) + if tc.n.Key == nil { + return nil, fmt.Errorf( + "key of an index must be an int, was %s", keyType) + } } - valType, err := ast.VariableListElementTypesAreHomogenous(varAccessNode.Name, variable.Value.([]ast.Variable)) + valType, err := ast.VariableListElementTypesAreHomogenous( + varAccessNode.Name, variable.Value.([]ast.Variable)) if err != nil { return tc.n, err } @@ -337,10 +601,15 @@ func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) { return tc.n, nil case ast.TypeMap: if keyType != ast.TypeString { - return nil, fmt.Errorf("key of an index must be a string, was %s", keyType) + tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key) + if tc.n.Key == nil { + return nil, fmt.Errorf( + "key of an index must be a string, was %s", keyType) + } } - valType, err := ast.VariableMapValueTypesAreHomogenous(varAccessNode.Name, 
variable.Value.(map[string]ast.Variable))
+		valType, err := ast.VariableMapValueTypesAreHomogenous(
+			varAccessNode.Name, variable.Value.(map[string]ast.Variable))
 		if err != nil {
 			return tc.n, err
 		}
@@ -389,3 +658,11 @@ func (v *TypeCheck) StackPop() ast.Type {
 	x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1]
 	return x
 }
+
+func (v *TypeCheck) StackPeek() ast.Type {
+	if len(v.Stack) == 0 {
+		return ast.TypeInvalid
+	}
+
+	return v.Stack[len(v.Stack)-1]
+}
diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go
index 3841d1fb..f2024d01 100644
--- a/vendor/github.com/hashicorp/hil/convert.go
+++ b/vendor/github.com/hashicorp/hil/convert.go
@@ -8,6 +8,11 @@ import (
 	"github.com/mitchellh/mapstructure"
 )
 
+// UnknownValue is a sentinel value that can be used to denote
+// that a value of a variable (or map element, list element, etc.)
+// is unknown. This will always have the type ast.TypeUnknown.
+const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
+
 var hilMapstructureDecodeHookSlice []interface{}
 var hilMapstructureDecodeHookStringSlice []string
 var hilMapstructureDecodeHookMap map[string]interface{}
@@ -48,6 +53,12 @@ func InterfaceToVariable(input interface{}) (ast.Variable, error) {
 	var stringVal string
 	if err := hilMapstructureWeakDecode(input, &stringVal); err == nil {
+		// Special case the unknown value to turn into "unknown"
+		if stringVal == UnknownValue {
+			return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil
+		}
+
+		// Otherwise return the string value
 		return ast.Variable{
 			Type:  ast.TypeString,
 			Value: stringVal,
diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go
index 9be0d59f..27820769 100644
--- a/vendor/github.com/hashicorp/hil/eval.go
+++ b/vendor/github.com/hashicorp/hil/eval.go
@@ -2,6 +2,7 @@ package hil
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"sync"
 
@@ -32,6 +33,7 @@ type SemanticChecker func(ast.Node) error
 // TypeString: string
 // TypeList: []interface{}
 // TypeMap: map[string]interface{}
+// TypeBool: bool
 type EvaluationResult struct {
 	Type  EvalType
 	Value interface{}
@@ -42,12 +44,24 @@ type EvaluationResult struct {
 // The error is described out of band in the accompanying error return value.
 var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil}
 
+// errExitUnknown is an internal error that, when returned, means the result
+// is an unknown value. We use this for early exit.
+var errExitUnknown = errors.New("unknown value")
+
 func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
 	output, outputType, err := internalEval(root, config)
 	if err != nil {
 		return InvalidResult, err
 	}
 
+	// If the result contains any nested unknowns then the result as a whole
+	// is unknown, so that callers only have to deal with "entirely known"
+	// or "entirely unknown" as outcomes.
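+	// (For instance, a TypeList result holding one unknown element
+	// collapses here to the single UnknownValue sentinel with TypeUnknown,
+	// rather than surfacing a partially-known list.)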
+ if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) { + outputType = ast.TypeUnknown + output = UnknownValue + } + switch outputType { case ast.TypeList: val, err := VariableToInterface(ast.Variable{ @@ -72,6 +86,16 @@ func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) { Type: TypeString, Value: output, }, nil + case ast.TypeBool: + return EvaluationResult{ + Type: TypeBool, + Value: output, + }, nil + case ast.TypeUnknown: + return EvaluationResult{ + Type: TypeUnknown, + Value: UnknownValue, + }, nil default: return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType) } @@ -97,6 +121,10 @@ func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, err ast.TypeString: { ast.TypeInt: "__builtin_StringToInt", ast.TypeFloat: "__builtin_StringToFloat", + ast.TypeBool: "__builtin_StringToBool", + }, + ast.TypeBool: { + ast.TypeString: "__builtin_BoolToString", }, } @@ -154,6 +182,12 @@ func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) { result = new(ast.LiteralNode) } resultErr := v.err + if resultErr == errExitUnknown { + // This means the return value is unknown and we used the error + // as an early exit mechanism. Reset since the value on the stack + // should be the unknown value. + resultErr = nil + } // Clear everything else so we aren't just dangling v.Stack.Reset() @@ -188,6 +222,13 @@ func (v *evalVisitor) visit(raw ast.Node) ast.Node { Value: out, Typex: outType, }) + + if outType == ast.TypeUnknown { + // Halt immediately + v.err = errExitUnknown + return raw + } + return raw } @@ -199,6 +240,8 @@ func evalNode(raw ast.Node) (EvalNode, error) { return &evalIndex{n}, nil case *ast.Call: return &evalCall{n}, nil + case *ast.Conditional: + return &evalConditional{n}, nil case *ast.Output: return &evalOutput{n}, nil case *ast.LiteralNode: @@ -229,6 +272,10 @@ func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, e args := make([]interface{}, len(v.Args)) for i, _ := range v.Args { node := stack.Pop().(*ast.LiteralNode) + if node.IsUnknown() { + // If any arguments are unknown then the result is automatically unknown + return UnknownValue, ast.TypeUnknown, nil + } args[len(v.Args)-1-i] = node.Value } @@ -241,42 +288,56 @@ func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, e return result, function.ReturnType, nil } -type evalIndex struct{ *ast.Index } +type evalConditional struct{ *ast.Conditional } -func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { - evalVarAccess, err := evalNode(v.Target) - if err != nil { - return nil, ast.TypeInvalid, err - } - target, targetType, err := evalVarAccess.Eval(scope, stack) +func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { + // On the stack we have literal nodes representing the resulting values + // of the condition, true and false expressions, but they are in reverse + // order. 
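+	// (Evaluating `${true ? "a" : "b"}` therefore pops the literal for
+	// "b" first, then "a", and finally the condition value.)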
+	falseLit := stack.Pop().(*ast.LiteralNode)
+	trueLit := stack.Pop().(*ast.LiteralNode)
+	condLit := stack.Pop().(*ast.LiteralNode)
 
-	evalKey, err := evalNode(v.Key)
-	if err != nil {
-		return nil, ast.TypeInvalid, err
+	if condLit.IsUnknown() {
+		// If our conditional is unknown then our result is also unknown
+		return UnknownValue, ast.TypeUnknown, nil
 	}
 
-	key, keyType, err := evalKey.Eval(scope, stack)
-	if err != nil {
-		return nil, ast.TypeInvalid, err
+	if condLit.Value.(bool) {
+		return trueLit.Value, trueLit.Typex, nil
+	} else {
+		return falseLit.Value, falseLit.Typex, nil
 	}
+}
+
+type evalIndex struct{ *ast.Index }
+
+func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+	key := stack.Pop().(*ast.LiteralNode)
+	target := stack.Pop().(*ast.LiteralNode)
 
 	variableName := v.Index.Target.(*ast.VariableAccess).Name
 
-	switch targetType {
-	case ast.TypeList:
-		if keyType != ast.TypeInt {
-			return nil, ast.TypeInvalid, fmt.Errorf("key for indexing list %q must be an int, is %s", variableName, keyType)
-		}
+	if key.IsUnknown() {
+		// If our key is unknown then our result is also unknown
+		return UnknownValue, ast.TypeUnknown, nil
+	}
 
-		return v.evalListIndex(variableName, target, key)
-	case ast.TypeMap:
-		if keyType != ast.TypeString {
-			return nil, ast.TypeInvalid, fmt.Errorf("key for indexing map %q must be a string, is %s", variableName, keyType)
-		}
+	// For target, we'll accept collections containing unknown values but
+	// we still need to catch when the collection itself is unknown, shallowly.
+	if target.Typex == ast.TypeUnknown {
+		return UnknownValue, ast.TypeUnknown, nil
+	}
 
-		return v.evalMapIndex(variableName, target, key)
+	switch target.Typex {
+	case ast.TypeList:
+		return v.evalListIndex(variableName, target.Value, key.Value)
+	case ast.TypeMap:
+		return v.evalMapIndex(variableName, target.Value, key.Value)
 	default:
-		return nil, ast.TypeInvalid, fmt.Errorf("target %q for indexing must be ast.TypeList or ast.TypeMap, is %s", variableName, targetType)
+		return nil, ast.TypeInvalid, fmt.Errorf(
+			"target %q for indexing must be ast.TypeList or ast.TypeMap, is %s",
+			variableName, target.Typex)
 	}
 }
 
@@ -285,12 +346,14 @@ func (v *evalIndex) evalListIndex(variableName string, target interface{}, key i
 	// is a list and key is an int
 	list, ok := target.([]ast.Variable)
 	if !ok {
-		return nil, ast.TypeInvalid, fmt.Errorf("cannot cast target to []Variable")
+		return nil, ast.TypeInvalid, fmt.Errorf(
+			"cannot cast target to []Variable, is: %T", target)
 	}
 
 	keyInt, ok := key.(int)
 	if !ok {
-		return nil, ast.TypeInvalid, fmt.Errorf("cannot cast key to int")
+		return nil, ast.TypeInvalid, fmt.Errorf(
+			"cannot cast key to int, is: %T", key)
 	}
 
 	if len(list) == 0 {
@@ -298,12 +361,13 @@
 	}
 
 	if keyInt < 0 || len(list) < keyInt+1 {
-		return nil, ast.TypeInvalid, fmt.Errorf("index %d out of range for list %s (max %d)", keyInt, variableName, len(list))
+		return nil, ast.TypeInvalid, fmt.Errorf(
+			"index %d out of range for list %s (max %d)",
+			keyInt, variableName, len(list))
 	}
 
 	returnVal := list[keyInt].Value
 	returnType := list[keyInt].Type
-
 	return returnVal, returnType, nil
 }
 
@@ -312,12 +376,14 @@ func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key in
 	// is a map and key is a string
 	vmap, ok := target.(map[string]ast.Variable)
 	if !ok {
-		return nil, ast.TypeInvalid, fmt.Errorf("cannot cast target to map[string]Variable")
+		return nil, 
ast.TypeInvalid, fmt.Errorf( + "cannot cast target to map[string]Variable, is: %T", target) } keyString, ok := key.(string) if !ok { - return nil, ast.TypeInvalid, fmt.Errorf("cannot cast key to string") + return nil, ast.TypeInvalid, fmt.Errorf( + "cannot cast key to string, is: %T", key) } if len(vmap) == 0 { @@ -326,7 +392,8 @@ func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key in value, ok := vmap[keyString] if !ok { - return nil, ast.TypeInvalid, fmt.Errorf("key %q does not exist in map %s", keyString, variableName) + return nil, ast.TypeInvalid, fmt.Errorf( + "key %q does not exist in map %s", keyString, variableName) } return value.Value, value.Type, nil @@ -338,21 +405,47 @@ func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, // The expressions should all be on the stack in reverse // order. So pop them off, reverse their order, and concatenate. nodes := make([]*ast.LiteralNode, 0, len(v.Exprs)) + haveUnknown := false for range v.Exprs { - nodes = append(nodes, stack.Pop().(*ast.LiteralNode)) + n := stack.Pop().(*ast.LiteralNode) + nodes = append(nodes, n) + + // If we have any unknowns then the whole result is unknown + // (we must deal with this first, because the type checker can + // skip type conversions in the presence of unknowns, and thus + // any of our other nodes may be incorrectly typed.) + if n.IsUnknown() { + haveUnknown = true + } } - // Special case the single list and map - if len(nodes) == 1 && nodes[0].Typex == ast.TypeList { - return nodes[0].Value, ast.TypeList, nil + if haveUnknown { + return UnknownValue, ast.TypeUnknown, nil } - if len(nodes) == 1 && nodes[0].Typex == ast.TypeMap { - return nodes[0].Value, ast.TypeMap, nil + + // Special case the single list and map + if len(nodes) == 1 { + switch t := nodes[0].Typex; t { + case ast.TypeList: + fallthrough + case ast.TypeMap: + fallthrough + case ast.TypeUnknown: + return nodes[0].Value, t, nil + } } // Otherwise concatenate the strings var buf bytes.Buffer for i := len(nodes) - 1; i >= 0; i-- { + if nodes[i].Typex != ast.TypeString { + return nil, ast.TypeInvalid, fmt.Errorf( + "invalid output with %s value at index %d: %#v", + nodes[i].Typex, + i, + nodes[i].Value, + ) + } buf.WriteString(nodes[i].Value.(string)) } diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go index c78a25bb..6946ecd2 100644 --- a/vendor/github.com/hashicorp/hil/eval_type.go +++ b/vendor/github.com/hashicorp/hil/eval_type.go @@ -9,6 +9,8 @@ type EvalType uint32 const ( TypeInvalid EvalType = 0 TypeString EvalType = 1 << iota + TypeBool TypeList TypeMap + TypeUnknown ) diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go index f86907a4..b107ddd4 100644 --- a/vendor/github.com/hashicorp/hil/evaltype_string.go +++ b/vendor/github.com/hashicorp/hil/evaltype_string.go @@ -7,15 +7,19 @@ import "fmt" const ( _EvalType_name_0 = "TypeInvalid" _EvalType_name_1 = "TypeString" - _EvalType_name_2 = "TypeList" - _EvalType_name_3 = "TypeMap" + _EvalType_name_2 = "TypeBool" + _EvalType_name_3 = "TypeList" + _EvalType_name_4 = "TypeMap" + _EvalType_name_5 = "TypeUnknown" ) var ( _EvalType_index_0 = [...]uint8{0, 11} _EvalType_index_1 = [...]uint8{0, 10} _EvalType_index_2 = [...]uint8{0, 8} - _EvalType_index_3 = [...]uint8{0, 7} + _EvalType_index_3 = [...]uint8{0, 8} + _EvalType_index_4 = [...]uint8{0, 7} + _EvalType_index_5 = [...]uint8{0, 11} ) func (i EvalType) String() string { 
@@ -28,6 +32,10 @@ func (i EvalType) String() string { return _EvalType_name_2 case i == 8: return _EvalType_name_3 + case i == 16: + return _EvalType_name_4 + case i == 32: + return _EvalType_name_5 default: return fmt.Sprintf("EvalType(%d)", i) } diff --git a/vendor/github.com/hashicorp/hil/lang.y b/vendor/github.com/hashicorp/hil/lang.y deleted file mode 100644 index 46cca02e..00000000 --- a/vendor/github.com/hashicorp/hil/lang.y +++ /dev/null @@ -1,200 +0,0 @@ -// This is the yacc input for creating the parser for interpolation -// expressions in Go. To build it, just run `go generate` on this -// package, as the lexer has the go generate pragma within it. - -%{ -package hil - -import ( - "fmt" - - "github.com/hashicorp/hil/ast" -) - -%} - -%union { - node ast.Node - nodeList []ast.Node - str string - token *parserToken -} - -%token PROGRAM_BRACKET_LEFT PROGRAM_BRACKET_RIGHT -%token PROGRAM_STRING_START PROGRAM_STRING_END -%token PAREN_LEFT PAREN_RIGHT COMMA -%token SQUARE_BRACKET_LEFT SQUARE_BRACKET_RIGHT - -%token ARITH_OP IDENTIFIER INTEGER FLOAT STRING - -%type expr interpolation literal literalModeTop literalModeValue -%type args - -%left ARITH_OP - -%% - -top: - { - parserResult = &ast.LiteralNode{ - Value: "", - Typex: ast.TypeString, - Posx: ast.Pos{Column: 1, Line: 1}, - } - } -| literalModeTop - { - parserResult = $1 - - // We want to make sure that the top value is always an Output - // so that the return value is always a string, list of map from an - // interpolation. - // - // The logic for checking for a LiteralNode is a little annoying - // because functionally the AST is the same, but we do that because - // it makes for an easy literal check later (to check if a string - // has any interpolations). - if _, ok := $1.(*ast.Output); !ok { - if n, ok := $1.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { - parserResult = &ast.Output{ - Exprs: []ast.Node{$1}, - Posx: $1.Pos(), - } - } - } - } - -literalModeTop: - literalModeValue - { - $$ = $1 - } -| literalModeTop literalModeValue - { - var result []ast.Node - if c, ok := $1.(*ast.Output); ok { - result = append(c.Exprs, $2) - } else { - result = []ast.Node{$1, $2} - } - - $$ = &ast.Output{ - Exprs: result, - Posx: result[0].Pos(), - } - } - -literalModeValue: - literal - { - $$ = $1 - } -| interpolation - { - $$ = $1 - } - -interpolation: - PROGRAM_BRACKET_LEFT expr PROGRAM_BRACKET_RIGHT - { - $$ = $2 - } - -expr: - PAREN_LEFT expr PAREN_RIGHT - { - $$ = $2 - } -| literalModeTop - { - $$ = $1 - } -| INTEGER - { - $$ = &ast.LiteralNode{ - Value: $1.Value.(int), - Typex: ast.TypeInt, - Posx: $1.Pos, - } - } -| FLOAT - { - $$ = &ast.LiteralNode{ - Value: $1.Value.(float64), - Typex: ast.TypeFloat, - Posx: $1.Pos, - } - } -| ARITH_OP expr - { - // This is REALLY jank. We assume that a singular ARITH_OP - // means 0 ARITH_OP expr, which... is weird. We don't want to - // support *, /, etc., only -. We should fix this later with a pure - // Go scanner/parser. 
- if $1.Value.(ast.ArithmeticOp) != ast.ArithmeticOpSub { - if parserErr == nil { - parserErr = fmt.Errorf("Invalid unary operation: %v", $1.Value) - } - } - - $$ = &ast.Arithmetic{ - Op: $1.Value.(ast.ArithmeticOp), - Exprs: []ast.Node{ - &ast.LiteralNode{Value: 0, Typex: ast.TypeInt}, - $2, - }, - Posx: $2.Pos(), - } - } -| expr ARITH_OP expr - { - $$ = &ast.Arithmetic{ - Op: $2.Value.(ast.ArithmeticOp), - Exprs: []ast.Node{$1, $3}, - Posx: $1.Pos(), - } - } -| IDENTIFIER - { - $$ = &ast.VariableAccess{Name: $1.Value.(string), Posx: $1.Pos} - } -| IDENTIFIER PAREN_LEFT args PAREN_RIGHT - { - $$ = &ast.Call{Func: $1.Value.(string), Args: $3, Posx: $1.Pos} - } -| IDENTIFIER SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT - { - $$ = &ast.Index{ - Target: &ast.VariableAccess{ - Name: $1.Value.(string), - Posx: $1.Pos, - }, - Key: $3, - Posx: $1.Pos, - } - } - -args: - { - $$ = nil - } -| args COMMA expr - { - $$ = append($1, $3) - } -| expr - { - $$ = append($$, $1) - } - -literal: - STRING - { - $$ = &ast.LiteralNode{ - Value: $1.Value.(string), - Typex: ast.TypeString, - Posx: $1.Pos, - } - } - -%% diff --git a/vendor/github.com/hashicorp/hil/lex.go b/vendor/github.com/hashicorp/hil/lex.go deleted file mode 100644 index a6669464..00000000 --- a/vendor/github.com/hashicorp/hil/lex.go +++ /dev/null @@ -1,407 +0,0 @@ -package hil - -import ( - "bytes" - "fmt" - "strconv" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hil/ast" -) - -//go:generate go tool yacc -p parser lang.y - -// The parser expects the lexer to return 0 on EOF. -const lexEOF = 0 - -// The parser uses the type Lex as a lexer. It must provide -// the methods Lex(*SymType) int and Error(string). -type parserLex struct { - Err error - Input string - - mode parserMode - interpolationDepth int - pos int - width int - col, line int - lastLine int - astPos *ast.Pos -} - -// parserToken is the token yielded to the parser. The value can be -// determined within the parser type based on the enum value returned -// from Lex. -type parserToken struct { - Value interface{} - Pos ast.Pos -} - -// parserMode keeps track of what mode we're in for the parser. We have -// two modes: literal and interpolation. Literal mode is when strings -// don't have to be quoted, and interpolations are defined as ${foo}. -// Interpolation mode means that strings have to be quoted and unquoted -// things are identifiers, such as foo("bar"). -type parserMode uint8 - -const ( - parserModeInvalid parserMode = 0 - parserModeLiteral = 1 << iota - parserModeInterpolation -) - -// The parser calls this method to get each new token. -func (x *parserLex) Lex(yylval *parserSymType) int { - // We always start in literal mode, since programs don't start - // in an interpolation. ex. "foo ${bar}" vs "bar" (and assuming interp.) - if x.mode == parserModeInvalid { - x.mode = parserModeLiteral - } - - // Defer an update to set the proper column/line we read the next token. 
- defer func() { - if yylval.token != nil && yylval.token.Pos.Column == 0 { - yylval.token.Pos = *x.astPos - } - }() - - x.astPos = nil - return x.lex(yylval) -} - -func (x *parserLex) lex(yylval *parserSymType) int { - switch x.mode { - case parserModeLiteral: - return x.lexModeLiteral(yylval) - case parserModeInterpolation: - return x.lexModeInterpolation(yylval) - default: - x.Error(fmt.Sprintf("Unknown parse mode: %d", x.mode)) - return lexEOF - } -} - -func (x *parserLex) lexModeLiteral(yylval *parserSymType) int { - for { - c := x.next() - if c == lexEOF { - return lexEOF - } - - // Are we starting an interpolation? - if c == '$' && x.peek() == '{' { - x.next() - x.interpolationDepth++ - x.mode = parserModeInterpolation - return PROGRAM_BRACKET_LEFT - } - - // We're just a normal string that isn't part of any interpolation yet. - x.backup() - result, terminated := x.lexString(yylval, x.interpolationDepth > 0) - - // If the string terminated and we're within an interpolation already - // then that means that we finished a nested string, so pop - // back out to interpolation mode. - if terminated && x.interpolationDepth > 0 { - x.mode = parserModeInterpolation - - // If the string is empty, just skip it. We're still in - // an interpolation so we do this to avoid empty nodes. - if yylval.token.Value.(string) == "" { - return x.lex(yylval) - } - } - - return result - } -} - -func (x *parserLex) lexModeInterpolation(yylval *parserSymType) int { - for { - c := x.next() - if c == lexEOF { - return lexEOF - } - - // Ignore all whitespace - if unicode.IsSpace(c) { - continue - } - - // If we see a double quote then we're lexing a string since - // we're in interpolation mode. - if c == '"' { - result, terminated := x.lexString(yylval, true) - if !terminated { - // The string didn't end, which means that we're in the - // middle of starting another interpolation. - x.mode = parserModeLiteral - - // If the string is empty and we're starting an interpolation, - // then just skip it to avoid empty string AST nodes - if yylval.token.Value.(string) == "" { - return x.lex(yylval) - } - } - - return result - } - - // If we are seeing a number, it is the start of a number. Lex it. - if c >= '0' && c <= '9' { - x.backup() - return x.lexNumber(yylval) - } - - switch c { - case '}': - // '}' means we ended the interpolation. Pop back into - // literal mode and reduce our interpolation depth. - x.interpolationDepth-- - x.mode = parserModeLiteral - return PROGRAM_BRACKET_RIGHT - case '(': - return PAREN_LEFT - case ')': - return PAREN_RIGHT - case '[': - return SQUARE_BRACKET_LEFT - case ']': - return SQUARE_BRACKET_RIGHT - case ',': - return COMMA - case '+': - yylval.token = &parserToken{Value: ast.ArithmeticOpAdd} - return ARITH_OP - case '-': - yylval.token = &parserToken{Value: ast.ArithmeticOpSub} - return ARITH_OP - case '*': - yylval.token = &parserToken{Value: ast.ArithmeticOpMul} - return ARITH_OP - case '/': - yylval.token = &parserToken{Value: ast.ArithmeticOpDiv} - return ARITH_OP - case '%': - yylval.token = &parserToken{Value: ast.ArithmeticOpMod} - return ARITH_OP - default: - x.backup() - return x.lexId(yylval) - } - } -} - -func (x *parserLex) lexId(yylval *parserSymType) int { - var b bytes.Buffer - var last rune - for { - c := x.next() - if c == lexEOF { - break - } - - // We only allow * after a '.' for resource splast: type.name.*.id - // Otherwise, its probably multiplication. - if c == '*' && last != '.' 
{ - x.backup() - break - } - - // If this isn't a character we want in an ID, return out. - // One day we should make this a regexp. - if c != '_' && - c != '-' && - c != '.' && - c != '*' && - !unicode.IsLetter(c) && - !unicode.IsNumber(c) { - x.backup() - break - } - - if _, err := b.WriteRune(c); err != nil { - x.Error(err.Error()) - return lexEOF - } - - last = c - } - - yylval.token = &parserToken{Value: b.String()} - return IDENTIFIER -} - -// lexNumber lexes out a number: an integer or a float. -func (x *parserLex) lexNumber(yylval *parserSymType) int { - var b bytes.Buffer - gotPeriod := false - for { - c := x.next() - if c == lexEOF { - break - } - - // If we see a period, we might be getting a float.. - if c == '.' { - // If we've already seen a period, then ignore it, and - // exit. This will probably result in a syntax error later. - if gotPeriod { - x.backup() - break - } - - gotPeriod = true - } else if c < '0' || c > '9' { - // If we're not seeing a number, then also exit. - x.backup() - break - } - - if _, err := b.WriteRune(c); err != nil { - x.Error(fmt.Sprintf("internal error: %s", err)) - return lexEOF - } - } - - // If we didn't see a period, it is an int - if !gotPeriod { - v, err := strconv.ParseInt(b.String(), 0, 0) - if err != nil { - x.Error(fmt.Sprintf("expected number: %s", err)) - return lexEOF - } - - yylval.token = &parserToken{Value: int(v)} - return INTEGER - } - - // If we did see a period, it is a float - f, err := strconv.ParseFloat(b.String(), 64) - if err != nil { - x.Error(fmt.Sprintf("expected float: %s", err)) - return lexEOF - } - - yylval.token = &parserToken{Value: f} - return FLOAT -} - -func (x *parserLex) lexString(yylval *parserSymType, quoted bool) (int, bool) { - var b bytes.Buffer - terminated := false - for { - c := x.next() - if c == lexEOF { - if quoted { - x.Error("unterminated string") - } - - break - } - - // Behavior is a bit different if we're lexing within a quoted string. - if quoted { - // If its a double quote, we've reached the end of the string - if c == '"' { - terminated = true - break - } - - // Let's check to see if we're escaping anything. - if c == '\\' { - switch n := x.next(); n { - case '\\', '"': - c = n - case 'n': - c = '\n' - default: - x.backup() - } - } - } - - // If we hit a dollar sign, then check if we're starting - // another interpolation. If so, then we're done. - if c == '$' { - n := x.peek() - - // If it is '{', then we're starting another interpolation - if n == '{' { - x.backup() - break - } - - // If it is '$', then we're escaping a dollar sign - if n == '$' { - x.next() - } - } - - if _, err := b.WriteRune(c); err != nil { - x.Error(err.Error()) - return lexEOF, false - } - } - - yylval.token = &parserToken{Value: b.String()} - return STRING, terminated -} - -// Return the next rune for the lexer. -func (x *parserLex) next() rune { - if int(x.pos) >= len(x.Input) { - x.width = 0 - return lexEOF - } - - r, w := utf8.DecodeRuneInString(x.Input[x.pos:]) - x.width = w - x.pos += x.width - - if x.line == 0 { - x.line = 1 - x.col = 1 - } else { - x.col += 1 - } - - if r == '\n' { - x.lastLine = x.col - x.line += 1 - x.col = 1 - } - - if x.astPos == nil { - x.astPos = &ast.Pos{Column: x.col, Line: x.line} - } - - return r -} - -// peek returns but does not consume the next rune in the input -func (x *parserLex) peek() rune { - r := x.next() - x.backup() - return r -} - -// backup steps back one rune. Can only be called once per next. 
-func (x *parserLex) backup() { - x.pos -= x.width - x.col -= 1 - - // If we are at column 0, we're backing up across a line boundary - // so we need to be careful to get the proper value. - if x.col == 0 { - x.col = x.lastLine - x.line -= 1 - } -} - -// The parser calls this method on a parse error. -func (x *parserLex) Error(s string) { - x.Err = fmt.Errorf("parse error: %s", s) -} diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go index f498b5a8..ecbe1fdb 100644 --- a/vendor/github.com/hashicorp/hil/parse.go +++ b/vendor/github.com/hashicorp/hil/parse.go @@ -1,42 +1,29 @@ package hil import ( - "sync" - "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/parser" + "github.com/hashicorp/hil/scanner" ) -var parserLock sync.Mutex -var parserResult ast.Node -var parserErr error - // Parse parses the given program and returns an executable AST tree. +// +// Syntax errors are returned with error having the dynamic type +// *parser.ParseError, which gives the caller access to the source position +// where the error was found, which allows (for example) combining it with +// a known source filename to add context to the error message. func Parse(v string) (ast.Node, error) { - // Unfortunately due to the way that goyacc generated parsers are - // formatted, we can only do a single parse at a time without a lot - // of extra work. In the future we can remove this limitation. - parserLock.Lock() - defer parserLock.Unlock() - - // Reset our globals - parserErr = nil - parserResult = nil - - // Create the lexer - lex := &parserLex{Input: v} - - // Parse! - parserParse(lex) - - // If we have a lex error, return that - if lex.Err != nil { - return nil, lex.Err - } - - // If we have a parser error, return that - if parserErr != nil { - return nil, parserErr - } + return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1}) +} - return parserResult, nil +// ParseWithPosition is like Parse except that it overrides the source +// row and column position of the first character in the string, which should +// be 1-based. +// +// This can be used when HIL is embedded in another language and the outer +// parser knows the row and column where the HIL expression started within +// the overall source file. +func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) { + ch := scanner.Scan(v, pos) + return parser.Parse(ch) } diff --git a/vendor/github.com/hashicorp/hil/parser/binary_op.go b/vendor/github.com/hashicorp/hil/parser/binary_op.go new file mode 100644 index 00000000..2e013e01 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/parser/binary_op.go @@ -0,0 +1,45 @@ +package parser + +import ( + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/scanner" +) + +var binaryOps []map[scanner.TokenType]ast.ArithmeticOp + +func init() { + // This operation table maps from the operator's scanner token type + // to the AST arithmetic operation. All expressions produced from + // binary operators are *ast.Arithmetic nodes. + // + // Binary operator groups are listed in order of precedence, with + // the *lowest* precedence first. Operators within the same group + // have left-to-right associativity. 
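+	//
+	// (So, for example, `a || b && c` parses as `a || (b && c)`, and
+	// `1 + 2 * 3` parses as `1 + (2 * 3)`.)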
+ binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{ + { + scanner.OR: ast.ArithmeticOpLogicalOr, + }, + { + scanner.AND: ast.ArithmeticOpLogicalAnd, + }, + { + scanner.EQUAL: ast.ArithmeticOpEqual, + scanner.NOTEQUAL: ast.ArithmeticOpNotEqual, + }, + { + scanner.GT: ast.ArithmeticOpGreaterThan, + scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual, + scanner.LT: ast.ArithmeticOpLessThan, + scanner.LTE: ast.ArithmeticOpLessThanOrEqual, + }, + { + scanner.PLUS: ast.ArithmeticOpAdd, + scanner.MINUS: ast.ArithmeticOpSub, + }, + { + scanner.STAR: ast.ArithmeticOpMul, + scanner.SLASH: ast.ArithmeticOpDiv, + scanner.PERCENT: ast.ArithmeticOpMod, + }, + } +} diff --git a/vendor/github.com/hashicorp/hil/parser/error.go b/vendor/github.com/hashicorp/hil/parser/error.go new file mode 100644 index 00000000..bacd6964 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/parser/error.go @@ -0,0 +1,38 @@ +package parser + +import ( + "fmt" + + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/scanner" +) + +type ParseError struct { + Message string + Pos ast.Pos +} + +func Errorf(pos ast.Pos, format string, args ...interface{}) error { + return &ParseError{ + Message: fmt.Sprintf(format, args...), + Pos: pos, + } +} + +// TokenErrorf is a convenient wrapper around Errorf that uses the +// position of the given token. +func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error { + return Errorf(token.Pos, format, args...) +} + +func ExpectationError(wanted string, got *scanner.Token) error { + return TokenErrorf(got, "expected %s but found %s", wanted, got) +} + +func (e *ParseError) Error() string { + return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message) +} + +func (e *ParseError) String() string { + return e.Error() +} diff --git a/vendor/github.com/hashicorp/hil/parser/fuzz.go b/vendor/github.com/hashicorp/hil/parser/fuzz.go new file mode 100644 index 00000000..de954f38 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/parser/fuzz.go @@ -0,0 +1,28 @@ +// +build gofuzz + +package parser + +import ( + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/scanner" +) + +// This is a fuzz testing function designed to be used with go-fuzz: +// https://github.com/dvyukov/go-fuzz +// +// It's not included in a normal build due to the gofuzz build tag above. +// +// There are some input files that you can use as a seed corpus for go-fuzz +// in the directory ./fuzz-corpus . 
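+//
+// (Per the go-fuzz convention, Fuzz returns 1 when the input is worth
+// adding to the corpus, here meaning it parsed successfully, and 0
+// otherwise.)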
+ +func Fuzz(data []byte) int { + str := string(data) + + ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1}) + _, err := Parse(ch) + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/hashicorp/hil/parser/parser.go b/vendor/github.com/hashicorp/hil/parser/parser.go new file mode 100644 index 00000000..376f1c49 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/parser/parser.go @@ -0,0 +1,522 @@ +package parser + +import ( + "strconv" + "unicode/utf8" + + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/hil/scanner" +) + +func Parse(ch <-chan *scanner.Token) (ast.Node, error) { + peeker := scanner.NewPeeker(ch) + parser := &parser{peeker} + output, err := parser.ParseTopLevel() + peeker.Close() + return output, err +} + +type parser struct { + peeker *scanner.Peeker +} + +func (p *parser) ParseTopLevel() (ast.Node, error) { + return p.parseInterpolationSeq(false) +} + +func (p *parser) ParseQuoted() (ast.Node, error) { + return p.parseInterpolationSeq(true) +} + +// parseInterpolationSeq parses either the top-level sequence of literals +// and interpolation expressions or a similar sequence within a quoted +// string inside an interpolation expression. The latter case is requested +// by setting 'quoted' to true. +func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) { + literalType := scanner.LITERAL + endType := scanner.EOF + if quoted { + // exceptions for quoted sequences + literalType = scanner.STRING + endType = scanner.CQUOTE + } + + startPos := p.peeker.Peek().Pos + + if quoted { + tok := p.peeker.Read() + if tok.Type != scanner.OQUOTE { + return nil, ExpectationError("open quote", tok) + } + } + + var exprs []ast.Node + for { + tok := p.peeker.Read() + + if tok.Type == endType { + break + } + + switch tok.Type { + case literalType: + val, err := p.parseStringToken(tok) + if err != nil { + return nil, err + } + exprs = append(exprs, &ast.LiteralNode{ + Value: val, + Typex: ast.TypeString, + Posx: tok.Pos, + }) + case scanner.BEGIN: + expr, err := p.ParseInterpolation() + if err != nil { + return nil, err + } + exprs = append(exprs, expr) + default: + return nil, ExpectationError(`"${"`, tok) + } + } + + if len(exprs) == 0 { + // If we have no parts at all then the input must've + // been an empty string. + exprs = append(exprs, &ast.LiteralNode{ + Value: "", + Typex: ast.TypeString, + Posx: startPos, + }) + } + + // As a special case, if our "Output" contains only one expression + // and it's a literal string then we'll hoist it up to be our + // direct return value, so callers can easily recognize a string + // that has no interpolations at all. + if len(exprs) == 1 { + if lit, ok := exprs[0].(*ast.LiteralNode); ok { + if lit.Typex == ast.TypeString { + return lit, nil + } + } + } + + return &ast.Output{ + Exprs: exprs, + Posx: startPos, + }, nil +} + +// parseStringToken takes a token of either LITERAL or STRING type and +// returns the interpreted string, after processing any relevant +// escape sequences. 
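+//
+// For example, the sequence "$$" collapses to a single "$" in both
+// token types, so the literal `a $${b}` yields `a ${b}`. In STRING
+// tokens the escapes \\, \n and \" additionally become a backslash,
+// a newline and a quote mark respectively.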
+func (p *parser) parseStringToken(tok *scanner.Token) (string, error) { + var backslashes bool + switch tok.Type { + case scanner.LITERAL: + backslashes = false + case scanner.STRING: + backslashes = true + default: + panic("unsupported string token type") + } + + raw := []byte(tok.Content) + buf := make([]byte, 0, len(raw)) + + for i := 0; i < len(raw); i++ { + b := raw[i] + more := len(raw) > (i + 1) + + if b == '$' { + if more && raw[i+1] == '$' { + // skip over the second dollar sign + i++ + } + } else if backslashes && b == '\\' { + if !more { + return "", Errorf( + ast.Pos{ + Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), + Line: tok.Pos.Line, + }, + `unfinished backslash escape sequence`, + ) + } + escapeType := raw[i+1] + switch escapeType { + case '\\': + // skip over the second slash + i++ + case 'n': + b = '\n' + i++ + case '"': + b = '"' + i++ + default: + return "", Errorf( + ast.Pos{ + Column: tok.Pos.Column + utf8.RuneCount(raw[:i]), + Line: tok.Pos.Line, + }, + `invalid backslash escape sequence`, + ) + } + } + + buf = append(buf, b) + } + + return string(buf), nil +} + +func (p *parser) ParseInterpolation() (ast.Node, error) { + // By the time we're called, we're already "inside" the ${ sequence + // because the caller consumed the ${ token. + + expr, err := p.ParseExpression() + if err != nil { + return nil, err + } + + err = p.requireTokenType(scanner.END, `"}"`) + if err != nil { + return nil, err + } + + return expr, nil +} + +func (p *parser) ParseExpression() (ast.Node, error) { + return p.parseTernaryCond() +} + +func (p *parser) parseTernaryCond() (ast.Node, error) { + // The ternary condition operator (.. ? .. : ..) behaves somewhat + // like a binary operator except that the "operator" is itself + // an expression enclosed in two punctuation characters. + // The middle expression is parsed as if the ? and : symbols + // were parentheses. The "rhs" (the "false expression") is then + // treated right-associatively so it behaves similarly to the + // middle in terms of precedence. + + startPos := p.peeker.Peek().Pos + + var cond, trueExpr, falseExpr ast.Node + var err error + + cond, err = p.parseBinaryOps(binaryOps) + if err != nil { + return nil, err + } + + next := p.peeker.Peek() + if next.Type != scanner.QUESTION { + return cond, nil + } + + p.peeker.Read() // eat question mark + + trueExpr, err = p.ParseExpression() + if err != nil { + return nil, err + } + + colon := p.peeker.Read() + if colon.Type != scanner.COLON { + return nil, ExpectationError(":", colon) + } + + falseExpr, err = p.ParseExpression() + if err != nil { + return nil, err + } + + return &ast.Conditional{ + CondExpr: cond, + TrueExpr: trueExpr, + FalseExpr: falseExpr, + Posx: startPos, + }, nil +} + +// parseBinaryOps calls itself recursively to work through all of the +// operator precedence groups, and then eventually calls ParseExpressionTerm +// for each operand. +func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) { + if len(ops) == 0 { + // We've run out of operators, so now we'll just try to parse a term. + return p.ParseExpressionTerm() + } + + thisLevel := ops[0] + remaining := ops[1:] + + startPos := p.peeker.Peek().Pos + + var lhs, rhs ast.Node + operator := ast.ArithmeticOpInvalid + var err error + + // parse a term that might be the first operand of a binary + // expression or it might just be a standalone term, but + // we won't know until we've parsed it and can look ahead + // to see if there's an operator token. 
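+	//
+	// For example, when this level handles "+" and "-", parsing
+	// "1 + 2 * 3" first recurses to parse "1", then the loop below
+	// consumes "+" and recurses again so that "2 * 3" is grouped by
+	// the higher-precedence level before becoming our right operand.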
+ lhs, err = p.parseBinaryOps(remaining) + if err != nil { + return nil, err + } + + // We'll keep eating up arithmetic operators until we run + // out, so that operators with the same precedence will combine in a + // left-associative manner: + // a+b+c => (a+b)+c, not a+(b+c) + // + // Should we later want to have right-associative operators, a way + // to achieve that would be to call back up to ParseExpression here + // instead of iteratively parsing only the remaining operators. + for { + next := p.peeker.Peek() + var newOperator ast.ArithmeticOp + var ok bool + if newOperator, ok = thisLevel[next.Type]; !ok { + break + } + + // Are we extending an expression started on + // the previous iteration? + if operator != ast.ArithmeticOpInvalid { + lhs = &ast.Arithmetic{ + Op: operator, + Exprs: []ast.Node{lhs, rhs}, + Posx: startPos, + } + } + + operator = newOperator + p.peeker.Read() // eat operator token + rhs, err = p.parseBinaryOps(remaining) + if err != nil { + return nil, err + } + } + + if operator != ast.ArithmeticOpInvalid { + return &ast.Arithmetic{ + Op: operator, + Exprs: []ast.Node{lhs, rhs}, + Posx: startPos, + }, nil + } else { + return lhs, nil + } +} + +func (p *parser) ParseExpressionTerm() (ast.Node, error) { + + next := p.peeker.Peek() + + switch next.Type { + + case scanner.OPAREN: + p.peeker.Read() + expr, err := p.ParseExpression() + if err != nil { + return nil, err + } + err = p.requireTokenType(scanner.CPAREN, `")"`) + return expr, err + + case scanner.OQUOTE: + return p.ParseQuoted() + + case scanner.INTEGER: + tok := p.peeker.Read() + val, err := strconv.Atoi(tok.Content) + if err != nil { + return nil, TokenErrorf(tok, "invalid integer: %s", err) + } + return &ast.LiteralNode{ + Value: val, + Typex: ast.TypeInt, + Posx: tok.Pos, + }, nil + + case scanner.FLOAT: + tok := p.peeker.Read() + val, err := strconv.ParseFloat(tok.Content, 64) + if err != nil { + return nil, TokenErrorf(tok, "invalid float: %s", err) + } + return &ast.LiteralNode{ + Value: val, + Typex: ast.TypeFloat, + Posx: tok.Pos, + }, nil + + case scanner.BOOL: + tok := p.peeker.Read() + // the scanner guarantees that tok.Content is either "true" or "false" + var val bool + if tok.Content[0] == 't' { + val = true + } else { + val = false + } + return &ast.LiteralNode{ + Value: val, + Typex: ast.TypeBool, + Posx: tok.Pos, + }, nil + + case scanner.MINUS: + opTok := p.peeker.Read() + // important to use ParseExpressionTerm rather than ParseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + // e.g. -46+5 should parse as (0-46)+5, not 0-(46+5) + operand, err := p.ParseExpressionTerm() + if err != nil { + return nil, err + } + // The AST currently represents negative numbers as + // a binary subtraction of the number from zero. + return &ast.Arithmetic{ + Op: ast.ArithmeticOpSub, + Exprs: []ast.Node{ + &ast.LiteralNode{ + Value: 0, + Typex: ast.TypeInt, + Posx: opTok.Pos, + }, + operand, + }, + Posx: opTok.Pos, + }, nil + + case scanner.BANG: + opTok := p.peeker.Read() + // important to use ParseExpressionTerm rather than ParseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + operand, err := p.ParseExpressionTerm() + if err != nil { + return nil, err + } + // The AST currently represents binary negation as an equality + // test with "false". 
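+		// e.g. !x is represented as (false == x), which is true
+		// only when x is false.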
+		return &ast.Arithmetic{
+			Op: ast.ArithmeticOpEqual,
+			Exprs: []ast.Node{
+				&ast.LiteralNode{
+					Value: false,
+					Typex: ast.TypeBool,
+					Posx:  opTok.Pos,
+				},
+				operand,
+			},
+			Posx: opTok.Pos,
+		}, nil
+
+	case scanner.IDENTIFIER:
+		return p.ParseScopeInteraction()
+
+	default:
+		return nil, ExpectationError("expression", next)
+	}
+}
+
+// ParseScopeInteraction parses the expression types that interact
+// with the evaluation scope: variable access, function calls, and
+// indexing.
+//
+// Indexing should actually be a distinct operator in its own right,
+// so that e.g. it can be applied to the result of a function call,
+// but for now we're preserving the behavior of the older yacc-based
+// parser.
+func (p *parser) ParseScopeInteraction() (ast.Node, error) {
+	first := p.peeker.Read()
+	startPos := first.Pos
+	if first.Type != scanner.IDENTIFIER {
+		return nil, ExpectationError("identifier", first)
+	}
+
+	next := p.peeker.Peek()
+	if next.Type == scanner.OPAREN {
+		// function call
+		funcName := first.Content
+		p.peeker.Read() // eat paren
+		var args []ast.Node
+
+		for {
+			if p.peeker.Peek().Type == scanner.CPAREN {
+				break
+			}
+
+			arg, err := p.ParseExpression()
+			if err != nil {
+				return nil, err
+			}
+
+			args = append(args, arg)
+
+			if p.peeker.Peek().Type == scanner.COMMA {
+				p.peeker.Read() // eat comma
+				continue
+			} else {
+				break
+			}
+		}
+
+		err := p.requireTokenType(scanner.CPAREN, `")"`)
+		if err != nil {
+			return nil, err
+		}
+
+		return &ast.Call{
+			Func: funcName,
+			Args: args,
+			Posx: startPos,
+		}, nil
+	}
+
+	varNode := &ast.VariableAccess{
+		Name: first.Content,
+		Posx: startPos,
+	}
+
+	if p.peeker.Peek().Type == scanner.OBRACKET {
+		// index operator
+		startPos := p.peeker.Read().Pos // eat bracket
+		indexExpr, err := p.ParseExpression()
+		if err != nil {
+			return nil, err
+		}
+		err = p.requireTokenType(scanner.CBRACKET, `"]"`)
+		if err != nil {
+			return nil, err
+		}
+		return &ast.Index{
+			Target: varNode,
+			Key:    indexExpr,
+			Posx:   startPos,
+		}, nil
+	}
+
+	return varNode, nil
+}
+
+// requireTokenType consumes the next token and returns an error if its
+// type does not match the given type. nil is returned if the type matches.
+//
+// This is a helper around peeker.Read() for situations where the parser just
+// wants to assert that a particular token type must be present.
+func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error {
+	token := p.peeker.Read()
+	if token.Type != wantType {
+		return ExpectationError(wantName, token)
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/peeker.go b/vendor/github.com/hashicorp/hil/scanner/peeker.go
new file mode 100644
index 00000000..4de37283
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/peeker.go
@@ -0,0 +1,55 @@
+package scanner
+
+// Peeker is a utility that wraps a token channel returned by Scan and
+// provides an interface that allows a caller (e.g. the parser) to
+// work with the token stream with one token of lookahead, and provides
+// utilities for more convenient processing of the stream.
+type Peeker struct {
+	ch     <-chan *Token
+	peeked *Token
+}
+
+func NewPeeker(ch <-chan *Token) *Peeker {
+	return &Peeker{
+		ch: ch,
+	}
+}
+
+// Peek returns the next token in the stream without consuming it. A
+// subsequent call to Read will return the same token.
+func (p *Peeker) Peek() *Token {
+	if p.peeked == nil {
+		p.peeked = <-p.ch
+	}
+	return p.peeked
+}
+
+// Read consumes the next token in the stream and returns it.
+func (p *Peeker) Read() *Token {
+	token := p.Peek()
+
+	// As a special case, we will produce the EOF token forever once
+	// it is reached.
+	if token.Type != EOF {
+		p.peeked = nil
+	}
+
+	return token
+}
+
+// Close ensures that the token stream has been exhausted, to prevent
+// the goroutine in the underlying scanner from leaking.
+//
+// It's not necessary to call this if the caller reads the token stream
+// to EOF, since that implicitly closes the scanner.
+func (p *Peeker) Close() {
+	for _ = range p.ch {
+		// discard
+	}
+	// Install a synthetic EOF token in 'peeked' in case someone
+	// erroneously calls Peek() or Read() after we've closed.
+	p.peeked = &Token{
+		Type:    EOF,
+		Content: "",
+	}
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go
new file mode 100644
index 00000000..86085de0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/scanner.go
@@ -0,0 +1,556 @@
+package scanner
+
+import (
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hil/ast"
+)
+
+// Scan returns a channel that receives Tokens from the given input string.
+//
+// The scanner's job is just to partition the string into meaningful parts.
+// It doesn't do any transformation of the raw input string, so the caller
+// must deal with any further interpretation required, such as parsing INTEGER
+// tokens into real ints, or dealing with escape sequences in LITERAL or
+// STRING tokens.
+//
+// Strings in the returned tokens are slices from the original string.
+//
+// startPos should be set to ast.InitPos unless the caller knows that
+// this interpolation string is part of a larger file and knows the position
+// of the first character in that larger file.
+func Scan(s string, startPos ast.Pos) <-chan *Token {
+	ch := make(chan *Token)
+	go scan(s, ch, startPos)
+	return ch
+}
+
+func scan(s string, ch chan<- *Token, pos ast.Pos) {
+	// 'remain' starts off as the whole string but we gradually
+	// slice off the front of it as we work our way through.
+	remain := s
+
+	// nesting keeps track of how many ${ .. } sequences we are
+	// inside, so we can recognize the minor differences in syntax
+	// between outer string literals (LITERAL tokens) and quoted
+	// string literals (STRING tokens).
+	nesting := 0
+
+	// We're going to flip back and forth between parsing literals/strings
+	// and parsing interpolation sequences ${ .. } until we reach EOF or
+	// some INVALID token.
+All:
+	for {
+		startPos := pos
+		// Literal string processing first, since the beginning of
+		// a string is always outside of an interpolation sequence.
+		literalVal, terminator := scanLiteral(remain, pos, nesting > 0)
+
+		if len(literalVal) > 0 {
+			litType := LITERAL
+			if nesting > 0 {
+				litType = STRING
+			}
+			ch <- &Token{
+				Type:    litType,
+				Content: literalVal,
+				Pos:     startPos,
+			}
+			remain = remain[len(literalVal):]
+		}
+
+		ch <- terminator
+		remain = remain[len(terminator.Content):]
+		pos = terminator.Pos
+		// Safe to use len() here because none of the terminator tokens
+		// can contain UTF-8 sequences.
+		pos.Column = pos.Column + len(terminator.Content)
+
+		switch terminator.Type {
+		case INVALID:
+			// Synthetic EOF after invalid token, since further scanning
+			// is likely to just produce more garbage.
+			ch <- &Token{
+				Type:    EOF,
+				Content: "",
+				Pos:     pos,
+			}
+			break All
+		case EOF:
+			// All done!
+ break All + case BEGIN: + nesting++ + case CQUOTE: + // nothing special to do + default: + // Should never happen + panic("invalid string/literal terminator") + } + + // Now we do the processing of the insides of ${ .. } sequences. + // This loop terminates when we encounter either a closing } or + // an opening ", which will cause us to return to literal processing. + Interpolation: + for { + + token, size, newPos := scanInterpolationToken(remain, pos) + ch <- token + remain = remain[size:] + pos = newPos + + switch token.Type { + case INVALID: + // Synthetic EOF after invalid token, since further scanning + // is likely to just produce more garbage. + ch <- &Token{ + Type: EOF, + Content: "", + Pos: pos, + } + break All + case EOF: + // All done + // (though a syntax error that we'll catch in the parser) + break All + case END: + nesting-- + if nesting < 0 { + // Can happen if there are unbalanced ${ and } sequences + // in the input, which we'll catch in the parser. + nesting = 0 + } + break Interpolation + case OQUOTE: + // Beginning of nested quoted string + break Interpolation + } + } + } + + close(ch) +} + +// Returns the token found at the start of the given string, followed by +// the number of bytes that were consumed from the string and the adjusted +// source position. +// +// Note that the number of bytes consumed can be more than the length of +// the returned token contents if the string begins with whitespace, since +// it will be silently consumed before reading the token. +func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) { + pos := startPos + size := 0 + + // Consume whitespace, if any + for len(s) > 0 && byteIsSpace(s[0]) { + if s[0] == '\n' { + pos.Column = 1 + pos.Line++ + } else { + pos.Column++ + } + size++ + s = s[1:] + } + + // Unexpected EOF during sequence + if len(s) == 0 { + return &Token{ + Type: EOF, + Content: "", + Pos: pos, + }, size, pos + } + + next := s[0] + var token *Token + + switch next { + case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':': + // Easy punctuation symbols that don't have any special meaning + // during scanning, and that stand for themselves in the + // TokenType enumeration. 
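+		// For example, "(" yields a token whose Type is OPAREN, which
+		// is itself defined as the rune '('.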
+ token = &Token{ + Type: TokenType(next), + Content: s[:1], + Pos: pos, + } + case '}': + token = &Token{ + Type: END, + Content: s[:1], + Pos: pos, + } + case '"': + token = &Token{ + Type: OQUOTE, + Content: s[:1], + Pos: pos, + } + case '!': + if len(s) >= 2 && s[:2] == "!=" { + token = &Token{ + Type: NOTEQUAL, + Content: s[:2], + Pos: pos, + } + } else { + token = &Token{ + Type: BANG, + Content: s[:1], + Pos: pos, + } + } + case '<': + if len(s) >= 2 && s[:2] == "<=" { + token = &Token{ + Type: LTE, + Content: s[:2], + Pos: pos, + } + } else { + token = &Token{ + Type: LT, + Content: s[:1], + Pos: pos, + } + } + case '>': + if len(s) >= 2 && s[:2] == ">=" { + token = &Token{ + Type: GTE, + Content: s[:2], + Pos: pos, + } + } else { + token = &Token{ + Type: GT, + Content: s[:1], + Pos: pos, + } + } + case '=': + if len(s) >= 2 && s[:2] == "==" { + token = &Token{ + Type: EQUAL, + Content: s[:2], + Pos: pos, + } + } else { + // A single equals is not a valid operator + token = &Token{ + Type: INVALID, + Content: s[:1], + Pos: pos, + } + } + case '&': + if len(s) >= 2 && s[:2] == "&&" { + token = &Token{ + Type: AND, + Content: s[:2], + Pos: pos, + } + } else { + token = &Token{ + Type: INVALID, + Content: s[:1], + Pos: pos, + } + } + case '|': + if len(s) >= 2 && s[:2] == "||" { + token = &Token{ + Type: OR, + Content: s[:2], + Pos: pos, + } + } else { + token = &Token{ + Type: INVALID, + Content: s[:1], + Pos: pos, + } + } + default: + if next >= '0' && next <= '9' { + num, numType := scanNumber(s) + token = &Token{ + Type: numType, + Content: num, + Pos: pos, + } + } else if stringStartsWithIdentifier(s) { + ident, runeLen := scanIdentifier(s) + tokenType := IDENTIFIER + if ident == "true" || ident == "false" { + tokenType = BOOL + } + token = &Token{ + Type: tokenType, + Content: ident, + Pos: pos, + } + // Skip usual token handling because it doesn't + // know how to deal with UTF-8 sequences. + pos.Column = pos.Column + runeLen + return token, size + len(ident), pos + } else { + _, byteLen := utf8.DecodeRuneInString(s) + token = &Token{ + Type: INVALID, + Content: s[:byteLen], + Pos: pos, + } + // Skip usual token handling because it doesn't + // know how to deal with UTF-8 sequences. + pos.Column = pos.Column + 1 + return token, size + byteLen, pos + } + } + + // Here we assume that the token content contains no UTF-8 sequences, + // because we dealt with UTF-8 characters as a special case where + // necessary above. + size = size + len(token.Content) + pos.Column = pos.Column + len(token.Content) + + return token, size, pos +} + +// Returns the (possibly-empty) prefix of the given string that represents +// a literal, followed by the token that marks the end of the literal. +func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) { + litLen := 0 + pos := startPos + var terminator *Token + for { + + if litLen >= len(s) { + if nested { + // We've ended in the middle of a quoted string, + // which means this token is actually invalid. + return "", &Token{ + Type: INVALID, + Content: s, + Pos: startPos, + } + } + terminator = &Token{ + Type: EOF, + Content: "", + Pos: pos, + } + break + } + + next := s[litLen] + + if next == '$' && len(s) > litLen+1 { + follow := s[litLen+1] + + if follow == '{' { + terminator = &Token{ + Type: BEGIN, + Content: s[litLen : litLen+2], + Pos: pos, + } + pos.Column = pos.Column + 2 + break + } else if follow == '$' { + // Double-$ escapes the special processing of $, + // so we will consume both characters here. 
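+				// e.g. the input `a $${b}` stays one LITERAL token;
+				// the parser later collapses "$$" to "$".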
+				pos.Column = pos.Column + 2
+				litLen = litLen + 2
+				continue
+			}
+		}
+
+		// special handling that applies only to quoted strings
+		if nested {
+			if next == '"' {
+				terminator = &Token{
+					Type:    CQUOTE,
+					Content: s[litLen : litLen+1],
+					Pos:     pos,
+				}
+				pos.Column = pos.Column + 1
+				break
+			}
+
+			// Escaped quote marks do not terminate the string.
+			//
+			// All we do here in the scanner is avoid terminating a string
+			// due to an escaped quote. The parser is responsible for the
+			// full handling of escape sequences, since it's able to produce
+			// better error messages than we can produce in here.
+			if next == '\\' && len(s) > litLen+1 {
+				follow := s[litLen+1]
+
+				if follow == '"' {
+					// \" escapes the special processing of ",
+					// so we will consume both characters here.
+					pos.Column = pos.Column + 2
+					litLen = litLen + 2
+					continue
+				} else if follow == '\\' {
+					// \\ escapes \
+					// so we will consume both characters here.
+					pos.Column = pos.Column + 2
+					litLen = litLen + 2
+					continue
+				}
+			}
+		}
+
+		if next == '\n' {
+			pos.Column = 1
+			pos.Line++
+			litLen++
+		} else {
+			pos.Column++
+
+			// "Column" measures runes, so we need to actually consume
+			// a valid UTF-8 character here.
+			_, size := utf8.DecodeRuneInString(s[litLen:])
+			litLen = litLen + size
+		}
+
+	}
+
+	return s[:litLen], terminator
+}
+
+// scanNumber returns the extent of the prefix of the string that represents
+// a valid number, along with what type of number it represents: INTEGER or
+// FLOAT.
+//
+// scanNumber does only basic character analysis: numbers consist of digits
+// and periods, with at least one period signalling a FLOAT. It's the parser's
+// responsibility to validate the form and range of the number, such as ensuring
+// that a FLOAT actually contains only one period, etc.
+func scanNumber(s string) (string, TokenType) {
+	period := -1
+	byteLen := 0
+	numType := INTEGER
+	for {
+		if byteLen >= len(s) {
+			break
+		}
+
+		next := s[byteLen]
+		if next != '.' && (next < '0' || next > '9') {
+			// If our last value was a period, then we're not a float,
+			// we're just an integer that ends in a period.
+			if period == byteLen-1 {
+				byteLen--
+				numType = INTEGER
+			}
+
+			break
+		}
+
+		if next == '.' {
+			// If we've already seen a period, break out
+			if period >= 0 {
+				break
+			}
+
+			period = byteLen
+			numType = FLOAT
+		}
+
+		byteLen++
+	}
+
+	return s[:byteLen], numType
+}
+
+// scanIdentifier returns the extent of the prefix of the string that
+// represents a valid identifier, along with the length of that prefix
+// in runes.
+//
+// Identifiers may contain utf8-encoded non-Latin letters, which will
+// cause the returned "rune length" to be shorter than the byte length
+// of the returned string.
+func scanIdentifier(s string) (string, int) {
+	byteLen := 0
+	runeLen := 0
+	for {
+		if byteLen >= len(s) {
+			break
+		}
+
+		nextRune, size := utf8.DecodeRuneInString(s[byteLen:])
+		if !(nextRune == '_' ||
+			nextRune == '-' ||
+			nextRune == '.' ||
+			nextRune == '*' ||
+			unicode.IsNumber(nextRune) ||
+			unicode.IsLetter(nextRune) ||
+			unicode.IsMark(nextRune)) {
+			break
+		}
+
+		// If we reach a star, it must be between periods to be part
+		// of the same identifier.
+		if nextRune == '*' && s[byteLen-1] != '.' {
+			break
+		}
+
+		// If our previous character was a star, then the current must
+		// be period. Otherwise, undo that and exit.
+		if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' {
+			byteLen--
+			if s[byteLen-1] == '.' {
+				byteLen--
+			}
+
+			break
+		}
+
+		byteLen = byteLen + size
+		runeLen = runeLen + 1
+	}
+
+	return s[:byteLen], runeLen
+}
+
+// byteIsSpace implements a restrictive interpretation of spaces that includes
+// only what's valid inside interpolation sequences: spaces, tabs, newlines.
+func byteIsSpace(b byte) bool {
+	switch b {
+	case ' ', '\t', '\r', '\n':
+		return true
+	default:
+		return false
+	}
+}
+
+// stringStartsWithIdentifier returns true if the given string begins with
+// a character that is a legal start of an identifier: an underscore or
+// any character that Unicode considers to be a letter.
+func stringStartsWithIdentifier(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+
+	first := s[0]
+
+	// Easy ASCII cases first
+	if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' {
+		return true
+	}
+
+	// If our first byte begins a UTF-8 sequence then the sequence might
+	// be a unicode letter.
+	if utf8.RuneStart(first) {
+		firstRune, _ := utf8.DecodeRuneInString(s)
+		if unicode.IsLetter(firstRune) {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/token.go b/vendor/github.com/hashicorp/hil/scanner/token.go
new file mode 100644
index 00000000..b6c82ae9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/scanner/token.go
@@ -0,0 +1,105 @@
+package scanner
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hil/ast"
+)
+
+type Token struct {
+	Type    TokenType
+	Content string
+	Pos     ast.Pos
+}
+
+//go:generate stringer -type=TokenType
+type TokenType rune
+
+const (
+	// Raw string data outside of ${ .. } sequences
+	LITERAL TokenType = 'o'
+
+	// STRING is like a LITERAL but it's inside a quoted string
+	// within a ${ ... } sequence, and so it can contain backslash
+	// escaping.
+	STRING TokenType = 'S'
+
+	// Other Literals
+	INTEGER TokenType = 'I'
+	FLOAT   TokenType = 'F'
+	BOOL    TokenType = 'B'
+
+	BEGIN    TokenType = '$' // actually "${"
+	END      TokenType = '}'
+	OQUOTE   TokenType = '“' // Opening quote of a nested quoted sequence
+	CQUOTE   TokenType = '”' // Closing quote of a nested quoted sequence
+	OPAREN   TokenType = '('
+	CPAREN   TokenType = ')'
+	OBRACKET TokenType = '['
+	CBRACKET TokenType = ']'
+	COMMA    TokenType = ','
+
+	IDENTIFIER TokenType = 'i'
+
+	PERIOD  TokenType = '.'
+	PLUS    TokenType = '+'
+	MINUS   TokenType = '-'
+	STAR    TokenType = '*'
+	SLASH   TokenType = '/'
+	PERCENT TokenType = '%'
+
+	AND  TokenType = '∧'
+	OR   TokenType = '∨'
+	BANG TokenType = '!'
+
+	EQUAL    TokenType = '='
+	NOTEQUAL TokenType = '≠'
+	GT       TokenType = '>'
+	LT       TokenType = '<'
+	GTE      TokenType = '≥'
+	LTE      TokenType = '≤'
+
+	QUESTION TokenType = '?'
+	COLON    TokenType = ':'
+
+	EOF TokenType = '␄'
+
+	// Produced for sequences that cannot be understood as valid tokens
+	// e.g. due to use of unrecognized punctuation.
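+	//
+	// A lone "=", "&" or "|" also produces INVALID, since only the
+	// two-character forms "==", "&&" and "||" are recognized.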
+ INVALID TokenType = '�' +) + +func (t *Token) String() string { + switch t.Type { + case EOF: + return "end of string" + case INVALID: + return fmt.Sprintf("invalid sequence %q", t.Content) + case INTEGER: + return fmt.Sprintf("integer %s", t.Content) + case FLOAT: + return fmt.Sprintf("float %s", t.Content) + case STRING: + return fmt.Sprintf("string %q", t.Content) + case LITERAL: + return fmt.Sprintf("literal %q", t.Content) + case OQUOTE: + return fmt.Sprintf("opening quote") + case CQUOTE: + return fmt.Sprintf("closing quote") + case AND: + return "&&" + case OR: + return "||" + case NOTEQUAL: + return "!=" + case GTE: + return ">=" + case LTE: + return "<=" + default: + // The remaining token types have content that + // speaks for itself. + return fmt.Sprintf("%q", t.Content) + } +} diff --git a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go new file mode 100644 index 00000000..a602f5fd --- /dev/null +++ b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go @@ -0,0 +1,51 @@ +// Code generated by "stringer -type=TokenType"; DO NOT EDIT + +package scanner + +import "fmt" + +const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID" + +var _TokenType_map = map[TokenType]string{ + 33: _TokenType_name[0:4], + 36: _TokenType_name[4:9], + 37: _TokenType_name[9:16], + 40: _TokenType_name[16:22], + 41: _TokenType_name[22:28], + 42: _TokenType_name[28:32], + 43: _TokenType_name[32:36], + 44: _TokenType_name[36:41], + 45: _TokenType_name[41:46], + 46: _TokenType_name[46:52], + 47: _TokenType_name[52:57], + 58: _TokenType_name[57:62], + 60: _TokenType_name[62:64], + 61: _TokenType_name[64:69], + 62: _TokenType_name[69:71], + 63: _TokenType_name[71:79], + 66: _TokenType_name[79:83], + 70: _TokenType_name[83:88], + 73: _TokenType_name[88:95], + 83: _TokenType_name[95:101], + 91: _TokenType_name[101:109], + 93: _TokenType_name[109:117], + 105: _TokenType_name[117:127], + 111: _TokenType_name[127:134], + 125: _TokenType_name[134:137], + 8220: _TokenType_name[137:143], + 8221: _TokenType_name[143:149], + 8743: _TokenType_name[149:152], + 8744: _TokenType_name[152:154], + 8800: _TokenType_name[154:162], + 8804: _TokenType_name[162:165], + 8805: _TokenType_name[165:168], + 9220: _TokenType_name[168:171], + 65533: _TokenType_name[171:178], +} + +func (i TokenType) String() string { + if str, ok := _TokenType_map[i]; ok { + return str + } + return fmt.Sprintf("TokenType(%d)", i) +} diff --git a/vendor/github.com/hashicorp/hil/y.go b/vendor/github.com/hashicorp/hil/y.go deleted file mode 100644 index fdea7eb0..00000000 --- a/vendor/github.com/hashicorp/hil/y.go +++ /dev/null @@ -1,666 +0,0 @@ -//line lang.y:6 -package hil - -import __yyfmt__ "fmt" - -//line lang.y:6 -import ( - "fmt" - - "github.com/hashicorp/hil/ast" -) - -//line lang.y:16 -type parserSymType struct { - yys int - node ast.Node - nodeList []ast.Node - str string - token *parserToken -} - -const PROGRAM_BRACKET_LEFT = 57346 -const PROGRAM_BRACKET_RIGHT = 57347 -const PROGRAM_STRING_START = 57348 -const PROGRAM_STRING_END = 57349 -const PAREN_LEFT = 57350 -const PAREN_RIGHT = 57351 -const COMMA = 57352 -const SQUARE_BRACKET_LEFT = 57353 -const SQUARE_BRACKET_RIGHT = 57354 -const ARITH_OP = 57355 -const IDENTIFIER = 57356 -const INTEGER = 57357 -const FLOAT = 57358 -const STRING = 57359 - -var parserToknames = 
[...]string{ - "$end", - "error", - "$unk", - "PROGRAM_BRACKET_LEFT", - "PROGRAM_BRACKET_RIGHT", - "PROGRAM_STRING_START", - "PROGRAM_STRING_END", - "PAREN_LEFT", - "PAREN_RIGHT", - "COMMA", - "SQUARE_BRACKET_LEFT", - "SQUARE_BRACKET_RIGHT", - "ARITH_OP", - "IDENTIFIER", - "INTEGER", - "FLOAT", - "STRING", -} -var parserStatenames = [...]string{} - -const parserEofCode = 1 -const parserErrCode = 2 -const parserInitialStackSize = 16 - -//line lang.y:200 - -//line yacctab:1 -var parserExca = [...]int{ - -1, 1, - 1, -1, - -2, 0, -} - -const parserNprod = 21 -const parserPrivate = 57344 - -var parserTokenNames []string -var parserStates []string - -const parserLast = 37 - -var parserAct = [...]int{ - - 9, 7, 29, 17, 23, 16, 17, 3, 17, 20, - 8, 18, 21, 17, 6, 19, 27, 28, 22, 8, - 1, 25, 26, 7, 11, 2, 24, 10, 4, 30, - 5, 0, 14, 15, 12, 13, 6, -} -var parserPact = [...]int{ - - -3, -1000, -3, -1000, -1000, -1000, -1000, 19, -1000, 0, - 19, -3, -1000, -1000, 19, 1, -1000, 19, -5, -1000, - 19, 19, -1000, -1000, 7, -7, -10, -1000, 19, -1000, - -7, -} -var parserPgo = [...]int{ - - 0, 0, 30, 28, 24, 7, 26, 20, -} -var parserR1 = [...]int{ - - 0, 7, 7, 4, 4, 5, 5, 2, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 6, 6, 6, - 3, -} -var parserR2 = [...]int{ - - 0, 0, 1, 1, 2, 1, 1, 3, 3, 1, - 1, 1, 2, 3, 1, 4, 4, 0, 3, 1, - 1, -} -var parserChk = [...]int{ - - -1000, -7, -4, -5, -3, -2, 17, 4, -5, -1, - 8, -4, 15, 16, 13, 14, 5, 13, -1, -1, - 8, 11, -1, 9, -6, -1, -1, 9, 10, 12, - -1, -} -var parserDef = [...]int{ - - 1, -2, 2, 3, 5, 6, 20, 0, 4, 0, - 0, 9, 10, 11, 0, 14, 7, 0, 0, 12, - 17, 0, 13, 8, 0, 19, 0, 15, 0, 16, - 18, -} -var parserTok1 = [...]int{ - - 1, -} -var parserTok2 = [...]int{ - - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, -} -var parserTok3 = [...]int{ - 0, -} - -var parserErrorMessages = [...]struct { - state int - token int - msg string -}{} - -//line yaccpar:1 - -/* parser for yacc output */ - -var ( - parserDebug = 0 - parserErrorVerbose = false -) - -type parserLexer interface { - Lex(lval *parserSymType) int - Error(s string) -} - -type parserParser interface { - Parse(parserLexer) int - Lookahead() int -} - -type parserParserImpl struct { - lval parserSymType - stack [parserInitialStackSize]parserSymType - char int -} - -func (p *parserParserImpl) Lookahead() int { - return p.char -} - -func parserNewParser() parserParser { - return &parserParserImpl{} -} - -const parserFlag = -1000 - -func parserTokname(c int) string { - if c >= 1 && c-1 < len(parserToknames) { - if parserToknames[c-1] != "" { - return parserToknames[c-1] - } - } - return __yyfmt__.Sprintf("tok-%v", c) -} - -func parserStatname(s int) string { - if s >= 0 && s < len(parserStatenames) { - if parserStatenames[s] != "" { - return parserStatenames[s] - } - } - return __yyfmt__.Sprintf("state-%v", s) -} - -func parserErrorMessage(state, lookAhead int) string { - const TOKSTART = 4 - - if !parserErrorVerbose { - return "syntax error" - } - - for _, e := range parserErrorMessages { - if e.state == state && e.token == lookAhead { - return "syntax error: " + e.msg - } - } - - res := "syntax error: unexpected " + parserTokname(lookAhead) - - // To match Bison, suggest at most four expected tokens. - expected := make([]int, 0, 4) - - // Look for shiftable tokens. 
- base := parserPact[state] - for tok := TOKSTART; tok-1 < len(parserToknames); tok++ { - if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok { - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - } - - if parserDef[state] == -2 { - i := 0 - for parserExca[i] != -1 || parserExca[i+1] != state { - i += 2 - } - - // Look for tokens that we accept or reduce. - for i += 2; parserExca[i] >= 0; i += 2 { - tok := parserExca[i] - if tok < TOKSTART || parserExca[i+1] == 0 { - continue - } - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - - // If the default action is to accept or reduce, give up. - if parserExca[i+1] != 0 { - return res - } - } - - for i, tok := range expected { - if i == 0 { - res += ", expecting " - } else { - res += " or " - } - res += parserTokname(tok) - } - return res -} - -func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) { - token = 0 - char = lex.Lex(lval) - if char <= 0 { - token = parserTok1[0] - goto out - } - if char < len(parserTok1) { - token = parserTok1[char] - goto out - } - if char >= parserPrivate { - if char < parserPrivate+len(parserTok2) { - token = parserTok2[char-parserPrivate] - goto out - } - } - for i := 0; i < len(parserTok3); i += 2 { - token = parserTok3[i+0] - if token == char { - token = parserTok3[i+1] - goto out - } - } - -out: - if token == 0 { - token = parserTok2[1] /* unknown char */ - } - if parserDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char)) - } - return char, token -} - -func parserParse(parserlex parserLexer) int { - return parserNewParser().Parse(parserlex) -} - -func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int { - var parsern int - var parserVAL parserSymType - var parserDollar []parserSymType - _ = parserDollar // silence set and not used - parserS := parserrcvr.stack[:] - - Nerrs := 0 /* number of errors */ - Errflag := 0 /* error recovery flag */ - parserstate := 0 - parserrcvr.char = -1 - parsertoken := -1 // parserrcvr.char translated into internal numbering - defer func() { - // Make sure we report no lookahead when not parsing. 
- parserstate = -1 - parserrcvr.char = -1 - parsertoken = -1 - }() - parserp := -1 - goto parserstack - -ret0: - return 0 - -ret1: - return 1 - -parserstack: - /* put a state and value onto the stack */ - if parserDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate)) - } - - parserp++ - if parserp >= len(parserS) { - nyys := make([]parserSymType, len(parserS)*2) - copy(nyys, parserS) - parserS = nyys - } - parserS[parserp] = parserVAL - parserS[parserp].yys = parserstate - -parsernewstate: - parsern = parserPact[parserstate] - if parsern <= parserFlag { - goto parserdefault /* simple state */ - } - if parserrcvr.char < 0 { - parserrcvr.char, parsertoken = parserlex1(parserlex, &parserrcvr.lval) - } - parsern += parsertoken - if parsern < 0 || parsern >= parserLast { - goto parserdefault - } - parsern = parserAct[parsern] - if parserChk[parsern] == parsertoken { /* valid shift */ - parserrcvr.char = -1 - parsertoken = -1 - parserVAL = parserrcvr.lval - parserstate = parsern - if Errflag > 0 { - Errflag-- - } - goto parserstack - } - -parserdefault: - /* default state action */ - parsern = parserDef[parserstate] - if parsern == -2 { - if parserrcvr.char < 0 { - parserrcvr.char, parsertoken = parserlex1(parserlex, &parserrcvr.lval) - } - - /* look through exception table */ - xi := 0 - for { - if parserExca[xi+0] == -1 && parserExca[xi+1] == parserstate { - break - } - xi += 2 - } - for xi += 2; ; xi += 2 { - parsern = parserExca[xi+0] - if parsern < 0 || parsern == parsertoken { - break - } - } - parsern = parserExca[xi+1] - if parsern < 0 { - goto ret0 - } - } - if parsern == 0 { - /* error ... attempt to resume parsing */ - switch Errflag { - case 0: /* brand new error */ - parserlex.Error(parserErrorMessage(parserstate, parsertoken)) - Nerrs++ - if parserDebug >= 1 { - __yyfmt__.Printf("%s", parserStatname(parserstate)) - __yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken)) - } - fallthrough - - case 1, 2: /* incompletely recovered error ... try again */ - Errflag = 3 - - /* find a state where "error" is a legal shift action */ - for parserp >= 0 { - parsern = parserPact[parserS[parserp].yys] + parserErrCode - if parsern >= 0 && parsern < parserLast { - parserstate = parserAct[parsern] /* simulate a shift of "error" */ - if parserChk[parserstate] == parserErrCode { - goto parserstack - } - } - - /* the current p has no shift on "error", pop stack */ - if parserDebug >= 2 { - __yyfmt__.Printf("error recovery pops state %d\n", parserS[parserp].yys) - } - parserp-- - } - /* there is no state on the stack with an error shift ... abort */ - goto ret1 - - case 3: /* no shift yet; clobber input char */ - if parserDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken)) - } - if parsertoken == parserEofCode { - goto ret1 - } - parserrcvr.char = -1 - parsertoken = -1 - goto parsernewstate /* try again in the same state */ - } - } - - /* reduction by production parsern */ - if parserDebug >= 2 { - __yyfmt__.Printf("reduce %v in:\n\t%v\n", parsern, parserStatname(parserstate)) - } - - parsernt := parsern - parserpt := parserp - _ = parserpt // guard against "declared and not used" - - parserp -= parserR2[parsern] - // parserp is now the index of $0. Perform the default action. Iff the - // reduced production is ε, $1 is possibly out of range. 
- if parserp+1 >= len(parserS) { - nyys := make([]parserSymType, len(parserS)*2) - copy(nyys, parserS) - parserS = nyys - } - parserVAL = parserS[parserp+1] - - /* consult goto table to find next state */ - parsern = parserR1[parsern] - parserg := parserPgo[parsern] - parserj := parserg + parserS[parserp].yys + 1 - - if parserj >= parserLast { - parserstate = parserAct[parserg] - } else { - parserstate = parserAct[parserj] - if parserChk[parserstate] != -parsern { - parserstate = parserAct[parserg] - } - } - // dummy call; replaced with literal code - switch parsernt { - - case 1: - parserDollar = parserS[parserpt-0 : parserpt+1] - //line lang.y:38 - { - parserResult = &ast.LiteralNode{ - Value: "", - Typex: ast.TypeString, - Posx: ast.Pos{Column: 1, Line: 1}, - } - } - case 2: - parserDollar = parserS[parserpt-1 : parserpt+1] - //line lang.y:46 - { - parserResult = parserDollar[1].node - - // We want to make sure that the top value is always an Output - // so that the return value is always a string, list of map from an - // interpolation. - // - // The logic for checking for a LiteralNode is a little annoying - // because functionally the AST is the same, but we do that because - // it makes for an easy literal check later (to check if a string - // has any interpolations). - if _, ok := parserDollar[1].node.(*ast.Output); !ok { - if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString { - parserResult = &ast.Output{ - Exprs: []ast.Node{parserDollar[1].node}, - Posx: parserDollar[1].node.Pos(), - } - } - } - } - case 3: - parserDollar = parserS[parserpt-1 : parserpt+1] - //line lang.y:69 - { - parserVAL.node = parserDollar[1].node - } - case 4: - parserDollar = parserS[parserpt-2 : parserpt+1] - //line lang.y:73 - { - var result []ast.Node - if c, ok := parserDollar[1].node.(*ast.Output); ok { - result = append(c.Exprs, parserDollar[2].node) - } else { - result = []ast.Node{parserDollar[1].node, parserDollar[2].node} - } - - parserVAL.node = &ast.Output{ - Exprs: result, - Posx: result[0].Pos(), - } - } - case 5: - parserDollar = parserS[parserpt-1 : parserpt+1] - //line lang.y:89 - { - parserVAL.node = parserDollar[1].node - } - case 6: - parserDollar = parserS[parserpt-1 : parserpt+1] - //line lang.y:93 - { - parserVAL.node = parserDollar[1].node - } - case 7: - parserDollar = parserS[parserpt-3 : parserpt+1] - //line lang.y:99 - { - parserVAL.node = parserDollar[2].node - } - case 8: - parserDollar = parserS[parserpt-3 : parserpt+1] - //line lang.y:105 - { - parserVAL.node = parserDollar[2].node - } - case 9: - parserDollar = parserS[parserpt-1 : parserpt+1] - //line lang.y:109 - { - parserVAL.node = parserDollar[1].node - } - case 10: - parserDollar = parserS[parserpt-1 : parserpt+1] - //line lang.y:113 - { - parserVAL.node = &ast.LiteralNode{ - Value: parserDollar[1].token.Value.(int), - Typex: ast.TypeInt, - Posx: parserDollar[1].token.Pos, - } - } - case 11: - parserDollar = parserS[parserpt-1 : parserpt+1] - //line lang.y:121 - { - parserVAL.node = &ast.LiteralNode{ - Value: parserDollar[1].token.Value.(float64), - Typex: ast.TypeFloat, - Posx: parserDollar[1].token.Pos, - } - } - case 12: - parserDollar = parserS[parserpt-2 : parserpt+1] - //line lang.y:129 - { - // This is REALLY jank. We assume that a singular ARITH_OP - // means 0 ARITH_OP expr, which... is weird. We don't want to - // support *, /, etc., only -. We should fix this later with a pure - // Go scanner/parser. 
- if parserDollar[1].token.Value.(ast.ArithmeticOp) != ast.ArithmeticOpSub { - if parserErr == nil { - parserErr = fmt.Errorf("Invalid unary operation: %v", parserDollar[1].token.Value) - } - } - - parserVAL.node = &ast.Arithmetic{ - Op: parserDollar[1].token.Value.(ast.ArithmeticOp), - Exprs: []ast.Node{ - &ast.LiteralNode{Value: 0, Typex: ast.TypeInt}, - parserDollar[2].node, - }, - Posx: parserDollar[2].node.Pos(), - } - } - case 13: - parserDollar = parserS[parserpt-3 : parserpt+1] - //line lang.y:150 - { - parserVAL.node = &ast.Arithmetic{ - Op: parserDollar[2].token.Value.(ast.ArithmeticOp), - Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node}, - Posx: parserDollar[1].node.Pos(), - } - } - case 14: - parserDollar = parserS[parserpt-1 : parserpt+1] - //line lang.y:158 - { - parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos} - } - case 15: - parserDollar = parserS[parserpt-4 : parserpt+1] - //line lang.y:162 - { - parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos} - } - case 16: - parserDollar = parserS[parserpt-4 : parserpt+1] - //line lang.y:166 - { - parserVAL.node = &ast.Index{ - Target: &ast.VariableAccess{ - Name: parserDollar[1].token.Value.(string), - Posx: parserDollar[1].token.Pos, - }, - Key: parserDollar[3].node, - Posx: parserDollar[1].token.Pos, - } - } - case 17: - parserDollar = parserS[parserpt-0 : parserpt+1] - //line lang.y:178 - { - parserVAL.nodeList = nil - } - case 18: - parserDollar = parserS[parserpt-3 : parserpt+1] - //line lang.y:182 - { - parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node) - } - case 19: - parserDollar = parserS[parserpt-1 : parserpt+1] - //line lang.y:186 - { - parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node) - } - case 20: - parserDollar = parserS[parserpt-1 : parserpt+1] - //line lang.y:192 - { - parserVAL.node = &ast.LiteralNode{ - Value: parserDollar[1].token.Value.(string), - Typex: ast.TypeString, - Posx: parserDollar[1].token.Pos, - } - } - } - goto parserstack /* stack new state and value */ -} diff --git a/vendor/github.com/hashicorp/hil/y.output b/vendor/github.com/hashicorp/hil/y.output deleted file mode 100644 index 057f3d40..00000000 --- a/vendor/github.com/hashicorp/hil/y.output +++ /dev/null @@ -1,328 +0,0 @@ - -state 0 - $accept: .top $end - top: . (1) - - PROGRAM_BRACKET_LEFT shift 7 - STRING shift 6 - . reduce 1 (src line 37) - - interpolation goto 5 - literal goto 4 - literalModeTop goto 2 - literalModeValue goto 3 - top goto 1 - -state 1 - $accept: top.$end - - $end accept - . error - - -state 2 - top: literalModeTop. (2) - literalModeTop: literalModeTop.literalModeValue - - PROGRAM_BRACKET_LEFT shift 7 - STRING shift 6 - . reduce 2 (src line 45) - - interpolation goto 5 - literal goto 4 - literalModeValue goto 8 - -state 3 - literalModeTop: literalModeValue. (3) - - . reduce 3 (src line 67) - - -state 4 - literalModeValue: literal. (5) - - . reduce 5 (src line 87) - - -state 5 - literalModeValue: interpolation. (6) - - . reduce 6 (src line 92) - - -state 6 - literal: STRING. (20) - - . reduce 20 (src line 190) - - -state 7 - interpolation: PROGRAM_BRACKET_LEFT.expr PROGRAM_BRACKET_RIGHT - - PROGRAM_BRACKET_LEFT shift 7 - PAREN_LEFT shift 10 - ARITH_OP shift 14 - IDENTIFIER shift 15 - INTEGER shift 12 - FLOAT shift 13 - STRING shift 6 - . 
error - - expr goto 9 - interpolation goto 5 - literal goto 4 - literalModeTop goto 11 - literalModeValue goto 3 - -state 8 - literalModeTop: literalModeTop literalModeValue. (4) - - . reduce 4 (src line 72) - - -state 9 - interpolation: PROGRAM_BRACKET_LEFT expr.PROGRAM_BRACKET_RIGHT - expr: expr.ARITH_OP expr - - PROGRAM_BRACKET_RIGHT shift 16 - ARITH_OP shift 17 - . error - - -state 10 - expr: PAREN_LEFT.expr PAREN_RIGHT - - PROGRAM_BRACKET_LEFT shift 7 - PAREN_LEFT shift 10 - ARITH_OP shift 14 - IDENTIFIER shift 15 - INTEGER shift 12 - FLOAT shift 13 - STRING shift 6 - . error - - expr goto 18 - interpolation goto 5 - literal goto 4 - literalModeTop goto 11 - literalModeValue goto 3 - -state 11 - literalModeTop: literalModeTop.literalModeValue - expr: literalModeTop. (9) - - PROGRAM_BRACKET_LEFT shift 7 - STRING shift 6 - . reduce 9 (src line 108) - - interpolation goto 5 - literal goto 4 - literalModeValue goto 8 - -state 12 - expr: INTEGER. (10) - - . reduce 10 (src line 112) - - -state 13 - expr: FLOAT. (11) - - . reduce 11 (src line 120) - - -state 14 - expr: ARITH_OP.expr - - PROGRAM_BRACKET_LEFT shift 7 - PAREN_LEFT shift 10 - ARITH_OP shift 14 - IDENTIFIER shift 15 - INTEGER shift 12 - FLOAT shift 13 - STRING shift 6 - . error - - expr goto 19 - interpolation goto 5 - literal goto 4 - literalModeTop goto 11 - literalModeValue goto 3 - -state 15 - expr: IDENTIFIER. (14) - expr: IDENTIFIER.PAREN_LEFT args PAREN_RIGHT - expr: IDENTIFIER.SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT - - PAREN_LEFT shift 20 - SQUARE_BRACKET_LEFT shift 21 - . reduce 14 (src line 157) - - -state 16 - interpolation: PROGRAM_BRACKET_LEFT expr PROGRAM_BRACKET_RIGHT. (7) - - . reduce 7 (src line 97) - - -state 17 - expr: expr ARITH_OP.expr - - PROGRAM_BRACKET_LEFT shift 7 - PAREN_LEFT shift 10 - ARITH_OP shift 14 - IDENTIFIER shift 15 - INTEGER shift 12 - FLOAT shift 13 - STRING shift 6 - . error - - expr goto 22 - interpolation goto 5 - literal goto 4 - literalModeTop goto 11 - literalModeValue goto 3 - -state 18 - expr: PAREN_LEFT expr.PAREN_RIGHT - expr: expr.ARITH_OP expr - - PAREN_RIGHT shift 23 - ARITH_OP shift 17 - . error - - -state 19 - expr: ARITH_OP expr. (12) - expr: expr.ARITH_OP expr - - . reduce 12 (src line 128) - - -state 20 - expr: IDENTIFIER PAREN_LEFT.args PAREN_RIGHT - args: . (17) - - PROGRAM_BRACKET_LEFT shift 7 - PAREN_LEFT shift 10 - ARITH_OP shift 14 - IDENTIFIER shift 15 - INTEGER shift 12 - FLOAT shift 13 - STRING shift 6 - . reduce 17 (src line 177) - - expr goto 25 - interpolation goto 5 - literal goto 4 - literalModeTop goto 11 - literalModeValue goto 3 - args goto 24 - -state 21 - expr: IDENTIFIER SQUARE_BRACKET_LEFT.expr SQUARE_BRACKET_RIGHT - - PROGRAM_BRACKET_LEFT shift 7 - PAREN_LEFT shift 10 - ARITH_OP shift 14 - IDENTIFIER shift 15 - INTEGER shift 12 - FLOAT shift 13 - STRING shift 6 - . error - - expr goto 26 - interpolation goto 5 - literal goto 4 - literalModeTop goto 11 - literalModeValue goto 3 - -state 22 - expr: expr.ARITH_OP expr - expr: expr ARITH_OP expr. (13) - - . reduce 13 (src line 149) - - -state 23 - expr: PAREN_LEFT expr PAREN_RIGHT. (8) - - . reduce 8 (src line 103) - - -state 24 - expr: IDENTIFIER PAREN_LEFT args.PAREN_RIGHT - args: args.COMMA expr - - PAREN_RIGHT shift 27 - COMMA shift 28 - . error - - -state 25 - expr: expr.ARITH_OP expr - args: expr. (19) - - ARITH_OP shift 17 - . 
reduce 19 (src line 185) - - -state 26 - expr: expr.ARITH_OP expr - expr: IDENTIFIER SQUARE_BRACKET_LEFT expr.SQUARE_BRACKET_RIGHT - - SQUARE_BRACKET_RIGHT shift 29 - ARITH_OP shift 17 - . error - - -state 27 - expr: IDENTIFIER PAREN_LEFT args PAREN_RIGHT. (15) - - . reduce 15 (src line 161) - - -state 28 - args: args COMMA.expr - - PROGRAM_BRACKET_LEFT shift 7 - PAREN_LEFT shift 10 - ARITH_OP shift 14 - IDENTIFIER shift 15 - INTEGER shift 12 - FLOAT shift 13 - STRING shift 6 - . error - - expr goto 30 - interpolation goto 5 - literal goto 4 - literalModeTop goto 11 - literalModeValue goto 3 - -state 29 - expr: IDENTIFIER SQUARE_BRACKET_LEFT expr SQUARE_BRACKET_RIGHT. (16) - - . reduce 16 (src line 165) - - -state 30 - expr: expr.ARITH_OP expr - args: args COMMA expr. (18) - - ARITH_OP shift 17 - . reduce 18 (src line 181) - - -17 terminals, 8 nonterminals -21 grammar rules, 31/2000 states -0 shift/reduce, 0 reduce/reduce conflicts reported -57 working sets used -memory: parser 45/30000 -26 extra closures -67 shift entries, 1 exceptions -16 goto entries -31 entries saved by goto default -Optimizer space used: output 37/30000 -37 table entries, 1 zero -maximum spread: 17, maximum offset: 28 diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go index bf13534e..9d80c42b 100644 --- a/vendor/github.com/hashicorp/terraform/config/append.go +++ b/vendor/github.com/hashicorp/terraform/config/append.go @@ -35,6 +35,16 @@ func Append(c1, c2 *Config) (*Config, error) { c.Atlas = c2.Atlas } + // merge Terraform blocks + if c1.Terraform != nil { + c.Terraform = c1.Terraform + if c2.Terraform != nil { + c.Terraform.Merge(c2.Terraform) + } + } else { + c.Terraform = c2.Terraform + } + if len(c1.Modules) > 0 || len(c2.Modules) > 0 { c.Modules = make( []*Module, 0, len(c1.Modules)+len(c2.Modules)) @@ -72,5 +82,11 @@ func Append(c1, c2 *Config) (*Config, error) { c.Variables = append(c.Variables, c2.Variables...) } + if len(c1.Locals) > 0 || len(c2.Locals) > 0 { + c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals)) + c.Locals = append(c.Locals, c1.Locals...) + c.Locals = append(c.Locals, c2.Locals...) + } + return c, nil } diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go index da2186b3..1772fd7e 100644 --- a/vendor/github.com/hashicorp/terraform/config/config.go +++ b/vendor/github.com/hashicorp/terraform/config/config.go @@ -8,16 +8,17 @@ import ( "strconv" "strings" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hil" + hcl2 "github.com/hashicorp/hcl2/hcl" "github.com/hashicorp/hil/ast" "github.com/hashicorp/terraform/helper/hilmapstructure" + "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/reflectwalk" ) // NameRegexp is the regular expression that all names (modules, providers, // resources, etc.) must follow. -var NameRegexp = regexp.MustCompile(`\A[A-Za-z0-9\-\_]+\z`) +var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`) // Config is the configuration that comes from loading a collection // of Terraform templates. @@ -27,11 +28,13 @@ type Config struct { // any meaningful directory. 
Dir string + Terraform *Terraform Atlas *AtlasConfig Modules []*Module ProviderConfigs []*ProviderConfig Resources []*Resource Variables []*Variable + Locals []*Local Outputs []*Output // The fields below can be filled in by loaders for validation @@ -53,6 +56,8 @@ type AtlasConfig struct { type Module struct { Name string Source string + Version string + Providers map[string]string RawConfig *RawConfig } @@ -63,6 +68,7 @@ type Module struct { type ProviderConfig struct { Name string Alias string + Version string RawConfig *RawConfig } @@ -128,6 +134,9 @@ type Provisioner struct { Type string RawConfig *RawConfig ConnInfo *RawConfig + + When ProvisionerWhen + OnFailure ProvisionerOnFailure } // Copy returns a copy of this Provisioner @@ -136,10 +145,12 @@ func (p *Provisioner) Copy() *Provisioner { Type: p.Type, RawConfig: p.RawConfig.Copy(), ConnInfo: p.ConnInfo.Copy(), + When: p.When, + OnFailure: p.OnFailure, } } -// Variable is a variable defined within the configuration. +// Variable is a module argument defined within the configuration. type Variable struct { Name string DeclaredType string `mapstructure:"type"` @@ -147,14 +158,22 @@ type Variable struct { Description string } +// Local is a local value defined within the configuration. +type Local struct { + Name string + RawConfig *RawConfig +} + // Output is an output defined within the configuration. An output is // resulting data that is highlighted by Terraform when finished. An // output marked Sensitive will be output in a masked form following // application, but will still be available in state. type Output struct { - Name string - Sensitive bool - RawConfig *RawConfig + Name string + DependsOn []string + Description string + Sensitive bool + RawConfig *RawConfig } // VariableType is the type of value a variable is holding, and returned @@ -203,9 +222,19 @@ func (r *Module) Id() string { // Count returns the count of this resource. func (r *Resource) Count() (int, error) { - v, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0) + raw := r.RawCount.Value() + count, ok := r.RawCount.Value().(string) + if !ok { + return 0, fmt.Errorf( + "expected count to be a string or int, got %T", raw) + } + + v, err := strconv.ParseInt(count, 0, 0) if err != nil { - return 0, err + return 0, fmt.Errorf( + "cannot parse %q as an integer", + count, + ) } return int(v), nil @@ -223,42 +252,98 @@ func (r *Resource) Id() string { } } +// ProviderFullName returns the full name of the provider for this resource, +// which may either be specified explicitly using the "provider" meta-argument +// or implied by the prefix on the resource type name. +func (r *Resource) ProviderFullName() string { + return ResourceProviderFullName(r.Type, r.Provider) +} + +// ResourceProviderFullName returns the full (dependable) name of the +// provider for a hypothetical resource with the given resource type and +// explicit provider string. If the explicit provider string is empty then +// the provider name is inferred from the resource type name. +func ResourceProviderFullName(resourceType, explicitProvider string) string { + if explicitProvider != "" { + // check for an explicit provider name, or return the original + parts := strings.SplitAfter(explicitProvider, "provider.") + return parts[len(parts)-1] + } + + idx := strings.IndexRune(resourceType, '_') + if idx == -1 { + // If no underscores, the resource name is assumed to be + // also the provider name, e.g. if the provider exposes + // only a single resource of each type. 
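+		// (e.g. a hypothetical resource type "consul" would map to
+		// a provider also named "consul".)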
+ return resourceType + } + + return resourceType[:idx] +} + // Validate does some basic semantic checking of the configuration. -func (c *Config) Validate() error { +func (c *Config) Validate() tfdiags.Diagnostics { if c == nil { return nil } - var errs []error + var diags tfdiags.Diagnostics for _, k := range c.unknownKeys { - errs = append(errs, fmt.Errorf( - "Unknown root level key: %s", k)) + diags = diags.Append( + fmt.Errorf("Unknown root level key: %s", k), + ) + } + + // Validate the Terraform config + if tf := c.Terraform; tf != nil { + errs := c.Terraform.Validate() + for _, err := range errs { + diags = diags.Append(err) + } } vars := c.InterpolatedVariables() varMap := make(map[string]*Variable) for _, v := range c.Variables { if _, ok := varMap[v.Name]; ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Variable '%s': duplicate found. Variable names must be unique.", - v.Name)) + v.Name, + )) } varMap[v.Name] = v } + for k, _ := range varMap { + if !NameRegexp.MatchString(k) { + diags = diags.Append(fmt.Errorf( + "variable %q: variable name must match regular expression %s", + k, NameRegexp, + )) + } + } + for _, v := range c.Variables { if v.Type() == VariableTypeUnknown { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Variable '%s': must be a string or a map", - v.Name)) + v.Name, + )) continue } interp := false - fn := func(ast.Node) (interface{}, error) { - interp = true + fn := func(n ast.Node) (interface{}, error) { + // LiteralNode is a literal string (outside of a ${ ... } sequence). + // interpolationWalker skips most of these. but in particular it + // visits those that have escaped sequences (like $${foo}) as a + // signal that *some* processing is required on this string. For + // our purposes here though, this is fine and not an interpolation. + if _, ok := n.(*ast.LiteralNode); !ok { + interp = true + } return "", nil } @@ -266,9 +351,10 @@ func (c *Config) Validate() error { if v.Default != nil { if err := reflectwalk.Walk(v.Default, w); err == nil { if interp { - errs = append(errs, fmt.Errorf( - "Variable '%s': cannot contain interpolations", - v.Name)) + diags = diags.Append(fmt.Errorf( + "variable %q: default may not contain interpolations", + v.Name, + )) } } } @@ -284,10 +370,11 @@ func (c *Config) Validate() error { } if _, ok := varMap[uv.Name]; !ok { - errs = append(errs, fmt.Errorf( - "%s: unknown variable referenced: '%s'. define it with 'variable' blocks", + diags = diags.Append(fmt.Errorf( + "%s: unknown variable referenced: '%s'; define it with a 'variable' block", source, - uv.Name)) + uv.Name, + )) } } } @@ -298,34 +385,55 @@ func (c *Config) Validate() error { switch v := rawV.(type) { case *CountVariable: if v.Type == CountValueInvalid { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: invalid count variable: %s", source, - v.FullKey())) + v.FullKey(), + )) } case *PathVariable: if v.Type == PathValueInvalid { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: invalid path variable: %s", source, - v.FullKey())) + v.FullKey(), + )) } } } } - // Check that providers aren't declared multiple times. - providerSet := make(map[string]struct{}) + // Check that providers aren't declared multiple times and that their + // version constraints, where present, are syntactically valid. 
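The version-constraint half of the check below parses p.Version through the vendored discovery.ConstraintStr type, which is assumed here to accept the same constraint syntax as the hashicorp/go-version library imported elsewhere in this patch (the discovery wrapper itself is not shown in this hunk). A minimal sketch of that accept/reject behavior, using go-version directly:

    package main

    import (
    	"fmt"

    	version "github.com/hashicorp/go-version"
    )

    func main() {
    	// A well-formed constraint parses cleanly.
    	if _, err := version.NewConstraint("~> 1.2"); err != nil {
    		fmt.Println("unexpected:", err)
    	}

    	// A malformed one fails to parse; Validate surfaces that as the
    	// "Invalid provider version constraint" diagnostic.
    	if _, err := version.NewConstraint("banana"); err != nil {
    		fmt.Println("rejected:", err)
    	}
    }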
+ providerSet := make(map[string]bool) for _, p := range c.ProviderConfigs { name := p.FullName() if _, ok := providerSet[name]; ok { - errs = append(errs, fmt.Errorf( - "provider.%s: declared multiple times, you can only declare a provider once", - name)) + diags = diags.Append(fmt.Errorf( + "provider.%s: multiple configurations present; only one configuration is allowed per provider", + name, + )) continue } - providerSet[name] = struct{}{} + if p.Version != "" { + _, err := discovery.ConstraintStr(p.Version).Parse() + if err != nil { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid provider version constraint", + Detail: fmt.Sprintf( + "The value %q given for provider.%s is not a valid version constraint.", + p.Version, name, + ), + // TODO: include a "Subject" source reference in here, + // once the config loader is able to retain source + // location information. + }) + } + } + + providerSet[name] = true } // Check that all references to modules are valid @@ -337,9 +445,10 @@ func (c *Config) Validate() error { if _, ok := dupped[m.Id()]; !ok { dupped[m.Id()] = struct{}{} - errs = append(errs, fmt.Errorf( - "%s: module repeated multiple times", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module repeated multiple times", + m.Id(), + )) } // Already seen this module, just skip it @@ -353,21 +462,23 @@ func (c *Config) Validate() error { "root": m.Source, }) if err != nil { - errs = append(errs, fmt.Errorf( - "%s: module source error: %s", - m.Id(), err)) + diags = diags.Append(fmt.Errorf( + "module %q: module source error: %s", + m.Id(), err, + )) } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( - "%s: module source cannot contain interpolations", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module source cannot contain interpolations", + m.Id(), + )) } // Check that the name matches our regexp if !NameRegexp.Match([]byte(m.Name)) { - errs = append(errs, fmt.Errorf( - "%s: module name can only contain letters, numbers, "+ - "dashes, and underscores", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module name must be a letter or underscore followed by only letters, numbers, dashes, and underscores", + m.Id(), + )) } // Check that the configuration can all be strings, lists or maps @@ -391,30 +502,47 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: variable %s must be a string, list or map value", - m.Id(), k)) + diags = diags.Append(fmt.Errorf( + "module %q: argument %s must have a string, list, or map value", + m.Id(), k, + )) } // Check for invalid count variables for _, v := range m.RawConfig.Variables { switch v.(type) { case *CountVariable: - errs = append(errs, fmt.Errorf( - "%s: count variables are only valid within resources", m.Name)) + diags = diags.Append(fmt.Errorf( + "module %q: count variables are only valid within resources", + m.Name, + )) case *SelfVariable: - errs = append(errs, fmt.Errorf( - "%s: self variables are only valid within resources", m.Name)) + diags = diags.Append(fmt.Errorf( + "module %q: self variables are only valid within resources", + m.Name, + )) } } // Update the raw configuration to only contain the string values m.RawConfig, err = NewRawConfig(raw) if err != nil { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: can't initialize configuration: %s", - m.Id(), err)) + m.Id(), err, + )) + } + + // check that all named providers actually exist + for _, p := range m.Providers { + 
if !providerSet[p] { + diags = diags.Append(fmt.Errorf( + "module %q: cannot pass non-existent provider %q", + m.Name, p, + )) + } } + } dupped = nil @@ -428,10 +556,10 @@ func (c *Config) Validate() error { } if _, ok := modules[mv.Name]; !ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: unknown module referenced: %s", - source, - mv.Name)) + source, mv.Name, + )) } } } @@ -444,9 +572,10 @@ func (c *Config) Validate() error { if _, ok := dupped[r.Id()]; !ok { dupped[r.Id()] = struct{}{} - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource repeated multiple times", - r.Id())) + r.Id(), + )) } } @@ -460,86 +589,46 @@ func (c *Config) Validate() error { for _, v := range r.RawCount.Variables { switch v.(type) { case *CountVariable: - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource count can't reference count variable: %s", - n, - v.FullKey())) - case *ModuleVariable: - errs = append(errs, fmt.Errorf( - "%s: resource count can't reference module variable: %s", - n, - v.FullKey())) - case *ResourceVariable: - errs = append(errs, fmt.Errorf( - "%s: resource count can't reference resource variable: %s", - n, - v.FullKey())) + n, v.FullKey(), + )) case *SimpleVariable: - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource count can't reference variable: %s", - n, - v.FullKey())) + n, v.FullKey(), + )) + + // Good + case *ModuleVariable: + case *ResourceVariable: + case *TerraformVariable: case *UserVariable: - // Good + case *LocalVariable: + default: - panic(fmt.Sprintf("Unknown type in count var in %s: %T", n, v)) + diags = diags.Append(fmt.Errorf( + "Internal error. Unknown type in count var in %s: %T", + n, v, + )) } } - // Interpolate with a fixed number to verify that its a number. - r.RawCount.interpolate(func(root ast.Node) (interface{}, error) { - // Execute the node but transform the AST so that it returns - // a fixed value of "5" for all interpolations. 
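The removed block below verified count by evaluating the raw expression with every interpolation pinned to the literal "5" and then parsing the result; the new couldBeInteger check (defined outside this hunk) defers that work. Either way, the rendered value must eventually survive the same base-0 strconv.ParseInt call used by Resource.Count. A small illustration with made-up inputs:

    package main

    import (
    	"fmt"
    	"strconv"
    )

    func main() {
    	for _, raw := range []string{"3", "0x10", "five"} {
    		// Base 0 lets ParseInt accept decimal, hex, and octal
    		// forms, mirroring the parse in Resource.Count.
    		n, err := strconv.ParseInt(raw, 0, 0)
    		if err != nil {
    			fmt.Printf("%q: resource count must be an integer\n", raw)
    			continue
    		}
    		fmt.Printf("%q -> %d\n", raw, n)
    	}
    }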
- result, err := hil.Eval( - hil.FixedValueTransform( - root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}), - nil) - if err != nil { - return "", err - } - - return result.Value, nil - }) - _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0) - if err != nil { - errs = append(errs, fmt.Errorf( - "%s: resource count must be an integer", - n)) + if !r.RawCount.couldBeInteger() { + diags = diags.Append(fmt.Errorf( + "%s: resource count must be an integer", n, + )) } r.RawCount.init() - // Verify depends on points to resources that all exist - for _, d := range r.DependsOn { - // Check if we contain interpolations - rc, err := NewRawConfig(map[string]interface{}{ - "value": d, - }) - if err == nil && len(rc.Variables) > 0 { - errs = append(errs, fmt.Errorf( - "%s: depends on value cannot contain interpolations: %s", - n, d)) - continue - } - - if _, ok := resources[d]; !ok { - errs = append(errs, fmt.Errorf( - "%s: resource depends on non-existent resource '%s'", - n, d)) - } - } - - // Verify provider points to a provider that is configured - if r.Provider != "" { - if _, ok := providerSet[r.Provider]; !ok { - errs = append(errs, fmt.Errorf( - "%s: resource depends on non-configured provider '%s'", - n, r.Provider)) - } + // Validate DependsOn + for _, err := range c.validateDependsOn(n, r.DependsOn, resources, modules) { + diags = diags.Append(err) } - // Verify provisioners don't contain any splats + // Verify provisioners for _, p := range r.Provisioners { - // This validation checks that there are now splat variables + // This validation checks that there are no splat variables // referencing ourself. This currently is not allowed. for _, v := range p.ConnInfo.Variables { @@ -549,9 +638,10 @@ func (c *Config) Validate() error { } if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - errs = append(errs, fmt.Errorf( - "%s: connection info cannot contain splat variable "+ - "referencing itself", n)) + diags = diags.Append(fmt.Errorf( + "%s: connection info cannot contain splat variable referencing itself", + n, + )) break } } @@ -563,20 +653,61 @@ func (c *Config) Validate() error { } if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - errs = append(errs, fmt.Errorf( - "%s: connection info cannot contain splat variable "+ - "referencing itself", n)) + diags = diags.Append(fmt.Errorf( + "%s: connection info cannot contain splat variable referencing itself", + n, + )) break } } + + // Check for invalid when/onFailure values, though this should be + // picked up by the loader we check here just in case. 
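The two checks that follow rely on ProvisionerWhen and ProvisionerOnFailure reserving their zero value as the invalid case, so a value the loader never set is detectable. Those types are defined outside this hunk; a hypothetical mirror of the pattern (only ProvisionerWhenCreate is confirmed by this patch, in config_string.go below; the other names are assumptions):

    package main

    import "fmt"

    // Hypothetical stand-in for config.ProvisionerWhen: the zero value
    // is deliberately the invalid one.
    type ProvisionerWhen int

    const (
    	ProvisionerWhenInvalid ProvisionerWhen = iota
    	ProvisionerWhenCreate
    	ProvisionerWhenDestroy
    )

    func main() {
    	var w ProvisionerWhen // zero value: never set by a loader
    	if w == ProvisionerWhenInvalid {
    		fmt.Println("provisioner 'when' value is invalid")
    	}
    }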
+ if p.When == ProvisionerWhenInvalid { + diags = diags.Append(fmt.Errorf( + "%s: provisioner 'when' value is invalid", n, + )) + } + if p.OnFailure == ProvisionerOnFailureInvalid { + diags = diags.Append(fmt.Errorf( + "%s: provisioner 'on_failure' value is invalid", n, + )) + } } // Verify ignore_changes contains valid entries for _, v := range r.Lifecycle.IgnoreChanges { if strings.Contains(v, "*") && v != "*" { - errs = append(errs, fmt.Errorf( - "%s: ignore_changes does not support using a partial string "+ - "together with a wildcard: %s", n, v)) + diags = diags.Append(fmt.Errorf( + "%s: ignore_changes does not support using a partial string together with a wildcard: %s", + n, v, + )) + } + } + + // Verify ignore_changes has no interpolations + rc, err := NewRawConfig(map[string]interface{}{ + "root": r.Lifecycle.IgnoreChanges, + }) + if err != nil { + diags = diags.Append(fmt.Errorf( + "%s: lifecycle ignore_changes error: %s", + n, err, + )) + } else if len(rc.Interpolations) > 0 { + diags = diags.Append(fmt.Errorf( + "%s: lifecycle ignore_changes cannot contain interpolations", + n, + )) + } + + // If it is a data source then it can't have provisioners + if r.Mode == DataResourceMode { + if _, ok := r.RawConfig.Raw["provisioner"]; ok { + diags = diags.Append(fmt.Errorf( + "%s: data sources cannot have provisioners", + n, + )) } } } @@ -590,25 +721,50 @@ func (c *Config) Validate() error { id := rv.ResourceId() if _, ok := resources[id]; !ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: unknown resource '%s' referenced in variable %s", source, id, - rv.FullKey())) + rv.FullKey(), + )) continue } } } + // Check that all locals are valid + { + found := make(map[string]struct{}) + for _, l := range c.Locals { + if _, ok := found[l.Name]; ok { + diags = diags.Append(fmt.Errorf( + "%s: duplicate local. local value names must be unique", + l.Name, + )) + continue + } + found[l.Name] = struct{}{} + + for _, v := range l.RawConfig.Variables { + if _, ok := v.(*CountVariable); ok { + diags = diags.Append(fmt.Errorf( + "local %s: count variables are only valid within resources", l.Name, + )) + } + } + } + } + // Check that all outputs are valid { found := make(map[string]struct{}) for _, o := range c.Outputs { // Verify the output is new if _, ok := found[o.Name]; ok { - errs = append(errs, fmt.Errorf( - "%s: duplicate output. 
output names must be unique.", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: an output of this name was already defined", + o.Name, + )) continue } found[o.Name] = struct{}{} @@ -628,40 +784,92 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: value for 'sensitive' must be boolean", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: value for 'sensitive' must be boolean", + o.Name, + )) + continue + } + if k == "description" { + if desc, ok := o.RawConfig.config[k].(string); ok { + o.Description = desc + continue + } + + diags = diags.Append(fmt.Errorf( + "output %q: value for 'description' must be string", + o.Name, + )) continue } invalidKeys = append(invalidKeys, k) } if len(invalidKeys) > 0 { - errs = append(errs, fmt.Errorf( - "%s: output has invalid keys: %s", - o.Name, strings.Join(invalidKeys, ", "))) + diags = diags.Append(fmt.Errorf( + "output %q: invalid keys: %s", + o.Name, strings.Join(invalidKeys, ", "), + )) } if !valueKeyFound { - errs = append(errs, fmt.Errorf( - "%s: output is missing required 'value' key", o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: missing required 'value' argument", o.Name, + )) } for _, v := range o.RawConfig.Variables { if _, ok := v.(*CountVariable); ok { - errs = append(errs, fmt.Errorf( - "%s: count variables are only valid within resources", o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: count variables are only valid within resources", + o.Name, + )) } } - } - } - // Check that all variables are in the proper context - for source, rc := range c.rawConfigs() { - walker := &interpolationWalker{ - ContextF: c.validateVarContextFn(source, &errs), - } - if err := reflectwalk.Walk(rc.Raw, walker); err != nil { - errs = append(errs, fmt.Errorf( - "%s: error reading config: %s", source, err)) + // Detect a common mistake of using a "count"ed resource in + // an output value without using the splat or index form. + // Prior to 0.11 this error was silently ignored, but outputs + // now have their errors checked like all other contexts. + // + // TODO: Remove this in 0.12. + for _, v := range o.RawConfig.Variables { + rv, ok := v.(*ResourceVariable) + if !ok { + continue + } + + // If the variable seems to be treating the referenced + // resource as a singleton (no count specified) then + // we'll check to make sure it is indeed a singleton. + // It's a warning if not. + + if rv.Multi || rv.Index != 0 { + // This reference is treating the resource as a + // multi-resource, so the warning doesn't apply. + continue + } + + for _, r := range c.Resources { + if r.Id() != rv.ResourceId() { + continue + } + + // We test specifically for the raw string "1" here + // because we _do_ want to generate this warning if + // the user has provided an expression that happens + // to return 1 right now, to catch situations where + // a count might dynamically be set to something + // other than 1 and thus splat syntax is still needed + // to be safe. 
+ if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" && rv.Field != "count" { + diags = diags.Append(tfdiags.SimpleWarning(fmt.Sprintf( + "output %q: must use splat syntax to access %s attribute %q, because it has \"count\" set; use %s.*.%s to obtain a list of the attributes across all instances", + o.Name, + r.Id(), rv.Field, + r.Id(), rv.Field, + ))) + } + } + } } } @@ -675,17 +883,15 @@ func (c *Config) Validate() error { for _, v := range rc.Variables { if _, ok := v.(*SelfVariable); ok { - errs = append(errs, fmt.Errorf( - "%s: cannot contain self-reference %s", source, v.FullKey())) + diags = diags.Append(fmt.Errorf( + "%s: cannot contain self-reference %s", + source, v.FullKey(), + )) } } } - if len(errs) > 0 { - return &multierror.Error{Errors: errs} - } - - return nil + return diags } // InterpolatedVariables is a helper that returns a mapping of all the interpolated @@ -736,55 +942,46 @@ func (c *Config) rawConfigs() map[string]*RawConfig { return result } -func (c *Config) validateVarContextFn( - source string, errs *[]error) interpolationWalkerContextFunc { - return func(loc reflectwalk.Location, node ast.Node) { - // If we're in a slice element, then its fine, since you can do - // anything in there. - if loc == reflectwalk.SliceElem { - return - } - - // Otherwise, let's check if there is a splat resource variable - // at the top level in here. We do this by doing a transform that - // replaces everything with a noop node unless its a variable - // access or concat. This should turn the AST into a flat tree - // of Concat(Noop, ...). If there are any variables left that are - // multi-access, then its still broken. - node = node.Accept(func(n ast.Node) ast.Node { - // If it is a concat or variable access, we allow it. - switch n.(type) { - case *ast.Output: - return n - case *ast.VariableAccess: - return n - } - - // Otherwise, noop - return &noopNode{} +func (c *Config) validateDependsOn( + n string, + v []string, + resources map[string]*Resource, + modules map[string]*Module) []error { + // Verify depends on points to resources that all exist + var errs []error + for _, d := range v { + // Check if we contain interpolations + rc, err := NewRawConfig(map[string]interface{}{ + "value": d, }) - - vars, err := DetectVariables(node) - if err != nil { - // Ignore it since this will be caught during parse. This - // actually probably should never happen by the time this - // is called, but its okay. 
- return + if err == nil && len(rc.Variables) > 0 { + errs = append(errs, fmt.Errorf( + "%s: depends on value cannot contain interpolations: %s", + n, d)) + continue } - for _, v := range vars { - rv, ok := v.(*ResourceVariable) - if !ok { - return + // If it is a module, verify it is a module + if strings.HasPrefix(d, "module.") { + name := d[len("module."):] + if _, ok := modules[name]; !ok { + errs = append(errs, fmt.Errorf( + "%s: resource depends on non-existent module '%s'", + n, name)) } - if rv.Multi && rv.Index == -1 { - *errs = append(*errs, fmt.Errorf( - "%s: use of the splat ('*') operator must be wrapped in a list declaration", - source)) - } + continue + } + + // Check resources + if _, ok := resources[d]; !ok { + errs = append(errs, fmt.Errorf( + "%s: resource depends on non-existent resource '%s'", + n, d)) } } + + return errs } func (m *Module) mergerName() string { @@ -814,7 +1011,10 @@ func (o *Output) mergerMerge(m merger) merger { result := *o result.Name = o2.Name + result.Description = o2.Description result.RawConfig = result.RawConfig.merge(o2.RawConfig) + result.Sensitive = o2.Sensitive + result.DependsOn = o2.DependsOn return &result } @@ -842,6 +1042,10 @@ func (c *ProviderConfig) mergerMerge(m merger) merger { result.Name = c2.Name result.RawConfig = result.RawConfig.merge(c2.RawConfig) + if c2.Alias != "" { + result.Alias = c2.Alias + } + return &result } @@ -877,6 +1081,9 @@ func (v *Variable) Merge(v2 *Variable) *Variable { // The names should be the same, but the second name always wins. result.Name = v2.Name + if v2.DeclaredType != "" { + result.DeclaredType = v2.DeclaredType + } if v2.Default != nil { result.Default = v2.Default } @@ -914,7 +1121,16 @@ func (v *Variable) ValidateTypeAndDefault() error { // If an explicit type is declared, ensure it is valid if v.DeclaredType != "" { if _, ok := typeStringMap[v.DeclaredType]; !ok { - return fmt.Errorf("Variable '%s' must be of type string or map - '%s' is not a valid type", v.Name, v.DeclaredType) + validTypes := []string{} + for k := range typeStringMap { + validTypes = append(validTypes, k) + } + return fmt.Errorf( + "Variable '%s' type must be one of [%s] - '%s' is not a valid type", + v.Name, + strings.Join(validTypes, ", "), + v.DeclaredType, + ) } } diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go index 362dc4ad..a6933c2a 100644 --- a/vendor/github.com/hashicorp/terraform/config/config_string.go +++ b/vendor/github.com/hashicorp/terraform/config/config_string.go @@ -50,6 +50,26 @@ func (c *Config) TestString() string { return strings.TrimSpace(buf.String()) } +func terraformStr(t *Terraform) string { + result := "" + + if b := t.Backend; b != nil { + result += fmt.Sprintf("backend (%s)\n", b.Type) + + keys := make([]string, 0, len(b.RawConfig.Raw)) + for k, _ := range b.RawConfig.Raw { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + result += fmt.Sprintf(" %s\n", k) + } + } + + return strings.TrimSpace(result) +} + func modulesStr(ms []*Module) string { result := "" order := make([]int, 0, len(ms)) @@ -100,6 +120,13 @@ func outputsStr(os []*Output) string { result += fmt.Sprintf("%s\n", n) + if len(o.DependsOn) > 0 { + result += fmt.Sprintf(" dependsOn\n") + for _, d := range o.DependsOn { + result += fmt.Sprintf(" %s\n", d) + } + } + if len(o.RawConfig.Variables) > 0 { result += fmt.Sprintf(" vars\n") for _, rawV := range o.RawConfig.Variables { @@ -116,6 +143,46 @@ func 
outputsStr(os []*Output) string { result += fmt.Sprintf(" %s: %s\n", kind, str) } } + + if o.Description != "" { + result += fmt.Sprintf(" description\n %s\n", o.Description) + } + } + + return strings.TrimSpace(result) +} + +func localsStr(ls []*Local) string { + ns := make([]string, 0, len(ls)) + m := make(map[string]*Local) + for _, l := range ls { + ns = append(ns, l.Name) + m[l.Name] = l + } + sort.Strings(ns) + + result := "" + for _, n := range ns { + l := m[n] + + result += fmt.Sprintf("%s\n", n) + + if len(l.RawConfig.Variables) > 0 { + result += fmt.Sprintf(" vars\n") + for _, rawV := range l.RawConfig.Variables { + kind := "unknown" + str := rawV.FullKey() + + switch rawV.(type) { + case *ResourceVariable: + kind = "resource" + case *UserVariable: + kind = "user" + } + + result += fmt.Sprintf(" %s: %s\n", kind, str) + } + } } return strings.TrimSpace(result) @@ -207,7 +274,16 @@ func resourcesStr(rs []*Resource) string { if len(r.Provisioners) > 0 { result += fmt.Sprintf(" provisioners\n") for _, p := range r.Provisioners { - result += fmt.Sprintf(" %s\n", p.Type) + when := "" + if p.When != ProvisionerWhenCreate { + when = fmt.Sprintf(" (%s)", p.When.String()) + } + + result += fmt.Sprintf(" %s%s\n", p.Type, when) + + if p.OnFailure != ProvisionerOnFailureFail { + result += fmt.Sprintf(" on_failure = %s\n", p.OnFailure.String()) + } ks := make([]string, 0, len(p.RawConfig.Raw)) for k, _ := range p.RawConfig.Raw { diff --git a/vendor/github.com/hashicorp/terraform/config/config_terraform.go b/vendor/github.com/hashicorp/terraform/config/config_terraform.go new file mode 100644 index 00000000..8535c964 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/config_terraform.go @@ -0,0 +1,117 @@ +package config + +import ( + "fmt" + "strings" + + "github.com/hashicorp/go-version" + "github.com/mitchellh/hashstructure" +) + +// Terraform is the Terraform meta-configuration that can be present +// in configuration files for configuring Terraform itself. +type Terraform struct { + RequiredVersion string `hcl:"required_version"` // Required Terraform version (constraint) + Backend *Backend // See Backend struct docs +} + +// Validate performs the validation for just the Terraform configuration. +func (t *Terraform) Validate() []error { + var errs []error + + if raw := t.RequiredVersion; raw != "" { + // Check that the value has no interpolations + rc, err := NewRawConfig(map[string]interface{}{ + "root": raw, + }) + if err != nil { + errs = append(errs, fmt.Errorf( + "terraform.required_version: %s", err)) + } else if len(rc.Interpolations) > 0 { + errs = append(errs, fmt.Errorf( + "terraform.required_version: cannot contain interpolations")) + } else { + // Check it is valid + _, err := version.NewConstraint(raw) + if err != nil { + errs = append(errs, fmt.Errorf( + "terraform.required_version: invalid syntax: %s", err)) + } + } + } + + if t.Backend != nil { + errs = append(errs, t.Backend.Validate()...) + } + + return errs +} + +// Merge t with t2. +// Any conflicting fields are overwritten by t2. +func (t *Terraform) Merge(t2 *Terraform) { + if t2.RequiredVersion != "" { + t.RequiredVersion = t2.RequiredVersion + } + + if t2.Backend != nil { + t.Backend = t2.Backend + } +} + +// Backend is the configuration for the "backend" to use with Terraform. +// A backend is responsible for all major behavior of Terraform's core. +// The abstraction layer above the core (the "backend") allows for behavior +// such as remote operation. 
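Merge's rule just above is last-writer-wins per field: anything set in t2 overwrites t, which is how a later-loaded configuration (see the Terraform-block handling in append.go earlier in this patch) overrides required_version or the backend wholesale. A toy sketch of the same rule (tfBlock is a stand-in, not the real type; Backend is omitted):

    package main

    import "fmt"

    type tfBlock struct{ RequiredVersion string }

    // merge mirrors Terraform.Merge: a field set in t2 overwrites t.
    func merge(t, t2 *tfBlock) {
    	if t2.RequiredVersion != "" {
    		t.RequiredVersion = t2.RequiredVersion
    	}
    }

    func main() {
    	base := &tfBlock{RequiredVersion: ">= 0.10.0"}
    	override := &tfBlock{RequiredVersion: ">= 0.11.0"}
    	merge(base, override)
    	fmt.Println(base.RequiredVersion) // ">= 0.11.0"
    }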
+type Backend struct { + Type string + RawConfig *RawConfig + + // Hash is a unique hash code representing the original configuration + // of the backend. This won't be recomputed unless Rehash is called. + Hash uint64 +} + +// Rehash returns a unique content hash for this backend's configuration +// as a uint64 value. +func (b *Backend) Rehash() uint64 { + // If we have no backend, the value is zero + if b == nil { + return 0 + } + + // Use hashstructure to hash only our type with the config. + code, err := hashstructure.Hash(map[string]interface{}{ + "type": b.Type, + "config": b.RawConfig.Raw, + }, nil) + + // This should never happen since we have just some basic primitives + // so panic if there is an error. + if err != nil { + panic(err) + } + + return code +} + +func (b *Backend) Validate() []error { + if len(b.RawConfig.Interpolations) > 0 { + return []error{fmt.Errorf(strings.TrimSpace(errBackendInterpolations))} + } + + return nil +} + +const errBackendInterpolations = ` +terraform.backend: configuration cannot contain interpolations + +The backend configuration is loaded by Terraform extremely early, before +the core of Terraform can be initialized. This is necessary because the backend +dictates the behavior of that core. The core is what handles interpolation +processing. Because of this, interpolations cannot be used in backend +configuration. + +If you'd like to parameterize backend configuration, we recommend using +partial configuration with the "-backend-config" flag to "terraform init". +` diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go new file mode 100644 index 00000000..2b1b0cac --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go @@ -0,0 +1,97 @@ +package configschema + +import ( + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +var mapLabelNames = []string{"key"} + +// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body +// using the facilities in the hcldec package. +// +// The returned specification is guaranteed to return a value of the same type +// returned by method ImpliedType, but it may contain null or unknown values if +// any of the block attributes are defined as optional and/or computed +// respectively. +func (b *Block) DecoderSpec() hcldec.Spec { + ret := hcldec.ObjectSpec{} + if b == nil { + return ret + } + + for name, attrS := range b.Attributes { + switch { + case attrS.Computed && attrS.Optional: + // In this special case we use an unknown value as a default + // to get the intended behavior that the result is computed + // unless it has been explicitly set in config. + ret[name] = &hcldec.DefaultSpec{ + Primary: &hcldec.AttrSpec{ + Name: name, + Type: attrS.Type, + }, + Default: &hcldec.LiteralSpec{ + Value: cty.UnknownVal(attrS.Type), + }, + } + case attrS.Computed: + ret[name] = &hcldec.LiteralSpec{ + Value: cty.UnknownVal(attrS.Type), + } + default: + ret[name] = &hcldec.AttrSpec{ + Name: name, + Type: attrS.Type, + Required: attrS.Required, + } + } + } + + for name, blockS := range b.BlockTypes { + if _, exists := ret[name]; exists { + // This indicates an invalid schema, since it's not valid to + // define both an attribute and a block type of the same name. + // However, we don't raise this here since it's checked by + // InternalValidate. 
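The Optional+Computed case earlier in DecoderSpec is the subtle one: the attribute must decode to an unknown value when the configuration is silent, but to the configured value when one is present. A standalone sketch of that spec shape, assuming the vendored hcl2 packages (hclparse is used here only to produce a body to decode):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/hcl2/hcldec"
    	"github.com/hashicorp/hcl2/hclparse"
    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	// Equivalent of an Optional+Computed string attribute "ami":
    	// explicit config wins; otherwise the result is unknown.
    	spec := hcldec.ObjectSpec{
    		"ami": &hcldec.DefaultSpec{
    			Primary: &hcldec.AttrSpec{Name: "ami", Type: cty.String},
    			Default: &hcldec.LiteralSpec{Value: cty.UnknownVal(cty.String)},
    		},
    	}

    	p := hclparse.NewParser()
    	f, diags := p.ParseHCL([]byte(`ami = "ami-12345"`), "example.tf")
    	if diags.HasErrors() {
    		panic(diags)
    	}

    	v, diags := hcldec.Decode(f.Body, spec, nil)
    	if diags.HasErrors() {
    		panic(diags)
    	}
    	fmt.Printf("%#v\n", v.GetAttr("ami")) // cty.StringVal("ami-12345")
    }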
+ continue + } + + childSpec := blockS.Block.DecoderSpec() + + switch blockS.Nesting { + case NestingSingle: + ret[name] = &hcldec.BlockSpec{ + TypeName: name, + Nested: childSpec, + Required: blockS.MinItems == 1 && blockS.MaxItems >= 1, + } + case NestingList: + ret[name] = &hcldec.BlockListSpec{ + TypeName: name, + Nested: childSpec, + MinItems: blockS.MinItems, + MaxItems: blockS.MaxItems, + } + case NestingSet: + ret[name] = &hcldec.BlockSetSpec{ + TypeName: name, + Nested: childSpec, + MinItems: blockS.MinItems, + MaxItems: blockS.MaxItems, + } + case NestingMap: + ret[name] = &hcldec.BlockMapSpec{ + TypeName: name, + Nested: childSpec, + LabelNames: mapLabelNames, + } + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. + continue + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/doc.go b/vendor/github.com/hashicorp/terraform/config/configschema/doc.go new file mode 100644 index 00000000..caf8d730 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/doc.go @@ -0,0 +1,14 @@ +// Package configschema contains types for describing the expected structure +// of a configuration block whose shape is not known until runtime. +// +// For example, this is used to describe the expected contents of a resource +// configuration block, which is defined by the corresponding provider plugin +// and thus not compiled into Terraform core. +// +// A configschema primarily describes the shape of configuration, but it is +// also suitable for use with other structures derived from the configuration, +// such as the cached state of a resource or a resource diff. +// +// This package should not be confused with the package helper/schema, which +// is the higher-level helper library used to implement providers themselves. +package configschema diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go new file mode 100644 index 00000000..67324ebc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go @@ -0,0 +1,21 @@ +package configschema + +import ( + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType returns the cty.Type that would result from decoding a +// configuration block using the receiving block schema. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. Code that creates configschema.Block objects should be +// tested using the InternalValidate method to detect any inconsistencies +// that would cause this method to fall back on defaults and assumptions. 
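Concretely, for the method defined just below: a hand-built schema maps to a cty object type with one attribute per entry, and optionality only shows up later, as null values at decode time. A short sketch using the vendored import path:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/terraform/config/configschema"
    	"github.com/zclconf/go-cty/cty"
    )

    func main() {
    	b := &configschema.Block{
    		Attributes: map[string]*configschema.Attribute{
    			"name": {Type: cty.String, Required: true},
    			"tags": {Type: cty.Map(cty.String), Optional: true},
    		},
    	}
    	// Prints an object type with both "name" and "tags" attributes.
    	fmt.Printf("%#v\n", b.ImpliedType())
    }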
+func (b *Block) ImpliedType() cty.Type { + if b == nil { + return cty.EmptyObject + } + + return hcldec.ImpliedType(b.DecoderSpec()) +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go new file mode 100644 index 00000000..33cbe884 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go @@ -0,0 +1,92 @@ +package configschema + +import ( + "fmt" + "regexp" + + "github.com/zclconf/go-cty/cty" + + multierror "github.com/hashicorp/go-multierror" +) + +var validName = regexp.MustCompile(`^[a-z0-9_]+$`) + +// InternalValidate returns an error if the receiving block and its child +// schema definitions have any consistencies with the documented rules for +// valid schema. +// +// This is intended to be used within unit tests to detect when a given +// schema is invalid. +func (b *Block) InternalValidate() error { + if b == nil { + return fmt.Errorf("top-level block schema is nil") + } + return b.internalValidate("", nil) + +} + +func (b *Block) internalValidate(prefix string, err error) error { + for name, attrS := range b.Attributes { + if attrS == nil { + err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name)) + continue + } + if !validName.MatchString(name) { + err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) + } + if attrS.Optional == false && attrS.Required == false && attrS.Computed == false { + err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name)) + } + if attrS.Optional && attrS.Required { + err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name)) + } + if attrS.Computed && attrS.Required { + err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name)) + } + if attrS.Type == cty.NilType { + err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name)) + } + } + + for name, blockS := range b.BlockTypes { + if blockS == nil { + err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name)) + continue + } + + if _, isAttr := b.Attributes[name]; isAttr { + err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name)) + } else if !validName.MatchString(name) { + err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name)) + } + + if blockS.MinItems < 0 || blockS.MaxItems < 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than zero", prefix, name)) + } + + switch blockS.Nesting { + case NestingSingle: + switch { + case blockS.MinItems != blockS.MaxItems: + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name)) + case blockS.MinItems < 0 || blockS.MinItems > 1: + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) + } + case NestingList, NestingSet: + if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) + } + case NestingMap: + if blockS.MinItems != 0 || 
blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) + } + default: + err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) + } + + subPrefix := prefix + name + "." + err = blockS.Block.internalValidate(subPrefix, err) + } + + return err +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go new file mode 100644 index 00000000..6cb9313e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=NestingMode"; DO NOT EDIT. + +package configschema + +import "strconv" + +const _NestingMode_name = "nestingModeInvalidNestingSingleNestingListNestingSetNestingMap" + +var _NestingMode_index = [...]uint8{0, 18, 31, 42, 52, 62} + +func (i NestingMode) String() string { + if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { + return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/schema.go b/vendor/github.com/hashicorp/terraform/config/configschema/schema.go new file mode 100644 index 00000000..9a8ee550 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/configschema/schema.go @@ -0,0 +1,107 @@ +package configschema + +import ( + "github.com/zclconf/go-cty/cty" +) + +// Block represents a configuration block. +// +// "Block" here is a logical grouping construct, though it happens to map +// directly onto the physical block syntax of Terraform's native configuration +// syntax. It may be a more a matter of convention in other syntaxes, such as +// JSON. +// +// When converted to a value, a Block always becomes an instance of an object +// type derived from its defined attributes and nested blocks +type Block struct { + // Attributes describes any attributes that may appear directly inside + // the block. + Attributes map[string]*Attribute + + // BlockTypes describes any nested block types that may appear directly + // inside the block. + BlockTypes map[string]*NestedBlock +} + +// Attribute represents a configuration attribute, within a block. +type Attribute struct { + // Type is a type specification that the attribute's value must conform to. + Type cty.Type + + // Required, if set to true, specifies that an omitted or null value is + // not permitted. + Required bool + + // Optional, if set to true, specifies that an omitted or null value is + // permitted. This field conflicts with Required. + Optional bool + + // Computed, if set to true, specifies that the value comes from the + // provider rather than from configuration. If combined with Optional, + // then the config may optionally provide an overridden value. + Computed bool + + // Sensitive, if set to true, indicates that an attribute may contain + // sensitive information. + // + // At present nothing is done with this information, but callers are + // encouraged to set it where appropriate so that it may be used in the + // future to help Terraform mask sensitive information. (Terraform + // currently achieves this in a limited sense via other mechanisms.) + Sensitive bool +} + +// NestedBlock represents the embedding of one block within another. 
+type NestedBlock struct { + // Block is the description of the block that's nested. + Block + + // Nesting provides the nesting mode for the child block, which determines + // how many instances of the block are allowed, how many labels it expects, + // and how the resulting data will be converted into a data structure. + Nesting NestingMode + + // MinItems and MaxItems set, for the NestingList and NestingSet nesting + // modes, lower and upper limits on the number of child blocks allowed + // of the given type. If both are left at zero, no limit is applied. + // + // As a special case, both values can be set to 1 for NestingSingle in + // order to indicate that a particular single block is required. + // + // These fields are ignored for other nesting modes and must both be left + // at zero. + MinItems, MaxItems int +} + +// NestingMode is an enumeration of modes for nesting blocks inside other +// blocks. +type NestingMode int + +//go:generate stringer -type=NestingMode + +const ( + nestingModeInvalid NestingMode = iota + + // NestingSingle indicates that only a single instance of a given + // block type is permitted, with no labels, and its content should be + // provided directly as an object value. + NestingSingle + + // NestingList indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a list. + NestingList + + // NestingSet indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a set. + NestingSet + + // NestingMap indicates that multiple blocks of the given type are + // permitted, each with a single label, and that their corresponding + // objects should be provided in a map whose keys are the labels. + // + // It's an error, therefore, to use the same label value on multiple + // blocks. + NestingMap +) diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go b/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go new file mode 100644 index 00000000..207d1059 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go @@ -0,0 +1,134 @@ +package config + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty/function/stdlib" + + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/terraform/config/hcl2shim" + + hcl2 "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// --------------------------------------------------------------------------- +// This file contains some helper functions that are used to shim between +// HCL2 concepts and HCL/HIL concepts, to help us mostly preserve the existing +// public API that was built around HCL/HIL-oriented approaches. +// --------------------------------------------------------------------------- + +func hcl2InterpolationFuncs() map[string]function.Function { + hcl2Funcs := map[string]function.Function{} + + for name, hilFunc := range Funcs() { + hcl2Funcs[name] = hcl2InterpolationFuncShim(hilFunc) + } + + // Some functions in the old world are dealt with inside langEvalConfig + // due to their legacy reliance on direct access to the symbol table. + // Since 0.7 they don't actually need it anymore and just ignore it, + // so we're cheating a bit here and exploiting that detail by passing nil. 
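What the shim produces for each entry is an ordinary go-cty function. For orientation, here is a hand-written function of the shape hcl2InterpolationFuncShim generates for a one-argument HIL string function (strings.ToUpper stands in for a real HIL callback):

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/zclconf/go-cty/cty"
    	"github.com/zclconf/go-cty/cty/function"
    )

    func main() {
    	upper := function.New(&function.Spec{
    		// HIL args are positional and unnamed, hence "arg1".
    		Params: []function.Parameter{{Name: "arg1", Type: cty.String}},
    		Type:   function.StaticReturnType(cty.String),
    		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
    			return cty.StringVal(strings.ToUpper(args[0].AsString())), nil
    		},
    	})

    	v, err := upper.Call([]cty.Value{cty.StringVal("hello")})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%#v\n", v) // cty.StringVal("HELLO")
    }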
+ hcl2Funcs["lookup"] = hcl2InterpolationFuncShim(interpolationFuncLookup(nil)) + hcl2Funcs["keys"] = hcl2InterpolationFuncShim(interpolationFuncKeys(nil)) + hcl2Funcs["values"] = hcl2InterpolationFuncShim(interpolationFuncValues(nil)) + + // As a bonus, we'll provide the JSON-handling functions from the cty + // function library since its "jsonencode" is more complete (doesn't force + // weird type conversions) and HIL's type system can't represent + // "jsondecode" at all. The result of jsondecode will eventually be forced + // to conform to the HIL type system on exit into the rest of Terraform due + // to our shimming right now, but it should be usable for decoding _within_ + // an expression. + hcl2Funcs["jsonencode"] = stdlib.JSONEncodeFunc + hcl2Funcs["jsondecode"] = stdlib.JSONDecodeFunc + + return hcl2Funcs +} + +func hcl2InterpolationFuncShim(hilFunc ast.Function) function.Function { + spec := &function.Spec{} + + for i, hilArgType := range hilFunc.ArgTypes { + spec.Params = append(spec.Params, function.Parameter{ + Type: hcl2shim.HCL2TypeForHILType(hilArgType), + Name: fmt.Sprintf("arg%d", i+1), // HIL args don't have names, so we'll fudge it + }) + } + + if hilFunc.Variadic { + spec.VarParam = &function.Parameter{ + Type: hcl2shim.HCL2TypeForHILType(hilFunc.VariadicType), + Name: "varargs", // HIL args don't have names, so we'll fudge it + } + } + + spec.Type = func(args []cty.Value) (cty.Type, error) { + return hcl2shim.HCL2TypeForHILType(hilFunc.ReturnType), nil + } + spec.Impl = func(args []cty.Value, retType cty.Type) (cty.Value, error) { + hilArgs := make([]interface{}, len(args)) + for i, arg := range args { + hilV := hcl2shim.HILVariableFromHCL2Value(arg) + + // Although the cty function system does automatic type conversions + // to match the argument types, cty doesn't distinguish int and + // float and so we may need to adjust here to ensure that the + // wrapped function gets exactly the Go type it was expecting. + var wantType ast.Type + if i < len(hilFunc.ArgTypes) { + wantType = hilFunc.ArgTypes[i] + } else { + wantType = hilFunc.VariadicType + } + switch { + case hilV.Type == ast.TypeInt && wantType == ast.TypeFloat: + hilV.Type = wantType + hilV.Value = float64(hilV.Value.(int)) + case hilV.Type == ast.TypeFloat && wantType == ast.TypeInt: + hilV.Type = wantType + hilV.Value = int(hilV.Value.(float64)) + } + + // HIL functions actually expect to have the outermost variable + // "peeled" but any nested values (in lists or maps) will + // still have their ast.Variable wrapping. + hilArgs[i] = hilV.Value + } + + hilResult, err := hilFunc.Callback(hilArgs) + if err != nil { + return cty.DynamicVal, err + } + + // Just as on the way in, we get back a partially-peeled ast.Variable + // which we need to re-wrap in order to convert it back into what + // we're calling a "config value". 
+ rv := hcl2shim.HCL2ValueFromHILVariable(ast.Variable{ + Type: hilFunc.ReturnType, + Value: hilResult, + }) + + return convert.Convert(rv, retType) // if result is unknown we'll force the correct type here + } + return function.New(spec) +} + +func hcl2EvalWithUnknownVars(expr hcl2.Expression) (cty.Value, hcl2.Diagnostics) { + trs := expr.Variables() + vars := map[string]cty.Value{} + val := cty.DynamicVal + + for _, tr := range trs { + name := tr.RootName() + vars[name] = val + } + + ctx := &hcl2.EvalContext{ + Variables: vars, + Functions: hcl2InterpolationFuncs(), + } + return expr.Value(ctx) +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go new file mode 100644 index 00000000..19651c81 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go @@ -0,0 +1,85 @@ +package hcl2shim + +import ( + "fmt" + + hcl2 "github.com/hashicorp/hcl2/hcl" +) + +// SingleAttrBody is a weird implementation of hcl2.Body that acts as if +// it has a single attribute whose value is the given expression. +// +// This is used to shim Resource.RawCount and Output.RawConfig to behave +// more like they do in the old HCL loader. +type SingleAttrBody struct { + Name string + Expr hcl2.Expression +} + +var _ hcl2.Body = SingleAttrBody{} + +func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + if !all { + // This should never happen because this body implementation should only + // be used by code that is aware that it's using a single-attr body. + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid attribute", + Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + return content, diags +} + +func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + var remain hcl2.Body + if all { + // If the request matched the one attribute we represent, then the + // remaining body is empty. 
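A short sketch of the intended use of SingleAttrBody: wrap a bare expression so code that expects a whole body can treat it as one attribute. The expression here is parsed with hclsyntax, assumed to be available alongside the vendored hcl2 packages:

    package main

    import (
    	"fmt"

    	hcl2 "github.com/hashicorp/hcl2/hcl"
    	"github.com/hashicorp/hcl2/hcl/hclsyntax"
    	"github.com/hashicorp/terraform/config/hcl2shim"
    )

    func main() {
    	expr, diags := hclsyntax.ParseExpression(
    		[]byte(`1 + 2`), "count", hcl2.Pos{Line: 1, Column: 1})
    	if diags.HasErrors() {
    		panic(diags)
    	}

    	body := hcl2shim.SingleAttrBody{Name: "count", Expr: expr}

    	// The body reports exactly one attribute, named as configured.
    	attrs, _ := body.JustAttributes()
    	v, _ := attrs["count"].Expr.Value(nil)
    	fmt.Printf("%#v\n", v) // cty.NumberIntVal(3)
    }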
+ remain = hcl2.EmptyBody() + } else { + remain = b + } + return content, remain, diags +} + +func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { + ret := &hcl2.BodyContent{} + all := false + var diags hcl2.Diagnostics + + for _, attrS := range schema.Attributes { + if attrS.Name == b.Name { + attrs, _ := b.JustAttributes() + ret.Attributes = attrs + all = true + } else if attrS.Required { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Missing attribute", + Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + } + + return ret, all, diags +} + +func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { + return hcl2.Attributes{ + b.Name: { + Expr: b.Expr, + Name: b.Name, + NameRange: b.Expr.Range(), + Range: b.Expr.Range(), + }, + }, nil +} + +func (b SingleAttrBody) MissingItemRange() hcl2.Range { + return b.Expr.Range() +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go new file mode 100644 index 00000000..0b697a5f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go @@ -0,0 +1,246 @@ +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/hashicorp/hil/ast" + "github.com/zclconf/go-cty/cty" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. +// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. + // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. 
+ const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + l[ek.AsString()] = ConfigValueFromHCL2(ev) + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. +func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. + panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} + +func HILVariableFromHCL2Value(v cty.Value) ast.Variable { + if v.IsNull() { + // Caller should guarantee/check this before calling + panic("Null values cannot be represented in HIL") + } + if !v.IsKnown() { + return ast.Variable{ + Type: ast.TypeUnknown, + Value: UnknownVariableValue, + } + } + + switch v.Type() { + case cty.Bool: + return ast.Variable{ + Type: ast.TypeBool, + Value: v.True(), + } + case cty.Number: + v := ConfigValueFromHCL2(v) + switch tv := v.(type) { + case int: + return ast.Variable{ + Type: ast.TypeInt, + Value: tv, + } + case float64: + return ast.Variable{ + Type: ast.TypeFloat, + Value: tv, + } + default: + // should never happen + panic("invalid return value for configValueFromHCL2") + } + case cty.String: + return ast.Variable{ + Type: ast.TypeString, + Value: v.AsString(), + } + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]ast.Variable, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, HILVariableFromHCL2Value(ev)) + } + // If we were given a tuple then this could actually produce an invalid + // list with non-homogenous types, which we expect to be caught inside + // HIL just like a user-supplied non-homogenous list would be. 
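These conversions are designed to round-trip. A quick sketch (vendored import path assumed) of a legacy-style config value surviving the trip through cty; the int survives because ConfigValueFromHCL2 prefers int whenever the number is exactly representable:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/terraform/config/hcl2shim"
    )

    func main() {
    	in := map[string]interface{}{
    		"name":  "web",
    		"count": 2,
    		"tags":  []interface{}{"a", "b"},
    	}

    	// Go value -> cty.Value (object/tuple) -> Go value again.
    	v := hcl2shim.HCL2ValueFromConfigValue(in)
    	out := hcl2shim.ConfigValueFromHCL2(v)
    	fmt.Printf("%#v\n", out)
    }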
+ return ast.Variable{ + Type: ast.TypeList, + Value: l, + } + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]ast.Variable) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + l[ek.AsString()] = HILVariableFromHCL2Value(ev) + } + // If we were given an object then this could actually produce an invalid + // map with non-homogenous types, which we expect to be caught inside + // HIL just like a user-supplied non-homogenous map would be. + return ast.Variable{ + Type: ast.TypeMap, + Value: l, + } + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to HIL variable", v)) +} + +func HCL2ValueFromHILVariable(v ast.Variable) cty.Value { + switch v.Type { + case ast.TypeList: + vals := make([]cty.Value, len(v.Value.([]ast.Variable))) + for i, ev := range v.Value.([]ast.Variable) { + vals[i] = HCL2ValueFromHILVariable(ev) + } + return cty.TupleVal(vals) + case ast.TypeMap: + vals := make(map[string]cty.Value, len(v.Value.(map[string]ast.Variable))) + for k, ev := range v.Value.(map[string]ast.Variable) { + vals[k] = HCL2ValueFromHILVariable(ev) + } + return cty.ObjectVal(vals) + default: + return HCL2ValueFromConfigValue(v.Value) + } +} + +func HCL2TypeForHILType(hilType ast.Type) cty.Type { + switch hilType { + case ast.TypeAny: + return cty.DynamicPseudoType + case ast.TypeUnknown: + return cty.DynamicPseudoType + case ast.TypeBool: + return cty.Bool + case ast.TypeInt: + return cty.Number + case ast.TypeFloat: + return cty.Number + case ast.TypeString: + return cty.String + case ast.TypeList: + return cty.List(cty.DynamicPseudoType) + case ast.TypeMap: + return cty.Map(cty.DynamicPseudoType) + default: + return cty.NilType // equilvalent to ast.TypeInvalid + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go index 37ec11a1..08cbc773 100644 --- a/vendor/github.com/hashicorp/terraform/config/import_tree.go +++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go @@ -1,8 +1,12 @@ package config import ( + "bufio" "fmt" "io" + "os" + + "github.com/hashicorp/errwrap" ) // configurable is an interface that must be implemented by any configuration @@ -27,15 +31,52 @@ type importTree struct { // imports. type fileLoaderFunc func(path string) (configurable, []string, error) +// Set this to a non-empty value at link time to enable the HCL2 experiment. +// This is not currently enabled for release builds. +// +// For example: +// go install -ldflags="-X github.com/hashicorp/terraform/config.enableHCL2Experiment=true" github.com/hashicorp/terraform +var enableHCL2Experiment = "" + // loadTree takes a single file and loads the entire importTree for that // file. This function detects what kind of configuration file it is an // executes the proper fileLoaderFunc. func loadTree(root string) (*importTree, error) { var f fileLoaderFunc - switch ext(root) { - case ".tf", ".tf.json": - f = loadFileHcl - default: + + // HCL2 experiment is currently activated at build time via the linker. + // See the comment on this variable for more information. 
+ if enableHCL2Experiment == "" { + // Main-line behavior: always use the original HCL parser + switch ext(root) { + case ".tf", ".tf.json": + f = loadFileHcl + default: + } + } else { + // Experimental behavior: use the HCL2 parser if the opt-in comment + // is present. + switch ext(root) { + case ".tf": + // We need to sniff the file for the opt-in comment line to decide + // if the file is participating in the HCL2 experiment. + cf, err := os.Open(root) + if err != nil { + return nil, err + } + sc := bufio.NewScanner(cf) + for sc.Scan() { + if sc.Text() == "#terraform:hcl2" { + f = globalHCL2Loader.loadFile + } + } + if f == nil { + f = loadFileHcl + } + case ".tf.json": + f = loadFileHcl + default: + } } if f == nil { @@ -86,10 +127,7 @@ func (t *importTree) Close() error { func (t *importTree) ConfigTree() (*configTree, error) { config, err := t.Raw.Config() if err != nil { - return nil, fmt.Errorf( - "Error loading %s: %s", - t.Path, - err) + return nil, errwrap.Wrapf(fmt.Sprintf("Error loading %s: {{err}}", t.Path), err) } // Build our result diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go index bb526bf6..599e5ecd 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go @@ -5,6 +5,8 @@ import ( "strconv" "strings" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/hil/ast" ) @@ -14,6 +16,21 @@ import ( // variables can come from: user variables, resources, etc. type InterpolatedVariable interface { FullKey() string + SourceRange() tfdiags.SourceRange +} + +// varRange can be embedded into an InterpolatedVariable implementation to +// implement the SourceRange method. +type varRange struct { + rng tfdiags.SourceRange +} + +func (r varRange) SourceRange() tfdiags.SourceRange { + return r.rng +} + +func makeVarRange(rng tfdiags.SourceRange) varRange { + return varRange{rng} } // CountVariable is a variable for referencing information about @@ -21,6 +38,7 @@ type InterpolatedVariable interface { type CountVariable struct { Type CountValueType key string + varRange } // CountValueType is the type of the count variable that is referenced. @@ -37,6 +55,7 @@ type ModuleVariable struct { Name string Field string key string + varRange } // A PathVariable is a variable that references path information about the @@ -44,6 +63,7 @@ type ModuleVariable struct { type PathVariable struct { Type PathValueType key string + varRange } type PathValueType byte @@ -67,6 +87,7 @@ type ResourceVariable struct { Index int // Index for multi-variable: aws_instance.foo.1.id == 1 key string + varRange } // SelfVariable is a variable that is referencing the same resource @@ -75,6 +96,7 @@ type SelfVariable struct { Field string key string + varRange } // SimpleVariable is an unprefixed variable, which can show up when users have @@ -82,6 +104,15 @@ type SelfVariable struct { // internally. The template_file resource is an example of this. type SimpleVariable struct { Key string + varRange +} + +// TerraformVariable is a "terraform."-prefixed variable used to access +// metadata about the Terraform run. 
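// A usage sketch (editorial illustration, not part of the original change),
// using "terraform.workspace" as a representative input:
//
//	v, err := NewInterpolatedVariable("terraform.workspace")
//	if err == nil {
//		tv := v.(*TerraformVariable)
//		_ = tv.Field     // "workspace"
//		_ = tv.FullKey() // "terraform.workspace"
//	}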
+type TerraformVariable struct { + Field string + key string + varRange } // A UserVariable is a variable that is referencing a user variable @@ -92,6 +123,14 @@ type UserVariable struct { Elem string key string + varRange +} + +// A LocalVariable is a variable that references a local value defined within +// the current module, via a "locals" block. This looks like "${local.foo}". +type LocalVariable struct { + Name string + varRange } func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { @@ -101,8 +140,12 @@ func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { return NewPathVariable(v) } else if strings.HasPrefix(v, "self.") { return NewSelfVariable(v) + } else if strings.HasPrefix(v, "terraform.") { + return NewTerraformVariable(v) } else if strings.HasPrefix(v, "var.") { return NewUserVariable(v) + } else if strings.HasPrefix(v, "local.") { + return NewLocalVariable(v) } else if strings.HasPrefix(v, "module.") { return NewModuleVariable(v) } else if !strings.ContainsRune(v, '.') { @@ -149,6 +192,10 @@ func (v *ModuleVariable) FullKey() string { return v.key } +func (v *ModuleVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + func NewPathVariable(key string) (*PathVariable, error) { var fieldType PathValueType parts := strings.SplitN(key, ".", 2) @@ -263,7 +310,7 @@ func (v *SelfVariable) GoString() string { } func NewSimpleVariable(key string) (*SimpleVariable, error) { - return &SimpleVariable{key}, nil + return &SimpleVariable{Key: key}, nil } func (v *SimpleVariable) FullKey() string { @@ -274,6 +321,22 @@ func (v *SimpleVariable) GoString() string { return fmt.Sprintf("*%#v", *v) } +func NewTerraformVariable(key string) (*TerraformVariable, error) { + field := key[len("terraform."):] + return &TerraformVariable{ + Field: field, + key: key, + }, nil +} + +func (v *TerraformVariable) FullKey() string { + return v.key +} + +func (v *TerraformVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + func NewUserVariable(key string) (*UserVariable, error) { name := key[len("var."):] elem := "" @@ -302,6 +365,25 @@ func (v *UserVariable) GoString() string { return fmt.Sprintf("*%#v", *v) } +func NewLocalVariable(key string) (*LocalVariable, error) { + name := key[len("local."):] + if idx := strings.Index(name, "."); idx > -1 { + return nil, fmt.Errorf("Can't use dot (.) attribute access in local.%s; use square bracket indexing", name) + } + + return &LocalVariable{ + Name: name, + }, nil +} + +func (v *LocalVariable) FullKey() string { + return fmt.Sprintf("local.%s", v.Name) +} + +func (v *LocalVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + // DetectVariables takes an AST root and returns all the interpolated // variables that are detected in the AST tree. 
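// A usage sketch (editorial illustration, not part of the original change),
// assuming a template that mixes a user variable with a local value:
//
//	tree, _ := hil.Parse("${var.name}-${local.greeting}")
//	vars, err := DetectVariables(tree)
//	// On success, vars contains a *UserVariable for "var.name" and a
//	// *LocalVariable for "local.greeting".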
func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) { diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go index d66073ec..b94fca88 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go @@ -1,26 +1,36 @@ package config import ( + "bytes" + "compress/gzip" "crypto/md5" + "crypto/rsa" "crypto/sha1" "crypto/sha256" + "crypto/sha512" + "crypto/x509" "encoding/base64" "encoding/hex" "encoding/json" - "errors" + "encoding/pem" "fmt" "io/ioutil" + "math" "net" + "net/url" + "path/filepath" "regexp" "sort" "strconv" "strings" + "time" "github.com/apparentlymart/go-cidr/cidr" "github.com/hashicorp/go-uuid" "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" "github.com/mitchellh/go-homedir" + "golang.org/x/crypto/bcrypt" ) // stringSliceToVariableValue converts a string slice into the value @@ -51,38 +61,68 @@ func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) { // Funcs is the mapping of built-in functions for configuration. func Funcs() map[string]ast.Function { return map[string]ast.Function{ + "abs": interpolationFuncAbs(), + "basename": interpolationFuncBasename(), "base64decode": interpolationFuncBase64Decode(), "base64encode": interpolationFuncBase64Encode(), + "base64gzip": interpolationFuncBase64Gzip(), "base64sha256": interpolationFuncBase64Sha256(), + "base64sha512": interpolationFuncBase64Sha512(), + "bcrypt": interpolationFuncBcrypt(), + "ceil": interpolationFuncCeil(), + "chomp": interpolationFuncChomp(), "cidrhost": interpolationFuncCidrHost(), "cidrnetmask": interpolationFuncCidrNetmask(), "cidrsubnet": interpolationFuncCidrSubnet(), "coalesce": interpolationFuncCoalesce(), + "coalescelist": interpolationFuncCoalesceList(), "compact": interpolationFuncCompact(), "concat": interpolationFuncConcat(), + "contains": interpolationFuncContains(), + "dirname": interpolationFuncDirname(), "distinct": interpolationFuncDistinct(), "element": interpolationFuncElement(), + "chunklist": interpolationFuncChunklist(), "file": interpolationFuncFile(), + "matchkeys": interpolationFuncMatchKeys(), + "flatten": interpolationFuncFlatten(), + "floor": interpolationFuncFloor(), "format": interpolationFuncFormat(), "formatlist": interpolationFuncFormatList(), + "indent": interpolationFuncIndent(), "index": interpolationFuncIndex(), "join": interpolationFuncJoin(), "jsonencode": interpolationFuncJSONEncode(), "length": interpolationFuncLength(), "list": interpolationFuncList(), + "log": interpolationFuncLog(), "lower": interpolationFuncLower(), "map": interpolationFuncMap(), + "max": interpolationFuncMax(), "md5": interpolationFuncMd5(), "merge": interpolationFuncMerge(), + "min": interpolationFuncMin(), + "pathexpand": interpolationFuncPathExpand(), + "pow": interpolationFuncPow(), "uuid": interpolationFuncUUID(), "replace": interpolationFuncReplace(), + "rsadecrypt": interpolationFuncRsaDecrypt(), "sha1": interpolationFuncSha1(), "sha256": interpolationFuncSha256(), + "sha512": interpolationFuncSha512(), "signum": interpolationFuncSignum(), + "slice": interpolationFuncSlice(), "sort": interpolationFuncSort(), "split": interpolationFuncSplit(), + "substr": interpolationFuncSubstr(), + "timestamp": interpolationFuncTimestamp(), + "timeadd": interpolationFuncTimeAdd(), + "title": interpolationFuncTitle(), + "transpose": interpolationFuncTranspose(), "trimspace": 
interpolationFuncTrimSpace(),
 		"upper":        interpolationFuncUpper(),
+		"urlencode":    interpolationFuncURLEncode(),
+		"zipmap":       interpolationFuncZipMap(),
 	}
 }
 
@@ -308,6 +348,46 @@ func interpolationFuncCoalesce() ast.Function {
 	}
 }
 
+// interpolationFuncCoalesceList implements the "coalescelist" function that
+// returns the first non-empty list from the provided input lists.
+func interpolationFuncCoalesceList() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeList},
+		ReturnType:   ast.TypeList,
+		Variadic:     true,
+		VariadicType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			if len(args) < 2 {
+				return nil, fmt.Errorf("must provide at least two arguments")
+			}
+			for _, arg := range args {
+				argument := arg.([]ast.Variable)
+
+				if len(argument) > 0 {
+					return argument, nil
+				}
+			}
+			return make([]ast.Variable, 0), nil
+		},
+	}
+}
+
+// interpolationFuncContains returns true if the given element is in the list
+// and returns false otherwise.
+func interpolationFuncContains() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeList, ast.TypeString},
+		ReturnType: ast.TypeBool,
+		Callback: func(args []interface{}) (interface{}, error) {
+			_, err := interpolationFuncIndex().Callback(args)
+			if err != nil {
+				return false, nil
+			}
+			return true, nil
+		},
+	}
+}
+
 // interpolationFuncConcat implements the "concat" function that concatenates
 // multiple lists.
 func interpolationFuncConcat() ast.Function {
@@ -349,6 +429,17 @@ func interpolationFuncConcat() ast.Function {
 	}
 }
 
+// interpolationFuncPow returns the first argument raised to the power of the
+// second argument (x ** y).
+func interpolationFuncPow() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat, ast.TypeFloat},
+		ReturnType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return math.Pow(args[0].(float64), args[1].(float64)), nil
+		},
+	}
+}
+
 // interpolationFuncFile implements the "file" function that allows
 // loading contents from a file.
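// How these functions are exercised end to end (an editorial sketch based on
// the hil package's Parse/Eval API; not part of the original change):
//
//	tree, err := hil.Parse("${pow(2, 10)}")
//	if err == nil {
//		result, err := hil.Eval(tree, &hil.EvalConfig{
//			GlobalScope: &ast.BasicScope{FuncMap: Funcs()},
//		})
//		_ = result // on success, result.Value holds the computed 1024
//		_ = err
//	}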
 func interpolationFuncFile() ast.Function {
@@ -385,6 +476,133 @@ func interpolationFuncFormat() ast.Function {
 	}
 }
 
+// interpolationFuncMax returns the maximum of the numeric arguments.
+func interpolationFuncMax() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeFloat},
+		ReturnType:   ast.TypeFloat,
+		Variadic:     true,
+		VariadicType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			max := args[0].(float64)
+
+			for i := 1; i < len(args); i++ {
+				max = math.Max(max, args[i].(float64))
+			}
+
+			return max, nil
+		},
+	}
+}
+
+// interpolationFuncMin returns the minimum of the numeric arguments.
+func interpolationFuncMin() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeFloat},
+		ReturnType:   ast.TypeFloat,
+		Variadic:     true,
+		VariadicType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			min := args[0].(float64)
+
+			for i := 1; i < len(args); i++ {
+				min = math.Min(min, args[i].(float64))
+			}
+
+			return min, nil
+		},
+	}
+}
+
+// interpolationFuncPathExpand expands a leading `~` in the given path to the
+// current user's home directory.
+func interpolationFuncPathExpand() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return homedir.Expand(args[0].(string))
+		},
+	}
+}
+
+// interpolationFuncCeil returns the least integer value greater than or
+// equal to the argument.
+func interpolationFuncCeil() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat},
+		ReturnType: ast.TypeInt,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return int(math.Ceil(args[0].(float64))), nil
+		},
+	}
+}
+
+// interpolationFuncLog returns the logarithm of the first argument in the
+// base given by the second argument.
+func interpolationFuncLog() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat, ast.TypeFloat},
+		ReturnType: ast.TypeFloat,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return math.Log(args[0].(float64)) / math.Log(args[1].(float64)), nil
+		},
+	}
+}
+
+// interpolationFuncChomp removes trailing newlines from the given string.
+func interpolationFuncChomp() ast.Function {
+	newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return newlines.ReplaceAllString(args[0].(string), ""), nil
+		},
+	}
+}
+
+// interpolationFuncFloor returns the greatest integer value less than or
+// equal to the argument.
+func interpolationFuncFloor() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeFloat},
+		ReturnType: ast.TypeInt,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return int(math.Floor(args[0].(float64))), nil
+		},
+	}
+}
+
+func interpolationFuncZipMap() ast.Function {
+	return ast.Function{
+		ArgTypes: []ast.Type{
+			ast.TypeList, // Keys
+			ast.TypeList, // Values
+		},
+		ReturnType: ast.TypeMap,
+		Callback: func(args []interface{}) (interface{}, error) {
+			keys := args[0].([]ast.Variable)
+			values := args[1].([]ast.Variable)
+
+			if len(keys) != len(values) {
+				return nil, fmt.Errorf("count of keys (%d) does not match count of values (%d)",
+					len(keys), len(values))
+			}
+
+			for i, val := range keys {
+				if val.Type != ast.TypeString {
+					return nil, fmt.Errorf("keys must be strings: value at position %d is %s",
+						i, val.Type.Printable())
+				}
+			}
+
+			result := map[string]ast.Variable{}
+			for i := 0; i < len(keys); i++ {
+				result[keys[i].Value.(string)] = values[i]
+			}
+
+			return result, nil
+		},
+	}
+}
+
 // interpolationFuncFormatList implements the "formatlist" function that does
 // string formatting on lists.
 func interpolationFuncFormatList() ast.Function {
@@ -399,15 +617,25 @@
 		varargs := make([]interface{}, len(args)-1)
 		copy(varargs, args[1:])
 
+		// Verify we have some arguments
+		if len(varargs) == 0 {
+			return nil, fmt.Errorf("no arguments to formatlist")
+		}
+
 		// Convert arguments that are lists into slices.
 		// Confirm along the way that all lists have the same length (n).
 		var n int
+		listSeen := false
 		for i := 1; i < len(args); i++ {
 			s, ok := args[i].([]ast.Variable)
 			if !ok {
 				continue
 			}
 
+			// Mark that we've seen at least one list
+			listSeen = true
+
+			// Convert the ast.Variable to a slice of strings
 			parts, err := listVariableValueToStringSlice(s)
 			if err != nil {
 				return nil, err
@@ -427,8 +655,11 @@
 			}
 		}
 
-		if n == 0 {
-			return nil, errors.New("no lists in arguments to formatlist")
+		// If we didn't see a list this is an error because we
+		// can't determine the return value length.
+		if !listSeen {
+			return nil, fmt.Errorf(
+				"formatlist requires at least one list argument")
 		}
 
 		// Do the formatting.
@@ -453,6 +684,21 @@
 	}
 }
 
+// interpolationFuncIndent indents a multi-line string with the
+// specified number of spaces.
+func interpolationFuncIndent() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeInt, ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			spaces := args[0].(int)
+			data := args[1].(string)
+			pad := strings.Repeat(" ", spaces)
+			return strings.Replace(data, "\n", "\n"+pad, -1), nil
+		},
+	}
+}
+
 // interpolationFuncIndex implements the "index" function that allows one to
 // find the index of a specific element in a list
 func interpolationFuncIndex() ast.Function {
@@ -472,6 +718,17 @@
 	}
 }
 
+// interpolationFuncDirname implements the "dirname" function.
+func interpolationFuncDirname() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return filepath.Dir(args[0].(string)), nil
+		},
+	}
+}
+
 // interpolationFuncDistinct implements the "distinct" function that
 // removes duplicate elements from a list.
 func interpolationFuncDistinct() ast.Function {
@@ -513,6 +770,57 @@ func appendIfMissing(slice []string, element string) []string {
 	return append(slice, element)
 }
 
+// interpolationFuncMatchKeys implements the "matchkeys" function: for two
+// lists `keys` and `values` of equal length, it returns all elements from
+// `values` where the corresponding element from `keys` is in `searchset`.
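// Worked example (editorial illustration, not part of the original change):
// matchkeys(list("a", "b", "c"), list("one", "two", "three"), list("two"))
// returns ["b"], because only keys[1] ("two") appears in the searchset.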
+func interpolationFuncMatchKeys() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeList, ast.TypeList, ast.TypeList},
+		ReturnType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			output := make([]ast.Variable, 0)
+
+			values, _ := args[0].([]ast.Variable)
+			keys, _ := args[1].([]ast.Variable)
+			searchset, _ := args[2].([]ast.Variable)
+
+			if len(keys) != len(values) {
+				return nil, fmt.Errorf("length of keys and values should be equal")
+			}
+
+			for i, key := range keys {
+				for _, search := range searchset {
+					if res, err := compareSimpleVariables(key, search); err != nil {
+						return nil, err
+					} else if res {
+						output = append(output, values[i])
+						break
+					}
+				}
+			}
+			// If searchset is empty, then output is an empty list as well.
+			// If we haven't matched any key, then output is an empty list.
+			return output, nil
+		},
+	}
+}
+
+// compareSimpleVariables compares two variables of the same simple
+// (non-collection) type; complex types such as TypeList or TypeMap are
+// not supported.
+func compareSimpleVariables(a, b ast.Variable) (bool, error) {
+	if a.Type != b.Type {
+		return false, fmt.Errorf(
+			"won't compare items of different types %s and %s",
+			a.Type.Printable(), b.Type.Printable())
+	}
+	switch a.Type {
+	case ast.TypeString:
+		return a.Value.(string) == b.Value.(string), nil
+	default:
+		return false, fmt.Errorf(
+			"can't compare items of type %s",
+			a.Type.Printable())
+	}
+}
+
 // interpolationFuncJoin implements the "join" function that allows
 // multi-variable values to be joined by some character.
 func interpolationFuncJoin() ast.Function {
@@ -545,8 +853,7 @@ func interpolationFuncJoin() ast.Function {
 }
 
 // interpolationFuncJSONEncode implements the "jsonencode" function that encodes
-// a string, list, or map as its JSON representation. For now, values in the
-// list or map may only be strings.
+// a string, list, or map as its JSON representation.
 func interpolationFuncJSONEncode() ast.Function {
 	return ast.Function{
 		ArgTypes:   []ast.Type{ast.TypeAny},
@@ -559,28 +866,36 @@
 			toEncode = typedArg
 
 		case []ast.Variable:
-			// We preallocate the list here. Note that it's important that in
-			// the length 0 case, we have an empty list rather than nil, as
-			// they encode differently.
-			// XXX It would be nice to support arbitrarily nested data here. Is
-			// there an inverse of hil.InterfaceToVariable?
 			strings := make([]string, len(typedArg))
 			for i, v := range typedArg {
 				if v.Type != ast.TypeString {
-					return "", fmt.Errorf("list elements must be strings")
+					variable, _ := hil.InterfaceToVariable(typedArg)
+					toEncode, _ = hil.VariableToInterface(variable)
+
+					jEnc, err := json.Marshal(toEncode)
+					if err != nil {
+						return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
+					}
+					return string(jEnc), nil
+
				}
 				strings[i] = v.Value.(string)
 			}
 			toEncode = strings
 
 		case map[string]ast.Variable:
-			// XXX It would be nice to support arbitrarily nested data here. Is
-			// there an inverse of hil.InterfaceToVariable?
 			stringMap := make(map[string]string)
 			for k, v := range typedArg {
 				if v.Type != ast.TypeString {
-					return "", fmt.Errorf("map values must be strings")
+					variable, _ := hil.InterfaceToVariable(typedArg)
+					toEncode, _ = hil.VariableToInterface(variable)
+
+					jEnc, err := json.Marshal(toEncode)
+					if err != nil {
+						return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
+					}
+					return string(jEnc), nil
 				}
 				stringMap[k] = v.Value.(string)
 			}
@@ -667,6 +982,42 @@ func interpolationFuncSignum() ast.Function {
 	}
 }
 
+// interpolationFuncSlice returns a portion of the input list between from,
+// inclusive, and to, exclusive.
+func interpolationFuncSlice() ast.Function {
+	return ast.Function{
+		ArgTypes: []ast.Type{
+			ast.TypeList, // inputList
+			ast.TypeInt,  // from
+			ast.TypeInt,  // to
+		},
+		ReturnType: ast.TypeList,
+		Variadic:   false,
+		Callback: func(args []interface{}) (interface{}, error) {
+			inputList := args[0].([]ast.Variable)
+			from := args[1].(int)
+			to := args[2].(int)
+
+			if from < 0 {
+				return nil, fmt.Errorf("from index must be >= 0")
+			}
+			if to > len(inputList) {
+				return nil, fmt.Errorf("to index must be <= length of the input list")
+			}
+			if from > to {
+				return nil, fmt.Errorf("from index must be <= to index")
+			}
+
+			var outputList []ast.Variable
+			for i, val := range inputList {
+				if i >= from && i < to {
+					outputList = append(outputList, val)
+				}
+			}
+			return outputList, nil
+		},
+	}
+}
+
 // interpolationFuncSort sorts a list of strings lexicographically
 func interpolationFuncSort() ast.Function {
@@ -784,6 +1135,56 @@
 	}
 }
 
+// interpolationFuncChunklist returns the `list` items chunked by `size`.
+func interpolationFuncChunklist() ast.Function {
+	return ast.Function{
+		ArgTypes: []ast.Type{
+			ast.TypeList, // inputList
+			ast.TypeInt,  // size
+		},
+		ReturnType: ast.TypeList,
+		Callback: func(args []interface{}) (interface{}, error) {
+			output := make([]ast.Variable, 0)
+
+			values, _ := args[0].([]ast.Variable)
+			size, _ := args[1].(int)
+
+			// errors if size is negative
+			if size < 0 {
+				return nil, fmt.Errorf("the size argument must not be negative")
+			}
+
+			// if size is 0, returns a list made of the initial list
+			if size == 0 {
+				output = append(output, ast.Variable{
+					Type:  ast.TypeList,
+					Value: values,
+				})
+				return output, nil
+			}
+
+			variables := make([]ast.Variable, 0)
+			chunk := ast.Variable{
+				Type:  ast.TypeList,
+				Value: variables,
+			}
+			l := len(values)
+			for i, v := range values {
+				variables = append(variables, v)
+
+				// Chunk when the index reaches a multiple of size, or when
+				// reaching the end of the values list
+				if (i+1)%size == 0 || (i+1) == l {
+					chunk.Value = variables
+					output = append(output, chunk)
+					variables = make([]ast.Variable, 0)
+				}
+			}
+
+			return output, nil
+		},
+	}
+}
+
 // interpolationFuncKeys implements the "keys" function that yields a list of
 // keys of map types within a Terraform configuration.
 func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
@@ -842,6 +1243,17 @@
 	}
 }
 
+// interpolationFuncBasename implements the "basename" function.
+func interpolationFuncBasename() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return filepath.Base(args[0].(string)), nil
+		},
+	}
+}
+
 // interpolationFuncBase64Encode implements the "base64encode" function that
 // allows Base64 encoding.
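// Editorial sketch of the expected round trip (not part of the original
// change):
//
//	base64encode("terraform")    // returns "dGVycmFmb3Jt"
//	base64decode("dGVycmFmb3Jt") // returns "terraform"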
 func interpolationFuncBase64Encode() ast.Function {
@@ -872,6 +1284,32 @@ func interpolationFuncBase64Decode() ast.Function {
 	}
 }
 
+// interpolationFuncBase64Gzip implements the "base64gzip" function that
+// gzip-compresses the given string and encodes the result using base64.
+func interpolationFuncBase64Gzip() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			s := args[0].(string)
+
+			var b bytes.Buffer
+			gz := gzip.NewWriter(&b)
+			if _, err := gz.Write([]byte(s)); err != nil {
+				return "", fmt.Errorf("failed to write gzip raw data: '%s'", s)
+			}
+			if err := gz.Flush(); err != nil {
+				return "", fmt.Errorf("failed to flush gzip writer: '%s'", s)
+			}
+			if err := gz.Close(); err != nil {
+				return "", fmt.Errorf("failed to close gzip writer: '%s'", s)
+			}
+
+			return base64.StdEncoding.EncodeToString(b.Bytes()), nil
+		},
+	}
+}
+
 // interpolationFuncLower implements the "lower" function that does
 // string lower casing.
 func interpolationFuncLower() ast.Function {
@@ -961,6 +1399,20 @@
 	}
 }
 
+func interpolationFuncSha512() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			s := args[0].(string)
+			h := sha512.New()
+			h.Write([]byte(s))
+			hash := hex.EncodeToString(h.Sum(nil))
+			return hash, nil
+		},
+	}
+}
+
 func interpolationFuncTrimSpace() ast.Function {
 	return ast.Function{
 		ArgTypes:   []ast.Type{ast.TypeString},
@@ -987,6 +1439,55 @@
 	}
 }
 
+func interpolationFuncBase64Sha512() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{ast.TypeString},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			s := args[0].(string)
+			h := sha512.New()
+			h.Write([]byte(s))
+			shaSum := h.Sum(nil)
+			encoded := base64.StdEncoding.EncodeToString(shaSum[:])
+			return encoded, nil
+		},
+	}
+}
+
+func interpolationFuncBcrypt() ast.Function {
+	return ast.Function{
+		ArgTypes:     []ast.Type{ast.TypeString},
+		Variadic:     true,
+		VariadicType: ast.TypeString,
+		ReturnType:   ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			defaultCost := 10
+
+			if len(args) > 1 {
+				costStr := args[1].(string)
+				cost, err := strconv.Atoi(costStr)
+				if err != nil {
+					return "", err
+				}
+
+				defaultCost = cost
+			}
+
+			if len(args) > 2 {
+				return "", fmt.Errorf("bcrypt() takes no more than two arguments")
+			}
+
+			input := args[0].(string)
+			out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
+			if err != nil {
+				return "", fmt.Errorf("error occurred generating password %s", err.Error())
+			}
+
+			return string(out), nil
+		},
+	}
+}
+
 func interpolationFuncUUID() ast.Function {
 	return ast.Function{
 		ArgTypes:   []ast.Type{},
@@ -996,3 +1497,231 @@
 	}
 }
+
+// interpolationFuncTimestamp implements the "timestamp" function that
+// returns the current UTC time in RFC 3339 format.
+func interpolationFuncTimestamp() ast.Function {
+	return ast.Function{
+		ArgTypes:   []ast.Type{},
+		ReturnType: ast.TypeString,
+		Callback: func(args []interface{}) (interface{}, error) {
+			return time.Now().UTC().Format(time.RFC3339), nil
+		},
+	}
+}
+
+func interpolationFuncTimeAdd() ast.Function {
+	return ast.Function{
+		ArgTypes: []ast.Type{
+			ast.TypeString, // input timestamp string in RFC3339 format
+			ast.TypeString, // duration to add to input timestamp that should be parsable by time.ParseDuration
+		},
+		ReturnType:
ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + + ts, err := time.Parse(time.RFC3339, args[0].(string)) + if err != nil { + return nil, err + } + duration, err := time.ParseDuration(args[1].(string)) + if err != nil { + return nil, err + } + + return ts.Add(duration).Format(time.RFC3339), nil + }, + } +} + +// interpolationFuncTitle implements the "title" function that returns a copy of the +// string in which first characters of all the words are capitalized. +func interpolationFuncTitle() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + toTitle := args[0].(string) + return strings.Title(toTitle), nil + }, + } +} + +// interpolationFuncSubstr implements the "substr" function that allows strings +// to be truncated. +func interpolationFuncSubstr() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // input string + ast.TypeInt, // offset + ast.TypeInt, // length + }, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + str := args[0].(string) + offset := args[1].(int) + length := args[2].(int) + + // Interpret a negative offset as being equivalent to a positive + // offset taken from the end of the string. + if offset < 0 { + offset += len(str) + } + + // Interpret a length of `-1` as indicating that the substring + // should start at `offset` and continue until the end of the + // string. Any other negative length (other than `-1`) is invalid. + if length == -1 { + length = len(str) + } else if length >= 0 { + length += offset + } else { + return nil, fmt.Errorf("length should be a non-negative integer") + } + + if offset > len(str) || offset < 0 { + return nil, fmt.Errorf("offset cannot be larger than the length of the string") + } + + if length > len(str) { + return nil, fmt.Errorf("'offset + length' cannot be larger than the length of the string") + } + + return str[offset:length], nil + }, + } +} + +// Flatten until it's not ast.TypeList +func flattener(finalList []ast.Variable, flattenList []ast.Variable) []ast.Variable { + for _, val := range flattenList { + if val.Type == ast.TypeList { + finalList = flattener(finalList, val.Value.([]ast.Variable)) + } else { + finalList = append(finalList, val) + } + } + return finalList +} + +// Flatten to single list +func interpolationFuncFlatten() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + inputList := args[0].([]ast.Variable) + + var outputList []ast.Variable + return flattener(outputList, inputList), nil + }, + } +} + +func interpolationFuncURLEncode() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + return url.QueryEscape(s), nil + }, + } +} + +// interpolationFuncTranspose implements the "transpose" function +// that converts a map (string,list) to a map (string,list) where +// the unique values of the original lists become the keys of the +// new map and the keys of the original map become values for the +// corresponding new keys. 
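// Worked example (editorial illustration, not part of the original change):
// transpose(map("a", list("1", "2"), "b", list("2", "3"))) returns
// {"1" = ["a"], "2" = ["a", "b"], "3" = ["b"]}.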
+func interpolationFuncTranspose() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeMap, + Callback: func(args []interface{}) (interface{}, error) { + + inputMap := args[0].(map[string]ast.Variable) + outputMap := make(map[string]ast.Variable) + tmpMap := make(map[string][]string) + + for inKey, inVal := range inputMap { + if inVal.Type != ast.TypeList { + return nil, fmt.Errorf("transpose requires a map of lists of strings") + } + values := inVal.Value.([]ast.Variable) + for _, listVal := range values { + if listVal.Type != ast.TypeString { + return nil, fmt.Errorf("transpose requires the given map values to be lists of strings") + } + outKey := listVal.Value.(string) + if _, ok := tmpMap[outKey]; !ok { + tmpMap[outKey] = make([]string, 0) + } + outVal := tmpMap[outKey] + outVal = append(outVal, inKey) + sort.Strings(outVal) + tmpMap[outKey] = outVal + } + } + + for outKey, outVal := range tmpMap { + values := make([]ast.Variable, 0) + for _, v := range outVal { + values = append(values, ast.Variable{Type: ast.TypeString, Value: v}) + } + outputMap[outKey] = ast.Variable{Type: ast.TypeList, Value: values} + } + return outputMap, nil + }, + } +} + +// interpolationFuncAbs returns the absolute value of a given float. +func interpolationFuncAbs() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeFloat}, + ReturnType: ast.TypeFloat, + Callback: func(args []interface{}) (interface{}, error) { + return math.Abs(args[0].(float64)), nil + }, + } +} + +// interpolationFuncRsaDecrypt implements the "rsadecrypt" function that does +// RSA decryption. +func interpolationFuncRsaDecrypt() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString, ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + key := args[1].(string) + + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return "", fmt.Errorf("Failed to decode input %q: cipher text must be base64-encoded", s) + } + + block, _ := pem.Decode([]byte(key)) + if block == nil { + return "", fmt.Errorf("Failed to read key %q: no key found", key) + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return "", fmt.Errorf( + "Failed to read key %q: password protected keys are\n"+ + "not supported. 
Please decrypt the key prior to use.", key) + } + + x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return "", err + } + + out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) + if err != nil { + return "", err + } + + return string(out), nil + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go index 720a8b28..66a677d5 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go @@ -32,7 +32,7 @@ type interpolationWalker struct { cs []reflect.Value csKey []reflect.Value csData interface{} - sliceIndex int + sliceIndex []int unknownKeys []string } @@ -54,9 +54,6 @@ type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node) func (w *interpolationWalker) Enter(loc reflectwalk.Location) error { w.loc = loc - if loc == reflectwalk.WalkLoc { - w.sliceIndex = -1 - } return nil } @@ -75,7 +72,7 @@ func (w *interpolationWalker) Exit(loc reflectwalk.Location) error { w.cs = w.cs[:len(w.cs)-1] case reflectwalk.SliceElem: w.csKey = w.csKey[:len(w.csKey)-1] - w.sliceIndex = -1 + w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1] } return nil @@ -90,8 +87,8 @@ func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error { w.csData = k w.csKey = append(w.csKey, k) - if w.sliceIndex != -1 { - w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex, k.String())) + if l := len(w.sliceIndex); l > 0 { + w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String())) } else { w.key = append(w.key, k.String()) } @@ -107,7 +104,7 @@ func (w *interpolationWalker) Slice(s reflect.Value) error { func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error { w.csKey = append(w.csKey, reflect.ValueOf(i)) - w.sliceIndex = i + w.sliceIndex = append(w.sliceIndex, i) return nil } @@ -176,8 +173,7 @@ func (w *interpolationWalker) Primitive(v reflect.Value) error { } if remove { - w.removeCurrent() - return nil + w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) } resultVal := reflect.ValueOf(replaceVal) @@ -209,28 +205,13 @@ func (w *interpolationWalker) Primitive(v reflect.Value) error { return nil } -func (w *interpolationWalker) removeCurrent() { - // Append the key to the unknown keys - w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, ".")) - - for i := 1; i <= len(w.cs); i++ { - c := w.cs[len(w.cs)-i] - switch c.Kind() { - case reflect.Map: - // Zero value so that we delete the map key - var val reflect.Value - - // Get the key and delete it - k := w.csData.(reflect.Value) - c.SetMapIndex(k, val) - return - } +func (w *interpolationWalker) replaceCurrent(v reflect.Value) { + // if we don't have at least 2 values, we're not going to find a map, but + // we could panic. + if len(w.cs) < 2 { + return } - panic("No container found for removeCurrent") -} - -func (w *interpolationWalker) replaceCurrent(v reflect.Value) { c := w.cs[len(w.cs)-2] switch c.Kind() { case reflect.Map: @@ -290,9 +271,7 @@ func (w *interpolationWalker) splitSlice() { result = append(result, val.Value) } case []interface{}: - for _, element := range val { - result = append(result, element) - } + result = append(result, val...) 
default: result = append(result, v) } diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go index c9a1295f..6e347816 100644 --- a/vendor/github.com/hashicorp/terraform/config/loader.go +++ b/vendor/github.com/hashicorp/terraform/config/loader.go @@ -12,6 +12,18 @@ import ( "github.com/hashicorp/hcl" ) +// ErrNoConfigsFound is the error returned by LoadDir if no +// Terraform configuration files were found in the given directory. +type ErrNoConfigsFound struct { + Dir string +} + +func (e ErrNoConfigsFound) Error() string { + return fmt.Sprintf( + "No Terraform configuration files found in directory: %s", + e.Dir) +} + // LoadJSON loads a single Terraform configuration from a given JSON document. // // The document must be a complete Terraform configuration. This function will @@ -68,10 +80,8 @@ func LoadDir(root string) (*Config, error) { if err != nil { return nil, err } - if len(files) == 0 { - return nil, fmt.Errorf( - "No Terraform configuration files found in directory: %s", - root) + if len(files) == 0 && len(overrides) == 0 { + return nil, &ErrNoConfigsFound{Dir: root} } // Determine the absolute path to the directory. @@ -102,6 +112,9 @@ func LoadDir(root string) (*Config, error) { result = c } } + if len(files) == 0 { + result = &Config{} + } // Load all the overrides, and merge them into the config for _, f := range overrides { @@ -184,7 +197,7 @@ func dirFiles(dir string) ([]string, []string, error) { // Only care about files that are valid to load name := fi.Name() extValue := ext(name) - if extValue == "" || isIgnoredFile(name) { + if extValue == "" || IsIgnoredFile(name) { continue } @@ -205,9 +218,9 @@ func dirFiles(dir string) ([]string, []string, error) { return files, overrides, nil } -// isIgnoredFile returns true or false depending on whether the +// IsIgnoredFile returns true or false depending on whether the // provided file name is a file that should be ignored. 
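// Editorial examples (not part of the original change): ".hidden" (Unix-like
// hidden file), "main.tf~" (vim backup), and "#main.tf#" (emacs autosave)
// are all ignored, while "main.tf" is not.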
-func isIgnoredFile(name string) bool { +func IsIgnoredFile(name string) bool { return strings.HasPrefix(name, ".") || // Unix-like hidden files strings.HasSuffix(name, "~") || // vim strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go index 264c4ccb..68cffe2c 100644 --- a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go +++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go @@ -17,27 +17,41 @@ type hclConfigurable struct { Root *ast.File } -func (t *hclConfigurable) Config() (*Config, error) { - validKeys := map[string]struct{}{ - "atlas": struct{}{}, - "data": struct{}{}, - "module": struct{}{}, - "output": struct{}{}, - "provider": struct{}{}, - "resource": struct{}{}, - "variable": struct{}{}, - } +var ReservedDataSourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} - type hclVariable struct { - Name string `hcl:",key"` - Default interface{} - Description string - DeclaredType string `hcl:"type"` - Fields []string `hcl:",decodedFields"` - } +var ReservedResourceFields = []string{ + "connection", + "count", + "depends_on", + "id", + "lifecycle", + "provider", + "provisioner", +} + +var ReservedProviderFields = []string{ + "alias", + "version", +} - var rawConfig struct { - Variable []*hclVariable +func (t *hclConfigurable) Config() (*Config, error) { + validKeys := map[string]struct{}{ + "atlas": struct{}{}, + "data": struct{}{}, + "locals": struct{}{}, + "module": struct{}{}, + "output": struct{}{}, + "provider": struct{}{}, + "resource": struct{}{}, + "terraform": struct{}{}, + "variable": struct{}{}, } // Top-level item should be the object list @@ -46,44 +60,33 @@ func (t *hclConfigurable) Config() (*Config, error) { return nil, fmt.Errorf("error parsing: file doesn't contain a root object") } - if err := hcl.DecodeObject(&rawConfig, list); err != nil { - return nil, err - } - - // Start building up the actual configuration. We start with - // variables. - // TODO(mitchellh): Make function like loadVariablesHcl so that - // duplicates aren't overriden + // Start building up the actual configuration. config := new(Config) - if len(rawConfig.Variable) > 0 { - config.Variables = make([]*Variable, 0, len(rawConfig.Variable)) - for _, v := range rawConfig.Variable { - // Defaults turn into a slice of map[string]interface{} and - // we need to make sure to convert that down into the - // proper type for Config. 
-			if ms, ok := v.Default.([]map[string]interface{}); ok {
-				def := make(map[string]interface{})
-				for _, m := range ms {
-					for k, v := range m {
-						def[k] = v
-					}
-				}
-
-				v.Default = def
-			}
-			newVar := &Variable{
-				Name:         v.Name,
-				DeclaredType: v.DeclaredType,
-				Default:      v.Default,
-				Description:  v.Description,
-			}
+	// Terraform config
+	if o := list.Filter("terraform"); len(o.Items) > 0 {
+		var err error
+		config.Terraform, err = loadTerraformHcl(o)
+		if err != nil {
+			return nil, err
+		}
+	}
 
-			if err := newVar.ValidateTypeAndDefault(); err != nil {
-				return nil, err
-			}
+	// Build the variables
+	if vars := list.Filter("variable"); len(vars.Items) > 0 {
+		var err error
+		config.Variables, err = loadVariablesHcl(vars)
+		if err != nil {
+			return nil, err
+		}
+	}
 
-			config.Variables = append(config.Variables, newVar)
+	// Build local values
+	if locals := list.Filter("locals"); len(locals.Items) > 0 {
+		var err error
+		config.Locals, err = loadLocalsHcl(locals)
+		if err != nil {
+			return nil, err
 		}
 	}
@@ -231,6 +234,106 @@ func loadFileHcl(root string) (configurable, []string, error) {
 	return result, nil, nil
 }
 
+// Given a handle to an HCL object, this transforms it into the Terraform config.
+func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) {
+	if len(list.Items) > 1 {
+		return nil, fmt.Errorf("only one 'terraform' block allowed per module")
+	}
+
+	// Get our one item
+	item := list.Items[0]
+
+	// This block should have an empty top level ObjectItem. If there are keys
+	// here, it's likely because we have a flattened JSON object, and we can
+	// lift this into a nested ObjectList to decode properly.
+	if len(item.Keys) > 0 {
+		item = &ast.ObjectItem{
+			Val: &ast.ObjectType{
+				List: &ast.ObjectList{
+					Items: []*ast.ObjectItem{item},
+				},
+			},
+		}
+	}
+
+	// We need the item value as an ObjectList
+	var listVal *ast.ObjectList
+	if ot, ok := item.Val.(*ast.ObjectType); ok {
+		listVal = ot.List
+	} else {
+		return nil, fmt.Errorf("terraform block: should be an object")
+	}
+
+	// NOTE: We purposely don't validate unknown HCL keys here so that
+	// we can potentially read _future_ Terraform version config (to
+	// still be able to validate the required version).
+	//
+	// We should still keep track of unknown keys to validate later, but
+	// HCL doesn't currently support that.
+
+	var config Terraform
+	if err := hcl.DecodeObject(&config, item.Val); err != nil {
+		return nil, fmt.Errorf(
+			"Error reading terraform config: %s",
+			err)
+	}
+
+	// If we have a backend block, then parse it out
+	if os := listVal.Filter("backend"); len(os.Items) > 0 {
+		var err error
+		config.Backend, err = loadTerraformBackendHcl(os)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error reading backend config for terraform block: %s",
+				err)
+		}
+	}
+
+	return &config, nil
+}
+
+// loadTerraformBackendHcl loads the Backend configuration from an object list.
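// Editorial illustration (not part of the original change): a configuration
// such as
//
//	terraform {
//	  backend "s3" {
//	    bucket = "example-bucket"
//	  }
//	}
//
// decodes into a Backend with Type "s3", with the remaining attributes
// (the bucket name here is hypothetical) captured in RawConfig.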
+func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) {
+	if len(list.Items) > 1 {
+		return nil, fmt.Errorf("only one 'backend' block allowed")
+	}
+
+	// Get our one item
+	item := list.Items[0]
+
+	// Verify the keys
+	if len(item.Keys) != 1 {
+		return nil, fmt.Errorf(
+			"position %s: 'backend' must be followed by exactly one string: a type",
+			item.Pos())
+	}
+
+	typ := item.Keys[0].Token.Value().(string)
+
+	// Decode the raw config
+	var config map[string]interface{}
+	if err := hcl.DecodeObject(&config, item.Val); err != nil {
+		return nil, fmt.Errorf(
+			"Error reading backend config: %s",
+			err)
+	}
+
+	rawConfig, err := NewRawConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf(
+			"Error reading backend config: %s",
+			err)
+	}
+
+	b := &Backend{
+		Type:      typ,
+		RawConfig: rawConfig,
+	}
+	b.Hash = b.Rehash()
+
+	return b, nil
+}
+
 // Given a handle to an HCL object, this transforms it into the Atlas
 // configuration.
 func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
@@ -258,6 +361,10 @@
 // represents exactly one module definition in the HCL configuration.
 // We leave it up to another pass to merge them together.
 func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
+	if err := assertAllBlocksHaveNames("module", list); err != nil {
+		return nil, err
+	}
+
 	list = list.Children()
 	if len(list.Items) == 0 {
 		return nil, nil
@@ -286,9 +393,6 @@
 			err)
 	}
 
-	// Remove the fields we handle specially
-	delete(config, "source")
-
 	rawConfig, err := NewRawConfig(config)
 	if err != nil {
 		return nil, fmt.Errorf(
@@ -297,7 +401,11 @@
 			err)
 	}
 
-	// If we have a count, then figure it out
+	// Remove the fields we handle specially
+	delete(config, "source")
+	delete(config, "version")
+	delete(config, "providers")
+
 	var source string
 	if o := listVal.Filter("source"); len(o.Items) > 0 {
 		err = hcl.DecodeObject(&source, o.Items[0].Val)
@@ -309,9 +417,33 @@
 		}
 	}
 
+	var version string
+	if o := listVal.Filter("version"); len(o.Items) > 0 {
+		err = hcl.DecodeObject(&version, o.Items[0].Val)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error parsing version for %s: %s",
+				k,
+				err)
+		}
+	}
+
+	var providers map[string]string
+	if o := listVal.Filter("providers"); len(o.Items) > 0 {
+		err = hcl.DecodeObject(&providers, o.Items[0].Val)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error parsing providers for %s: %s",
+				k,
+				err)
+		}
+	}
+
 	result = append(result, &Module{
 		Name:      k,
 		Source:    source,
+		Version:   version,
+		Providers: providers,
 		RawConfig: rawConfig,
 	})
 }
@@ -319,24 +451,89 @@
 	return result, nil
 }
 
+// loadLocalsHcl recurses into the given HCL object and turns it into
+// a list of locals.
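// Editorial illustration (not part of the original change): a block such as
//
//	locals {
//	  greeting = "Hello, ${var.name}"
//	}
//
// yields one *Local with Name "greeting" (the names are hypothetical) whose
// RawConfig wraps the assigned expression under the key "value".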
+func loadLocalsHcl(list *ast.ObjectList) ([]*Local, error) { + + result := make([]*Local, 0, len(list.Items)) + + for _, block := range list.Items { + if len(block.Keys) > 0 { + return nil, fmt.Errorf( + "locals block at %s should not have label %q", + block.Pos(), block.Keys[0].Token.Value(), + ) + } + + blockObj, ok := block.Val.(*ast.ObjectType) + if !ok { + return nil, fmt.Errorf("locals value at %s should be a block", block.Val.Pos()) + } + + // blockObj now contains directly our local decls + for _, item := range blockObj.List.Items { + if len(item.Keys) != 1 { + return nil, fmt.Errorf("local declaration at %s may not be a block", item.Val.Pos()) + } + + // By the time we get here there can only be one item left, but + // we'll decode into a map anyway because it's a convenient way + // to extract both the key and the value robustly. + kv := map[string]interface{}{} + hcl.DecodeObject(&kv, item) + for k, v := range kv { + rawConfig, err := NewRawConfig(map[string]interface{}{ + "value": v, + }) + + if err != nil { + return nil, fmt.Errorf( + "error parsing local value %q at %s: %s", + k, item.Val.Pos(), err, + ) + } + + result = append(result, &Local{ + Name: k, + RawConfig: rawConfig, + }) + } + } + } + + return result, nil +} + // LoadOutputsHcl recurses into the given HCL object and turns // it into a mapping of outputs. func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { - list = list.Children() - if len(list.Items) == 0 { - return nil, nil + if err := assertAllBlocksHaveNames("output", list); err != nil { + return nil, err } + list = list.Children() + // Go through each object and turn it into an actual result. result := make([]*Output, 0, len(list.Items)) for _, item := range list.Items { n := item.Keys[0].Token.Value().(string) + var listVal *ast.ObjectList + if ot, ok := item.Val.(*ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("output '%s': should be an object", n) + } + var config map[string]interface{} if err := hcl.DecodeObject(&config, item.Val); err != nil { return nil, err } + // Delete special keys + delete(config, "depends_on") + delete(config, "description") + rawConfig, err := NewRawConfig(config) if err != nil { return nil, fmt.Errorf( @@ -345,18 +542,129 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { err) } + // If we have depends fields, then add those in + var dependsOn []string + if o := listVal.Filter("depends_on"); len(o.Items) > 0 { + err := hcl.DecodeObject(&dependsOn, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading depends_on for output %q: %s", + n, + err) + } + } + + // If we have a description field, then filter that + var description string + if o := listVal.Filter("description"); len(o.Items) > 0 { + err := hcl.DecodeObject(&description, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading description for output %q: %s", + n, + err) + } + } + result = append(result, &Output{ - Name: n, - RawConfig: rawConfig, + Name: n, + RawConfig: rawConfig, + DependsOn: dependsOn, + Description: description, }) } return result, nil } +// LoadVariablesHcl recurses into the given HCL object and turns +// it into a list of variables. 
+func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
+	if err := assertAllBlocksHaveNames("variable", list); err != nil {
+		return nil, err
+	}
+
+	list = list.Children()
+
+	// hclVariable is the structure each variable is decoded into
+	type hclVariable struct {
+		DeclaredType string `hcl:"type"`
+		Default      interface{}
+		Description  string
+		Fields       []string `hcl:",decodedFields"`
+	}
+
+	// Go through each object and turn it into an actual result.
+	result := make([]*Variable, 0, len(list.Items))
+	for _, item := range list.Items {
+		// Clean up items from JSON
+		unwrapHCLObjectKeysFromJSON(item, 1)
+
+		// Verify the keys
+		if len(item.Keys) != 1 {
+			return nil, fmt.Errorf(
+				"position %s: 'variable' must be followed by exactly one string: a name",
+				item.Pos())
+		}
+
+		n := item.Keys[0].Token.Value().(string)
+		if !NameRegexp.MatchString(n) {
+			return nil, fmt.Errorf(
+				"position %s: 'variable' name must match regular expression: %s",
+				item.Pos(), NameRegexp)
+		}
+
+		// Check for invalid keys
+		valid := []string{"type", "default", "description"}
+		if err := checkHCLKeys(item.Val, valid); err != nil {
+			return nil, multierror.Prefix(err, fmt.Sprintf(
+				"variable[%s]:", n))
+		}
+
+		// Decode into hclVariable to get typed values
+		var hclVar hclVariable
+		if err := hcl.DecodeObject(&hclVar, item.Val); err != nil {
+			return nil, err
+		}
+
+		// Defaults turn into a slice of map[string]interface{} and
+		// we need to make sure to convert that down into the
+		// proper type for Config.
+		if ms, ok := hclVar.Default.([]map[string]interface{}); ok {
+			def := make(map[string]interface{})
+			for _, m := range ms {
+				for k, v := range m {
+					def[k] = v
+				}
+			}
+
+			hclVar.Default = def
+		}
+
+		// Build the new variable and do some basic validation
+		newVar := &Variable{
+			Name:         n,
+			DeclaredType: hclVar.DeclaredType,
+			Default:      hclVar.Default,
+			Description:  hclVar.Description,
+		}
+		if err := newVar.ValidateTypeAndDefault(); err != nil {
+			return nil, err
+		}
+
+		result = append(result, newVar)
+	}
+
+	return result, nil
+}
+
 // LoadProvidersHcl recurses into the given HCL object and turns
 // it into a mapping of provider configs.
 func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
+	if err := assertAllBlocksHaveNames("provider", list); err != nil {
+		return nil, err
+	}
+
 	list = list.Children()
 	if len(list.Items) == 0 {
 		return nil, nil
@@ -380,6 +688,7 @@ func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
 	}
 
 	delete(config, "alias")
+	delete(config, "version")
 
 	rawConfig, err := NewRawConfig(config)
 	if err != nil {
@@ -401,9 +710,22 @@ func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
 		}
 	}
 
+	// If we have a version field then extract it
+	var version string
+	if a := listVal.Filter("version"); len(a.Items) > 0 {
+		err := hcl.DecodeObject(&version, a.Items[0].Val)
+		if err != nil {
+			return nil, fmt.Errorf(
+				"Error reading version for provider[%s]: %s",
+				n,
+				err)
+		}
+	}
+
 	result = append(result, &ProviderConfig{
 		Name:      n,
 		Alias:     alias,
+		Version:   version,
 		RawConfig: rawConfig,
 	})
 }
@@ -418,6 +740,10 @@
 // represents exactly one data definition in the HCL configuration.
 // We leave it up to another pass to merge them together.
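// Editorial illustration (not part of the original change): a block such as
//
//	data "aws_ami" "example" {
//	  most_recent = true
//	}
//
// is parsed into a *Resource with Type "aws_ami" and Name "example" (both
// hypothetical here), with the body captured in RawConfig.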
func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { + if err := assertAllBlocksHaveNames("data", list); err != nil { + return nil, err + } + list = list.Children() if len(list.Items) == 0 { return nil, nil @@ -557,36 +883,8 @@ func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { item.Pos()) } - // HCL special case: if we're parsing JSON then directly nested - // items will show up as additional "keys". We need to unwrap them - // since we expect only two keys. Example: - // - // { "foo": { "bar": { "baz": {} } } } - // - // Will show up with Keys being: []string{"foo", "bar", "baz"} - // when we really just want the first two. To fix this we unwrap - // them into the right value. - if len(item.Keys) > 2 && item.Keys[0].Token.JSON { - for len(item.Keys) > 2 { - // Pop off the last key - n := len(item.Keys) - key := item.Keys[n-1] - item.Keys[n-1] = nil - item.Keys = item.Keys[:n-1] - - // Wrap our value in a list - item.Val = &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{ - &ast.ObjectItem{ - Keys: []*ast.ObjectKey{key}, - Val: item.Val, - }, - }, - }, - } - } - } + // Fix up JSON input + unwrapHCLObjectKeysFromJSON(item, 2) if len(item.Keys) != 2 { return nil, fmt.Errorf( @@ -707,6 +1005,12 @@ func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { // destroying the existing instance var lifecycle ResourceLifecycle if o := listVal.Filter("lifecycle"); len(o.Items) > 0 { + if len(o.Items) > 1 { + return nil, fmt.Errorf( + "%s[%s]: Multiple lifecycle blocks found, expected one", + t, k) + } + // Check for invalid keys valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"} if err := checkHCLKeys(o.Items[0].Val, valid); err != nil { @@ -749,6 +1053,10 @@ func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { } func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) { + if err := assertAllBlocksHaveNames("provisioner", list); err != nil { + return nil, err + } + list = list.Children() if len(list.Items) == 0 { return nil, nil @@ -771,8 +1079,40 @@ func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) return nil, err } - // Delete the "connection" section, handle separately + // Parse the "when" value + when := ProvisionerWhenCreate + if v, ok := config["when"]; ok { + switch v { + case "create": + when = ProvisionerWhenCreate + case "destroy": + when = ProvisionerWhenDestroy + default: + return nil, fmt.Errorf( + "position %s: 'provisioner' when must be 'create' or 'destroy'", + item.Pos()) + } + } + + // Parse the "on_failure" value + onFailure := ProvisionerOnFailureFail + if v, ok := config["on_failure"]; ok { + switch v { + case "continue": + onFailure = ProvisionerOnFailureContinue + case "fail": + onFailure = ProvisionerOnFailureFail + default: + return nil, fmt.Errorf( + "position %s: 'provisioner' on_failure must be 'continue' or 'fail'", + item.Pos()) + } + } + + // Delete fields we special case delete(config, "connection") + delete(config, "when") + delete(config, "on_failure") rawConfig, err := NewRawConfig(config) if err != nil { @@ -811,6 +1151,8 @@ func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) Type: n, RawConfig: rawConfig, ConnInfo: connRaw, + When: when, + OnFailure: onFailure, }) } @@ -837,6 +1179,29 @@ func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode { } */ +// assertAllBlocksHaveNames returns an error if any of the items in +// the 
given object list are blocks without keys (like "module {}")
+// or simple assignments (like "module = 1"). It returns nil if
+// neither of these things is true.
+//
+// The given name is used in any generated error messages, and should
+// be the name of the block we're dealing with. The given list should
+// be the result of calling .Filter on an object list with that same
+// name.
+func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error {
+ if elem := list.Elem(); len(elem.Items) != 0 {
+ switch et := elem.Items[0].Val.(type) {
+ case *ast.ObjectType:
+ pos := et.Lbrace
+ return fmt.Errorf("%s: %q must be followed by a name", pos, name)
+ default:
+ pos := elem.Items[0].Val.Pos()
+ return fmt.Errorf("%s: %q must be a configuration block", pos, name)
+ }
+ }
+ return nil
+}
+
 func checkHCLKeys(node ast.Node, valid []string) error {
 var list *ast.ObjectList
 switch n := node.(type) {
@@ -864,3 +1229,42 @@ func checkHCLKeys(node ast.Node, valid []string) error {

 return result
 }
+
+// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when
+// parsing JSON as input: if we're parsing JSON then directly nested
+// items will show up as additional "keys".
+//
+// For objects that expect a fixed number of keys, this breaks the
+// decoding process. This function unwraps the object into what it would've
+// looked like if it came directly from HCL by specifying the number of keys
+// you expect.
+//
+// Example:
+//
+// { "foo": { "baz": {} } }
+//
+// Will show up with Keys being: []string{"foo", "baz"}
+// when, for a depth of 1, we really just want the first one. This
+// function will fix this.
+func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) {
+ if len(item.Keys) > depth && item.Keys[0].Token.JSON {
+ for len(item.Keys) > depth {
+ // Pop off the last key
+ n := len(item.Keys)
+ key := item.Keys[n-1]
+ item.Keys[n-1] = nil
+ item.Keys = item.Keys[:n-1]
+
+ // Wrap our value in a list
+ item.Val = &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{
+ &ast.ObjectItem{
+ Keys: []*ast.ObjectKey{key},
+ Val: item.Val,
+ },
+ },
+ },
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go
new file mode 100644
index 00000000..4f9f129f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go
@@ -0,0 +1,473 @@
+package config
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ gohcl2 "github.com/hashicorp/hcl2/gohcl"
+ hcl2 "github.com/hashicorp/hcl2/hcl"
+ hcl2parse "github.com/hashicorp/hcl2/hclparse"
+ "github.com/hashicorp/terraform/config/hcl2shim"
+ "github.com/zclconf/go-cty/cty"
+)
+
+// hcl2Configurable is an implementation of configurable that knows
+// how to turn a HCL Body into a *Config object.
+type hcl2Configurable struct {
+ SourceFilename string
+ Body hcl2.Body
+}
+
+// hcl2Loader is a wrapper around a HCL parser that provides a fileLoaderFunc.
+type hcl2Loader struct {
+ Parser *hcl2parse.Parser
+}
+
+// For the moment we'll just have a global loader since we don't have anywhere
+// better to stash this.
+// TODO: refactor the loader API so that it uses some sort of object we can
+// stash the parser inside.
+var globalHCL2Loader = newHCL2Loader()
+
+// newHCL2Loader creates a new hcl2Loader containing a new HCL Parser.
+// +// HCL parsers retain information about files that are loaded to aid in +// producing diagnostic messages, so all files within a single configuration +// should be loaded with the same parser to ensure the availability of +// full diagnostic information. +func newHCL2Loader() hcl2Loader { + return hcl2Loader{ + Parser: hcl2parse.NewParser(), + } +} + +// loadFile is a fileLoaderFunc that knows how to read a HCL2 file and turn it +// into a hcl2Configurable. +func (l hcl2Loader) loadFile(filename string) (configurable, []string, error) { + var f *hcl2.File + var diags hcl2.Diagnostics + if strings.HasSuffix(filename, ".json") { + f, diags = l.Parser.ParseJSONFile(filename) + } else { + f, diags = l.Parser.ParseHCLFile(filename) + } + if diags.HasErrors() { + // Return diagnostics as an error; callers may type-assert this to + // recover the original diagnostics, if it doesn't end up wrapped + // in another error. + return nil, nil, diags + } + + return &hcl2Configurable{ + SourceFilename: filename, + Body: f.Body, + }, nil, nil +} + +func (t *hcl2Configurable) Config() (*Config, error) { + config := &Config{} + + // these structs are used only for the initial shallow decoding; we'll + // expand this into the main, public-facing config structs afterwards. + type atlas struct { + Name string `hcl:"name"` + Include *[]string `hcl:"include"` + Exclude *[]string `hcl:"exclude"` + } + type provider struct { + Name string `hcl:"name,label"` + Alias *string `hcl:"alias,attr"` + Version *string `hcl:"version,attr"` + Config hcl2.Body `hcl:",remain"` + } + type module struct { + Name string `hcl:"name,label"` + Source string `hcl:"source,attr"` + Version *string `hcl:"version,attr"` + Providers *map[string]string `hcl:"providers,attr"` + Config hcl2.Body `hcl:",remain"` + } + type resourceLifecycle struct { + CreateBeforeDestroy *bool `hcl:"create_before_destroy,attr"` + PreventDestroy *bool `hcl:"prevent_destroy,attr"` + IgnoreChanges *[]string `hcl:"ignore_changes,attr"` + } + type connection struct { + Config hcl2.Body `hcl:",remain"` + } + type provisioner struct { + Type string `hcl:"type,label"` + + When *string `hcl:"when,attr"` + OnFailure *string `hcl:"on_failure,attr"` + + Connection *connection `hcl:"connection,block"` + Config hcl2.Body `hcl:",remain"` + } + type managedResource struct { + Type string `hcl:"type,label"` + Name string `hcl:"name,label"` + + CountExpr hcl2.Expression `hcl:"count,attr"` + Provider *string `hcl:"provider,attr"` + DependsOn *[]string `hcl:"depends_on,attr"` + + Lifecycle *resourceLifecycle `hcl:"lifecycle,block"` + Provisioners []provisioner `hcl:"provisioner,block"` + Connection *connection `hcl:"connection,block"` + + Config hcl2.Body `hcl:",remain"` + } + type dataResource struct { + Type string `hcl:"type,label"` + Name string `hcl:"name,label"` + + CountExpr hcl2.Expression `hcl:"count,attr"` + Provider *string `hcl:"provider,attr"` + DependsOn *[]string `hcl:"depends_on,attr"` + + Config hcl2.Body `hcl:",remain"` + } + type variable struct { + Name string `hcl:"name,label"` + + DeclaredType *string `hcl:"type,attr"` + Default *cty.Value `hcl:"default,attr"` + Description *string `hcl:"description,attr"` + Sensitive *bool `hcl:"sensitive,attr"` + } + type output struct { + Name string `hcl:"name,label"` + + ValueExpr hcl2.Expression `hcl:"value,attr"` + DependsOn *[]string `hcl:"depends_on,attr"` + Description *string `hcl:"description,attr"` + Sensitive *bool `hcl:"sensitive,attr"` + } + type locals struct { + Definitions hcl2.Attributes 
`hcl:",remain"`
+ }
+ type backend struct {
+ Type string `hcl:"type,label"`
+ Config hcl2.Body `hcl:",remain"`
+ }
+ type terraform struct {
+ RequiredVersion *string `hcl:"required_version,attr"`
+ Backend *backend `hcl:"backend,block"`
+ }
+ type topLevel struct {
+ Atlas *atlas `hcl:"atlas,block"`
+ Datas []dataResource `hcl:"data,block"`
+ Modules []module `hcl:"module,block"`
+ Outputs []output `hcl:"output,block"`
+ Providers []provider `hcl:"provider,block"`
+ Resources []managedResource `hcl:"resource,block"`
+ Terraform *terraform `hcl:"terraform,block"`
+ Variables []variable `hcl:"variable,block"`
+ Locals []*locals `hcl:"locals,block"`
+ }
+
+ var raw topLevel
+ diags := gohcl2.DecodeBody(t.Body, nil, &raw)
+ if diags.HasErrors() {
+ // Do some minimal decoding to see if we can at least get the
+ // required Terraform version, which might help explain why we
+ // couldn't parse the rest.
+ if raw.Terraform != nil && raw.Terraform.RequiredVersion != nil {
+ config.Terraform = &Terraform{
+ RequiredVersion: *raw.Terraform.RequiredVersion,
+ }
+ }
+
+ // We return the diags as an implementation of error, which the
+ // caller can then type-assert if desired to recover the individual
+ // diagnostics.
+ // FIXME: The current API gives us no way to return warnings in the
+ // absence of any errors.
+ return config, diags
+ }
+
+ if raw.Terraform != nil {
+ var reqdVersion string
+ var backend *Backend
+
+ if raw.Terraform.RequiredVersion != nil {
+ reqdVersion = *raw.Terraform.RequiredVersion
+ }
+ if raw.Terraform.Backend != nil {
+ backend = new(Backend)
+ backend.Type = raw.Terraform.Backend.Type
+
+ // We don't permit interpolations or nested blocks inside the
+ // backend config, so we can decode the config early here and
+ // get direct access to the values, which is important for the
+ // config hashing to work as expected.
+ var config map[string]string
+ configDiags := gohcl2.DecodeBody(raw.Terraform.Backend.Config, nil, &config)
+ diags = append(diags, configDiags...)
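+ // For example (a sketch; values invented), a backend block like
+ //
+ //     terraform {
+ //       backend "consul" {
+ //         path = "terraform/state/my-app"
+ //       }
+ //     }
+ //
+ // decodes at this point to map[string]string{"path": "terraform/state/my-app"}.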
+ + raw := make(map[string]interface{}, len(config)) + for k, v := range config { + raw[k] = v + } + + var err error + backend.RawConfig, err = NewRawConfig(raw) + if err != nil { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid backend configuration", + Detail: fmt.Sprintf("Error in backend configuration: %s", err), + }) + } + } + + config.Terraform = &Terraform{ + RequiredVersion: reqdVersion, + Backend: backend, + } + } + + if raw.Atlas != nil { + var include, exclude []string + if raw.Atlas.Include != nil { + include = *raw.Atlas.Include + } + if raw.Atlas.Exclude != nil { + exclude = *raw.Atlas.Exclude + } + config.Atlas = &AtlasConfig{ + Name: raw.Atlas.Name, + Include: include, + Exclude: exclude, + } + } + + for _, rawM := range raw.Modules { + m := &Module{ + Name: rawM.Name, + Source: rawM.Source, + RawConfig: NewRawConfigHCL2(rawM.Config), + } + + if rawM.Version != nil { + m.Version = *rawM.Version + } + + if rawM.Providers != nil { + m.Providers = *rawM.Providers + } + + config.Modules = append(config.Modules, m) + } + + for _, rawV := range raw.Variables { + v := &Variable{ + Name: rawV.Name, + } + if rawV.DeclaredType != nil { + v.DeclaredType = *rawV.DeclaredType + } + if rawV.Default != nil { + v.Default = hcl2shim.ConfigValueFromHCL2(*rawV.Default) + } + if rawV.Description != nil { + v.Description = *rawV.Description + } + + config.Variables = append(config.Variables, v) + } + + for _, rawO := range raw.Outputs { + o := &Output{ + Name: rawO.Name, + } + + if rawO.Description != nil { + o.Description = *rawO.Description + } + if rawO.DependsOn != nil { + o.DependsOn = *rawO.DependsOn + } + if rawO.Sensitive != nil { + o.Sensitive = *rawO.Sensitive + } + + // The result is expected to be a map like map[string]interface{}{"value": something}, + // so we'll fake that with our hcl2shim.SingleAttrBody shim. 
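+ // For example (a sketch; names invented), an output block like
+ //
+ //     output "addr" {
+ //       value = "${aws_instance.web.public_ip}"
+ //     }
+ //
+ // is wrapped in a synthetic body equivalent to just its value
+ // attribute, which later decoding sees as
+ // map[string]interface{}{"value": ...}.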
+ o.RawConfig = NewRawConfigHCL2(hcl2shim.SingleAttrBody{
+ Name: "value",
+ Expr: rawO.ValueExpr,
+ })
+
+ config.Outputs = append(config.Outputs, o)
+ }
+
+ for _, rawR := range raw.Resources {
+ r := &Resource{
+ Mode: ManagedResourceMode,
+ Type: rawR.Type,
+ Name: rawR.Name,
+ }
+ if rawR.Lifecycle != nil {
+ var l ResourceLifecycle
+ if rawR.Lifecycle.CreateBeforeDestroy != nil {
+ l.CreateBeforeDestroy = *rawR.Lifecycle.CreateBeforeDestroy
+ }
+ if rawR.Lifecycle.PreventDestroy != nil {
+ l.PreventDestroy = *rawR.Lifecycle.PreventDestroy
+ }
+ if rawR.Lifecycle.IgnoreChanges != nil {
+ l.IgnoreChanges = *rawR.Lifecycle.IgnoreChanges
+ }
+ r.Lifecycle = l
+ }
+ if rawR.Provider != nil {
+ r.Provider = *rawR.Provider
+ }
+ if rawR.DependsOn != nil {
+ r.DependsOn = *rawR.DependsOn
+ }
+
+ var defaultConnInfo *RawConfig
+ if rawR.Connection != nil {
+ defaultConnInfo = NewRawConfigHCL2(rawR.Connection.Config)
+ }
+
+ for _, rawP := range rawR.Provisioners {
+ p := &Provisioner{
+ Type: rawP.Type,
+ }
+
+ switch {
+ case rawP.When == nil:
+ p.When = ProvisionerWhenCreate
+ case *rawP.When == "create":
+ p.When = ProvisionerWhenCreate
+ case *rawP.When == "destroy":
+ p.When = ProvisionerWhenDestroy
+ default:
+ p.When = ProvisionerWhenInvalid
+ }
+
+ switch {
+ case rawP.OnFailure == nil:
+ p.OnFailure = ProvisionerOnFailureFail
+ case *rawP.OnFailure == "fail":
+ p.OnFailure = ProvisionerOnFailureFail
+ case *rawP.OnFailure == "continue":
+ p.OnFailure = ProvisionerOnFailureContinue
+ default:
+ p.OnFailure = ProvisionerOnFailureInvalid
+ }
+
+ if rawP.Connection != nil {
+ p.ConnInfo = NewRawConfigHCL2(rawP.Connection.Config)
+ } else {
+ p.ConnInfo = defaultConnInfo
+ }
+
+ p.RawConfig = NewRawConfigHCL2(rawP.Config)
+
+ r.Provisioners = append(r.Provisioners, p)
+ }
+
+ // The old loader records the count expression as a weird RawConfig with
+ // a single-element map inside. Since the rest of the world is assuming
+ // that, we'll mimic it here.
+ {
+ countBody := hcl2shim.SingleAttrBody{
+ Name: "count",
+ Expr: rawR.CountExpr,
+ }
+
+ r.RawCount = NewRawConfigHCL2(countBody)
+ r.RawCount.Key = "count"
+ }
+
+ r.RawConfig = NewRawConfigHCL2(rawR.Config)
+
+ config.Resources = append(config.Resources, r)
+
+ }
+
+ for _, rawR := range raw.Datas {
+ r := &Resource{
+ Mode: DataResourceMode,
+ Type: rawR.Type,
+ Name: rawR.Name,
+ }
+
+ if rawR.Provider != nil {
+ r.Provider = *rawR.Provider
+ }
+ if rawR.DependsOn != nil {
+ r.DependsOn = *rawR.DependsOn
+ }
+
+ // The old loader records the count expression as a weird RawConfig with
+ // a single-element map inside. Since the rest of the world is assuming
+ // that, we'll mimic it here.
+ {
+ countBody := hcl2shim.SingleAttrBody{
+ Name: "count",
+ Expr: rawR.CountExpr,
+ }
+
+ r.RawCount = NewRawConfigHCL2(countBody)
+ r.RawCount.Key = "count"
+ }
+
+ r.RawConfig = NewRawConfigHCL2(rawR.Config)
+
+ config.Resources = append(config.Resources, r)
+ }
+
+ for _, rawP := range raw.Providers {
+ p := &ProviderConfig{
+ Name: rawP.Name,
+ }
+
+ if rawP.Alias != nil {
+ p.Alias = *rawP.Alias
+ }
+ if rawP.Version != nil {
+ p.Version = *rawP.Version
+ }
+
+ // The result is expected to be a map like map[string]interface{}{"value": something},
+ // so we'll fake that with our hcl2shim.SingleAttrBody shim.
+ p.RawConfig = NewRawConfigHCL2(rawP.Config)
+
+ config.ProviderConfigs = append(config.ProviderConfigs, p)
+ }
+
+ for _, rawL := range raw.Locals {
+ names := make([]string, 0, len(rawL.Definitions))
+ for n := range rawL.Definitions {
+ names = append(names, n)
+ }
+ sort.Strings(names)
+ for _, n := range names {
+ attr := rawL.Definitions[n]
+ l := &Local{
+ Name: n,
+ RawConfig: NewRawConfigHCL2(hcl2shim.SingleAttrBody{
+ Name: "value",
+ Expr: attr.Expr,
+ }),
+ }
+ config.Locals = append(config.Locals, l)
+ }
+ }
+
+ // FIXME: The current API gives us no way to return warnings in the
+ // absence of any errors.
+ var err error
+ if diags.HasErrors() {
+ err = diags
+ }
+
+ return config, err
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go
index f72fdfa9..55fc864f 100644
--- a/vendor/github.com/hashicorp/terraform/config/merge.go
+++ b/vendor/github.com/hashicorp/terraform/config/merge.go
@@ -32,6 +32,16 @@ func Merge(c1, c2 *Config) (*Config, error) {
 c.Atlas = c2.Atlas
 }

+ // Merge the Terraform configuration
+ if c1.Terraform != nil {
+ c.Terraform = c1.Terraform
+ if c2.Terraform != nil {
+ c.Terraform.Merge(c2.Terraform)
+ }
+ } else {
+ c.Terraform = c2.Terraform
+ }
+
 // NOTE: Everything below is pretty gross. Due to the lack of generics
 // in Go, there is some hoop-jumping involved to make this merging a
 // little more test-friendly and less repetitive. Ironically, making it
@@ -127,6 +137,17 @@ func Merge(c1, c2 *Config) (*Config, error) {
 }
 }

+ // Local Values
+ // These are simpler than the other config elements because they are just
+ // flat values and so no deep merging is required.
+ if localsCount := len(c1.Locals) + len(c2.Locals); localsCount != 0 {
+ // Explicit length check above because we want c.Locals to remain
+ // nil if the result would be empty.
+ c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals))
+ c.Locals = append(c.Locals, c1.Locals...)
+ c.Locals = append(c.Locals, c2.Locals...)
+ }
+
 return c, nil
 }
diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go
index 96b4a63c..5073d0d2 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/get.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/get.go
@@ -3,6 +3,7 @@ package module
 import (
 "io/ioutil"
 "os"
+ "path/filepath"

 "github.com/hashicorp/go-getter"
 )
@@ -37,13 +38,10 @@ func GetCopy(dst, src string) error {
 if err != nil {
 return err
 }
- // FIXME: This isn't completely safe. Creating and removing our temp path
- // exposes where to race to inject files.
- if err := os.RemoveAll(tmpDir); err != nil {
- return err
- }
 defer os.RemoveAll(tmpDir)

+ tmpDir = filepath.Join(tmpDir, "module")
+
 // Get to that temporary dir
 if err := getter.Get(tmpDir, src); err != nil {
 return err
@@ -57,15 +55,3 @@ func GetCopy(dst, src string) error {
 // Copy to the final location
 return copyDir(dst, tmpDir)
 }
-
-func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) {
- // Get the module with the level specified if we were told to.
- if mode > GetModeNone {
- if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
- return "", false, err
- }
- }
-
- // Get the directory where the module is.
- return s.Dir(key)
-}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go
index b9be7e38..da520abc 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/inode.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go
@@ -1,4 +1,4 @@
-// +build linux darwin openbsd solaris
+// +build linux darwin openbsd netbsd solaris dragonfly

 package module
diff --git a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go
index f8649f6e..7dc8fccd 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/module.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/module.go
@@ -2,6 +2,8 @@ package module

 // Module represents the metadata for a single module.
 type Module struct {
- Name string
- Source string
+ Name string
+ Source string
+ Version string
+ Providers map[string]string
 }
diff --git a/vendor/github.com/hashicorp/terraform/config/module/storage.go b/vendor/github.com/hashicorp/terraform/config/module/storage.go
new file mode 100644
index 00000000..4b828dcb
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/storage.go
@@ -0,0 +1,346 @@
+package module
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+
+ getter "github.com/hashicorp/go-getter"
+ "github.com/hashicorp/terraform/registry"
+ "github.com/hashicorp/terraform/registry/regsrc"
+ "github.com/hashicorp/terraform/svchost/disco"
+ "github.com/mitchellh/cli"
+)
+
+const manifestName = "modules.json"
+
+// moduleManifest is the serialization structure used to record the stored
+// module's metadata.
+type moduleManifest struct {
+ Modules []moduleRecord
+}
+
+// moduleRecord represents the stored module's metadata.
+// This is compared for equality using '==', so all fields need to remain
+// comparable.
+type moduleRecord struct {
+ // Source is the module source string from the config, minus any
+ // subdirectory.
+ Source string
+
+ // Key is the locally unique identifier for this module.
+ Key string
+
+ // Version is the exact version string for the stored module.
+ Version string
+
+ // Dir is the directory name returned by the FileStorage. This is what
+ // allows us to correlate a particular module version with the location on
+ // disk.
+ Dir string
+
+ // Root is the root directory containing the module. If the module is
+ // unpacked from an archive, and not located in the root directory, this is
+ // used to direct the loader to the correct subdirectory. This is
+ // independent from any subdirectory in the original source string, which
+ // may traverse further into the module tree.
+ Root string
+
+ // url is the location of the module source
+ url string
+
+ // registry is true if this module is sourced from a registry
+ registry bool
+}
+
+// Storage implements methods to manage the storage of modules.
+// This is used by Tree.Load to query registries, authenticate requests, and
+// store modules locally.
+type Storage struct {
+ // StorageDir is the full path to the directory where all modules will be
+ // stored.
+ StorageDir string
+
+ // Ui is an optional cli.Ui for user output
+ Ui cli.Ui
+
+ // Mode is the GetMode that will be used for various operations.
+ Mode GetMode
+
+ registry *registry.Client
+}
+
+// NewStorage returns a new initialized Storage object.
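+//
+// A usage sketch (assuming a *disco.Disco for service discovery is
+// already in hand):
+//
+//     s := NewStorage(".terraform/modules", services)
+//     s.Mode = GetModeGet
+//     err := tree.Load(s)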
+func NewStorage(dir string, services *disco.Disco) *Storage { + regClient := registry.NewClient(services, nil) + + return &Storage{ + StorageDir: dir, + registry: regClient, + } +} + +// loadManifest returns the moduleManifest file from the parent directory. +func (s Storage) loadManifest() (moduleManifest, error) { + manifest := moduleManifest{} + + manifestPath := filepath.Join(s.StorageDir, manifestName) + data, err := ioutil.ReadFile(manifestPath) + if err != nil && !os.IsNotExist(err) { + return manifest, err + } + + if len(data) == 0 { + return manifest, nil + } + + if err := json.Unmarshal(data, &manifest); err != nil { + return manifest, err + } + return manifest, nil +} + +// Store the location of the module, along with the version used and the module +// root directory. The storage method loads the entire file and rewrites it +// each time. This is only done a few times during init, so efficiency is +// not a concern. +func (s Storage) recordModule(rec moduleRecord) error { + manifest, err := s.loadManifest() + if err != nil { + // if there was a problem with the file, we will attempt to write a new + // one. Any non-data related error should surface there. + log.Printf("[WARN] error reading module manifest: %s", err) + } + + // do nothing if we already have the exact module + for i, stored := range manifest.Modules { + if rec == stored { + return nil + } + + // they are not equal, but if the storage path is the same we need to + // remove this rec to be replaced. + if rec.Dir == stored.Dir { + manifest.Modules[i] = manifest.Modules[len(manifest.Modules)-1] + manifest.Modules = manifest.Modules[:len(manifest.Modules)-1] + break + } + } + + manifest.Modules = append(manifest.Modules, rec) + + js, err := json.Marshal(manifest) + if err != nil { + panic(err) + } + + manifestPath := filepath.Join(s.StorageDir, manifestName) + return ioutil.WriteFile(manifestPath, js, 0644) +} + +// load the manifest from dir, and return all module versions matching the +// provided source. Records with no version info will be skipped, as they need +// to be uniquely identified by other means. +func (s Storage) moduleVersions(source string) ([]moduleRecord, error) { + manifest, err := s.loadManifest() + if err != nil { + return manifest.Modules, err + } + + var matching []moduleRecord + + for _, m := range manifest.Modules { + if m.Source == source && m.Version != "" { + log.Printf("[DEBUG] found local version %q for module %s", m.Version, m.Source) + matching = append(matching, m) + } + } + + return matching, nil +} + +func (s Storage) moduleDir(key string) (string, error) { + manifest, err := s.loadManifest() + if err != nil { + return "", err + } + + for _, m := range manifest.Modules { + if m.Key == key { + return m.Dir, nil + } + } + + return "", nil +} + +// return only the root directory of the module stored in dir. +func (s Storage) getModuleRoot(dir string) (string, error) { + manifest, err := s.loadManifest() + if err != nil { + return "", err + } + + for _, mod := range manifest.Modules { + if mod.Dir == dir { + return mod.Root, nil + } + } + return "", nil +} + +// record only the Root directory for the module stored at dir. 
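+//
+// As a sketch (values invented), the entry recorded for
+// recordModuleRoot(".terraform/modules/a1b2c3", "module/subdir") ends up
+// in modules.json roughly as:
+//
+//     {"Modules":[{"Source":"","Key":"","Version":"",
+//         "Dir":".terraform/modules/a1b2c3","Root":"module/subdir"}]}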
+func (s Storage) recordModuleRoot(dir, root string) error {
+ rec := moduleRecord{
+ Dir: dir,
+ Root: root,
+ }
+
+ return s.recordModule(rec)
+}
+
+func (s Storage) output(msg string) {
+ if s.Ui == nil || s.Mode == GetModeNone {
+ return
+ }
+ s.Ui.Output(msg)
+}
+
+func (s Storage) getStorage(key string, src string) (string, bool, error) {
+ storage := &getter.FolderStorage{
+ StorageDir: s.StorageDir,
+ }
+
+ log.Printf("[DEBUG] fetching module from %s", src)
+
+ // Get the module with the level specified if we were told to.
+ if s.Mode > GetModeNone {
+ log.Printf("[DEBUG] fetching %q with key %q", src, key)
+ if err := storage.Get(key, src, s.Mode == GetModeUpdate); err != nil {
+ return "", false, err
+ }
+ }
+
+ // Get the directory where the module is.
+ dir, found, err := storage.Dir(key)
+ log.Printf("[DEBUG] found %q in %q: %t", src, dir, found)
+ return dir, found, err
+}
+
+// find a stored module that's not from a registry
+func (s Storage) findModule(key string) (string, error) {
+ if s.Mode == GetModeUpdate {
+ return "", nil
+ }
+
+ return s.moduleDir(key)
+}
+
+// GetModule fetches a module source into the specified directory. This is used
+// as a convenience function by the CLI to initialize a configuration.
+func (s Storage) GetModule(dst, src string) error {
+ // reset this in case the caller was going to re-use it
+ mode := s.Mode
+ s.Mode = GetModeUpdate
+ defer func() {
+ s.Mode = mode
+ }()
+
+ rec, err := s.findRegistryModule(src, anyVersion)
+ if err != nil {
+ return err
+ }
+
+ pwd, err := os.Getwd()
+ if err != nil {
+ return err
+ }
+
+ source := rec.url
+ if source == "" {
+ source, err = getter.Detect(src, pwd, getter.Detectors)
+ if err != nil {
+ return fmt.Errorf("module %s: %s", src, err)
+ }
+ }
+
+ if source == "" {
+ return fmt.Errorf("module %q not found", src)
+ }
+
+ return GetCopy(dst, source)
+}
+
+// find a registry module
+func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, error) {
+ rec := moduleRecord{
+ Source: mSource,
+ }
+ // detect if we have a registry source
+ mod, err := regsrc.ParseModuleSource(mSource)
+ switch err {
+ case nil:
+ // ok
+ case regsrc.ErrInvalidModuleSource:
+ return rec, nil
+ default:
+ return rec, err
+ }
+ rec.registry = true
+
+ log.Printf("[TRACE] %q is a registry module", mod.Display())
+
+ versions, err := s.moduleVersions(mod.String())
+ if err != nil {
+ log.Printf("[ERROR] error looking up versions for %q: %s", mod.Display(), err)
+ return rec, err
+ }
+
+ match, err := newestRecord(versions, constraint)
+ if err != nil {
+ log.Printf("[INFO] no matching version for %q<%s>, %s", mod.Display(), constraint, err)
+ }
+ log.Printf("[DEBUG] matched %q version %s for %s", mod, match.Version, constraint)
+
+ rec.Dir = match.Dir
+ rec.Version = match.Version
+ found := rec.Dir != ""
+
+ // We need to look up available versions, but only on Get if it's not
+ // found, and unconditionally on Update.
+ if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) {
+ resp, err := s.registry.Versions(mod)
+ if err != nil {
+ return rec, err
+ }
+
+ if len(resp.Modules) == 0 {
+ return rec, fmt.Errorf("module %q not found in registry", mod.Display())
+ }
+
+ match, err := newestVersion(resp.Modules[0].Versions, constraint)
+ if err != nil {
+ return rec, err
+ }
+
+ if match == nil {
+ return rec, fmt.Errorf("no versions for %q found matching %q", mod.Display(), constraint)
+ }
+
+ rec.Version = match.Version
+
+ rec.url, err = s.registry.Location(mod, rec.Version)
+ if err != nil {
+ return rec, err
+ }
+
+ // we've already validated this by now
+ host, _ := mod.SvcHost()
+ s.output(fmt.Sprintf(" Found version %s of %s on %s", rec.Version, mod.Module(), host.ForDisplay()))
+
+ }
+ return rec, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go
new file mode 100644
index 00000000..6f1ff050
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/testing.go
@@ -0,0 +1,36 @@
+package module
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+// TestTree loads a module at the given path and returns the tree as well
+// as a function that should be deferred to clean up resources.
+func TestTree(t *testing.T, path string) (*Tree, func()) {
+ // Create a temporary directory for module storage
+ dir, err := ioutil.TempDir("", "tf")
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ return nil, nil
+ }
+
+ // Load the module
+ mod, err := NewTreeModule("", path)
+ if err != nil {
+ t.Fatalf("err: %s", err)
+ return nil, nil
+ }
+
+ // Get the child modules
+ s := &Storage{StorageDir: dir, Mode: GetModeGet}
+ if err := mod.Load(s); err != nil {
+ t.Fatalf("err: %s", err)
+ return nil, nil
+ }
+
+ return mod, func() {
+ os.RemoveAll(dir)
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go
index 3af556e9..f56d69b7 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/tree.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go
@@ -4,11 +4,14 @@ import (
 "bufio"
 "bytes"
 "fmt"
+ "log"
 "path/filepath"
 "strings"
 "sync"

- "github.com/hashicorp/go-getter"
+ "github.com/hashicorp/terraform/tfdiags"
+
+ getter "github.com/hashicorp/go-getter"
 "github.com/hashicorp/terraform/config"
 )

@@ -26,6 +29,17 @@ type Tree struct {
 children map[string]*Tree
 path []string
 lock sync.RWMutex
+
+ // version is the final version of the config loaded for the Tree's module
+ version string
+ // source is the "source" string used to load this module. It's possible
+ // for a module source to change, but the path remains the same, preventing
+ // it from being reloaded.
+ source string
+ // parent allows us to walk back up the tree and determine if there are any
+ // versioned ancestor modules which may affect the stored location of
+ // submodules
+ parent *Tree
 }

 // NewTree returns a new Tree for the given config structure.
@@ -40,7 +54,7 @@ func NewEmptyTree() *Tree {
 // We do this dummy load so that the tree is marked as "loaded". It
 // should never fail because this is just about a no-op. If it does fail
 // we panic so we can know its a bug.
- if err := t.Load(nil, GetModeGet); err != nil {
+ if err := t.Load(&Storage{Mode: GetModeGet}); err != nil {
 panic(err)
 }

@@ -66,6 +80,10 @@ func (t *Tree) Config() *config.Config {

 // Child returns the child with the given path (by name).
 func (t *Tree) Child(path []string) *Tree {
+ if t == nil {
+ return nil
+ }
+
 if len(path) == 0 {
 return t
 }
@@ -88,6 +106,25 @@ func (t *Tree) Children() map[string]*Tree {
 return t.children
 }

+// DeepEach calls the provided callback for the receiver and then all of
+// its descendants in the tree, allowing an operation to be performed on
+// all modules in the tree.
+//
+// Parents will be visited before their children but otherwise the order is
+// not defined.
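+//
+// A minimal usage sketch (assuming a loaded tree t):
+//
+//     t.DeepEach(func(m *Tree) {
+//         log.Printf("[DEBUG] visiting module %q", strings.Join(m.Path(), "."))
+//     })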
+func (t *Tree) DeepEach(cb func(*Tree)) {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+ t.deepEach(cb)
+}
+
+func (t *Tree) deepEach(cb func(*Tree)) {
+ cb(t)
+ for _, c := range t.children {
+ c.deepEach(cb)
+ }
+}
+
 // Loaded says whether or not this tree has been loaded or not yet.
 func (t *Tree) Loaded() bool {
 t.lock.RLock()
@@ -103,8 +140,10 @@ func (t *Tree) Modules() []*Module {
 result := make([]*Module, len(t.config.Modules))
 for i, m := range t.config.Modules {
 result[i] = &Module{
- Name: m.Name,
- Source: m.Source,
+ Name: m.Name,
+ Version: m.Version,
+ Source: m.Source,
+ Providers: m.Providers,
 }
 }

@@ -132,81 +171,178 @@ func (t *Tree) Name() string {
 // module trees inherently require the configuration to be in a reasonably
 // sane state: no circular dependencies, proper module sources, etc. A full
 // suite of validations can be done by running Validate (after loading).
-func (t *Tree) Load(s getter.Storage, mode GetMode) error {
+func (t *Tree) Load(s *Storage) error {
 t.lock.Lock()
 defer t.lock.Unlock()

- // Reset the children if we have any
- t.children = nil
+ children, err := t.getChildren(s)
+ if err != nil {
+ return err
+ }
+
+ // Go through all the children and load them.
+ for _, c := range children {
+ if err := c.Load(s); err != nil {
+ return err
+ }
+ }
+
+ // Set our tree up
+ t.children = children

- modules := t.Modules()
+ return nil
+}
+
+func (t *Tree) getChildren(s *Storage) (map[string]*Tree, error) {
 children := make(map[string]*Tree)

 // Go through all the modules and get the directory for them.
- for _, m := range modules {
+ for _, m := range t.Modules() {
 if _, ok := children[m.Name]; ok {
- return fmt.Errorf(
+ return nil, fmt.Errorf(
 "module %s: duplicated. module names must be unique", m.Name)
 }

 // Determine the path to this child
- path := make([]string, len(t.path), len(t.path)+1)
- copy(path, t.path)
- path = append(path, m.Name)
+ modPath := make([]string, len(t.path), len(t.path)+1)
+ copy(modPath, t.path)
+ modPath = append(modPath, m.Name)
+
+ log.Printf("[TRACE] module source: %q", m.Source)

- // Split out the subdir if we have one
- source, subDir := getter.SourceDirSubdir(m.Source)
+ // add the module path to help indicate where modules with relative
+ // paths are being loaded from
+ s.output(fmt.Sprintf("- module.%s", strings.Join(modPath, ".")))

- source, err := getter.Detect(source, t.config.Dir, getter.Detectors)
+ // Look up the local location of the module.
+ // dir is the local directory where the module is stored
+ mod, err := s.findRegistryModule(m.Source, m.Version)
 if err != nil {
- return fmt.Errorf("module %s: %s", m.Name, err)
+ return nil, err
+ }
+
+ // The key is the string that will be used to uniquely id the Source in
+ // the local storage. The prefix digit can be incremented to
+ // invalidate the local module storage.
+ key := "1." + t.versionedPathKey(m)
+ if mod.Version != "" {
+ key += "." + mod.Version
+ }
+
+ // Check for the exact key if it's not a registry module
+ if !mod.registry {
+ mod.Dir, err = s.findModule(key)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if mod.Dir != "" && s.Mode != GetModeUpdate {
+ // We found it locally, but in order to load the Tree we need to
+ // find out if there was another subDir stored from detection.
+ subDir, err := s.getModuleRoot(mod.Dir)
+ if err != nil {
+ // If there's a problem with the subdir record, we'll let the
+ // recordModuleRoot method fix it up. Any other filesystem errors
+ // will turn up again below.
+ log.Println("[WARN] error reading subdir record:", err) + } + + fullDir := filepath.Join(mod.Dir, subDir) + + child, err := NewTreeModule(m.Name, fullDir) + if err != nil { + return nil, fmt.Errorf("module %s: %s", m.Name, err) + } + child.path = modPath + child.parent = t + child.version = mod.Version + child.source = m.Source + children[m.Name] = child + continue + } + + // Split out the subdir if we have one. + // Terraform keeps the entire requested tree, so that modules can + // reference sibling modules from the same archive or repo. + rawSource, subDir := getter.SourceDirSubdir(m.Source) + + // we haven't found a source, so fallback to the go-getter detectors + source := mod.url + if source == "" { + source, err = getter.Detect(rawSource, t.config.Dir, getter.Detectors) + if err != nil { + return nil, fmt.Errorf("module %s: %s", m.Name, err) + } } + log.Printf("[TRACE] detected module source %q", source) + // Check if the detector introduced something new. - source, subDir2 := getter.SourceDirSubdir(source) - if subDir2 != "" { - subDir = filepath.Join(subDir2, subDir) + // For example, the registry always adds a subdir of `//*`, + // indicating that we need to strip off the first component from the + // tar archive, though we may not yet know what it is called. + source, detectedSubDir := getter.SourceDirSubdir(source) + if detectedSubDir != "" { + subDir = filepath.Join(detectedSubDir, subDir) + } + + output := "" + switch s.Mode { + case GetModeUpdate: + output = fmt.Sprintf(" Updating source %q", m.Source) + default: + output = fmt.Sprintf(" Getting source %q", m.Source) } + s.output(output) - // Get the directory where this module is so we can load it - key := strings.Join(path, ".") - key = fmt.Sprintf("root.%s-%s", key, source) - dir, ok, err := getStorage(s, key, source, mode) + dir, ok, err := s.getStorage(key, source) if err != nil { - return err + return nil, err } if !ok { - return fmt.Errorf( - "module %s: not found, may need to be downloaded using 'terraform get'", m.Name) + return nil, fmt.Errorf("module %s: not found, may need to run 'terraform init'", m.Name) } - // If we have a subdirectory, then merge that in + log.Printf("[TRACE] %q stored in %q", source, dir) + + // expand and record the subDir for later + fullDir := dir if subDir != "" { - dir = filepath.Join(dir, subDir) - } + fullDir, err = getter.SubdirGlob(dir, subDir) + if err != nil { + return nil, err + } - // Load the configurations.Dir(source) - children[m.Name], err = NewTreeModule(m.Name, dir) - if err != nil { - return fmt.Errorf( - "module %s: %s", m.Name, err) + // +1 to account for the pathsep + if len(dir)+1 > len(fullDir) { + return nil, fmt.Errorf("invalid module storage path %q", fullDir) + } + subDir = fullDir[len(dir)+1:] } - // Set the path of this child - children[m.Name].path = path - } + // add new info to the module record + mod.Key = key + mod.Dir = dir + mod.Root = subDir - // Go through all the children and load them. - for _, c := range children { - if err := c.Load(s, mode); err != nil { - return err + // record the module in our manifest + if err := s.recordModule(mod); err != nil { + return nil, err } - } - // Set our tree up - t.children = children + child, err := NewTreeModule(m.Name, fullDir) + if err != nil { + return nil, fmt.Errorf("module %s: %s", m.Name, err) + } + child.path = modPath + child.parent = t + child.version = mod.Version + child.source = m.Source + children[m.Name] = child + } - return nil + return children, nil } // Path is the full path to this tree. 
@@ -249,18 +385,36 @@ func (t *Tree) String() string { // as verifying things such as parameters/outputs between the various modules. // // Load must be called prior to calling Validate or an error will be returned. -func (t *Tree) Validate() error { +func (t *Tree) Validate() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if !t.Loaded() { - return fmt.Errorf("tree must be loaded before calling Validate") + diags = diags.Append(fmt.Errorf( + "tree must be loaded before calling Validate", + )) + return diags } - // If something goes wrong, here is our error template - newErr := &TreeError{Name: []string{t.Name()}} + // Terraform core does not handle root module children named "root". + // We plan to fix this in the future but this bug was brought up in + // the middle of a release and we don't want to introduce wide-sweeping + // changes at that time. + if len(t.path) == 1 && t.name == "root" { + diags = diags.Append(fmt.Errorf( + "root module cannot contain module named 'root'", + )) + return diags + } // Validate our configuration first. - if err := t.config.Validate(); err != nil { - newErr.Err = err - return newErr + diags = diags.Append(t.config.Validate()) + + // If we're the root, we do extra validation. This validation usually + // requires the entire tree (since children don't have parent pointers). + if len(t.path) == 0 { + if err := t.validateProviderAlias(); err != nil { + diags = diags.Append(err) + } } // Get the child trees @@ -268,20 +422,11 @@ func (t *Tree) Validate() error { // Validate all our children for _, c := range children { - err := c.Validate() - if err == nil { + childDiags := c.Validate() + diags = diags.Append(childDiags) + if diags.HasErrors() { continue } - - verr, ok := err.(*TreeError) - if !ok { - // Unknown error, just return... - return err - } - - // Append ourselves to the error and then return - verr.Name = append(verr.Name, t.Name()) - return verr } // Go over all the modules and verify that any parameters are valid @@ -307,10 +452,10 @@ func (t *Tree) Validate() error { // Compare to the keys in our raw config for the module for k, _ := range m.RawConfig.Raw { if _, ok := varMap[k]; !ok { - newErr.Err = fmt.Errorf( - "module %s: %s is not a valid parameter", - m.Name, k) - return newErr + diags = diags.Append(fmt.Errorf( + "module %q: %q is not a valid argument", + m.Name, k, + )) } // Remove the required @@ -319,10 +464,10 @@ func (t *Tree) Validate() error { // If we have any required left over, they aren't set. for k, _ := range requiredMap { - newErr.Err = fmt.Errorf( - "module %s: required variable %s not set", - m.Name, k) - return newErr + diags = diags.Append(fmt.Errorf( + "module %q: missing required argument %q", + m.Name, k, + )) } } @@ -337,8 +482,11 @@ func (t *Tree) Validate() error { tree, ok := children[mv.Name] if !ok { - // This should never happen because Load watches us - panic("module not found in children: " + mv.Name) + diags = diags.Append(fmt.Errorf( + "%s: reference to undefined module %q", + source, mv.Name, + )) + continue } found := false @@ -349,33 +497,103 @@ func (t *Tree) Validate() error { } } if !found { - newErr.Err = fmt.Errorf( - "%s: %s is not a valid output for module %s", - source, mv.Field, mv.Name) - return newErr + diags = diags.Append(fmt.Errorf( + "%s: %q is not a valid output for module %q", + source, mv.Field, mv.Name, + )) } } } - return nil + return diags } -// TreeError is an error returned by Tree.Validate if an error occurs -// with validation. 
-type TreeError struct {
- Name []string
- Err error
+// versionedPathKey returns a path string with every level's full name, version
+// and source encoded. This is to provide a unique key for our module storage,
+// since submodules need to know which versions of their ancestor modules they
+// are loaded from.
+// For example, if module A has a subdirectory B and module A's source or
+// version is updated, B's storage key must reflect this change in order for
+// the correct version of B's source to be loaded.
+func (t *Tree) versionedPathKey(m *Module) string {
+ path := make([]string, len(t.path)+1)
+ path[len(path)-1] = m.Name + ";" + m.Source
+ // We're going to load these in order for easier reading and debugging, but
+ // in practice they only need to be unique and consistent.
+
+ p := t
+ i := len(path) - 2
+ for ; i >= 0; i-- {
+ if p == nil {
+ break
+ }
+ // we may have been loaded under a blank Tree, so always check for a name
+ // too.
+ if p.name == "" {
+ break
+ }
+ seg := p.name
+ if p.version != "" {
+ seg += "#" + p.version
+ }
+
+ if p.source != "" {
+ seg += ";" + p.source
+ }
+
+ path[i] = seg
+ p = p.parent
+ }
+
+ key := strings.Join(path, "|")
+ return key
+}
+
+// treeError is an error used by Tree.Validate to accumulate all
+// validation errors.
+type treeError struct {
+ Name []string
+ Errs []error
+ Children []*treeError
+}
+
+func (e *treeError) Add(err error) {
+ e.Errs = append(e.Errs, err)
 }

-func (e *TreeError) Error() string {
- // Build up the name
- var buf bytes.Buffer
- for _, n := range e.Name {
- buf.WriteString(n)
- buf.WriteString(".")
+func (e *treeError) AddChild(err *treeError) {
+ e.Children = append(e.Children, err)
+}
+
+func (e *treeError) ErrOrNil() error {
+ if len(e.Errs) > 0 || len(e.Children) > 0 {
+ return e
+ }
+ return nil
+}
+
+func (e *treeError) Error() string {
+ name := strings.Join(e.Name, ".")
+ var out bytes.Buffer
+ fmt.Fprintf(&out, "module %s: ", name)
+
+ if len(e.Errs) == 1 {
+ // single-line error
+ out.WriteString(e.Errs[0].Error())
+ } else {
+ // multi-line error
+ for _, err := range e.Errs {
+ fmt.Fprintf(&out, "\n %s", err)
+ }
+ }
+
+ if len(e.Children) > 0 {
+ // start the next error on a new line
+ out.WriteString("\n ")
+ }
+ for _, child := range e.Children {
+ out.WriteString(child.Error())
 }
- buf.Truncate(buf.Len() - 1)

- // Format the value
- return fmt.Sprintf("module %s: %s", buf.String(), e.Err)
+ return out.String()
 }
diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
new file mode 100644
index 00000000..f203556c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
@@ -0,0 +1,118 @@
+package module
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// validateProviderAlias validates that all provider alias references are
+// defined at some point in the parent tree. This improves UX by catching
+// alias typos at the slight cost of requiring a declaration of usage. This
+// is usually a good tradeoff since not many aliases are used.
+func (t *Tree) validateProviderAlias() error {
+ // If we're not the root, don't perform this validation. We must be the
+ // root since we require full tree visibility.
+ if len(t.path) != 0 {
+ return nil
+ }
+
+ // We'll use a graph to keep track of defined aliases at each level.
+ // As long as a parent defines an alias, it is okay.
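+ // For example (a sketch): a resource in a child module that sets
+ // provider = "aws.east" passes validation as long as that module or
+ // one of its ancestors declares:
+ //
+ //     provider "aws" {
+ //       alias = "east"
+ //     }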
+ var g dag.AcyclicGraph + t.buildProviderAliasGraph(&g, nil) + + // Go through the graph and check that the usage is all good. + var err error + for _, v := range g.Vertices() { + pv, ok := v.(*providerAliasVertex) + if !ok { + // This shouldn't happen, just ignore it. + continue + } + + // If we're not using any aliases, fast track and just continue + if len(pv.Used) == 0 { + continue + } + + // Grab the ancestors since we're going to have to check if our + // parents define any of our aliases. + var parents []*providerAliasVertex + ancestors, _ := g.Ancestors(v) + for _, raw := range ancestors.List() { + if pv, ok := raw.(*providerAliasVertex); ok { + parents = append(parents, pv) + } + } + for k, _ := range pv.Used { + // Check if we define this + if _, ok := pv.Defined[k]; ok { + continue + } + + // Check for a parent + found := false + for _, parent := range parents { + _, found = parent.Defined[k] + if found { + break + } + } + if found { + continue + } + + // We didn't find the alias, error! + err = multierror.Append(err, fmt.Errorf( + "module %s: provider alias must be defined by the module: %s", + strings.Join(pv.Path, "."), k)) + } + } + + return err +} + +func (t *Tree) buildProviderAliasGraph(g *dag.AcyclicGraph, parent dag.Vertex) { + // Add all our defined aliases + defined := make(map[string]struct{}) + for _, p := range t.config.ProviderConfigs { + defined[p.FullName()] = struct{}{} + } + + // Add all our used aliases + used := make(map[string]struct{}) + for _, r := range t.config.Resources { + if r.Provider != "" { + used[r.Provider] = struct{}{} + } + } + + // Add it to the graph + vertex := &providerAliasVertex{ + Path: t.Path(), + Defined: defined, + Used: used, + } + g.Add(vertex) + + // Connect to our parent if we have one + if parent != nil { + g.Connect(dag.BasicEdge(vertex, parent)) + } + + // Build all our children + for _, c := range t.Children() { + c.buildProviderAliasGraph(g, vertex) + } +} + +// providerAliasVertex is the vertex for the graph that keeps track of +// defined provider aliases. 
+type providerAliasVertex struct { + Path []string + Defined map[string]struct{} + Used map[string]struct{} +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/versions.go b/vendor/github.com/hashicorp/terraform/config/module/versions.go new file mode 100644 index 00000000..8348d4b1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/versions.go @@ -0,0 +1,95 @@ +package module + +import ( + "errors" + "fmt" + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/registry/response" +) + +const anyVersion = ">=0.0.0" + +// return the newest version that satisfies the provided constraint +func newest(versions []string, constraint string) (string, error) { + if constraint == "" { + constraint = anyVersion + } + cs, err := version.NewConstraint(constraint) + if err != nil { + return "", err + } + + switch len(versions) { + case 0: + return "", errors.New("no versions found") + case 1: + v, err := version.NewVersion(versions[0]) + if err != nil { + return "", err + } + + if !cs.Check(v) { + return "", fmt.Errorf("no version found matching constraint %q", constraint) + } + return versions[0], nil + } + + sort.Slice(versions, func(i, j int) bool { + // versions should have already been validated + // sort invalid version strings to the end + iv, err := version.NewVersion(versions[i]) + if err != nil { + return true + } + jv, err := version.NewVersion(versions[j]) + if err != nil { + return true + } + return iv.GreaterThan(jv) + }) + + // versions are now in order, so just find the first which satisfies the + // constraint + for i := range versions { + v, err := version.NewVersion(versions[i]) + if err != nil { + continue + } + if cs.Check(v) { + return versions[i], nil + } + } + + return "", nil +} + +// return the newest *moduleVersion that matches the given constraint +// TODO: reconcile these two types and newest* functions +func newestVersion(moduleVersions []*response.ModuleVersion, constraint string) (*response.ModuleVersion, error) { + var versions []string + modules := make(map[string]*response.ModuleVersion) + + for _, m := range moduleVersions { + versions = append(versions, m.Version) + modules[m.Version] = m + } + + match, err := newest(versions, constraint) + return modules[match], err +} + +// return the newest moduleRecord that matches the given constraint +func newestRecord(moduleVersions []moduleRecord, constraint string) (moduleRecord, error) { + var versions []string + modules := make(map[string]moduleRecord) + + for _, m := range moduleVersions { + versions = append(versions, m.Version) + modules[m.Version] = m + } + + match, err := newest(versions, constraint) + return modules[match], err +} diff --git a/vendor/github.com/hashicorp/terraform/config/providers.go b/vendor/github.com/hashicorp/terraform/config/providers.go new file mode 100644 index 00000000..7a50782f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/providers.go @@ -0,0 +1,103 @@ +package config + +import "github.com/blang/semver" + +// ProviderVersionConstraint presents a constraint for a particular +// provider, identified by its full name. +type ProviderVersionConstraint struct { + Constraint string + ProviderType string +} + +// ProviderVersionConstraints is a map from provider full name to its associated +// ProviderVersionConstraint, as produced by Config.RequiredProviders. 
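+//
+// For example (a sketch with invented values), a config containing
+// provider "aws" { version = "~> 1.2" } plus a resource from an
+// undeclared "random" provider would yield:
+//
+//     ProviderVersionConstraints{
+//         "aws":    {ProviderType: "aws", Constraint: "~> 1.2"},
+//         "random": {ProviderType: "random", Constraint: ">=0.0.0"},
+//     }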
+type ProviderVersionConstraints map[string]ProviderVersionConstraint
+
+// RequiredProviders returns the ProviderVersionConstraints for this
+// module.
+//
+// This includes both providers that are explicitly requested by provider
+// blocks and those that are used implicitly by instantiating one of their
+// resource types. In the latter case, the returned semver Range will
+// accept any version of the provider.
+func (c *Config) RequiredProviders() ProviderVersionConstraints {
+ ret := make(ProviderVersionConstraints, len(c.ProviderConfigs))
+
+ configs := c.ProviderConfigsByFullName()
+
+ // In order to find the *implied* dependencies (those without explicit
+ // "provider" blocks) we need to walk over all of the resources and
+ // cross-reference with the provider configs.
+ for _, rc := range c.Resources {
+ providerName := rc.ProviderFullName()
+ var providerType string
+
+ // Default to (effectively) no constraint whatsoever, but we might
+ // override if there's an explicit constraint in config.
+ constraint := ">=0.0.0"
+
+ config, ok := configs[providerName]
+ if ok {
+ if config.Version != "" {
+ constraint = config.Version
+ }
+ providerType = config.Name
+ } else {
+ providerType = providerName
+ }
+
+ ret[providerName] = ProviderVersionConstraint{
+ ProviderType: providerType,
+ Constraint: constraint,
+ }
+ }
+
+ return ret
+}
+
+// RequiredRanges returns a semver.Range for each distinct provider type in
+// the constraint map. If the same provider type appears more than once
+// (e.g. because aliases are in use) then their respective constraints are
+// combined such that they must *all* apply.
+//
+// The result of this method can be passed to the
+// PluginMetaSet.ConstrainVersions method within the plugin/discovery
+// package in order to filter down the available plugins to those which
+// satisfy the given constraints.
+//
+// This function will panic if any of the constraints within cannot be
+// parsed as semver ranges. This is guaranteed to never happen for a
+// constraint set that was built from a configuration that passed validation.
+func (cons ProviderVersionConstraints) RequiredRanges() map[string]semver.Range {
+ ret := make(map[string]semver.Range, len(cons))
+
+ for _, con := range cons {
+ spec := semver.MustParseRange(con.Constraint)
+ if existing, exists := ret[con.ProviderType]; exists {
+ ret[con.ProviderType] = existing.AND(spec)
+ } else {
+ ret[con.ProviderType] = spec
+ }
+ }
+
+ return ret
+}
+
+// ProviderConfigsByFullName returns a map from provider full names (as
+// returned by ProviderConfig.FullName()) to the corresponding provider
+// configs.
+//
+// This function returns no information beyond what's already in
+// c.ProviderConfigs, but returns it in a more convenient shape. If there
+// is more than one provider config with the same full name then the result
+// is undefined, but that is guaranteed not to happen for any config that
+// has passed validation.
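+//
+// Keys follow ProviderConfig.FullName(): for example (a sketch), "aws"
+// for a default provider configuration and "aws.east" for one declared
+// with alias = "east".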
+func (c *Config) ProviderConfigsByFullName() map[string]*ProviderConfig { + ret := make(map[string]*ProviderConfig, len(c.ProviderConfigs)) + + for _, pc := range c.ProviderConfigs { + ret[pc.FullName()] = pc + } + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go new file mode 100644 index 00000000..00fd43fc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go @@ -0,0 +1,40 @@ +package config + +// ProvisionerWhen is an enum for valid values for when to run provisioners. +type ProvisionerWhen int + +const ( + ProvisionerWhenInvalid ProvisionerWhen = iota + ProvisionerWhenCreate + ProvisionerWhenDestroy +) + +var provisionerWhenStrs = map[ProvisionerWhen]string{ + ProvisionerWhenInvalid: "invalid", + ProvisionerWhenCreate: "create", + ProvisionerWhenDestroy: "destroy", +} + +func (v ProvisionerWhen) String() string { + return provisionerWhenStrs[v] +} + +// ProvisionerOnFailure is an enum for valid values for on_failure options +// for provisioners. +type ProvisionerOnFailure int + +const ( + ProvisionerOnFailureInvalid ProvisionerOnFailure = iota + ProvisionerOnFailureContinue + ProvisionerOnFailureFail +) + +var provisionerOnFailureStrs = map[ProvisionerOnFailure]string{ + ProvisionerOnFailureInvalid: "invalid", + ProvisionerOnFailureContinue: "continue", + ProvisionerOnFailureFail: "fail", +} + +func (v ProvisionerOnFailure) String() string { + return provisionerOnFailureStrs[v] +} diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go index cf3bdf9b..1854a8b2 100644 --- a/vendor/github.com/hashicorp/terraform/config/raw_config.go +++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go @@ -3,8 +3,14 @@ package config import ( "bytes" "encoding/gob" + "errors" + "strconv" "sync" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + hcl2 "github.com/hashicorp/hcl2/hcl" "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" "github.com/mitchellh/copystructure" @@ -18,7 +24,7 @@ import ( const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" // RawConfig is a structure that holds a piece of configuration -// where te overall structure is unknown since it will be used +// where the overall structure is unknown since it will be used // to configure a plugin or some other similar external component. // // RawConfigs can be interpolated with variables that come from @@ -27,8 +33,24 @@ const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" // RawConfig supports a query-like interface to request // information from deep within the structure. type RawConfig struct { - Key string - Raw map[string]interface{} + Key string + + // Only _one_ of Raw and Body may be populated at a time. + // + // In the normal case, Raw is populated and Body is nil. + // + // When the experimental HCL2 parsing mode is enabled, "Body" + // is populated and RawConfig serves only to transport the hcl2.Body + // through the rest of Terraform core so we can ultimately decode it + // once its schema is known. + // + // Once we transition to HCL2 as the primary representation, RawConfig + // should be removed altogether and the hcl2.Body should be passed + // around directly. 
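+ // A sketch of the two modes (values invented):
+ //
+ //     rc, _ := NewRawConfig(map[string]interface{}{"ami": "ami-abc123"})
+ //     // rc.Raw is set; rc.Body is nil.
+ //
+ //     rc2 := NewRawConfigHCL2(body)
+ //     // rc2.Body is set; rc2.Raw is nil.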
+ + Raw map[string]interface{} + Body hcl2.Body + Interpolations []ast.Node Variables map[string]InterpolatedVariable @@ -48,6 +70,26 @@ func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) { return result, nil } +// NewRawConfigHCL2 creates a new RawConfig that is serving as a capsule +// to transport a hcl2.Body. In this mode, the publicly-readable struct +// fields are not populated since all operations should instead be diverted +// to the HCL2 body. +// +// For a RawConfig object constructed with this function, the only valid use +// is to later retrieve the Body value and call its own methods. Callers +// may choose to set and then later handle the Key field, in a manner +// consistent with how it is handled by the Value method, but the Value +// method itself must not be used. +// +// This is an experimental codepath to be used only by the HCL2 config loader. +// Non-experimental parsing should _always_ use NewRawConfig to produce a +// fully-functional RawConfig object. +func NewRawConfigHCL2(body hcl2.Body) *RawConfig { + return &RawConfig{ + Body: body, + } +} + // RawMap returns a copy of the RawConfig.Raw map. func (r *RawConfig) RawMap() map[string]interface{} { r.lock.Lock() @@ -62,9 +104,17 @@ func (r *RawConfig) RawMap() map[string]interface{} { // Copy returns a copy of this RawConfig, uninterpolated. func (r *RawConfig) Copy() *RawConfig { + if r == nil { + return nil + } + r.lock.Lock() defer r.lock.Unlock() + if r.Body != nil { + return NewRawConfigHCL2(r.Body) + } + newRaw := make(map[string]interface{}) for k, v := range r.Raw { newRaw[k] = v @@ -123,27 +173,6 @@ func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error { config := langEvalConfig(vs) return r.interpolate(func(root ast.Node) (interface{}, error) { - // We detect the variables again and check if the value of any - // of the variables is the computed value. If it is, then we - // treat this entire value as computed. - // - // We have to do this here before the `lang.Eval` because - // if any of the variables it depends on are computed, then - // the interpolation can fail at runtime for other reasons. Example: - // `${count.index+1}`: in a world where `count.index` is computed, - // this would fail a type check since the computed placeholder is - // a string, but realistically the whole value is just computed. - vars, err := DetectVariables(root) - if err != nil { - return "", err - } - for _, v := range vars { - varVal, ok := vs[v.FullKey()] - if ok && varVal.Value == UnknownVariableValue { - return UnknownVariableValue, nil - } - } - // None of the variables we need are computed, meaning we should // be able to properly evaluate. 
 		result, err := hil.Eval(root, config)
@@ -187,17 +216,19 @@ func (r *RawConfig) Merge(other *RawConfig) *RawConfig {
 	}
 
 	// Build the unknown keys
-	unknownKeys := make(map[string]struct{})
-	for _, k := range r.unknownKeys {
-		unknownKeys[k] = struct{}{}
-	}
-	for _, k := range other.unknownKeys {
-		unknownKeys[k] = struct{}{}
-	}
+	if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 {
+		unknownKeys := make(map[string]struct{})
+		for _, k := range r.unknownKeys {
+			unknownKeys[k] = struct{}{}
+		}
+		for _, k := range other.unknownKeys {
+			unknownKeys[k] = struct{}{}
+		}
 
-	result.unknownKeys = make([]string, 0, len(unknownKeys))
-	for k, _ := range unknownKeys {
-		result.unknownKeys = append(result.unknownKeys, k)
+		result.unknownKeys = make([]string, 0, len(unknownKeys))
+		for k, _ := range unknownKeys {
+			result.unknownKeys = append(result.unknownKeys, k)
+		}
 	}
 
 	return result
@@ -238,6 +269,13 @@ func (r *RawConfig) init() error {
 }
 
 func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
+	if r.Body != nil {
+		// For RawConfigs created for the HCL2 experiment, callers must
+		// use the HCL2 Body API directly rather than interpolating via
+		// the RawConfig.
+		return errors.New("this feature is not yet supported under the HCL2 experiment")
+	}
+
 	config, err := copystructure.Copy(r.Raw)
 	if err != nil {
 		return err
@@ -255,14 +293,24 @@ func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
 }
 
 func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
+	if r == nil && r2 == nil {
+		return nil
+	}
+
+	if r == nil {
+		r = &RawConfig{}
+	}
+
 	rawRaw, err := copystructure.Copy(r.Raw)
 	if err != nil {
 		panic(err)
 	}
 
 	raw := rawRaw.(map[string]interface{})
-	for k, v := range r2.Raw {
-		raw[k] = v
+	if r2 != nil {
+		for k, v := range r2.Raw {
+			raw[k] = v
+		}
 	}
 
 	result, err := NewRawConfig(raw)
@@ -273,6 +321,74 @@ func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
 	return result
 }
 
+// couldBeInteger is a helper that determines if the represented value could
+// result in an integer.
+//
+// This function only works for RawConfigs that have "Key" set, meaning that
+// a single result can be produced. Calling this function will overwrite
+// the Config and Value results to be a test value.
+//
+// This function is conservative. If there is some doubt about whether the
+// result could be an integer -- for example, if it depends on a variable
+// whose type we don't know yet -- it will still return true.
+func (r *RawConfig) couldBeInteger() bool {
+	if r.Key == "" {
+		// un-keyed RawConfigs can never produce numbers
+		return false
+	}
+	if r.Body == nil {
+		// Normal path: using the interpolator in this package
+		// Interpolate with a fixed number to verify that it's a number.
+		r.interpolate(func(root ast.Node) (interface{}, error) {
+			// Execute the node but transform the AST so that it returns
+			// a fixed value of "5" for all interpolations.
+			result, err := hil.Eval(
+				hil.FixedValueTransform(
+					root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
+				nil)
+			if err != nil {
+				return "", err
+			}
+
+			return result.Value, nil
+		})
+		_, err := strconv.ParseInt(r.Value().(string), 0, 0)
+		return err == nil
+	} else {
+		// HCL2 experiment path: using the HCL2 API via shims
+		//
+		// This path catches fewer situations because we have to assume all
+		// variables are entirely unknown in HCL2, rather than the assumption
+		// above that all variables can be numbers because names like "var.foo"
+		// are considered a single variable rather than an attribute access.
+		// This is fine in practice, because we get a definitive answer
+		// during the graph walk when we have real values to work with.
+		attrs, diags := r.Body.JustAttributes()
+		if diags.HasErrors() {
+			// This body is not just a single attribute with a value, so
+			// this can't be a number.
+			return false
+		}
+		attr, hasAttr := attrs[r.Key]
+		if !hasAttr {
+			return false
+		}
+		result, diags := hcl2EvalWithUnknownVars(attr.Expr)
+		if diags.HasErrors() {
+			// We'll conservatively assume that this error is a result of
+			// us not being ready to fully-populate the scope, and catch
+			// any further problems during the main graph walk.
+			return true
+		}
+
+		// If the result is convertible to number then we'll allow it.
+		// We do this because an unknown string is optimistically convertible
+		// to number (might be "5") but a _known_ string "hello" is not.
+		_, err := convert.Convert(result, cty.Number)
+		return err == nil
+	}
+}
+
 // UnknownKeys returns the keys of the configuration that are unknown
 // because they had interpolated variables that must be computed.
 func (r *RawConfig) UnknownKeys() []string {
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
index 930645fa..8a55e060 100644
--- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
@@ -1,8 +1,8 @@
-// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT
+// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT.
 
 package config
 
-import "fmt"
+import "strconv"
 
 const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
 
@@ -10,7 +10,7 @@ var _ResourceMode_index = [...]uint8{0, 19, 35}
 
 func (i ResourceMode) String() string {
 	if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
-		return fmt.Sprintf("ResourceMode(%d)", i)
+		return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
 	}
 	return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
 }
diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go
new file mode 100644
index 00000000..831fc778
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/testing.go
@@ -0,0 +1,17 @@
+package config
+
+import (
+	"testing"
+)
+
+// TestRawConfig is used to create a RawConfig for testing.
+func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig {
+	t.Helper()
+
+	cfg, err := NewRawConfig(c)
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	return cfg
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go
index 20b3e040..b7eb10c3 100644
--- a/vendor/github.com/hashicorp/terraform/dag/dag.go
+++ b/vendor/github.com/hashicorp/terraform/dag/dag.go
@@ -2,11 +2,8 @@ package dag
 
 import (
 	"fmt"
-	"log"
 	"sort"
 	"strings"
-	"sync"
-	"time"
 
 	"github.com/hashicorp/go-multierror"
 )
@@ -24,6 +21,10 @@ type WalkFunc func(Vertex) error
 // walk as an argument
 type DepthWalkFunc func(Vertex, int) error
 
+func (g *AcyclicGraph) DirectedGraph() Grapher {
+	return g
+}
+
 // Returns a Set that includes every Vertex yielded by walking down from the
 // provided starting Vertex v.
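For reference, the go-cty conversion behavior that the couldBeInteger HCL2 path above relies on can be reproduced in isolation (a minimal sketch using the cty and convert packages imported by raw_config.go; not part of the vendored sources):

	// An unknown string might still turn out to be numeric, so the
	// conversion optimistically succeeds.
	_, err := convert.Convert(cty.UnknownVal(cty.String), cty.Number) // err == nil
	// A known, non-numeric string can never be a number.
	_, err = convert.Convert(cty.StringVal("hello"), cty.Number) // err != nil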
func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) { @@ -99,11 +100,13 @@ func (g *AcyclicGraph) TransitiveReduction() { // v such that the edge (u,v) exists (v is a direct descendant of u). // // For each v-prime reachable from v, remove the edge (u, v-prime). + defer g.debug.BeginOperation("TransitiveReduction", "").End("") + for _, u := range g.Vertices() { uTargets := g.DownEdges(u) vs := AsVertexList(g.DownEdges(u)) - g.DepthFirstWalk(vs, func(v Vertex, d int) error { + g.depthFirstWalk(vs, false, func(v Vertex, d int) error { shared := uTargets.Intersection(g.DownEdges(v)) for _, vPrime := range AsVertexList(shared) { g.RemoveEdge(BasicEdge(u, vPrime)) @@ -161,94 +164,11 @@ func (g *AcyclicGraph) Cycles() [][]Vertex { // This will walk nodes in parallel if it can. Because the walk is done // in parallel, the error returned will be a multierror. func (g *AcyclicGraph) Walk(cb WalkFunc) error { - // Cache the vertices since we use it multiple times - vertices := g.Vertices() - - // Build the waitgroup that signals when we're done - var wg sync.WaitGroup - wg.Add(len(vertices)) - doneCh := make(chan struct{}) - go func() { - defer close(doneCh) - wg.Wait() - }() - - // The map of channels to watch to wait for vertices to finish - vertMap := make(map[Vertex]chan struct{}) - for _, v := range vertices { - vertMap[v] = make(chan struct{}) - } - - // The map of whether a vertex errored or not during the walk - var errLock sync.Mutex - var errs error - errMap := make(map[Vertex]bool) - for _, v := range vertices { - // Build our list of dependencies and the list of channels to - // wait on until we start executing for this vertex. - deps := AsVertexList(g.DownEdges(v)) - depChs := make([]<-chan struct{}, len(deps)) - for i, dep := range deps { - depChs[i] = vertMap[dep] - } - - // Get our channel so that we can close it when we're done - ourCh := vertMap[v] - - // Start the goroutine to wait for our dependencies - readyCh := make(chan bool) - go func(v Vertex, deps []Vertex, chs []<-chan struct{}, readyCh chan<- bool) { - // First wait for all the dependencies - for i, ch := range chs { - DepSatisfied: - for { - select { - case <-ch: - break DepSatisfied - case <-time.After(time.Second * 5): - log.Printf("[DEBUG] vertex %s, waiting for: %s", - VertexName(v), VertexName(deps[i])) - } - } - log.Printf("[DEBUG] vertex %s, got dep: %s", - VertexName(v), VertexName(deps[i])) - } - - // Then, check the map to see if any of our dependencies failed - errLock.Lock() - defer errLock.Unlock() - for _, dep := range deps { - if errMap[dep] { - errMap[v] = true - readyCh <- false - return - } - } - - readyCh <- true - }(v, deps, depChs, readyCh) - - // Start the goroutine that executes - go func(v Vertex, doneCh chan<- struct{}, readyCh <-chan bool) { - defer close(doneCh) - defer wg.Done() - - var err error - if ready := <-readyCh; ready { - err = cb(v) - } - - errLock.Lock() - defer errLock.Unlock() - if err != nil { - errMap[v] = true - errs = multierror.Append(errs, err) - } - }(v, ourCh, readyCh) - } + defer g.debug.BeginOperation(typeWalk, "").End("") - <-doneCh - return errs + w := &Walker{Callback: cb, Reverse: true} + w.Update(g) + return w.Wait() } // simple convenience helper for converting a dag.Set to a []Vertex @@ -267,9 +187,20 @@ type vertexAtDepth struct { } // depthFirstWalk does a depth-first walk of the graph starting from -// the vertices in start. This is not exported now but it would make sense -// to export this publicly at some point. +// the vertices in start. 
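The rewritten Walk above is now a thin wrapper around the new Walker type; driving the Walker directly looks roughly like this (a sketch mirroring the new Walk body; g and cb stand in for a real graph and callback):

	w := &Walker{Callback: cb, Reverse: true}
	w.Update(g) // diff the walker's recorded vertices/edges against g
	if err := w.Wait(); err != nil {
		log.Printf("[ERROR] walk failed: %v", err)
	}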
 func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+	return g.depthFirstWalk(start, true, f)
+}
+
+// This internal method provides the option of not sorting the vertices during
+// the walk, which we use for the transitive reduction.
+// Some configurations can lead to fully-connected subgraphs, which makes our
+// transitive reduction algorithm O(n^3). This is still passable for the size
+// of our graphs, but the additional n^2 sort operations would make this
+// uncomputable in a reasonable amount of time.
+func (g *AcyclicGraph) depthFirstWalk(start []Vertex, sorted bool, f DepthWalkFunc) error {
+	defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("")
+
 	seen := make(map[Vertex]struct{})
 	frontier := make([]*vertexAtDepth, len(start))
 	for i, v := range start {
@@ -297,7 +228,11 @@ func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
 
 		// Visit targets of this in a consistent order.
 		targets := AsVertexList(g.DownEdges(current.Vertex))
-		sort.Sort(byVertexName(targets))
+
+		if sorted {
+			sort.Sort(byVertexName(targets))
+		}
+
 		for _, t := range targets {
 			frontier = append(frontier, &vertexAtDepth{
 				Vertex: t,
@@ -312,6 +247,8 @@ func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
 // reverseDepthFirstWalk does a depth-first walk _up_ the graph starting from
 // the vertices in start.
 func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+	defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("")
+
 	seen := make(map[Vertex]struct{})
 	frontier := make([]*vertexAtDepth, len(start))
 	for i, v := range start {
diff --git a/vendor/github.com/hashicorp/terraform/dag/dot.go b/vendor/github.com/hashicorp/terraform/dag/dot.go
new file mode 100644
index 00000000..7e6d2af3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/dot.go
@@ -0,0 +1,282 @@
+package dag
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// DotOpts are the options for generating a dot formatted Graph.
+type DotOpts struct {
+	// Allows some nodes to decide to only show themselves when the user has
+	// requested the "verbose" graph.
+	Verbose bool
+
+	// Highlight Cycles
+	DrawCycles bool
+
+	// How many levels to expand modules as we draw
+	MaxDepth int
+
+	// use this to keep the cluster_ naming convention from the previous dot writer
+	cluster bool
+}
+
+// GraphNodeDotter can be implemented by a node to cause it to be included
+// in the dot graph. The DotNode method will be called which is expected to
+// return a representation of this node.
+type GraphNodeDotter interface {
+	// DotNode is called to return the dot formatting for the node.
+	// The first parameter is the title of the node.
+	// The second parameter includes user-specified options that affect the dot
+	// graph. See DotOpts above for details.
+	DotNode(string, *DotOpts) *DotNode
+}
+
+// DotNode provides a structure for Vertices to return in order to specify their
+// dot format.
+type DotNode struct {
+	Name  string
+	Attrs map[string]string
+}
+
+// Returns the DOT representation of this Graph.
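A vertex opts into dot output by implementing GraphNodeDotter; a hypothetical sketch (the exampleVertex type and its attributes are illustrative only):

	type exampleVertex struct{ name string }

	func (v *exampleVertex) Name() string { return v.name }

	func (v *exampleVertex) DotNode(title string, _ *DotOpts) *DotNode {
		return &DotNode{
			Name:  title,
			Attrs: map[string]string{"shape": "box"},
		}
	}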
+func (g *marshalGraph) Dot(opts *DotOpts) []byte {
+	if opts == nil {
+		opts = &DotOpts{
+			DrawCycles: true,
+			MaxDepth:   -1,
+			Verbose:    true,
+		}
+	}
+
+	var w indentWriter
+	w.WriteString("digraph {\n")
+	w.Indent()
+
+	// some dot defaults
+	w.WriteString(`compound = "true"` + "\n")
+	w.WriteString(`newrank = "true"` + "\n")
+
+	// the top level graph is written as the first subgraph
+	w.WriteString(`subgraph "root" {` + "\n")
+	g.writeBody(opts, &w)
+
+	// cluster isn't really used other than for naming purposes in some graphs
+	opts.cluster = opts.MaxDepth != 0
+	maxDepth := opts.MaxDepth
+	if maxDepth == 0 {
+		maxDepth = -1
+	}
+
+	for _, s := range g.Subgraphs {
+		g.writeSubgraph(s, opts, maxDepth, &w)
+	}
+
+	w.Unindent()
+	w.WriteString("}\n")
+	return w.Bytes()
+}
+
+func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
+	var buf bytes.Buffer
+	graphName := g.Name
+	if graphName == "" {
+		graphName = "root"
+	}
+
+	name := v.Name
+	attrs := v.Attrs
+	if v.graphNodeDotter != nil {
+		node := v.graphNodeDotter.DotNode(name, opts)
+		if node == nil {
+			return []byte{}
+		}
+
+		newAttrs := make(map[string]string)
+		for k, v := range attrs {
+			newAttrs[k] = v
+		}
+		for k, v := range node.Attrs {
+			newAttrs[k] = v
+		}
+
+		name = node.Name
+		attrs = newAttrs
+	}
+
+	buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name))
+	writeAttrs(&buf, attrs)
+	buf.WriteByte('\n')
+
+	return buf.Bytes()
+}
+
+func (e *marshalEdge) dot(g *marshalGraph) string {
+	var buf bytes.Buffer
+	graphName := g.Name
+	if graphName == "" {
+		graphName = "root"
+	}
+
+	sourceName := g.vertexByID(e.Source).Name
+	targetName := g.vertexByID(e.Target).Name
+	s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName)
+	buf.WriteString(s)
+	writeAttrs(&buf, e.Attrs)
+
+	return buf.String()
+}
+
+func cycleDot(e *marshalEdge, g *marshalGraph) string {
+	return e.dot(g) + ` [color = "red", penwidth = "2.0"]`
+}
+
+// Write the subgraph body. This is recursive, and the depth argument is used
+// to record the current depth of iteration.
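For orientation, the Dot method above produces output shaped roughly like the following (abbreviated, with hypothetical vertex names):

	digraph {
		compound = "true"
		newrank = "true"
		subgraph "root" {
			"[root] aws_instance.web" -> "[root] provider.aws"
		}
	}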
+func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) {
+	if depth == 0 {
+		return
+	}
+	depth--
+
+	name := sg.Name
+	if opts.cluster {
+		// we prefix with cluster_ to match the old dot output
+		name = "cluster_" + name
+		sg.Attrs["label"] = sg.Name
+	}
+	w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
+	sg.writeBody(opts, w)
+
+	for _, sg := range sg.Subgraphs {
+		g.writeSubgraph(sg, opts, depth, w)
+	}
+}
+
+func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
+	w.Indent()
+
+	for _, as := range attrStrings(g.Attrs) {
+		w.WriteString(as + "\n")
+	}
+
+	// list of Vertices that aren't to be included in the dot output
+	skip := map[string]bool{}
+
+	for _, v := range g.Vertices {
+		if v.graphNodeDotter == nil {
+			skip[v.ID] = true
+			continue
+		}
+
+		w.Write(v.dot(g, opts))
+	}
+
+	var dotEdges []string
+
+	if opts.DrawCycles {
+		for _, c := range g.Cycles {
+			if len(c) < 2 {
+				continue
+			}
+
+			for i, j := 0, 1; i < len(c); i, j = i+1, j+1 {
+				if j >= len(c) {
+					j = 0
+				}
+				src := c[i]
+				tgt := c[j]
+
+				if skip[src.ID] || skip[tgt.ID] {
+					continue
+				}
+
+				e := &marshalEdge{
+					Name:   fmt.Sprintf("%s|%s", src.Name, tgt.Name),
+					Source: src.ID,
+					Target: tgt.ID,
+					Attrs:  make(map[string]string),
+				}
+
+				dotEdges = append(dotEdges, cycleDot(e, g))
+				src = tgt
+			}
+		}
+	}
+
+	for _, e := range g.Edges {
+		dotEdges = append(dotEdges, e.dot(g))
+	}
+
+	// sort these again to match the old output
+	sort.Strings(dotEdges)
+
+	for _, e := range dotEdges {
+		w.WriteString(e + "\n")
+	}
+
+	w.Unindent()
+	w.WriteString("}\n")
+}
+
+func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
+	if len(attrs) > 0 {
+		buf.WriteString(" [")
+		buf.WriteString(strings.Join(attrStrings(attrs), ", "))
+		buf.WriteString("]")
+	}
+}
+
+func attrStrings(attrs map[string]string) []string {
+	strings := make([]string, 0, len(attrs))
+	for k, v := range attrs {
+		strings = append(strings, fmt.Sprintf("%s = %q", k, v))
+	}
+	sort.Strings(strings)
+	return strings
+}
+
+// Provide a bytes.Buffer like structure, which will indent when starting a
+// newline.
+type indentWriter struct {
+	bytes.Buffer
+	level int
+}
+
+func (w *indentWriter) indent() {
+	newline := []byte("\n")
+	if !bytes.HasSuffix(w.Bytes(), newline) {
+		return
+	}
+	for i := 0; i < w.level; i++ {
+		w.Buffer.WriteString("\t")
+	}
+}
+
+// Indent increases indentation by 1
+func (w *indentWriter) Indent() { w.level++ }
+
+// Unindent decreases indentation by 1
+func (w *indentWriter) Unindent() { w.level-- }
+
+// the following methods intercept the bytes.Buffer writes and insert the
+// indentation when starting a new line.
+func (w *indentWriter) Write(b []byte) (int, error) {
+	w.indent()
+	return w.Buffer.Write(b)
+}
+
+func (w *indentWriter) WriteString(s string) (int, error) {
+	w.indent()
+	return w.Buffer.WriteString(s)
+}
+func (w *indentWriter) WriteByte(b byte) error {
+	w.indent()
+	return w.Buffer.WriteByte(b)
+}
+func (w *indentWriter) WriteRune(r rune) (int, error) {
+	w.indent()
+	return w.Buffer.WriteRune(r)
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go
index 01211805..e7517a20 100644
--- a/vendor/github.com/hashicorp/terraform/dag/graph.go
+++ b/vendor/github.com/hashicorp/terraform/dag/graph.go
@@ -2,9 +2,10 @@ package dag
 
 import (
 	"bytes"
+	"encoding/json"
 	"fmt"
+	"io"
 	"sort"
-	"sync"
 )
 
 // Graph is used to represent a dependency graph.
@@ -13,7 +14,21 @@ type Graph struct {
 	edges     *Set
 	downEdges map[interface{}]*Set
 	upEdges   map[interface{}]*Set
-	once      sync.Once
+
+	// JSON encoder for recording debug information
+	debug *encoder
+}
+
+// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher.
+type Subgrapher interface {
+	Subgraph() Grapher
+}
+
+// A Grapher is any type that returns a Grapher, mainly used to identify
+// dag.Graph and dag.AcyclicGraph. In the case of Graph and AcyclicGraph, they
+// return themselves.
+type Grapher interface {
+	DirectedGraph() Grapher
 }
 
 // Vertex of the graph.
@@ -26,6 +41,10 @@ type NamedVertex interface {
 	Name() string
 }
 
+func (g *Graph) DirectedGraph() Grapher {
+	return g
+}
+
 // Vertices returns the list of all the vertices in the graph.
 func (g *Graph) Vertices() []Vertex {
 	list := g.vertices.List()
@@ -48,6 +67,32 @@ func (g *Graph) Edges() []Edge {
 	return result
 }
 
+// EdgesFrom returns the list of edges from the given source.
+func (g *Graph) EdgesFrom(v Vertex) []Edge {
+	var result []Edge
+	from := hashcode(v)
+	for _, e := range g.Edges() {
+		if hashcode(e.Source()) == from {
+			result = append(result, e)
+		}
+	}
+
+	return result
+}
+
+// EdgesTo returns the list of edges to the given target.
+func (g *Graph) EdgesTo(v Vertex) []Edge {
+	var result []Edge
+	search := hashcode(v)
+	for _, e := range g.Edges() {
+		if hashcode(e.Target()) == search {
+			result = append(result, e)
+		}
+	}
+
+	return result
+}
+
 // HasVertex checks if the given Vertex is present in the graph.
 func (g *Graph) HasVertex(v Vertex) bool {
 	return g.vertices.Include(v)
@@ -61,8 +106,9 @@ func (g *Graph) HasEdge(e Edge) bool {
 // Add adds a vertex to the graph. This is safe to call multiple times with
 // the same Vertex.
 func (g *Graph) Add(v Vertex) Vertex {
-	g.once.Do(g.init)
+	g.init()
 	g.vertices.Add(v)
+	g.debug.Add(v)
 	return v
 }
 
@@ -71,6 +117,7 @@ func (g *Graph) Add(v Vertex) Vertex {
 func (g *Graph) Remove(v Vertex) Vertex {
 	// Delete the vertex itself
 	g.vertices.Delete(v)
+	g.debug.Remove(v)
 
 	// Delete the edges to non-existent things
 	for _, target := range g.DownEdges(v).List() {
@@ -92,6 +139,8 @@ func (g *Graph) Replace(original, replacement Vertex) bool {
 		return false
 	}
 
+	defer g.debug.BeginOperation("Replace", "").End("")
+
 	// If they're the same, then don't do anything
 	if original == replacement {
 		return true
@@ -114,7 +163,8 @@ func (g *Graph) Replace(original, replacement Vertex) bool {
 
 // RemoveEdge removes an edge from the graph.
 func (g *Graph) RemoveEdge(edge Edge) {
-	g.once.Do(g.init)
+	g.init()
+	g.debug.RemoveEdge(edge)
 
 	// Delete the edge from the set
 	g.edges.Delete(edge)
@@ -130,13 +180,13 @@ func (g *Graph) RemoveEdge(edge Edge) {
 
 // DownEdges returns the outward edges from the source Vertex v.
 func (g *Graph) DownEdges(v Vertex) *Set {
-	g.once.Do(g.init)
+	g.init()
	return g.downEdges[hashcode(v)]
 }
 
 // UpEdges returns the inward edges to the destination Vertex v.
 func (g *Graph) UpEdges(v Vertex) *Set {
-	g.once.Do(g.init)
+	g.init()
 	return g.upEdges[hashcode(v)]
 }
 
@@ -145,7 +195,8 @@ func (g *Graph) UpEdges(v Vertex) *Set {
 // verified through pointer equality of the vertices, not through the
 // value of the edge itself.
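A vertex can expose a nested graph through Subgrapher, which is how the marshaling code below discovers subgraphs; a hypothetical sketch (the moduleVertex type is illustrative only):

	type moduleVertex struct {
		inner *AcyclicGraph
	}

	// Subgraph satisfies Subgrapher; an *AcyclicGraph satisfies Grapher
	// via its DirectedGraph method.
	func (v *moduleVertex) Subgraph() Grapher { return v.inner }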
func (g *Graph) Connect(edge Edge) { - g.once.Do(g.init) + g.init() + g.debug.Connect(edge) source := edge.Source() target := edge.Target() @@ -259,10 +310,72 @@ func (g *Graph) String() string { } func (g *Graph) init() { - g.vertices = new(Set) - g.edges = new(Set) - g.downEdges = make(map[interface{}]*Set) - g.upEdges = make(map[interface{}]*Set) + if g.vertices == nil { + g.vertices = new(Set) + } + if g.edges == nil { + g.edges = new(Set) + } + if g.downEdges == nil { + g.downEdges = make(map[interface{}]*Set) + } + if g.upEdges == nil { + g.upEdges = make(map[interface{}]*Set) + } +} + +// Dot returns a dot-formatted representation of the Graph. +func (g *Graph) Dot(opts *DotOpts) []byte { + return newMarshalGraph("", g).Dot(opts) +} + +// MarshalJSON returns a JSON representation of the entire Graph. +func (g *Graph) MarshalJSON() ([]byte, error) { + dg := newMarshalGraph("root", g) + return json.MarshalIndent(dg, "", " ") +} + +// SetDebugWriter sets the io.Writer where the Graph will record debug +// information. After this is set, the graph will immediately encode itself to +// the stream, and continue to record all subsequent operations. +func (g *Graph) SetDebugWriter(w io.Writer) { + g.debug = &encoder{w: w} + g.debug.Encode(newMarshalGraph("root", g)) +} + +// DebugVertexInfo encodes arbitrary information about a vertex in the graph +// debug logs. +func (g *Graph) DebugVertexInfo(v Vertex, info string) { + va := newVertexInfo(typeVertexInfo, v, info) + g.debug.Encode(va) +} + +// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug +// logs. +func (g *Graph) DebugEdgeInfo(e Edge, info string) { + ea := newEdgeInfo(typeEdgeInfo, e, info) + g.debug.Encode(ea) +} + +// DebugVisitInfo records a visit to a Vertex during a walk operation. +func (g *Graph) DebugVisitInfo(v Vertex, info string) { + vi := newVertexInfo(typeVisitInfo, v, info) + g.debug.Encode(vi) +} + +// DebugOperation marks the start of a set of graph transformations in +// the debug log, and returns a DebugOperationEnd func, which marks the end of +// the operation in the log. Additional information can be added to the log via +// the info parameter. +// +// The returned func's End method allows this method to be called from a single +// defer statement: +// defer g.DebugOperationBegin("OpName", "operating").End("") +// +// The returned function must be called to properly close the logical operation +// in the logs. +func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd { + return g.debug.BeginOperation(operation, info) } // VertexName returns the name of a vertex. diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go new file mode 100644 index 00000000..c567d271 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/dag/marshal.go @@ -0,0 +1,474 @@ +package dag + +import ( + "encoding/json" + "fmt" + "io" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +const ( + typeOperation = "Operation" + typeTransform = "Transform" + typeWalk = "Walk" + typeDepthFirstWalk = "DepthFirstWalk" + typeReverseDepthFirstWalk = "ReverseDepthFirstWalk" + typeTransitiveReduction = "TransitiveReduction" + typeEdgeInfo = "EdgeInfo" + typeVertexInfo = "VertexInfo" + typeVisitInfo = "VisitInfo" +) + +// the marshal* structs are for serialization of the graph data. +type marshalGraph struct { + // Type is always "Graph", for identification as a top level object in the + // JSON stream. 
+ Type string + + // Each marshal structure requires a unique ID so that it can be referenced + // by other structures. + ID string `json:",omitempty"` + + // Human readable name for this graph. + Name string `json:",omitempty"` + + // Arbitrary attributes that can be added to the output. + Attrs map[string]string `json:",omitempty"` + + // List of graph vertices, sorted by ID. + Vertices []*marshalVertex `json:",omitempty"` + + // List of edges, sorted by Source ID. + Edges []*marshalEdge `json:",omitempty"` + + // Any number of subgraphs. A subgraph itself is considered a vertex, and + // may be referenced by either end of an edge. + Subgraphs []*marshalGraph `json:",omitempty"` + + // Any lists of vertices that are included in cycles. + Cycles [][]*marshalVertex `json:",omitempty"` +} + +// The add, remove, connect, removeEdge methods mirror the basic Graph +// manipulations to reconstruct a marshalGraph from a debug log. +func (g *marshalGraph) add(v *marshalVertex) { + g.Vertices = append(g.Vertices, v) + sort.Sort(vertices(g.Vertices)) +} + +func (g *marshalGraph) remove(v *marshalVertex) { + for i, existing := range g.Vertices { + if v.ID == existing.ID { + g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...) + return + } + } +} + +func (g *marshalGraph) connect(e *marshalEdge) { + g.Edges = append(g.Edges, e) + sort.Sort(edges(g.Edges)) +} + +func (g *marshalGraph) removeEdge(e *marshalEdge) { + for i, existing := range g.Edges { + if e.Source == existing.Source && e.Target == existing.Target { + g.Edges = append(g.Edges[:i], g.Edges[i+1:]...) + return + } + } +} + +func (g *marshalGraph) vertexByID(id string) *marshalVertex { + for _, v := range g.Vertices { + if id == v.ID { + return v + } + } + return nil +} + +type marshalVertex struct { + // Unique ID, used to reference this vertex from other structures. + ID string + + // Human readable name + Name string `json:",omitempty"` + + Attrs map[string]string `json:",omitempty"` + + // This is to help transition from the old Dot interfaces. We record if the + // node was a GraphNodeDotter here, so we can call it to get attributes. 
+	graphNodeDotter GraphNodeDotter
+}
+
+func newMarshalVertex(v Vertex) *marshalVertex {
+	dn, ok := v.(GraphNodeDotter)
+	if !ok {
+		dn = nil
+	}
+
+	return &marshalVertex{
+		ID:              marshalVertexID(v),
+		Name:            VertexName(v),
+		Attrs:           make(map[string]string),
+		graphNodeDotter: dn,
+	}
+}
+
+// vertices is a sort.Interface implementation for sorting vertices by Name
+type vertices []*marshalVertex
+
+func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name }
+func (v vertices) Len() int           { return len(v) }
+func (v vertices) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
+
+type marshalEdge struct {
+	// Human readable name
+	Name string
+
+	// Source and Target Vertices by ID
+	Source string
+	Target string
+
+	Attrs map[string]string `json:",omitempty"`
+}
+
+func newMarshalEdge(e Edge) *marshalEdge {
+	return &marshalEdge{
+		Name:   fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())),
+		Source: marshalVertexID(e.Source()),
+		Target: marshalVertexID(e.Target()),
+		Attrs:  make(map[string]string),
+	}
+}
+
+// edges is a sort.Interface implementation for sorting edges by Name
+type edges []*marshalEdge
+
+func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name }
+func (e edges) Len() int           { return len(e) }
+func (e edges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+
+// build a marshalGraph structure from a *Graph
+func newMarshalGraph(name string, g *Graph) *marshalGraph {
+	mg := &marshalGraph{
+		Type:  "Graph",
+		Name:  name,
+		Attrs: make(map[string]string),
+	}
+
+	for _, v := range g.Vertices() {
+		id := marshalVertexID(v)
+		if sg, ok := marshalSubgrapher(v); ok {
+			smg := newMarshalGraph(VertexName(v), sg)
+			smg.ID = id
+			mg.Subgraphs = append(mg.Subgraphs, smg)
+		}
+
+		mv := newMarshalVertex(v)
+		mg.Vertices = append(mg.Vertices, mv)
+	}
+
+	sort.Sort(vertices(mg.Vertices))
+
+	for _, e := range g.Edges() {
+		mg.Edges = append(mg.Edges, newMarshalEdge(e))
+	}
+
+	sort.Sort(edges(mg.Edges))
+
+	for _, c := range (&AcyclicGraph{*g}).Cycles() {
+		var cycle []*marshalVertex
+		for _, v := range c {
+			mv := newMarshalVertex(v)
+			cycle = append(cycle, mv)
+		}
+		mg.Cycles = append(mg.Cycles, cycle)
+	}
+
+	return mg
+}
+
+// Attempt to return a unique ID for any vertex.
+func marshalVertexID(v Vertex) string {
+	val := reflect.ValueOf(v)
+	switch val.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+		return strconv.Itoa(int(val.Pointer()))
+	case reflect.Interface:
+		return strconv.Itoa(int(val.InterfaceData()[1]))
+	}
+
+	if v, ok := v.(Hashable); ok {
+		h := v.Hashcode()
+		if h, ok := h.(string); ok {
+			return h
+		}
+	}
+
+	// fallback to a name, which we hope is unique.
+	return VertexName(v)
+
+	// we could try harder by attempting to read the arbitrary value from the
+	// interface, but we shouldn't get here from terraform right now.
+}
+
+// check for a Subgrapher, and return the underlying *Graph.
+func marshalSubgrapher(v Vertex) (*Graph, bool) {
+	sg, ok := v.(Subgrapher)
+	if !ok {
+		return nil, false
+	}
+
+	switch g := sg.Subgraph().DirectedGraph().(type) {
+	case *Graph:
+		return g, true
+	case *AcyclicGraph:
+		return &g.Graph, true
+	}
+
+	return nil, false
+}
+
+// The DebugOperationEnd func type provides a way to call an End function via a
+// method call, allowing for the chaining of methods in a defer statement.
+type DebugOperationEnd func(string)
+
+// End calls function e with the info parameter, marking the end of this
+// operation in the logs.
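Because DebugOperationEnd is a func type with an End method, a begin/end pair collapses into a single defer statement (sketch; annotateGraph is a hypothetical caller):

	func annotateGraph(g *Graph) {
		defer g.DebugOperation("AnnotateGraph", "starting").End("finished")
		// ... mutations here are recorded between the Begin and End entries ...
	}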
+func (e DebugOperationEnd) End(info string) { e(info) }
+
+// encoder provides methods to write debug data to an io.Writer, and is a noop
+// when no writer is present
+type encoder struct {
+	sync.Mutex
+	w io.Writer
+}
+
+// Encode is analogous to json.Encoder.Encode
+func (e *encoder) Encode(i interface{}) {
+	if e == nil || e.w == nil {
+		return
+	}
+	e.Lock()
+	defer e.Unlock()
+
+	js, err := json.Marshal(i)
+	if err != nil {
+		log.Println("[ERROR] dag:", err)
+		return
+	}
+	js = append(js, '\n')
+
+	_, err = e.w.Write(js)
+	if err != nil {
+		log.Println("[ERROR] dag:", err)
+		return
+	}
+}
+
+func (e *encoder) Add(v Vertex) {
+	if e == nil {
+		return
+	}
+	e.Encode(marshalTransform{
+		Type:      typeTransform,
+		AddVertex: newMarshalVertex(v),
+	})
+}
+
+// Remove records the removal of Vertex v.
+func (e *encoder) Remove(v Vertex) {
+	if e == nil {
+		return
+	}
+	e.Encode(marshalTransform{
+		Type:         typeTransform,
+		RemoveVertex: newMarshalVertex(v),
+	})
+}
+
+func (e *encoder) Connect(edge Edge) {
+	if e == nil {
+		return
+	}
+	e.Encode(marshalTransform{
+		Type:    typeTransform,
+		AddEdge: newMarshalEdge(edge),
+	})
+}
+
+func (e *encoder) RemoveEdge(edge Edge) {
+	if e == nil {
+		return
+	}
+	e.Encode(marshalTransform{
+		Type:       typeTransform,
+		RemoveEdge: newMarshalEdge(edge),
+	})
+}
+
+// BeginOperation marks the start of a set of graph transformations, and
+// returns a DebugOperationEnd func to be called once the operation is
+// complete.
+func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd {
+	if e == nil {
+		return func(string) {}
+	}
+
+	e.Encode(marshalOperation{
+		Type:  typeOperation,
+		Begin: op,
+		Info:  info,
+	})
+
+	return func(info string) {
+		e.Encode(marshalOperation{
+			Type: typeOperation,
+			End:  op,
+			Info: info,
+		})
+	}
+}
+
+// structure for recording graph transformations
+type marshalTransform struct {
+	// Type: "Transform"
+	Type         string
+	AddEdge      *marshalEdge   `json:",omitempty"`
+	RemoveEdge   *marshalEdge   `json:",omitempty"`
+	AddVertex    *marshalVertex `json:",omitempty"`
+	RemoveVertex *marshalVertex `json:",omitempty"`
+}
+
+func (t marshalTransform) Transform(g *marshalGraph) {
+	switch {
+	case t.AddEdge != nil:
+		g.connect(t.AddEdge)
+	case t.RemoveEdge != nil:
+		g.removeEdge(t.RemoveEdge)
+	case t.AddVertex != nil:
+		g.add(t.AddVertex)
+	case t.RemoveVertex != nil:
+		g.remove(t.RemoveVertex)
+	}
+}
+
+// this structure allows us to decode any object in the json stream for
+// inspection, then re-decode it into a proper struct if needed.
+type streamDecode struct {
+	Type string
+	Map  map[string]interface{}
+	JSON []byte
+}
+
+func (s *streamDecode) UnmarshalJSON(d []byte) error {
+	s.JSON = d
+	err := json.Unmarshal(d, &s.Map)
+	if err != nil {
+		return err
+	}
+
+	if t, ok := s.Map["Type"]; ok {
+		s.Type, _ = t.(string)
+	}
+	return nil
+}
+
+// structure for recording the beginning and end of any multi-step
+// transformations. These are informational, and not required to reproduce the
+// graph state.
+type marshalOperation struct {
+	Type  string
+	Begin string `json:",omitempty"`
+	End   string `json:",omitempty"`
+	Info  string `json:",omitempty"`
+}
+
+// decodeGraph decodes a marshalGraph from an encoded graph stream.
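The stream that decodeGraph below consumes is newline-delimited JSON: one full graph record first, then transform and operation records in the order they occurred. An abbreviated illustration (field values hypothetical):

	{"Type":"Graph","Name":"root","Vertices":[...]}
	{"Type":"Operation","Begin":"TransitiveReduction"}
	{"Type":"Transform","RemoveEdge":{"Name":"a|b","Source":"1","Target":"2"}}
	{"Type":"Operation","End":"TransitiveReduction"}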
+func decodeGraph(r io.Reader) (*marshalGraph, error) {
+	dec := json.NewDecoder(r)
+
+	// a stream should always start with a graph
+	g := &marshalGraph{}
+
+	err := dec.Decode(g)
+	if err != nil {
+		return nil, err
+	}
+
+	// now replay any operations that occurred on the original graph
+	for dec.More() {
+		s := &streamDecode{}
+		err := dec.Decode(s)
+		if err != nil {
+			return g, err
+		}
+
+		// the only Type we're concerned with here is Transform to complete the
+		// Graph
+		if s.Type != typeTransform {
+			continue
+		}
+
+		t := &marshalTransform{}
+		err = json.Unmarshal(s.JSON, t)
+		if err != nil {
+			return g, err
+		}
+		t.Transform(g)
+	}
+	return g, nil
+}
+
+// marshalVertexInfo allows encoding arbitrary information about a single
+// Vertex in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalVertexInfo struct {
+	Type   string
+	Vertex *marshalVertex
+	Info   string
+}
+
+func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo {
+	return &marshalVertexInfo{
+		Type:   infoType,
+		Vertex: newMarshalVertex(v),
+		Info:   info,
+	}
+}
+
+// marshalEdgeInfo allows encoding arbitrary information about a single
+// Edge in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalEdgeInfo struct {
+	Type string
+	Edge *marshalEdge
+	Info string
+}
+
+func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo {
+	return &marshalEdgeInfo{
+		Type: infoType,
+		Edge: newMarshalEdge(e),
+		Info: info,
+	}
+}
+
+// JSON2Dot reads a Graph debug log from an io.Reader, and converts the final
+// graph to dot format.
+//
+// TODO: Allow returning the output at a certain point during decode.
+// Encode extra information from the json log into the Dot.
+func JSON2Dot(r io.Reader) ([]byte, error) {
+	g, err := decodeGraph(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return g.Dot(nil), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go
index d4b29226..92b42151 100644
--- a/vendor/github.com/hashicorp/terraform/dag/set.go
+++ b/vendor/github.com/hashicorp/terraform/dag/set.go
@@ -48,6 +48,9 @@ func (s *Set) Include(v interface{}) bool {
 // Intersection computes the set intersection with other.
 func (s *Set) Intersection(other *Set) *Set {
 	result := new(Set)
+	if s == nil {
+		return result
+	}
 	if other != nil {
 		for _, v := range s.m {
 			if other.Include(v) {
@@ -59,6 +62,39 @@ func (s *Set) Intersection(other *Set) *Set {
 	return result
 }
 
+// Difference returns a set with the elements that s has but
+// other doesn't.
+func (s *Set) Difference(other *Set) *Set {
+	result := new(Set)
+	if s != nil {
+		for k, v := range s.m {
+			var ok bool
+			if other != nil {
+				_, ok = other.m[k]
+			}
+			if !ok {
+				result.Add(v)
+			}
+		}
+	}
+
+	return result
+}
+
+// Filter returns a set that contains the elements from the receiver
+// where the given callback returns true.
+func (s *Set) Filter(cb func(interface{}) bool) *Set {
+	result := new(Set)
+
+	for _, v := range s.m {
+		if cb(v) {
+			result.Add(v)
+		}
+	}
+
+	return result
+}
+
 // Len is the number of items in the set.
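The new set operations above compose naturally; a small sketch (values illustrative):

	a := new(Set)
	a.Add(1)
	a.Add(2)
	b := new(Set)
	b.Add(2)

	onlyA := a.Difference(b) // contains only 1
	evens := a.Filter(func(v interface{}) bool {
		return v.(int)%2 == 0
	}) // contains only 2
	fmt.Println(onlyA.Len(), evens.Len()) // 1 1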
 func (s *Set) Len() int {
 	if s == nil {
diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go
new file mode 100644
index 00000000..f03b1003
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/dag/walk.go
@@ -0,0 +1,445 @@
+package dag
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-multierror"
+)
+
+// Walker is used to walk every vertex of a graph in parallel.
+//
+// A vertex will only be walked when the dependencies of that vertex have
+// been walked. If two vertices can be walked at the same time, they will be.
+//
+// Update can be called to update the graph. This can be called even during
+// a walk, changing vertices/edges mid-walk. This should be done carefully.
+// If a vertex is removed but has already been executed, the result of that
+// execution (any error) is still returned by Wait. Changing or re-adding
+// a vertex that has already executed has no effect. Changing edges of
+// a vertex that has already executed has no effect.
+//
+// Non-parallelism can be enforced by introducing a lock in your callback
+// function. However, the goroutine overhead of a walk will remain.
+// Walker will create V*2 goroutines (one for each vertex, and a dependency
+// waiter for each vertex). In general this should be of no concern unless
+// there are a huge number of vertices.
+//
+// The walk is depth first by default. This can be changed with the Reverse
+// option.
+//
+// A single walker is only valid for one graph walk. After the walk is complete
+// you must construct a new walker to walk again. State for the walk is never
+// deleted in case vertices or edges are changed.
+type Walker struct {
+	// Callback is what is called for each vertex
+	Callback WalkFunc
+
+	// Reverse, if true, causes the source of an edge to depend on a target.
+	// When false (default), the target depends on the source.
+	Reverse bool
+
+	// changeLock must be held to modify any of the fields below. Only Update
+	// should modify these fields. Modifying them outside of Update can cause
+	// serious problems.
+	changeLock sync.Mutex
+	vertices   Set
+	edges      Set
+	vertexMap  map[Vertex]*walkerVertex
+
+	// wait is done when all vertices have executed. It may become "undone"
+	// if new vertices are added.
+	wait sync.WaitGroup
+
+	// errMap contains the errors recorded so far for execution. Reading
+	// and writing should hold errLock.
+	errMap  map[Vertex]error
+	errLock sync.Mutex
+}
+
+type walkerVertex struct {
+	// These should only be set once on initialization and never written again.
+	// They are not protected by a lock since they don't need to be since
+	// they are write-once.
+
+	// DoneCh is closed when this vertex has completed execution, regardless
+	// of success.
+	//
+	// CancelCh is closed when the vertex should cancel execution. If execution
+	// is already complete (DoneCh is closed), this has no effect. Otherwise,
+	// execution is cancelled as quickly as possible.
+	DoneCh   chan struct{}
+	CancelCh chan struct{}
+
+	// Dependency information. Any changes to any of these fields requires
+	// holding DepsLock.
+	//
+	// DepsCh is sent a single value that denotes whether the upstream deps
+	// were successful (no errors). Any value sent means that the upstream
+	// dependencies are complete. No other values will ever be sent again.
+	//
+	// DepsUpdateCh is closed when there is a new DepsCh set.
+ DepsCh chan bool + DepsUpdateCh chan struct{} + DepsLock sync.Mutex + + // Below is not safe to read/write in parallel. This behavior is + // enforced by changes only happening in Update. Nothing else should + // ever modify these. + deps map[Vertex]chan struct{} + depsCancelCh chan struct{} +} + +// errWalkUpstream is used in the errMap of a walk to note that an upstream +// dependency failed so this vertex wasn't run. This is not shown in the final +// user-returned error. +var errWalkUpstream = errors.New("upstream dependency failed") + +// Wait waits for the completion of the walk and returns any errors ( +// in the form of a multierror) that occurred. Update should be called +// to populate the walk with vertices and edges prior to calling this. +// +// Wait will return as soon as all currently known vertices are complete. +// If you plan on calling Update with more vertices in the future, you +// should not call Wait until after this is done. +func (w *Walker) Wait() error { + // Wait for completion + w.wait.Wait() + + // Grab the error lock + w.errLock.Lock() + defer w.errLock.Unlock() + + // Build the error + var result error + for v, err := range w.errMap { + if err != nil && err != errWalkUpstream { + result = multierror.Append(result, fmt.Errorf( + "%s: %s", VertexName(v), err)) + } + } + + return result +} + +// Update updates the currently executing walk with the given graph. +// This will perform a diff of the vertices and edges and update the walker. +// Already completed vertices remain completed (including any errors during +// their execution). +// +// This returns immediately once the walker is updated; it does not wait +// for completion of the walk. +// +// Multiple Updates can be called in parallel. Update can be called at any +// time during a walk. +func (w *Walker) Update(g *AcyclicGraph) { + var v, e *Set + if g != nil { + v, e = g.vertices, g.edges + } + + // Grab the change lock so no more updates happen but also so that + // no new vertices are executed during this time since we may be + // removing them. + w.changeLock.Lock() + defer w.changeLock.Unlock() + + // Initialize fields + if w.vertexMap == nil { + w.vertexMap = make(map[Vertex]*walkerVertex) + } + + // Calculate all our sets + newEdges := e.Difference(&w.edges) + oldEdges := w.edges.Difference(e) + newVerts := v.Difference(&w.vertices) + oldVerts := w.vertices.Difference(v) + + // Add the new vertices + for _, raw := range newVerts.List() { + v := raw.(Vertex) + + // Add to the waitgroup so our walk is not done until everything finishes + w.wait.Add(1) + + // Add to our own set so we know about it already + log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v)) + w.vertices.Add(raw) + + // Initialize the vertex info + info := &walkerVertex{ + DoneCh: make(chan struct{}), + CancelCh: make(chan struct{}), + deps: make(map[Vertex]chan struct{}), + } + + // Add it to the map and kick off the walk + w.vertexMap[v] = info + } + + // Remove the old vertices + for _, raw := range oldVerts.List() { + v := raw.(Vertex) + + // Get the vertex info so we can cancel it + info, ok := w.vertexMap[v] + if !ok { + // This vertex for some reason was never in our map. This + // shouldn't be possible. 
+			continue
+		}
+
+		// Cancel the vertex
+		close(info.CancelCh)
+
+		// Delete it out of the map
+		delete(w.vertexMap, v)
+
+		log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v))
+		w.vertices.Delete(raw)
+	}
+
+	// Add the new edges
+	var changedDeps Set
+	for _, raw := range newEdges.List() {
+		edge := raw.(Edge)
+		waiter, dep := w.edgeParts(edge)
+
+		// Get the info for the waiter
+		waiterInfo, ok := w.vertexMap[waiter]
+		if !ok {
+			// Vertex doesn't exist... shouldn't be possible but ignore.
+			continue
+		}
+
+		// Get the info for the dep
+		depInfo, ok := w.vertexMap[dep]
+		if !ok {
+			// Vertex doesn't exist... shouldn't be possible but ignore.
+			continue
+		}
+
+		// Add the dependency to our waiter
+		waiterInfo.deps[dep] = depInfo.DoneCh
+
+		// Record that the deps changed for this waiter
+		changedDeps.Add(waiter)
+
+		log.Printf(
+			"[TRACE] dag/walk: added edge: %q waiting on %q",
+			VertexName(waiter), VertexName(dep))
+		w.edges.Add(raw)
+	}
+
+	// Process removed edges
+	for _, raw := range oldEdges.List() {
+		edge := raw.(Edge)
+		waiter, dep := w.edgeParts(edge)
+
+		// Get the info for the waiter
+		waiterInfo, ok := w.vertexMap[waiter]
+		if !ok {
+			// Vertex doesn't exist... shouldn't be possible but ignore.
+			continue
+		}
+
+		// Delete the dependency from the waiter
+		delete(waiterInfo.deps, dep)
+
+		// Record that the deps changed for this waiter
+		changedDeps.Add(waiter)
+
+		log.Printf(
+			"[TRACE] dag/walk: removed edge: %q waiting on %q",
+			VertexName(waiter), VertexName(dep))
+		w.edges.Delete(raw)
+	}
+
+	// For each vertex with changed dependencies, we need to kick off
+	// a new waiter and notify the vertex of the changes.
+	for _, raw := range changedDeps.List() {
+		v := raw.(Vertex)
+		info, ok := w.vertexMap[v]
+		if !ok {
+			// Vertex doesn't exist... shouldn't be possible but ignore.
+			continue
+		}
+
+		// Create a new done channel
+		doneCh := make(chan bool, 1)
+
+		// Create the channel we close for cancellation
+		cancelCh := make(chan struct{})
+
+		// Build a new deps copy
+		deps := make(map[Vertex]<-chan struct{})
+		for k, v := range info.deps {
+			deps[k] = v
+		}
+
+		// Update the update channel
+		info.DepsLock.Lock()
+		if info.DepsUpdateCh != nil {
+			close(info.DepsUpdateCh)
+		}
+		info.DepsCh = doneCh
+		info.DepsUpdateCh = make(chan struct{})
+		info.DepsLock.Unlock()
+
+		// Cancel the older waiter
+		if info.depsCancelCh != nil {
+			close(info.depsCancelCh)
+		}
+		info.depsCancelCh = cancelCh
+
+		log.Printf(
+			"[TRACE] dag/walk: dependencies changed for %q, sending new deps",
+			VertexName(v))
+
+		// Start the waiter
+		go w.waitDeps(v, deps, doneCh, cancelCh)
+	}
+
+	// Start all the new vertices. We do this at the end so that all
+	// the edge waiters and changes are setup above.
+	for _, raw := range newVerts.List() {
+		v := raw.(Vertex)
+		go w.walkVertex(v, w.vertexMap[v])
+	}
+}
+
+// edgeParts returns the waiter and the dependency, in that order.
+// The waiter is waiting on the dependency.
+func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) {
+	if w.Reverse {
+		return e.Source(), e.Target()
+	}
+
+	return e.Target(), e.Source()
+}
+
+// walkVertex walks a single vertex, waiting for any dependencies before
+// executing the callback.
+func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
+	// When we're done executing, lower the waitgroup count
+	defer w.wait.Done()
+
+	// When we're done, always close our done channel
+	defer close(info.DoneCh)
+
+	// Wait for our dependencies. 
We create a [closed] deps channel so + // that we can immediately fall through to load our actual DepsCh. + var depsSuccess bool + var depsUpdateCh chan struct{} + depsCh := make(chan bool, 1) + depsCh <- true + close(depsCh) + for { + select { + case <-info.CancelCh: + // Cancel + return + + case depsSuccess = <-depsCh: + // Deps complete! Mark as nil to trigger completion handling. + depsCh = nil + + case <-depsUpdateCh: + // New deps, reloop + } + + // Check if we have updated dependencies. This can happen if the + // dependencies were satisfied exactly prior to an Update occurring. + // In that case, we'd like to take into account new dependencies + // if possible. + info.DepsLock.Lock() + if info.DepsCh != nil { + depsCh = info.DepsCh + info.DepsCh = nil + } + if info.DepsUpdateCh != nil { + depsUpdateCh = info.DepsUpdateCh + } + info.DepsLock.Unlock() + + // If we still have no deps channel set, then we're done! + if depsCh == nil { + break + } + } + + // If we passed dependencies, we just want to check once more that + // we're not cancelled, since this can happen just as dependencies pass. + select { + case <-info.CancelCh: + // Cancelled during an update while dependencies completed. + return + default: + } + + // Run our callback or note that our upstream failed + var err error + if depsSuccess { + log.Printf("[TRACE] dag/walk: walking %q", VertexName(v)) + err = w.Callback(v) + } else { + log.Printf("[TRACE] dag/walk: upstream errored, not walking %q", VertexName(v)) + err = errWalkUpstream + } + + // Record the error + if err != nil { + w.errLock.Lock() + defer w.errLock.Unlock() + + if w.errMap == nil { + w.errMap = make(map[Vertex]error) + } + w.errMap[v] = err + } +} + +func (w *Walker) waitDeps( + v Vertex, + deps map[Vertex]<-chan struct{}, + doneCh chan<- bool, + cancelCh <-chan struct{}) { + // For each dependency given to us, wait for it to complete + for dep, depCh := range deps { + DepSatisfied: + for { + select { + case <-depCh: + // Dependency satisfied! + break DepSatisfied + + case <-cancelCh: + // Wait cancelled. Note that we didn't satisfy dependencies + // so that anything waiting on us also doesn't run. + doneCh <- false + return + + case <-time.After(time.Second * 5): + log.Printf("[TRACE] dag/walk: vertex %q, waiting for: %q", + VertexName(v), VertexName(dep)) + } + } + } + + // Dependencies satisfied! We need to check if any errored + w.errLock.Lock() + defer w.errLock.Unlock() + for dep, _ := range deps { + if w.errMap[dep] != nil { + // One of our dependencies failed, so return false + doneCh <- false + return + } + } + + // All dependencies satisfied and successful + doneCh <- true +} diff --git a/vendor/github.com/hashicorp/terraform/dot/graph.go b/vendor/github.com/hashicorp/terraform/dot/graph.go deleted file mode 100644 index 91fe9fc6..00000000 --- a/vendor/github.com/hashicorp/terraform/dot/graph.go +++ /dev/null @@ -1,224 +0,0 @@ -// The dot package contains utilities for working with DOT graphs. -package dot - -import ( - "bytes" - "fmt" - "sort" - "strings" -) - -// Graph is a representation of a drawable DOT graph. -type Graph struct { - // Whether this is a "digraph" or just a "graph" - Directed bool - - // Used for K/V settings in the DOT - Attrs map[string]string - - Nodes []*Node - Edges []*Edge - Subgraphs []*Subgraph - - nodesByName map[string]*Node -} - -// Subgraph is a Graph that lives inside a Parent graph, and contains some -// additional parameters to control how it is drawn. 
-type Subgraph struct { - Graph - Name string - Parent *Graph - Cluster bool -} - -// An Edge in a DOT graph, as expressed by recording the Name of the Node at -// each end. -type Edge struct { - // Name of source node. - Source string - - // Name of dest node. - Dest string - - // List of K/V attributes for this edge. - Attrs map[string]string -} - -// A Node in a DOT graph. -type Node struct { - Name string - Attrs map[string]string -} - -// Creates a properly initialized DOT Graph. -func NewGraph(attrs map[string]string) *Graph { - return &Graph{ - Attrs: attrs, - nodesByName: make(map[string]*Node), - } -} - -func NewEdge(src, dst string, attrs map[string]string) *Edge { - return &Edge{ - Source: src, - Dest: dst, - Attrs: attrs, - } -} - -func NewNode(n string, attrs map[string]string) *Node { - return &Node{ - Name: n, - Attrs: attrs, - } -} - -// Initializes a Subgraph with the provided name, attaches is to this Graph, -// and returns it. -func (g *Graph) AddSubgraph(name string) *Subgraph { - subgraph := &Subgraph{ - Graph: *NewGraph(map[string]string{}), - Parent: g, - Name: name, - } - g.Subgraphs = append(g.Subgraphs, subgraph) - return subgraph -} - -func (g *Graph) AddAttr(k, v string) { - g.Attrs[k] = v -} - -func (g *Graph) AddNode(n *Node) { - g.Nodes = append(g.Nodes, n) - g.nodesByName[n.Name] = n -} - -func (g *Graph) AddEdge(e *Edge) { - g.Edges = append(g.Edges, e) -} - -// Adds an edge between two Nodes. -// -// Note this does not do any verification of the existence of these nodes, -// which means that any strings you provide that are not existing nodes will -// result in extra auto-defined nodes in your resulting DOT. -func (g *Graph) AddEdgeBetween(src, dst string, attrs map[string]string) error { - g.AddEdge(NewEdge(src, dst, attrs)) - - return nil -} - -// Look up a node by name -func (g *Graph) GetNode(name string) (*Node, error) { - node, ok := g.nodesByName[name] - if !ok { - return nil, fmt.Errorf("Could not find node: %s", name) - } - return node, nil -} - -// Returns the DOT representation of this Graph. -func (g *Graph) String() string { - w := newGraphWriter() - - g.drawHeader(w) - w.Indent() - g.drawBody(w) - w.Unindent() - g.drawFooter(w) - - return w.String() -} - -func (g *Graph) drawHeader(w *graphWriter) { - if g.Directed { - w.Printf("digraph {\n") - } else { - w.Printf("graph {\n") - } -} - -func (g *Graph) drawBody(w *graphWriter) { - for _, as := range attrStrings(g.Attrs) { - w.Printf("%s\n", as) - } - - nodeStrings := make([]string, 0, len(g.Nodes)) - for _, n := range g.Nodes { - nodeStrings = append(nodeStrings, n.String()) - } - sort.Strings(nodeStrings) - for _, ns := range nodeStrings { - w.Printf(ns) - } - - edgeStrings := make([]string, 0, len(g.Edges)) - for _, e := range g.Edges { - edgeStrings = append(edgeStrings, e.String()) - } - sort.Strings(edgeStrings) - for _, es := range edgeStrings { - w.Printf(es) - } - - for _, s := range g.Subgraphs { - s.drawHeader(w) - w.Indent() - s.drawBody(w) - w.Unindent() - s.drawFooter(w) - } -} - -func (g *Graph) drawFooter(w *graphWriter) { - w.Printf("}\n") -} - -// Returns the DOT representation of this Edge. 
-func (e *Edge) String() string { - var buf bytes.Buffer - buf.WriteString( - fmt.Sprintf( - "%q -> %q", e.Source, e.Dest)) - writeAttrs(&buf, e.Attrs) - buf.WriteString("\n") - - return buf.String() -} - -func (s *Subgraph) drawHeader(w *graphWriter) { - name := s.Name - if s.Cluster { - name = fmt.Sprintf("cluster_%s", name) - } - w.Printf("subgraph %q {\n", name) -} - -// Returns the DOT representation of this Node. -func (n *Node) String() string { - var buf bytes.Buffer - buf.WriteString(fmt.Sprintf("%q", n.Name)) - writeAttrs(&buf, n.Attrs) - buf.WriteString("\n") - - return buf.String() -} - -func writeAttrs(buf *bytes.Buffer, attrs map[string]string) { - if len(attrs) > 0 { - buf.WriteString(" [") - buf.WriteString(strings.Join(attrStrings(attrs), ", ")) - buf.WriteString("]") - } -} - -func attrStrings(attrs map[string]string) []string { - strings := make([]string, 0, len(attrs)) - for k, v := range attrs { - strings = append(strings, fmt.Sprintf("%s = %q", k, v)) - } - sort.Strings(strings) - return strings -} diff --git a/vendor/github.com/hashicorp/terraform/dot/graph_writer.go b/vendor/github.com/hashicorp/terraform/dot/graph_writer.go deleted file mode 100644 index 7fa5d9ca..00000000 --- a/vendor/github.com/hashicorp/terraform/dot/graph_writer.go +++ /dev/null @@ -1,47 +0,0 @@ -package dot - -import ( - "bytes" - "fmt" -) - -// graphWriter wraps a bytes.Buffer and tracks indent level levels. -type graphWriter struct { - bytes.Buffer - indent int - indentStr string -} - -// Returns an initialized graphWriter at indent level 0. -func newGraphWriter() *graphWriter { - w := &graphWriter{ - indent: 0, - } - w.init() - return w -} - -// Prints to the buffer at the current indent level. -func (w *graphWriter) Printf(s string, args ...interface{}) { - w.WriteString(w.indentStr + fmt.Sprintf(s, args...)) -} - -// Increase the indent level. -func (w *graphWriter) Indent() { - w.indent++ - w.init() -} - -// Decrease the indent level. -func (w *graphWriter) Unindent() { - w.indent-- - w.init() -} - -func (w *graphWriter) init() { - indentBuf := new(bytes.Buffer) - for i := 0; i < w.indent; i++ { - indentBuf.WriteString("\t") - } - w.indentStr = indentBuf.String() -} diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go index 2b689281..1449065e 100644 --- a/vendor/github.com/hashicorp/terraform/flatmap/expand.go +++ b/vendor/github.com/hashicorp/terraform/flatmap/expand.go @@ -2,8 +2,11 @@ package flatmap import ( "fmt" + "sort" "strconv" "strings" + + "github.com/hashicorp/hil" ) // Expand takes a map and a key (prefix) and expands that value into @@ -21,13 +24,20 @@ func Expand(m map[string]string, key string) interface{} { } // Check if the key is an array, and if so, expand the array - if _, ok := m[key+".#"]; ok { + if v, ok := m[key+".#"]; ok { + // If the count of the key is unknown, then just put the unknown + // value in the value itself. This will be detected by Terraform + // core later. + if v == hil.UnknownValue { + return v + } + return expandArray(m, key) } // Check if this is a prefix in the map prefix := key + "." 
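// Editorial aside (not part of the vendored source): a minimal sketch of the
// behavior introduced in this hunk, using hypothetical data.
//
//	m := map[string]string{"list.#": "2", "list.0": "a", "list.1": "b"}
//	flatmap.Expand(m, "list") // yields []interface{}{"a", "b"}
//
// When the count key holds hil.UnknownValue instead of a number, Expand now
// returns that sentinel unchanged so Terraform core can detect it later.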
-	for k, _ := range m {
+	for k := range m {
 		if strings.HasPrefix(k, prefix) {
 			return expandMap(m, prefix)
 		}
@@ -42,17 +52,81 @@ func expandArray(m map[string]string, prefix string) []interface{} {
 		panic(err)
 	}
 
-	result := make([]interface{}, num)
-	for i := 0; i < int(num); i++ {
-		result[i] = Expand(m, fmt.Sprintf("%s.%d", prefix, i))
+	// If the number of elements in this array is 0, then return an
+	// empty slice as there is nothing to expand. Trying to expand it
+	// anyway could lead to crashes as any child maps, arrays or sets
+	// that no longer exist are still shown as empty with a count of 0.
+	if num == 0 {
+		return []interface{}{}
+	}
+
+	// NOTE: "num" is not necessarily accurate, e.g. if a user tampers
+	// with state, so the following code should not crash when given a
+	// number of items more or less than what's given in num. The
+	// num key is mainly just a hint that this is a list or set.
+
+	// The Schema "Set" type stores its values in an array format, but
+	// using numeric hash values instead of ordinal keys. Take the set
+	// of keys regardless of value, and expand them in numeric order.
+	// See GH-11042 for more details.
+	keySet := map[int]bool{}
+	computed := map[string]bool{}
+	for k := range m {
+		if !strings.HasPrefix(k, prefix+".") {
+			continue
+		}
+
+		key := k[len(prefix)+1:]
+		idx := strings.Index(key, ".")
+		if idx != -1 {
+			key = key[:idx]
+		}
+
+		// skip the count value
+		if key == "#" {
+			continue
+		}
+
+		// strip the computed flag if there is one
+		if strings.HasPrefix(key, "~") {
+			key = key[1:]
+			computed[key] = true
+		}
+
+		k, err := strconv.Atoi(key)
+		if err != nil {
+			panic(err)
+		}
+		keySet[int(k)] = true
+	}
+
+	keysList := make([]int, 0, num)
+	for key := range keySet {
+		keysList = append(keysList, key)
+	}
+	sort.Ints(keysList)
+
+	result := make([]interface{}, len(keysList))
+	for i, key := range keysList {
+		keyString := strconv.Itoa(key)
+		if computed[keyString] {
+			keyString = "~" + keyString
+		}
+		result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
 	}
 
 	return result
 }
 
 func expandMap(m map[string]string, prefix string) map[string]interface{} {
+	// Submaps may not have a '%' key, so we can't count on this value being
+	// here. If we don't have a count, just proceed as if we have a map.
+	if count, ok := m[prefix+"%"]; ok && count == "0" {
+		return map[string]interface{}{}
+	}
+
 	result := make(map[string]interface{})
-	for k, _ := range m {
+	for k := range m {
 		if !strings.HasPrefix(k, prefix) {
 			continue
 		}
@@ -66,7 +140,11 @@ func expandMap(m map[string]string, prefix string) map[string]interface{} {
 			continue
 		}
 
-		// It contains a period, so it is a more complex structure
+		// skip the map count value
+		if key == "%" {
+			continue
+		}
+
 		result[key] = Expand(m, k[:len(prefix)+len(key)])
 	}
 
diff --git a/vendor/github.com/hashicorp/terraform/httpclient/client.go b/vendor/github.com/hashicorp/terraform/httpclient/client.go
new file mode 100644
index 00000000..bb06beb4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/httpclient/client.go
@@ -0,0 +1,18 @@
+package httpclient
+
+import (
+	"net/http"
+
+	cleanhttp "github.com/hashicorp/go-cleanhttp"
+)
+
+// New returns the DefaultPooledClient from the cleanhttp
+// package that will also send a Terraform User-Agent string.
+func New() *http.Client { + cli := cleanhttp.DefaultPooledClient() + cli.Transport = &userAgentRoundTripper{ + userAgent: UserAgentString(), + inner: cli.Transport, + } + return cli +} diff --git a/vendor/github.com/hashicorp/terraform/httpclient/useragent.go b/vendor/github.com/hashicorp/terraform/httpclient/useragent.go new file mode 100644 index 00000000..5e280176 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/httpclient/useragent.go @@ -0,0 +1,40 @@ +package httpclient + +import ( + "fmt" + "log" + "net/http" + "os" + "strings" + + "github.com/hashicorp/terraform/version" +) + +const userAgentFormat = "Terraform/%s" +const uaEnvVar = "TF_APPEND_USER_AGENT" + +func UserAgentString() string { + ua := fmt.Sprintf(userAgentFormat, version.Version) + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} + +type userAgentRoundTripper struct { + inner http.RoundTripper + userAgent string +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if _, ok := req.Header["User-Agent"]; !ok { + req.Header.Set("User-Agent", rt.userAgent) + } + return rt.inner.RoundTrip(req) +} diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go b/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go new file mode 100644 index 00000000..87c8431e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go @@ -0,0 +1,43 @@ +package moduledeps + +import ( + "github.com/hashicorp/terraform/plugin/discovery" +) + +// Providers describes a set of provider dependencies for a given module. +// +// Each named provider instance can have one version constraint. +type Providers map[ProviderInstance]ProviderDependency + +// ProviderDependency describes the dependency for a particular provider +// instance, including both the set of allowed versions and the reason for +// the dependency. +type ProviderDependency struct { + Constraints discovery.Constraints + Reason ProviderDependencyReason +} + +// ProviderDependencyReason is an enumeration of reasons why a dependency might be +// present. +type ProviderDependencyReason int + +const ( + // ProviderDependencyExplicit means that there is an explicit "provider" + // block in the configuration for this module. + ProviderDependencyExplicit ProviderDependencyReason = iota + + // ProviderDependencyImplicit means that there is no explicit "provider" + // block but there is at least one resource that uses this provider. + ProviderDependencyImplicit + + // ProviderDependencyInherited is a special case of + // ProviderDependencyImplicit where a parent module has defined a + // configuration for the provider that has been inherited by at least one + // resource in this module. + ProviderDependencyInherited + + // ProviderDependencyFromState means that this provider is not currently + // referenced by configuration at all, but some existing instances in + // the state still depend on it. + ProviderDependencyFromState +) diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/doc.go b/vendor/github.com/hashicorp/terraform/moduledeps/doc.go new file mode 100644 index 00000000..7eff0831 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/moduledeps/doc.go @@ -0,0 +1,7 @@ +// Package moduledeps contains types that can be used to describe the +// providers required for all of the modules in a module tree. 
+//
+// It does not itself contain the functionality for populating such
+// data structures; that's in Terraform core, since this package intentionally
+// does not depend on terraform core to avoid package dependency cycles.
+package moduledeps
diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/module.go b/vendor/github.com/hashicorp/terraform/moduledeps/module.go
new file mode 100644
index 00000000..d6cbaf5c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/moduledeps/module.go
@@ -0,0 +1,204 @@
+package moduledeps
+
+import (
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/terraform/plugin/discovery"
+)
+
+// Module represents the dependencies of a single module, as well as being
+// a node in a tree of such structures representing the dependencies of
+// an entire configuration.
+type Module struct {
+	Name      string
+	Providers Providers
+	Children  []*Module
+}
+
+// WalkFunc is a callback type for use with Module.WalkTree
+type WalkFunc func(path []string, parent *Module, current *Module) error
+
+// WalkTree calls the given callback once for the receiver and then
+// once for each descendent, in an order such that parents are called
+// before their children and siblings are called in the order they
+// appear in the Children slice.
+//
+// When calling the callback, parent will be nil for the first call
+// for the receiving module, and then set to the direct parent of
+// each module for the subsequent calls.
+//
+// The path given to the callback is valid only until the callback
+// returns, after which it will be mutated and reused. Callbacks must
+// therefore copy the path slice if they wish to retain it.
+//
+// If the given callback returns an error, the walk will be aborted at
+// that point and that error returned to the caller.
+//
+// This function is not thread-safe for concurrent modifications of the
+// data structure, so it's the caller's responsibility to arrange for that
+// should it be needed.
+//
+// It is safe for a callback to modify the descendents of the "current"
+// module, including the ordering of the Children slice itself, but the
+// callback MUST NOT modify the parent module.
+func (m *Module) WalkTree(cb WalkFunc) error {
+	return walkModuleTree(make([]string, 0, 1), nil, m, cb)
+}
+
+func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error {
+	path = append(path, current.Name)
+	err := cb(path, parent, current)
+	if err != nil {
+		return err
+	}
+
+	for _, child := range current.Children {
+		err := walkModuleTree(path, current, child, cb)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SortChildren sorts the Children slice into lexicographic order by
+// name, in-place.
+//
+// This is primarily useful prior to calling WalkTree so that the walk
+// will proceed in a consistent order.
+func (m *Module) SortChildren() {
+	sort.Sort(sortModules{m.Children})
+}
+
+// SortDescendents is a convenience wrapper for calling SortChildren on
+// the receiver and all of its descendent modules.
+func (m *Module) SortDescendents() { + m.WalkTree(func(path []string, parent *Module, current *Module) error { + current.SortChildren() + return nil + }) +} + +type sortModules struct { + modules []*Module +} + +func (s sortModules) Len() int { + return len(s.modules) +} + +func (s sortModules) Less(i, j int) bool { + cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name) + return cmp < 0 +} + +func (s sortModules) Swap(i, j int) { + s.modules[i], s.modules[j] = s.modules[j], s.modules[i] +} + +// PluginRequirements produces a PluginRequirements structure that can +// be used with discovery.PluginMetaSet.ConstrainVersions to identify +// suitable plugins to satisfy the module's provider dependencies. +// +// This method only considers the direct requirements of the receiver. +// Use AllPluginRequirements to flatten the dependencies for the +// entire tree of modules. +// +// Requirements returned by this method include only version constraints, +// and apply no particular SHA256 hash constraint. +func (m *Module) PluginRequirements() discovery.PluginRequirements { + ret := make(discovery.PluginRequirements) + for inst, dep := range m.Providers { + // m.Providers is keyed on provider names, such as "aws.foo". + // a PluginRequirements wants keys to be provider *types*, such + // as "aws". If there are multiple aliases for the same + // provider then we will flatten them into a single requirement + // by combining their constraint sets. + pty := inst.Type() + if existing, exists := ret[pty]; exists { + ret[pty].Versions = existing.Versions.Append(dep.Constraints) + } else { + ret[pty] = &discovery.PluginConstraints{ + Versions: dep.Constraints, + } + } + } + return ret +} + +// AllPluginRequirements calls PluginRequirements for the receiver and all +// of its descendents, and merges the result into a single PluginRequirements +// structure that would satisfy all of the modules together. +// +// Requirements returned by this method include only version constraints, +// and apply no particular SHA256 hash constraint. +func (m *Module) AllPluginRequirements() discovery.PluginRequirements { + var ret discovery.PluginRequirements + m.WalkTree(func(path []string, parent *Module, current *Module) error { + ret = ret.Merge(current.PluginRequirements()) + return nil + }) + return ret +} + +// Equal returns true if the receiver is the root of an identical tree +// to the other given Module. This is a deep comparison that considers +// the equality of all downstream modules too. +// +// The children are considered to be ordered, so callers may wish to use +// SortDescendents first to normalize the order of the slices of child nodes. +// +// The implementation of this function is not optimized since it is provided +// primarily for use in tests. +func (m *Module) Equal(other *Module) bool { + // take care of nils first + if m == nil && other == nil { + return true + } else if (m == nil && other != nil) || (m != nil && other == nil) { + return false + } + + if m.Name != other.Name { + return false + } + + if len(m.Providers) != len(other.Providers) { + return false + } + if len(m.Children) != len(other.Children) { + return false + } + + // Can't use reflect.DeepEqual on this provider structure because + // the nested Constraints objects contain function pointers that + // never compare as equal. So we'll need to walk it the long way. 
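// Editorial aside (not part of the vendored source): reflect.DeepEqual reports
// false for any two non-nil func values, and discovery.Constraints wraps
// go-version constraints that carry such funcs, so the loop below compares
// Reason directly and falls back to the String() form of each constraint set
// as a practical proxy for equality.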
+ for inst, dep := range m.Providers { + if _, exists := other.Providers[inst]; !exists { + return false + } + + if dep.Reason != other.Providers[inst].Reason { + return false + } + + // Constraints are not too easy to compare robustly, so + // we'll just use their string representations as a proxy + // for now. + if dep.Constraints.String() != other.Providers[inst].Constraints.String() { + return false + } + } + + // Above we already checked that we have the same number of children + // in each module, so now we just need to check that they are + // recursively equal. + for i := range m.Children { + if !m.Children[i].Equal(other.Children[i]) { + return false + } + } + + // If we fall out here then they are equal + return true +} diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/provider.go b/vendor/github.com/hashicorp/terraform/moduledeps/provider.go new file mode 100644 index 00000000..89ceefb2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/moduledeps/provider.go @@ -0,0 +1,30 @@ +package moduledeps + +import ( + "strings" +) + +// ProviderInstance describes a particular provider instance by its full name, +// like "null" or "aws.foo". +type ProviderInstance string + +// Type returns the provider type of this instance. For example, for an instance +// named "aws.foo" the type is "aws". +func (p ProviderInstance) Type() string { + t := string(p) + if dotPos := strings.Index(t, "."); dotPos != -1 { + t = t[:dotPos] + } + return t +} + +// Alias returns the alias of this provider, if any. An instance named "aws.foo" +// has the alias "foo", while an instance named just "docker" has no alias, +// so the empty string would be returned. +func (p ProviderInstance) Alias() string { + t := string(p) + if dotPos := strings.Index(t, "."); dotPos != -1 { + return t[dotPos+1:] + } + return "" +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go new file mode 100644 index 00000000..df855a76 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go @@ -0,0 +1,30 @@ +package discovery + +// Error is a type used to describe situations that the caller must handle +// since they indicate some form of user error. +// +// The functions and methods that return these specialized errors indicate so +// in their documentation. The Error type should not itself be used directly, +// but rather errors should be compared using the == operator with the +// error constants in this package. +// +// Values of this type are _not_ used when the error being reported is an +// operational error (server unavailable, etc) or indicative of a bug in +// this package or its caller. +type Error string + +// ErrorNoSuitableVersion indicates that a suitable version (meeting given +// constraints) is not available. +const ErrorNoSuitableVersion = Error("no suitable version is available") + +// ErrorNoVersionCompatible indicates that all of the available versions +// that otherwise met constraints are not compatible with the current +// version of Terraform. 
+const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform") + +// ErrorNoSuchProvider indicates that no provider exists with a name given +const ErrorNoSuchProvider = Error("no provider exists with the given name") + +func (err Error) Error() string { + return string(err) +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go new file mode 100644 index 00000000..f053312b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go @@ -0,0 +1,191 @@ +package discovery + +import ( + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" +) + +// FindPlugins looks in the given directories for files whose filenames +// suggest that they are plugins of the given kind (e.g. "provider") and +// returns a PluginMetaSet representing the discovered potential-plugins. +// +// Currently this supports two different naming schemes. The current +// standard naming scheme is a subdirectory called $GOOS-$GOARCH containing +// files named terraform-$KIND-$NAME-V$VERSION. The legacy naming scheme is +// files directly in the given directory whose names are like +// terraform-$KIND-$NAME. +// +// Only one plugin will be returned for each unique plugin (name, version) +// pair, with preference given to files found in earlier directories. +// +// This is a convenience wrapper around FindPluginPaths and ResolvePluginsPaths. +func FindPlugins(kind string, dirs []string) PluginMetaSet { + return ResolvePluginPaths(FindPluginPaths(kind, dirs)) +} + +// FindPluginPaths looks in the given directories for files whose filenames +// suggest that they are plugins of the given kind (e.g. "provider"). +// +// The return value is a list of absolute paths that appear to refer to +// plugins in the given directories, based only on what can be inferred +// from the naming scheme. The paths returned are ordered such that files +// in later dirs appear after files in earlier dirs in the given directory +// list. Within the same directory plugins are returned in a consistent but +// undefined order. +func FindPluginPaths(kind string, dirs []string) []string { + // This is just a thin wrapper around findPluginPaths so that we can + // use the latter in tests with a fake machineName so we can use our + // test fixtures. 
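// Editorial aside (not part of the vendored source): with kind "provider",
// both of these hypothetical entries would be discovered by the call below,
// the first via the versioned scheme and the second via the legacy scheme:
//
//	terraform-provider-aws_v1.2.0_x4
//	terraform-provider-aws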
+ return findPluginPaths(kind, dirs) +} + +func findPluginPaths(kind string, dirs []string) []string { + prefix := "terraform-" + kind + "-" + + ret := make([]string, 0, len(dirs)) + + for _, dir := range dirs { + items, err := ioutil.ReadDir(dir) + if err != nil { + // Ignore missing dirs, non-dirs, etc + continue + } + + log.Printf("[DEBUG] checking for %s in %q", kind, dir) + + for _, item := range items { + fullName := item.Name() + + if !strings.HasPrefix(fullName, prefix) { + continue + } + + // New-style paths must have a version segment in filename + if strings.Contains(strings.ToLower(fullName), "_v") { + absPath, err := filepath.Abs(filepath.Join(dir, fullName)) + if err != nil { + log.Printf("[ERROR] plugin filepath error: %s", err) + continue + } + + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + + log.Printf("[DEBUG] found %s %q", kind, fullName) + ret = append(ret, filepath.Clean(absPath)) + continue + } + + // Legacy style with files directly in the base directory + absPath, err := filepath.Abs(filepath.Join(dir, fullName)) + if err != nil { + log.Printf("[ERROR] plugin filepath error: %s", err) + continue + } + + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + + log.Printf("[WARN] found legacy %s %q", kind, fullName) + + ret = append(ret, filepath.Clean(absPath)) + } + } + + return ret +} + +// Returns true if and only if the given path refers to a file or a symlink +// to a file. +func pathIsFile(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + + return !info.IsDir() +} + +// ResolvePluginPaths takes a list of paths to plugin executables (as returned +// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the +// referenced plugins. +// +// If the same combination of plugin name and version appears multiple times, +// the earlier reference will be preferred. Several different versions of +// the same plugin name may be returned, in which case the methods of +// PluginMetaSet can be used to filter down. +func ResolvePluginPaths(paths []string) PluginMetaSet { + s := make(PluginMetaSet) + + type nameVersion struct { + Name string + Version string + } + found := make(map[nameVersion]struct{}) + + for _, path := range paths { + baseName := strings.ToLower(filepath.Base(path)) + if !strings.HasPrefix(baseName, "terraform-") { + // Should never happen with reasonable input + continue + } + + baseName = baseName[10:] + firstDash := strings.Index(baseName, "-") + if firstDash == -1 { + // Should never happen with reasonable input + continue + } + + baseName = baseName[firstDash+1:] + if baseName == "" { + // Should never happen with reasonable input + continue + } + + // Trim the .exe suffix used on Windows before we start wrangling + // the remainder of the path. + if strings.HasSuffix(baseName, ".exe") { + baseName = baseName[:len(baseName)-4] + } + + parts := strings.SplitN(baseName, "_v", 2) + name := parts[0] + version := VersionZero + if len(parts) == 2 { + version = parts[1] + } + + // Auto-installed plugins contain an extra name portion representing + // the expected plugin version, which we must trim off. 
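// Editorial aside (not part of the vendored source): for a hypothetical file
// named terraform-provider-aws_v1.2.0_x4, the split above yields name "aws"
// and version "1.2.0_x4"; the trim below reduces the version to "1.2.0".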
+		if underX := strings.Index(version, "_x"); underX != -1 {
+			version = version[:underX]
+		}
+
+		if _, ok := found[nameVersion{name, version}]; ok {
+			// Skip duplicate versions of the same plugin
+			// (We do this during this step because after this we will be
+			// dealing with sets and thus lose our ordering with which to
+			// decide preference.)
+			continue
+		}
+
+		s.Add(PluginMeta{
+			Name:    name,
+			Version: VersionStr(version),
+			Path:    path,
+		})
+		found[nameVersion{name, version}] = struct{}{}
+	}
+
+	return s
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
new file mode 100644
index 00000000..815640f1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
@@ -0,0 +1,548 @@
+package discovery
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/html"
+
+	getter "github.com/hashicorp/go-getter"
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/terraform/httpclient"
+	"github.com/mitchellh/cli"
+)
+
+// Releases are located by parsing the html listing from releases.hashicorp.com.
+//
+// The URL for releases follows the pattern:
+// https://releases.hashicorp.com/terraform-provider-name/<version>/terraform-provider-name_<version>_<os>_<arch>.<extension>
+//
+// The plugin protocol version will be saved with the release and returned in
+// the header X-TERRAFORM_PROTOCOL_VERSION.
+
+const protocolVersionHeader = "x-terraform-protocol-version"
+
+var releaseHost = "https://releases.hashicorp.com"
+
+var httpClient *http.Client
+
+func init() {
+	httpClient = httpclient.New()
+
+	httpGetter := &getter.HttpGetter{
+		Client: httpClient,
+		Netrc:  true,
+	}
+
+	getter.Getters["http"] = httpGetter
+	getter.Getters["https"] = httpGetter
+}
+
+// An Installer maintains a local cache of plugins by downloading plugins
+// from an online repository.
+type Installer interface {
+	Get(name string, req Constraints) (PluginMeta, error)
+	PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error)
+}
+
+// ProviderInstaller is an Installer implementation that knows how to
+// download Terraform providers from the official HashiCorp releases service
+// into a local directory. The files downloaded are compliant with the
+// naming scheme expected by FindPlugins, so the target directory of a
+// provider installer can be used as one of several plugin discovery sources.
+type ProviderInstaller struct {
+	Dir string
+
+	// Cache is used to access and update a local cache of plugins if non-nil.
+	// Can be nil to disable caching.
+	Cache PluginCache
+
+	PluginProtocolVersion uint
+
+	// OS and Arch specify the OS and architecture that should be used when
+	// installing plugins. These use the same labels as the runtime.GOOS and
+	// runtime.GOARCH variables respectively, and indeed the values of these
+	// are used as defaults if either of these is the empty string.
+	OS   string
+	Arch string
+
+	// Skip checksum and signature verification
+	SkipVerify bool
+
+	Ui cli.Ui // Ui for output
+}
+
+// Get is part of an implementation of type Installer, and attempts to download
+// and install a Terraform provider matching the given constraints.
+//
+// This method may return one of a number of sentinel errors from this
+// package to indicate issues that are likely to be resolvable via user action:
+//
+// ErrorNoSuchProvider: no provider with the given name exists in the repository.
+// ErrorNoSuitableVersion: the provider exists but no available version matches constraints. +// ErrorNoVersionCompatible: a plugin was found within the constraints but it is +// incompatible with the current Terraform version. +// +// These errors should be recognized and handled as special cases by the caller +// to present a suitable user-oriented error message. +// +// All other errors indicate an internal problem that is likely _not_ solvable +// through user action, or at least not within Terraform's scope. Error messages +// are produced under the assumption that if presented to the user they will +// be presented alongside context about what is being installed, and thus the +// error messages do not redundantly include such information. +func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, error) { + versions, err := i.listProviderVersions(provider) + // TODO: return multiple errors + if err != nil { + return PluginMeta{}, err + } + + if len(versions) == 0 { + return PluginMeta{}, ErrorNoSuitableVersion + } + + versions = allowedVersions(versions, req) + if len(versions) == 0 { + return PluginMeta{}, ErrorNoSuitableVersion + } + + // sort them newest to oldest + Versions(versions).Sort() + + // Ensure that our installation directory exists + err = os.MkdirAll(i.Dir, os.ModePerm) + if err != nil { + return PluginMeta{}, fmt.Errorf("failed to create plugin dir %s: %s", i.Dir, err) + } + + // take the first matching plugin we find + for _, v := range versions { + url := i.providerURL(provider, v.String()) + + if !i.SkipVerify { + sha256, err := i.getProviderChecksum(provider, v.String()) + if err != nil { + return PluginMeta{}, err + } + + // add the checksum parameter for go-getter to verify the download for us. + if sha256 != "" { + url = url + "?checksum=sha256:" + sha256 + } + } + + log.Printf("[DEBUG] fetching provider info for %s version %s", provider, v) + if checkPlugin(url, i.PluginProtocolVersion) { + i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", provider, v.String())) + log.Printf("[DEBUG] getting provider %q version %q", provider, v) + err := i.install(provider, v, url) + if err != nil { + return PluginMeta{}, err + } + + // Find what we just installed + // (This is weird, because go-getter doesn't directly return + // information about what was extracted, and we just extracted + // the archive directly into a shared dir here.) + log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider, v) + metas := FindPlugins("provider", []string{i.Dir}) + log.Printf("[DEBUG] all plugins found %#v", metas) + metas, _ = metas.ValidateVersions() + metas = metas.WithName(provider).WithVersion(v) + log.Printf("[DEBUG] filtered plugins %#v", metas) + if metas.Count() == 0 { + // This should never happen. Suggests that the release archive + // contains an executable file whose name doesn't match the + // expected convention. + return PluginMeta{}, fmt.Errorf( + "failed to find installed plugin version %s; this is a bug in Terraform and should be reported", + v, + ) + } + + if metas.Count() > 1 { + // This should also never happen, and suggests that a + // particular version was re-released with a different + // executable filename. We consider releases as immutable, so + // this is an error. 
+ return PluginMeta{}, fmt.Errorf( + "multiple plugins installed for version %s; this is a bug in Terraform and should be reported", + v, + ) + } + + // By now we know we have exactly one meta, and so "Newest" will + // return that one. + return metas.Newest(), nil + } + + log.Printf("[INFO] incompatible ProtocolVersion for %s version %s", provider, v) + } + + return PluginMeta{}, ErrorNoVersionCompatible +} + +func (i *ProviderInstaller) install(provider string, version Version, url string) error { + if i.Cache != nil { + log.Printf("[DEBUG] looking for provider %s %s in plugin cache", provider, version) + cached := i.Cache.CachedPluginPath("provider", provider, version) + if cached == "" { + log.Printf("[DEBUG] %s %s not yet in cache, so downloading %s", provider, version, url) + err := getter.Get(i.Cache.InstallDir(), url) + if err != nil { + return err + } + // should now be in cache + cached = i.Cache.CachedPluginPath("provider", provider, version) + if cached == "" { + // should never happen if the getter is behaving properly + // and the plugins are packaged properly. + return fmt.Errorf("failed to find downloaded plugin in cache %s", i.Cache.InstallDir()) + } + } + + // Link or copy the cached binary into our install dir so the + // normal resolution machinery can find it. + filename := filepath.Base(cached) + targetPath := filepath.Join(i.Dir, filename) + + log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider, version, targetPath, cached) + + // Delete if we can. If there's nothing there already then no harm done. + // This is important because we can't create a link if there's + // already a file of the same name present. + // (any other error here we'll catch below when we try to write here) + os.Remove(targetPath) + + // We don't attempt linking on Windows because links are not + // comprehensively supported by all tools/apps in Windows and + // so we choose to be conservative to avoid creating any + // weird issues for Windows users. + linkErr := errors.New("link not supported for Windows") // placeholder error, never actually returned + if runtime.GOOS != "windows" { + // Try hard linking first. Hard links are preferable because this + // creates a self-contained directory that doesn't depend on the + // cache after install. + linkErr = os.Link(cached, targetPath) + + // If that failed, try a symlink. This _does_ depend on the cache + // after install, so the user must manage the cache more carefully + // in this case, but avoids creating redundant copies of the + // plugins on disk. + if linkErr != nil { + linkErr = os.Symlink(cached, targetPath) + } + } + + // If we still have an error then we'll try a copy as a fallback. + // In this case either the OS is Windows or the target filesystem + // can't support symlinks. 
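// Editorial aside (not part of the vendored source): the overall sequence is
// hard link first, then symlink (both attempted above), and finally the plain
// file copy below, trading independence from the cache directory for
// portability at each step.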
+		if linkErr != nil {
+			srcFile, err := os.Open(cached)
+			if err != nil {
+				return fmt.Errorf("failed to open cached plugin %s: %s", cached, err)
+			}
+			defer srcFile.Close()
+
+			destFile, err := os.OpenFile(targetPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm)
+			if err != nil {
+				return fmt.Errorf("failed to create %s: %s", targetPath, err)
+			}
+
+			_, err = io.Copy(destFile, srcFile)
+			if err != nil {
+				destFile.Close()
+				return fmt.Errorf("failed to copy cached plugin from %s to %s: %s", cached, targetPath, err)
+			}
+
+			err = destFile.Close()
+			if err != nil {
+				return fmt.Errorf("error creating %s: %s", targetPath, err)
+			}
+		}
+
+		// One way or another, by the time we get here we should have either
+		// a link or a copy of the cached plugin within i.Dir, as expected.
+	} else {
+		log.Printf("[DEBUG] plugin cache is disabled, so downloading %s %s from %s", provider, version, url)
+		err := getter.Get(i.Dir, url)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) {
+	purge := make(PluginMetaSet)
+
+	present := FindPlugins("provider", []string{i.Dir})
+	for meta := range present {
+		chosen, ok := used[meta.Name]
+		if !ok {
+			purge.Add(meta)
+		}
+		if chosen.Path != meta.Path {
+			purge.Add(meta)
+		}
+	}
+
+	removed := make(PluginMetaSet)
+	var errs error
+	for meta := range purge {
+		path := meta.Path
+		err := os.Remove(path)
+		if err != nil {
+			errs = multierror.Append(errs, fmt.Errorf(
+				"failed to remove unused provider plugin %s: %s",
+				path, err,
+			))
+		} else {
+			removed.Add(meta)
+		}
+	}
+
+	return removed, errs
+}
+
+// Plugins are referred to by the short name, but all URLs and files will use
+// the full name prefixed with terraform-<kind>-
+func (i *ProviderInstaller) providerName(name string) string {
+	return "terraform-provider-" + name
+}
+
+func (i *ProviderInstaller) providerFileName(name, version string) string {
+	os := i.OS
+	arch := i.Arch
+	if os == "" {
+		os = runtime.GOOS
+	}
+	if arch == "" {
+		arch = runtime.GOARCH
+	}
+	return fmt.Sprintf("%s_%s_%s_%s.zip", i.providerName(name), version, os, arch)
+}
+
+// providerVersionsURL returns the path to the released versions directory for the provider:
+// https://releases.hashicorp.com/terraform-provider-name/
+func (i *ProviderInstaller) providerVersionsURL(name string) string {
+	return releaseHost + "/" + i.providerName(name) + "/"
+}
+
+// providerURL returns the full path to the provider file, using the current OS
+// and ARCH:
+// .../terraform-provider-name/<version>/terraform-provider-name_<version>_<os>_<arch>.<extension>
+func (i *ProviderInstaller) providerURL(name, version string) string {
+	return fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, i.providerFileName(name, version))
+}
+
+func (i *ProviderInstaller) providerChecksumURL(name, version string) string {
+	fileName := fmt.Sprintf("%s_%s_SHA256SUMS", i.providerName(name), version)
+	u := fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, fileName)
+	return u
+}
+
+func (i *ProviderInstaller) getProviderChecksum(name, version string) (string, error) {
+	checksums, err := getPluginSHA256SUMs(i.providerChecksumURL(name, version))
+	if err != nil {
+		return "", err
+	}
+
+	return checksumForFile(checksums, i.providerFileName(name, version)), nil
+}
+
+// checkPlugin checks the plugin protocol version by making a HEAD request to
+// the provided url. If the header is not present, we assume the latest version
+// will be compatible, and leave the check for discovery or execution.
+func checkPlugin(url string, pluginProtocolVersion uint) bool { + resp, err := httpClient.Head(url) + if err != nil { + log.Printf("[ERROR] error fetching plugin headers: %s", err) + return false + } + + if resp.StatusCode != http.StatusOK { + log.Println("[ERROR] non-200 status fetching plugin headers:", resp.Status) + return false + } + + proto := resp.Header.Get(protocolVersionHeader) + if proto == "" { + // The header isn't present, but we don't make this error fatal since + // the latest version will probably work. + log.Printf("[WARN] missing %s from: %s", protocolVersionHeader, url) + return true + } + + protoVersion, err := strconv.Atoi(proto) + if err != nil { + log.Printf("[ERROR] invalid ProtocolVersion: %s", proto) + return false + } + + return protoVersion == int(pluginProtocolVersion) +} + +// list the version available for the named plugin +func (i *ProviderInstaller) listProviderVersions(name string) ([]Version, error) { + versions, err := listPluginVersions(i.providerVersionsURL(name)) + if err != nil { + // listPluginVersions returns a verbose error message indicating + // what was being accessed and what failed + return nil, err + } + return versions, nil +} + +var errVersionNotFound = errors.New("version not found") + +// take the list of available versions for a plugin, and filter out those that +// don't fit the constraints. +func allowedVersions(available []Version, required Constraints) []Version { + var allowed []Version + + for _, v := range available { + if required.Allows(v) { + allowed = append(allowed, v) + } + } + + return allowed +} + +// return a list of the plugin versions at the given URL +func listPluginVersions(url string) ([]Version, error) { + resp, err := httpClient.Get(url) + if err != nil { + // http library produces a verbose error message that includes the + // URL being accessed, etc. + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := ioutil.ReadAll(resp.Body) + log.Printf("[ERROR] failed to fetch plugin versions from %s\n%s\n%s", url, resp.Status, body) + + switch resp.StatusCode { + case http.StatusNotFound, http.StatusForbidden: + // These are treated as indicative of the given name not being + // a valid provider name at all. + return nil, ErrorNoSuchProvider + + default: + // All other errors are assumed to be operational problems. 
+ return nil, fmt.Errorf("error accessing %s: %s", url, resp.Status) + } + + } + + body, err := html.Parse(resp.Body) + if err != nil { + log.Fatal(err) + } + + names := []string{} + + // all we need to do is list links on the directory listing page that look like plugins + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + c := n.FirstChild + if c != nil && c.Type == html.TextNode && strings.HasPrefix(c.Data, "terraform-") { + names = append(names, c.Data) + return + } + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(body) + + return versionsFromNames(names), nil +} + +// parse the list of directory names into a sorted list of available versions +func versionsFromNames(names []string) []Version { + var versions []Version + for _, name := range names { + parts := strings.SplitN(name, "_", 2) + if len(parts) == 2 && parts[1] != "" { + v, err := VersionStr(parts[1]).Parse() + if err != nil { + // filter invalid versions scraped from the page + log.Printf("[WARN] invalid version found for %q: %s", name, err) + continue + } + + versions = append(versions, v) + } + } + + return versions +} + +func checksumForFile(sums []byte, name string) string { + for _, line := range strings.Split(string(sums), "\n") { + parts := strings.Fields(line) + if len(parts) > 1 && parts[1] == name { + return parts[0] + } + } + return "" +} + +// fetch the SHA256SUMS file provided, and verify its signature. +func getPluginSHA256SUMs(sumsURL string) ([]byte, error) { + sigURL := sumsURL + ".sig" + + sums, err := getFile(sumsURL) + if err != nil { + return nil, fmt.Errorf("error fetching checksums: %s", err) + } + + sig, err := getFile(sigURL) + if err != nil { + return nil, fmt.Errorf("error fetching checksums signature: %s", err) + } + + if err := verifySig(sums, sig); err != nil { + return nil, err + } + + return sums, nil +} + +func getFile(url string) ([]byte, error) { + resp, err := httpClient.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s", resp.Status) + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return data, err + } + return data, nil +} + +func GetReleaseHost() string { + return releaseHost +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go new file mode 100644 index 00000000..1a100426 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go @@ -0,0 +1,48 @@ +package discovery + +// PluginCache is an interface implemented by objects that are able to maintain +// a cache of plugins. +type PluginCache interface { + // CachedPluginPath returns a path where the requested plugin is already + // cached, or an empty string if the requested plugin is not yet cached. + CachedPluginPath(kind string, name string, version Version) string + + // InstallDir returns the directory that new plugins should be installed into + // in order to populate the cache. This directory should be used as the + // first argument to getter.Get when downloading plugins with go-getter. + // + // After installing into this directory, use CachedPluginPath to obtain the + // path where the plugin was installed. + InstallDir() string +} + +// NewLocalPluginCache returns a PluginCache that caches plugins in a +// given local directory. 
+func NewLocalPluginCache(dir string) PluginCache { + return &pluginCache{ + Dir: dir, + } +} + +type pluginCache struct { + Dir string +} + +func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { + allPlugins := FindPlugins(kind, []string{c.Dir}) + plugins := allPlugins.WithName(name).WithVersion(version) + + if plugins.Count() == 0 { + // nothing cached + return "" + } + + // There should generally be only one plugin here; if there's more than + // one match for some reason then we'll just choose one arbitrarily. + plugin := plugins.Newest() + return plugin.Path +} + +func (c *pluginCache) InstallDir() string { + return c.Dir +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go new file mode 100644 index 00000000..bdcebcb9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go @@ -0,0 +1,41 @@ +package discovery + +import ( + "crypto/sha256" + "io" + "os" +) + +// PluginMeta is metadata about a plugin, useful for launching the plugin +// and for understanding which plugins are available. +type PluginMeta struct { + // Name is the name of the plugin, e.g. as inferred from the plugin + // binary's filename, or by explicit configuration. + Name string + + // Version is the semver version of the plugin, expressed as a string + // that might not be semver-valid. + Version VersionStr + + // Path is the absolute path of the executable that can be launched + // to provide the RPC server for this plugin. + Path string +} + +// SHA256 returns a SHA256 hash of the content of the referenced executable +// file, or an error if the file's contents cannot be read. +func (m PluginMeta) SHA256() ([]byte, error) { + f, err := os.Open(m.Path) + if err != nil { + return nil, err + } + defer f.Close() + + h := sha256.New() + _, err = io.Copy(h, f) + if err != nil { + return nil, err + } + + return h.Sum(nil), nil +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go new file mode 100644 index 00000000..181ea1fc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go @@ -0,0 +1,195 @@ +package discovery + +// A PluginMetaSet is a set of PluginMeta objects meeting a certain criteria. +// +// Methods on this type allow filtering of the set to produce subsets that +// meet more restrictive criteria. +type PluginMetaSet map[PluginMeta]struct{} + +// Add inserts the given PluginMeta into the receiving set. This is a no-op +// if the given meta is already present. +func (s PluginMetaSet) Add(p PluginMeta) { + s[p] = struct{}{} +} + +// Remove removes the given PluginMeta from the receiving set. This is a no-op +// if the given meta is not already present. +func (s PluginMetaSet) Remove(p PluginMeta) { + delete(s, p) +} + +// Has returns true if the given meta is in the receiving set, or false +// otherwise. +func (s PluginMetaSet) Has(p PluginMeta) bool { + _, ok := s[p] + return ok +} + +// Count returns the number of metas in the set +func (s PluginMetaSet) Count() int { + return len(s) +} + +// ValidateVersions returns two new PluginMetaSets, separating those with +// versions that have syntax-valid semver versions from those that don't. +// +// Eliminating invalid versions from consideration (and possibly warning about +// them) is usually the first step of working with a meta set after discovery +// has completed. 
+func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) {
+	valid = make(PluginMetaSet)
+	invalid = make(PluginMetaSet)
+	for p := range s {
+		if _, err := p.Version.Parse(); err == nil {
+			valid.Add(p)
+		} else {
+			invalid.Add(p)
+		}
+	}
+	return
+}
+
+// WithName returns the subset of metas that have the given name.
+func (s PluginMetaSet) WithName(name string) PluginMetaSet {
+	ns := make(PluginMetaSet)
+	for p := range s {
+		if p.Name == name {
+			ns.Add(p)
+		}
+	}
+	return ns
+}
+
+// WithVersion returns the subset of metas that have the given version.
+//
+// This should be used only with the "valid" result from ValidateVersions;
+// it will ignore any plugin metas that have invalid version strings.
+func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet {
+	ns := make(PluginMetaSet)
+	for p := range s {
+		gotVersion, err := p.Version.Parse()
+		if err != nil {
+			continue
+		}
+		if gotVersion.Equal(version) {
+			ns.Add(p)
+		}
+	}
+	return ns
+}
+
+// ByName groups the metas in the set by their Names, returning a map.
+func (s PluginMetaSet) ByName() map[string]PluginMetaSet {
+	ret := make(map[string]PluginMetaSet)
+	for p := range s {
+		if _, ok := ret[p.Name]; !ok {
+			ret[p.Name] = make(PluginMetaSet)
+		}
+		ret[p.Name].Add(p)
+	}
+	return ret
+}
+
+// Newest returns the one item from the set that has the newest Version value.
+//
+// The result is meaningful only if the set is already filtered such that
+// all of the metas have the same Name.
+//
+// If there isn't at least one meta in the set then this function will panic.
+// Use Count() to ensure that there is at least one value before calling.
+//
+// If any of the metas have invalid version strings then this function will
+// panic. Use ValidateVersions() first to filter out metas with invalid
+// versions.
+//
+// If two metas have the same Version then one is arbitrarily chosen. This
+// situation should be avoided by pre-filtering the set.
+func (s PluginMetaSet) Newest() PluginMeta {
+	if len(s) == 0 {
+		panic("can't call Newest on empty PluginMetaSet")
+	}
+
+	var first = true
+	var winner PluginMeta
+	var winnerVersion Version
+	for p := range s {
+		version, err := p.Version.Parse()
+		if err != nil {
+			panic(err)
+		}
+
+		if first == true || version.NewerThan(winnerVersion) {
+			winner = p
+			winnerVersion = version
+			first = false
+		}
+	}
+
+	return winner
+}
+
+// ConstrainVersions takes a set of requirements and attempts to
+// return a map from name to a set of metas that have the matching
+// name and an appropriate version.
+//
+// If any of the given requirements match *no* plugins then its PluginMetaSet
+// in the returned map will be empty.
+//
+// All viable metas are returned, so the caller can apply any desired filtering
+// to reduce down to a single option. For example, calling Newest() to obtain
+// the highest available version.
+//
+// If any of the metas in the set have invalid version strings then this
+// function will panic. Use ValidateVersions() first to filter out metas with
+// invalid versions.
+func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet {
+	ret := make(map[string]PluginMetaSet)
+	for p := range s {
+		name := p.Name
+		allowedVersions, ok := reqd[name]
+		if !ok {
+			continue
+		}
+		if _, ok := ret[p.Name]; !ok {
+			ret[p.Name] = make(PluginMetaSet)
+		}
+		version, err := p.Version.Parse()
+		if err != nil {
+			panic(err)
+		}
+		if allowedVersions.Allows(version) {
+			ret[p.Name].Add(p)
+		}
+	}
+	return ret
+}
+
+// OverridePaths returns a new set where any existing plugins with the given
+// names are removed and replaced with the single path given in the map.
+//
+// This is here only to continue to support the legacy way of overriding
+// plugin binaries in the .terraformrc file. It treats all given plugins
+// as pre-versioning (version 0.0.0). This mechanism will eventually be
+// phased out, with vendor directories being the intended replacement.
+func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet {
+	ret := make(PluginMetaSet)
+	for p := range s {
+		if _, ok := paths[p.Name]; ok {
+			// Skip plugins that we're overriding
+			continue
+		}
+
+		ret.Add(p)
+	}
+
+	// Now add the metadata for overriding plugins
+	for name, path := range paths {
+		ret.Add(PluginMeta{
+			Name:    name,
+			Version: VersionZero,
+			Path:    path,
+		})
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
new file mode 100644
index 00000000..75430fdd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
@@ -0,0 +1,105 @@
+package discovery
+
+import (
+	"bytes"
+)
+
+// PluginRequirements describes a set of plugins (assumed to be of a consistent
+// kind) that are required to exist and have versions within the given
+// corresponding sets.
+type PluginRequirements map[string]*PluginConstraints
+
+// PluginConstraints represents an element of PluginRequirements describing
+// the constraints for a single plugin.
+type PluginConstraints struct {
+	// Specifies that the plugin's version must be within the given
+	// constraints.
+	Versions Constraints
+
+	// If non-nil, the hash of the on-disk plugin executable must exactly
+	// match the SHA256 hash given here.
+	SHA256 []byte
+}
+
+// Allows returns true if the given version is within the receiver's version
+// constraints.
+func (s *PluginConstraints) Allows(v Version) bool {
+	return s.Versions.Allows(v)
+}
+
+// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable,
+// either because it matches the constraint or because there is no such
+// constraint.
+func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool {
+	if s.SHA256 == nil {
+		return true
+	}
+	return bytes.Equal(s.SHA256, digest)
+}
+
+// Merge takes the contents of the receiver and the other given requirements
+// object and merges them together into a single requirements structure
+// that satisfies both sets of requirements.
+//
+// Note that it doesn't make sense to merge two PluginRequirements with
+// differing required plugin SHA256 hashes, since the result will never
+// match any plugin.
+func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements {
+	ret := make(PluginRequirements)
+	for n, c := range r {
+		ret[n] = &PluginConstraints{
+			Versions: Constraints{}.Append(c.Versions),
+			SHA256:   c.SHA256,
+		}
+	}
+	for n, c := range other {
+		if existing, exists := ret[n]; exists {
+			ret[n].Versions = ret[n].Versions.Append(c.Versions)
+
+			if existing.SHA256 != nil {
+				if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) {
+					// If we've been asked to merge two constraints with
+					// different SHA256 hashes then we'll produce a dummy value
+					// that can never match anything. This is a silly edge case
+					// that no reasonable caller should hit.
+					ret[n].SHA256 = []byte(invalidProviderHash)
+				}
+			} else {
+				ret[n].SHA256 = c.SHA256 // might still be nil
+			}
+		} else {
+			ret[n] = &PluginConstraints{
+				Versions: Constraints{}.Append(c.Versions),
+				SHA256:   c.SHA256,
+			}
+		}
+	}
+	return ret
+}
+
+// LockExecutables applies additional constraints to the receiver that
+// require plugin executables with specific SHA256 digests. This modifies
+// the receiver in-place, since it's intended to be applied after
+// version constraints have been resolved.
+//
+// The given map must include a key for every plugin that is already
+// required. If not, any missing keys will cause the corresponding plugin
+// to never match, though the direct caller doesn't necessarily need to
+// guarantee this as long as the downstream code _applying_ these constraints
+// is able to deal with the non-match in some way.
+func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) {
+	for name, cons := range r {
+		digest := sha256s[name]
+
+		if digest == nil {
+			// Prevent any match, which will then presumably cause the
+			// downstream consumer of this requirements to report an error.
+			cons.SHA256 = []byte(invalidProviderHash)
+			continue
+		}
+
+		cons.SHA256 = digest
+	}
+}
+
+const invalidProviderHash = "<invalid>"
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
new file mode 100644
index 00000000..b6686a5d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
@@ -0,0 +1,53 @@
+package discovery
+
+import (
+	"bytes"
+	"log"
+	"strings"
+
+	"golang.org/x/crypto/openpgp"
+)
+
+// Verify the data using the provided openpgp detached signature and the
+// embedded hashicorp public key.
+func verifySig(data, sig []byte) error {
+	el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(hashiPublicKey))
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	_, err = openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig))
+	return err
+}
+
+// this is the public key that signs the checksums file for releases.
+const hashiPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f +W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq +fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA +3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca +KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k +SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 +cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG +CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n +Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i +SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi +psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w +sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO +klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW +WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 +wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j +2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM +skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo +mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y +0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA +CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc +z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP +0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG +unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ +EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ +oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C +=LYpS +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go new file mode 100644 index 00000000..8fad58d6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go @@ -0,0 +1,72 @@ +package discovery + +import ( + "fmt" + "sort" + + version "github.com/hashicorp/go-version" +) + +const VersionZero = "0.0.0" + +// A VersionStr is a string containing a possibly-invalid representation +// of a semver version number. Call Parse on it to obtain a real Version +// object, or discover that it is invalid. +type VersionStr string + +// Parse transforms a VersionStr into a Version if it is +// syntactically valid. If it isn't then an error is returned instead. +func (s VersionStr) Parse() (Version, error) { + raw, err := version.NewVersion(string(s)) + if err != nil { + return Version{}, err + } + return Version{raw}, nil +} + +// MustParse transforms a VersionStr into a Version if it is +// syntactically valid. If it isn't then it panics. +func (s VersionStr) MustParse() Version { + ret, err := s.Parse() + if err != nil { + panic(err) + } + return ret +} + +// Version represents a version number that has been parsed from +// a semver string and known to be valid. +type Version struct { + // We wrap this here just because it avoids a proliferation of + // direct go-version imports all over the place, and keeps the + // version-processing details within this package. 
+	raw *version.Version
+}
+
+func (v Version) String() string {
+	return v.raw.String()
+}
+
+func (v Version) NewerThan(other Version) bool {
+	return v.raw.GreaterThan(other.raw)
+}
+
+func (v Version) Equal(other Version) bool {
+	return v.raw.Equal(other.raw)
+}
+
+// MinorUpgradeConstraintStr returns a ConstraintStr that would permit
+// minor upgrades relative to the receiving version.
+func (v Version) MinorUpgradeConstraintStr() ConstraintStr {
+	segments := v.raw.Segments()
+	return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1]))
+}
+
+type Versions []Version
+
+// Sort sorts versions from newest to oldest.
+func (v Versions) Sort() {
+	sort.Slice(v, func(i, j int) bool {
+		return v[i].NewerThan(v[j])
+	})
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
new file mode 100644
index 00000000..0aefd759
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
@@ -0,0 +1,84 @@
+package discovery
+
+import (
+	"sort"
+
+	version "github.com/hashicorp/go-version"
+)
+
+// A ConstraintStr is a string containing a possibly-invalid representation
+// of a version constraint provided in configuration. Call Parse on it to
+// obtain a real Constraint object, or discover that it is invalid.
+type ConstraintStr string
+
+// Parse transforms a ConstraintStr into a Constraints if it is
+// syntactically valid. If it isn't then an error is returned instead.
+func (s ConstraintStr) Parse() (Constraints, error) {
+	raw, err := version.NewConstraint(string(s))
+	if err != nil {
+		return Constraints{}, err
+	}
+	return Constraints{raw}, nil
+}
+
+// MustParse is like Parse but it panics if the constraint string is invalid.
+func (s ConstraintStr) MustParse() Constraints {
+	ret, err := s.Parse()
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
+
+// Constraints represents a set of versions which any given Version is either
+// a member of or not.
+type Constraints struct {
+	raw version.Constraints
+}
+
+// AllVersions is a Constraints containing all versions.
+var AllVersions Constraints
+
+func init() {
+	AllVersions = Constraints{
+		raw: make(version.Constraints, 0),
+	}
+}
+
+// Allows returns true if the given version is permitted by the receiving
+// constraints set.
+func (s Constraints) Allows(v Version) bool {
+	return s.raw.Check(v.raw)
+}
+
+// Append combines the receiving set with the given other set to produce
+// a set that is the intersection of both sets, which is to say that the
+// resulting constraints contain only the versions that are members of both.
+func (s Constraints) Append(other Constraints) Constraints {
+	raw := make(version.Constraints, 0, len(s.raw)+len(other.raw))
+
+	// Since "raw" is a list of constraints that remove versions from the set,
+	// "Intersection" is implemented by concatenating together those lists,
+	// thus leaving behind only the versions not removed by either list.
+	raw = append(raw, s.raw...)
+	raw = append(raw, other.raw...)
+
+	// While the set is unordered, we sort these lexically for consistent output.
+	sort.Slice(raw, func(i, j int) bool {
+		return raw[i].String() < raw[j].String()
+	})
+
+	return Constraints{raw}
+}
+
+// String returns a string representation of the set members as a set
+// of range constraints.
+func (s Constraints) String() string {
+	return s.raw.String()
+}
+
+// Unconstrained returns true if and only if the receiver is an empty
+// constraint set.
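+//
+// For example (sketch):
+//
+//     AllVersions.Unconstrained()                          // true
+//     ConstraintStr("~> 1.0").MustParse().Unconstrained()  // false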
+func (s Constraints) Unconstrained() bool {
+	return len(s.raw) == 0
+}
diff --git a/vendor/github.com/hashicorp/terraform/registry/client.go b/vendor/github.com/hashicorp/terraform/registry/client.go
new file mode 100644
index 00000000..8e31a6a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/client.go
@@ -0,0 +1,227 @@
+package registry
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform/httpclient"
+	"github.com/hashicorp/terraform/registry/regsrc"
+	"github.com/hashicorp/terraform/registry/response"
+	"github.com/hashicorp/terraform/svchost"
+	"github.com/hashicorp/terraform/svchost/disco"
+	"github.com/hashicorp/terraform/version"
+)
+
+const (
+	xTerraformGet     = "X-Terraform-Get"
+	xTerraformVersion = "X-Terraform-Version"
+	requestTimeout    = 10 * time.Second
+	serviceID         = "modules.v1"
+)
+
+var tfVersion = version.String()
+
+// Client provides methods to query Terraform Registries.
+type Client struct {
+	// client is the HTTP client to be used for all requests.
+	client *http.Client
+
+	// services is a required *disco.Disco, which may have services and
+	// credentials pre-loaded.
+	services *disco.Disco
+}
+
+// NewClient returns a new initialized registry client.
+func NewClient(services *disco.Disco, client *http.Client) *Client {
+	if services == nil {
+		services = disco.New()
+	}
+
+	if client == nil {
+		client = httpclient.New()
+		client.Timeout = requestTimeout
+	}
+
+	services.Transport = client.Transport
+
+	return &Client{
+		client:   client,
+		services: services,
+	}
+}
+
+// Discover queries the host and returns the base URL for its module registry.
+func (c *Client) Discover(host svchost.Hostname) *url.URL {
+	service := c.services.DiscoverServiceURL(host, serviceID)
+	if service == nil {
+		return nil
+	}
+	if !strings.HasSuffix(service.Path, "/") {
+		service.Path += "/"
+	}
+	return service
+}
+
+// Versions queries the registry for a module, and returns the available versions.
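+//
+// A hypothetical call sequence (module source invented for illustration):
+//
+//     client := NewClient(nil, nil)
+//     mod, _ := regsrc.ParseModuleSource("namespace/name/provider")
+//     resp, err := client.Versions(mod)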
+func (c *Client) Versions(module *regsrc.Module) (*response.ModuleVersions, error) {
+	host, err := module.SvcHost()
+	if err != nil {
+		return nil, err
+	}
+
+	service := c.Discover(host)
+	if service == nil {
+		return nil, fmt.Errorf("host %s does not provide Terraform modules", host)
+	}
+
+	p, err := url.Parse(path.Join(module.Module(), "versions"))
+	if err != nil {
+		return nil, err
+	}
+
+	service = service.ResolveReference(p)
+
+	log.Printf("[DEBUG] fetching module versions from %q", service)
+
+	req, err := http.NewRequest("GET", service.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	c.addRequestCreds(host, req)
+	req.Header.Set(xTerraformVersion, tfVersion)
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	switch resp.StatusCode {
+	case http.StatusOK:
+		// OK
+	case http.StatusNotFound:
+		return nil, &errModuleNotFound{addr: module}
+	default:
+		return nil, fmt.Errorf("error looking up module versions: %s", resp.Status)
+	}
+
+	var versions response.ModuleVersions
+
+	dec := json.NewDecoder(resp.Body)
+	if err := dec.Decode(&versions); err != nil {
+		return nil, err
+	}
+
+	for _, mod := range versions.Modules {
+		for _, v := range mod.Versions {
+			log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source)
+		}
+	}
+
+	return &versions, nil
+}
+
+func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) {
+	creds, err := c.services.CredentialsForHost(host)
+	if err != nil {
+		log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err)
+		return
+	}
+
+	if creds != nil {
+		creds.PrepareRequest(req)
+	}
+}
+
+// Location finds the download location for a specific module version.
+// This returns a string, because the final location may contain special go-getter syntax.
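+//
+// Hypothetical usage (version string invented for illustration):
+//
+//     loc, err := client.Location(mod, "1.2.0")
+//     // loc is a go-getter source string, not necessarily a plain URL.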
+func (c *Client) Location(module *regsrc.Module, version string) (string, error) {
+	host, err := module.SvcHost()
+	if err != nil {
+		return "", err
+	}
+
+	service := c.Discover(host)
+	if service == nil {
+		return "", fmt.Errorf("host %s does not provide Terraform modules", host.ForDisplay())
+	}
+
+	var p *url.URL
+	if version == "" {
+		p, err = url.Parse(path.Join(module.Module(), "download"))
+	} else {
+		p, err = url.Parse(path.Join(module.Module(), version, "download"))
+	}
+	if err != nil {
+		return "", err
+	}
+	download := service.ResolveReference(p)
+
+	log.Printf("[DEBUG] looking up module location from %q", download)
+
+	req, err := http.NewRequest("GET", download.String(), nil)
+	if err != nil {
+		return "", err
+	}
+
+	c.addRequestCreds(host, req)
+	req.Header.Set(xTerraformVersion, tfVersion)
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	// There should be no body, but save it for logging.
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", fmt.Errorf("error reading response body from registry: %s", err)
+	}
+
+	switch resp.StatusCode {
+	case http.StatusOK, http.StatusNoContent:
+		// OK
+	case http.StatusNotFound:
+		return "", fmt.Errorf("module %q version %q not found", module, version)
+	default:
+		// anything else is an error:
+		return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body)
+	}
+
+	// The download location is in the X-Terraform-Get header.
+	location := resp.Header.Get(xTerraformGet)
+	if location == "" {
+		return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body)
+	}
+
+	// If location looks like it's trying to be a relative URL, treat it as
+	// one.
+	//
+	// We don't do this for just _any_ location, since the X-Terraform-Get
+	// header is a go-getter location rather than a URL, and so not all
+	// possible values will parse reasonably as URLs.
+	//
+	// When used in conjunction with go-getter we normally require this header
+	// to be an absolute URL, but we are more liberal here because third-party
+	// registry implementations may not "know" their own absolute URLs if
+	// e.g. they are running behind a reverse proxy frontend, or such.
+	if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") {
+		locationURL, err := url.Parse(location)
+		if err != nil {
+			return "", fmt.Errorf("invalid relative URL for %q: %s", module, err)
+		}
+		locationURL = download.ResolveReference(locationURL)
+		location = locationURL.String()
+	}
+
+	return location, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/registry/errors.go b/vendor/github.com/hashicorp/terraform/registry/errors.go
new file mode 100644
index 00000000..b8dcd31e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/errors.go
@@ -0,0 +1,23 @@
+package registry
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/registry/regsrc"
+)
+
+type errModuleNotFound struct {
+	addr *regsrc.Module
+}
+
+func (e *errModuleNotFound) Error() string {
+	return fmt.Sprintf("module %s not found", e.addr)
+}
+
+// IsModuleNotFound returns true only if the given error is a "module not found"
+// error. This allows callers to recognize this particular error condition
+// as distinct from operational errors such as poor network connectivity.
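+//
+// Typical hypothetical use is to branch on the failure mode:
+//
+//     if _, err := client.Versions(mod); err != nil {
+//         if IsModuleNotFound(err) {
+//             // the module genuinely does not exist in the registry
+//         } else {
+//             // network/protocol error; may be worth retrying
+//         }
+//     }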
+func IsModuleNotFound(err error) bool {
+	_, ok := err.(*errModuleNotFound)
+	return ok
+}
diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go
new file mode 100644
index 00000000..14b4dce9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go
@@ -0,0 +1,140 @@
+package regsrc
+
+import (
+	"regexp"
+	"strings"
+
+	"github.com/hashicorp/terraform/svchost"
+)
+
+var (
+	// InvalidHostString is a placeholder returned when a raw host can't be
+	// converted by IDNA spec. It will never be returned for any host for which
+	// Valid() is true.
+	InvalidHostString = ""
+
+	// urlLabelEndSubRe is a sub-expression that matches any character that's
+	// allowed at the start or end of a URL label according to RFC1123.
+	urlLabelEndSubRe = "[0-9A-Za-z]"
+
+	// urlLabelMidSubRe is a sub-expression that matches any character that's
+	// allowed in a non-start, non-end position of a URL label according to
+	// RFC1123.
+	urlLabelMidSubRe = "[0-9A-Za-z-]"
+
+	// urlLabelUnicodeSubRe is a sub-expression that matches any non-ascii char
+	// in an IDN (Unicode) display URL. It's not strict - there are only ~15k
+	// valid Unicode points in IDN RFC (some with conditions). We are just going
+	// with being liberal with matching and then erroring if we fail to convert
+	// to punycode later (which validates chars fully). This at least ensures
+	// ascii chars disallowed by the RFC1123 parts above don't become legal
+	// again.
+	urlLabelUnicodeSubRe = "[^[:ascii:]]"
+
+	// hostLabelSubRe is the sub-expression that matches a valid hostname label.
+	// It does not anchor the start or end so it can be composed into more
+	// complex RegExps below. Note that for sanity we don't handle disallowing
+	// raw punycode in this regexp (esp. since re2 doesn't support negative
+	// lookbehind, but we can capture its presence here to check later).
+	hostLabelSubRe = "" +
+		// Match valid initial char, or unicode char
+		"(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" +
+		// Optionally, match 0 to 61 valid URL or Unicode chars,
+		// followed by one valid end char or unicode char
+		"(?:" +
+		"(?:" + urlLabelMidSubRe + "|" + urlLabelUnicodeSubRe + "){0,61}" +
+		"(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" +
+		")?"
+
+	// hostSubRe is the sub-expression that matches a valid host prefix.
+	// Allows custom port.
+	hostSubRe = hostLabelSubRe + "(?:\\." + hostLabelSubRe + ")+(?::\\d+)?"
+
+	// hostRe is a regexp that matches a valid host prefix. Additional
+	// validation of unicode strings is needed for matches.
+	hostRe = regexp.MustCompile("^" + hostSubRe + "$")
+)
+
+// FriendlyHost describes a registry instance identified in source strings by a
+// simple bare hostname like registry.terraform.io.
+type FriendlyHost struct {
+	Raw string
+}
+
+func NewFriendlyHost(host string) *FriendlyHost {
+	return &FriendlyHost{Raw: host}
+}
+
+// ParseFriendlyHost attempts to parse a valid "friendly host" prefix from the
+// given string. If no valid prefix is found, host will be nil and rest will
+// contain the full source string. The host prefix must terminate at the end of
+// the input or at the first / character. If one or more characters exist after
+// the first /, they will be returned as rest (without the / delimiter).
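+//
+// For example (sketch): ParseFriendlyHost("registry.terraform.io/ns/name/prov")
+// yields a host wrapping "registry.terraform.io" and rest "ns/name/prov".
+//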
+// Hostnames containing punycode WILL be parsed successfully since they may
+// have come from an internal normalized source string; however, they should
+// be considered invalid if the string came from a user directly. This must be
+// checked explicitly for user-input strings by calling Valid() on the
+// returned host.
+func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) {
+	parts := strings.SplitN(source, "/", 2)
+
+	if hostRe.MatchString(parts[0]) {
+		host = &FriendlyHost{Raw: parts[0]}
+		if len(parts) == 2 {
+			rest = parts[1]
+		}
+		return
+	}
+
+	// No match, return whole string as rest along with nil host
+	rest = source
+	return
+}
+
+// Valid returns whether the host prefix is considered valid in any case.
+// Examples of invalid prefixes include ones that don't conform to the host
+// name specifications. Note that IDN prefixes containing punycode are not
+// valid input; we expect input to always be in user-typed or normalised
+// display form.
+func (h *FriendlyHost) Valid() bool {
+	return svchost.IsValid(h.Raw)
+}
+
+// Display returns the host formatted for display to the user in CLI or web
+// output.
+func (h *FriendlyHost) Display() string {
+	return svchost.ForDisplay(h.Raw)
+}
+
+// Normalized returns the host formatted for internal reference or comparison.
+func (h *FriendlyHost) Normalized() string {
+	host, err := svchost.ForComparison(h.Raw)
+	if err != nil {
+		return InvalidHostString
+	}
+	return string(host)
+}
+
+// String returns the host formatted as the user originally typed it, assuming
+// it was parsed from user input.
+func (h *FriendlyHost) String() string {
+	return h.Raw
+}
+
+// Equal compares the FriendlyHost against another instance taking normalization
+// into account. Invalid hosts cannot be compared and will always return false.
+func (h *FriendlyHost) Equal(other *FriendlyHost) bool {
+	if other == nil {
+		return false
+	}
+
+	otherHost, err := svchost.ForComparison(other.Raw)
+	if err != nil {
+		return false
+	}
+
+	host, err := svchost.ForComparison(h.Raw)
+	if err != nil {
+		return false
+	}
+
+	return otherHost == host
+}
diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go
new file mode 100644
index 00000000..325706ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go
@@ -0,0 +1,205 @@
+package regsrc
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/hashicorp/terraform/svchost"
+)
+
+var (
+	ErrInvalidModuleSource = errors.New("not a valid registry module source")
+
+	// nameSubRe is the sub-expression that matches a valid module namespace or
+	// name. It's strictly a super-set of what GitHub allows for user/org and
+	// repo names respectively, but more restrictive than our original repo-name
+	// regex which allowed periods but could cause ambiguity with hostname
+	// prefixes. It does not anchor the start or end so it can be composed into
+	// more complex RegExps below. Alphanumeric with - and _ allowed in
+	// non-leading and non-trailing positions. Max length 64 chars. (GitHub
+	// username is 38 max.)
+	nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?"
+
+	// providerSubRe is the sub-expression that matches a valid provider. It
+	// does not anchor the start or end so it can be composed into more complex
+	// RegExps below. Only lowercase chars and digits are supported in practice.
+	// Max length 64 chars.
+	providerSubRe = "[0-9a-z]{1,64}"
+
+	// moduleSourceRe is a regular expression that matches the basic
+	// namespace/name/provider[//...] format for registry sources. It assumes
+	// any FriendlyHost prefix has already been removed if present.
+	moduleSourceRe = regexp.MustCompile(
+		fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$",
+			nameSubRe, nameSubRe, providerSubRe))
+
+	// NameRe is a regular expression defining the format allowed for namespace
+	// or name fields in module registry implementations.
+	NameRe = regexp.MustCompile("^" + nameSubRe + "$")
+
+	// ProviderRe is a regular expression defining the format allowed for
+	// provider fields in module registry implementations.
+	ProviderRe = regexp.MustCompile("^" + providerSubRe + "$")
+
+	// These hostnames are not allowed as registry sources, because they are
+	// already special-case module sources in terraform.
+	disallowed = map[string]bool{
+		"github.com":    true,
+		"bitbucket.org": true,
+	}
+)
+
+// Module describes a Terraform Registry Module source.
+type Module struct {
+	// RawHost is the friendly host prefix if one was present. It might be nil
+	// if the original source had no host prefix, which implies
+	// PublicRegistryHost but is distinct from having an actual pointer to
+	// PublicRegistryHost since it encodes the fact the original string didn't
+	// include a host prefix at all, which is significant for recovering the
+	// actual input rather than just the normalized form. Most callers should
+	// access it with Host(), which will return the public registry host
+	// instance if it's nil.
+	RawHost      *FriendlyHost
+	RawNamespace string
+	RawName      string
+	RawProvider  string
+	RawSubmodule string
+}
+
+// NewModule constructs a new module source from separate parts. Pass empty
+// string if host or submodule are not needed.
+func NewModule(host, namespace, name, provider, submodule string) (*Module, error) {
+	m := &Module{
+		RawNamespace: namespace,
+		RawName:      name,
+		RawProvider:  provider,
+		RawSubmodule: submodule,
+	}
+	if host != "" {
+		h := NewFriendlyHost(host)
+		if h != nil {
+			if !h.Valid() || disallowed[h.Display()] {
+				return nil, ErrInvalidModuleSource
+			}
+		}
+		m.RawHost = h
+	}
+	return m, nil
+}
+
+// ParseModuleSource attempts to parse source as a Terraform registry module
+// source. If the string is not found to be in a valid format,
+// ErrInvalidModuleSource is returned. Note that this can only be used on
+// "input" strings, e.g. either ones supplied by the user or potentially
+// normalised but in Display form (unicode). It will fail to parse a source with
+// a punycoded domain since this is not permitted input from a user. If you have
+// an already-normalized string internally, you can compare it without parsing
+// by comparing it with the normalized version of the subject using the normal
+// string equality operator.
+func ParseModuleSource(source string) (*Module, error) {
+	// See if there is a friendly host prefix.
+	host, rest := ParseFriendlyHost(source)
+	if host != nil {
+		if !host.Valid() || disallowed[host.Display()] {
+			return nil, ErrInvalidModuleSource
+		}
+	}
+
+	matches := moduleSourceRe.FindStringSubmatch(rest)
+	if len(matches) < 4 {
+		return nil, ErrInvalidModuleSource
+	}
+
+	m := &Module{
+		RawHost:      host,
+		RawNamespace: matches[1],
+		RawName:      matches[2],
+		RawProvider:  matches[3],
+	}
+
+	if len(matches) == 5 {
+		m.RawSubmodule = matches[4]
+	}
+
+	return m, nil
+}
+
+// Display returns the source formatted for display to the user in CLI or web
+// output.
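+//
+// For example (sketch): a source parsed from
+// "Registry.Terraform.io/Namespace/Name/Provider" displays as
+// "namespace/name/provider": the public registry host prefix is omitted
+// and the remainder is lower-cased.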
+func (m *Module) Display() string {
+	return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false)
+}
+
+// Normalized returns the source formatted for internal reference or comparison.
+func (m *Module) Normalized() string {
+	return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false)
+}
+
+// String returns the source formatted as the user originally typed it, assuming
+// it was parsed from user input.
+func (m *Module) String() string {
+	// Don't normalize public registry hostname - leave it exactly as the user
+	// input it.
+	hostPrefix := ""
+	if m.RawHost != nil {
+		hostPrefix = m.RawHost.String() + "/"
+	}
+	return m.formatWithPrefix(hostPrefix, true)
+}
+
+// Equal compares the module source against another instance taking
+// normalization into account.
+func (m *Module) Equal(other *Module) bool {
+	return m.Normalized() == other.Normalized()
+}
+
+// Host returns the FriendlyHost object describing which registry this module is
+// in. If the original source string had no host component, this will return
+// PublicRegistryHost.
+func (m *Module) Host() *FriendlyHost {
+	if m.RawHost == nil {
+		return PublicRegistryHost
+	}
+	return m.RawHost
+}
+
+func (m *Module) normalizedHostPrefix(host string) string {
+	if m.Host().Equal(PublicRegistryHost) {
+		return ""
+	}
+	return host + "/"
+}
+
+func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string {
+	suffix := ""
+	if m.RawSubmodule != "" {
+		suffix = "//" + m.RawSubmodule
+	}
+	str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName,
+		m.RawProvider, suffix)
+
+	// Lower-case by default.
+	if !preserveCase {
+		return strings.ToLower(str)
+	}
+	return str
+}
+
+// Module returns just the registry ID of the module, without a hostname or
+// suffix.
+func (m *Module) Module() string {
+	return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider)
+}
+
+// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may
+// contain an invalid hostname, this also returns an error indicating if it
+// could be converted to a svchost.Hostname. If no host is specified, the
+// default PublicRegistryHost is returned.
+func (m *Module) SvcHost() (svchost.Hostname, error) {
+	if m.RawHost == nil {
+		return svchost.ForComparison(PublicRegistryHost.Raw)
+	}
+	return svchost.ForComparison(m.RawHost.Raw)
+}
diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go
new file mode 100644
index 00000000..c430bf14
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go
@@ -0,0 +1,8 @@
+// Package regsrc provides helpers for working with source strings that identify
+// resources within a Terraform registry.
+package regsrc
+
+var (
+	// PublicRegistryHost is a FriendlyHost that represents the public registry.
+	PublicRegistryHost = NewFriendlyHost("registry.terraform.io")
+)
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module.go b/vendor/github.com/hashicorp/terraform/registry/response/module.go
new file mode 100644
index 00000000..3bd2b3df
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/module.go
@@ -0,0 +1,93 @@
+package response
+
+import (
+	"time"
+)
+
+// Module is the response structure with the data for a single module version.
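+//
+// A single entry decodes from JSON along these lines (abridged, with
+// illustrative values):
+//
+//     {
+//       "id": "namespace/name/provider/1.0.0",
+//       "namespace": "namespace",
+//       "name": "name",
+//       "provider": "provider",
+//       "version": "1.0.0",
+//       "verified": true
+//     }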
+type Module struct {
+	ID string `json:"id"`
+
+	//---------------------------------------------------------------
+	// Metadata about the overall module.
+
+	Owner       string    `json:"owner"`
+	Namespace   string    `json:"namespace"`
+	Name        string    `json:"name"`
+	Version     string    `json:"version"`
+	Provider    string    `json:"provider"`
+	Description string    `json:"description"`
+	Source      string    `json:"source"`
+	PublishedAt time.Time `json:"published_at"`
+	Downloads   int       `json:"downloads"`
+	Verified    bool      `json:"verified"`
+}
+
+// ModuleDetail represents a module in full detail.
+type ModuleDetail struct {
+	Module
+
+	//---------------------------------------------------------------
+	// Metadata about the overall module. This is only available when
+	// requesting the specific module (not in list responses).
+
+	// Root is the root module.
+	Root *ModuleSubmodule `json:"root"`
+
+	// Submodules are the other submodules that are available within
+	// this module.
+	Submodules []*ModuleSubmodule `json:"submodules"`
+
+	//---------------------------------------------------------------
+	// The fields below are only set when requesting this specific
+	// module. They are available to easily know all available versions
+	// and providers without multiple API calls.
+
+	Providers []string `json:"providers"` // All available providers
+	Versions  []string `json:"versions"`  // All versions
+}
+
+// ModuleSubmodule is the metadata about a specific submodule within
+// a module. This includes the root module as a special case.
+type ModuleSubmodule struct {
+	Path   string `json:"path"`
+	Readme string `json:"readme"`
+	Empty  bool   `json:"empty"`
+
+	Inputs       []*ModuleInput    `json:"inputs"`
+	Outputs      []*ModuleOutput   `json:"outputs"`
+	Dependencies []*ModuleDep      `json:"dependencies"`
+	Resources    []*ModuleResource `json:"resources"`
+}
+
+// ModuleInput is an input for a module.
+type ModuleInput struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+	Default     string `json:"default"`
+}
+
+// ModuleOutput is an output for a module.
+type ModuleOutput struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+}
+
+// ModuleDep is a dependency of a module on another module.
+type ModuleDep struct {
+	Name    string `json:"name"`
+	Source  string `json:"source"`
+	Version string `json:"version"`
+}
+
+// ModuleProviderDep is the metadata for a provider dependency.
+type ModuleProviderDep struct {
+	Name    string `json:"name"`
+	Version string `json:"version"`
+}
+
+// ModuleResource is a resource declared within a module.
+type ModuleResource struct {
+	Name string `json:"name"`
+	Type string `json:"type"`
+}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_list.go b/vendor/github.com/hashicorp/terraform/registry/response/module_list.go
new file mode 100644
index 00000000..97837482
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/module_list.go
@@ -0,0 +1,7 @@
+package response
+
+// ModuleList is the response structure for a pageable list of modules.
+type ModuleList struct {
+	Meta    PaginationMeta `json:"meta"`
+	Modules []*Module      `json:"modules"`
+}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go b/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go
new file mode 100644
index 00000000..e48499dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go
@@ -0,0 +1,14 @@
+package response
+
+// ModuleProvider represents a single provider for modules.
+type ModuleProvider struct { + Name string `json:"name"` + Downloads int `json:"downloads"` + ModuleCount int `json:"module_count"` +} + +// ModuleProviderList is the response structure for a pageable list of ModuleProviders. +type ModuleProviderList struct { + Meta PaginationMeta `json:"meta"` + Providers []*ModuleProvider `json:"providers"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go b/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go new file mode 100644 index 00000000..f69e9750 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go @@ -0,0 +1,32 @@ +package response + +// ModuleVersions is the response format that contains all metadata about module +// versions needed for terraform CLI to resolve version constraints. See RFC +// TF-042 for details on this format. +type ModuleVersions struct { + Modules []*ModuleProviderVersions `json:"modules"` +} + +// ModuleProviderVersions is the response format for a single module instance, +// containing metadata about all versions and their dependencies. +type ModuleProviderVersions struct { + Source string `json:"source"` + Versions []*ModuleVersion `json:"versions"` +} + +// ModuleVersion is the output metadata for a given version needed by CLI to +// resolve candidate versions to satisfy requirements. +type ModuleVersion struct { + Version string `json:"version"` + Root VersionSubmodule `json:"root"` + Submodules []*VersionSubmodule `json:"submodules"` +} + +// VersionSubmodule is the output metadata for a submodule within a given +// version needed by CLI to resolve candidate versions to satisfy requirements. +// When representing the Root in JSON the path is omitted. +type VersionSubmodule struct { + Path string `json:"path,omitempty"` + Providers []*ModuleProviderDep `json:"providers"` + Dependencies []*ModuleDep `json:"dependencies"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/pagination.go b/vendor/github.com/hashicorp/terraform/registry/response/pagination.go new file mode 100644 index 00000000..75a92549 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/pagination.go @@ -0,0 +1,65 @@ +package response + +import ( + "net/url" + "strconv" +) + +// PaginationMeta is a structure included in responses for pagination. +type PaginationMeta struct { + Limit int `json:"limit"` + CurrentOffset int `json:"current_offset"` + NextOffset *int `json:"next_offset,omitempty"` + PrevOffset *int `json:"prev_offset,omitempty"` + NextURL string `json:"next_url,omitempty"` + PrevURL string `json:"prev_url,omitempty"` +} + +// NewPaginationMeta populates pagination meta data from result parameters +func NewPaginationMeta(offset, limit int, hasMore bool, currentURL string) PaginationMeta { + pm := PaginationMeta{ + Limit: limit, + CurrentOffset: offset, + } + + // Calculate next/prev offsets, leave nil if not valid pages + nextOffset := offset + limit + if hasMore { + pm.NextOffset = &nextOffset + } + + prevOffset := offset - limit + if prevOffset < 0 { + prevOffset = 0 + } + if prevOffset < offset { + pm.PrevOffset = &prevOffset + } + + // If URL format provided, populate URLs. Intentionally swallow URL errors for now, API should + // catch missing URLs if we call with bad URL arg (and we care about them being present). 
+ if currentURL != "" && pm.NextOffset != nil { + pm.NextURL, _ = setQueryParam(currentURL, "offset", *pm.NextOffset, 0) + } + if currentURL != "" && pm.PrevOffset != nil { + pm.PrevURL, _ = setQueryParam(currentURL, "offset", *pm.PrevOffset, 0) + } + + return pm +} + +func setQueryParam(baseURL, key string, val, defaultVal int) (string, error) { + u, err := url.Parse(baseURL) + if err != nil { + return "", err + } + q := u.Query() + if val == defaultVal { + // elide param if it's the default value + q.Del(key) + } else { + q.Set(key, strconv.Itoa(val)) + } + u.RawQuery = q.Encode() + return u.String(), nil +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/redirect.go b/vendor/github.com/hashicorp/terraform/registry/response/redirect.go new file mode 100644 index 00000000..d5eb49ba --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/redirect.go @@ -0,0 +1,6 @@ +package response + +// Redirect causes the frontend to perform a window redirect. +type Redirect struct { + URL string `json:"url"` +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go new file mode 100644 index 00000000..4f0d1689 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go @@ -0,0 +1,45 @@ +package auth + +import ( + "github.com/hashicorp/terraform/svchost" +) + +// CachingCredentialsSource creates a new credentials source that wraps another +// and caches its results in memory, on a per-hostname basis. +// +// No means is provided for expiration of cached credentials, so a caching +// credentials source should have a limited lifetime (one Terraform operation, +// for example) to ensure that time-limited credentials don't expire before +// their cache entries do. +func CachingCredentialsSource(source CredentialsSource) CredentialsSource { + return &cachingCredentialsSource{ + source: source, + cache: map[svchost.Hostname]HostCredentials{}, + } +} + +type cachingCredentialsSource struct { + source CredentialsSource + cache map[svchost.Hostname]HostCredentials +} + +// ForHost passes the given hostname on to the wrapped credentials source and +// caches the result to return for future requests with the same hostname. +// +// Both credentials and non-credentials (nil) responses are cached. +// +// No cache entry is created if the wrapped source returns an error, to allow +// the caller to retry the failing operation. +func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if cache, cached := s.cache[host]; cached { + return cache, nil + } + + result, err := s.source.ForHost(host) + if err != nil { + return result, err + } + + s.cache[host] = result + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go new file mode 100644 index 00000000..0372c160 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go @@ -0,0 +1,63 @@ +// Package auth contains types and functions to manage authentication +// credentials for service hosts. +package auth + +import ( + "net/http" + + "github.com/hashicorp/terraform/svchost" +) + +// Credentials is a list of CredentialsSource objects that can be tried in +// turn until one returns credentials for a host, or one returns an error. +// +// A Credentials is itself a CredentialsSource, wrapping its members. 
+// In principle one CredentialsSource can be nested inside another, though +// there is no good reason to do so. +type Credentials []CredentialsSource + +// NoCredentials is an empty CredentialsSource that always returns nil +// when asked for credentials. +var NoCredentials CredentialsSource = Credentials{} + +// A CredentialsSource is an object that may be able to provide credentials +// for a given host. +// +// Credentials lookups are not guaranteed to be concurrency-safe. Callers +// using these facilities in concurrent code must use external concurrency +// primitives to prevent race conditions. +type CredentialsSource interface { + // ForHost returns a non-nil HostCredentials if the source has credentials + // available for the host, and a nil HostCredentials if it does not. + // + // If an error is returned, progress through a list of CredentialsSources + // is halted and the error is returned to the user. + ForHost(host svchost.Hostname) (HostCredentials, error) +} + +// HostCredentials represents a single set of credentials for a particular +// host. +type HostCredentials interface { + // PrepareRequest modifies the given request in-place to apply the + // receiving credentials. The usual behavior of this method is to + // add some sort of Authorization header to the request. + PrepareRequest(req *http.Request) + + // Token returns the authentication token. + Token() string +} + +// ForHost iterates over the contained CredentialsSource objects and +// tries to obtain credentials for the given host from each one in turn. +// +// If any source returns either a non-nil HostCredentials or a non-nil error +// then this result is returned. Otherwise, the result is nil, nil. +func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) { + for _, source := range c { + creds, err := source.ForHost(host) + if creds != nil || err != nil { + return creds, err + } + } + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go b/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go new file mode 100644 index 00000000..f91006ae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go @@ -0,0 +1,18 @@ +package auth + +// HostCredentialsFromMap converts a map of key-value pairs from a credentials +// definition provided by the user (e.g. in a config file, or via a credentials +// helper) into a HostCredentials object if possible, or returns nil if +// no credentials could be extracted from the map. +// +// This function ignores map keys it is unfamiliar with, to allow for future +// expansion of the credentials map format for new credential types. 
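+//
+// For example (sketch):
+//
+//     creds := HostCredentialsFromMap(map[string]interface{}{"token": "abc123"})
+//     // creds is a HostCredentialsToken wrapping "abc123"; a map without a
+//     // "token" key would yield nil.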
+func HostCredentialsFromMap(m map[string]interface{}) HostCredentials { + if m == nil { + return nil + } + if token, ok := m["token"].(string); ok { + return HostCredentialsToken(token) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go new file mode 100644 index 00000000..d72ffe3c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go @@ -0,0 +1,80 @@ +package auth + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "path/filepath" + + "github.com/hashicorp/terraform/svchost" +) + +type helperProgramCredentialsSource struct { + executable string + args []string +} + +// HelperProgramCredentialsSource returns a CredentialsSource that runs the +// given program with the given arguments in order to obtain credentials. +// +// The given executable path must be an absolute path; it is the caller's +// responsibility to validate and process a relative path or other input +// provided by an end-user. If the given path is not absolute, this +// function will panic. +// +// When credentials are requested, the program will be run in a child process +// with the given arguments along with two additional arguments added to the +// end of the list: the literal string "get", followed by the requested +// hostname in ASCII compatibility form (punycode form). +func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource { + if !filepath.IsAbs(executable) { + panic("NewCredentialsSourceHelperProgram requires absolute path to executable") + } + + fullArgs := make([]string, len(args)+1) + fullArgs[0] = executable + copy(fullArgs[1:], args) + + return &helperProgramCredentialsSource{ + executable: executable, + args: fullArgs, + } +} + +func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + args := make([]string, len(s.args), len(s.args)+2) + copy(args, s.args) + args = append(args, "get") + args = append(args, string(host)) + + outBuf := bytes.Buffer{} + errBuf := bytes.Buffer{} + + cmd := exec.Cmd{ + Path: s.executable, + Args: args, + Stdin: nil, + Stdout: &outBuf, + Stderr: &errBuf, + } + err := cmd.Run() + if _, isExitErr := err.(*exec.ExitError); isExitErr { + errText := errBuf.String() + if errText == "" { + // Shouldn't happen for a well-behaved helper program + return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable) + } + return nil, fmt.Errorf("error in %s: %s", s.executable, errText) + } else if err != nil { + return nil, fmt.Errorf("failed to run %s: %s", s.executable, err) + } + + var m map[string]interface{} + err = json.Unmarshal(outBuf.Bytes(), &m) + if err != nil { + return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err) + } + + return HostCredentialsFromMap(m), nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/static.go b/vendor/github.com/hashicorp/terraform/svchost/auth/static.go new file mode 100644 index 00000000..5373fddf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/static.go @@ -0,0 +1,28 @@ +package auth + +import ( + "github.com/hashicorp/terraform/svchost" +) + +// StaticCredentialsSource is a credentials source that retrieves credentials +// from the provided map. It returns nil if a requested hostname is not +// present in the map. +// +// The caller should not modify the given map after passing it to this function. 
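+//
+// Hypothetical setup (hostname and token invented for illustration):
+//
+//     src := StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
+//         svchost.Hostname("registry.example.com"): {"token": "abc123"},
+//     })
+//     creds, err := src.ForHost(svchost.Hostname("registry.example.com"))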
+func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource { + return staticCredentialsSource(creds) +} + +type staticCredentialsSource map[svchost.Hostname]map[string]interface{} + +func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if s == nil { + return nil, nil + } + + if m, exists := s[host]; exists { + return HostCredentialsFromMap(m), nil + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go b/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go new file mode 100644 index 00000000..9358bcb6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go @@ -0,0 +1,25 @@ +package auth + +import ( + "net/http" +) + +// HostCredentialsToken is a HostCredentials implementation that represents a +// single "bearer token", to be sent to the server via an Authorization header +// with the auth type set to "Bearer" +type HostCredentialsToken string + +// PrepareRequest alters the given HTTP request by setting its Authorization +// header to the string "Bearer " followed by the encapsulated authentication +// token. +func (tc HostCredentialsToken) PrepareRequest(req *http.Request) { + if req.Header == nil { + req.Header = http.Header{} + } + req.Header.Set("Authorization", "Bearer "+string(tc)) +} + +// Token returns the authentication token. +func (tc HostCredentialsToken) Token() string { + return string(tc) +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go new file mode 100644 index 00000000..7fc49da9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go @@ -0,0 +1,241 @@ +// Package disco handles Terraform's remote service discovery protocol. +// +// This protocol allows mapping from a service hostname, as produced by the +// svchost package, to a set of services supported by that host and the +// endpoint information for each supported service. +package disco + +import ( + "encoding/json" + "errors" + "io" + "io/ioutil" + "log" + "mime" + "net/http" + "net/url" + "time" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/auth" +) + +const ( + discoPath = "/.well-known/terraform.json" + maxRedirects = 3 // arbitrary-but-small number to prevent runaway redirect loops + discoTimeout = 11 * time.Second // arbitrary-but-small time limit to prevent UI "hangs" during discovery + maxDiscoDocBytes = 1 * 1024 * 1024 // 1MB - to prevent abusive services from using loads of our memory +) + +var httpTransport = cleanhttp.DefaultPooledTransport() // overridden during tests, to skip TLS verification + +// Disco is the main type in this package, which allows discovery on given +// hostnames and caches the results by hostname to avoid repeated requests +// for the same information. +type Disco struct { + hostCache map[svchost.Hostname]Host + credsSrc auth.CredentialsSource + + // Transport is a custom http.RoundTripper to use. + // A package default is used if this is nil. + Transport http.RoundTripper +} + +// New returns a new initialized discovery object. +func New() *Disco { + return NewWithCredentialsSource(nil) +} + +// NewWithCredentialsSource returns a new discovery object initialized with +// the given credentials source. 
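+//
+// A hypothetical wiring with the auth package (source invented for
+// illustration):
+//
+//     d := NewWithCredentialsSource(credSrc) // credSrc is any auth.CredentialsSource
+//     creds, err := d.CredentialsForHost(hostname)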
+func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco { + return &Disco{credsSrc: credsSrc} +} + +// SetCredentialsSource provides a credentials source that will be used to +// add credentials to outgoing discovery requests, where available. +// +// If this method is never called, no outgoing discovery requests will have +// credentials. +func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { + d.credsSrc = src +} + +// CredentialsForHost returns a non-nil HostCredentials if the embedded source has +// credentials available for the host, and a nil HostCredentials if it does not. +func (d *Disco) CredentialsForHost(host svchost.Hostname) (auth.HostCredentials, error) { + if d.credsSrc == nil { + return nil, nil + } + return d.credsSrc.ForHost(host) +} + +// ForceHostServices provides a pre-defined set of services for a given +// host, which prevents the receiver from attempting network-based discovery +// for the given host. Instead, the given services map will be returned +// verbatim. +// +// When providing "forced" services, any relative URLs are resolved against +// the initial discovery URL that would have been used for network-based +// discovery, yielding the same results as if the given map were published +// at the host's default discovery URL, though using absolute URLs is strongly +// recommended to make the configured behavior more explicit. +func (d *Disco) ForceHostServices(host svchost.Hostname, services map[string]interface{}) { + if d.hostCache == nil { + d.hostCache = map[svchost.Hostname]Host{} + } + if services == nil { + services = map[string]interface{}{} + } + d.hostCache[host] = Host{ + discoURL: &url.URL{ + Scheme: "https", + Host: string(host), + Path: discoPath, + }, + services: services, + } +} + +// Discover runs the discovery protocol against the given hostname (which must +// already have been validated and prepared with svchost.ForComparison) and +// returns an object describing the services available at that host. +// +// If a given hostname supports no Terraform services at all, a non-nil but +// empty Host object is returned. When giving feedback to the end user about +// such situations, we say e.g. "the host doesn't provide a module +// registry", regardless of whether that is due to that service specifically +// being absent or due to the host not providing Terraform services at all, +// since we don't wish to expose the detail of whole-host discovery to an +// end-user. +func (d *Disco) Discover(host svchost.Hostname) Host { + if d.hostCache == nil { + d.hostCache = map[svchost.Hostname]Host{} + } + if cache, cached := d.hostCache[host]; cached { + return cache + } + + ret := d.discover(host) + d.hostCache[host] = ret + return ret +} + +// DiscoverServiceURL is a convenience wrapper for discovery on a given +// hostname and then looking up a particular service in the result. +func (d *Disco) DiscoverServiceURL(host svchost.Hostname, serviceID string) *url.URL { + return d.Discover(host).ServiceURL(serviceID) +} + +// discover implements the actual discovery process, with its result cached +// by the public-facing Discover method. 
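+//
+// The discovery document fetched from the well-known path is a JSON object
+// mapping service IDs to endpoint URLs, along these lines (hostname invented
+// for illustration):
+//
+//     {"modules.v1": "https://registry.example.com/v1/modules/"}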
+func (d *Disco) discover(host svchost.Hostname) Host { + discoURL := &url.URL{ + Scheme: "https", + Host: host.String(), + Path: discoPath, + } + + t := d.Transport + if t == nil { + t = httpTransport + } + + client := &http.Client{ + Transport: t, + Timeout: discoTimeout, + + CheckRedirect: func(req *http.Request, via []*http.Request) error { + log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) + if len(via) > maxRedirects { + return errors.New("too many redirects") // (this error message will never actually be seen) + } + return nil + }, + } + + req := &http.Request{ + Method: "GET", + URL: discoURL, + } + + if creds, err := d.CredentialsForHost(host); err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err) + } else if creds != nil { + creds.PrepareRequest(req) // alters req to include credentials + } + + log.Printf("[DEBUG] Service discovery for %s at %s", host, discoURL) + + ret := Host{ + discoURL: discoURL, + } + + resp, err := client.Do(req) + if err != nil { + log.Printf("[WARN] Failed to request discovery document: %s", err) + return ret // empty + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + log.Printf("[WARN] Failed to request discovery document: %s", resp.Status) + return ret // empty + } + + // If the client followed any redirects, we will have a new URL to use + // as our base for relative resolution. + ret.discoURL = resp.Request.URL + + contentType := resp.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + log.Printf("[WARN] Discovery URL has malformed Content-Type %q", contentType) + return ret // empty + } + if mediaType != "application/json" { + log.Printf("[DEBUG] Discovery URL returned Content-Type %q, rather than application/json", mediaType) + return ret // empty + } + + // (this doesn't catch chunked encoding, because ContentLength is -1 in that case...) + if resp.ContentLength > maxDiscoDocBytes { + // Size limit here is not a contractual requirement and so we may + // adjust it over time if we find a different limit is warranted. + log.Printf("[WARN] Discovery doc response is too large (got %d bytes; limit %d)", resp.ContentLength, maxDiscoDocBytes) + return ret // empty + } + + // If the response is using chunked encoding then we can't predict + // its size, but we'll at least prevent reading the entire thing into + // memory. + lr := io.LimitReader(resp.Body, maxDiscoDocBytes) + + servicesBytes, err := ioutil.ReadAll(lr) + if err != nil { + log.Printf("[WARN] Error reading discovery document body: %s", err) + return ret // empty + } + + var services map[string]interface{} + err = json.Unmarshal(servicesBytes, &services) + if err != nil { + log.Printf("[WARN] Failed to decode discovery document as a JSON object: %s", err) + return ret // empty + } + + ret.services = services + return ret +} + +// Forget invalidates any cached record of the given hostname. If the host +// has no cache entry then this is a no-op. +func (d *Disco) Forget(host svchost.Hostname) { + delete(d.hostCache, host) +} + +// ForgetAll is like Forget, but for all of the hostnames that have cache entries. 
+func (d *Disco) ForgetAll() { + d.hostCache = nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/host.go b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go new file mode 100644 index 00000000..faf58220 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go @@ -0,0 +1,51 @@ +package disco + +import ( + "net/url" +) + +type Host struct { + discoURL *url.URL + services map[string]interface{} +} + +// ServiceURL returns the URL associated with the given service identifier, +// which should be of the form "servicename.vN". +// +// A non-nil result is always an absolute URL with a scheme of either https +// or http. +// +// If the requested service is not supported by the host, this method returns +// a nil URL. +// +// If the discovery document entry for the given service is invalid (not a URL), +// it is treated as absent, also returning a nil URL. +func (h Host) ServiceURL(id string) *url.URL { + if h.services == nil { + return nil // no services supported for an empty Host + } + + urlStr, ok := h.services[id].(string) + if !ok { + return nil + } + + ret, err := url.Parse(urlStr) + if err != nil { + return nil + } + if !ret.IsAbs() { + ret = h.discoURL.ResolveReference(ret) // make absolute using our discovery doc URL + } + if ret.Scheme != "https" && ret.Scheme != "http" { + return nil + } + if ret.User != nil { + // embedded username/password information is not permitted; credentials + // are handled out of band. + return nil + } + ret.Fragment = "" // fragment part is irrelevant, since we're not a browser + + return h.discoURL.ResolveReference(ret) +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/label_iter.go b/vendor/github.com/hashicorp/terraform/svchost/label_iter.go new file mode 100644 index 00000000..af8ccbab --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/label_iter.go @@ -0,0 +1,69 @@ +package svchost + +import ( + "strings" +) + +// A labelIter allows iterating over domain name labels. +// +// This type is copied from golang.org/x/net/idna, where it is used +// to segment hostnames into their separate labels for analysis. We use +// it for the same purpose here, in ForComparison. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' 
{ + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/svchost.go b/vendor/github.com/hashicorp/terraform/svchost/svchost.go new file mode 100644 index 00000000..4eded142 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/svchost.go @@ -0,0 +1,207 @@ +// Package svchost deals with the representations of the so-called "friendly +// hostnames" that we use to represent systems that provide Terraform-native +// remote services, such as module registry, remote operations, etc. +// +// Friendly hostnames are specified such that, as much as possible, they +// are consistent with how web browsers think of hostnames, so that users +// can bring their intuitions about how hostnames behave when they access +// a Terraform Enterprise instance's web UI (or indeed any other website) +// and have this behave in a similar way. +package svchost + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "golang.org/x/net/idna" +) + +// Hostname is specialized name for string that indicates that the string +// has been converted to (or was already in) the storage and comparison form. +// +// Hostname values are not suitable for display in the user-interface. Use +// the ForDisplay method to obtain a form suitable for display in the UI. +// +// Unlike user-supplied hostnames, strings of type Hostname (assuming they +// were constructed by a function within this package) can be compared for +// equality using the standard Go == operator. +type Hostname string + +// acePrefix is the ASCII Compatible Encoding prefix, used to indicate that +// a domain name label is in "punycode" form. +const acePrefix = "xn--" + +// displayProfile is a very liberal idna profile that we use to do +// normalization for display without imposing validation rules. +var displayProfile = idna.New( + idna.MapForLookup(), + idna.Transitional(true), +) + +// ForDisplay takes a user-specified hostname and returns a normalized form of +// it suitable for display in the UI. +// +// If the input is so invalid that no normalization can be performed then +// this will return the input, assuming that the caller still wants to +// display _something_. This function is, however, more tolerant than the +// other functions in this package and will make a best effort to prepare +// _any_ given hostname for display. +// +// For validation, use either IsValid (for explicit validation) or +// ForComparison (which implicitly validates, returning an error if invalid). +func ForDisplay(given string) string { + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + portPortion, _ = normalizePortPortion(portPortion) + + ascii, err := displayProfile.ToASCII(given) + if err != nil { + return given + portPortion + } + display, err := displayProfile.ToUnicode(ascii) + if err != nil { + return given + portPortion + } + return display + portPortion +} + +// IsValid returns true if the given user-specified hostname is a valid +// service hostname. +// +// Validity is determined by complying with the RFC 5891 requirements for +// names that are valid for domain lookup (section 5), with the additional +// requirement that user-supplied forms must not _already_ contain +// Punycode segments. 
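A short usage sketch of this package's exported entry points, ForComparison and ForDisplay, both defined in this file; the hostnames are illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/svchost"
)

func main() {
	// ForComparison normalizes for storage and comparison: labels are
	// case-folded and Punycode-encoded, and the default ":443" is dropped.
	h, err := svchost.ForComparison("Registry.Terraform.io:443")
	if err != nil {
		panic(err)
	}
	fmt.Println(h) // registry.terraform.io

	// ForDisplay produces a human-readable (Unicode) form instead.
	fmt.Println(svchost.ForDisplay("EXAMPLE.com:8080")) // example.com:8080
}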
+func IsValid(given string) bool { + _, err := ForComparison(given) + return err == nil +} + +// ForComparison takes a user-specified hostname and returns a normalized +// form of it suitable for storage and comparison. The result is not suitable +// for display to end-users because it uses Punycode to represent non-ASCII +// characters, and this form is unreadable for non-ASCII-speaking humans. +// +// The result is typed as Hostname -- a specialized name for string -- so that +// other APIs can make it clear within the type system whether they expect a +// user-specified or display-form hostname or a value already normalized for +// comparison. +// +// The returned Hostname is not valid if the returned error is non-nil. +func ForComparison(given string) (Hostname, error) { + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + + var err error + portPortion, err = normalizePortPortion(portPortion) + if err != nil { + return Hostname(""), err + } + + if given == "" { + return Hostname(""), fmt.Errorf("empty string is not a valid hostname") + } + + // First we'll apply our additional constraint that Punycode must not + // be given directly by the user. This is not an IDN specification + // requirement, but we prohibit it to force users to use human-readable + // hostname forms within Terraform configuration. + labels := labelIter{orig: given} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + return Hostname(""), fmt.Errorf( + "hostname contains empty label (two consecutive periods)", + ) + } + if strings.HasPrefix(label, acePrefix) { + return Hostname(""), fmt.Errorf( + "hostname label %q specified in punycode format; service hostnames must be given in unicode", + label, + ) + } + } + + result, err := idna.Lookup.ToASCII(given) + if err != nil { + return Hostname(""), err + } + return Hostname(result + portPortion), nil +} + +// ForDisplay returns a version of the receiver that is appropriate for display +// in the UI. This includes converting any punycode labels to their +// corresponding Unicode characters. +// +// A round-trip through ForComparison and this ForDisplay method does not +// guarantee the same result as calling this package's top-level ForDisplay +// function, since a round-trip through the Hostname type implies stricter +// handling than we do when doing basic display-only processing. +func (h Hostname) ForDisplay() string { + given := string(h) + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + // We don't normalize the port portion here because we assume it's + // already been normalized on the way in. + + result, err := idna.Lookup.ToUnicode(given) + if err != nil { + // Should never happen, since type Hostname indicates that a string + // passed through our validation rules. + panic(fmt.Errorf("ForDisplay called on invalid Hostname: %s", err)) + } + return result + portPortion +} + +func (h Hostname) String() string { + return string(h) +} + +func (h Hostname) GoString() string { + return fmt.Sprintf("svchost.Hostname(%q)", string(h)) +} + +// normalizePortPortion attempts to normalize the "port portion" of a hostname, +// which begins with the first colon in the hostname and should be followed +// by a string of decimal digits. +// +// If the port portion is valid, a normalized version of it is returned along +// with a nil error. 
+// +// If the port portion is invalid, the input string is returned verbatim along +// with a non-nil error. +// +// An empty string is a valid port portion representing the absence of a port. +// If non-empty, the first character must be a colon. +func normalizePortPortion(s string) (string, error) { + if s == "" { + return s, nil + } + + if s[0] != ':' { + // should never happen, since caller tends to guarantee the presence + // of a colon due to how it's extracted from the string. + return s, errors.New("port portion is missing its initial colon") + } + + numStr := s[1:] + num, err := strconv.Atoi(numStr) + if err != nil { + return s, errors.New("port portion contains non-digit characters") + } + if num == 443 { + return "", nil // ":443" is the default + } + if num > 65535 { + return s, errors.New("port number is greater than 65535") + } + return fmt.Sprintf(":%d", num), nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go index 5940dd3f..f133cc20 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context.go @@ -1,16 +1,20 @@ package terraform import ( + "context" "fmt" "log" "sort" "strings" "sync" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/version" ) // InputMode defines what sort of input will be asked for when Input @@ -21,7 +25,8 @@ const ( // InputModeVar asks for all variables InputModeVar InputMode = 1 << iota - // InputModeVarUnset asks for variables which are not set yet + // InputModeVarUnset asks for variables which are not set yet. + // InputModeVar must be set for this to have an effect. InputModeVarUnset // InputModeProvider asks for provider variables @@ -32,9 +37,21 @@ const ( InputModeStd = InputModeVar | InputModeProvider ) +var ( + // contextFailOnShadowError will cause Context operations to return + // errors when shadow operations fail. This is only used for testing. + contextFailOnShadowError = false + + // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every + // Plan operation, effectively testing the Diff DeepCopy whenever + // a Plan occurs. This is enabled for tests. + contextTestDeepCopyOnPlan = false +) + // ContextOpts are the user-configurable options to create a context with // NewContext. type ContextOpts struct { + Meta *ContextMeta Destroy bool Diff *Diff Hooks []Hook @@ -42,38 +59,62 @@ type ContextOpts struct { Parallelism int State *State StateFutureAllowed bool - Providers map[string]ResourceProviderFactory + ProviderResolver ResourceProviderResolver Provisioners map[string]ResourceProvisionerFactory + Shadow bool Targets []string Variables map[string]interface{} + // If non-nil, will apply as additional constraints on the provider + // plugins that will be requested from the provider resolver. + ProviderSHA256s map[string][]byte + SkipProviderVerify bool + UIInput UIInput } +// ContextMeta is metadata about the running context. This is information +// that this package or structure cannot determine on its own but exposes +// into Terraform in various ways. This must be provided by the Context +// initializer.
+type ContextMeta struct { + Env string // Env is the state environment +} + // Context represents all the context that Terraform needs in order to // perform operations on infrastructure. This structure is built using // NewContext. See the documentation for that. // // Extra functions on Context can be found in context_*.go files. type Context struct { - destroy bool - diff *Diff - diffLock sync.RWMutex - hooks []Hook - module *module.Tree - providers map[string]ResourceProviderFactory - provisioners map[string]ResourceProvisionerFactory - sh *stopHook - state *State - stateLock sync.RWMutex - targets []string - uiInput UIInput - variables map[string]interface{} + // Maintainer note: Anytime this struct is changed, please verify + // that newShadowContext still does the right thing. Tests should + // fail regardless but putting this note here as well. + + components contextComponentFactory + destroy bool + diff *Diff + diffLock sync.RWMutex + hooks []Hook + meta *ContextMeta + module *module.Tree + sh *stopHook + shadow bool + state *State + stateLock sync.RWMutex + targets []string + uiInput UIInput + variables map[string]interface{} l sync.Mutex // Lock acquired during any task parallelSem Semaphore providerInputConfig map[string]map[string]interface{} - runCh <-chan struct{} + providerSHA256s map[string][]byte + runLock sync.Mutex + runCond *sync.Cond + runContext context.Context + runContextCancel context.CancelFunc + shadowErr error } // NewContext creates a new Context structure. @@ -82,6 +123,13 @@ type Context struct { // should not be mutated in any way, since the pointers are copied, not // the values themselves. func NewContext(opts *ContextOpts) (*Context, error) { + // Validate the version requirement if it is given + if opts.Module != nil { + if err := CheckRequiredVersion(opts.Module); err != nil { + return nil, err + } + } + // Copy all the hooks and add our stop hook. We don't append directly // to the Config so that we're not modifying that in-place. sh := new(stopHook) @@ -97,19 +145,14 @@ func NewContext(opts *ContextOpts) (*Context, error) { // If our state is from the future, then error. Callers can avoid // this error by explicitly setting `StateFutureAllowed`. - if !opts.StateFutureAllowed && state.FromFutureTerraform() { - return nil, fmt.Errorf( - "Terraform doesn't allow running any operations against a state\n"+ - "that was written by a future Terraform version. The state is\n"+ - "reporting it is written by Terraform '%s'.\n\n"+ - "Please run at least that version of Terraform to continue.", - state.TFVersion) + if err := CheckStateVersion(state); err != nil && !opts.StateFutureAllowed { + return nil, err } // Explicitly reset our state version to our current version so that // any operations we do will write out that our latest version // has run. - state.TFVersion = Version + state.TFVersion = version.Version // Determine parallelism, default to 10. We do this both to limit // CPU pressure but also to have an extra guard against rate throttling @@ -126,7 +169,6 @@ func NewContext(opts *ContextOpts) (*Context, error) { // set by environment variables if necessary. This includes // values taken from -var-file in addition. 
variables := make(map[string]interface{}) - if opts.Module != nil { var err error variables, err = Variables(opts.Module, opts.Variables) @@ -135,58 +177,179 @@ func NewContext(opts *ContextOpts) (*Context, error) { } } + // Bind available provider plugins to the constraints in config + var providers map[string]ResourceProviderFactory + if opts.ProviderResolver != nil { + var err error + deps := ModuleTreeDependencies(opts.Module, state) + reqd := deps.AllPluginRequirements() + if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { + reqd.LockExecutables(opts.ProviderSHA256s) + } + providers, err = resourceProviderFactories(opts.ProviderResolver, reqd) + if err != nil { + return nil, err + } + } else { + providers = make(map[string]ResourceProviderFactory) + } + + diff := opts.Diff + if diff == nil { + diff = &Diff{} + } + return &Context{ - destroy: opts.Destroy, - diff: opts.Diff, - hooks: hooks, - module: opts.Module, - providers: opts.Providers, - provisioners: opts.Provisioners, - state: state, - targets: opts.Targets, - uiInput: opts.UIInput, - variables: variables, + components: &basicComponentFactory{ + providers: providers, + provisioners: opts.Provisioners, + }, + destroy: opts.Destroy, + diff: diff, + hooks: hooks, + meta: opts.Meta, + module: opts.Module, + shadow: opts.Shadow, + state: state, + targets: opts.Targets, + uiInput: opts.UIInput, + variables: variables, parallelSem: NewSemaphore(par), providerInputConfig: make(map[string]map[string]interface{}), + providerSHA256s: opts.ProviderSHA256s, sh: sh, }, nil } type ContextGraphOpts struct { + // If true, validates the graph structure (checks for cycles). Validate bool - Verbose bool -} -// Graph returns the graph for this config. -func (c *Context) Graph(g *ContextGraphOpts) (*Graph, error) { - return c.graphBuilder(g).Build(RootModulePath) + // Legacy graphs only: won't prune the graph + Verbose bool } -// GraphBuilder returns the GraphBuilder that will be used to create -// the graphs for this context. -func (c *Context) graphBuilder(g *ContextGraphOpts) GraphBuilder { - // TODO test - providers := make([]string, 0, len(c.providers)) - for k, _ := range c.providers { - providers = append(providers, k) - } +// Graph returns the graph used for the given operation type. +// +// The most extensive or complex graph type is GraphTypePlan. 
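Callers now select the graph for their operation explicitly via GraphType (defined in context_graph_type.go later in this patch). A hedged sketch, assuming ctx came from terraform.NewContext:

package example

import "github.com/hashicorp/terraform/terraform"

// buildPlanGraph builds the plan graph the same way Plan does internally.
// Validate: true checks graph structure (for example, cycles) at build time.
func buildPlanGraph(ctx *terraform.Context) (*terraform.Graph, error) {
	return ctx.Graph(terraform.GraphTypePlan, &terraform.ContextGraphOpts{Validate: true})
}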
+func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) { + if opts == nil { + opts = &ContextGraphOpts{Validate: true} + } + + log.Printf("[INFO] terraform: building graph: %s", typ) + switch typ { + case GraphTypeApply: + return (&ApplyGraphBuilder{ + Module: c.module, + Diff: c.diff, + State: c.state, + Providers: c.components.ResourceProviders(), + Provisioners: c.components.ResourceProvisioners(), + Targets: c.targets, + Destroy: c.destroy, + Validate: opts.Validate, + }).Build(RootModulePath) + + case GraphTypeInput: + // The input graph is just a slightly modified plan graph + fallthrough + case GraphTypeValidate: + // The validate graph is just a slightly modified plan graph + fallthrough + case GraphTypePlan: + // Create the plan graph builder + p := &PlanGraphBuilder{ + Module: c.module, + State: c.state, + Providers: c.components.ResourceProviders(), + Targets: c.targets, + Validate: opts.Validate, + } - provisioners := make([]string, 0, len(c.provisioners)) - for k, _ := range c.provisioners { - provisioners = append(provisioners, k) + // Some special cases for other graph types shared with plan currently + var b GraphBuilder = p + switch typ { + case GraphTypeInput: + b = InputGraphBuilder(p) + case GraphTypeValidate: + // We need to set the provisioners so those can be validated + p.Provisioners = c.components.ResourceProvisioners() + + b = ValidateGraphBuilder(p) + } + + return b.Build(RootModulePath) + + case GraphTypePlanDestroy: + return (&DestroyPlanGraphBuilder{ + Module: c.module, + State: c.state, + Targets: c.targets, + Validate: opts.Validate, + }).Build(RootModulePath) + + case GraphTypeRefresh: + return (&RefreshGraphBuilder{ + Module: c.module, + State: c.state, + Providers: c.components.ResourceProviders(), + Targets: c.targets, + Validate: opts.Validate, + }).Build(RootModulePath) } - return &BuiltinGraphBuilder{ - Root: c.module, - Diff: c.diff, - Providers: providers, - Provisioners: provisioners, - State: c.state, - Targets: c.targets, - Destroy: c.destroy, - Validate: g.Validate, - Verbose: g.Verbose, + return nil, fmt.Errorf("unknown graph type: %s", typ) +} + +// ShadowError returns any errors caught during a shadow operation. +// +// A shadow operation is an operation run in parallel to a real operation +// that performs the same tasks using new logic on copied state. The results +// are compared to ensure that the new logic works the same as the old logic. +// The shadow never affects the real operation or return values. +// +// The results of the shadow operation are only available through this function +// call after a real operation is complete. +// +// For API consumers of Context, you can safely ignore this function +// completely if you have no interest in helping report experimental feature +// errors to Terraform maintainers. Otherwise, please call this function +// after every operation and report this to the user. +// +// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect +// the real state or result of a real operation. They are purely informational +// to assist in future Terraform versions being more stable. Please message +// this effectively to the end user. +// +// This must be called only when no other operation is running (refresh, +// plan, etc.). The result can be used in parallel to any other operation +// running. +func (c *Context) ShadowError() error { + return c.shadowErr +} + +// State returns a copy of the current state associated with this context.
+// +// This cannot safely be called in parallel with any other Context function. +func (c *Context) State() *State { + return c.state.DeepCopy() +} + +// Interpolater returns an Interpolater built on a copy of the state +// that can be used to test interpolation values. +func (c *Context) Interpolater() *Interpolater { + var varLock sync.Mutex + var stateLock sync.RWMutex + return &Interpolater{ + Operation: walkApply, + Meta: c.meta, + Module: c.module, + State: c.state.DeepCopy(), + StateLock: &stateLock, + VariableValues: c.variables, + VariableValuesLock: &varLock, } } @@ -194,8 +357,7 @@ func (c *Context) graphBuilder(g *ContextGraphOpts) GraphBuilder { // This modifies the configuration in-place, so asking for Input twice // may result in different UI output showing different current values. func (c *Context) Input(mode InputMode) error { - v := c.acquireRun() - defer c.releaseRun(v) + defer c.acquireRun("input")() if mode&InputModeVar != 0 { // Walk the variables first for the root module. We walk them in @@ -294,7 +456,7 @@ func (c *Context) Input(mode InputMode) error { if mode&InputModeProvider != 0 { // Build the graph - graph, err := c.Graph(&ContextGraphOpts{Validate: true}) + graph, err := c.Graph(GraphTypeInput, nil) if err != nil { return err } @@ -311,29 +473,44 @@ func (c *Context) Input(mode InputMode) error { // Apply applies the changes represented by this context and returns // the resulting state. // -// In addition to returning the resulting state, this context is updated -// with the latest state. +// Even in the case an error is returned, the state may be returned and will +// potentially be partially updated. In addition to returning the resulting +// state, this context is updated with the latest state. +// +// If the state is required after an error, the caller should call +// Context.State, rather than rely on the return value. +// +// TODO: Apply and Refresh should either always return a state, or rely on the +// State() method. Currently the helper/resource testing framework relies +// on the absence of a returned state to determine if Destroy can be +// called, so that will need to be refactored before this can be changed. func (c *Context) Apply() (*State, error) { - v := c.acquireRun() - defer c.releaseRun(v) + defer c.acquireRun("apply")() + + // Check there are no empty target parameter values + for _, target := range c.targets { + if target == "" { + return nil, fmt.Errorf("Target parameter must not have empty value") + } + } // Copy our own state c.state = c.state.DeepCopy() - // Build the graph - graph, err := c.Graph(&ContextGraphOpts{Validate: true}) + // Build the graph. + graph, err := c.Graph(GraphTypeApply, nil) if err != nil { return nil, err } - // Do the walk - var walker *ContextGraphWalker + // Determine the operation + operation := walkApply if c.destroy { - walker, err = c.walk(graph, walkDestroy) - } else { - walker, err = c.walk(graph, walkApply) + operation = walkDestroy } + // Walk the graph + walker, err := c.walk(graph, operation) if len(walker.ValidationErrors) > 0 { err = multierror.Append(err, walker.ValidationErrors...) } @@ -352,19 +529,29 @@ func (c *Context) Apply() (*State, error) { // Plan also updates the diff of this context to be the diff generated // by the plan, so Apply can be called after. 
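Taken together with the Apply notes above, the documented calling pattern looks roughly like this (a sketch; ctx is assumed to come from terraform.NewContext):

package example

import (
	"log"

	"github.com/hashicorp/terraform/terraform"
)

// planAndApply sketches the Plan-then-Apply flow. Per the comments above,
// Apply may return a partially-updated state alongside an error, and
// Context.State is the reliable way to read it afterwards.
func planAndApply(ctx *terraform.Context) *terraform.State {
	if _, err := ctx.Plan(); err != nil {
		log.Fatalf("plan: %s", err)
	}

	state, err := ctx.Apply()
	if err != nil {
		log.Printf("apply failed: %s (state may be partially updated)", err)
		state = ctx.State()
	}
	return state
}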
func (c *Context) Plan() (*Plan, error) { - v := c.acquireRun() - defer c.releaseRun(v) + defer c.acquireRun("plan")() + + // Check there are no empty target parameter values + for _, target := range c.targets { + if target == "" { + return nil, fmt.Errorf("Target parameter must not have empty value") + } + } p := &Plan{ Module: c.module, Vars: c.variables, State: c.state, Targets: c.targets, + + TerraformVersion: version.String(), + ProviderSHA256s: c.providerSHA256s, } var operation walkOperation if c.destroy { operation = walkPlanDestroy + p.Destroy = true } else { // Set our state to be something temporary. We do this so that // the plan can update a fake state so that variables work, then @@ -389,8 +576,12 @@ func (c *Context) Plan() (*Plan, error) { c.diff.init() c.diffLock.Unlock() - // Build the graph - graph, err := c.Graph(&ContextGraphOpts{Validate: true}) + // Build the graph. + graphType := GraphTypePlan + if c.destroy { + graphType = GraphTypePlanDestroy + } + graph, err := c.Graph(graphType, nil) if err != nil { return nil, err } @@ -402,11 +593,29 @@ func (c *Context) Plan() (*Plan, error) { } p.Diff = c.diff - // Now that we have a diff, we can build the exact graph that Apply will use - // and catch any possible cycles during the Plan phase. - if _, err := c.Graph(&ContextGraphOpts{Validate: true}); err != nil { - return nil, err - } + // If this is true, it means we're running unit tests. In this case, + // we perform a deep copy just to ensure that all context tests also + // test that a diff is copy-able. This will panic if it fails. This + // is enabled during unit tests. + // + // This should never be true during production usage, but even if it is, + // it can't do any real harm. + if contextTestDeepCopyOnPlan { + p.Diff.DeepCopy() + } + + /* + // We don't do the reverification during the new destroy plan because + // it will use a different apply process. + if X_legacyGraph { + // Now that we have a diff, we can build the exact graph that Apply will use + // and catch any possible cycles during the Plan phase. + if _, err := c.Graph(GraphTypeLegacy, nil); err != nil { + return nil, err + } + } + */ + var errs error if len(walker.ValidationErrors) > 0 { errs = multierror.Append(errs, walker.ValidationErrors...) @@ -418,17 +627,16 @@ func (c *Context) Plan() (*Plan, error) { // to their latest state. This will update the state that this context // works with, along with returning it. // -// Even in the case an error is returned, the state will be returned and +// Even in the case an error is returned, the state may be returned and // will potentially be partially updated. func (c *Context) Refresh() (*State, error) { - v := c.acquireRun() - defer c.releaseRun(v) + defer c.acquireRun("refresh")() // Copy our own state c.state = c.state.DeepCopy() - // Build the graph - graph, err := c.Graph(&ContextGraphOpts{Validate: true}) + // Build the graph. + graph, err := c.Graph(GraphTypeRefresh, nil) if err != nil { return nil, err } @@ -448,68 +656,84 @@ func (c *Context) Refresh() (*State, error) { // // Stop will block until the task completes. 
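Because Stop blocks until the in-flight operation winds down, a typical caller runs it from a signal-handling goroutine; a sketch:

package example

import (
	"os"
	"os/signal"

	"github.com/hashicorp/terraform/terraform"
)

// applyWithInterrupt sketches cooperative cancellation: Stop is safe to
// call from another goroutine and returns once the operation has stopped.
func applyWithInterrupt(ctx *terraform.Context) (*terraform.State, error) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	defer signal.Stop(c)

	go func() {
		<-c // on the first interrupt, ask Terraform to stop gracefully
		ctx.Stop()
	}()

	return ctx.Apply()
}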
func (c *Context) Stop() { + log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence") + c.l.Lock() - ch := c.runCh + defer c.l.Unlock() + + // If we're running, then stop + if c.runContextCancel != nil { + log.Printf("[WARN] terraform: run context exists, stopping") + + // Tell the hook we want to stop + c.sh.Stop() - // If we aren't running, then just return - if ch == nil { - c.l.Unlock() - return + // Stop the context + c.runContextCancel() + c.runContextCancel = nil } - // Tell the hook we want to stop - c.sh.Stop() + // Grab the condition var before we exit + if cond := c.runCond; cond != nil { + cond.Wait() + } - // Wait for us to stop - c.l.Unlock() - <-ch + log.Printf("[WARN] terraform: stop complete") } // Validate validates the configuration and returns any warnings or errors. -func (c *Context) Validate() ([]string, []error) { - v := c.acquireRun() - defer c.releaseRun(v) +func (c *Context) Validate() tfdiags.Diagnostics { + defer c.acquireRun("validate")() - var errs error + var diags tfdiags.Diagnostics // Validate the configuration itself - if err := c.module.Validate(); err != nil { - errs = multierror.Append(errs, err) - } + diags = diags.Append(c.module.Validate()) // This only needs to be done for the root module, since inter-module // variables are validated in the module tree. if config := c.module.Config(); config != nil { // Validate the user variables - if err := smcUserVariables(config, c.variables); len(err) > 0 { - errs = multierror.Append(errs, err...) + for _, err := range smcUserVariables(config, c.variables) { + diags = diags.Append(err) } } // If we have errors at this point, the graphing has no chance, // so just bail early. - if errs != nil { - return nil, []error{errs} + if diags.HasErrors() { + return diags } // Build the graph so we can walk it and run Validate on nodes. // We also validate the graph generated here, but this graph doesn't // necessarily match the graph that Plan will generate, so we'll validate the // graph again later after Planning. - graph, err := c.Graph(&ContextGraphOpts{Validate: true}) + graph, err := c.Graph(GraphTypeValidate, nil) if err != nil { - return nil, []error{err} + diags = diags.Append(err) + return diags } // Walk walker, err := c.walk(graph, walkValidate) if err != nil { - return nil, multierror.Append(errs, err).Errors + diags = diags.Append(err) + } + + sort.Strings(walker.ValidationWarnings) + sort.Slice(walker.ValidationErrors, func(i, j int) bool { + return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error() + }) + + for _, warn := range walker.ValidationWarnings { + diags = diags.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range walker.ValidationErrors { + diags = diags.Append(err) } - // Return the result - rerrs := multierror.Append(errs, walker.ValidationErrors...) - return walker.ValidationWarnings, rerrs.Errors + return diags } // Module returns the module tree associated with this context. @@ -529,38 +753,154 @@ func (c *Context) SetVariable(k string, v interface{}) { c.variables[k] = v } -func (c *Context) acquireRun() chan<- struct{} { +func (c *Context) acquireRun(phase string) func() { + // With the run lock held, grab the context lock to make changes + // to the run context. 
c.l.Lock() defer c.l.Unlock() - // Wait for no channel to exist - for c.runCh != nil { - c.l.Unlock() - ch := c.runCh - <-ch - c.l.Lock() + // Wait until we're no longer running + for c.runCond != nil { + c.runCond.Wait() } - ch := make(chan struct{}) - c.runCh = ch - return ch + // Build our lock + c.runCond = sync.NewCond(&c.l) + + // Setup debugging + dbug.SetPhase(phase) + + // Create a new run context + c.runContext, c.runContextCancel = context.WithCancel(context.Background()) + + // Reset the stop hook so we're not stopped + c.sh.Reset() + + // Reset the shadow errors + c.shadowErr = nil + + return c.releaseRun } -func (c *Context) releaseRun(ch chan<- struct{}) { +func (c *Context) releaseRun() { + // Grab the context lock so that we can make modifications to fields c.l.Lock() defer c.l.Unlock() - close(ch) - c.runCh = nil - c.sh.Reset() + // setting the phase to "INVALID" lets us easily detect if we have + // operations happening outside of a run, or we missed setting the proper + // phase + dbug.SetPhase("INVALID") + + // End our run. We check if runContext is non-nil because it can be + // set to nil if it was cancelled via Stop() + if c.runContextCancel != nil { + c.runContextCancel() + } + + // Unlock everyone waiting on our condition + cond := c.runCond + c.runCond = nil + cond.Broadcast() + + // Unset the context + c.runContext = nil } -func (c *Context) walk( - graph *Graph, operation walkOperation) (*ContextGraphWalker, error) { - // Walk the graph +func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) { + // Keep track of the "real" context which is the context that does + // the real work: talking to real providers, modifying real state, etc. + realCtx := c + log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) - walker := &ContextGraphWalker{Context: c, Operation: operation} - return walker, graph.Walk(walker) + + walker := &ContextGraphWalker{ + Context: realCtx, + Operation: operation, + StopContext: c.runContext, + } + + // Watch for a stop so we can call the provider Stop() API. + watchStop, watchWait := c.watchStop(walker) + + // Walk the real graph; this will block until it completes + realErr := graph.Walk(walker) + + // Close the channel so the watcher stops, and wait for it to return. + close(watchStop) + <-watchWait + + return walker, realErr +} + +// watchStop immediately returns a `stop` and a `wait` chan after dispatching +// the watchStop goroutine. This will watch the runContext for cancellation and +// stop the providers accordingly. When the watch is no longer needed, the +// `stop` chan should be closed before waiting on the `wait` chan. +// The `wait` chan is important, because without synchronizing with the end of +// the watchStop goroutine, the runContext may also be closed during the select, +// incorrectly causing providers to be stopped. Even if the graph walk is done +// at that point, stopping a provider permanently cancels its StopContext which +// can cause later actions to fail. +func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) { + stop := make(chan struct{}) + wait := make(chan struct{}) + + // get the runContext cancellation channel now, because releaseRun will + // write to the runContext field. + done := c.runContext.Done() + + go func() { + defer close(wait) + // Wait for a stop or completion + select { + case <-done: + // done means the context was canceled, so we need to try and stop + // providers. + case <-stop: + // our own stop channel was closed.
+ return + } + + // If we're here, we're stopped; trigger the call. + + { + // Copy the providers so that a misbehaved blocking Stop doesn't + // completely hang Terraform. + walker.providerLock.Lock() + ps := make([]ResourceProvider, 0, len(walker.providerCache)) + for _, p := range walker.providerCache { + ps = append(ps, p) + } + defer walker.providerLock.Unlock() + + for _, p := range ps { + // We ignore the error for now since there isn't any reasonable + // action to take if there is an error here, since the stop is still + // advisory: Terraform will exit once the graph node completes. + p.Stop() + } + } + + { + // Call stop on all the provisioners + walker.provisionerLock.Lock() + ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache)) + for _, p := range walker.provisionerCache { + ps = append(ps, p) + } + defer walker.provisionerLock.Unlock() + + for _, p := range ps { + // We ignore the error for now since there isn't any reasonable + // action to take if there is an error here, since the stop is still + // advisory: Terraform will exit once the graph node completes. + p.Stop() + } + } + }() + + return stop, wait } // parseVariableAsHCL parses the value of a single variable as would have been specified diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go new file mode 100644 index 00000000..6f507445 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go @@ -0,0 +1,65 @@ +package terraform + +import ( + "fmt" +) + +// contextComponentFactory is the interface that Context uses +// to initialize various components such as providers and provisioners. +// This factory gets more information than the raw maps used to initialize +// a Context. This information is used for debugging. +type contextComponentFactory interface { + // ResourceProvider creates a new ResourceProvider with the given + // type. The "uid" is a unique identifier for this provider being + // initialized that can be used for internal tracking. + ResourceProvider(typ, uid string) (ResourceProvider, error) + ResourceProviders() []string + + // ResourceProvisioner creates a new ResourceProvisioner with the + // given type. The "uid" is a unique identifier for this provisioner + // being initialized that can be used for internal tracking. + ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) + ResourceProvisioners() []string +} + +// basicComponentFactory just calls a factory from a map directly.
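The factory map that basicComponentFactory (below) wraps is what the new ProviderResolver field in ContextOpts ultimately feeds. A hedged wiring sketch; ResourceProviderResolverFixed is assumed here as the fixed-map resolver helper from this package vintage, and any other ResourceProviderResolver implementation would work the same way:

package example

import (
	"github.com/hashicorp/terraform/config/module"
	"github.com/hashicorp/terraform/terraform"
)

// newContext sketches the resolver-based ContextOpts wiring that replaces
// the old Providers map field.
func newContext(mod *module.Tree, providers map[string]terraform.ResourceProviderFactory) (*terraform.Context, error) {
	return terraform.NewContext(&terraform.ContextOpts{
		Module:           mod,
		ProviderResolver: terraform.ResourceProviderResolverFixed(providers),
		Meta:             &terraform.ContextMeta{Env: "default"},
	})
}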
+type basicComponentFactory struct { + providers map[string]ResourceProviderFactory + provisioners map[string]ResourceProvisionerFactory +} + +func (c *basicComponentFactory) ResourceProviders() []string { + result := make([]string, 0, len(c.providers)) + for k, _ := range c.providers { + result = append(result, k) + } + + return result +} + +func (c *basicComponentFactory) ResourceProvisioners() []string { + result := make([]string, 0, len(c.provisioners)) + for k, _ := range c.provisioners { + result = append(result, k) + } + + return result +} + +func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) { + f, ok := c.providers[typ] + if !ok { + return nil, fmt.Errorf("unknown provider %q", typ) + } + + return f() +} + +func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) { + f, ok := c.provisioners[typ] + if !ok { + return nil, fmt.Errorf("unknown provisioner %q", typ) + } + + return f() +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go new file mode 100644 index 00000000..084f0105 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go @@ -0,0 +1,32 @@ +package terraform + +//go:generate stringer -type=GraphType context_graph_type.go + +// GraphType is an enum of the type of graph to create with a Context. +// The values of the constants may change so they shouldn't be depended on; +// always use the constant name. +type GraphType byte + +const ( + GraphTypeInvalid GraphType = 0 + GraphTypeLegacy GraphType = iota + GraphTypeRefresh + GraphTypePlan + GraphTypePlanDestroy + GraphTypeApply + GraphTypeInput + GraphTypeValidate +) + +// GraphTypeMap is a mapping of human-readable string to GraphType. This +// is useful to use as the mechanism for human input for configurable +// graph types. +var GraphTypeMap = map[string]GraphType{ + "apply": GraphTypeApply, + "input": GraphTypeInput, + "plan": GraphTypePlan, + "plan-destroy": GraphTypePlanDestroy, + "refresh": GraphTypeRefresh, + "legacy": GraphTypeLegacy, + "validate": GraphTypeValidate, +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go index 20969ae0..e9401431 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go @@ -23,6 +23,9 @@ type ImportTarget struct { // ID is the ID of the resource to import. This is resource-specific. ID string + + // Provider is the optional provider to use for this import. + Provider string } // Import takes already-created external resources and brings them @@ -37,23 +40,23 @@ type ImportTarget struct { // imported. func (c *Context) Import(opts *ImportOpts) (*State, error) { // Hold a lock since we can modify our own state here - v := c.acquireRun() - defer c.releaseRun(v) + defer c.acquireRun("import")() // Copy our own state c.state = c.state.DeepCopy() - // Get supported providers (for the graph builder) - providers := make([]string, 0, len(c.providers)) - for k, _ := range c.providers { - providers = append(providers, k) + // If no module is given, default to the module configured with + // the Context.
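As an aside, the Import entry point shown in this hunk is driven roughly like so. A sketch: the Addr field is assumed from the upstream ImportTarget definition (it is not shown in this hunk), and the address and ID values are made up:

package example

import (
	"log"

	"github.com/hashicorp/terraform/terraform"
)

// importResource sketches driving Context.Import with a single target.
func importResource(ctx *terraform.Context) {
	state, err := ctx.Import(&terraform.ImportOpts{
		Targets: []*terraform.ImportTarget{
			{
				Addr: "aws_instance.example",     // hypothetical resource address
				ID:   "i-0123456789abcdef0",      // hypothetical remote ID
			},
		},
	})
	if err != nil {
		log.Fatalf("import: %s", err)
	}
	log.Printf("state after import: %s", state)
}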
+ module := opts.Module + if module == nil { + module = c.module } // Initialize our graph builder builder := &ImportGraphBuilder{ ImportTargets: opts.Targets, - Module: opts.Module, - Providers: providers, + Module: module, + Providers: c.components.ResourceProviders(), } // Build the graph! diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go new file mode 100644 index 00000000..265339f6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/debug.go @@ -0,0 +1,523 @@ +package terraform + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" +) + +// DebugInfo is the global handler for writing the debug archive. All methods +// are safe to call concurrently. Setting DebugInfo to nil will disable writing +// the debug archive. All methods are safe to call on the nil value. +var dbug *debugInfo + +// SetDebugInfo initializes the debug handler with a backing file in the +// provided directory. This must be called before any other terraform package +// operations or not at all. Once this is called, CloseDebugInfo should be +// called before program exit. +func SetDebugInfo(path string) error { + if os.Getenv("TF_DEBUG") == "" { + return nil + } + + di, err := newDebugInfoFile(path) + if err != nil { + return err + } + + dbug = di + return nil +} + +// CloseDebugInfo is the exported interface to Close the debug info handler. +// The debug handler needs to be closed before program exit, so we export this +// function to be deferred in the appropriate entrypoint for our executable. +func CloseDebugInfo() error { + return dbug.Close() +} + +// newDebugInfoFile initializes the global debug handler with a backing file in +// the provided directory. +func newDebugInfoFile(dir string) (*debugInfo, error) { + err := os.MkdirAll(dir, 0755) + if err != nil { + return nil, err + } + + // FIXME: not guaranteed unique, but good enough for now + name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999")) + archivePath := filepath.Join(dir, name+".tar.gz") + + f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666) + if err != nil { + return nil, err + } + return newDebugInfo(name, f) +} + +// newDebugInfo initializes the global debug handler. +func newDebugInfo(name string, w io.Writer) (*debugInfo, error) { + gz := gzip.NewWriter(w) + + d := &debugInfo{ + name: name, + w: w, + gz: gz, + tar: tar.NewWriter(gz), + } + + // create the subdirs we need + topHdr := &tar.Header{ + Name: name, + Typeflag: tar.TypeDir, + Mode: 0755, + } + graphsHdr := &tar.Header{ + Name: name + "/graphs", + Typeflag: tar.TypeDir, + Mode: 0755, + } + err := d.tar.WriteHeader(topHdr) + // if the first errors, the second will too + err = d.tar.WriteHeader(graphsHdr) + if err != nil { + return nil, err + } + + return d, nil +} + +// debugInfo provides various methods for writing debug information to a +// central archive. The debugInfo struct should be initialized once before any +// output is written, and Close should be called before program exit. All +// exported methods on debugInfo will be safe for concurrent use. The exported +// methods are also all safe to call on a nil pointer, so that there is no need +// for conditional blocks before writing debug information. +// +// Each write operation done by the debugInfo will flush the gzip.Writer and +// tar.Writer, and call Sync() or Flush() on the output writer as needed.
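The debug machinery introduced here is opt-in; a sketch of the intended entrypoint wiring (the directory name is arbitrary):

package main

import (
	"log"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	// SetDebugInfo is a no-op unless TF_DEBUG is set in the environment.
	if err := terraform.SetDebugInfo("./debug"); err != nil {
		log.Fatal(err)
	}
	// The archive must be finalized before exit.
	defer terraform.CloseDebugInfo()

	// ... run Context operations; hook and graph output is recorded into
	// ./debug/debug-<timestamp>.tar.gz as each phase runs.
}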
This +// ensures that as much data as possible is written to storage in the event of +// a crash. The append format of the tar file, and the stream format of the +// gzip writer allow easy recovery of the data in the event that the debugInfo +// is not closed before program exit. +type debugInfo struct { + sync.Mutex + + // archive root directory name + name string + + // current operation phase + phase string + + // step is a monotonic counter for recording the order of operations + step int + + // flag to protect Close() + closed bool + + // the debug log output is in a tar.gz format, written to the io.Writer w + w io.Writer + gz *gzip.Writer + tar *tar.Writer +} + +// Set the name of the current operational phase in the debug handler. Each file +// in the archive will contain the name of the phase in which it was created, +// i.e. "input", "apply", "plan", "refresh", "validate" +func (d *debugInfo) SetPhase(phase string) { + if d == nil { + return + } + d.Lock() + defer d.Unlock() + + d.phase = phase +} + +// Close the debugInfo, finalizing the data in storage. This closes the +// tar.Writer, the gzip.Writer, and if the output writer is an io.Closer, it is +// also closed. +func (d *debugInfo) Close() error { + if d == nil { + return nil + } + + d.Lock() + defer d.Unlock() + + if d.closed { + return nil + } + d.closed = true + + d.tar.Close() + d.gz.Close() + + if c, ok := d.w.(io.Closer); ok { + return c.Close() + } + return nil +} + +// debugBuffer is an io.WriteCloser that will write itself to the debug +// archive when closed. +type debugBuffer struct { + debugInfo *debugInfo + name string + buf bytes.Buffer +} + +func (b *debugBuffer) Write(d []byte) (int, error) { + return b.buf.Write(d) +} + +func (b *debugBuffer) Close() error { + return b.debugInfo.WriteFile(b.name, b.buf.Bytes()) +} + +// ioutil only has a no-op ReadCloser, so we supply our own no-op WriteCloser +type nopWriteCloser struct{} + +func (nopWriteCloser) Write([]byte) (int, error) { return 0, nil } +func (nopWriteCloser) Close() error { return nil } + +// NewFileWriter returns an io.WriteCloser that will be buffered and written to +// the debug archive when closed. +func (d *debugInfo) NewFileWriter(name string) io.WriteCloser { + if d == nil { + return nopWriteCloser{} + } + + return &debugBuffer{ + debugInfo: d, + name: name, + } +} + +type syncer interface { + Sync() error +} + +type flusher interface { + Flush() error +} + +// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called +// on the output writer if they are available. +func (d *debugInfo) flush() { + d.tar.Flush() + d.gz.Flush() + + if f, ok := d.w.(flusher); ok { + f.Flush() + } + + if s, ok := d.w.(syncer); ok { + s.Sync() + } +} + +// WriteFile writes data as a single file to the debug archive. +func (d *debugInfo) WriteFile(name string, data []byte) error { + if d == nil { + return nil + } + + d.Lock() + defer d.Unlock() + return d.writeFile(name, data) +} + +func (d *debugInfo) writeFile(name string, data []byte) error { + defer d.flush() + path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name) + d.step++ + + hdr := &tar.Header{ + Name: path, + Mode: 0644, + Size: int64(len(data)), + } + err := d.tar.WriteHeader(hdr) + if err != nil { + return err + } + + _, err = d.tar.Write(data) + return err +} + +// DebugHook implements all methods of the terraform.Hook interface, and writes +// the arguments to a file in the archive. When a suitable format for the +// argument isn't available, the argument is encoded using json.Marshal.
If the +// debug handler is nil, all DebugHook methods are noop, so no time is spent in +// marshaling the data structures. +type DebugHook struct{} + +func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + if is != nil { + buf.WriteString(is.String() + "\n") + } + + idCopy, err := id.Copy() + if err != nil { + return HookActionContinue, err + } + js, err := json.MarshalIndent(idCopy, "", " ") + if err != nil { + return HookActionContinue, err + } + buf.Write(js) + + dbug.WriteFile("hook-PreApply", buf.Bytes()) + + return HookActionContinue, nil +} + +func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + if is != nil { + buf.WriteString(is.String() + "\n") + } + + if err != nil { + buf.WriteString(err.Error()) + } + + dbug.WriteFile("hook-PostApply", buf.Bytes()) + + return HookActionContinue, nil +} + +func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + if is != nil { + buf.WriteString(is.String()) + buf.WriteString("\n") + } + dbug.WriteFile("hook-PreDiff", buf.Bytes()) + + return HookActionContinue, nil +} + +func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + idCopy, err := id.Copy() + if err != nil { + return HookActionContinue, err + } + js, err := json.MarshalIndent(idCopy, "", " ") + if err != nil { + return HookActionContinue, err + } + buf.Write(js) + + dbug.WriteFile("hook-PostDiff", buf.Bytes()) + + return HookActionContinue, nil +} + +func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + if is != nil { + buf.WriteString(is.String()) + buf.WriteString("\n") + } + dbug.WriteFile("hook-PreProvisionResource", buf.Bytes()) + + return HookActionContinue, nil +} + +func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId()) + buf.WriteString("\n") + } + + if is != nil { + buf.WriteString(is.String()) + buf.WriteString("\n") + } + dbug.WriteFile("hook-PostProvisionResource", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId()) + buf.WriteString("\n") + } + buf.WriteString(s + "\n") + + dbug.WriteFile("hook-PreProvision", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + 
} + buf.WriteString(s + "\n") + + dbug.WriteFile("hook-PostProvision", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) { + if dbug == nil { + return + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId()) + buf.WriteString("\n") + } + buf.WriteString(s1 + "\n") + buf.WriteString(s2 + "\n") + + dbug.WriteFile("hook-ProvisionOutput", buf.Bytes()) +} + +func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + if is != nil { + buf.WriteString(is.String()) + buf.WriteString("\n") + } + dbug.WriteFile("hook-PreRefresh", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId()) + buf.WriteString("\n") + } + + if is != nil { + buf.WriteString(is.String()) + buf.WriteString("\n") + } + dbug.WriteFile("hook-PostRefresh", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + if ii != nil { + buf.WriteString(ii.HumanId()) + buf.WriteString("\n") + } + buf.WriteString(s + "\n") + + dbug.WriteFile("hook-PreImportState", buf.Bytes()) + return HookActionContinue, nil +} + +func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) { + if dbug == nil { + return HookActionContinue, nil + } + + var buf bytes.Buffer + + if ii != nil { + buf.WriteString(ii.HumanId() + "\n") + } + + for _, is := range iss { + if is != nil { + buf.WriteString(is.String() + "\n") + } + } + dbug.WriteFile("hook-PostImportState", buf.Bytes()) + return HookActionContinue, nil +} + +// skip logging this for now, since it could be huge +func (*DebugHook) PostStateUpdate(*State) (HookAction, error) { + return HookActionContinue, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go index 351a3c48..d6dc5506 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go @@ -9,6 +9,8 @@ import ( "sort" "strings" "sync" + + "github.com/mitchellh/copystructure" ) // DiffChangeType is an enum with the kind of changes a diff has planned. @@ -21,15 +23,48 @@ const ( DiffUpdate DiffDestroy DiffDestroyCreate + + // DiffRefresh is only used in the UI for displaying diffs. + // Managed resource reads never appear in plan, and when data source + // reads appear they are represented as DiffCreate in core before + // transforming to DiffRefresh in the UI layer. + DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion ) -// Diff trackes the changes that are necessary to apply a configuration +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// Diff tracks the changes that are necessary to apply a configuration // to an existing infrastructure. type Diff struct { // Modules contains all the modules that have a diff Modules []*ModuleDiff } +// Prune cleans out unused structures in the diff without affecting +// the behavior of the diff at all. 
+// +// This is not safe to call concurrently. This is safe to call on a +// nil Diff. +func (d *Diff) Prune() { + if d == nil { + return + } + + // Prune all empty modules + newModules := make([]*ModuleDiff, 0, len(d.Modules)) + for _, m := range d.Modules { + // If the module isn't empty, we keep it + if !m.Empty() { + newModules = append(newModules, m) + } + } + if len(newModules) == 0 { + newModules = nil + } + d.Modules = newModules +} + // AddModule adds the module with the given path to the diff. // // This should be the preferred method to add module diffs since it @@ -70,6 +105,10 @@ func (d *Diff) RootModule() *ModuleDiff { // Empty returns true if the diff has no changes. func (d *Diff) Empty() bool { + if d == nil { + return true + } + for _, m := range d.Modules { if !m.Empty() { return false @@ -79,6 +118,48 @@ func (d *Diff) Empty() bool { return true } +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +func (d *Diff) Equal(d2 *Diff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Sort the modules + sort.Sort(moduleDiffSort(d.Modules)) + sort.Sort(moduleDiffSort(d2.Modules)) + + // Copy since we have to modify the module destroy flag to false so + // we don't compare that. TODO: delete this when we get rid of the + // destroy flag on modules. + dCopy := d.DeepCopy() + d2Copy := d2.DeepCopy() + for _, m := range dCopy.Modules { + m.Destroy = false + } + for _, m := range d2Copy.Modules { + m.Destroy = false + } + + // Use DeepEqual + return reflect.DeepEqual(dCopy, d2Copy) +} + +// DeepCopy performs a deep copy of all parts of the Diff, making the +// resulting Diff safe to use without modifying this one. +func (d *Diff) DeepCopy() *Diff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*Diff) +} + func (d *Diff) String() string { var buf bytes.Buffer @@ -164,6 +245,10 @@ func (d *ModuleDiff) ChangeType() DiffChangeType { // Empty returns true if the diff has no changes within this module. func (d *ModuleDiff) Empty() bool { + if d.Destroy { + return false + } + if len(d.Resources) == 0 { return true } @@ -202,10 +287,6 @@ func (d *ModuleDiff) IsRoot() bool { func (d *ModuleDiff) String() string { var buf bytes.Buffer - if d.Destroy { - buf.WriteString("DESTROY MODULE\n") - } - names := make([]string, 0, len(d.Resources)) for name, _ := range d.Resources { names = append(names, name) @@ -219,16 +300,22 @@ func (d *ModuleDiff) String() string { switch { case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): crud = "DESTROY/CREATE" - case rdiff.GetDestroy(): + case rdiff.GetDestroy() || rdiff.GetDestroyDeposed(): crud = "DESTROY" case rdiff.RequiresNew(): crud = "CREATE" } + extra := "" + if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() { + extra = " (deposed only)" + } + buf.WriteString(fmt.Sprintf( - "%s: %s\n", + "%s: %s%s\n", crud, - name)) + name, + extra)) keyLen := 0 rdiffAttrs := rdiff.CopyAttributes() @@ -284,9 +371,19 @@ type InstanceDiff struct { mu sync.Mutex Attributes map[string]*ResourceAttrDiff Destroy bool + DestroyDeposed bool DestroyTainted bool + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by Terraform core. 
It is + // meant to be used for additional data a resource may want to pass through. + // The value here must only contain Go primitives and collections. + Meta map[string]interface{} } +func (d *InstanceDiff) Lock() { d.mu.Lock() } +func (d *InstanceDiff) Unlock() { d.mu.Unlock() } + // ResourceAttrDiff is the diff of a single attribute of a resource. type ResourceAttrDiff struct { Old string // Old Value @@ -331,6 +428,19 @@ func NewInstanceDiff() *InstanceDiff { return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} } +func (d *InstanceDiff) Copy() (*InstanceDiff, error) { + if d == nil { + return nil, nil + } + + dCopy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + return nil, err + } + + return dCopy.(*InstanceDiff), nil +} + // ChangeType returns the DiffChangeType represented by the diff // for this single instance. func (d *InstanceDiff) ChangeType() DiffChangeType { @@ -342,7 +452,7 @@ func (d *InstanceDiff) ChangeType() DiffChangeType { return DiffDestroyCreate } - if d.GetDestroy() { + if d.GetDestroy() || d.GetDestroyDeposed() { return DiffDestroy } @@ -361,7 +471,35 @@ func (d *InstanceDiff) Empty() bool { d.mu.Lock() defer d.mu.Unlock() - return !d.Destroy && !d.DestroyTainted && len(d.Attributes) == 0 + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +// DeepCopy performs a deep copy of all parts of the InstanceDiff +func (d *InstanceDiff) DeepCopy() *InstanceDiff { + copy, err := copystructure.Config{Lock: true}.Copy(d) + if err != nil { + panic(err) + } + + return copy.(*InstanceDiff) } func (d *InstanceDiff) GoString() string { @@ -369,6 +507,7 @@ func (d *InstanceDiff) GoString() string { Attributes: d.Attributes, Destroy: d.Destroy, DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, }) } @@ -403,8 +542,22 @@ func (d *InstanceDiff) requiresNew() bool { return false } +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +func (d *InstanceDiff) SetDestroyDeposed(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyDeposed = b +} + // These methods are properly locked, for use outside other InstanceDiff -// methods but everywhere else within in the terraform package. +// methods but everywhere else within the terraform package. // TODO refactor the locking scheme func (d *InstanceDiff) SetTainted(b bool) { d.mu.Lock() @@ -493,13 +646,74 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { d.mu.Lock() defer d.mu.Unlock() - if d.Destroy != d2.GetDestroy() { + // If we're going from requiring new to NOT requiring new, then we have + // to see if all required news were computed. If so, it is allowed since + // computed may also mean "same value and therefore not new". + oldNew := d.requiresNew() + newNew := d2.RequiresNew() + if oldNew && !newNew { + oldNew = false + + // This section builds a list of ignorable attributes for requiresNew + // by removing off any elements of collections going to zero elements. 
@@ -493,13 +646,74 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
 	d.mu.Lock()
 	defer d.mu.Unlock()
 
-	if d.Destroy != d2.GetDestroy() {
+	// If we're going from requiring new to NOT requiring new, then we have
+	// to see if all required news were computed. If so, it is allowed since
+	// computed may also mean "same value and therefore not new".
+	oldNew := d.requiresNew()
+	newNew := d2.RequiresNew()
+	if oldNew && !newNew {
+		oldNew = false
+
+		// This section builds a list of ignorable attributes for requiresNew
+		// by removing any elements of collections that are going to zero
+		// elements. For collections going to zero, they may not exist at all
+		// in the new diff (and hence RequiresNew == false).
+		ignoreAttrs := make(map[string]struct{})
+		for k, diffOld := range d.Attributes {
+			if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
+				continue
+			}
+
+			// This case is in here as a protection measure. The bug that this
+			// code originally fixed (GH-11349) didn't have to deal with computed
+			// so I'm not 100% sure what the correct behavior is. Best to leave
+			// the old behavior.
+			if diffOld.NewComputed {
+				continue
+			}
+
+			// We're looking for the case a map goes to exactly 0.
+			if diffOld.New != "0" {
+				continue
+			}
+
+			// Found it! Ignore all of these. The prefix here is stripping
+			// off the "%" so it is just "k."
+			prefix := k[:len(k)-1]
+			for k2, _ := range d.Attributes {
+				if strings.HasPrefix(k2, prefix) {
+					ignoreAttrs[k2] = struct{}{}
+				}
+			}
+		}
+
+		for k, rd := range d.Attributes {
+			if _, ok := ignoreAttrs[k]; ok {
+				continue
+			}
+
+			// If the field requires new and is NOT computed, then what
+			// we have is a diff mismatch for sure. We record that the old
+			// diff does REQUIRE a ForceNew.
+			if rd != nil && rd.RequiresNew && !rd.NewComputed {
+				oldNew = true
+				break
+			}
+		}
+	}
+
+	if oldNew != newNew {
 		return false, fmt.Sprintf(
-			"diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
+			"diff RequiresNew; old: %t, new: %t", oldNew, newNew)
 	}
-	if d.requiresNew() != d2.RequiresNew() {
+
+	// Verify that destroy matches. The second boolean here allows us to
+	// have mismatching Destroy if we're moving from RequiresNew true
+	// to false above. Therefore, the second boolean will only pass if
+	// we're moving from Destroy: true to false as well.
+	if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
 		return false, fmt.Sprintf(
-			"diff RequiresNew; old: %t, new: %t", d.requiresNew(), d2.RequiresNew())
+			"diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
 	}
 
 	// Go through the old diff and make sure the new diff has all the
@@ -542,6 +756,13 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
 			continue
 		}
 
+		// If the last diff was a computed value then the absence of
+		// that value is allowed since it may mean the value ended up
+		// being the same.
+		if diffOld.NewComputed {
+			ok = true
+		}
+
 		// No exact match, but maybe this is a set containing computed
 		// values. So check if there is an approximate hash in the key
 		// and if so, try to match the key.
@@ -596,7 +817,6 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
 		}
 
 		// search for the suffix of the base of a [computed] map, list or set.
-		multiVal := regexp.MustCompile(`\.(#|~#|%)$`)
 		match := multiVal.FindStringSubmatch(k)
 
 		if diffOld.NewComputed && len(match) == 2 {
@@ -617,7 +837,14 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
 			}
 		}
 
-		// TODO: check for the same value if not computed
+		// We don't compare the values because we can't currently actually
+		// guarantee to generate the same value between two diffs created from
+		// the same state+config: we have some pesky interpolation functions
+		// that do not behave as pure functions (uuid, timestamp) and so they
+		// can be different each time a diff is produced.
+		// FIXME: Re-organize our config handling so that we don't re-evaluate
+		// expressions when we produce a second comparison diff during
+		// apply (for EvalCompareDiff).
 	}
 
 	// Check for leftover attributes
@@ -632,3 +859,21 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
 
 	return true, ""
 }
+
+// moduleDiffSort implements sort.Interface to sort module diffs by path.
+type moduleDiffSort []*ModuleDiff
+
+func (s moduleDiffSort) Len() int      { return len(s) }
+func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s moduleDiffSort) Less(i, j int) bool {
+	a := s[i]
+	b := s[j]
+
+	// If the lengths are different, then the shorter one always wins
+	if len(a.Path) != len(b.Path) {
+		return len(a.Path) < len(b.Path)
+	}
+
+	// Otherwise, compare lexically
+	return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
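Equal relies on moduleDiffSort above to make module order irrelevant before comparing. The ordering rule is: shorter paths first, ties broken lexically on the dot-joined path. A standalone sketch of the same sort.Interface, using illustrative types rather than the package's own:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// pathSort orders module paths the way moduleDiffSort does:
// shorter paths first, ties broken lexically on the joined path.
type pathSort [][]string

func (s pathSort) Len() int      { return len(s) }
func (s pathSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s pathSort) Less(i, j int) bool {
	if len(s[i]) != len(s[j]) {
		return len(s[i]) < len(s[j])
	}
	return strings.Join(s[i], ".") < strings.Join(s[j], ".")
}

func main() {
	paths := [][]string{
		{"root", "b"},
		{"root"},
		{"root", "a", "c"},
		{"root", "a"},
	}
	sort.Sort(pathSort(paths))
	fmt.Println(paths) // [[root] [root a] [root b] [root a c]]
}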
diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
new file mode 100644
index 00000000..bc9d638a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
@@ -0,0 +1,17 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/dag"
+)
+
+// DestroyEdge is an edge that represents a standard "destroy" relationship:
+// Target depends on Source because Source is destroying.
+type DestroyEdge struct {
+	S, T dag.Vertex
+}
+
+func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) }
+func (e *DestroyEdge) Source() dag.Vertex    { return e.S }
+func (e *DestroyEdge) Target() dag.Vertex    { return e.T }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
index 3cb088a2..10d9c228 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -49,11 +49,11 @@ func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
 		path = strings.Join(ctx.Path(), ".")
 	}
 
-	log.Printf("[DEBUG] %s: eval: %T", path, n)
+	log.Printf("[TRACE] %s: eval: %T", path, n)
 	output, err := n.Eval(ctx)
 	if err != nil {
 		if _, ok := err.(EvalEarlyExitError); ok {
-			log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
+			log.Printf("[TRACE] %s: eval: %T, err: %s", path, n, err)
 		} else {
 			log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
 		}
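EvalRaw's TRACE logging and the EvalEarlyExitError check above are the heart of the graph-walk evaluator: every node is just a value with an Eval method, and an early exit is an error value that merely means "stop this branch". A standalone sketch of that dispatch pattern, with illustrative names that are not the package's API:

package main

import (
	"errors"
	"fmt"
	"log"
)

// evalNode mirrors the shape of the package's EvalNode interface.
type evalNode interface {
	Eval() (interface{}, error)
}

// errEarlyExit plays the role of EvalEarlyExitError: "stop this branch",
// not "something went wrong".
var errEarlyExit = errors.New("early exit")

type printNode struct{ msg string }

func (n *printNode) Eval() (interface{}, error) {
	fmt.Println(n.msg)
	return nil, nil
}

// evalRaw dispatches like EvalRaw: log, evaluate, and downgrade the
// severity of early-exit errors.
func evalRaw(n evalNode) (interface{}, error) {
	log.Printf("[TRACE] eval: %T", n)
	out, err := n.Eval()
	if errors.Is(err, errEarlyExit) {
		log.Printf("[TRACE] eval: %T, err: %s", n, err)
	} else if err != nil {
		log.Printf("[ERROR] eval: %T, err: %s", n, err)
	}
	return out, err
}

func main() {
	evalRaw(&printNode{msg: "hello from a node"})
}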
log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id) state, err := provider.Apply(n.Info, state, diff) @@ -104,6 +94,37 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { return nil, nil } +// EvalApplyPre is an EvalNode implementation that does the pre-Apply work +type EvalApplyPre struct { + Info *InstanceInfo + State **InstanceState + Diff **InstanceDiff +} + +// TODO: test +func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + diff := *n.Diff + + // If the state is nil, make it non-nil + if state == nil { + state = new(InstanceState) + } + state.init() + + if resourceHasUserVisibleApply(n.Info) { + // Call post-apply hook + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreApply(n.Info, state, diff) + }) + if err != nil { + return nil, err + } + } + + return nil, nil +} + // EvalApplyPost is an EvalNode implementation that does the post-Apply work type EvalApplyPost struct { Info *InstanceInfo @@ -115,7 +136,7 @@ type EvalApplyPost struct { func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { state := *n.State - { + if resourceHasUserVisibleApply(n.Info) { // Call post-apply hook err := ctx.Hook(func(h Hook) (HookAction, error) { return h.PostApply(n.Info, state, *n.Error) @@ -128,6 +149,22 @@ func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { return nil, *n.Error } +// resourceHasUserVisibleApply returns true if the given resource is one where +// apply actions should be exposed to the user. +// +// Certain resources do apply actions only as an implementation detail, so +// these should not be advertised to code outside of this package. +func resourceHasUserVisibleApply(info *InstanceInfo) bool { + addr := info.ResourceAddress() + + // Only managed resources have user-visible apply actions. + // In particular, this excludes data resources since we "apply" these + // only as an implementation detail of removing them from state when + // they are destroyed. (When reading, they don't get here at all because + // we present them as "Refresh" actions.) + return addr.Mode == config.ManagedResourceMode +} + // EvalApplyProvisioners is an EvalNode implementation that executes // the provisioners for a resource. // @@ -140,25 +177,33 @@ type EvalApplyProvisioners struct { InterpResource *Resource CreateNew *bool Error *error + + // When is the type of provisioner to run at this point + When config.ProvisionerWhen } // TODO: test func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { state := *n.State - if !*n.CreateNew { + if n.CreateNew != nil && !*n.CreateNew { // If we're not creating a new resource, then don't run provisioners return nil, nil } - if len(n.Resource.Provisioners) == 0 { + provs := n.filterProvisioners() + if len(provs) == 0 { // We have no provisioners, so don't do anything return nil, nil } + // taint tells us whether to enable tainting. + taint := n.When == config.ProvisionerWhenCreate + if n.Error != nil && *n.Error != nil { - // We're already errored creating, so mark as tainted and continue - state.Tainted = true + if taint { + state.Tainted = true + } // We're already tainted, so just return out return nil, nil @@ -176,16 +221,14 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { // If there are no errors, then we append it to our output error // if we have one, otherwise we just output it. 
 // EvalApplyProvisioners is an EvalNode implementation that executes
 // the provisioners for a resource.
 //
@@ -140,25 +177,33 @@ type EvalApplyProvisioners struct {
 	InterpResource *Resource
 	CreateNew      *bool
 	Error          *error
+
+	// When is the type of provisioner to run at this point
+	When config.ProvisionerWhen
 }
 
 // TODO: test
 func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
 	state := *n.State
 
-	if !*n.CreateNew {
+	if n.CreateNew != nil && !*n.CreateNew {
 		// If we're not creating a new resource, then don't run provisioners
 		return nil, nil
 	}
 
-	if len(n.Resource.Provisioners) == 0 {
+	provs := n.filterProvisioners()
+	if len(provs) == 0 {
 		// We have no provisioners, so don't do anything
 		return nil, nil
 	}
 
+	// taint tells us whether to enable tainting.
+	taint := n.When == config.ProvisionerWhenCreate
+
 	if n.Error != nil && *n.Error != nil {
-		// We're already errored creating, so mark as tainted and continue
-		state.Tainted = true
+		if taint {
+			state.Tainted = true
+		}
 
 		// We're already tainted, so just return out
 		return nil, nil
@@ -176,16 +221,14 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
 	// If there are no errors, then we append it to our output error
 	// if we have one, otherwise we just output it.
-	err := n.apply(ctx)
+	err := n.apply(ctx, provs)
 	if err != nil {
-		// Provisioning failed, so mark the resource as tainted
-		state.Tainted = true
-
-		if n.Error != nil {
-			*n.Error = multierror.Append(*n.Error, err)
-		} else {
-			return nil, err
+		if taint {
+			state.Tainted = true
 		}
+
+		*n.Error = multierror.Append(*n.Error, err)
+		return nil, err
 	}
 
 	{
@@ -201,7 +244,29 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
 	return nil, nil
 }
 
-func (n *EvalApplyProvisioners) apply(ctx EvalContext) error {
+// filterProvisioners filters the provisioners on the resource to only
+// the provisioners specified by the "when" option.
+func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
+	// Fast path the zero case
+	if n.Resource == nil {
+		return nil
+	}
+
+	if len(n.Resource.Provisioners) == 0 {
+		return nil
+	}
+
+	result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners))
+	for _, p := range n.Resource.Provisioners {
+		if p.When == n.When {
+			result = append(result, p)
+		}
+	}
+
+	return result
+}
+
+func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error {
 	state := *n.State
 
 	// Store the original connection info, restore later
@@ -210,18 +275,18 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext) error {
 		state.Ephemeral.ConnInfo = origConnInfo
 	}()
 
-	for _, prov := range n.Resource.Provisioners {
+	for _, prov := range provs {
 		// Get the provisioner
 		provisioner := ctx.Provisioner(prov.Type)
 
 		// Interpolate the provisioner config
-		provConfig, err := ctx.Interpolate(prov.RawConfig, n.InterpResource)
+		provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
 		if err != nil {
 			return err
 		}
 
 		// Interpolate the conn info, since it may contain variables
-		connInfo, err := ctx.Interpolate(prov.ConnInfo, n.InterpResource)
+		connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource)
 		if err != nil {
 			return err
 		}
@@ -275,19 +340,31 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext) error {
 
 		// Invoke the Provisioner
 		output := CallbackUIOutput{OutputFn: outputFn}
-		if err := provisioner.Apply(&output, state, provConfig); err != nil {
-			return err
-		}
+		applyErr := provisioner.Apply(&output, state, provConfig)
 
-		{
-			// Call post hook
-			err := ctx.Hook(func(h Hook) (HookAction, error) {
-				return h.PostProvision(n.Info, prov.Type)
-			})
-			if err != nil {
-				return err
+		// Call post hook
+		hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PostProvision(n.Info, prov.Type, applyErr)
+		})
+
+		// Handle the error before we deal with the hook
+		if applyErr != nil {
+			// Determine failure behavior
+			switch prov.OnFailure {
+			case config.ProvisionerOnFailureContinue:
+				log.Printf(
+					"[INFO] apply: %s [%s]: error during provision, continue requested",
+					n.Info.Id, prov.Type)
+
+			case config.ProvisionerOnFailureFail:
+				return applyErr
			}
 		}
+
+		// Deal with the hook
+		if hookErr != nil {
+			return hookErr
+		}
 	}
 
 	return nil
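The reworked provisioner loop above runs the post-provision hook first and only then decides, via the provisioner's on_failure setting, whether a provisioning error aborts the apply. A standalone sketch of that decision; the constant names echo config.ProvisionerOnFailure*, everything else is illustrative:

package main

import (
	"errors"
	"fmt"
	"log"
)

type onFailure int

const (
	onFailureFail     onFailure = iota // default: abort on error
	onFailureContinue                  // log and keep going
)

// runProvisioner applies one provisioner and resolves its error
// according to on_failure, like the loop body above.
func runProvisioner(name string, mode onFailure, apply func() error) error {
	applyErr := apply()
	if applyErr == nil {
		return nil
	}
	switch mode {
	case onFailureContinue:
		log.Printf("[INFO] %s: error during provision, continue requested", name)
		return nil
	default:
		return applyErr
	}
}

func main() {
	boom := func() error { return errors.New("script exited 1") }
	fmt.Println(runProvisioner("remote-exec", onFailureContinue, boom)) // <nil>
	fmt.Println(runProvisioner("remote-exec", onFailureFail, boom))     // script exited 1
}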
 type EvalCheckPreventDestroy struct {
-	Resource *config.Resource
-	Diff     **InstanceDiff
+	Resource   *config.Resource
+	ResourceId string
+	Diff       **InstanceDiff
 }
 
 func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
@@ -23,7 +24,12 @@ func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
 	preventDestroy := n.Resource.Lifecycle.PreventDestroy
 
 	if diff.GetDestroy() && preventDestroy {
-		return nil, fmt.Errorf(preventDestroyErrStr, n.Resource.Id())
+		resourceId := n.ResourceId
+		if resourceId == "" {
+			resourceId = n.Resource.Id()
+		}
+
+		return nil, fmt.Errorf(preventDestroyErrStr, resourceId)
 	}
 
 	return nil, nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
index f2867511..86481ded 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -8,6 +8,10 @@ import (
 
 // EvalContext is the interface that is given to eval nodes to execute.
 type EvalContext interface {
+	// Stopped returns a channel that is closed when evaluation is stopped
+	// via Terraform.Context.Stop()
+	Stopped() <-chan struct{}
+
 	// Path is the current module path.
 	Path() []string
 
@@ -18,11 +22,11 @@ type EvalContext interface {
 	// Input is the UIInput object for interacting with the UI.
 	Input() UIInput
 
-	// InitProvider initializes the provider with the given name and
+	// InitProvider initializes the provider with the given type and name, and
 	// returns the implementation of the resource provider or an error.
 	//
 	// It is an error to initialize the same provider more than once.
-	InitProvider(string) (ResourceProvider, error)
+	InitProvider(typ string, name string) (ResourceProvider, error)
 
 	// Provider gets the provider instance with the given name (already
 	// initialized) or returns nil if the provider isn't initialized.
@@ -36,8 +40,6 @@ type EvalContext interface {
 	// is used to store the provider configuration for inheritance lookups
 	// with ParentProviderConfig().
 	ConfigureProvider(string, *ResourceConfig) error
-	SetProviderConfig(string, *ResourceConfig) error
-	ParentProviderConfig(string) *ResourceConfig
 
 	// ProviderInput and SetProviderInput are used to configure providers
 	// from user input.
@@ -65,6 +67,13 @@ type EvalContext interface {
 	// that is currently being acted upon.
 	Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)
 
+	// InterpolateProvider takes a ProviderConfig and interpolates it with the
+	// stored interpolation scope. Since provider configurations can be
+	// inherited, the interpolation scope may be different from the current
+	// context path. Interpolation is otherwise executed the same as in the
+	// Interpolate method.
+	InterpolateProvider(*config.ProviderConfig, *Resource) (*ResourceConfig, error)
+
 	// SetVariables sets the variables for the module within
 	// this context with the name n. This function call is additive:
 	// the second parameter is merged with any previous call.
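The new Stopped() channel added to the interface above is the standard Go cancellation idiom: long-running eval nodes can select on it and bail out when it closes. A standalone sketch of how a consumer might honor it; the helper is illustrative, not from the patch, but BuiltinEvalContext (next file) really does derive the channel from a context.Context:

package main

import (
	"context"
	"fmt"
	"time"
)

// doUntilStopped polls some work and exits as soon as the stop channel
// closes, the intended consumption pattern for Stopped().
func doUntilStopped(stopped <-chan struct{}) error {
	for i := 0; ; i++ {
		select {
		case <-stopped:
			return fmt.Errorf("evaluation stopped")
		case <-time.After(50 * time.Millisecond):
			fmt.Println("tick", i)
		}
	}
}

func main() {
	// Cancelling the context is what ultimately closes the channel.
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(120 * time.Millisecond)
		cancel()
	}()
	fmt.Println(doUntilStopped(ctx.Done()))
}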
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
index d85cb77f..1b6ee5a6 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
@@ -1,9 +1,9 @@
 package terraform
 
 import (
+	"context"
 	"fmt"
 	"log"
-	"strings"
 	"sync"
 
 	"github.com/hashicorp/terraform/config"
@@ -12,6 +12,9 @@ import (
 // BuiltinEvalContext is an EvalContext implementation that is used by
 // Terraform by default.
 type BuiltinEvalContext struct {
+	// StopContext is the context used to track whether we're complete
+	StopContext context.Context
+
 	// PathValue is the Path that this context is operating within.
 	PathValue []string
 
@@ -26,14 +29,12 @@ type BuiltinEvalContext struct {
 	InterpolaterVars    map[string]map[string]interface{}
 	InterpolaterVarLock *sync.Mutex
 
+	Components          contextComponentFactory
 	Hooks               []Hook
 	InputValue          UIInput
-	Providers           map[string]ResourceProviderFactory
 	ProviderCache       map[string]ResourceProvider
-	ProviderConfigCache map[string]*ResourceConfig
 	ProviderInputConfig map[string]map[string]interface{}
 	ProviderLock        *sync.Mutex
-	Provisioners        map[string]ResourceProvisionerFactory
 	ProvisionerCache    map[string]ResourceProvisioner
 	ProvisionerLock     *sync.Mutex
 	DiffValue           *Diff
@@ -44,6 +45,15 @@ type BuiltinEvalContext struct {
 	once sync.Once
 }
 
+func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
+	// This can happen during tests. During tests, we just block forever.
+	if ctx.StopContext == nil {
+		return nil
+	}
+
+	return ctx.StopContext.Done()
+}
+
 func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
 	for _, h := range ctx.Hooks {
 		action, err := fn(h)
@@ -68,12 +78,12 @@ func (ctx *BuiltinEvalContext) Input() UIInput {
 	return ctx.InputValue
 }
 
-func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) {
+func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProvider, error) {
 	ctx.once.Do(ctx.init)
 
 	// If we already initialized, it is an error
-	if p := ctx.Provider(n); p != nil {
-		return nil, fmt.Errorf("Provider '%s' already initialized", n)
+	if p := ctx.Provider(name); p != nil {
+		return nil, fmt.Errorf("Provider '%s' already initialized", name)
 	}
 
 	// Warning: make sure to acquire these locks AFTER the call to Provider
@@ -81,23 +91,12 @@ func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error)
 	ctx.ProviderLock.Lock()
 	defer ctx.ProviderLock.Unlock()
 
-	typeName := strings.SplitN(n, ".", 2)[0]
-
-	f, ok := ctx.Providers[typeName]
-	if !ok {
-		return nil, fmt.Errorf("Provider '%s' not found", typeName)
-	}
-
-	p, err := f()
+	p, err := ctx.Components.ResourceProvider(typeName, name)
 	if err != nil {
 		return nil, err
 	}
 
-	providerPath := make([]string, len(ctx.Path())+1)
-	copy(providerPath, ctx.Path())
-	providerPath[len(providerPath)-1] = n
-
-	ctx.ProviderCache[PathCacheKey(providerPath)] = p
+	ctx.ProviderCache[name] = p
 
 	return p, nil
 }
@@ -107,11 +106,7 @@ func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider {
 	ctx.ProviderLock.Lock()
 	defer ctx.ProviderLock.Unlock()
 
-	providerPath := make([]string, len(ctx.Path())+1)
-	copy(providerPath, ctx.Path())
-	providerPath[len(providerPath)-1] = n
-
-	return ctx.ProviderCache[PathCacheKey(providerPath)]
+	return ctx.ProviderCache[n]
 }
 
 func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
@@ -120,15 +115,11 @@ func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
 	ctx.ProviderLock.Lock()
 	defer ctx.ProviderLock.Unlock()
 
-	providerPath := make([]string, len(ctx.Path())+1)
-	copy(providerPath, ctx.Path())
-	providerPath[len(providerPath)-1] = n
-
 	var provider interface{}
-	provider = ctx.ProviderCache[PathCacheKey(providerPath)]
+	provider = ctx.ProviderCache[n]
 	if provider != nil {
 		if p, ok := provider.(ResourceProviderCloser); ok {
-			delete(ctx.ProviderCache, PathCacheKey(providerPath))
+			delete(ctx.ProviderCache, n)
 			return p.Close()
 		}
 	}
@@ -142,28 +133,9 @@ func (ctx *BuiltinEvalContext) ConfigureProvider(
 	if p == nil {
 		return fmt.Errorf("Provider '%s' not initialized", n)
 	}
-
-	if err := ctx.SetProviderConfig(n, cfg); err != nil {
-		return nil
-	}
-
 	return p.Configure(cfg)
 }
 
-func (ctx *BuiltinEvalContext) SetProviderConfig(
-	n string, cfg *ResourceConfig) error {
-	providerPath := make([]string, len(ctx.Path())+1)
-	copy(providerPath, ctx.Path())
-	providerPath[len(providerPath)-1] = n
-
-	// Save the configuration
-	ctx.ProviderLock.Lock()
-	ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg
-	ctx.ProviderLock.Unlock()
-
-	return nil
-}
-
 func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
 	ctx.ProviderLock.Lock()
 	defer ctx.ProviderLock.Unlock()
@@ -196,27 +168,6 @@ func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface
 	ctx.ProviderLock.Unlock()
 }
 
-func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig {
-	ctx.ProviderLock.Lock()
-	defer ctx.ProviderLock.Unlock()
-
-	// Make a copy of the path so we can safely edit it
-	path := ctx.Path()
-	pathCopy := make([]string, len(path)+1)
-	copy(pathCopy, path)
-
-	// Go up the tree.
-	for i := len(path) - 1; i >= 0; i-- {
-		pathCopy[i+1] = n
-		k := PathCacheKey(pathCopy[:i+2])
-		if v, ok := ctx.ProviderConfigCache[k]; ok {
-			return v
-		}
-	}
-
-	return nil
-}
-
 func (ctx *BuiltinEvalContext) InitProvisioner(
 	n string) (ResourceProvisioner, error) {
 	ctx.once.Do(ctx.init)
@@ -231,21 +182,17 @@ func (ctx *BuiltinEvalContext) InitProvisioner(
 	ctx.ProvisionerLock.Lock()
 	defer ctx.ProvisionerLock.Unlock()
 
-	f, ok := ctx.Provisioners[n]
-	if !ok {
-		return nil, fmt.Errorf("Provisioner '%s' not found", n)
-	}
+	provPath := make([]string, len(ctx.Path())+1)
+	copy(provPath, ctx.Path())
+	provPath[len(provPath)-1] = n
+	key := PathCacheKey(provPath)
 
-	p, err := f()
+	p, err := ctx.Components.ResourceProvisioner(n, key)
 	if err != nil {
 		return nil, err
 	}
 
-	provPath := make([]string, len(ctx.Path())+1)
-	copy(provPath, ctx.Path())
-	provPath[len(provPath)-1] = n
-
-	ctx.ProvisionerCache[PathCacheKey(provPath)] = p
+	ctx.ProvisionerCache[key] = p
 
 	return p, nil
 }
@@ -286,6 +233,7 @@ func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
 
 func (ctx *BuiltinEvalContext) Interpolate(
 	cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) {
+
 	if cfg != nil {
 		scope := &InterpolationScope{
 			Path:     ctx.Path(),
@@ -308,6 +256,35 @@ func (ctx *BuiltinEvalContext) Interpolate(
 	return result, nil
 }
 
+func (ctx *BuiltinEvalContext) InterpolateProvider(
+	pc *config.ProviderConfig, r *Resource) (*ResourceConfig, error) {
+
+	var cfg *config.RawConfig
+
+	if pc != nil && pc.RawConfig != nil {
+		scope := &InterpolationScope{
+			Path:     ctx.Path(),
+			Resource: r,
+		}
+
+		cfg = pc.RawConfig
+
+		vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
+		if err != nil {
+			return nil, err
+		}
+
+		// Do the interpolation
+		if err := cfg.Interpolate(vs); err != nil {
+			return nil, err
+		}
+	}
+
+	result := NewResourceConfig(cfg)
+	result.interpolateForce()
+	return result, nil
+}
+
 func (ctx *BuiltinEvalContext) Path() []string {
 	return ctx.PathValue
 }
@@ -341,9 +318,4 @@ func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) {
 }
 
 func (ctx *BuiltinEvalContext) init() {
-	// We nil-check the things below because they're meant to be configured,
-	// and we just default them to non-nil.
-	if ctx.Providers == nil {
-		ctx.Providers = make(map[string]ResourceProviderFactory)
-	}
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
index 4f5c23bc..64645179 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
@@ -9,6 +9,9 @@ import (
 // MockEvalContext is a mock version of EvalContext that can be used
 // for tests.
 type MockEvalContext struct {
+	StoppedCalled bool
+	StoppedValue  <-chan struct{}
+
 	HookCalled bool
 	HookHook   Hook
 	HookError  error
@@ -42,14 +45,6 @@ type MockEvalContext struct {
 	ConfigureProviderConfig *ResourceConfig
 	ConfigureProviderError  error
 
-	SetProviderConfigCalled bool
-	SetProviderConfigName   string
-	SetProviderConfigConfig *ResourceConfig
-
-	ParentProviderConfigCalled bool
-	ParentProviderConfigName   string
-	ParentProviderConfigConfig *ResourceConfig
-
 	InitProvisionerCalled      bool
 	InitProvisionerName        string
 	InitProvisionerProvisioner ResourceProvisioner
@@ -69,6 +64,12 @@ type MockEvalContext struct {
 	InterpolateConfigResult *ResourceConfig
 	InterpolateError        error
 
+	InterpolateProviderCalled       bool
+	InterpolateProviderConfig       *config.ProviderConfig
+	InterpolateProviderResource    *Resource
+	InterpolateProviderConfigResult *ResourceConfig
+	InterpolateProviderError        error
+
 	PathCalled bool
 	PathPath   []string
 
@@ -85,6 +86,11 @@ type MockEvalContext struct {
 	StateLock   *sync.RWMutex
 }
 
+func (c *MockEvalContext) Stopped() <-chan struct{} {
+	c.StoppedCalled = true
+	return c.StoppedValue
+}
+
 func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
 	c.HookCalled = true
 	if c.HookHook != nil {
@@ -101,7 +107,7 @@ func (c *MockEvalContext) Input() UIInput {
 	return c.InputInput
 }
 
-func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) {
+func (c *MockEvalContext) InitProvider(t, n string) (ResourceProvider, error) {
 	c.InitProviderCalled = true
 	c.InitProviderName = n
 	return c.InitProviderProvider, c.InitProviderError
@@ -126,20 +132,6 @@ func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error
 	return c.ConfigureProviderError
 }
 
-func (c *MockEvalContext) SetProviderConfig(
-	n string, cfg *ResourceConfig) error {
-	c.SetProviderConfigCalled = true
-	c.SetProviderConfigName = n
-	c.SetProviderConfigConfig = cfg
-	return nil
-}
-
-func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig {
-	c.ParentProviderConfigCalled = true
-	c.ParentProviderConfigName = n
-	return c.ParentProviderConfigConfig
-}
-
 func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
 	c.ProviderInputCalled = true
 	c.ProviderInputName = n
@@ -178,6 +170,14 @@ func (c *MockEvalContext) Interpolate(
 	return c.InterpolateConfigResult, c.InterpolateError
 }
 
+func (c *MockEvalContext) InterpolateProvider(
+	config *config.ProviderConfig, resource *Resource) (*ResourceConfig, error) {
+	c.InterpolateProviderCalled = true
+	c.InterpolateProviderConfig = config
+	c.InterpolateProviderResource = resource
+	return c.InterpolateProviderConfigResult, c.InterpolateError
+}
+
 func (c *MockEvalContext) Path() []string {
 	c.PathCalled = true
 	return c.PathPath
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
new file mode 100644
index 00000000..91e2b904
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+	"log"
+)
+
+// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
+// when there is a resource count with zero/one boundary, i.e. fixing
+// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
+//
+// This works on the global state.
+type EvalCountFixZeroOneBoundaryGlobal struct{}
+
+// TODO: test
+func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
+	// Get the state and lock it since we'll potentially modify it
+	state, lock := ctx.State()
+	lock.Lock()
+	defer lock.Unlock()
+
+	// Prune the state since we require a clean state to work
+	state.prune()
+
+	// Go through each module since the boundaries are restricted to a
+	// module scope.
+	for _, m := range state.Modules {
+		if err := n.fixModule(m); err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error {
+	// Counts keeps track of keys and their counts
+	counts := make(map[string]int)
+	for k, _ := range m.Resources {
+		// Parse the key
+		key, err := ParseResourceStateKey(k)
+		if err != nil {
+			return err
+		}
+
+		// Set the index to -1 so that we can keep count
+		key.Index = -1
+
+		// Increment
+		counts[key.String()]++
+	}
+
+	// Go through the counts and do the fixup for each resource
+	for raw, count := range counts {
+		// Search and replace this resource
+		search := raw
+		replace := raw + ".0"
+		if count < 2 {
+			search, replace = replace, search
+		}
+		log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
+
+		// Look for the resource state. If we don't have one, then it is okay.
+		rs, ok := m.Resources[search]
+		if !ok {
+			continue
+		}
+
+		// If the replacement key exists, we just keep both
+		if _, ok := m.Resources[replace]; ok {
+			continue
+		}
+
+		m.Resources[replace] = rs
+		delete(m.Resources, search)
+	}
+
+	return nil
+}
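fixModule's search/replace dance above is just a key rename in the module's resource map: with one instance the state key is "aws_instance.foo", with several it is "aws_instance.foo.0", "aws_instance.foo.1", and crossing the zero/one boundary renames between the two forms. A standalone sketch over a plain map, with illustrative types:

package main

import "fmt"

// fixBoundary renames "name" <-> "name.0" depending on how many
// instances share the base name, echoing fixModule above.
func fixBoundary(resources map[string]string, base string, count int) {
	search, replace := base+".0", base // count < 2: "name.0" -> "name"
	if count >= 2 {
		search, replace = base, base+".0" // multi-instance: "name" -> "name.0"
	}
	if v, ok := resources[search]; ok {
		if _, exists := resources[replace]; !exists {
			resources[replace] = v
			delete(resources, search)
		}
	}
}

func main() {
	resources := map[string]string{"aws_instance.foo.0": "i-abc123"}
	fixBoundary(resources, "aws_instance.foo", 1)
	fmt.Println(resources) // map[aws_instance.foo:i-abc123]
}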
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
new file mode 100644
index 00000000..54a8333e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
@@ -0,0 +1,25 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/config"
+)
+
+// EvalCountCheckComputed is an EvalNode that checks if a resource count
+// is computed and errors if so. This can possibly happen across a
+// module boundary and we don't yet support this.
+type EvalCountCheckComputed struct {
+	Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalCountCheckComputed) Eval(ctx EvalContext) (interface{}, error) {
+	if n.Resource.RawCount.Value() == unknownValue() {
+		return nil, fmt.Errorf(
+			"%s: value of 'count' cannot be computed",
+			n.Resource.Id())
+	}
+
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
index e9eea189..26205ce5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -6,6 +6,7 @@ import (
 	"strings"
 
 	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/version"
 )
 
 // EvalCompareDiff is an EvalNode implementation that compares two diffs
@@ -60,7 +61,7 @@ func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
 			"\n"+
 			"Also include as much context as you can about your config, state, "+
 			"and the steps you performed to trigger this error.\n",
-			n.Info.Id, Version, n.Info.Id, reason, one, two)
+			n.Info.Id, version.Version, n.Info.Id, reason, one, two)
 	}
 
 	return nil, nil
@@ -69,6 +70,7 @@ func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
 // EvalDiff is an EvalNode implementation that does a refresh for
 // a resource.
 type EvalDiff struct {
+	Name     string
 	Info     *InstanceInfo
 	Config   **ResourceConfig
 	Provider *ResourceProvider
@@ -80,6 +82,12 @@ type EvalDiff struct {
 	// Resource is needed to fetch the ignore_changes list so we can
 	// filter user-requested ignored attributes from the diff.
 	Resource *config.Resource
+
+	// Stub is used to flag the generated InstanceDiff as a stub. This is used to
+	// ensure that the node exists to perform interpolations and generate
+	// computed paths off of, but not as an actual diff where resources should be
+	// counted, and not as a diff that should be acted on.
+	Stub bool
 }
 
 // TODO: test
@@ -89,11 +97,13 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
 	provider := *n.Provider
 
 	// Call pre-diff hook
-	err := ctx.Hook(func(h Hook) (HookAction, error) {
-		return h.PreDiff(n.Info, state)
-	})
-	if err != nil {
-		return nil, err
+	if !n.Stub {
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PreDiff(n.Info, state)
+		})
+		if err != nil {
+			return nil, err
+		}
 	}
 
 	// The state for the diff must never be nil
@@ -112,6 +122,18 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
 		diff = new(InstanceDiff)
 	}
 
+	// Set DestroyDeposed if we have deposed instances
+	_, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
+		if len(rs.Deposed) > 0 {
+			diff.DestroyDeposed = true
+		}
+
+		return nil, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
 	// Preserve the DestroyTainted flag
 	if n.Diff != nil {
 		diff.SetTainted((*n.Diff).GetDestroyTainted())
@@ -139,20 +161,25 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
 		})
 	}
 
+	// filter out ignored resources
 	if err := n.processIgnoreChanges(diff); err != nil {
 		return nil, err
 	}
 
 	// Call post-refresh hook
-	err = ctx.Hook(func(h Hook) (HookAction, error) {
-		return h.PostDiff(n.Info, diff)
-	})
-	if err != nil {
-		return nil, err
+	if !n.Stub {
+		err = ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PostDiff(n.Info, diff)
+		})
+		if err != nil {
+			return nil, err
+		}
 	}
 
-	// Update our output
-	*n.OutputDiff = diff
+	// Update our output if we care
+	if n.OutputDiff != nil {
+		*n.OutputDiff = diff
+	}
 
 	// Update the state if we care
 	if n.OutputState != nil {
@@ -177,66 +204,85 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
 		return nil
 	}
 
-	changeType := diff.ChangeType()
-
 	// If we're just creating the resource, we shouldn't alter the
 	// Diff at all
-	if changeType == DiffCreate {
+	if diff.ChangeType() == DiffCreate {
 		return nil
 	}
 
+	// If the resource has been tainted then we don't process ignore changes
+	// since we MUST recreate the entire resource.
+	if diff.GetDestroyTainted() {
+		return nil
+	}
+
+	attrs := diff.CopyAttributes()
+
+	// get the complete set of keys we want to ignore
 	ignorableAttrKeys := make(map[string]bool)
 	for _, ignoredKey := range ignoreChanges {
-		for k := range diff.CopyAttributes() {
+		for k := range attrs {
 			if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
 				ignorableAttrKeys[k] = true
 			}
 		}
 	}
 
-	// If we are replacing the resource, then we expect there to be a bunch of
-	// extraneous attribute diffs we need to filter out for the other
-	// non-requires-new attributes going from "" -> "configval" or "" ->
-	// "<computed>". Filtering these out allows us to see if we might be able to
-	// skip this diff altogether.
-	if changeType == DiffDestroyCreate {
-		for k, v := range diff.CopyAttributes() {
-			if v.Empty() || v.NewComputed {
-				ignorableAttrKeys[k] = true
-			}
-		}
-
-		// Here we emulate the implementation of diff.RequiresNew() with one small
-		// tweak, we ignore the "id" attribute diff that gets added by EvalDiff,
-		// since that was added in reaction to RequiresNew being true.
-		requiresNewAfterIgnores := false
-		for k, v := range diff.CopyAttributes() {
+	// If the resource was being destroyed, check to see if we can ignore the
+	// reason for it being destroyed.
+	if diff.GetDestroy() {
+		for k, v := range attrs {
 			if k == "id" {
+				// id will always be changed if we intended to replace this instance
 				continue
 			}
-			if _, ok := ignorableAttrKeys[k]; ok {
+			if v.Empty() || v.NewComputed {
 				continue
 			}
-			if v.RequiresNew == true {
-				requiresNewAfterIgnores = true
+
+			// If any RequiresNew attribute isn't ignored, we need to keep the diff
+			// as-is to be able to replace the resource.
+			if v.RequiresNew && !ignorableAttrKeys[k] {
+				return nil
 			}
 		}
 
-		// If we still require resource replacement after ignores, we
-		// can't touch the diff, as all of the attributes will be
-		// required to process the replacement.
-		if requiresNewAfterIgnores {
-			return nil
+		// Now that we know that we aren't replacing the instance, we can filter
+		// out all the empty and computed attributes. There may be a bunch of
+		// extraneous attribute diffs for the other non-requires-new attributes
+		// going from "" -> "configval" or "" -> "<computed>".
+		// We must make sure any flatmapped containers are filtered (or not) as a
+		// whole.
+		containers := groupContainers(diff)
+		keep := map[string]bool{}
+		for _, v := range containers {
+			if v.keepDiff(ignorableAttrKeys) {
+				// At least one key has changes, so list all the sibling keys
+				// to keep in the diff
+				for k := range v {
+					keep[k] = true
+					// this key may have been added by the user to ignore, but
+					// if it's a subkey in a container, we need to un-ignore it
+					// to keep the complete container.
+					delete(ignorableAttrKeys, k)
+				}
+			}
 		}
 
-		// Here we undo the two reactions to RequireNew in EvalDiff - the "id"
-		// attribute diff and the Destroy boolean field
-		log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " +
-			"because after ignore_changes, this diff no longer requires replacement")
-		diff.DelAttribute("id")
-		diff.SetDestroy(false)
+		for k, v := range attrs {
+			if (v.Empty() || v.NewComputed) && !keep[k] {
+				ignorableAttrKeys[k] = true
+			}
+		}
 	}
 
+	// Here we undo the two reactions to RequireNew in EvalDiff - the "id"
+	// attribute diff and the Destroy boolean field
+	log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " +
+		"because after ignore_changes, this diff no longer requires replacement")
+	diff.DelAttribute("id")
+	diff.SetDestroy(false)
+
+	// If we didn't hit any of our early exit conditions, we can filter the diff.
 	for k := range ignorableAttrKeys {
 		log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s",
@@ -247,6 +293,53 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
 	return nil
 }
 
+// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
+type flatAttrDiff map[string]*ResourceAttrDiff
+
+// we need to keep all keys if any of them have a diff that's not ignored
+func (f flatAttrDiff) keepDiff(ignoreChanges map[string]bool) bool {
+	for k, v := range f {
+		ignore := false
+		for attr := range ignoreChanges {
+			if strings.HasPrefix(k, attr) {
+				ignore = true
+			}
+		}
+
+		if !v.Empty() && !v.NewComputed && !ignore {
+			return true
+		}
+	}
+	return false
+}
+
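The flatmap convention these helpers lean on is that a container attribute is stored as a count key plus sibling keys: ".#" for lists and sets, ".~#" for computed sets, ".%" for maps. Grouping by the prefix of the count key is what lets keepDiff treat a container as all-or-nothing. A standalone sketch of that grouping over plain strings (illustrative, simplified from groupContainers below):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// countKey matches the count/index keys that terminate a flatmapped
// container: ".#" (lists/sets), ".~#" (computed sets), ".%" (maps).
var countKey = regexp.MustCompile(`\.(#|~#|%)$`)

// groupByContainer clusters flatmap keys under their container prefix,
// the same idea as groupContainers.
func groupByContainer(attrs map[string]string) map[string]map[string]string {
	containers := map[string]map[string]string{}
	for k := range attrs {
		if countKey.MatchString(k) {
			// keep the trailing dot so "tags." prefixes all sub-keys
			containers[k[:strings.LastIndex(k, ".")+1]] = map[string]string{}
		}
	}
	for prefix, group := range containers {
		for k, v := range attrs {
			if strings.HasPrefix(k, prefix) {
				group[k] = v
			}
		}
	}
	return containers
}

func main() {
	attrs := map[string]string{
		"tags.%":    "2",
		"tags.env":  "prod",
		"tags.team": "infra",
		"ami":       "ami-123456",
	}
	fmt.Println(groupByContainer(attrs)) // one "tags." group; "ami" excluded
}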
+// sets, lists and maps need to be compared for diff inclusion as a whole, so
+// group the flatmapped keys together for easier comparison.
+func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
+	isIndex := multiVal.MatchString
+	containers := map[string]flatAttrDiff{}
+	attrs := d.CopyAttributes()
+	// we need to loop once to find the index key
+	for k := range attrs {
+		if isIndex(k) {
+			// add the key, always including the final dot to fully qualify it
+			containers[k[:len(k)-1]] = flatAttrDiff{}
+		}
+	}
+
+	// loop again to find all the sub keys
+	for prefix, values := range containers {
+		for k, attrDiff := range attrs {
+			// we include the index value as well, since it could be part of the diff
+			if strings.HasPrefix(k, prefix) {
+				values[k] = attrDiff
+			}
+		}
+	}
+
+	return containers
+}
+
 // EvalDiffDestroy is an EvalNode implementation that returns a plain
 // destroy diff.
 type EvalDiffDestroy struct {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
index 6825ff59..6a78a6bb 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
@@ -1,17 +1,49 @@
 package terraform
 
-import "github.com/hashicorp/terraform/config"
+import (
+	"log"
+
+	"github.com/hashicorp/terraform/config"
+)
 
 // EvalInterpolate is an EvalNode implementation that takes a raw
 // configuration and interpolates it.
 type EvalInterpolate struct {
-	Config   *config.RawConfig
-	Resource *Resource
-	Output   **ResourceConfig
+	Config        *config.RawConfig
+	Resource      *Resource
+	Output        **ResourceConfig
+	ContinueOnErr bool
 }
 
 func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
 	rc, err := ctx.Interpolate(n.Config, n.Resource)
+	if err != nil {
+		if n.ContinueOnErr {
+			log.Printf("[WARN] Interpolation %q failed: %s", n.Config.Key, err)
+			return nil, EvalEarlyExitError{}
+		}
+		return nil, err
+	}
+
+	if n.Output != nil {
+		*n.Output = rc
+	}
+
+	return nil, nil
+}
+
+// EvalInterpolateProvider is an EvalNode implementation that takes a
+// ProviderConfig and interpolates it. Provider configurations are the only
+// "inherited" type of configuration we have, and the original raw config may
+// have a different interpolation scope.
+type EvalInterpolateProvider struct {
+	Config   *config.ProviderConfig
+	Resource *Resource
+	Output   **ResourceConfig
+}
+
+func (n *EvalInterpolateProvider) Eval(ctx EvalContext) (interface{}, error) {
+	rc, err := ctx.InterpolateProvider(n.Config, n.Resource)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go
new file mode 100644
index 00000000..a4b2a505
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go
@@ -0,0 +1,86 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/config"
+)
+
+// EvalLocal is an EvalNode implementation that evaluates the
+// expression for a local value and writes it into a transient part of
+// the state.
+type EvalLocal struct {
+	Name  string
+	Value *config.RawConfig
+}
+
+func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) {
+	cfg, err := ctx.Interpolate(n.Value, nil)
+	if err != nil {
+		return nil, fmt.Errorf("local.%s: %s", n.Name, err)
+	}
+
+	state, lock := ctx.State()
+	if state == nil {
+		return nil, fmt.Errorf("cannot write local value to nil state")
+	}
+
+	// Get a write lock so we can access the state
+	lock.Lock()
+	defer lock.Unlock()
+
+	// Look for the module state. If we don't have one, create it.
+	mod := state.ModuleByPath(ctx.Path())
+	if mod == nil {
+		mod = state.AddModule(ctx.Path())
+	}
+
+	// Get the value from the config
+	var valueRaw interface{} = config.UnknownVariableValue
+	if cfg != nil {
+		var ok bool
+		valueRaw, ok = cfg.Get("value")
+		if !ok {
+			valueRaw = ""
+		}
+		if cfg.IsComputed("value") {
+			valueRaw = config.UnknownVariableValue
+		}
+	}
+
+	if mod.Locals == nil {
+		// initialize
+		mod.Locals = map[string]interface{}{}
+	}
+	mod.Locals[n.Name] = valueRaw
+
+	return nil, nil
+}
+
+// EvalDeleteLocal is an EvalNode implementation that deletes a Local value
+// from the state. Locals aren't persisted, but we don't need to evaluate them
+// during destroy.
+type EvalDeleteLocal struct {
+	Name string
+}
+
+func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) {
+	state, lock := ctx.State()
+	if state == nil {
+		return nil, nil
+	}
+
+	// Get a write lock so we can access this instance
+	lock.Lock()
+	defer lock.Unlock()
+
+	// Look for the module state. If we don't have one, create it.
+	mod := state.ModuleByPath(ctx.Path())
+	if mod == nil {
+		return nil, nil
+	}
+
+	delete(mod.Locals, n.Name)
+
+	return nil, nil
+}
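EvalLocal above just interpolates an expression and stashes the result in a per-module map, with config.UnknownVariableValue standing in for not-yet-computed values. A standalone sketch of that transient store; the sentinel value shown matches the one the config package uses, but any unique sentinel works for the sketch:

package main

import "fmt"

// unknown mirrors config.UnknownVariableValue, the sentinel for
// "computed, value not known yet".
const unknown = "74D93920-ED26-11E3-AC10-0800200C9A66"

type moduleState struct {
	Locals map[string]interface{}
}

// setLocal lazily initializes the map, like EvalLocal does.
func (m *moduleState) setLocal(name string, v interface{}, computed bool) {
	if m.Locals == nil {
		m.Locals = map[string]interface{}{}
	}
	if computed {
		v = unknown
	}
	m.Locals[name] = v
}

func main() {
	var mod moduleState
	mod.setLocal("region", "us-east-1", false)
	mod.setLocal("instance_ip", nil, true) // not known until apply
	fmt.Println(mod.Locals)
}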
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
index bee4f108..a8346276 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
@@ -41,15 +41,16 @@ type EvalWriteOutput struct {
 	Name      string
 	Sensitive bool
 	Value     *config.RawConfig
+	// ContinueOnErr allows interpolation to fail during Input
+	ContinueOnErr bool
 }
 
 // TODO: test
 func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
+	// This has to run before we have a state lock, since interpolation also
+	// reads the state
 	cfg, err := ctx.Interpolate(n.Value, nil)
-	if err != nil {
-		// Log error but continue anyway
-		log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err)
-	}
+	// handle the error after we have the module from the state
 
 	state, lock := ctx.State()
 	if state == nil {
@@ -59,13 +60,27 @@ func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
 	// Get a write lock so we can access this instance
 	lock.Lock()
 	defer lock.Unlock()
-
 	// Look for the module state. If we don't have one, create it.
 	mod := state.ModuleByPath(ctx.Path())
 	if mod == nil {
 		mod = state.AddModule(ctx.Path())
 	}
 
+	// handling the interpolation error
+	if err != nil {
+		if n.ContinueOnErr || flagWarnOutputErrors {
+			log.Printf("[ERROR] Output interpolation %q failed: %s", n.Name, err)
+			// if we're continuing, make sure the output is included, and
+			// marked as unknown
+			mod.Outputs[n.Name] = &OutputState{
+				Type:  "string",
+				Value: config.UnknownVariableValue,
+			}
+			return nil, EvalEarlyExitError{}
+		}
+		return nil, err
+	}
+
 	// Get the value from the config
 	var valueRaw interface{} = config.UnknownVariableValue
 	if cfg != nil {
@@ -98,6 +113,19 @@ func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
 			Sensitive: n.Sensitive,
 			Value:     valueTyped,
 		}
+	case []map[string]interface{}:
+		// an HCL map is multi-valued, so if this was read out of a config the
+		// map may still be in a slice.
+		if len(valueTyped) == 1 {
+			mod.Outputs[n.Name] = &OutputState{
+				Type:      "map",
+				Sensitive: n.Sensitive,
+				Value:     valueTyped[0],
+			}
+			break
+		}
+		return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
+			n.Name, valueTyped, len(valueTyped))
 	default:
 		return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
 	}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
index 61efcc23..61f6ff94 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -6,17 +6,6 @@ import (
 	"github.com/hashicorp/terraform/config"
 )
 
-// EvalSetProviderConfig sets the parent configuration for a provider
-// without configuring that provider, validating it, etc.
-type EvalSetProviderConfig struct {
-	Provider string
-	Config   **ResourceConfig
-}
-
-func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
-	return nil, ctx.SetProviderConfig(n.Provider, *n.Config)
-}
-
 // EvalBuildProviderConfig outputs a *ResourceConfig that is properly
 // merged with parents and inputs on top of what is configured in the file.
 type EvalBuildProviderConfig struct {
@@ -28,20 +17,19 @@ type EvalBuildProviderConfig struct {
 func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
 	cfg := *n.Config
 
-	// If we have a configuration set, then merge that in
+	// If we have an Input configuration set, then merge that in
 	if input := ctx.ProviderInput(n.Provider); input != nil {
+		// "input" is a map of the subset of config values that were known
+		// during the input walk, set by EvalInputProvider. Note that
+		// in particular it does *not* include attributes that had
+		// computed values at input time; those appear *only* in
+		// "cfg" here.
 		rc, err := config.NewRawConfig(input)
 		if err != nil {
 			return nil, err
 		}
 
-		merged := cfg.raw.Merge(rc)
-		cfg = NewResourceConfig(merged)
-	}
-
-	// Get the parent configuration if there is one
-	if parent := ctx.ParentProviderConfig(n.Provider); parent != nil {
-		merged := cfg.raw.Merge(parent.raw)
+		merged := rc.Merge(cfg.raw)
 		cfg = NewResourceConfig(merged)
 	}
 
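The direction of that merge matters: rc.Merge(cfg.raw) lays the cached input values down first and the file's own configuration on top, so an explicit provider block always beats a remembered prompt answer. A standalone sketch of the same precedence with plain maps; this is illustrative, not the config package's API:

package main

import "fmt"

// merge returns base overlaid with override, the semantics of a
// RawConfig-style Merge where the argument's keys win.
func merge(base, override map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(base)+len(override))
	for k, v := range base {
		out[k] = v
	}
	for k, v := range override {
		out[k] = v
	}
	return out
}

func main() {
	input := map[string]interface{}{"region": "us-east-1", "profile": "dev"}
	config := map[string]interface{}{"region": "eu-west-1"}

	// input first, config on top: the explicit config wins for "region",
	// while the cached input still supplies "profile".
	fmt.Println(merge(input, config))
	// map[profile:dev region:eu-west-1]
}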
@@ -64,11 +52,12 @@ func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
 // and returns nothing. The provider can be retrieved again with the
 // EvalGetProvider node.
 type EvalInitProvider struct {
-	Name string
+	TypeName string
+	Name     string
 }
 
 func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
-	return ctx.InitProvider(n.Name)
+	return ctx.InitProvider(n.TypeName, n.Name)
 }
 
 // EvalCloseProvider is an EvalNode implementation that closes provider
@@ -111,12 +100,8 @@ type EvalInputProvider struct {
 }
 
 func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
-	// If we already configured this provider, then don't do this again
-	if v := ctx.ProviderInput(n.Name); v != nil {
-		return nil, nil
-	}
-
 	rc := *n.Config
+	orig := rc.DeepCopy()
 
 	// Wrap the input into a namespace
 	input := &PrefixUIInput{
@@ -133,13 +118,20 @@ func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
 			"Error configuring %s: %s", n.Name, err)
 	}
 
-	// Set the input that we received so that child modules don't attempt
-	// to ask for input again.
+	// We only store values that have changed through Input.
+	// The goal is to cache input responses, not to provide a complete
+	// config for other providers.
+	confMap := make(map[string]interface{})
 	if config != nil && len(config.Config) > 0 {
-		ctx.SetProviderInput(n.Name, config.Config)
-	} else {
-		ctx.SetProviderInput(n.Name, map[string]interface{}{})
+		// any values that weren't in the original ResourceConfig will be cached
+		for k, v := range config.Config {
+			if _, ok := orig.Config[k]; !ok {
+				confMap[k] = v
+			}
+		}
 	}
 
+	ctx.SetProviderInput(n.Name, confMap)
+
 	return nil, nil
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
index aeb2ebae..fb85a284 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -47,14 +47,17 @@ func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
 			diff = new(InstanceDiff)
 		}
 
-		// id is always computed, because we're always "creating a new resource"
+		// if id isn't explicitly set then it's always computed, because we're
+		// always "creating a new resource".
 		diff.init()
-		diff.SetAttribute("id", &ResourceAttrDiff{
-			Old:         "",
-			NewComputed: true,
-			RequiresNew: true,
-			Type:        DiffAttrOutput,
-		})
+		if _, ok := diff.Attributes["id"]; !ok {
+			diff.SetAttribute("id", &ResourceAttrDiff{
+				Old:         "",
+				NewComputed: true,
+				RequiresNew: true,
+				Type:        DiffAttrOutput,
+			})
+		}
 	}
 
 	err = ctx.Hook(func(h Hook) (HookAction, error) {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
index 6c3c6a62..82d81782 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
@@ -7,6 +7,10 @@ type EvalSequence struct {
 
 func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) {
 	for _, n := range n.Nodes {
+		if n == nil {
+			continue
+		}
+
 		if _, err := EvalRaw(n, ctx); err != nil {
 			return nil, err
 		}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
index 35e7c2f2..11826907 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -1,6 +1,8 @@
 package terraform
 
-import "fmt"
+import (
+	"fmt"
+)
 
 // EvalReadState is an EvalNode implementation that reads the
 // primary InstanceState for a specific resource out of the state.
@@ -107,9 +109,10 @@ type EvalUpdateStateHook struct{}
 func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
 	state, lock := ctx.State()
 
-	// Get a read lock so it doesn't change while we're calling this
-	lock.RLock()
-	defer lock.RUnlock()
+	// Get a full lock. Even calling something like WriteState can modify
+	// (prune) the state, so we need the full lock.
+	lock.Lock()
+	defer lock.Unlock()
 
 	// Call the hook
 	err := ctx.Hook(func(h Hook) (HookAction, error) {
@@ -211,37 +214,6 @@ func writeInstanceToState(
 	return nil, nil
 }
 
-// EvalClearPrimaryState is an EvalNode implementation that clears the primary
-// instance from a resource state.
-type EvalClearPrimaryState struct {
-	Name string
-}
-
-func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) {
-	state, lock := ctx.State()
-
-	// Get a read lock so we can access this instance
-	lock.RLock()
-	defer lock.RUnlock()
-
-	// Look for the module state. If we don't have one, then it doesn't matter.
-	mod := state.ModuleByPath(ctx.Path())
-	if mod == nil {
-		return nil, nil
-	}
-
-	// Look for the resource state. If we don't have one, then it is okay.
-	rs := mod.Resources[n.Name]
-	if rs == nil {
-		return nil, nil
-	}
-
-	// Clear primary from the resource state
-	rs.Primary = nil
-
-	return nil, nil
-}
-
 // EvalDeposeState is an EvalNode implementation that takes the primary
 // out of a state and makes it Deposed. This is done at the beginning of
 // create-before-destroy calls so that the create can create while preserving
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
index 9ae221aa..3e5a84ce 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 
 	"github.com/hashicorp/terraform/config"
+	"github.com/mitchellh/mapstructure"
 )
 
 // EvalValidateError is the error structure returned if there were
@@ -42,6 +43,7 @@ func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
 		c[n.Resource.RawCount.Key] = "1"
 		count = 1
 	}
+	err = nil
 
 	if count < 0 {
 		errs = append(errs, fmt.Errorf(
@@ -84,12 +86,31 @@ func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
 type EvalValidateProvisioner struct {
 	Provisioner *ResourceProvisioner
 	Config      **ResourceConfig
+	ConnConfig  **ResourceConfig
 }
 
 func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
 	provisioner := *n.Provisioner
 	config := *n.Config
-	warns, errs := provisioner.Validate(config)
+	var warns []string
+	var errs []error
+
+	{
+		// Validate the provisioner's own config first
+		w, e := provisioner.Validate(config)
+		warns = append(warns, w...)
+		errs = append(errs, e...)
+	}
+
+	{
+		// Now validate the connection config, which might either be from
+		// the provisioner block itself or inherited from the resource's
+		// shared connection info.
+		w, e := n.validateConnConfig(*n.ConnConfig)
+		warns = append(warns, w...)
+		errs = append(errs, e...)
+	}
+
 	if len(warns) == 0 && len(errs) == 0 {
 		return nil, nil
 	}
@@ -100,6 +121,68 @@ func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
 	}
 }
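The trick validateConnConfig uses below is decoding into a superset struct purely for the side effect of mapstructure's Metadata.Unused, which lists every input key that matched no field, i.e. probable typos. A cut-down, runnable sketch of the same technique against the real mitchellh/mapstructure API; the struct here is illustrative, not the full superset:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// knownKeys is a stand-in for connConfigSuperset: every field is
// interface{} because values may still be uninterpolated strings.
type knownKeys struct {
	Type interface{} `mapstructure:"type"`
	User interface{} `mapstructure:"user"`
	Host interface{} `mapstructure:"host"`
}

func main() {
	raw := map[string]interface{}{
		"type":  "ssh",
		"user":  "admin",
		"hostt": "10.0.0.5", // typo: not a key any communicator accepts
	}

	var md mapstructure.Metadata
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Metadata: &md,
		Result:   &knownKeys{}, // result is discarded; only Unused matters
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(raw); err != nil {
		panic(err)
	}

	for _, k := range md.Unused {
		fmt.Printf("unknown 'connection' argument %q\n", k) // catches "hostt"
	}
}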
+func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) {
+	// We can't comprehensively validate the connection config since its
+	// final structure is decided by the communicator and we can't instantiate
+	// that until we have a complete instance state. However, we *can* catch
+	// configuration keys that are not valid for *any* communicator, catching
+	// typos early rather than waiting until we actually try to run one of
+	// the resource's provisioners.
+
+	type connConfigSuperset struct {
+		// All attribute types are interface{} here because at this point we
+		// may still have unresolved interpolation expressions, which will
+		// appear as strings regardless of the final goal type.
+
+		Type       interface{} `mapstructure:"type"`
+		User       interface{} `mapstructure:"user"`
+		Password   interface{} `mapstructure:"password"`
+		Host       interface{} `mapstructure:"host"`
+		Port       interface{} `mapstructure:"port"`
+		Timeout    interface{} `mapstructure:"timeout"`
+		ScriptPath interface{} `mapstructure:"script_path"`
+
+		// For type=ssh only (enforced in ssh communicator)
+		PrivateKey        interface{} `mapstructure:"private_key"`
+		HostKey           interface{} `mapstructure:"host_key"`
+		Agent             interface{} `mapstructure:"agent"`
+		BastionHost       interface{} `mapstructure:"bastion_host"`
+		BastionHostKey    interface{} `mapstructure:"bastion_host_key"`
+		BastionPort       interface{} `mapstructure:"bastion_port"`
+		BastionUser       interface{} `mapstructure:"bastion_user"`
+		BastionPassword   interface{} `mapstructure:"bastion_password"`
+		BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
+		AgentIdentity     interface{} `mapstructure:"agent_identity"`
+
+		// For type=winrm only (enforced in winrm communicator)
+		HTTPS    interface{} `mapstructure:"https"`
+		Insecure interface{} `mapstructure:"insecure"`
+		NTLM     interface{} `mapstructure:"use_ntlm"`
+		CACert   interface{} `mapstructure:"cacert"`
+	}
+
+	var metadata mapstructure.Metadata
+	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+		Metadata: &metadata,
+		Result:   &connConfigSuperset{}, // result is disregarded; we only care about unused keys
+	})
+	if err != nil {
+		// should never happen
+		errs = append(errs, err)
+		return
+	}
+
+	if err := decoder.Decode(connConfig.Config); err != nil {
+		errs = append(errs, err)
+		return
+	}
+
+	for _, attrName := range metadata.Unused {
+		errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName))
+	}
+	return
+}
+
 // EvalValidateResource is an EvalNode implementation that validates
 // the configuration of a resource.
 type EvalValidateResource struct {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
new file mode 100644
index 00000000..ae4436a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -0,0 +1,74 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/config"
+)
+
+// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
+// a configuration doesn't contain a reference to the resource itself.
+//
+// This must be done prior to interpolating configuration in order to avoid
+// any infinite loop scenarios.
+type EvalValidateResourceSelfRef struct {
+	Addr   **ResourceAddress
+	Config **config.RawConfig
+}
+
+func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
+	addr := *n.Addr
+	conf := *n.Config
+
+	// Go through the variables and find self references
+	var errs []error
+	for k, raw := range conf.Variables {
+		rv, ok := raw.(*config.ResourceVariable)
+		if !ok {
+			continue
+		}
+
+		// Build an address from the variable
+		varAddr := &ResourceAddress{
+			Path:         addr.Path,
+			Mode:         rv.Mode,
+			Type:         rv.Type,
+			Name:         rv.Name,
+			Index:        rv.Index,
+			InstanceType: TypePrimary,
+		}
+
+		// If the variable access is a multi-access (*), then we just
+		// match the index so that we'll match our own addr if everything
+		// else matches.
+		if rv.Multi && rv.Index == -1 {
+			varAddr.Index = addr.Index
+		}
+
+		// This is a weird thing where ResourceAddress has index "-1" when
+		// index isn't set at all. This means index "0" for resource access.
+ // So, if we have this scenario, just set our varAddr to -1 so it + // matches. + if addr.Index == -1 && varAddr.Index == 0 { + varAddr.Index = -1 + } + + // If the addresses match, then this is a self reference + if varAddr.Equals(addr) && varAddr.Index == addr.Index { + errs = append(errs, fmt.Errorf( + "%s: self reference not allowed: %q", + addr, k)) + } + } + + // If no errors, no errors! + if len(errs) == 0 { + return nil, nil + } + + // Wrap the errors in the proper wrapper so we can handle validation + // formatting properly upstream. + return nil, &EvalValidateError{ + Errors: errs, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go index 47bd2ea2..e39a33c2 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go @@ -114,7 +114,6 @@ type EvalVariableBlock struct { VariableValues map[string]interface{} } -// TODO: test func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) { // Clear out the existing mapping for k, _ := range n.VariableValues { @@ -124,22 +123,27 @@ func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) { // Get our configuration rc := *n.Config for k, v := range rc.Config { - var vString string - if err := hilmapstructure.WeakDecode(v, &vString); err == nil { - n.VariableValues[k] = vString - continue - } - - var vMap map[string]interface{} - if err := hilmapstructure.WeakDecode(v, &vMap); err == nil { - n.VariableValues[k] = vMap - continue - } + vKind := reflect.ValueOf(v).Type().Kind() - var vSlice []interface{} - if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil { - n.VariableValues[k] = vSlice - continue + switch vKind { + case reflect.Slice: + var vSlice []interface{} + if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil { + n.VariableValues[k] = vSlice + continue + } + case reflect.Map: + var vMap map[string]interface{} + if err := hilmapstructure.WeakDecode(v, &vMap); err == nil { + n.VariableValues[k] = vMap + continue + } + default: + var vString string + if err := hilmapstructure.WeakDecode(v, &vString); err == nil { + n.VariableValues[k] = vString + continue + } } return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k) @@ -174,9 +178,15 @@ func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error { // Otherwise find the correct point in the tree and then set to unknown var current interface{} = n.VariableValues[pathComponents[0]] for i := 1; i < len(pathComponents); i++ { - switch current.(type) { - case []interface{}, []map[string]interface{}: - tCurrent := current.([]interface{}) + switch tCurrent := current.(type) { + case []interface{}: + index, err := strconv.Atoi(pathComponents[i]) + if err != nil { + return fmt.Errorf("Cannot convert %s to slice index in path %s", + pathComponents[i], path) + } + current = tCurrent[index] + case []map[string]interface{}: index, err := strconv.Atoi(pathComponents[i]) if err != nil { return fmt.Errorf("Cannot convert %s to slice index in path %s", @@ -184,7 +194,6 @@ func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error { } current = tCurrent[index] case map[string]interface{}: - tCurrent := current.(map[string]interface{}) if val, hasVal := tCurrent[pathComponents[i]]; hasVal { current = val continue diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go 
b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go index 00392efe..0c3da48f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go @@ -1,17 +1,24 @@ package terraform import ( + "strings" + "github.com/hashicorp/terraform/config" ) // ProviderEvalTree returns the evaluation tree for initializing and // configuring providers. -func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { +func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) EvalNode { var provider ResourceProvider var resourceConfig *ResourceConfig + typeName := strings.SplitN(n.NameValue, ".", 2)[0] + seq := make([]EvalNode, 0, 5) - seq = append(seq, &EvalInitProvider{Name: n}) + seq = append(seq, &EvalInitProvider{ + TypeName: typeName, + Name: n.Name(), + }) // Input stuff seq = append(seq, &EvalOpFilter{ @@ -19,20 +26,20 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n, + Name: n.Name(), Output: &provider, }, - &EvalInterpolate{ + &EvalInterpolateProvider{ Config: config, Output: &resourceConfig, }, &EvalBuildProviderConfig{ - Provider: n, + Provider: n.NameValue, Config: &resourceConfig, Output: &resourceConfig, }, &EvalInputProvider{ - Name: n, + Name: n.NameValue, Provider: &provider, Config: &resourceConfig, }, @@ -45,15 +52,15 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n, + Name: n.Name(), Output: &provider, }, - &EvalInterpolate{ + &EvalInterpolateProvider{ Config: config, Output: &resourceConfig, }, &EvalBuildProviderConfig{ - Provider: n, + Provider: n.NameValue, Config: &resourceConfig, Output: &resourceConfig, }, @@ -61,10 +68,6 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Provider: &provider, Config: &resourceConfig, }, - &EvalSetProviderConfig{ - Provider: n, - Config: &resourceConfig, - }, }, }, }) @@ -75,22 +78,18 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n, + Name: n.Name(), Output: &provider, }, - &EvalInterpolate{ + &EvalInterpolateProvider{ Config: config, Output: &resourceConfig, }, &EvalBuildProviderConfig{ - Provider: n, + Provider: n.NameValue, Config: &resourceConfig, Output: &resourceConfig, }, - &EvalSetProviderConfig{ - Provider: n, - Config: &resourceConfig, - }, }, }, }) @@ -102,7 +101,7 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalConfigProvider{ - Provider: n, + Provider: n.Name(), Config: &resourceConfig, }, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/features.go b/vendor/github.com/hashicorp/terraform/terraform/features.go new file mode 100644 index 00000000..97c77bdb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/features.go @@ -0,0 +1,7 @@ +package terraform + +import "os" + +// This file holds feature flags for the next release + +var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go index e75d9366..735ec4ec 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go @@ -3,8 +3,8 @@ package terraform import 
( "fmt" "log" + "runtime/debug" "strings" - "sync" "github.com/hashicorp/terraform/dag" ) @@ -16,8 +16,7 @@ const RootModuleName = "root" var RootModulePath = []string{RootModuleName} // Graph represents the graph that Terraform uses to represent resources -// and their dependencies. Each graph represents only one module, but it -// can contain further modules, which themselves have their own graph. +// and their dependencies. type Graph struct { // Graph is the actual DAG. This is embedded so you can call the DAG // methods directly. @@ -28,121 +27,14 @@ type Graph struct { // RootModuleName Path []string - // dependableMap is a lookaside table for fast lookups for connecting - // dependencies by their GraphNodeDependable value to avoid O(n^3)-like - // situations and turn them into O(1) with respect to the number of new - // edges. - dependableMap map[string]dag.Vertex - - once sync.Once -} - -// Add is the same as dag.Graph.Add. -func (g *Graph) Add(v dag.Vertex) dag.Vertex { - g.once.Do(g.init) - - // Call upwards to add it to the actual graph - g.Graph.Add(v) - - // If this is a depend-able node, then store the lookaside info - if dv, ok := v.(GraphNodeDependable); ok { - for _, n := range dv.DependableName() { - g.dependableMap[n] = v - } - } - - return v -} - -// Remove is the same as dag.Graph.Remove -func (g *Graph) Remove(v dag.Vertex) dag.Vertex { - g.once.Do(g.init) - - // If this is a depend-able node, then remove the lookaside info - if dv, ok := v.(GraphNodeDependable); ok { - for _, n := range dv.DependableName() { - delete(g.dependableMap, n) - } - } - - // Call upwards to remove it from the actual graph - return g.Graph.Remove(v) -} - -// Replace is the same as dag.Graph.Replace -func (g *Graph) Replace(o, n dag.Vertex) bool { - // Go through and update our lookaside to point to the new vertex - for k, v := range g.dependableMap { - if v == o { - if _, ok := n.(GraphNodeDependable); ok { - g.dependableMap[k] = n - } else { - delete(g.dependableMap, k) - } - } - } - - return g.Graph.Replace(o, n) -} - -// ConnectDependent connects a GraphNodeDependent to all of its -// GraphNodeDependables. It returns the list of dependents it was -// unable to connect to. -func (g *Graph) ConnectDependent(raw dag.Vertex) []string { - v, ok := raw.(GraphNodeDependent) - if !ok { - return nil - } - - return g.ConnectTo(v, v.DependentOn()) -} - -// ConnectDependents goes through the graph, connecting all the -// GraphNodeDependents to GraphNodeDependables. This is safe to call -// multiple times. -// -// To get details on whether dependencies could be found/made, the more -// specific ConnectDependent should be used. -func (g *Graph) ConnectDependents() { - for _, v := range g.Vertices() { - if dv, ok := v.(GraphNodeDependent); ok { - g.ConnectDependent(dv) - } - } -} - -// ConnectFrom creates an edge by finding the source from a DependableName -// and connecting it to the specific vertex. -func (g *Graph) ConnectFrom(source string, target dag.Vertex) { - g.once.Do(g.init) - - if source := g.dependableMap[source]; source != nil { - g.Connect(dag.BasicEdge(source, target)) - } + // debugName is a name for reference in the debug output. This is usually + // to indicate what topmost builder was, and if this graph is a shadow or + // not. + debugName string } -// ConnectTo connects a vertex to a raw string of targets that are the -// result of DependableName, and returns the list of targets that are missing. 
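For readers unfamiliar with the dependableMap being removed above: it was a lookaside index from dependable name to vertex, so edges could be connected by name in O(1) rather than by scanning all vertices. A simplified sketch of that pattern, using stand-in types rather than the real dag API:

package main

import "fmt"

type vertex struct{ name string }

type graph struct {
	vertices []*vertex
	edges    [][2]*vertex
	byName   map[string]*vertex // the lookaside table
}

func newGraph() *graph {
	return &graph{byName: make(map[string]*vertex)}
}

func (g *graph) add(v *vertex) {
	g.vertices = append(g.vertices, v)
	g.byName[v.name] = v // maintain the index on every Add
}

// connectTo connects v to each named target and returns the names that
// could not be found, mirroring the removed Graph.ConnectTo.
func (g *graph) connectTo(v *vertex, targets []string) []string {
	var missing []string
	for _, t := range targets {
		if dest, ok := g.byName[t]; ok {
			g.edges = append(g.edges, [2]*vertex{v, dest})
		} else {
			missing = append(missing, t)
		}
	}
	return missing
}

func main() {
	g := newGraph()
	a, b := &vertex{"aws_instance.a"}, &vertex{"aws_instance.b"}
	g.add(a)
	g.add(b)
	fmt.Println(g.connectTo(a, []string{"aws_instance.b", "aws_instance.c"}))
	// Output: [aws_instance.c]
}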
-func (g *Graph) ConnectTo(v dag.Vertex, targets []string) []string { - g.once.Do(g.init) - - var missing []string - for _, t := range targets { - if dest := g.dependableMap[t]; dest != nil { - g.Connect(dag.BasicEdge(v, dest)) - } else { - missing = append(missing, t) - } - } - - return missing -} - -// Dependable finds the vertices in the graph that have the given dependable -// names and returns them. -func (g *Graph) Dependable(n string) dag.Vertex { - // TODO: do we need this? - return nil +func (g *Graph) DirectedGraph() dag.Grapher { + return &g.AcyclicGraph } // Walk walks the graph with the given walker for callbacks. The graph @@ -152,12 +44,6 @@ func (g *Graph) Walk(walker GraphWalker) error { return g.walk(walker) } -func (g *Graph) init() { - if g.dependableMap == nil { - g.dependableMap = make(map[string]dag.Vertex) - } -} - func (g *Graph) walk(walker GraphWalker) error { // The callbacks for enter/exiting a graph ctx := walker.EnterPath(g.Path) @@ -166,20 +52,59 @@ func (g *Graph) walk(walker GraphWalker) error { // Get the path for logs path := strings.Join(ctx.Path(), ".") + // Determine if our walker is a panic wrapper + panicwrap, ok := walker.(GraphWalkerPanicwrapper) + if !ok { + panicwrap = nil // just to be sure + } + + debugName := "walk-graph.json" + if g.debugName != "" { + debugName = g.debugName + "-" + debugName + } + + debugBuf := dbug.NewFileWriter(debugName) + g.SetDebugWriter(debugBuf) + defer debugBuf.Close() + // Walk the graph. var walkFn dag.WalkFunc walkFn = func(v dag.Vertex) (rerr error) { - log.Printf("[DEBUG] vertex %s.%s: walking", path, dag.VertexName(v)) + log.Printf("[TRACE] vertex '%s.%s': walking", path, dag.VertexName(v)) + g.DebugVisitInfo(v, g.debugName) + + // If we have a panic wrap GraphWalker and a panic occurs, recover + // and call that. We ensure the return value is an error, however, + // so that future nodes are not called. + defer func() { + // If no panicwrap, do nothing + if panicwrap == nil { + return + } + + // If no panic, do nothing + err := recover() + if err == nil { + return + } + + // Modify the return value to show the error + rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s", + dag.VertexName(v), err, debug.Stack()) + + // Call the panic wrapper + panicwrap.Panic(v, err) + }() walker.EnterVertex(v) - defer func() { walker.ExitVertex(v, rerr) }() + defer walker.ExitVertex(v, rerr) // vertexCtx is the context that we use when evaluating. This // is normally the context of our graph but can be overridden // with a GraphNodeSubPath impl. vertexCtx := ctx if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 { - vertexCtx = walker.EnterPath(pn.Path()) + vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path())) defer walker.ExitPath(pn.Path()) } @@ -193,7 +118,10 @@ func (g *Graph) walk(walker GraphWalker) error { // Allow the walker to change our tree if needed. Eval, // then callback with the output. 
- log.Printf("[DEBUG] vertex %s.%s: evaluating", path, dag.VertexName(v)) + log.Printf("[TRACE] vertex '%s.%s': evaluating", path, dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) + tree = walker.EnterEvalTree(v, tree) output, err := Eval(tree, vertexCtx) if rerr = walker.ExitEvalTree(v, output, err); rerr != nil { @@ -204,29 +132,35 @@ func (g *Graph) walk(walker GraphWalker) error { // If the node is dynamically expanded, then expand it if ev, ok := v.(GraphNodeDynamicExpandable); ok { log.Printf( - "[DEBUG] vertex %s.%s: expanding/walking dynamic subgraph", + "[TRACE] vertex '%s.%s': expanding/walking dynamic subgraph", path, dag.VertexName(v)) + + g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) + g, err := ev.DynamicExpand(vertexCtx) if err != nil { rerr = err return } - - // Walk the subgraph - if rerr = g.walk(walker); rerr != nil { - return + if g != nil { + // Walk the subgraph + if rerr = g.walk(walker); rerr != nil { + return + } } } // If the node has a subgraph, then walk the subgraph if sn, ok := v.(GraphNodeSubgraph); ok { log.Printf( - "[DEBUG] vertex %s.%s: walking subgraph", + "[TRACE] vertex '%s.%s': walking subgraph", path, dag.VertexName(v)) - if rerr = sn.Subgraph().walk(walker); rerr != nil { + g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) + + if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil { return } } @@ -236,20 +170,3 @@ func (g *Graph) walk(walker GraphWalker) error { return g.AcyclicGraph.Walk(walkFn) } - -// GraphNodeDependable is an interface which says that a node can be -// depended on (an edge can be placed between this node and another) according -// to the well-known name returned by DependableName. -// -// DependableName can return multiple names it is known by. -type GraphNodeDependable interface { - DependableName() []string -} - -// GraphNodeDependent is an interface which says that a node depends -// on another GraphNodeDependable by some name. By implementing this -// interface, Graph.ConnectDependents() can be called multiple times -// safely and efficiently. 
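The reworked walk above converts a vertex panic into an ordinary error via a deferred recover, so downstream vertices are not evaluated and the walk stops cleanly. A self-contained sketch of that shape, with a plain callback standing in for the real walker:

package main

import (
	"fmt"
	"runtime/debug"
)

// visit runs one vertex callback, converting any panic into an error
// (including a stack trace) instead of crashing the whole walk.
func visit(name string, fn func()) (rerr error) {
	defer func() {
		if err := recover(); err != nil {
			rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
				name, err, debug.Stack())
		}
	}()
	fn()
	return nil
}

func main() {
	err := visit("aws_instance.web", func() { panic("boom") })
	fmt.Println(err != nil) // true: the panic became an ordinary error
}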
-type GraphNodeDependent interface { - DependentOn() []string -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go index abc9aca3..6374bb90 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go @@ -1,9 +1,9 @@ package terraform import ( + "fmt" "log" - - "github.com/hashicorp/terraform/config/module" + "strings" ) // GraphBuilder is an interface that can be implemented and used with @@ -21,18 +21,48 @@ type GraphBuilder interface { type BasicGraphBuilder struct { Steps []GraphTransformer Validate bool + // Optional name to add to the graph debug log + Name string } func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { g := &Graph{Path: path} + + debugName := "graph.json" + if b.Name != "" { + debugName = b.Name + "-" + debugName + } + debugBuf := dbug.NewFileWriter(debugName) + g.SetDebugWriter(debugBuf) + defer debugBuf.Close() + for _, step := range b.Steps { - if err := step.Transform(g); err != nil { - return g, err + if step == nil { + continue + } + + stepName := fmt.Sprintf("%T", step) + dot := strings.LastIndex(stepName, ".") + if dot >= 0 { + stepName = stepName[dot+1:] } + debugOp := g.DebugOperation(stepName, "") + err := step.Transform(g) + + errMsg := "" + if err != nil { + errMsg = err.Error() + } + debugOp.End(errMsg) + log.Printf( "[TRACE] Graph after step %T:\n\n%s", step, g.StringWithNodeTypes()) + + if err != nil { + return g, err + } } // Validate the graph structure @@ -45,156 +75,3 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { return g, nil } - -// BuiltinGraphBuilder is responsible for building the complete graph that -// Terraform uses for execution. It is an opinionated builder that defines -// the step order required to build a complete graph as is used and expected -// by Terraform. -// -// If you require a custom graph, you'll have to build it up manually -// on your own by building a new GraphBuilder implementation. -type BuiltinGraphBuilder struct { - // Root is the root module of the graph to build. - Root *module.Tree - - // Diff is the diff. The proper module diffs will be looked up. - Diff *Diff - - // State is the global state. The proper module states will be looked - // up by graph path. - State *State - - // Providers is the list of providers supported. - Providers []string - - // Provisioners is the list of provisioners supported. - Provisioners []string - - // Targets is the user-specified list of resources to target. - Targets []string - - // Destroy is set to true when we're in a `terraform destroy` or a - // `terraform plan -destroy` - Destroy bool - - // Determines whether the GraphBuilder should perform graph validation before - // returning the Graph. Generally you want this to be done, except when you'd - // like to inspect a problematic graph. - Validate bool - - // Verbose is set to true when the graph should be built "worst case", - // skipping any prune steps. This is used for early cycle detection during - // Validate and for manual inspection via `terraform graph -verbose`. - Verbose bool -} - -// Build builds the graph according to the steps returned by Steps. 
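BasicGraphBuilder.Build above now tolerates nil steps and derives a short step name from the transformer's dynamic type for the debug log. A minimal sketch of that step-runner shape, with simplified stand-in types:

package main

import (
	"fmt"
	"strings"
)

type graph struct{ nodes []string }

type transformer interface {
	Transform(*graph) error
}

type addNode struct{ name string }

func (t *addNode) Transform(g *graph) error {
	g.nodes = append(g.nodes, t.name)
	return nil
}

func build(steps []transformer) (*graph, error) {
	g := &graph{}
	for _, step := range steps {
		if step == nil {
			continue // conditional steps may be nil; just skip them
		}
		// Derive a short step name from the type, as Build does.
		name := fmt.Sprintf("%T", step)
		if dot := strings.LastIndex(name, "."); dot >= 0 {
			name = name[dot+1:]
		}
		if err := step.Transform(g); err != nil {
			return g, fmt.Errorf("step %s: %w", name, err)
		}
	}
	return g, nil
}

func main() {
	g, err := build([]transformer{&addNode{"root"}, nil, &addNode{"provider.aws"}})
	fmt.Println(g.nodes, err) // [root provider.aws] <nil>
}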
-func (b *BuiltinGraphBuilder) Build(path []string) (*Graph, error) { - basic := &BasicGraphBuilder{ - Steps: b.Steps(path), - Validate: b.Validate, - } - - return basic.Build(path) -} - -// Steps returns the ordered list of GraphTransformers that must be executed -// to build a complete graph. -func (b *BuiltinGraphBuilder) Steps(path []string) []GraphTransformer { - steps := []GraphTransformer{ - // Create all our resources from the configuration and state - &ConfigTransformer{Module: b.Root}, - &OrphanTransformer{ - State: b.State, - Module: b.Root, - }, - - // Output-related transformations - &AddOutputOrphanTransformer{State: b.State}, - - // Provider-related transformations - &MissingProviderTransformer{Providers: b.Providers}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - - // Provisioner-related transformations - &MissingProvisionerTransformer{Provisioners: b.Provisioners}, - &ProvisionerTransformer{}, - - // Run our vertex-level transforms - &VertexTransformer{ - Transforms: []GraphVertexTransformer{ - // Expand any statically expanded nodes, such as module graphs - &ExpandTransform{ - Builder: b, - }, - }, - }, - - // Flatten stuff - &FlattenTransformer{}, - - // Make sure all the connections that are proxies are connected through - &ProxyTransformer{}, - } - - // If we're on the root path, then we do a bunch of other stuff. - // We don't do the following for modules. - if len(path) <= 1 { - steps = append(steps, - // Optionally reduces the graph to a user-specified list of targets and - // their dependencies. - &TargetsTransformer{Targets: b.Targets, Destroy: b.Destroy}, - - // Prune the providers. This must happen only once because flattened - // modules might depend on empty providers. - &PruneProviderTransformer{}, - - // Create the destruction nodes - &DestroyTransformer{FullDestroy: b.Destroy}, - b.conditional(&conditionalOpts{ - If: func() bool { return !b.Destroy }, - Then: &CreateBeforeDestroyTransformer{}, - }), - b.conditional(&conditionalOpts{ - If: func() bool { return !b.Verbose }, - Then: &PruneDestroyTransformer{Diff: b.Diff, State: b.State}, - }), - - // Remove the noop nodes - &PruneNoopTransformer{Diff: b.Diff, State: b.State}, - - // Insert nodes to close opened plugin connections - &CloseProviderTransformer{}, - &CloseProvisionerTransformer{}, - - // Perform the transitive reduction to make our graph a bit - // more sane if possible (it usually is possible). - &TransitiveReductionTransformer{}, - ) - } - - // Make sure we have a single root - steps = append(steps, &RootTransformer{}) - - // Remove nils - for i, s := range steps { - if s == nil { - steps = append(steps[:i], steps[i+1:]...) - } - } - - return steps -} - -type conditionalOpts struct { - If func() bool - Then GraphTransformer -} - -func (b *BuiltinGraphBuilder) conditional(o *conditionalOpts) GraphTransformer { - if o.If != nil && o.Then != nil && o.If() { - return o.Then - } - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go new file mode 100644 index 00000000..0c2b2332 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go @@ -0,0 +1,158 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// ApplyGraphBuilder implements GraphBuilder and is responsible for building +// a graph for applying a Terraform diff. 
+// +// Because the graph is built from the diff (vs. the config or state), +// this helps ensure that the apply-time graph doesn't modify any resources +// that aren't explicitly in the diff. There are other scenarios where the +// diff can be deviated, so this is just one layer of protection. +type ApplyGraphBuilder struct { + // Module is the root module for the graph to build. + Module *module.Tree + + // Diff is the diff to apply. + Diff *Diff + + // State is the current state + State *State + + // Providers is the list of providers supported. + Providers []string + + // Provisioners is the list of provisioners supported. + Provisioners []string + + // Targets are resources to target. This is only required to make sure + // unnecessary outputs aren't included in the apply graph. The plan + // builder successfully handles targeting resources. In the future, + // outputs should go into the diff so that this is unnecessary. + Targets []string + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Destroy, if true, represents a pure destroy operation + Destroy bool + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "ApplyGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *ApplyGraphBuilder) Steps() []GraphTransformer { + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeApplyableResource{ + NodeAbstractResource: a, + } + } + + steps := []GraphTransformer{ + // Creates all the nodes represented in the diff. + &DiffTransformer{ + Concrete: concreteResource, + + Diff: b.Diff, + Module: b.Module, + State: b.State, + }, + + // Create orphan output nodes + &OrphanOutputTransformer{Module: b.Module, State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Module: b.Module}, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // add providers + TransformProviders(b.Providers, concreteProvider, b.Module), + + // Destruction ordering + &DestroyEdgeTransformer{Module: b.Module, State: b.State}, + GraphTransformIf( + func() bool { return !b.Destroy }, + &CBDEdgeTransformer{Module: b.Module, State: b.State}, + ), + + // Provisioner-related transformations + &MissingProvisionerTransformer{Provisioners: b.Provisioners}, + &ProvisionerTransformer{}, + + // Add root variables + &RootVariableTransformer{Module: b.Module}, + + // Add the local values + &LocalTransformer{Module: b.Module}, + + // Add the outputs + &OutputTransformer{Module: b.Module}, + + // Add module variables + &ModuleVariableTransformer{Module: b.Module}, + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Module: b.Module, State: b.State}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Handle destroy time transformations for output and local values. + // Reverse the edges from outputs and locals, so that + // interpolations don't fail during destroy. + // Create a destroy node for outputs to remove them from the state. + // Prune unreferenced values, which may have interpolations that can't + // be resolved. 
+ GraphTransformIf( + func() bool { return b.Destroy }, + GraphTransformMulti( + &DestroyValueReferenceTransformer{}, + &DestroyOutputTransformer{}, + &PruneUnusedValuesTransformer{}, + ), + ), + + // Add the node to fix the state count boundaries + &CountBoundaryTransformer{}, + + // Target + &TargetsTransformer{Targets: b.Targets}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). + steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go new file mode 100644 index 00000000..014b348e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go @@ -0,0 +1,67 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for +// planning a pure-destroy. +// +// Planning a pure destroy operation is simple because we can ignore most +// ordering configuration and simply reverse the state. +type DestroyPlanGraphBuilder struct { + // Module is the root module for the graph to build. + Module *module.Tree + + // State is the current state + State *State + + // Targets are resources to target + Targets []string + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "DestroyPlanGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodePlanDestroyableResource{ + NodeAbstractResource: a, + } + } + + steps := []GraphTransformer{ + // Creates all the nodes represented in the state. + &StateTransformer{ + Concrete: concreteResource, + State: b.State, + }, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Module: b.Module}, + + // Destruction ordering. We require this only so that + // targeting below will prune the correct things. + &DestroyEdgeTransformer{Module: b.Module, State: b.State}, + + // Target. Note we don't set "Destroy: true" here since we already + // created proper destroy ordering. 
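Several of the builders above wrap steps in GraphTransformIf and GraphTransformMulti. Assuming the obvious semantics (a predicate-gated step and an ordered composite), a simplified, self-contained sketch of those two helpers:

package main

import "fmt"

type graph struct{ nodes []string }

type transformer interface{ Transform(*graph) error }

type transformFunc func(*graph) error

func (f transformFunc) Transform(g *graph) error { return f(g) }

// transformIf runs t only when cond() is true at transform time.
func transformIf(cond func() bool, t transformer) transformer {
	return transformFunc(func(g *graph) error {
		if cond() {
			return t.Transform(g)
		}
		return nil
	})
}

// transformMulti composes several transformers into one step.
func transformMulti(ts ...transformer) transformer {
	return transformFunc(func(g *graph) error {
		for _, t := range ts {
			if err := t.Transform(g); err != nil {
				return err
			}
		}
		return nil
	})
}

func main() {
	destroy := true
	step := transformIf(
		func() bool { return destroy },
		transformMulti(
			transformFunc(func(g *graph) error { g.nodes = append(g.nodes, "destroy-edges"); return nil }),
			transformFunc(func(g *graph) error { g.nodes = append(g.nodes, "prune-values"); return nil }),
		),
	)
	g := &graph{}
	fmt.Println(step.Transform(g), g.nodes) // <nil> [destroy-edges prune-values]
}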
+ &TargetsTransformer{Targets: b.Targets}, + + // Single root + &RootTransformer{}, + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go index 06763710..07a1eaf8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go @@ -2,6 +2,7 @@ package terraform import ( "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" ) // ImportGraphBuilder implements GraphBuilder and is responsible for building @@ -23,6 +24,7 @@ func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) { return (&BasicGraphBuilder{ Steps: b.Steps(), Validate: true, + Name: "ImportGraphBuilder", }).Build(path) } @@ -36,6 +38,13 @@ func (b *ImportGraphBuilder) Steps() []GraphTransformer { mod = module.NewEmptyTree() } + // Custom factory for creating providers. + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + steps := []GraphTransformer{ // Create all our resources from the configuration and state &ConfigTransformer{Module: mod}, @@ -43,18 +52,17 @@ func (b *ImportGraphBuilder) Steps() []GraphTransformer { // Add the import steps &ImportStateTransformer{Targets: b.ImportTargets}, - // Provider-related transformations - &MissingProviderTransformer{Providers: b.Providers}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &PruneProviderTransformer{}, + TransformProviders(b.Providers, concreteProvider, mod), - // Single root - &RootTransformer{}, + // This validates that the providers only depend on variables + &ImportProviderValidateTransformer{}, - // Insert nodes to close opened plugin connections + // Close opened plugin connections &CloseProviderTransformer{}, + // Single root + &RootTransformer{}, + // Optimize &TransitiveReductionTransformer{}, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go new file mode 100644 index 00000000..0df48cdb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go @@ -0,0 +1,27 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/dag" +) + +// InputGraphBuilder creates the graph for the input operation. +// +// Unlike other graph builders, this is a function since it currently modifies +// and is based on the PlanGraphBuilder. The PlanGraphBuilder passed in will be +// modified and should not be used for any other operations. +func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder { + // We're going to customize the concrete functions + p.CustomConcrete = true + + // Set the provider to the normal provider. This will ask for input. + p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + // We purposely don't set any more concrete fields since the remainder + // should be no-ops. 
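InputGraphBuilder above customizes only ConcreteProvider and leaves the remaining concrete functions alone. The underlying pattern is an injectable factory field: transformers create abstract nodes, and the factory decides which concrete node type wraps them for a given operation. A stand-in sketch:

package main

import "fmt"

type abstractProvider struct{ name string }

// Two concrete flavors of the same abstract node.
type applyableProvider struct{ *abstractProvider }
type validateOnlyProvider struct{ *abstractProvider }

// concreteFunc mirrors the ConcreteProviderNodeFunc idea: it upgrades
// an abstract node into whatever concrete node the operation needs.
type concreteFunc func(*abstractProvider) interface{}

func expand(names []string, concrete concreteFunc) []interface{} {
	var out []interface{}
	for _, n := range names {
		out = append(out, concrete(&abstractProvider{name: n}))
	}
	return out
}

func main() {
	// An input/apply-style build asks for fully applyable providers...
	nodes := expand([]string{"aws", "template"}, func(a *abstractProvider) interface{} {
		return &applyableProvider{a}
	})
	fmt.Printf("%T\n", nodes[0]) // *main.applyableProvider

	// ...while a validate-style build could swap in a cheaper node.
	nodes = expand([]string{"aws"}, func(a *abstractProvider) interface{} {
		return &validateOnlyProvider{a}
	})
	fmt.Printf("%T\n", nodes[0]) // *main.validateOnlyProvider
}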
+ + return p +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go new file mode 100644 index 00000000..f8dd0fc9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go @@ -0,0 +1,181 @@ +package terraform + +import ( + "sync" + + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// PlanGraphBuilder implements GraphBuilder and is responsible for building +// a graph for planning (creating a Terraform Diff). +// +// The primary difference between this graph and others: +// +// * Based on the config since it represents the target state +// +// * Ignores lifecycle options since no lifecycle events occur here. This +// simplifies the graph significantly since complex transforms such as +// create-before-destroy can be completely ignored. +// +type PlanGraphBuilder struct { + // Module is the root module for the graph to build. + Module *module.Tree + + // State is the current state + State *State + + // Providers is the list of providers supported. + Providers []string + + // Provisioners is the list of provisioners supported. + Provisioners []string + + // Targets are resources to target + Targets []string + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Validate will do structural validation of the graph. + Validate bool + + // CustomConcrete can be set to customize the node types created + // for various parts of the plan. This is useful in order to customize + // the plan behavior. + CustomConcrete bool + ConcreteProvider ConcreteProviderNodeFunc + ConcreteResource ConcreteResourceNodeFunc + ConcreteResourceOrphan ConcreteResourceNodeFunc + + once sync.Once +} + +// See GraphBuilder +func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "PlanGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *PlanGraphBuilder) Steps() []GraphTransformer { + b.once.Do(b.init) + + steps := []GraphTransformer{ + // Creates all the resources represented in the config + &ConfigTransformer{ + Concrete: b.ConcreteResource, + Module: b.Module, + }, + + // Add the local values + &LocalTransformer{Module: b.Module}, + + // Add the outputs + &OutputTransformer{Module: b.Module}, + + // Add orphan resources + &OrphanResourceTransformer{ + Concrete: b.ConcreteResourceOrphan, + State: b.State, + Module: b.Module, + }, + + // Create orphan output nodes + &OrphanOutputTransformer{ + Module: b.Module, + State: b.State, + }, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Module: b.Module}, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Add root variables + &RootVariableTransformer{Module: b.Module}, + + TransformProviders(b.Providers, b.ConcreteProvider, b.Module), + + // Provisioner-related transformations. Only add these if requested. + GraphTransformIf( + func() bool { return b.Provisioners != nil }, + GraphTransformMulti( + &MissingProvisionerTransformer{Provisioners: b.Provisioners}, + &ProvisionerTransformer{}, + ), + ), + + // Add module variables + &ModuleVariableTransformer{ + Module: b.Module, + }, + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Module: b.Module, State: b.State}, + + // Connect so that the references are ready for targeting. 
We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Add the node to fix the state count boundaries + &CountBoundaryTransformer{}, + + // Target + &TargetsTransformer{ + Targets: b.Targets, + + // Resource nodes from config have not yet been expanded for + // "count", so we must apply targeting without indices. Exact + // targeting will be dealt with later when these resources + // DynamicExpand. + IgnoreIndices: true, + }, + + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). + steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} + +func (b *PlanGraphBuilder) init() { + // Do nothing if the user requests customizing the fields + if b.CustomConcrete { + return + } + + b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { + return &NodePlannableResource{ + NodeAbstractCountResource: &NodeAbstractCountResource{ + NodeAbstractResource: a, + }, + } + } + + b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex { + return &NodePlannableResourceOrphan{ + NodeAbstractResource: a, + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go new file mode 100644 index 00000000..9638d4c8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go @@ -0,0 +1,169 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// RefreshGraphBuilder implements GraphBuilder and is responsible for building +// a graph for refreshing (updating the Terraform state). +// +// The primary difference between this graph and others: +// +// * Based on the state since it represents the only resources that +// need to be refreshed. +// +// * Ignores lifecycle options since no lifecycle events occur here. This +// simplifies the graph significantly since complex transforms such as +// create-before-destroy can be completely ignored. +// +type RefreshGraphBuilder struct { + // Module is the root module for the graph to build. + Module *module.Tree + + // State is the current state + State *State + + // Providers is the list of providers supported. + Providers []string + + // Targets are resources to target + Targets []string + + // DisableReduce, if true, will not reduce the graph. Great for testing. + DisableReduce bool + + // Validate will do structural validation of the graph. + Validate bool +} + +// See GraphBuilder +func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: b.Validate, + Name: "RefreshGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *RefreshGraphBuilder) Steps() []GraphTransformer { + // Custom factory for creating providers. 
+ concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeRefreshableManagedResource{ + NodeAbstractCountResource: &NodeAbstractCountResource{ + NodeAbstractResource: a, + }, + } + } + + concreteManagedResourceInstance := func(a *NodeAbstractResource) dag.Vertex { + return &NodeRefreshableManagedResourceInstance{ + NodeAbstractResource: a, + } + } + + concreteDataResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeRefreshableDataResource{ + NodeAbstractCountResource: &NodeAbstractCountResource{ + NodeAbstractResource: a, + }, + } + } + + steps := []GraphTransformer{ + // Creates all the managed resources that aren't in the state, but only if + // we have a state already. No resources in state means there's not + // anything to refresh. + func() GraphTransformer { + if b.State.HasResources() { + return &ConfigTransformer{ + Concrete: concreteManagedResource, + Module: b.Module, + Unique: true, + ModeFilter: true, + Mode: config.ManagedResourceMode, + } + } + log.Println("[TRACE] No managed resources in state during refresh, skipping managed resource transformer") + return nil + }(), + + // Creates all the data resources that aren't in the state. This will also + // add any orphans from scaling in as destroy nodes. + &ConfigTransformer{ + Concrete: concreteDataResource, + Module: b.Module, + Unique: true, + ModeFilter: true, + Mode: config.DataResourceMode, + }, + + // Add any fully-orphaned resources from config (ones that have been + // removed completely, not ones that are just orphaned due to a scaled-in + // count. + &OrphanResourceTransformer{ + Concrete: concreteManagedResourceInstance, + State: b.State, + Module: b.Module, + }, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Module: b.Module}, + + // Add root variables + &RootVariableTransformer{Module: b.Module}, + + TransformProviders(b.Providers, concreteProvider, b.Module), + + // Add the local values + &LocalTransformer{Module: b.Module}, + + // Add the outputs + &OutputTransformer{Module: b.Module}, + + // Add module variables + &ModuleVariableTransformer{Module: b.Module}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Target + &TargetsTransformer{ + Targets: b.Targets, + + // Resource nodes from config have not yet been expanded for + // "count", so we must apply targeting without indices. Exact + // targeting will be dealt with later when these resources + // DynamicExpand. + IgnoreIndices: true, + }, + + // Close opened plugin connections + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + } + + if !b.DisableReduce { + // Perform the transitive reduction to make our graph a bit + // more sane if possible (it usually is possible). 
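RefreshGraphBuilder's step list above uses an immediately invoked closure to decide at build time whether the managed-resource ConfigTransformer exists at all, returning nil when there is nothing to refresh. A simplified sketch of that idiom, with stand-in types:

package main

import "fmt"

type graph struct{ nodes []string }

type transformer interface{ Transform(*graph) error }

type stateTransformer struct{}

func (*stateTransformer) Transform(g *graph) error {
	g.nodes = append(g.nodes, "managed-resources")
	return nil
}

func steps(hasState bool) []transformer {
	return []transformer{
		// Decide at build time whether this step exists at all.
		func() transformer {
			if hasState {
				return &stateTransformer{}
			}
			return nil // nothing to refresh; the runner skips nil steps
		}(),
	}
}

func main() {
	g := &graph{}
	for _, s := range steps(false) {
		if s == nil {
			continue
		}
		s.Transform(g)
	}
	fmt.Println(g.nodes) // []
}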
+ steps = append(steps, &TransitiveReductionTransformer{}) + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go new file mode 100644 index 00000000..645ec7be --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go @@ -0,0 +1,36 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/dag" +) + +// ValidateGraphBuilder creates the graph for the validate operation. +// +// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that +// we only have to validate what we'd normally plan anyways. The +// PlanGraphBuilder given will be modified so it shouldn't be used for anything +// else after calling this function. +func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder { + // We're going to customize the concrete functions + p.CustomConcrete = true + + // Set the provider to the normal provider. This will ask for input. + p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, + } + } + + p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { + return &NodeValidatableResource{ + NodeAbstractCountResource: &NodeAbstractCountResource{ + NodeAbstractResource: a, + }, + } + } + + // We purposely don't set any other concrete types since they don't + // require validation. + + return p +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node.go b/vendor/github.com/hashicorp/terraform/terraform/graph_config_node.go deleted file mode 100644 index ea4645d7..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node.go +++ /dev/null @@ -1,41 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// graphNodeConfig is an interface that all graph nodes for the -// configuration graph need to implement in order to build the variable -// dependencies properly. -type graphNodeConfig interface { - dag.NamedVertex - - // All graph nodes should be dependent on other things, and able to - // be depended on. - GraphNodeDependable - GraphNodeDependent - - // ConfigType returns the type of thing in the configuration that - // this node represents, such as a resource, module, etc. - ConfigType() GraphNodeConfigType -} - -// GraphNodeAddressable is an interface that all graph nodes for the -// configuration graph need to implement in order to be be addressed / targeted -// properly. -type GraphNodeAddressable interface { - graphNodeConfig - - ResourceAddress() *ResourceAddress -} - -// GraphNodeTargetable is an interface for graph nodes to implement when they -// need to be told about incoming targets. This is useful for nodes that need -// to respect targets as they dynamically expand. Note that the list of targets -// provided will contain every target provided, and each implementing graph -// node must filter this list to targets considered relevant. 
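The removed GraphNodeTargetable contract above says SetTargets receives every user-supplied target and each implementing node filters the list down to what is relevant to it. A minimal illustration of that filtering, with ResourceAddress reduced to a plain string:

package main

import (
	"fmt"
	"strings"
)

type targetableNode struct {
	addr    string
	targets []string
}

// setTargets keeps only the targets that address this node (exact
// match or an indexed form such as "addr.0").
func (n *targetableNode) setTargets(all []string) {
	n.targets = n.targets[:0]
	for _, t := range all {
		if t == n.addr || strings.HasPrefix(t, n.addr+".") {
			n.targets = append(n.targets, t)
		}
	}
}

func main() {
	n := &targetableNode{addr: "aws_instance.web"}
	n.setTargets([]string{"aws_instance.web", "aws_instance.db"})
	fmt.Println(n.targets) // [aws_instance.web]
}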
-type GraphNodeTargetable interface { - GraphNodeAddressable - - SetTargets([]ResourceAddress) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_module.go b/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_module.go deleted file mode 100644 index 3e36e1ea..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_module.go +++ /dev/null @@ -1,213 +0,0 @@ -package terraform - -import ( - "fmt" - "strings" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/dot" -) - -// GraphNodeConfigModule represents a module within the configuration graph. -type GraphNodeConfigModule struct { - Path []string - Module *config.Module - Tree *module.Tree -} - -func (n *GraphNodeConfigModule) ConfigType() GraphNodeConfigType { - return GraphNodeConfigTypeModule -} - -func (n *GraphNodeConfigModule) DependableName() []string { - config := n.Tree.Config() - - result := make([]string, 1, len(config.Outputs)+1) - result[0] = n.Name() - for _, o := range config.Outputs { - result = append(result, fmt.Sprintf("%s.output.%s", n.Name(), o.Name)) - } - - return result -} - -func (n *GraphNodeConfigModule) DependentOn() []string { - vars := n.Module.RawConfig.Variables - result := make([]string, 0, len(vars)) - for _, v := range vars { - if vn := varNameForVar(v); vn != "" { - result = append(result, vn) - } - } - - return result -} - -func (n *GraphNodeConfigModule) Name() string { - return fmt.Sprintf("module.%s", n.Module.Name) -} - -// GraphNodeExpandable -func (n *GraphNodeConfigModule) Expand(b GraphBuilder) (GraphNodeSubgraph, error) { - // Build the graph first - graph, err := b.Build(n.Path) - if err != nil { - return nil, err - } - - { - // Add the destroy marker to the graph - t := &ModuleDestroyTransformer{} - if err := t.Transform(graph); err != nil { - return nil, err - } - } - - // Build the actual subgraph node - return &graphNodeModuleExpanded{ - Original: n, - Graph: graph, - Variables: make(map[string]interface{}), - }, nil -} - -// GraphNodeExpandable -func (n *GraphNodeConfigModule) ProvidedBy() []string { - // Build up the list of providers by simply going over our configuration - // to find the providers that are configured there as well as the - // providers that the resources use. - config := n.Tree.Config() - providers := make(map[string]struct{}) - for _, p := range config.ProviderConfigs { - providers[p.Name] = struct{}{} - } - for _, r := range config.Resources { - providers[resourceProvider(r.Type, r.Provider)] = struct{}{} - } - - // Turn the map into a string. This makes sure that the list is - // de-dupped since we could be going over potentially many resources. - result := make([]string, 0, len(providers)) - for p, _ := range providers { - result = append(result, p) - } - - return result -} - -// graphNodeModuleExpanded represents a module where the graph has -// been expanded. It stores the graph of the module as well as a reference -// to the map of variables. -type graphNodeModuleExpanded struct { - Original *GraphNodeConfigModule - Graph *Graph - - // Variables is a map of the input variables. This reference should - // be shared with ModuleInputTransformer in order to create a connection - // where the variables are set properly. 
- Variables map[string]interface{} -} - -func (n *graphNodeModuleExpanded) Name() string { - return fmt.Sprintf("%s (expanded)", dag.VertexName(n.Original)) -} - -func (n *graphNodeModuleExpanded) ConfigType() GraphNodeConfigType { - return GraphNodeConfigTypeModule -} - -// GraphNodeDependable -func (n *graphNodeModuleExpanded) DependableName() []string { - return n.Original.DependableName() -} - -// GraphNodeDependent -func (n *graphNodeModuleExpanded) DependentOn() []string { - return n.Original.DependentOn() -} - -// GraphNodeDotter impl. -func (n *graphNodeModuleExpanded) DotNode(name string, opts *GraphDotOpts) *dot.Node { - return dot.NewNode(name, map[string]string{ - "label": dag.VertexName(n.Original), - "shape": "component", - }) -} - -// GraphNodeEvalable impl. -func (n *graphNodeModuleExpanded) EvalTree() EvalNode { - var resourceConfig *ResourceConfig - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.Original.Module.RawConfig, - Output: &resourceConfig, - }, - - &EvalVariableBlock{ - Config: &resourceConfig, - VariableValues: n.Variables, - }, - }, - } -} - -// GraphNodeFlattenable impl. -func (n *graphNodeModuleExpanded) FlattenGraph() *Graph { - graph := n.Subgraph() - input := n.Original.Module.RawConfig - - // Go over each vertex and do some modifications to the graph for - // flattening. We have to skip some nodes (graphNodeModuleSkippable) - // as well as setup the variable values. - for _, v := range graph.Vertices() { - // If this is a variable, then look it up in the raw configuration. - // If it exists in the raw configuration, set the value of it. - if vn, ok := v.(*GraphNodeConfigVariable); ok && input != nil { - key := vn.VariableName() - if v, ok := input.Raw[key]; ok { - config, err := config.NewRawConfig(map[string]interface{}{ - key: v, - }) - if err != nil { - // This shouldn't happen because it is already in - // a RawConfig above meaning it worked once before. - panic(err) - } - - // Set the variable value so it is interpolated properly. - // Also set the module so we set the value on it properly. - vn.Module = graph.Path[len(graph.Path)-1] - vn.Value = config - } - } - } - - return graph -} - -// GraphNodeSubgraph impl. -func (n *graphNodeModuleExpanded) Subgraph() *Graph { - return n.Graph -} - -func modulePrefixStr(p []string) string { - parts := make([]string, 0, len(p)*2) - for _, p := range p[1:] { - parts = append(parts, "module", p) - } - - return strings.Join(parts, ".") -} - -func modulePrefixList(result []string, prefix string) []string { - if prefix != "" { - for i, v := range result { - result[i] = fmt.Sprintf("%s.%s", prefix, v) - } - } - - return result -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_output.go b/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_output.go deleted file mode 100644 index 0704a0cb..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_output.go +++ /dev/null @@ -1,106 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeConfigOutput represents an output configured within the -// configuration. 
-type GraphNodeConfigOutput struct { - Output *config.Output -} - -func (n *GraphNodeConfigOutput) Name() string { - return fmt.Sprintf("output.%s", n.Output.Name) -} - -func (n *GraphNodeConfigOutput) ConfigType() GraphNodeConfigType { - return GraphNodeConfigTypeOutput -} - -func (n *GraphNodeConfigOutput) OutputName() string { - return n.Output.Name -} - -func (n *GraphNodeConfigOutput) DependableName() []string { - return []string{n.Name()} -} - -func (n *GraphNodeConfigOutput) DependentOn() []string { - vars := n.Output.RawConfig.Variables - result := make([]string, 0, len(vars)) - for _, v := range vars { - if vn := varNameForVar(v); vn != "" { - result = append(result, vn) - } - } - - return result -} - -// GraphNodeEvalable impl. -func (n *GraphNodeConfigOutput) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkPlan, walkApply, - walkDestroy, walkInput, walkValidate}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalWriteOutput{ - Name: n.Output.Name, - Sensitive: n.Output.Sensitive, - Value: n.Output.RawConfig, - }, - }, - }, - } -} - -// GraphNodeProxy impl. -func (n *GraphNodeConfigOutput) Proxy() bool { - return true -} - -// GraphNodeDestroyEdgeInclude impl. -func (n *GraphNodeConfigOutput) DestroyEdgeInclude(dag.Vertex) bool { - return false -} - -// GraphNodeFlattenable impl. -func (n *GraphNodeConfigOutput) Flatten(p []string) (dag.Vertex, error) { - return &GraphNodeConfigOutputFlat{ - GraphNodeConfigOutput: n, - PathValue: p, - }, nil -} - -// Same as GraphNodeConfigOutput, but for flattening -type GraphNodeConfigOutputFlat struct { - *GraphNodeConfigOutput - - PathValue []string -} - -func (n *GraphNodeConfigOutputFlat) Name() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), n.GraphNodeConfigOutput.Name()) -} - -func (n *GraphNodeConfigOutputFlat) Path() []string { - return n.PathValue -} - -func (n *GraphNodeConfigOutputFlat) DependableName() []string { - return modulePrefixList( - n.GraphNodeConfigOutput.DependableName(), - modulePrefixStr(n.PathValue)) -} - -func (n *GraphNodeConfigOutputFlat) DependentOn() []string { - prefix := modulePrefixStr(n.PathValue) - return modulePrefixList( - n.GraphNodeConfigOutput.DependentOn(), - prefix) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_provider.go deleted file mode 100644 index c0e15eff..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_provider.go +++ /dev/null @@ -1,131 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/dot" -) - -// GraphNodeConfigProvider represents a configured provider within the -// configuration graph. These are only immediately in the graph when an -// explicit `provider` configuration block is in the configuration. 
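The flattened node types above build their names with modulePrefixStr and modulePrefixList. A self-contained example mirroring that logic, showing how a module path becomes a name prefix:

package main

import (
	"fmt"
	"strings"
)

// modulePrefixStr turns a module path into a "module.a.module.b"
// prefix; the first path element is the root module and is skipped.
func modulePrefixStr(p []string) string {
	parts := make([]string, 0, len(p)*2)
	for _, p := range p[1:] {
		parts = append(parts, "module", p)
	}
	return strings.Join(parts, ".")
}

// modulePrefixList applies the prefix to each name in the list.
func modulePrefixList(result []string, prefix string) []string {
	if prefix != "" {
		for i, v := range result {
			result[i] = fmt.Sprintf("%s.%s", prefix, v)
		}
	}
	return result
}

func main() {
	path := []string{"root", "network", "subnets"}
	prefix := modulePrefixStr(path)
	fmt.Println(prefix) // module.network.module.subnets
	fmt.Println(modulePrefixList([]string{"output.cidr"}, prefix))
	// [module.network.module.subnets.output.cidr]
}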
-type GraphNodeConfigProvider struct { - Provider *config.ProviderConfig -} - -func (n *GraphNodeConfigProvider) Name() string { - return fmt.Sprintf("provider.%s", n.ProviderName()) -} - -func (n *GraphNodeConfigProvider) ConfigType() GraphNodeConfigType { - return GraphNodeConfigTypeProvider -} - -func (n *GraphNodeConfigProvider) DependableName() []string { - return []string{n.Name()} -} - -func (n *GraphNodeConfigProvider) DependentOn() []string { - vars := n.Provider.RawConfig.Variables - result := make([]string, 0, len(vars)) - for _, v := range vars { - if vn := varNameForVar(v); vn != "" { - result = append(result, vn) - } - } - - return result -} - -// GraphNodeEvalable impl. -func (n *GraphNodeConfigProvider) EvalTree() EvalNode { - return ProviderEvalTree(n.ProviderName(), n.Provider.RawConfig) -} - -// GraphNodeProvider implementation -func (n *GraphNodeConfigProvider) ProviderName() string { - if n.Provider.Alias == "" { - return n.Provider.Name - } else { - return fmt.Sprintf("%s.%s", n.Provider.Name, n.Provider.Alias) - } -} - -// GraphNodeProvider implementation -func (n *GraphNodeConfigProvider) ProviderConfig() *config.RawConfig { - return n.Provider.RawConfig -} - -// GraphNodeDotter impl. -func (n *GraphNodeConfigProvider) DotNode(name string, opts *GraphDotOpts) *dot.Node { - return dot.NewNode(name, map[string]string{ - "label": n.Name(), - "shape": "diamond", - }) -} - -// GraphNodeDotterOrigin impl. -func (n *GraphNodeConfigProvider) DotOrigin() bool { - return true -} - -// GraphNodeFlattenable impl. -func (n *GraphNodeConfigProvider) Flatten(p []string) (dag.Vertex, error) { - return &GraphNodeConfigProviderFlat{ - GraphNodeConfigProvider: n, - PathValue: p, - }, nil -} - -// Same as GraphNodeConfigProvider, but for flattening -type GraphNodeConfigProviderFlat struct { - *GraphNodeConfigProvider - - PathValue []string -} - -func (n *GraphNodeConfigProviderFlat) Name() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), n.GraphNodeConfigProvider.Name()) -} - -func (n *GraphNodeConfigProviderFlat) Path() []string { - return n.PathValue -} - -func (n *GraphNodeConfigProviderFlat) DependableName() []string { - return modulePrefixList( - n.GraphNodeConfigProvider.DependableName(), - modulePrefixStr(n.PathValue)) -} - -func (n *GraphNodeConfigProviderFlat) DependentOn() []string { - prefixed := modulePrefixList( - n.GraphNodeConfigProvider.DependentOn(), - modulePrefixStr(n.PathValue)) - - result := make([]string, len(prefixed), len(prefixed)+1) - copy(result, prefixed) - - // If we're in a module, then depend on our parent's provider - if len(n.PathValue) > 1 { - prefix := modulePrefixStr(n.PathValue[:len(n.PathValue)-1]) - if prefix != "" { - prefix += "." 
- } - - result = append(result, fmt.Sprintf( - "%s%s", - prefix, n.GraphNodeConfigProvider.Name())) - } - - return result -} - -func (n *GraphNodeConfigProviderFlat) ProviderName() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), - n.GraphNodeConfigProvider.ProviderName()) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_resource.go b/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_resource.go deleted file mode 100644 index 1c45289a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_resource.go +++ /dev/null @@ -1,531 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - "strings" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/dot" -) - -// GraphNodeCountDependent is implemented by resources for giving only -// the dependencies they have from the "count" field. -type GraphNodeCountDependent interface { - CountDependentOn() []string -} - -// GraphNodeConfigResource represents a resource within the config graph. -type GraphNodeConfigResource struct { - Resource *config.Resource - - // If set to true, this resource represents a resource - // that will be destroyed in some way. - Destroy bool - - // Used during DynamicExpand to target indexes - Targets []ResourceAddress - - Path []string -} - -func (n *GraphNodeConfigResource) Copy() *GraphNodeConfigResource { - ncr := &GraphNodeConfigResource{ - Resource: n.Resource.Copy(), - Destroy: n.Destroy, - Targets: make([]ResourceAddress, 0, len(n.Targets)), - Path: make([]string, 0, len(n.Path)), - } - for _, t := range n.Targets { - ncr.Targets = append(ncr.Targets, *t.Copy()) - } - for _, p := range n.Path { - ncr.Path = append(ncr.Path, p) - } - return ncr -} - -func (n *GraphNodeConfigResource) ConfigType() GraphNodeConfigType { - return GraphNodeConfigTypeResource -} - -func (n *GraphNodeConfigResource) DependableName() []string { - return []string{n.Resource.Id()} -} - -// GraphNodeCountDependent impl. -func (n *GraphNodeConfigResource) CountDependentOn() []string { - result := make([]string, 0, len(n.Resource.RawCount.Variables)) - for _, v := range n.Resource.RawCount.Variables { - if vn := varNameForVar(v); vn != "" { - result = append(result, vn) - } - } - - return result -} - -// GraphNodeDependent impl. -func (n *GraphNodeConfigResource) DependentOn() []string { - result := make([]string, len(n.Resource.DependsOn), - (len(n.Resource.RawCount.Variables)+ - len(n.Resource.RawConfig.Variables)+ - len(n.Resource.DependsOn))*2) - copy(result, n.Resource.DependsOn) - - for _, v := range n.Resource.RawCount.Variables { - if vn := varNameForVar(v); vn != "" { - result = append(result, vn) - } - } - for _, v := range n.Resource.RawConfig.Variables { - if vn := varNameForVar(v); vn != "" { - result = append(result, vn) - } - } - for _, p := range n.Resource.Provisioners { - for _, v := range p.ConnInfo.Variables { - if vn := varNameForVar(v); vn != "" && vn != n.Resource.Id() { - result = append(result, vn) - } - } - for _, v := range p.RawConfig.Variables { - if vn := varNameForVar(v); vn != "" && vn != n.Resource.Id() { - result = append(result, vn) - } - } - } - - return result -} - -// VarWalk calls a callback for all the variables that this resource -// depends on. 
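-// The walk visits the count, the raw config, and each provisioner's
-// connection and config variables, mirroring the traversal in DependentOn.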
-func (n *GraphNodeConfigResource) VarWalk(fn func(config.InterpolatedVariable)) { - for _, v := range n.Resource.RawCount.Variables { - fn(v) - } - for _, v := range n.Resource.RawConfig.Variables { - fn(v) - } - for _, p := range n.Resource.Provisioners { - for _, v := range p.ConnInfo.Variables { - fn(v) - } - for _, v := range p.RawConfig.Variables { - fn(v) - } - } -} - -func (n *GraphNodeConfigResource) Name() string { - result := n.Resource.Id() - if n.Destroy { - result += " (destroy)" - } - return result -} - -// GraphNodeDotter impl. -func (n *GraphNodeConfigResource) DotNode(name string, opts *GraphDotOpts) *dot.Node { - if n.Destroy && !opts.Verbose { - return nil - } - return dot.NewNode(name, map[string]string{ - "label": n.Name(), - "shape": "box", - }) -} - -// GraphNodeFlattenable impl. -func (n *GraphNodeConfigResource) Flatten(p []string) (dag.Vertex, error) { - return &GraphNodeConfigResourceFlat{ - GraphNodeConfigResource: n, - PathValue: p, - }, nil -} - -// GraphNodeDynamicExpandable impl. -func (n *GraphNodeConfigResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Start creating the steps - steps := make([]GraphTransformer, 0, 5) - - // Expand counts. - steps = append(steps, &ResourceCountTransformer{ - Resource: n.Resource, - Destroy: n.Destroy, - Targets: n.Targets, - }) - - // Additional destroy modifications. - if n.Destroy { - // If we're destroying a primary or tainted resource, we want to - // expand orphans, which have all the same semantics in a destroy - // as a primary or tainted resource. - steps = append(steps, &OrphanTransformer{ - State: state, - View: n.Resource.Id(), - }) - - steps = append(steps, &DeposedTransformer{ - State: state, - View: n.Resource.Id(), - }) - } - - // We always want to apply targeting - steps = append(steps, &TargetsTransformer{ - ParsedTargets: n.Targets, - Destroy: n.Destroy, - }) - - // Always end with the root being added - steps = append(steps, &RootTransformer{}) - - // Build the graph - b := &BasicGraphBuilder{Steps: steps} - return b.Build(ctx.Path()) -} - -// GraphNodeAddressable impl. -func (n *GraphNodeConfigResource) ResourceAddress() *ResourceAddress { - return &ResourceAddress{ - Path: n.Path[1:], - Index: -1, - InstanceType: TypePrimary, - Name: n.Resource.Name, - Type: n.Resource.Type, - Mode: n.Resource.Mode, - } -} - -// GraphNodeTargetable impl. -func (n *GraphNodeConfigResource) SetTargets(targets []ResourceAddress) { - n.Targets = targets -} - -// GraphNodeEvalable impl. 
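-// The tree interpolates the raw count, validates it during the validate
-// walk, and then normalizes the zero/one count boundary.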
-func (n *GraphNodeConfigResource) EvalTree() EvalNode { - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalInterpolate{Config: n.Resource.RawCount}, - &EvalOpFilter{ - Ops: []walkOperation{walkValidate}, - Node: &EvalValidateCount{Resource: n.Resource}, - }, - &EvalCountFixZeroOneBoundary{Resource: n.Resource}, - }, - } -} - -// GraphNodeProviderConsumer -func (n *GraphNodeConfigResource) ProvidedBy() []string { - return []string{resourceProvider(n.Resource.Type, n.Resource.Provider)} -} - -// GraphNodeProvisionerConsumer -func (n *GraphNodeConfigResource) ProvisionedBy() []string { - result := make([]string, len(n.Resource.Provisioners)) - for i, p := range n.Resource.Provisioners { - result[i] = p.Type - } - - return result -} - -// GraphNodeDestroyable -func (n *GraphNodeConfigResource) DestroyNode() GraphNodeDestroy { - // If we're already a destroy node, then don't do anything - if n.Destroy { - return nil - } - - result := &graphNodeResourceDestroy{ - GraphNodeConfigResource: *n.Copy(), - Original: n, - } - result.Destroy = true - - return result -} - -// GraphNodeNoopPrunable -func (n *GraphNodeConfigResource) Noop(opts *NoopOpts) bool { - log.Printf("[DEBUG] Checking resource noop: %s", n.Name()) - // We don't have any noop optimizations for destroy nodes yet - if n.Destroy { - log.Printf("[DEBUG] Destroy node, not a noop") - return false - } - - // If there is no diff, then we aren't a noop since something needs to - // be done (such as a plan). We only check if we're a noop in a diff. - if opts.Diff == nil || opts.Diff.Empty() { - log.Printf("[DEBUG] No diff, not a noop") - return false - } - - // If the count has any interpolations, we can't prune this node since - // we need to be sure to evaluate the count so that splat variables work - // later (which need to know the full count). - if len(n.Resource.RawCount.Interpolations) > 0 { - log.Printf("[DEBUG] Count has interpolations, not a noop") - return false - } - - // If we have no module diff, we're certainly a noop. This is because - // it means there is a diff, and that the module we're in just isn't - // in it, meaning we're not doing anything. 
- if opts.ModDiff == nil || opts.ModDiff.Empty() { - log.Printf("[DEBUG] No mod diff, treating resource as a noop") - return true - } - - // Grab the ID which is the prefix (in the case count > 0 at some point) - prefix := n.Resource.Id() - - // Go through the diff and if there are any with our name on it, keep us - found := false - for k, _ := range opts.ModDiff.Resources { - if strings.HasPrefix(k, prefix) { - log.Printf("[DEBUG] Diff has %s, resource is not a noop", k) - found = true - break - } - } - - log.Printf("[DEBUG] Final noop value: %t", !found) - return !found -} - -// Same as GraphNodeConfigResource, but for flattening -type GraphNodeConfigResourceFlat struct { - *GraphNodeConfigResource - - PathValue []string -} - -func (n *GraphNodeConfigResourceFlat) Name() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), n.GraphNodeConfigResource.Name()) -} - -func (n *GraphNodeConfigResourceFlat) Path() []string { - return n.PathValue -} - -func (n *GraphNodeConfigResourceFlat) DependableName() []string { - return modulePrefixList( - n.GraphNodeConfigResource.DependableName(), - modulePrefixStr(n.PathValue)) -} - -func (n *GraphNodeConfigResourceFlat) DependentOn() []string { - prefix := modulePrefixStr(n.PathValue) - return modulePrefixList( - n.GraphNodeConfigResource.DependentOn(), - prefix) -} - -func (n *GraphNodeConfigResourceFlat) ProvidedBy() []string { - prefix := modulePrefixStr(n.PathValue) - return modulePrefixList( - n.GraphNodeConfigResource.ProvidedBy(), - prefix) -} - -func (n *GraphNodeConfigResourceFlat) ProvisionedBy() []string { - prefix := modulePrefixStr(n.PathValue) - return modulePrefixList( - n.GraphNodeConfigResource.ProvisionedBy(), - prefix) -} - -// GraphNodeDestroyable impl. -func (n *GraphNodeConfigResourceFlat) DestroyNode() GraphNodeDestroy { - // Get our parent destroy node. If we don't have any, just return - raw := n.GraphNodeConfigResource.DestroyNode() - if raw == nil { - return nil - } - - node, ok := raw.(*graphNodeResourceDestroy) - if !ok { - panic(fmt.Sprintf("unknown destroy node: %s %T", dag.VertexName(raw), raw)) - } - - // Otherwise, wrap it so that it gets the proper module treatment. - return &graphNodeResourceDestroyFlat{ - graphNodeResourceDestroy: node, - PathValue: n.PathValue, - FlatCreateNode: n, - } -} - -type graphNodeResourceDestroyFlat struct { - *graphNodeResourceDestroy - - PathValue []string - - // Needs to be able to properly yield back a flattened create node to prevent - FlatCreateNode *GraphNodeConfigResourceFlat -} - -func (n *graphNodeResourceDestroyFlat) Name() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), n.graphNodeResourceDestroy.Name()) -} - -func (n *graphNodeResourceDestroyFlat) Path() []string { - return n.PathValue -} - -func (n *graphNodeResourceDestroyFlat) CreateNode() dag.Vertex { - return n.FlatCreateNode -} - -func (n *graphNodeResourceDestroyFlat) ProvidedBy() []string { - prefix := modulePrefixStr(n.PathValue) - return modulePrefixList( - n.GraphNodeConfigResource.ProvidedBy(), - prefix) -} - -// graphNodeResourceDestroy represents the logical destruction of a -// resource. This node doesn't mean it will be destroyed for sure, but -// instead that if a destroy were to happen, it must happen at this point. 
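-// It embeds a copy of the original config node and keeps a pointer back to
-// it, so the matching create node can be recovered via CreateNode.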
-type graphNodeResourceDestroy struct {
- GraphNodeConfigResource
- Original *GraphNodeConfigResource
-}
-
-func (n *graphNodeResourceDestroy) CreateBeforeDestroy() bool {
- // CBD is enabled if the resource enables it
- return n.Original.Resource.Lifecycle.CreateBeforeDestroy && n.Destroy
-}
-
-func (n *graphNodeResourceDestroy) CreateNode() dag.Vertex {
- return n.Original
-}
-
-func (n *graphNodeResourceDestroy) DestroyInclude(d *ModuleDiff, s *ModuleState) bool {
- if n.Destroy {
- return n.destroyInclude(d, s)
- }
-
- return true
-}
-
-func (n *graphNodeResourceDestroy) destroyInclude(
- d *ModuleDiff, s *ModuleState) bool {
- // Get the count, and specifically the raw value of the count
- // (with interpolations and all). If the count is NOT a static "1",
- // then we keep the destroy node no matter what.
- //
- // The reasoning for this is complicated and not intuitively obvious,
- // but I attempt to explain it below.
- //
- // The destroy transform works by generating the worst case graph,
- // with worst case being the case that every resource already exists
- // and needs to be destroyed/created (force-new). There is a single important
- // edge case where this actually results in a real-life cycle: if a
- // create-before-destroy (CBD) resource depends on a non-CBD resource.
- // Imagine an EC2 instance "foo" with CBD depending on a security
- // group "bar" without CBD, and conceptualize the worst case destroy
- // order:
- //
- // 1.) SG must be destroyed (non-CBD)
- // 2.) SG must be created/updated
- // 3.) EC2 instance must be created (CBD, requires the SG be made)
- // 4.) EC2 instance must be destroyed (requires SG be destroyed)
- //
- // Except, #1 depends on #4, since the SG can't be destroyed while
- // an EC2 instance is using it (AWS API requirements). As you can see,
- // this is a real life cycle that can't be automatically reconciled
- // except under two conditions:
- //
- // 1.) SG is also CBD. This doesn't work 100% of the time though
- // since the non-CBD resource might not support CBD. To make matters
- // worse, the entire transitive closure of dependencies must be
- // CBD (if the SG depends on a VPC, you have the same problem).
- // 2.) EC2 must not be CBD. This can't happen automatically because CBD
- // is used as a way to ensure zero (or minimal) downtime Terraform
- // applies, and it isn't acceptable for TF to ignore this request,
- // since it can result in unexpected downtime.
- //
- // Therefore, we compromise with this edge case here: if there is
- // a static count of "1", we prune the diff to remove cycles during a
- // graph optimization path if we don't see the resource in the diff.
- // If the count is set to ANYTHING other than a static "1" (variable,
- // computed attribute, static number greater than 1), then we keep the
- // destroy, since it is required for dynamic graph expansion to find
- // orphan count objects.
- //
- // This isn't ideal logic, but it's strictly better without introducing
- // new impossibilities. It breaks the cycle in practical cases, and in
- // any case where the cycle does come back, it would already have
- // existed without this anyway.
- count := n.Original.Resource.RawCount
- if raw := count.Raw[count.Key]; raw != "1" {
- return true
- }
-
- // Okay, we're dealing with a static count. There are a few ways
- // to include this resource.
- prefix := n.Original.Resource.Id()
-
- // If we're present in the diff proper, then keep it.
We're looking - // only for resources in the diff that match our resource or a count-index - // of our resource that are marked for destroy. - if d != nil { - for k, v := range d.Resources { - match := k == prefix || strings.HasPrefix(k, prefix+".") - if match && v.GetDestroy() { - return true - } - } - } - - // If we're in the state as a primary in any form, then keep it. - // This does a prefix check so it will also catch orphans on count - // decreases to "1". - if s != nil { - for k, v := range s.Resources { - // Ignore exact matches - if k == prefix { - continue - } - - // Ignore anything that doesn't have a "." afterwards so that - // we only get our own resource and any counts on it. - if !strings.HasPrefix(k, prefix+".") { - continue - } - - // Ignore exact matches and the 0'th index. We only care - // about if there is a decrease in count. - if k == prefix+".0" { - continue - } - - if v.Primary != nil { - return true - } - } - - // If we're in the state as _both_ "foo" and "foo.0", then - // keep it, since we treat the latter as an orphan. - _, okOne := s.Resources[prefix] - _, okTwo := s.Resources[prefix+".0"] - if okOne && okTwo { - return true - } - } - - return false -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_type.go b/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_type.go deleted file mode 100644 index 42dd8dc9..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_type.go +++ /dev/null @@ -1,16 +0,0 @@ -package terraform - -//go:generate stringer -type=GraphNodeConfigType graph_config_node_type.go - -// GraphNodeConfigType is an enum for the type of thing that a graph -// node represents from the configuration. -type GraphNodeConfigType int - -const ( - GraphNodeConfigTypeInvalid GraphNodeConfigType = 0 - GraphNodeConfigTypeResource GraphNodeConfigType = iota - GraphNodeConfigTypeProvider - GraphNodeConfigTypeModule - GraphNodeConfigTypeOutput - GraphNodeConfigTypeVariable -) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_variable.go b/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_variable.go deleted file mode 100644 index ba62eb05..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_config_node_variable.go +++ /dev/null @@ -1,274 +0,0 @@ -package terraform - -import ( - "fmt" - "log" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeConfigVariable represents a Variable in the config. -type GraphNodeConfigVariable struct { - Variable *config.Variable - - // Value, if non-nil, will be used to set the value of the variable - // during evaluation. If this is nil, evaluation will do nothing. - // - // Module is the name of the module to set the variables on. - Module string - Value *config.RawConfig - - ModuleTree *module.Tree - ModulePath []string -} - -func (n *GraphNodeConfigVariable) Name() string { - return fmt.Sprintf("var.%s", n.Variable.Name) -} - -func (n *GraphNodeConfigVariable) ConfigType() GraphNodeConfigType { - return GraphNodeConfigTypeVariable -} - -func (n *GraphNodeConfigVariable) DependableName() []string { - return []string{n.Name()} -} - -// RemoveIfNotTargeted implements RemovableIfNotTargeted. 
-// When targeting is active, variables that are not targeted should be removed
-// from the graph, because otherwise module variables trying to interpolate
-// their references can fail when they're missing the referent resource node.
-func (n *GraphNodeConfigVariable) RemoveIfNotTargeted() bool {
- return true
-}
-
-func (n *GraphNodeConfigVariable) DependentOn() []string {
- // If we don't have any value set, we don't depend on anything
- if n.Value == nil {
- return nil
- }
-
- // Get what we depend on based on our value
- vars := n.Value.Variables
- result := make([]string, 0, len(vars))
- for _, v := range vars {
- if vn := varNameForVar(v); vn != "" {
- result = append(result, vn)
- }
- }
-
- return result
-}
-
-func (n *GraphNodeConfigVariable) VariableName() string {
- return n.Variable.Name
-}
-
-// GraphNodeDestroyEdgeInclude impl.
-func (n *GraphNodeConfigVariable) DestroyEdgeInclude(v dag.Vertex) bool {
- // Only include this variable in a destroy edge if the source vertex
- // "v" has a count dependency on this variable.
- log.Printf("[DEBUG] DestroyEdgeInclude: Checking: %s", dag.VertexName(v))
- cv, ok := v.(GraphNodeCountDependent)
- if !ok {
- log.Printf("[DEBUG] DestroyEdgeInclude: Not GraphNodeCountDependent: %s", dag.VertexName(v))
- return false
- }
-
- for _, d := range cv.CountDependentOn() {
- for _, d2 := range n.DependableName() {
- log.Printf("[DEBUG] DestroyEdgeInclude: d = %s : d2 = %s", d, d2)
- if d == d2 {
- return true
- }
- }
- }
-
- return false
-}
-
-// GraphNodeNoopPrunable
-func (n *GraphNodeConfigVariable) Noop(opts *NoopOpts) bool {
- log.Printf("[DEBUG] Checking variable noop: %s", n.Name())
- // If we have no diff, always keep this in the graph. We have to do
- // this primarily for validation: we want to validate that variable
- // interpolations are valid even if there are no resources that
- // depend on them.
- if opts.Diff == nil || opts.Diff.Empty() {
- log.Printf("[DEBUG] No diff, not a noop")
- return false
- }
-
- // We have to find our module diff since we do funky things with
- // the flat node's implementation of Path() below.
- modDiff := opts.Diff.ModuleByPath(n.ModulePath)
-
- // If we're destroying, we have no need of variables unless they are depended
- // on by the count of a resource.
- if modDiff != nil && modDiff.Destroy {
- if n.hasDestroyEdgeInPath(opts, nil) {
- log.Printf("[DEBUG] Variable has destroy edge from %s, not a noop",
- dag.VertexName(opts.Vertex))
- return false
- }
- log.Printf("[DEBUG] Variable has no included destroy edges: noop!")
- return true
- }
-
- for _, v := range opts.Graph.UpEdges(opts.Vertex).List() {
- // This is terrible, but I can't think of a better way to do this.
- if dag.VertexName(v) == rootNodeName {
- continue
- }
-
- log.Printf("[DEBUG] Found up edge to %s, var is not noop", dag.VertexName(v))
- return false
- }
-
- log.Printf("[DEBUG] No up edges, treating variable as a noop")
- return true
-}
-
-// hasDestroyEdgeInPath recursively walks for a destroy edge, checking that
-// a variable has no destroy edges, either immediately or anywhere in its
-// full module path, ensuring that links do not get severed in the middle.
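-// The walk follows up-edges from the starting vertex, recursing through
-// intermediate vertices; for flattened variables an edge from the root
-// node also counts as a destroy edge.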
-func (n *GraphNodeConfigVariable) hasDestroyEdgeInPath(opts *NoopOpts, vertex dag.Vertex) bool {
- if vertex == nil {
- vertex = opts.Vertex
- }
-
- log.Printf("[DEBUG] hasDestroyEdgeInPath: Looking for destroy edge: %s - %T", dag.VertexName(vertex), vertex)
- for _, v := range opts.Graph.UpEdges(vertex).List() {
- if len(opts.Graph.UpEdges(v).List()) > 1 {
- if n.hasDestroyEdgeInPath(opts, v) == true {
- return true
- }
- }
-
- // Here we borrow the implementation of DestroyEdgeInclude, whose logic
- // and semantics are exactly what we want here. We add a check for
- // the root node, since we always have to depend on its existence.
- if cv, ok := vertex.(*GraphNodeConfigVariableFlat); ok {
- if dag.VertexName(v) == rootNodeName || cv.DestroyEdgeInclude(v) {
- return true
- }
- }
- }
- return false
-}
-
-// GraphNodeProxy impl.
-func (n *GraphNodeConfigVariable) Proxy() bool {
- return true
-}
-
-// GraphNodeEvalable impl.
-func (n *GraphNodeConfigVariable) EvalTree() EvalNode {
- // If we have no value, do nothing
- if n.Value == nil {
- return &EvalNoop{}
- }
-
- // Otherwise, interpolate the value of this variable and set it
- // within the variables mapping.
- var config *ResourceConfig
- variables := make(map[string]interface{})
- return &EvalSequence{
- Nodes: []EvalNode{
- &EvalInterpolate{
- Config: n.Value,
- Output: &config,
- },
-
- &EvalVariableBlock{
- Config: &config,
- VariableValues: variables,
- },
-
- &EvalCoerceMapVariable{
- Variables: variables,
- ModulePath: n.ModulePath,
- ModuleTree: n.ModuleTree,
- },
-
- &EvalTypeCheckVariable{
- Variables: variables,
- ModulePath: n.ModulePath,
- ModuleTree: n.ModuleTree,
- },
-
- &EvalSetVariables{
- Module: &n.Module,
- Variables: variables,
- },
- },
- }
-}
-
-// GraphNodeFlattenable impl.
-func (n *GraphNodeConfigVariable) Flatten(p []string) (dag.Vertex, error) {
- return &GraphNodeConfigVariableFlat{
- GraphNodeConfigVariable: n,
- PathValue: p,
- }, nil
-}
-
-type GraphNodeConfigVariableFlat struct {
- *GraphNodeConfigVariable
-
- PathValue []string
-}
-
-func (n *GraphNodeConfigVariableFlat) Name() string {
- return fmt.Sprintf(
- "%s.%s", modulePrefixStr(n.PathValue), n.GraphNodeConfigVariable.Name())
-}
-
-func (n *GraphNodeConfigVariableFlat) DependableName() []string {
- return []string{n.Name()}
-}
-
-func (n *GraphNodeConfigVariableFlat) DependentOn() []string {
- // We only wrap the dependencies and such if we have a path that is
- // longer than 2 elements (root, child, more). This is because when
- // flattened, variables can point outside the graph.
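- // With a path like root.parent.child, the prefix below is the parent
- // module's, so dependencies resolve in the parent scope that supplies
- // this variable's value.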
- prefix := "" - if len(n.PathValue) > 2 { - prefix = modulePrefixStr(n.PathValue[:len(n.PathValue)-1]) - } - - return modulePrefixList( - n.GraphNodeConfigVariable.DependentOn(), - prefix) -} - -func (n *GraphNodeConfigVariableFlat) Path() []string { - if len(n.PathValue) > 2 { - return n.PathValue[:len(n.PathValue)-1] - } - - return nil -} - -func (n *GraphNodeConfigVariableFlat) Noop(opts *NoopOpts) bool { - // First look for provider nodes that depend on this variable downstream - modDiff := opts.Diff.ModuleByPath(n.ModulePath) - if modDiff != nil && modDiff.Destroy { - ds, err := opts.Graph.Descendents(n) - if err != nil { - log.Printf("[ERROR] Error looking up descendents of %s: %s", n.Name(), err) - } else { - for _, d := range ds.List() { - if _, ok := d.(GraphNodeProvider); ok { - log.Printf("[DEBUG] This variable is depended on by a provider, can't be a noop.") - return false - } - } - } - } - - // Then fall back to existing impl - return n.GraphNodeConfigVariable.Noop(opts) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go index b9313fa2..73e3821f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go @@ -1,185 +1,9 @@ package terraform -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" - "github.com/hashicorp/terraform/dot" -) - -// GraphNodeDotter can be implemented by a node to cause it to be included -// in the dot graph. The Dot method will be called which is expected to -// return a representation of this node. -type GraphNodeDotter interface { - // Dot is called to return the dot formatting for the node. - // The first parameter is the title of the node. - // The second parameter includes user-specified options that affect the dot - // graph. See GraphDotOpts below for details. - DotNode(string, *GraphDotOpts) *dot.Node -} - -type GraphNodeDotOrigin interface { - DotOrigin() bool -} - -// GraphDotOpts are the options for generating a dot formatted Graph. -type GraphDotOpts struct { - // Allows some nodes to decide to only show themselves when the user has - // requested the "verbose" graph. - Verbose bool - - // Highlight Cycles - DrawCycles bool - - // How many levels to expand modules as we draw - MaxDepth int -} +import "github.com/hashicorp/terraform/dag" // GraphDot returns the dot formatting of a visual representation of // the given Terraform graph. 
-func GraphDot(g *Graph, opts *GraphDotOpts) (string, error) { - dg := dot.NewGraph(map[string]string{ - "compound": "true", - "newrank": "true", - }) - dg.Directed = true - - err := graphDotSubgraph(dg, "root", g, opts, 0) - if err != nil { - return "", err - } - - return dg.String(), nil -} - -func graphDotSubgraph( - dg *dot.Graph, modName string, g *Graph, opts *GraphDotOpts, modDepth int) error { - // Respect user-specified module depth - if opts.MaxDepth >= 0 && modDepth > opts.MaxDepth { - return nil - } - - // Begin module subgraph - var sg *dot.Subgraph - if modDepth == 0 { - sg = dg.AddSubgraph(modName) - } else { - sg = dg.AddSubgraph(modName) - sg.Cluster = true - sg.AddAttr("label", modName) - } - - origins, err := graphDotFindOrigins(g) - if err != nil { - return err - } - - drawableVertices := make(map[dag.Vertex]struct{}) - toDraw := make([]dag.Vertex, 0, len(g.Vertices())) - subgraphVertices := make(map[dag.Vertex]*Graph) - - walk := func(v dag.Vertex, depth int) error { - // We only care about nodes that yield non-empty Dot strings. - if dn, ok := v.(GraphNodeDotter); !ok { - return nil - } else if dn.DotNode("fake", opts) == nil { - return nil - } - - drawableVertices[v] = struct{}{} - toDraw = append(toDraw, v) - - if sn, ok := v.(GraphNodeSubgraph); ok { - subgraphVertices[v] = sn.Subgraph() - } - return nil - } - - if err := g.ReverseDepthFirstWalk(origins, walk); err != nil { - return err - } - - for _, v := range toDraw { - dn := v.(GraphNodeDotter) - nodeName := graphDotNodeName(modName, v) - sg.AddNode(dn.DotNode(nodeName, opts)) - - // Draw all the edges from this vertex to other nodes - targets := dag.AsVertexList(g.DownEdges(v)) - for _, t := range targets { - target := t.(dag.Vertex) - // Only want edges where both sides are drawable. 
- if _, ok := drawableVertices[target]; !ok {
- continue
- }
-
- if err := sg.AddEdgeBetween(
- graphDotNodeName(modName, v),
- graphDotNodeName(modName, target),
- map[string]string{}); err != nil {
- return err
- }
- }
- }
-
- // Recurse into any subgraphs
- for _, v := range toDraw {
- subgraph, ok := subgraphVertices[v]
- if !ok {
- continue
- }
-
- err := graphDotSubgraph(dg, dag.VertexName(v), subgraph, opts, modDepth+1)
- if err != nil {
- return err
- }
- }
-
- if opts.DrawCycles {
- colors := []string{"red", "green", "blue"}
- for ci, cycle := range g.Cycles() {
- for i, c := range cycle {
- // Catch the last wrapping edge of the cycle
- if i+1 >= len(cycle) {
- i = -1
- }
- edgeAttrs := map[string]string{
- "color": colors[ci%len(colors)],
- "penwidth": "2.0",
- }
-
- if err := sg.AddEdgeBetween(
- graphDotNodeName(modName, c),
- graphDotNodeName(modName, cycle[i+1]),
- edgeAttrs); err != nil {
- return err
- }
-
- }
- }
- }
-
- return nil
-}
-
-func graphDotNodeName(modName, v dag.Vertex) string {
- return fmt.Sprintf("[%s] %s", modName, dag.VertexName(v))
-}
-
-func graphDotFindOrigins(g *Graph) ([]dag.Vertex, error) {
- var origin []dag.Vertex
-
- for _, v := range g.Vertices() {
- if dr, ok := v.(GraphNodeDotOrigin); ok {
- if dr.DotOrigin() {
- origin = append(origin, v)
- }
- }
- }
-
- if len(origin) == 0 {
- return nil, fmt.Errorf("No DOT origin nodes found.\nGraph: %s", g.String())
- }
-
- return origin, nil
+func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
+ return string(g.Dot(opts)), nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
index ef3a4f6f..34ce6f64 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -15,12 +15,42 @@ type GraphWalker interface {
 ExitEvalTree(dag.Vertex, interface{}, error) error
 }

+// GraphWalkerPanicwrapper can be optionally implemented to catch panics
+// that occur while walking the graph. This is not generally recommended
+// since panics should crash Terraform and result in a bug report. However,
+// this is particularly useful for situations like the shadow graph where
+// you don't ever want to cause a panic.
+type GraphWalkerPanicwrapper interface {
+ GraphWalker
+
+ // Panic is called when a panic occurs. This will halt the panic from
+ // propagating so if the walker wants it to crash still it should panic
+ // again. This is called from within a defer so runtime/debug.Stack can
+ // be used to get the stack trace of the panic.
+ Panic(dag.Vertex, interface{})
+}
+
+// GraphWalkerPanicwrap wraps an existing GraphWalker to catch and swallow
+// the panics. This doesn't lose the panics since the panics are still
+// returned as errors as part of a graph walk.
+func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
+ return &graphWalkerPanicwrapper{
+ GraphWalker: w,
+ }
+}
+
+type graphWalkerPanicwrapper struct {
+ GraphWalker
+}
+
+func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
+
 // NullGraphWalker is a GraphWalker implementation that does nothing.
 // This can be embedded within other GraphWalker implementations for easily
 // implementing all the required functions.
type NullGraphWalker struct{} -func (NullGraphWalker) EnterPath([]string) EvalContext { return nil } +func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) } func (NullGraphWalker) ExitPath([]string) {} func (NullGraphWalker) EnterVertex(dag.Vertex) {} func (NullGraphWalker) ExitVertex(dag.Vertex, error) {} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go index 7424fdbb..89f376e5 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go @@ -1,6 +1,7 @@ package terraform import ( + "context" "fmt" "log" "sync" @@ -15,8 +16,9 @@ type ContextGraphWalker struct { NullGraphWalker // Configurable values - Context *Context - Operation walkOperation + Context *Context + Operation walkOperation + StopContext context.Context // Outputs, do not set these. Do not read these while the graph // is being walked. @@ -30,7 +32,6 @@ type ContextGraphWalker struct { interpolaterVars map[string]map[string]interface{} interpolaterVarLock sync.Mutex providerCache map[string]ResourceProvider - providerConfigCache map[string]*ResourceConfig providerLock sync.Mutex provisionerCache map[string]ResourceProvisioner provisionerLock sync.Mutex @@ -65,15 +66,14 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { w.interpolaterVarLock.Unlock() ctx := &BuiltinEvalContext{ + StopContext: w.StopContext, PathValue: path, Hooks: w.Context.hooks, InputValue: w.Context.uiInput, - Providers: w.Context.providers, + Components: w.Context.components, ProviderCache: w.providerCache, - ProviderConfigCache: w.providerConfigCache, ProviderInputConfig: w.Context.providerInputConfig, ProviderLock: &w.providerLock, - Provisioners: w.Context.provisioners, ProvisionerCache: w.provisionerCache, ProvisionerLock: &w.provisionerLock, DiffValue: w.Context.diff, @@ -82,6 +82,7 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { StateLock: &w.Context.stateLock, Interpolater: &Interpolater{ Operation: w.Operation, + Meta: w.Context.meta, Module: w.Context.module, State: w.Context.state, StateLock: &w.Context.stateLock, @@ -148,7 +149,6 @@ func (w *ContextGraphWalker) ExitEvalTree( func (w *ContextGraphWalker) init() { w.contexts = make(map[string]*BuiltinEvalContext, 5) w.providerCache = make(map[string]ResourceProvider, 5) - w.providerConfigCache = make(map[string]*ResourceConfig, 5) w.provisionerCache = make(map[string]ResourceProvisioner, 5) w.interpolaterVars = make(map[string]map[string]interface{}, 5) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphnodeconfigtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphnodeconfigtype_string.go deleted file mode 100644 index 9ea0acbe..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/graphnodeconfigtype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=GraphNodeConfigType graph_config_node_type.go"; DO NOT EDIT - -package terraform - -import "fmt" - -const _GraphNodeConfigType_name = "GraphNodeConfigTypeInvalidGraphNodeConfigTypeResourceGraphNodeConfigTypeProviderGraphNodeConfigTypeModuleGraphNodeConfigTypeOutputGraphNodeConfigTypeVariable" - -var _GraphNodeConfigType_index = [...]uint8{0, 26, 53, 80, 105, 130, 157} - -func (i GraphNodeConfigType) String() string { - if i < 0 || i >= GraphNodeConfigType(len(_GraphNodeConfigType_index)-1) { - return 
fmt.Sprintf("GraphNodeConfigType(%d)", i) - } - return _GraphNodeConfigType_name[_GraphNodeConfigType_index[i]:_GraphNodeConfigType_index[i+1]] -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go new file mode 100644 index 00000000..95ef4e94 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" + +var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125} + +func (i GraphType) String() string { + if i >= GraphType(len(_GraphType_index)-1) { + return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go index 81a68842..ab11e8ee 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/hook.go +++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go @@ -42,7 +42,7 @@ type Hook interface { PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) PreProvision(*InstanceInfo, string) (HookAction, error) - PostProvision(*InstanceInfo, string) (HookAction, error) + PostProvision(*InstanceInfo, string, error) (HookAction, error) ProvisionOutput(*InstanceInfo, string, string) // PreRefresh and PostRefresh are called before and after a single @@ -92,7 +92,7 @@ func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostProvision(*InstanceInfo, string) (HookAction, error) { +func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { return HookActionContinue, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go index 3797a1e1..0e464006 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go @@ -20,6 +20,7 @@ type MockHook struct { PostApplyError error PostApplyReturn HookAction PostApplyReturnError error + PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error) PreDiffCalled bool PreDiffInfo *InstanceInfo @@ -54,6 +55,7 @@ type MockHook struct { PostProvisionCalled bool PostProvisionInfo *InstanceInfo PostProvisionProvisionerId string + PostProvisionErrorArg error PostProvisionReturn HookAction PostProvisionError error @@ -111,6 +113,11 @@ func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAc h.PostApplyInfo = n h.PostApplyState = s h.PostApplyError = e + + if h.PostApplyFn != nil { + return h.PostApplyFn(n, s, e) + } + return h.PostApplyReturn, h.PostApplyReturnError } @@ -164,13 +171,14 @@ func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, err return h.PreProvisionReturn, h.PreProvisionError } -func (h *MockHook) PostProvision(n *InstanceInfo, provId string) (HookAction, error) { +func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) { h.Lock() defer h.Unlock() h.PostProvisionCalled = true 
h.PostProvisionInfo = n h.PostProvisionProvisionerId = provId + h.PostProvisionErrorArg = err return h.PostProvisionReturn, h.PostProvisionError } diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go index 4c9bbb7b..104d0098 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go +++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go @@ -38,7 +38,7 @@ func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) { return h.hook() } -func (h *stopHook) PostProvision(*InstanceInfo, string) (HookAction, error) { +func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { return h.hook() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go index f65414b3..b8e7d1fb 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go @@ -1,8 +1,8 @@ -// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT +// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT. package terraform -import "fmt" +import "strconv" const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" @@ -10,7 +10,7 @@ var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} func (i InstanceType) String() string { if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { - return fmt.Sprintf("InstanceType(%d)", i) + return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" } return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go index 2117fc3f..4f4e178c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go @@ -4,8 +4,7 @@ import ( "fmt" "log" "os" - "regexp" - "sort" + "strconv" "strings" "sync" @@ -26,6 +25,7 @@ const ( // for interpolations such as `aws_instance.foo.bar`. 
type Interpolater struct { Operation walkOperation + Meta *ContextMeta Module *module.Tree State *State StateLock *sync.RWMutex @@ -45,6 +45,10 @@ type InterpolationScope struct { func (i *Interpolater) Values( scope *InterpolationScope, vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) { + if scope == nil { + scope = &InterpolationScope{} + } + result := make(map[string]ast.Variable, len(vars)) // Copy the default variables @@ -84,6 +88,10 @@ func (i *Interpolater) Values( err = i.valueSelfVar(scope, n, v, result) case *config.SimpleVariable: err = i.valueSimpleVar(scope, n, v, result) + case *config.TerraformVariable: + err = i.valueTerraformVar(scope, n, v, result) + case *config.LocalVariable: + err = i.valueLocalVar(scope, n, v, result) case *config.UserVariable: err = i.valueUserVar(scope, n, v, result) default: @@ -120,17 +128,20 @@ func (i *Interpolater) valueCountVar( func unknownVariable() ast.Variable { return ast.Variable{ - Type: ast.TypeString, + Type: ast.TypeUnknown, Value: config.UnknownVariableValue, } } +func unknownValue() string { + return hil.UnknownValue +} + func (i *Interpolater) valueModuleVar( scope *InterpolationScope, n string, v *config.ModuleVariable, result map[string]ast.Variable) error { - // Build the path to the child module we want path := make([]string, len(scope.Path), len(scope.Path)+1) copy(path, scope.Path) @@ -151,6 +162,13 @@ func (i *Interpolater) valueModuleVar( // ensure that the module is in the state, so if we reach this // point otherwise it really is a panic. result[n] = unknownVariable() + + // During apply this is always an error + if i.Operation == walkApply { + return fmt.Errorf( + "Couldn't find module %q for var: %s", + v.Name, v.FullKey()) + } } else { // Get the value from the outputs if outputState, ok := mod.Outputs[v.Field]; ok { @@ -162,6 +180,13 @@ func (i *Interpolater) valueModuleVar( } else { // Same reasons as the comment above. result[n] = unknownVariable() + + // During apply this is always an error + if i.Operation == walkApply { + return fmt.Errorf( + "Couldn't find output %q for module var: %s", + v.Field, v.FullKey()) + } } } @@ -214,10 +239,7 @@ func (i *Interpolater) valueResourceVar( // If we're computing all dynamic fields, then module vars count // and we mark it as computed. if i.Operation == walkValidate { - result[n] = ast.Variable{ - Value: config.UnknownVariableValue, - Type: ast.TypeString, - } + result[n] = unknownVariable() return nil } @@ -241,11 +263,8 @@ func (i *Interpolater) valueResourceVar( // If it truly is missing, we'll catch it on a later walk. // This applies only to graph nodes that interpolate during the // config walk, e.g. providers. - if i.Operation == walkInput { - result[n] = ast.Variable{ - Value: config.UnknownVariableValue, - Type: ast.TypeString, - } + if i.Operation == walkInput || i.Operation == walkRefresh { + result[n] = unknownVariable() return nil } @@ -284,11 +303,88 @@ func (i *Interpolater) valueSimpleVar( n string, v *config.SimpleVariable, result map[string]ast.Variable) error { - // SimpleVars are never handled by Terraform's interpolator - result[n] = ast.Variable{ - Value: config.UnknownVariableValue, - Type: ast.TypeString, + // This error message includes some information for people who + // relied on this for their template_file data sources. We should + // remove this at some point but there isn't any rush. + return fmt.Errorf( + "invalid variable syntax: %q. Did you mean 'var.%s'? 
If this is part of inline `template` parameter\n"+
+ "then you must escape the interpolation with two dollar signs. For\n"+
+ "example: ${a} becomes $${a}.",
+ n, n)
+}
+
+func (i *Interpolater) valueTerraformVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.TerraformVariable,
+ result map[string]ast.Variable) error {
+ // "env" is supported for backward compatibility, but it's deprecated and
+ // so we won't advertise it as being allowed in the error message. It will
+ // be removed in a future version of Terraform.
+ if v.Field != "workspace" && v.Field != "env" {
+ return fmt.Errorf(
+ "%s: only supported key for 'terraform.X' interpolations is 'workspace'", n)
+ }
+
+ if i.Meta == nil {
+ return fmt.Errorf(
+ "%s: internal error: nil Meta. Please report a bug.", n)
+ }
+
+ result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env}
+ return nil
+}
+
+func (i *Interpolater) valueLocalVar(
+ scope *InterpolationScope,
+ n string,
+ v *config.LocalVariable,
+ result map[string]ast.Variable,
+) error {
+ i.StateLock.RLock()
+ defer i.StateLock.RUnlock()
+
+ modTree := i.Module
+ if len(scope.Path) > 1 {
+ modTree = i.Module.Child(scope.Path[1:])
+ }
+
+ // Find the named local value in the configuration so we can verify
+ // that it is actually declared and so we can access its definition
+ // if we need to.
+ var cl *config.Local
+ for _, l := range modTree.Config().Locals {
+ if l.Name == v.Name {
+ cl = l
+ break
+ }
 }
+
+ if cl == nil {
+ return fmt.Errorf("%s: no local value of this name has been declared", n)
+ }
+
+ // Get the relevant module
+ module := i.State.ModuleByPath(scope.Path)
+ if module == nil {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ rawV, exists := module.Locals[v.Name]
+ if !exists {
+ result[n] = unknownVariable()
+ return nil
+ }
+
+ varV, err := hil.InterfaceToVariable(rawV)
+ if err != nil {
+ // Should never happen, since interpolation should always produce
+ // something we can feed back in to interpolation.
+ return fmt.Errorf("%s: %s", n, err)
+ }
+
+ result[n] = varV
 return nil
 }
@@ -368,7 +464,12 @@ func (i *Interpolater) computeResourceVariable(
 // If we're requesting "count" it's a special variable that we grab
 // directly from the config itself.
 if v.Field == "count" {
- count, err := cr.Count()
+ var count int
+ if cr != nil {
+ count, err = cr.Count()
+ } else {
+ count, err = i.resourceCountMax(module, cr, v)
+ }
 if err != nil {
 return nil, fmt.Errorf(
 "Error reading %s count: %s",
@@ -379,31 +480,52 @@
 return &ast.Variable{Type: ast.TypeInt, Value: count}, nil
 }

- // If we have no module in the state yet or count, return empty
- if module == nil || len(module.Resources) == 0 {
- return nil, nil
- }
-
 // Get the resource out from the state. We know the state exists
 // at this point and if there is a state, we expect there to be a
 // resource with the given name.
- r, ok := module.Resources[id]
- if !ok && v.Multi && v.Index == 0 {
- r, ok = module.Resources[v.ResourceId()]
- }
- if !ok {
- r = nil
- }
-
- if r == nil {
- goto MISSING
+ var r *ResourceState
+ if module != nil && len(module.Resources) > 0 {
+ var ok bool
+ r, ok = module.Resources[id]
+ if !ok && v.Multi && v.Index == 0 {
+ r, ok = module.Resources[v.ResourceId()]
+ }
+ if !ok {
+ r = nil
+ }
 }
+
+ if r == nil || r.Primary == nil {
+ if i.Operation == walkApply || i.Operation == walkPlan {
+ return nil, fmt.Errorf(
+ "Resource '%s' not found for variable '%s'",
+ v.ResourceId(),
+ v.FullKey())
+ }
+
+ // If we have no module in the state yet or count, return empty.
+ // NOTE(@mitchellh): I actually don't know why this is here. During
+ // a refactor I kept this here to maintain the same behavior, but
+ // I'm not sure why it's here.
+ if module == nil || len(module.Resources) == 0 {
+ return nil, nil
+ }

- if r.Primary == nil {
 goto MISSING
 }

 if attr, ok := r.Primary.Attributes[v.Field]; ok {
- return &ast.Variable{Type: ast.TypeString, Value: attr}, nil
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
+ }
+
+ // special case for the "id" field which is usually also an attribute
+ if v.Field == "id" && r.Primary.ID != "" {
+ // This is usually pulled from the attributes, but is sometimes missing
+ // during destroy. We can return the ID field in this case.
+ // FIXME: there should only be one ID to rule them all.
+ log.Printf("[WARN] resource %s missing 'id' attribute", v.ResourceId())
+ v, err := hil.InterfaceToVariable(r.Primary.ID)
+ return &v, err
 }

 // computed list or map attribute
@@ -434,13 +556,15 @@
 // Lists and sets make this
 key := fmt.Sprintf("%s.#", strings.Join(parts[:i], "."))
 if attr, ok := r.Primary.Attributes[key]; ok {
- return &ast.Variable{Type: ast.TypeString, Value: attr}, nil
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
 }

 // Maps make this
 key = fmt.Sprintf("%s", strings.Join(parts[:i], "."))
 if attr, ok := r.Primary.Attributes[key]; ok {
- return &ast.Variable{Type: ast.TypeString, Value: attr}, nil
+ v, err := hil.InterfaceToVariable(attr)
+ return &v, err
 }
 }
 }
@@ -463,7 +587,7 @@ MISSING:
 //
 // For an input walk, computed values are okay to return because we're only
 // looking for missing variables to prompt the user for.
- if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput {
+ if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
 return &unknownVariable, nil
 }

@@ -483,6 +607,13 @@ func (i *Interpolater) computeResourceMultiVariable(
 unknownVariable := unknownVariable()

+ // If we're only looking for input, we don't need to expand a
+ // multi-variable. This prevents us from encountering things that should be
+ // known but aren't because the state has yet to be refreshed.
+ if i.Operation == walkInput {
+ return &unknownVariable, nil
+ }
+
 // Get the information about this resource variable, and verify
 // that it exists and such.
module, cr, err := i.resourceVariableInfo(scope, v) @@ -490,17 +621,14 @@ func (i *Interpolater) computeResourceMultiVariable( return nil, err } - // Get the count so we know how many to iterate over - count, err := cr.Count() + // Get the keys for all the resources that are created for this resource + countMax, err := i.resourceCountMax(module, cr, v) if err != nil { - return nil, fmt.Errorf( - "Error reading %s count: %s", - v.ResourceId(), - err) + return nil, err } // If count is zero, we return an empty list - if count == 0 { + if countMax == 0 { return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil } @@ -510,13 +638,17 @@ func (i *Interpolater) computeResourceMultiVariable( } var values []interface{} - for j := 0; j < count; j++ { - id := fmt.Sprintf("%s.%d", v.ResourceId(), j) - - // If we're dealing with only a single resource, then the - // ID doesn't have a trailing index. - if count == 1 { - id = v.ResourceId() + for idx := 0; idx < countMax; idx++ { + id := fmt.Sprintf("%s.%d", v.ResourceId(), idx) + + // ID doesn't have a trailing index. We try both here, but if a value + // without a trailing index is found we prefer that. This choice + // is for legacy reasons: older versions of TF preferred it. + if id == v.ResourceId()+".0" { + potential := v.ResourceId() + if _, ok := module.Resources[potential]; ok { + id = potential + } } r, ok := module.Resources[id] @@ -529,14 +661,15 @@ func (i *Interpolater) computeResourceMultiVariable( } if singleAttr, ok := r.Primary.Attributes[v.Field]; ok { - if singleAttr == config.UnknownVariableValue { - return &unknownVariable, nil - } - values = append(values, singleAttr) continue } + if v.Field == "id" && r.Primary.ID != "" { + log.Printf("[WARN] resource %s missing 'id' attribute", v.ResourceId()) + values = append(values, r.Primary.ID) + } + // computed list or map attribute _, isList := r.Primary.Attributes[v.Field+".#"] _, isMap := r.Primary.Attributes[v.Field+".%"] @@ -548,10 +681,6 @@ func (i *Interpolater) computeResourceMultiVariable( return nil, err } - if multiAttr == unknownVariable { - return &ast.Variable{Type: ast.TypeString, Value: ""}, nil - } - values = append(values, multiAttr) } @@ -585,7 +714,6 @@ func (i *Interpolater) computeResourceMultiVariable( func (i *Interpolater) interpolateComplexTypeAttribute( resourceID string, attributes map[string]string) (ast.Variable, error) { - // We can now distinguish between lists and maps in state by the count field: // - lists (and by extension, sets) use the traditional .# notation // - maps use the newer .% notation @@ -603,21 +731,8 @@ func (i *Interpolater) interpolateComplexTypeAttribute( return unknownVariable(), nil } - keys := make([]string, 0) - listElementKey := regexp.MustCompile("^" + resourceID + "\\.[0-9]+$") - for id := range attributes { - if listElementKey.MatchString(id) { - keys = append(keys, id) - } - } - sort.Strings(keys) - - var members []string - for _, key := range keys { - members = append(members, attributes[key]) - } - - return hil.InterfaceToVariable(members) + expanded := flatmap.Expand(attributes, resourceID) + return hil.InterfaceToVariable(expanded) } if lengthAttr, isMap := attributes[resourceID+".%"]; isMap { @@ -632,15 +747,7 @@ func (i *Interpolater) interpolateComplexTypeAttribute( return unknownVariable(), nil } - resourceFlatMap := make(map[string]string) - mapElementKey := regexp.MustCompile("^" + resourceID + "\\.([^%]+)$") - for id, val := range attributes { - if mapElementKey.MatchString(id) { - resourceFlatMap[id] = 
val
- }
- }
-
- expanded := flatmap.Expand(resourceFlatMap, resourceID)
+ expanded := flatmap.Expand(attributes, resourceID)
 return hil.InterfaceToVariable(expanded)
 }
@@ -667,14 +774,100 @@ func (i *Interpolater) resourceVariableInfo(
 break
 }
 }
- if cr == nil {
- return nil, nil, fmt.Errorf(
- "Resource '%s' not found for variable '%s'",
- v.ResourceId(),
- v.FullKey())
- }

 // Get the relevant module
 module := i.State.ModuleByPath(scope.Path)
 return module, cr, nil
 }
+
+func (i *Interpolater) resourceCountMax(
+ ms *ModuleState,
+ cr *config.Resource,
+ v *config.ResourceVariable) (int, error) {
+ id := v.ResourceId()
+
+ // If we're NOT applying, then we assume we can read the count
+ // from the state. Plan and so on may not have any state yet so
+ // we do a full interpolation.
+ // Don't forget walkDestroy, which is a special case of walkApply
+ if !(i.Operation == walkApply || i.Operation == walkDestroy) {
+ if cr == nil {
+ return 0, nil
+ }
+
+ count, err := cr.Count()
+ if err != nil {
+ return 0, err
+ }
+
+ return count, nil
+ }
+
+ // If we have no module state in the apply walk, that suggests we've hit
+ // a rather awkward edge-case: the resource this variable refers to
+ // has count = 0 and is the only resource processed so far on this walk,
+ // and so we've ended up not creating any resource states yet. We don't
+ // create a module state until the first resource is written into it,
+ // so the module state doesn't exist when we get here.
+ //
+ // In this case we act as we would if we had been passed a module
+ // with an empty resource state map.
+ if ms == nil {
+ return 0, nil
+ }
+
+ // We need to determine the list of resource keys to get values from.
+ // This needs to be sorted so the order is deterministic. We used to
+ // use "cr.Count()" but that doesn't work if the count is interpolated
+ // and we can't guarantee that so we instead depend on the state.
+ max := -1
+ for k, s := range ms.Resources {
+ // This resource may have been just removed, in which case the Primary
+ // may be nil, or just empty.
+ if s == nil || s.Primary == nil || len(s.Primary.Attributes) == 0 {
+ continue
+ }
+
+ // Get the index number for this resource
+ index := ""
+ if k == id {
+ // If the key is the id, then it's just 0 (no explicit index)
+ index = "0"
+ } else if strings.HasPrefix(k, id+".") {
+ // Grab the index number out of the state
+ index = k[len(id+"."):]
+ if idx := strings.IndexRune(index, '.'); idx >= 0 {
+ index = index[:idx]
+ }
+ }
+
+ // If there was no index then this resource didn't match
+ // the one we're looking for, so skip it.
+ if index == "" {
+ continue
+ }
+
+ // Turn the index into an int
+ raw, err := strconv.ParseInt(index, 0, 0)
+ if err != nil {
+ return 0, fmt.Errorf(
+ "%s: error parsing index %q as int: %s",
+ id, index, err)
+ }
+
+ // Keep track of this index if it's the max
+ if new := int(raw); new > max {
+ max = new
+ }
+ }
+
+ // If we never found any matching resources in the state, we
+ // have zero.
+ if max == -1 {
+ return 0, nil
+ }
+
+ // The result value is "max+1" because we're returning the
+ // max COUNT, not the max INDEX, and we zero-index.
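+ // For example, if the state holds "foo.0" and "foo.3", max is 3 and we
+ // report a count of 4, even though "foo.1" and "foo.2" are absent.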
+ return max + 1, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go new file mode 100644 index 00000000..4594cb60 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go @@ -0,0 +1,155 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/moduledeps" + "github.com/hashicorp/terraform/plugin/discovery" +) + +// ModuleTreeDependencies returns the dependencies of the tree of modules +// described by the given configuration tree and state. +// +// Both configuration and state are required because there can be resources +// implied by instances in the state that no longer exist in config. +// +// This function will panic if any invalid version constraint strings are +// present in the configuration. This is guaranteed not to happen for any +// configuration that has passed a call to Config.Validate(). +func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module { + // First we walk the configuration tree to build the overall structure + // and capture the explicit/implicit/inherited provider dependencies. + deps := moduleTreeConfigDependencies(root, nil) + + // Next we walk over the resources in the state to catch any additional + // dependencies created by existing resources that are no longer in config. + // Most things we find in state will already be present in 'deps', but + // we're interested in the rare thing that isn't. + moduleTreeMergeStateDependencies(deps, state) + + return deps +} + +func moduleTreeConfigDependencies(root *module.Tree, inheritProviders map[string]*config.ProviderConfig) *moduledeps.Module { + if root == nil { + // If no config is provided, we'll make a synthetic root. + // This isn't necessarily correct if we're called with a nil that + // *isn't* at the root, but in practice that can never happen. + return &moduledeps.Module{ + Name: "root", + } + } + + ret := &moduledeps.Module{ + Name: root.Name(), + } + + cfg := root.Config() + providerConfigs := cfg.ProviderConfigsByFullName() + + // Provider dependencies + { + providers := make(moduledeps.Providers, len(providerConfigs)) + + // Any providerConfigs elements are *explicit* provider dependencies, + // which is the only situation where the user might provide an actual + // version constraint. We'll take care of these first. + for fullName, pCfg := range providerConfigs { + inst := moduledeps.ProviderInstance(fullName) + versionSet := discovery.AllVersions + if pCfg.Version != "" { + versionSet = discovery.ConstraintStr(pCfg.Version).MustParse() + } + providers[inst] = moduledeps.ProviderDependency{ + Constraints: versionSet, + Reason: moduledeps.ProviderDependencyExplicit, + } + } + + // Each resource in the configuration creates an *implicit* provider + // dependency, though we'll only record it if there isn't already + // an explicit dependency on the same provider. 
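+		//
+		// For example (a sketch of a typical configuration): a block
+		//
+		//     provider "aws" {
+		//       version = "~> 1.20"
+		//     }
+		//
+		// records an explicit dependency constrained to "~> 1.20", while an
+		// "aws_instance" resource with no such block falls through to the
+		// loop below and records an implicit dependency on discovery.AllVersions.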
+		for _, rc := range cfg.Resources {
+			fullName := rc.ProviderFullName()
+			inst := moduledeps.ProviderInstance(fullName)
+			if _, exists := providers[inst]; exists {
+				// Explicit dependency already present
+				continue
+			}
+
+			reason := moduledeps.ProviderDependencyImplicit
+			if _, inherited := inheritProviders[fullName]; inherited {
+				reason = moduledeps.ProviderDependencyInherited
+			}
+
+			providers[inst] = moduledeps.ProviderDependency{
+				Constraints: discovery.AllVersions,
+				Reason:      reason,
+			}
+		}
+
+		ret.Providers = providers
+	}
+
+	childInherit := make(map[string]*config.ProviderConfig)
+	for k, v := range inheritProviders {
+		childInherit[k] = v
+	}
+	for k, v := range providerConfigs {
+		childInherit[k] = v
+	}
+	for _, c := range root.Children() {
+		ret.Children = append(ret.Children, moduleTreeConfigDependencies(c, childInherit))
+	}
+
+	return ret
+}
+
+func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) {
+	if state == nil {
+		return
+	}
+
+	findModule := func(path []string) *moduledeps.Module {
+		module := root
+		for _, name := range path[1:] { // skip initial "root"
+			var next *moduledeps.Module
+			for _, cm := range module.Children {
+				if cm.Name == name {
+					next = cm
+					break
+				}
+			}
+
+			if next == nil {
+				// If we didn't find a next node, we'll need to make one
+				next = &moduledeps.Module{
+					Name: name,
+				}
+				module.Children = append(module.Children, next)
+			}
+
+			module = next
+		}
+		return module
+	}
+
+	for _, ms := range state.Modules {
+		module := findModule(ms.Path)
+
+		for _, is := range ms.Resources {
+			fullName := config.ResourceProviderFullName(is.Type, is.Provider)
+			inst := moduledeps.ProviderInstance(fullName)
+			if _, exists := module.Providers[inst]; !exists {
+				if module.Providers == nil {
+					module.Providers = make(moduledeps.Providers)
+				}
+				module.Providers[inst] = moduledeps.ProviderDependency{
+					Constraints: discovery.AllVersions,
+					Reason:      moduledeps.ProviderDependencyFromState,
+				}
+			}
+		}
+	}
+
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
new file mode 100644
index 00000000..bd32c79f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
@@ -0,0 +1,14 @@
+package terraform
+
+// NodeCountBoundary fixes any "count boundaries" in the state: resources
+// that are named "foo.0" when they should be named "foo"
+type NodeCountBoundary struct{}
+
+func (n *NodeCountBoundary) Name() string {
+	return "meta.count-boundary (count boundary fixup)"
+}
+
+// GraphNodeEvalable
+func (n *NodeCountBoundary) EvalTree() EvalNode {
+	return &EvalCountFixZeroOneBoundaryGlobal{}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
new file mode 100644
index 00000000..e32cea88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
@@ -0,0 +1,22 @@
+package terraform
+
+// NodeDestroyableDataResource represents a data resource that is "destroyable":
+// its only operation is to remove the data source's entry from the state.
+type NodeDestroyableDataResource struct {
+	*NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableDataResource) EvalTree() EvalNode {
+	addr := n.NodeAbstractResource.Addr
+
+	// stateId is the ID to put into the state
+	stateId := addr.stateId()
+
+	// Just destroy it.
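+	// Writing a nil *InstanceState under this ID removes the data source's
+	// entry from the state entirely; unlike a managed resource, a data
+	// source has nothing remote to deprovision.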
+ var state *InstanceState + return &EvalWriteState{ + Name: stateId, + State: &state, // state is nil here + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go new file mode 100644 index 00000000..d5ca641a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go @@ -0,0 +1,221 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/dag" +) + +// NodeRefreshableDataResource represents a resource that is "plannable": +// it is ready to be planned in order to create a diff. +type NodeRefreshableDataResource struct { + *NodeAbstractCountResource +} + +// GraphNodeDynamicExpandable +func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + // Grab the state which we read + state, lock := ctx.State() + lock.RLock() + defer lock.RUnlock() + + // Expand the resource count which must be available by now from EvalTree + count, err := n.Config.Count() + if err != nil { + return nil, err + } + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + + return &NodeRefreshableDataResourceInstance{ + NodeAbstractResource: a, + } + } + + // We also need a destroyable resource for orphans that are a result of a + // scaled-in count. + concreteResourceDestroyable := func(a *NodeAbstractResource) dag.Vertex { + // Add the config since we don't do that via transforms + a.Config = n.Config + + return &NodeDestroyableDataResource{ + NodeAbstractResource: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count. + &ResourceCountTransformer{ + Concrete: concreteResource, + Count: count, + Addr: n.ResourceAddr(), + }, + + // Add the count orphans. As these are orphaned refresh nodes, we add them + // directly as NodeDestroyableDataResource. + &OrphanResourceCountTransformer{ + Concrete: concreteResourceDestroyable, + Count: count, + Addr: n.ResourceAddr(), + State: state, + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{ParsedTargets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodeRefreshableDataResource", + } + + return b.Build(ctx.Path()) +} + +// NodeRefreshableDataResourceInstance represents a _single_ resource instance +// that is refreshable. +type NodeRefreshableDataResourceInstance struct { + *NodeAbstractResource +} + +// GraphNodeEvalable +func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. 
More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: addr.Type, + } + + // Get the state if we have it, if not we build it + rs := n.ResourceState + if rs == nil { + rs = &ResourceState{ + Provider: n.ResolvedProvider, + } + } + + // If the config isn't empty we update the state + if n.Config != nil { + rs = &ResourceState{ + Type: n.Config.Type, + Provider: n.Config.Provider, + Dependencies: n.StateReferences(), + } + } + + // Build the resource for eval + resource := &Resource{ + Name: addr.Name, + Type: addr.Type, + CountIndex: addr.Index, + } + if resource.CountIndex < 0 { + resource.CountIndex = 0 + } + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var config *ResourceConfig + var diff *InstanceDiff + var provider ResourceProvider + var state *InstanceState + + return &EvalSequence{ + Nodes: []EvalNode{ + // Always destroy the existing state first, since we must + // make sure that values from a previous read will not + // get interpolated if we end up needing to defer our + // loading until apply time. + &EvalWriteState{ + Name: stateId, + ResourceType: rs.Type, + Provider: n.ResolvedProvider, + Dependencies: rs.Dependencies, + State: &state, // state is nil here + }, + + &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &config, + }, + + // The rest of this pass can proceed only if there are no + // computed values in our config. + // (If there are, we'll deal with this during the plan and + // apply phases.) + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 { + return true, EvalEarlyExitError{} + } + + // If the config explicitly has a depends_on for this + // data source, assume the intention is to prevent + // refreshing ahead of that dependency. + if len(n.Config.DependsOn) > 0 { + return true, EvalEarlyExitError{} + } + + return true, nil + }, + + Then: EvalNoop{}, + }, + + // The remainder of this pass is the same as running + // a "plan" pass immediately followed by an "apply" pass, + // populating the state early so it'll be available to + // provider configurations that need this data during + // refresh/plan. + &EvalGetProvider{ + Name: n.ResolvedProvider, + Output: &provider, + }, + + &EvalReadDataDiff{ + Info: info, + Config: &config, + Provider: &provider, + Output: &diff, + OutputState: &state, + }, + + &EvalReadDataApply{ + Info: info, + Diff: &diff, + Provider: &provider, + Output: &state, + }, + + &EvalWriteState{ + Name: stateId, + ResourceType: rs.Type, + Provider: n.ResolvedProvider, + Dependencies: rs.Dependencies, + State: &state, + }, + + &EvalUpdateStateHook{}, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_local.go b/vendor/github.com/hashicorp/terraform/terraform/node_local.go new file mode 100644 index 00000000..d3872226 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_local.go @@ -0,0 +1,66 @@ +package terraform + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/config" +) + +// NodeLocal represents a named local value in a particular module. +// +// Local value nodes only have one operation, common to all walk types: +// evaluate the result and place it in state. 
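+//
+// For example, a configuration such as (a sketch):
+//
+//     locals {
+//       instance_name = "web-${var.env}"
+//     }
+//
+// produces one NodeLocal whose Name() is "local.instance_name" at the
+// root, or "module.app.local.instance_name" inside a child module.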
+type NodeLocal struct { + PathValue []string + Config *config.Local +} + +func (n *NodeLocal) Name() string { + result := fmt.Sprintf("local.%s", n.Config.Name) + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeLocal) Path() []string { + return n.PathValue +} + +// RemovableIfNotTargeted +func (n *NodeLocal) RemoveIfNotTargeted() bool { + return true +} + +// GraphNodeReferenceable +func (n *NodeLocal) ReferenceableName() []string { + name := fmt.Sprintf("local.%s", n.Config.Name) + return []string{name} +} + +// GraphNodeReferencer +func (n *NodeLocal) References() []string { + var result []string + result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) + for _, v := range result { + split := strings.Split(v, "/") + for i, s := range split { + split[i] = s + ".destroy" + } + + result = append(result, strings.Join(split, "/")) + } + + return result +} + +// GraphNodeEvalable +func (n *NodeLocal) EvalTree() EvalNode { + return &EvalLocal{ + Name: n.Config.Name, + Value: n.Config.RawConfig, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go new file mode 100644 index 00000000..bb3e5ee1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go @@ -0,0 +1,77 @@ +package terraform + +import ( + "fmt" + "log" + "reflect" +) + +// NodeModuleRemoved represents a module that is no longer in the +// config. +type NodeModuleRemoved struct { + PathValue []string +} + +func (n *NodeModuleRemoved) Name() string { + return fmt.Sprintf("%s (removed)", modulePrefixStr(n.PathValue)) +} + +// GraphNodeSubPath +func (n *NodeModuleRemoved) Path() []string { + return n.PathValue +} + +// GraphNodeEvalable +func (n *NodeModuleRemoved) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, + Node: &EvalDeleteModule{ + PathValue: n.PathValue, + }, + } +} + +func (n *NodeModuleRemoved) ReferenceGlobal() bool { + return true +} + +func (n *NodeModuleRemoved) References() []string { + return []string{modulePrefixStr(n.PathValue)} +} + +// EvalDeleteModule is an EvalNode implementation that removes an empty module +// entry from the state. +type EvalDeleteModule struct { + PathValue []string +} + +func (n *EvalDeleteModule) Eval(ctx EvalContext) (interface{}, error) { + state, lock := ctx.State() + if state == nil { + return nil, nil + } + + // Get a write lock so we can access this instance + lock.Lock() + defer lock.Unlock() + + // Make sure we have a clean state + // Destroyed resources aren't deleted, they're written with an ID of "". + state.prune() + + // find the module and delete it + for i, m := range state.Modules { + if reflect.DeepEqual(m.Path, n.PathValue) { + if !m.Empty() { + // a targeted apply may leave module resources even without a config, + // so just log this and return. + log.Printf("[DEBUG] cannot remove module %s, not empty", modulePrefixStr(n.PathValue)) + break + } + state.Modules = append(state.Modules[:i], state.Modules[i+1:]...) 
+ break + } + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go new file mode 100644 index 00000000..66ff7d5e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go @@ -0,0 +1,138 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" +) + +// NodeApplyableModuleVariable represents a module variable input during +// the apply step. +type NodeApplyableModuleVariable struct { + PathValue []string + Config *config.Variable // Config is the var in the config + Value *config.RawConfig // Value is the value that is set + + Module *module.Tree // Antiquated, want to remove +} + +func (n *NodeApplyableModuleVariable) Name() string { + result := fmt.Sprintf("var.%s", n.Config.Name) + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeApplyableModuleVariable) Path() []string { + // We execute in the parent scope (above our own module) so that + // we can access the proper interpolations. + if len(n.PathValue) > 2 { + return n.PathValue[:len(n.PathValue)-1] + } + + return rootModulePath +} + +// RemovableIfNotTargeted +func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeReferenceGlobal +func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool { + // We have to create fully qualified references because we cross + // boundaries here: our ReferenceableName is in one path and our + // References are from another path. + return true +} + +// GraphNodeReferenceable +func (n *NodeApplyableModuleVariable) ReferenceableName() []string { + return []string{n.Name()} +} + +// GraphNodeReferencer +func (n *NodeApplyableModuleVariable) References() []string { + // If we have no value set, we depend on nothing + if n.Value == nil { + return nil + } + + // Can't depend on anything if we're in the root + if len(n.PathValue) < 2 { + return nil + } + + // Otherwise, we depend on anything that is in our value, but + // specifically in the namespace of the parent path. + // Create the prefix based on the path + var prefix string + if p := n.Path(); len(p) > 0 { + prefix = modulePrefixStr(p) + } + + result := ReferencesFromConfig(n.Value) + return modulePrefixList(result, prefix) +} + +// GraphNodeEvalable +func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { + // If we have no value, do nothing + if n.Value == nil { + return &EvalNoop{} + } + + // Otherwise, interpolate the value of this variable and set it + // within the variables mapping. 
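+	// The nodes below interpolate the raw value in the parent scope, flatten
+	// it into the variables map (EvalVariableBlock), coerce and type-check it
+	// against the variable's declared type, and finally publish it to the
+	// child module via EvalSetVariables.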
+ var config *ResourceConfig + variables := make(map[string]interface{}) + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalOpFilter{ + Ops: []walkOperation{walkInput}, + Node: &EvalInterpolate{ + Config: n.Value, + Output: &config, + ContinueOnErr: true, + }, + }, + &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, + walkDestroy, walkValidate}, + Node: &EvalInterpolate{ + Config: n.Value, + Output: &config, + }, + }, + + &EvalVariableBlock{ + Config: &config, + VariableValues: variables, + }, + + &EvalCoerceMapVariable{ + Variables: variables, + ModulePath: n.PathValue, + ModuleTree: n.Module, + }, + + &EvalTypeCheckVariable{ + Variables: variables, + ModulePath: n.PathValue, + ModuleTree: n.Module, + }, + + &EvalSetVariables{ + Module: &n.PathValue[len(n.PathValue)-1], + Variables: variables, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go new file mode 100644 index 00000000..83e9925a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go @@ -0,0 +1,153 @@ +package terraform + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/dag" +) + +// NodeApplyableOutput represents an output that is "applyable": +// it is ready to be applied. +type NodeApplyableOutput struct { + PathValue []string + Config *config.Output // Config is the output in the config +} + +func (n *NodeApplyableOutput) Name() string { + result := fmt.Sprintf("output.%s", n.Config.Name) + if len(n.PathValue) > 1 { + result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + } + + return result +} + +// GraphNodeSubPath +func (n *NodeApplyableOutput) Path() []string { + return n.PathValue +} + +// RemovableIfNotTargeted +func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeTargetDownstream +func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { + // If any of the direct dependencies of an output are targeted then + // the output must always be targeted as well, so its value will always + // be up-to-date at the completion of an apply walk. + return true +} + +// GraphNodeReferenceable +func (n *NodeApplyableOutput) ReferenceableName() []string { + name := fmt.Sprintf("output.%s", n.Config.Name) + return []string{name} +} + +// GraphNodeReferencer +func (n *NodeApplyableOutput) References() []string { + var result []string + result = append(result, n.Config.DependsOn...) + result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) + for _, v := range result { + split := strings.Split(v, "/") + for i, s := range split { + split[i] = s + ".destroy" + } + + result = append(result, strings.Join(split, "/")) + } + + return result +} + +// GraphNodeEvalable +func (n *NodeApplyableOutput) EvalTree() EvalNode { + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalOpFilter{ + // Don't let interpolation errors stop Input, since it happens + // before Refresh. 
Ops: []walkOperation{walkInput},
+				Node: &EvalWriteOutput{
+					Name:          n.Config.Name,
+					Sensitive:     n.Config.Sensitive,
+					Value:         n.Config.RawConfig,
+					ContinueOnErr: true,
+				},
+			},
+			&EvalOpFilter{
+				Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy},
+				Node: &EvalWriteOutput{
+					Name:      n.Config.Name,
+					Sensitive: n.Config.Sensitive,
+					Value:     n.Config.RawConfig,
+				},
+			},
+		},
+	}
+}
+
+// NodeDestroyableOutput represents an output that is "destroyable":
+// its application will remove the output from the state.
+type NodeDestroyableOutput struct {
+	PathValue []string
+	Config    *config.Output // Config is the output in the config
+}
+
+func (n *NodeDestroyableOutput) Name() string {
+	result := fmt.Sprintf("output.%s (destroy)", n.Config.Name)
+	if len(n.PathValue) > 1 {
+		result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+	}
+
+	return result
+}
+
+// GraphNodeSubPath
+func (n *NodeDestroyableOutput) Path() []string {
+	return n.PathValue
+}
+
+// RemovableIfNotTargeted
+func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool {
+	// We need to add this so that this node will be removed if
+	// it isn't targeted or a dependency of a target.
+	return true
+}
+
+// This will keep the destroy node in the graph if its corresponding output
+// node is also in the destroy graph.
+func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool {
+	return true
+}
+
+// GraphNodeReferencer
+func (n *NodeDestroyableOutput) References() []string {
+	var result []string
+	result = append(result, n.Config.DependsOn...)
+	result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
+	for _, v := range result {
+		split := strings.Split(v, "/")
+		for i, s := range split {
+			split[i] = s + ".destroy"
+		}
+
+		result = append(result, strings.Join(split, "/"))
+	}
+
+	return result
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableOutput) EvalTree() EvalNode {
+	return &EvalDeleteOutput{
+		Name: n.Config.Name,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
new file mode 100644
index 00000000..0fd1554a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
@@ -0,0 +1,40 @@
+package terraform
+
+import (
+	"fmt"
+)
+
+// NodeOutputOrphan represents an output that is an orphan.
+type NodeOutputOrphan struct {
+	OutputName string
+	PathValue  []string
+}
+
+func (n *NodeOutputOrphan) Name() string {
+	result := fmt.Sprintf("output.%s (orphan)", n.OutputName)
+	if len(n.PathValue) > 1 {
+		result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+	}
+
+	return result
+}
+
+// GraphNodeReferenceable
+func (n *NodeOutputOrphan) ReferenceableName() []string {
+	return []string{"output." 
+ n.OutputName} +} + +// GraphNodeSubPath +func (n *NodeOutputOrphan) Path() []string { + return n.PathValue +} + +// GraphNodeEvalable +func (n *NodeOutputOrphan) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, + Node: &EvalDeleteOutput{ + Name: n.OutputName, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go new file mode 100644 index 00000000..2071ab16 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go @@ -0,0 +1,11 @@ +package terraform + +// NodeApplyableProvider represents a provider during an apply. +type NodeApplyableProvider struct { + *NodeAbstractProvider +} + +// GraphNodeEvalable +func (n *NodeApplyableProvider) EvalTree() EvalNode { + return ProviderEvalTree(n, n.ProviderConfig()) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go new file mode 100644 index 00000000..9e490f7b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go @@ -0,0 +1,95 @@ +package terraform + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/dag" +) + +// ConcreteProviderNodeFunc is a callback type used to convert an +// abstract provider to a concrete one of some type. +type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex + +// NodeAbstractProvider represents a provider that has no associated operations. +// It registers all the common interfaces across operations for providers. +type NodeAbstractProvider struct { + NameValue string + PathValue []string + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but also be explicitly + // set if you already have that information. + + Config *config.ProviderConfig +} + +func ResolveProviderName(name string, path []string) string { + if strings.Contains(name, "provider.") { + // already resolved + return name + } + + name = fmt.Sprintf("provider.%s", name) + if len(path) >= 1 { + name = fmt.Sprintf("%s.%s", modulePrefixStr(path), name) + } + + return name +} + +func (n *NodeAbstractProvider) Name() string { + return ResolveProviderName(n.NameValue, n.PathValue) +} + +// GraphNodeSubPath +func (n *NodeAbstractProvider) Path() []string { + return n.PathValue +} + +// RemovableIfNotTargeted +func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { + // We need to add this so that this node will be removed if + // it isn't targeted or a dependency of a target. + return true +} + +// GraphNodeReferencer +func (n *NodeAbstractProvider) References() []string { + if n.Config == nil { + return nil + } + + return ReferencesFromConfig(n.Config.RawConfig) +} + +// GraphNodeProvider +func (n *NodeAbstractProvider) ProviderName() string { + return n.NameValue +} + +// GraphNodeProvider +func (n *NodeAbstractProvider) ProviderConfig() *config.ProviderConfig { + if n.Config == nil { + return nil + } + + return n.Config +} + +// GraphNodeAttachProvider +func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) { + n.Config = c +} + +// GraphNodeDotter impl. 
+func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+	return &dag.DotNode{
+		Name: name,
+		Attrs: map[string]string{
+			"label": n.Name(),
+			"shape": "diamond",
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
new file mode 100644
index 00000000..a00bc46f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -0,0 +1,34 @@
+package terraform
+
+import (
+	"fmt"
+)
+
+// NodeDisabledProvider represents a provider that is disabled. A disabled
+// provider does nothing. It exists to properly set inheritance information
+// for child providers.
+type NodeDisabledProvider struct {
+	*NodeAbstractProvider
+}
+
+func (n *NodeDisabledProvider) Name() string {
+	return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
+}
+
+// GraphNodeEvalable
+func (n *NodeDisabledProvider) EvalTree() EvalNode {
+	var resourceConfig *ResourceConfig
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			&EvalInterpolateProvider{
+				Config: n.ProviderConfig(),
+				Output: &resourceConfig,
+			},
+			&EvalBuildProviderConfig{
+				Provider: n.ProviderName(),
+				Config:   &resourceConfig,
+				Output:   &resourceConfig,
+			},
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
new file mode 100644
index 00000000..bb117c1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/config"
+)
+
+// NodeProvisioner represents a provisioner that has no associated operations.
+// It registers all the common interfaces across operations for provisioners.
+type NodeProvisioner struct {
+	NameValue string
+	PathValue []string
+
+	// The fields below will be automatically set using the Attach
+	// interfaces if you're running those transforms, but can also be
+	// explicitly set if you already have that information.
+
+	Config *config.ProviderConfig
+}
+
+func (n *NodeProvisioner) Name() string {
+	result := fmt.Sprintf("provisioner.%s", n.NameValue)
+	if len(n.PathValue) > 1 {
+		result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+	}
+
+	return result
+}
+
+// GraphNodeSubPath
+func (n *NodeProvisioner) Path() []string {
+	return n.PathValue
+}
+
+// GraphNodeProvisioner
+func (n *NodeProvisioner) ProvisionerName() string {
+	return n.NameValue
+}
+
+// GraphNodeEvalable impl.
+func (n *NodeProvisioner) EvalTree() EvalNode {
+	return &EvalInitProvisioner{Name: n.NameValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
new file mode 100644
index 00000000..73509c87
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -0,0 +1,247 @@
+package terraform
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// ConcreteResourceNodeFunc is a callback type used to convert an
+// abstract resource to a concrete one of some type.
+type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
+
+// GraphNodeResource is implemented by any nodes that represent a resource.
+// The type of operation cannot be assumed, only that this node represents
+// the given resource.
+type GraphNodeResource interface { + ResourceAddr() *ResourceAddress +} + +// NodeAbstractResource represents a resource that has no associated +// operations. It registers all the interfaces for a resource that common +// across multiple operation types. +type NodeAbstractResource struct { + Addr *ResourceAddress // Addr is the address for this resource + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but also be explicitly + // set if you already have that information. + + Config *config.Resource // Config is the resource in the config + ResourceState *ResourceState // ResourceState is the ResourceState for this + + Targets []ResourceAddress // Set from GraphNodeTargetable + + // The address of the provider this resource will use + ResolvedProvider string +} + +func (n *NodeAbstractResource) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeAbstractResource) Path() []string { + return n.Addr.Path +} + +// GraphNodeReferenceable +func (n *NodeAbstractResource) ReferenceableName() []string { + // We always are referenceable as "type.name" as long as + // we have a config or address. Determine what that value is. + var id string + if n.Config != nil { + id = n.Config.Id() + } else if n.Addr != nil { + addrCopy := n.Addr.Copy() + addrCopy.Path = nil // ReferenceTransformer handles paths + addrCopy.Index = -1 // We handle indexes below + id = addrCopy.String() + } else { + // No way to determine our type.name, just return + return nil + } + + var result []string + + // Always include our own ID. This is primarily for backwards + // compatibility with states that didn't yet support the more + // specific dep string. + result = append(result, id) + + // We represent all multi-access + result = append(result, fmt.Sprintf("%s.*", id)) + + // We represent either a specific number, or all numbers + suffix := "N" + if n.Addr != nil { + idx := n.Addr.Index + if idx == -1 { + idx = 0 + } + + suffix = fmt.Sprintf("%d", idx) + } + result = append(result, fmt.Sprintf("%s.%s", id, suffix)) + + return result +} + +// GraphNodeReferencer +func (n *NodeAbstractResource) References() []string { + // If we have a config, that is our source of truth + if c := n.Config; c != nil { + // Grab all the references + var result []string + result = append(result, c.DependsOn...) + result = append(result, ReferencesFromConfig(c.RawCount)...) + result = append(result, ReferencesFromConfig(c.RawConfig)...) + for _, p := range c.Provisioners { + if p.When == config.ProvisionerWhenCreate { + result = append(result, ReferencesFromConfig(p.ConnInfo)...) + result = append(result, ReferencesFromConfig(p.RawConfig)...) + } + } + + return uniqueStrings(result) + } + + // If we have state, that is our next source + if s := n.ResourceState; s != nil { + return s.Dependencies + } + + return nil +} + +// StateReferences returns the dependencies to put into the state for +// this resource. +func (n *NodeAbstractResource) StateReferences() []string { + self := n.ReferenceableName() + + // Determine what our "prefix" is for checking for references to + // ourself. + addrCopy := n.Addr.Copy() + addrCopy.Index = -1 + selfPrefix := addrCopy.String() + "." + + depsRaw := n.References() + deps := make([]string, 0, len(depsRaw)) + for _, d := range depsRaw { + // Ignore any variable dependencies + if strings.HasPrefix(d, "var.") { + continue + } + + // If this has a backup ref, ignore those for now. 
The old state + // file never contained those and I'd rather store the rich types we + // add in the future. + if idx := strings.IndexRune(d, '/'); idx != -1 { + d = d[:idx] + } + + // If we're referencing ourself, then ignore it + found := false + for _, s := range self { + if d == s { + found = true + } + } + if found { + continue + } + + // If this is a reference to ourself and a specific index, we keep + // it. For example, if this resource is "foo.bar" and the reference + // is "foo.bar.0" then we keep it exact. Otherwise, we strip it. + if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) { + d = d[:len(d)-2] + } + + // This is sad. The dependencies are currently in the format of + // "module.foo.bar" (the full field). This strips the field off. + if strings.HasPrefix(d, "module.") { + parts := strings.SplitN(d, ".", 3) + d = strings.Join(parts[0:2], ".") + } + + deps = append(deps, d) + } + + return deps +} + +func (n *NodeAbstractResource) SetProvider(p string) { + n.ResolvedProvider = p +} + +// GraphNodeProviderConsumer +func (n *NodeAbstractResource) ProvidedBy() string { + // If we have a config we prefer that above all else + if n.Config != nil { + return resourceProvider(n.Config.Type, n.Config.Provider) + } + + // If we have state, then we will use the provider from there + if n.ResourceState != nil && n.ResourceState.Provider != "" { + return n.ResourceState.Provider + } + + // Use our type + return resourceProvider(n.Addr.Type, "") +} + +// GraphNodeProvisionerConsumer +func (n *NodeAbstractResource) ProvisionedBy() []string { + // If we have no configuration, then we have no provisioners + if n.Config == nil { + return nil + } + + // Build the list of provisioners we need based on the configuration. + // It is okay to have duplicates here. + result := make([]string, len(n.Config.Provisioners)) + for i, p := range n.Config.Provisioners { + result[i] = p.Type + } + + return result +} + +// GraphNodeResource, GraphNodeAttachResourceState +func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress { + return n.Addr +} + +// GraphNodeAddressable, TODO: remove, used by target, should unify +func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress { + return n.ResourceAddr() +} + +// GraphNodeTargetable +func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) { + n.Targets = targets +} + +// GraphNodeAttachResourceState +func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) { + n.ResourceState = s +} + +// GraphNodeAttachResourceConfig +func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) { + n.Config = c +} + +// GraphNodeDotter impl. +func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "box", + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go new file mode 100644 index 00000000..573570d8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go @@ -0,0 +1,50 @@ +package terraform + +// NodeAbstractCountResource should be embedded instead of NodeAbstractResource +// if the resource has a `count` value that needs to be expanded. +// +// The embedder should implement `DynamicExpand` to process the count. 
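+//
+// For example, NodePlannableResource and NodeRefreshableDataResource embed
+// this type; their DynamicExpand fans the interpolated count out into one
+// concrete node per index (plus orphans for indexes only present in state).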
+type NodeAbstractCountResource struct { + *NodeAbstractResource + + // Validate, if true, will perform the validation for the count. + // This should only be turned on for the "validate" operation. + Validate bool +} + +// GraphNodeEvalable +func (n *NodeAbstractCountResource) EvalTree() EvalNode { + // We only check if the count is computed if we're not validating. + // If we're validating we allow computed counts since they just turn + // into more computed values. + var evalCountCheckComputed EvalNode + if !n.Validate { + evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config} + } + + return &EvalSequence{ + Nodes: []EvalNode{ + // The EvalTree for a plannable resource primarily involves + // interpolating the count since it can contain variables + // we only just received access to. + // + // With the interpolated count, we can then DynamicExpand + // into the proper number of instances. + &EvalInterpolate{Config: n.Config.RawCount}, + + // Check if the count is computed + evalCountCheckComputed, + + // If validation is enabled, perform the validation + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return n.Validate, nil + }, + + Then: &EvalValidateCount{Resource: n.Config}, + }, + + &EvalCountFixZeroOneBoundary{Resource: n.Config}, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go new file mode 100644 index 00000000..40ee1cf2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go @@ -0,0 +1,400 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// NodeApplyableResource represents a resource that is "applyable": +// it is ready to be applied and is represented by a diff. +type NodeApplyableResource struct { + *NodeAbstractResource +} + +// GraphNodeCreator +func (n *NodeApplyableResource) CreateAddr() *ResourceAddress { + return n.NodeAbstractResource.Addr +} + +// GraphNodeReferencer, overriding NodeAbstractResource +func (n *NodeApplyableResource) References() []string { + result := n.NodeAbstractResource.References() + + // The "apply" side of a resource generally also depends on the + // destruction of its dependencies as well. For example, if a LB + // references a set of VMs with ${vm.foo.*.id}, then we must wait for + // the destruction so we get the newly updated list of VMs. + // + // The exception here is CBD. When CBD is set, we don't do this since + // it would create a cycle. By not creating a cycle, we require two + // applies since the first apply the creation step will use the OLD + // values (pre-destroy) and the second step will update. + // + // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x). + // We mimic that behavior here now and can improve upon it in the future. + // + // This behavior is tested in graph_build_apply_test.go to test ordering. + cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy + if !cbd { + // The "apply" side of a resource always depends on the destruction + // of all its dependencies in addition to the creation. + for _, v := range result { + result = append(result, v+".destroy") + } + } + + return result +} + +// GraphNodeEvalable +func (n *NodeApplyableResource) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. 
More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: addr.Type, + } + + // Build the resource for eval + resource := &Resource{ + Name: addr.Name, + Type: addr.Type, + CountIndex: addr.Index, + } + if resource.CountIndex < 0 { + resource.CountIndex = 0 + } + + // Determine the dependencies for the state. + stateDeps := n.StateReferences() + + // Eval info is different depending on what kind of resource this is + switch n.Config.Mode { + case config.ManagedResourceMode: + return n.evalTreeManagedResource( + stateId, info, resource, stateDeps, + ) + case config.DataResourceMode: + return n.evalTreeDataResource( + stateId, info, resource, stateDeps) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodeApplyableResource) evalTreeDataResource( + stateId string, info *InstanceInfo, + resource *Resource, stateDeps []string) EvalNode { + var provider ResourceProvider + var config *ResourceConfig + var diff *InstanceDiff + var state *InstanceState + + return &EvalSequence{ + Nodes: []EvalNode{ + // Build the instance info + &EvalInstanceInfo{ + Info: info, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Name: stateId, + Diff: &diff, + }, + + // Stop here if we don't actually have a diff + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diff == nil { + return true, EvalEarlyExitError{} + } + + if diff.GetAttributesLen() == 0 { + return true, EvalEarlyExitError{} + } + + return true, nil + }, + Then: EvalNoop{}, + }, + + // Normally we interpolate count as a preparation step before + // a DynamicExpand, but an apply graph has pre-expanded nodes + // and so the count would otherwise never be interpolated. + // + // This is redundant when there are multiple instances created + // from the same config (count > 1) but harmless since the + // underlying structures have mutexes to make this concurrency-safe. + // + // In most cases this isn't actually needed because we dealt with + // all of the counts during the plan walk, but we do it here + // for completeness because other code assumes that the + // final count is always available during interpolation. + // + // Here we are just populating the interpolated value in-place + // inside this RawConfig object, like we would in + // NodeAbstractCountResource. + &EvalInterpolate{ + Config: n.Config.RawCount, + ContinueOnErr: true, + }, + + // We need to re-interpolate the config here, rather than + // just using the diff's values directly, because we've + // potentially learned more variable values during the + // apply pass that weren't known when the diff was produced. + &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &config, + }, + + &EvalGetProvider{ + Name: n.ResolvedProvider, + Output: &provider, + }, + + // Make a new diff with our newly-interpolated config. + &EvalReadDataDiff{ + Info: info, + Config: &config, + Previous: &diff, + Provider: &provider, + Output: &diff, + }, + + &EvalReadDataApply{ + Info: info, + Diff: &diff, + Provider: &provider, + Output: &state, + }, + + &EvalWriteState{ + Name: stateId, + ResourceType: n.Config.Type, + Provider: n.ResolvedProvider, + Dependencies: stateDeps, + State: &state, + }, + + // Clear the diff now that we've applied it, so + // later nodes won't see a diff that's now a no-op. 
+ &EvalWriteDiff{ + Name: stateId, + Diff: nil, + }, + + &EvalUpdateStateHook{}, + }, + } +} + +func (n *NodeApplyableResource) evalTreeManagedResource( + stateId string, info *InstanceInfo, + resource *Resource, stateDeps []string) EvalNode { + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider ResourceProvider + var diff, diffApply *InstanceDiff + var state *InstanceState + var resourceConfig *ResourceConfig + var err error + var createNew bool + var createBeforeDestroyEnabled bool + + return &EvalSequence{ + Nodes: []EvalNode{ + // Build the instance info + &EvalInstanceInfo{ + Info: info, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Name: stateId, + Diff: &diffApply, + }, + + // We don't want to do any destroys + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply == nil { + return true, EvalEarlyExitError{} + } + + if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 { + return true, EvalEarlyExitError{} + } + + diffApply.SetDestroy(false) + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + destroy := false + if diffApply != nil { + destroy = diffApply.GetDestroy() || diffApply.RequiresNew() + } + + createBeforeDestroyEnabled = + n.Config.Lifecycle.CreateBeforeDestroy && + destroy + + return createBeforeDestroyEnabled, nil + }, + Then: &EvalDeposeState{ + Name: stateId, + }, + }, + + // Normally we interpolate count as a preparation step before + // a DynamicExpand, but an apply graph has pre-expanded nodes + // and so the count would otherwise never be interpolated. + // + // This is redundant when there are multiple instances created + // from the same config (count > 1) but harmless since the + // underlying structures have mutexes to make this concurrency-safe. + // + // In most cases this isn't actually needed because we dealt with + // all of the counts during the plan walk, but we need to do this + // in order to support interpolation of resource counts from + // apply-time-interpolated expressions, such as those in + // "provisioner" blocks. + // + // Here we are just populating the interpolated value in-place + // inside this RawConfig object, like we would in + // NodeAbstractCountResource. + &EvalInterpolate{ + Config: n.Config.RawCount, + ContinueOnErr: true, + }, + + &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &resourceConfig, + }, + &EvalGetProvider{ + Name: n.ResolvedProvider, + Output: &provider, + }, + &EvalReadState{ + Name: stateId, + Output: &state, + }, + // Re-run validation to catch any errors we missed, e.g. type + // mismatches on computed values. 
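+			// (IgnoreWarnings below keeps validation warnings from aborting
+			// the apply; only hard validation errors should stop us here.)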
+ &EvalValidateResource{ + Provider: &provider, + Config: &resourceConfig, + ResourceName: n.Config.Name, + ResourceType: n.Config.Type, + ResourceMode: n.Config.Mode, + IgnoreWarnings: true, + }, + &EvalDiff{ + Info: info, + Config: &resourceConfig, + Resource: n.Config, + Provider: &provider, + Diff: &diffApply, + State: &state, + OutputDiff: &diffApply, + }, + + // Get the saved diff + &EvalReadDiff{ + Name: stateId, + Diff: &diff, + }, + + // Compare the diffs + &EvalCompareDiff{ + Info: info, + One: &diff, + Two: &diffApply, + }, + + &EvalGetProvider{ + Name: n.ResolvedProvider, + Output: &provider, + }, + &EvalReadState{ + Name: stateId, + Output: &state, + }, + // Call pre-apply hook + &EvalApplyPre{ + Info: info, + State: &state, + Diff: &diffApply, + }, + &EvalApply{ + Info: info, + State: &state, + Diff: &diffApply, + Provider: &provider, + Output: &state, + Error: &err, + CreateNew: &createNew, + }, + &EvalWriteState{ + Name: stateId, + ResourceType: n.Config.Type, + Provider: n.ResolvedProvider, + Dependencies: stateDeps, + State: &state, + }, + &EvalApplyProvisioners{ + Info: info, + State: &state, + Resource: n.Config, + InterpResource: resource, + CreateNew: &createNew, + Error: &err, + When: config.ProvisionerWhenCreate, + }, + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return createBeforeDestroyEnabled && err != nil, nil + }, + Then: &EvalUndeposeState{ + Name: stateId, + State: &state, + }, + Else: &EvalWriteState{ + Name: stateId, + ResourceType: n.Config.Type, + Provider: n.ResolvedProvider, + Dependencies: stateDeps, + State: &state, + }, + }, + + // We clear the diff out here so that future nodes + // don't see a diff that is already complete. There + // is no longer a diff! + &EvalWriteDiff{ + Name: stateId, + Diff: nil, + }, + + &EvalApplyPost{ + Info: info, + State: &state, + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go new file mode 100644 index 00000000..657bbee7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go @@ -0,0 +1,291 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// NodeDestroyResource represents a resource that is to be destroyed. +type NodeDestroyResource struct { + *NodeAbstractResource +} + +func (n *NodeDestroyResource) Name() string { + return n.NodeAbstractResource.Name() + " (destroy)" +} + +// GraphNodeDestroyer +func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress { + return n.Addr +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyResource) CreateBeforeDestroy() bool { + // If we have no config, we just assume no + if n.Config == nil { + return false + } + + return n.Config.Lifecycle.CreateBeforeDestroy +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error { + // If we have no config, do nothing since it won't affect the + // create step anyways. + if n.Config == nil { + return nil + } + + // Set CBD to true + n.Config.Lifecycle.CreateBeforeDestroy = true + + return nil +} + +// GraphNodeReferenceable, overriding NodeAbstractResource +func (n *NodeDestroyResource) ReferenceableName() []string { + // We modify our referenceable name to have the suffix of ".destroy" + // since depending on the creation side doesn't necessarilly mean + // depending on destruction. 
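+	// For example, "aws_instance.web" is referenceable at destroy time as
+	// "aws_instance.web.destroy" (or "aws_instance.web.destroy-cbd" when
+	// create_before_destroy is set).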
+ suffix := ".destroy" + + // If we're CBD, we also append "-cbd". This is because CBD will setup + // its own edges (in CBDEdgeTransformer). Depending on the "destroy" + // side generally doesn't mean depending on CBD as well. See GH-11349 + if n.CreateBeforeDestroy() { + suffix += "-cbd" + } + + result := n.NodeAbstractResource.ReferenceableName() + for i, v := range result { + result[i] = v + suffix + } + + return result +} + +// GraphNodeReferencer, overriding NodeAbstractResource +func (n *NodeDestroyResource) References() []string { + // If we have a config, then we need to include destroy-time dependencies + if c := n.Config; c != nil { + var result []string + for _, p := range c.Provisioners { + // We include conn info and config for destroy time provisioners + // as dependencies that we have. + if p.When == config.ProvisionerWhenDestroy { + result = append(result, ReferencesFromConfig(p.ConnInfo)...) + result = append(result, ReferencesFromConfig(p.RawConfig)...) + } + } + + return result + } + + return nil +} + +// GraphNodeDynamicExpandable +func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + // If we have no config we do nothing + if n.Addr == nil { + return nil, nil + } + + state, lock := ctx.State() + lock.RLock() + defer lock.RUnlock() + + // Start creating the steps + steps := make([]GraphTransformer, 0, 5) + + // We want deposed resources in the state to be destroyed + steps = append(steps, &DeposedTransformer{ + State: state, + View: n.Addr.stateId(), + ResolvedProvider: n.ResolvedProvider, + }) + + // Target + steps = append(steps, &TargetsTransformer{ + ParsedTargets: n.Targets, + }) + + // Always end with the root being added + steps = append(steps, &RootTransformer{}) + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Name: "NodeResourceDestroy", + } + return b.Build(ctx.Path()) +} + +// GraphNodeEvalable +func (n *NodeDestroyResource) EvalTree() EvalNode { + // stateId is the ID to put into the state + stateId := n.Addr.stateId() + + // Build the instance info. 
More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: n.Addr.Type, + uniqueExtra: "destroy", + } + + // Build the resource for eval + addr := n.Addr + resource := &Resource{ + Name: addr.Name, + Type: addr.Type, + CountIndex: addr.Index, + } + if resource.CountIndex < 0 { + resource.CountIndex = 0 + } + + // Get our state + rs := n.ResourceState + if rs == nil { + rs = &ResourceState{ + Provider: n.ResolvedProvider, + } + } + + var diffApply *InstanceDiff + var provider ResourceProvider + var state *InstanceState + var err error + return &EvalOpFilter{ + Ops: []walkOperation{walkApply, walkDestroy}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + // Get the saved diff for apply + &EvalReadDiff{ + Name: stateId, + Diff: &diffApply, + }, + + // Filter the diff so we only get the destroy + &EvalFilterDiff{ + Diff: &diffApply, + Output: &diffApply, + Destroy: true, + }, + + // If we're not destroying, then compare diffs + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply != nil && diffApply.GetDestroy() { + return true, nil + } + + return true, EvalEarlyExitError{} + }, + Then: EvalNoop{}, + }, + + // Load the instance info so we have the module path set + &EvalInstanceInfo{Info: info}, + + &EvalGetProvider{ + Name: n.ResolvedProvider, + Output: &provider, + }, + &EvalReadState{ + Name: stateId, + Output: &state, + }, + &EvalRequireState{ + State: &state, + }, + + // Call pre-apply hook + &EvalApplyPre{ + Info: info, + State: &state, + Diff: &diffApply, + }, + + // Run destroy provisioners if not tainted + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if state != nil && state.Tainted { + return false, nil + } + + return true, nil + }, + + Then: &EvalApplyProvisioners{ + Info: info, + State: &state, + Resource: n.Config, + InterpResource: resource, + Error: &err, + When: config.ProvisionerWhenDestroy, + }, + }, + + // If we have a provisioning error, then we just call + // the post-apply hook now. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return err != nil, nil + }, + + Then: &EvalApplyPost{ + Info: info, + State: &state, + Error: &err, + }, + }, + + // Make sure we handle data sources properly. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if n.Addr == nil { + return false, fmt.Errorf("nil address") + } + + if n.Addr.Mode == config.DataResourceMode { + return true, nil + } + + return false, nil + }, + + Then: &EvalReadDataApply{ + Info: info, + Diff: &diffApply, + Provider: &provider, + Output: &state, + }, + Else: &EvalApply{ + Info: info, + State: &state, + Diff: &diffApply, + Provider: &provider, + Output: &state, + Error: &err, + }, + }, + &EvalWriteState{ + Name: stateId, + ResourceType: n.Addr.Type, + Provider: n.ResolvedProvider, + Dependencies: rs.Dependencies, + State: &state, + }, + &EvalApplyPost{ + Info: info, + State: &state, + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go new file mode 100644 index 00000000..1afae7a0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go @@ -0,0 +1,85 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/dag" +) + +// NodePlannableResource represents a resource that is "plannable": +// it is ready to be planned in order to create a diff. 
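+//
+// For example (a sketch): with count = 3, DynamicExpand produces instance
+// nodes for state ids "aws_instance.web.0" through "aws_instance.web.2",
+// plus orphan nodes for any higher indexes still recorded in the state.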
+type NodePlannableResource struct {
+	*NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+	// Grab the state which we read
+	state, lock := ctx.State()
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Expand the resource count which must be available by now from EvalTree
+	count, err := n.Config.Count()
+	if err != nil {
+		return nil, err
+	}
+
+	// The concrete resource factory we'll use
+	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+		// Add the config and state since we don't do that via transforms
+		a.Config = n.Config
+		a.ResolvedProvider = n.ResolvedProvider
+
+		return &NodePlannableResourceInstance{
+			NodeAbstractResource: a,
+		}
+	}
+
+	// The concrete resource factory we'll use for orphans
+	concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
+		// Add the config and state since we don't do that via transforms
+		a.Config = n.Config
+		a.ResolvedProvider = n.ResolvedProvider
+
+		return &NodePlannableResourceOrphan{
+			NodeAbstractResource: a,
+		}
+	}
+
+	// Start creating the steps
+	steps := []GraphTransformer{
+		// Expand the count.
+		&ResourceCountTransformer{
+			Concrete: concreteResource,
+			Count:    count,
+			Addr:     n.ResourceAddr(),
+		},
+
+		// Add the count orphans
+		&OrphanResourceCountTransformer{
+			Concrete: concreteResourceOrphan,
+			Count:    count,
+			Addr:     n.ResourceAddr(),
+			State:    state,
+		},
+
+		// Attach the state
+		&AttachStateTransformer{State: state},
+
+		// Targeting
+		&TargetsTransformer{ParsedTargets: n.Targets},
+
+		// Connect references so ordering is correct
+		&ReferenceTransformer{},
+
+		// Make sure there is a single root
+		&RootTransformer{},
+	}
+
+	// Build the graph
+	b := &BasicGraphBuilder{
+		Steps:    steps,
+		Validate: true,
+		Name:     "NodePlannableResource",
+	}
+	return b.Build(ctx.Path())
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
new file mode 100644
index 00000000..9b02362b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
@@ -0,0 +1,53 @@
+package terraform
+
+// NodePlanDestroyableResource represents a resource that is ready to
+// have a destroy diff created for it during the plan walk.
+type NodePlanDestroyableResource struct {
+	*NodeAbstractResource
+}
+
+// GraphNodeDestroyer
+func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress {
+	return n.Addr
+}
+
+// GraphNodeEvalable
+func (n *NodePlanDestroyableResource) EvalTree() EvalNode {
+	addr := n.NodeAbstractResource.Addr
+
+	// stateId is the ID to put into the state
+	stateId := addr.stateId()
+
+	// Build the instance info. More of this will be populated during eval
+	info := &InstanceInfo{
+		Id:   stateId,
+		Type: addr.Type,
+	}
+
+	// Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by address below.
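The "written to by address" convention mentioned above is the backbone of every eval tree in this package: sibling nodes communicate through pointers to shared local variables that are captured when the tree is built, so one step's output becomes the next step's input. A stand-alone sketch of that threading (toy step type, not the vendored EvalNode interface):

package main

import "fmt"

// evalStep stands in for an EvalNode: each step reads and writes shared
// state through pointers captured at construction time.
type evalStep func() error

func main() {
	// Shared slots, threaded by address through every step.
	var state string
	var diff string

	steps := []evalStep{
		func() error { state = "aws_instance.web (read from state)"; return nil },
		func() error { diff = "destroy " + state; return nil },
	}

	for _, s := range steps {
		if err := s(); err != nil {
			fmt.Println("eval error:", err)
			return
		}
	}
	fmt.Println(diff) // destroy aws_instance.web (read from state)
}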
+ var diff *InstanceDiff + var state *InstanceState + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalReadState{ + Name: stateId, + Output: &state, + }, + &EvalDiffDestroy{ + Info: info, + State: &state, + Output: &diff, + }, + &EvalCheckPreventDestroy{ + Resource: n.Config, + Diff: &diff, + }, + &EvalWriteDiff{ + Name: stateId, + Diff: &diff, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go new file mode 100644 index 00000000..7d9fcddb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go @@ -0,0 +1,190 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// NodePlannableResourceInstance represents a _single_ resource +// instance that is plannable. This means this represents a single +// count index, for example. +type NodePlannableResourceInstance struct { + *NodeAbstractResource +} + +// GraphNodeEvalable +func (n *NodePlannableResourceInstance) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: addr.Type, + ModulePath: normalizeModulePath(addr.Path), + } + + // Build the resource for eval + resource := &Resource{ + Name: addr.Name, + Type: addr.Type, + CountIndex: addr.Index, + } + if resource.CountIndex < 0 { + resource.CountIndex = 0 + } + + // Determine the dependencies for the state. + stateDeps := n.StateReferences() + + // Eval info is different depending on what kind of resource this is + switch n.Config.Mode { + case config.ManagedResourceMode: + return n.evalTreeManagedResource( + stateId, info, resource, stateDeps, + ) + case config.DataResourceMode: + return n.evalTreeDataResource( + stateId, info, resource, stateDeps) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodePlannableResourceInstance) evalTreeDataResource( + stateId string, info *InstanceInfo, + resource *Resource, stateDeps []string) EvalNode { + var provider ResourceProvider + var config *ResourceConfig + var diff *InstanceDiff + var state *InstanceState + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalReadState{ + Name: stateId, + Output: &state, + }, + + // We need to re-interpolate the config here because some + // of the attributes may have become computed during + // earlier planning, due to other resources having + // "requires new resource" diffs. + &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &config, + }, + + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0 + + // If the configuration is complete and we + // already have a state then we don't need to + // do any further work during apply, because we + // already populated the state during refresh. 
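The EvalIf that follows returns EvalEarlyExitError{} to abandon the rest of the sequence without failing the run. The sentinel-error pattern behind it is easy to show in isolation (simplified types; the real walk also filters steps by operation):

package main

import (
	"errors"
	"fmt"
)

// errEarlyExit mimics EvalEarlyExitError: a sentinel that stops the
// remaining steps without marking the walk as failed.
var errEarlyExit = errors.New("early exit")

func run(steps []func() error) error {
	for _, step := range steps {
		if err := step(); err != nil {
			if errors.Is(err, errEarlyExit) {
				return nil // stop quietly; this is not a real failure
			}
			return err
		}
	}
	return nil
}

func main() {
	err := run([]func() error{
		func() error { fmt.Println("read state"); return nil },
		func() error { return errEarlyExit }, // nothing left to do: bail out
		func() error { fmt.Println("never reached"); return nil },
	})
	fmt.Println("walk error:", err) // walk error: <nil>
}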
+ if !computed && state != nil { + return true, EvalEarlyExitError{} + } + + return true, nil + }, + Then: EvalNoop{}, + }, + + &EvalGetProvider{ + Name: n.ResolvedProvider, + Output: &provider, + }, + + &EvalReadDataDiff{ + Info: info, + Config: &config, + Provider: &provider, + Output: &diff, + OutputState: &state, + }, + + &EvalWriteState{ + Name: stateId, + ResourceType: n.Config.Type, + Provider: n.ResolvedProvider, + Dependencies: stateDeps, + State: &state, + }, + + &EvalWriteDiff{ + Name: stateId, + Diff: &diff, + }, + }, + } +} + +func (n *NodePlannableResourceInstance) evalTreeManagedResource( + stateId string, info *InstanceInfo, + resource *Resource, stateDeps []string) EvalNode { + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider ResourceProvider + var diff *InstanceDiff + var state *InstanceState + var resourceConfig *ResourceConfig + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &resourceConfig, + }, + &EvalGetProvider{ + Name: n.ResolvedProvider, + Output: &provider, + }, + // Re-run validation to catch any errors we missed, e.g. type + // mismatches on computed values. + &EvalValidateResource{ + Provider: &provider, + Config: &resourceConfig, + ResourceName: n.Config.Name, + ResourceType: n.Config.Type, + ResourceMode: n.Config.Mode, + IgnoreWarnings: true, + }, + &EvalReadState{ + Name: stateId, + Output: &state, + }, + &EvalDiff{ + Name: stateId, + Info: info, + Config: &resourceConfig, + Resource: n.Config, + Provider: &provider, + State: &state, + OutputDiff: &diff, + OutputState: &state, + }, + &EvalCheckPreventDestroy{ + Resource: n.Config, + Diff: &diff, + }, + &EvalWriteState{ + Name: stateId, + ResourceType: n.Config.Type, + Provider: n.ResolvedProvider, + Dependencies: stateDeps, + State: &state, + }, + &EvalWriteDiff{ + Name: stateId, + Diff: &diff, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go new file mode 100644 index 00000000..73d6e41f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go @@ -0,0 +1,54 @@ +package terraform + +// NodePlannableResourceOrphan represents a resource that is "applyable": +// it is ready to be applied and is represented by a diff. +type NodePlannableResourceOrphan struct { + *NodeAbstractResource +} + +func (n *NodePlannableResourceOrphan) Name() string { + return n.NodeAbstractResource.Name() + " (orphan)" +} + +// GraphNodeEvalable +func (n *NodePlannableResourceOrphan) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: addr.Type, + ModulePath: normalizeModulePath(addr.Path), + } + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. 
+	var diff *InstanceDiff
+	var state *InstanceState
+
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			&EvalReadState{
+				Name:   stateId,
+				Output: &state,
+			},
+			&EvalDiffDestroy{
+				Info:   info,
+				State:  &state,
+				Output: &diff,
+			},
+			&EvalCheckPreventDestroy{
+				Resource:   n.Config,
+				ResourceId: stateId,
+				Diff:       &diff,
+			},
+			&EvalWriteDiff{
+				Name: stateId,
+				Diff: &diff,
+			},
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
new file mode 100644
index 00000000..697bd494
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -0,0 +1,266 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// NodeRefreshableManagedResource represents a resource that is expandable into
+// NodeRefreshableManagedResourceInstance. Resource count orphans are also added.
+type NodeRefreshableManagedResource struct {
+	*NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+	// Grab the state which we read
+	state, lock := ctx.State()
+	lock.RLock()
+	defer lock.RUnlock()
+
+	// Expand the resource count which must be available by now from EvalTree
+	count, err := n.Config.Count()
+	if err != nil {
+		return nil, err
+	}
+
+	// The concrete resource factory we'll use
+	concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+		// Add the config and state since we don't do that via transforms
+		a.Config = n.Config
+		a.ResolvedProvider = n.ResolvedProvider
+
+		return &NodeRefreshableManagedResourceInstance{
+			NodeAbstractResource: a,
+		}
+	}
+
+	// Start creating the steps
+	steps := []GraphTransformer{
+		// Expand the count.
+		&ResourceCountTransformer{
+			Concrete: concreteResource,
+			Count:    count,
+			Addr:     n.ResourceAddr(),
+		},
+
+		// Add the count orphans to make sure these resources are accounted for
+		// during a scale in.
+		&OrphanResourceCountTransformer{
+			Concrete: concreteResource,
+			Count:    count,
+			Addr:     n.ResourceAddr(),
+			State:    state,
+		},
+
+		// Attach the state
+		&AttachStateTransformer{State: state},
+
+		// Targeting
+		&TargetsTransformer{ParsedTargets: n.Targets},
+
+		// Connect references so ordering is correct
+		&ReferenceTransformer{},
+
+		// Make sure there is a single root
+		&RootTransformer{},
+	}
+
+	// Build the graph
+	b := &BasicGraphBuilder{
+		Steps:    steps,
+		Validate: true,
+		Name:     "NodeRefreshableManagedResource",
+	}
+
+	return b.Build(ctx.Path())
+}
+
+// NodeRefreshableManagedResourceInstance represents a single resource
+// instance that is refreshable.
+type NodeRefreshableManagedResourceInstance struct {
+	*NodeAbstractResource
+}
+
+// GraphNodeDestroyer
+func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *ResourceAddress {
+	return n.Addr
+}
+
+// GraphNodeEvalable
+func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode {
+	// Eval info is different depending on what kind of resource this is
+	switch mode := n.Addr.Mode; mode {
+	case config.ManagedResourceMode:
+		if n.ResourceState == nil {
+			return n.evalTreeManagedResourceNoState()
+		}
+		return n.evalTreeManagedResource()
+
+	case config.DataResourceMode:
+		// Get the data source node. If we don't have a configuration
+		// then it is an orphan so we destroy it (remove it from the state).
+ var dn GraphNodeEvalable + if n.Config != nil { + dn = &NodeRefreshableDataResourceInstance{ + NodeAbstractResource: n.NodeAbstractResource, + } + } else { + dn = &NodeDestroyableDataResource{ + NodeAbstractResource: n.NodeAbstractResource, + } + } + + return dn.EvalTree() + default: + panic(fmt.Errorf("unsupported resource mode %s", mode)) + } +} + +func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { + addr := n.NodeAbstractResource.Addr + + // stateId is the ID to put into the state + stateId := addr.stateId() + + // Build the instance info. More of this will be populated during eval + info := &InstanceInfo{ + Id: stateId, + Type: addr.Type, + } + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider ResourceProvider + var state *InstanceState + + // This happened during initial development. All known cases were + // fixed and tested but as a sanity check let's assert here. + if n.ResourceState == nil { + err := fmt.Errorf( + "No resource state attached for addr: %s\n\n"+ + "This is a bug. Please report this to Terraform with your configuration\n"+ + "and state attached. Please be careful to scrub any sensitive information.", + addr) + return &EvalReturnError{Error: &err} + } + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Name: n.ResolvedProvider, + Output: &provider, + }, + &EvalReadState{ + Name: stateId, + Output: &state, + }, + &EvalRefresh{ + Info: info, + Provider: &provider, + State: &state, + Output: &state, + }, + &EvalWriteState{ + Name: stateId, + ResourceType: n.ResourceState.Type, + Provider: n.ResolvedProvider, + Dependencies: n.ResourceState.Dependencies, + State: &state, + }, + }, + } +} + +// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource +// nodes that don't have state attached. An example of where this functionality +// is useful is when a resource that already exists in state is being scaled +// out, ie: has its resource count increased. In this case, the scaled out node +// needs to be available to other nodes (namely data sources) that may depend +// on it for proper interpolation, or confusing "index out of range" errors can +// occur. +// +// The steps in this sequence are very similar to the steps carried out in +// plan, but nothing is done with the diff after it is created - it is dropped, +// and its changes are not counted in the UI. +func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider ResourceProvider + var state *InstanceState + var resourceConfig *ResourceConfig + + addr := n.NodeAbstractResource.Addr + stateID := addr.stateId() + info := &InstanceInfo{ + Id: stateID, + Type: addr.Type, + ModulePath: normalizeModulePath(addr.Path), + } + + // Build the resource for eval + resource := &Resource{ + Name: addr.Name, + Type: addr.Type, + CountIndex: addr.Index, + } + if resource.CountIndex < 0 { + resource.CountIndex = 0 + } + + // Determine the dependencies for the state. 
+ stateDeps := n.StateReferences() + + // n.Config can be nil if the config and state don't match + var raw *config.RawConfig + if n.Config != nil { + raw = n.Config.RawConfig.Copy() + } + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalInterpolate{ + Config: raw, + Resource: resource, + Output: &resourceConfig, + }, + &EvalGetProvider{ + Name: n.ResolvedProvider, + Output: &provider, + }, + // Re-run validation to catch any errors we missed, e.g. type + // mismatches on computed values. + &EvalValidateResource{ + Provider: &provider, + Config: &resourceConfig, + ResourceName: n.Config.Name, + ResourceType: n.Config.Type, + ResourceMode: n.Config.Mode, + IgnoreWarnings: true, + }, + &EvalReadState{ + Name: stateID, + Output: &state, + }, + &EvalDiff{ + Name: stateID, + Info: info, + Config: &resourceConfig, + Resource: n.Config, + Provider: &provider, + State: &state, + OutputState: &state, + Stub: true, + }, + &EvalWriteState{ + Name: stateID, + ResourceType: n.Config.Type, + Provider: n.ResolvedProvider, + Dependencies: stateDeps, + State: &state, + }, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go new file mode 100644 index 00000000..0df223d9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go @@ -0,0 +1,159 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/dag" +) + +// NodeValidatableResource represents a resource that is used for validation +// only. +type NodeValidatableResource struct { + *NodeAbstractCountResource +} + +// GraphNodeEvalable +func (n *NodeValidatableResource) EvalTree() EvalNode { + // Ensure we're validating + c := n.NodeAbstractCountResource + c.Validate = true + return c.EvalTree() +} + +// GraphNodeDynamicExpandable +func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + // Grab the state which we read + state, lock := ctx.State() + lock.RLock() + defer lock.RUnlock() + + // Expand the resource count which must be available by now from EvalTree + count := 1 + if n.Config.RawCount.Value() != unknownValue() { + var err error + count, err = n.Config.Count() + if err != nil { + return nil, err + } + } + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResource) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + + return &NodeValidatableResourceInstance{ + NodeAbstractResource: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count. + &ResourceCountTransformer{ + Concrete: concreteResource, + Count: count, + Addr: n.ResourceAddr(), + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{ParsedTargets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodeValidatableResource", + } + + return b.Build(ctx.Path()) +} + +// This represents a _single_ resource instance to validate. 
+type NodeValidatableResourceInstance struct { + *NodeAbstractResource +} + +// GraphNodeEvalable +func (n *NodeValidatableResourceInstance) EvalTree() EvalNode { + addr := n.NodeAbstractResource.Addr + + // Build the resource for eval + resource := &Resource{ + Name: addr.Name, + Type: addr.Type, + CountIndex: addr.Index, + } + if resource.CountIndex < 0 { + resource.CountIndex = 0 + } + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var config *ResourceConfig + var provider ResourceProvider + + seq := &EvalSequence{ + Nodes: []EvalNode{ + &EvalValidateResourceSelfRef{ + Addr: &addr, + Config: &n.Config.RawConfig, + }, + &EvalGetProvider{ + Name: n.ResolvedProvider, + Output: &provider, + }, + &EvalInterpolate{ + Config: n.Config.RawConfig.Copy(), + Resource: resource, + Output: &config, + }, + &EvalValidateResource{ + Provider: &provider, + Config: &config, + ResourceName: n.Config.Name, + ResourceType: n.Config.Type, + ResourceMode: n.Config.Mode, + }, + }, + } + + // Validate all the provisioners + for _, p := range n.Config.Provisioners { + var provisioner ResourceProvisioner + var connConfig *ResourceConfig + seq.Nodes = append( + seq.Nodes, + &EvalGetProvisioner{ + Name: p.Type, + Output: &provisioner, + }, + &EvalInterpolate{ + Config: p.RawConfig.Copy(), + Resource: resource, + Output: &config, + }, + &EvalInterpolate{ + Config: p.ConnInfo.Copy(), + Resource: resource, + Output: &connConfig, + }, + &EvalValidateProvisioner{ + Provisioner: &provisioner, + Config: &config, + ConnConfig: &connConfig, + }, + ) + } + + return seq +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go new file mode 100644 index 00000000..cb61a4e3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go @@ -0,0 +1,22 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// NodeRootVariable represents a root variable input. +type NodeRootVariable struct { + Config *config.Variable +} + +func (n *NodeRootVariable) Name() string { + result := fmt.Sprintf("var.%s", n.Config.Name) + return result +} + +// GraphNodeReferenceable +func (n *NodeRootVariable) ReferenceableName() []string { + return []string{n.Name()} +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go index ca99685a..51dd4122 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/path.go +++ b/vendor/github.com/hashicorp/terraform/terraform/path.go @@ -1,24 +1,10 @@ package terraform import ( - "crypto/md5" - "encoding/hex" + "strings" ) // PathCacheKey returns a cache key for a module path. -// -// TODO: test func PathCacheKey(path []string) string { - // There is probably a better way to do this, but this is working for now. - // We just create an MD5 hash of all the MD5 hashes of all the path - // elements. This gets us the property that it is unique per ordering. 
- hash := md5.New() - for _, p := range path { - single := md5.Sum([]byte(p)) - if _, err := hash.Write(single[:]); err != nil { - panic(err) - } - } - - return hex.EncodeToString(hash.Sum(nil)) + return strings.Join(path, "|") } diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go index 75023a0c..30db1950 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go @@ -6,9 +6,11 @@ import ( "errors" "fmt" "io" + "log" "sync" "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/version" ) func init() { @@ -20,32 +22,118 @@ func init() { // Plan represents a single Terraform execution plan, which contains // all the information necessary to make an infrastructure change. +// +// A plan has to contain basically the entire state of the world +// necessary to make a change: the state, diff, config, backend config, etc. +// This is so that it can run alone without any other data. type Plan struct { - Diff *Diff - Module *module.Tree - State *State - Vars map[string]interface{} + // Diff describes the resource actions that must be taken when this + // plan is applied. + Diff *Diff + + // Module represents the entire configuration that was present when this + // plan was created. + Module *module.Tree + + // State is the Terraform state that was current when this plan was + // created. + // + // It is not allowed to apply a plan that has a stale state, since its + // diff could be outdated. + State *State + + // Vars retains the variables that were set when creating the plan, so + // that the same variables can be applied during apply. + Vars map[string]interface{} + + // Targets, if non-empty, contains a set of resource address strings that + // identify graph nodes that were selected as targets for plan. + // + // When targets are set, any graph node that is not directly targeted or + // indirectly targeted via dependencies is excluded from the graph. Targets []string + // TerraformVersion is the version of Terraform that was used to create + // this plan. + // + // It is not allowed to apply a plan created with a different version of + // Terraform, since the other fields of this structure may be interpreted + // in different ways between versions. + TerraformVersion string + + // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries + // used as plugins for each provider during plan. + // + // These must match between plan and apply to ensure that the diff is + // correctly interpreted, since different provider versions may have + // different attributes or attribute value constraints. + ProviderSHA256s map[string][]byte + + // Backend is the backend that this plan should use and store data with. + Backend *BackendState + + // Destroy indicates that this plan was created for a full destroy operation + Destroy bool + once sync.Once } // Context returns a Context with the data encapsulated in this plan. // // The following fields in opts are overridden by the plan: Config, -// Diff, State, Variables. +// Diff, Variables. +// +// If State is not provided, it is set from the plan. If it _is_ provided, +// it must be Equal to the state stored in plan, but may have a newer +// serial. 
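The version guard enforced in contextOpts below can be exercised on its own. A minimal sketch of the same check (hypothetical helper mirroring the code that follows):

package main

import "fmt"

// checkPlanVersion mirrors the guard in (*Plan).contextOpts: a plan may
// only be applied by the Terraform version that created it.
func checkPlanVersion(planVersion, runningVersion string) error {
	if planVersion != "" && planVersion != runningVersion {
		return fmt.Errorf(
			"plan was created with a different version of Terraform (created with %s, but running %s)",
			planVersion, runningVersion,
		)
	}
	return nil
}

func main() {
	fmt.Println(checkPlanVersion("0.11.0", "0.11.0")) // <nil>
	fmt.Println(checkPlanVersion("0.10.8", "0.11.0")) // version mismatch error
}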
func (p *Plan) Context(opts *ContextOpts) (*Context, error) { + var err error + opts, err = p.contextOpts(opts) + if err != nil { + return nil, err + } + return NewContext(opts) +} + +// contextOpts mutates the given base ContextOpts in place to use input +// objects obtained from the receiving plan. +func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) { + opts := base + opts.Diff = p.Diff opts.Module = p.Module - opts.State = p.State opts.Targets = p.Targets + opts.ProviderSHA256s = p.ProviderSHA256s + opts.Destroy = p.Destroy + + if opts.State == nil { + opts.State = p.State + } else if !opts.State.Equal(p.State) { + // Even if we're overriding the state, it should be logically equal + // to what's in plan. The only valid change to have made by the time + // we get here is to have incremented the serial. + // + // Due to the fact that serialization may change the representation of + // the state, there is little chance that these aren't actually equal. + // Log the error condition for reference, but continue with the state + // we have. + log.Println("[WARN] Plan state and ContextOpts state are not equal") + } + + thisVersion := version.String() + if p.TerraformVersion != "" && p.TerraformVersion != thisVersion { + return nil, fmt.Errorf( + "plan was created with a different version of Terraform (created with %s, but running %s)", + p.TerraformVersion, thisVersion, + ) + } opts.Variables = make(map[string]interface{}) for k, v := range p.Vars { opts.Variables[k] = v } - return NewContext(opts) + return opts, nil } func (p *Plan) String() string { @@ -79,7 +167,7 @@ func (p *Plan) init() { // the ability in the future to change the file format if we want for any // reason. const planFormatMagic = "tfplan" -const planFormatVersion byte = 1 +const planFormatVersion byte = 2 // ReadPlan reads a plan structure out of a reader in the format that // was written by WritePlan. diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go index 795bd967..2f5ebb5e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go @@ -3,10 +3,13 @@ package terraform import ( "fmt" "reflect" + "sort" "strconv" "strings" "github.com/hashicorp/terraform/config" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" ) // ResourceProvisionerConfig is used to pair a provisioner @@ -61,10 +64,20 @@ type InstanceInfo struct { // Type is the resource type of this instance Type string + + // uniqueExtra is an internal field that can be populated to supply + // extra metadata that is used to identify a unique instance in + // the graph walk. This will be appended to HumanID when uniqueId + // is called. + uniqueExtra string } // HumanId is a unique Id that is human-friendly and useful for UI elements. func (i *InstanceInfo) HumanId() string { + if i == nil { + return "" + } + if len(i.ModulePath) <= 1 { return i.Id } @@ -75,6 +88,55 @@ func (i *InstanceInfo) HumanId() string { i.Id) } +// ResourceAddress returns the address of the resource that the receiver is describing. +func (i *InstanceInfo) ResourceAddress() *ResourceAddress { + // GROSS: for tainted and deposed instances, their status gets appended + // to i.Id to create a unique id for the graph node. 
Historically these
+	// ids were displayed to the user, so it's designed to be human-readable:
+	//   "aws_instance.bar.0 (deposed #0)"
+	//
+	// So here we detect such suffixes and try to interpret them back to
+	// their original meaning so we can then produce a ResourceAddress
+	// with a suitable InstanceType.
+	id := i.Id
+	instanceType := TypeInvalid
+	if idx := strings.Index(id, " ("); idx != -1 {
+		remain := id[idx:]
+		id = id[:idx]
+
+		switch {
+		case strings.Contains(remain, "tainted"):
+			instanceType = TypeTainted
+		case strings.Contains(remain, "deposed"):
+			instanceType = TypeDeposed
+		}
+	}
+
+	addr, err := parseResourceAddressInternal(id)
+	if err != nil {
+		// should never happen, since that would indicate a bug in the
+		// code that constructed this InstanceInfo.
+		panic(fmt.Errorf("InstanceInfo has invalid Id %s", id))
+	}
+	if len(i.ModulePath) > 1 {
+		addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied
+	}
+	if instanceType != TypeInvalid {
+		addr.InstanceTypeSet = true
+		addr.InstanceType = instanceType
+	}
+	return addr
+}
+
+func (i *InstanceInfo) uniqueId() string {
+	prefix := i.HumanId()
+	if v := i.uniqueExtra; v != "" {
+		prefix += " " + v
+	}
+
+	return prefix
+}
+
 // ResourceConfig holds the configuration given for a resource. This is
 // done instead of a raw `map[string]interface{}` type so that rich
 // methods can be added to it to make dealing with it easier.
@@ -93,6 +155,59 @@ func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
 	return result
 }
 
+// DeepCopy performs a deep copy of the configuration. This makes it safe
+// to modify any of the structures that are part of the resource config without
+// affecting the original configuration.
+func (c *ResourceConfig) DeepCopy() *ResourceConfig {
+	// DeepCopying a nil should return a nil to avoid panics
+	if c == nil {
+		return nil
+	}
+
+	// Copy; this will copy all the exported attributes
+	copy, err := copystructure.Config{Lock: true}.Copy(c)
+	if err != nil {
+		panic(err)
+	}
+
+	// Force the type
+	result := copy.(*ResourceConfig)
+
+	// For the raw configuration, we can just use its own copy method
+	result.raw = c.raw.Copy()
+
+	return result
+}
+
+// Equal checks the equality of two resource configs.
+func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
+	// If either are nil, then they're only equal if they're both nil
+	if c == nil || c2 == nil {
+		return c == c2
+	}
+
+	// Sort the computed keys so they're deterministic
+	sort.Strings(c.ComputedKeys)
+	sort.Strings(c2.ComputedKeys)
+
+	// Two resource configs are equal if their exported properties are equal.
+	// We don't compare "raw" because it is never used again after
+	// initialization and for all intents and purposes they are equal
+	// if the exported properties are equal.
+	check := [][2]interface{}{
+		{c.ComputedKeys, c2.ComputedKeys},
+		{c.Raw, c2.Raw},
+		{c.Config, c2.Config},
+	}
+	for _, pair := range check {
+		if !reflect.DeepEqual(pair[0], pair[1]) {
+			return false
+		}
+	}
+
+	return true
+}
+
 // CheckSet checks that the given list of configuration keys is
 // properly set. If not, errors are returned for each unset key.
 //
@@ -112,16 +227,17 @@ func (c *ResourceConfig) CheckSet(keys []string) []error {
 // Get looks up a configuration value by key and returns the value.
 //
 // The second return value is true if the get was successful. Get will
-// not succeed if the value is being computed.
+// return the raw value if the key is computed, so you should pair this
+// with IsComputed.
func (c *ResourceConfig) Get(k string) (interface{}, bool) { - // First try to get it from c.Config since that has interpolated values - result, ok := c.get(k, c.Config) - if ok { - return result, ok + // We aim to get a value from the configuration. If it is computed, + // then we return the pure raw value. + source := c.Config + if c.IsComputed(k) { + source = c.Raw } - // Otherwise, just get it from the raw config - return c.get(k, c.Raw) + return c.get(k, source) } // GetRaw looks up a configuration value by key and returns the value, @@ -135,9 +251,25 @@ func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { // IsComputed returns whether the given key is computed or not. func (c *ResourceConfig) IsComputed(k string) bool { - _, ok := c.get(k, c.Config) - _, okRaw := c.get(k, c.Raw) - return !ok && okRaw + // The next thing we do is check the config if we get a computed + // value out of it. + v, ok := c.get(k, c.Config) + if !ok { + return false + } + + // If value is nil, then it isn't computed + if v == nil { + return false + } + + // Test if the value contains an unknown value + var w unknownCheckWalker + if err := reflectwalk.Walk(v, &w); err != nil { + panic(err) + } + + return w.Unknown } // IsSet checks if the key in the configuration is set. A key is set if @@ -151,10 +283,8 @@ func (c *ResourceConfig) IsSet(k string) bool { return false } - for _, ck := range c.ComputedKeys { - if ck == k { - return true - } + if c.IsComputed(k) { + return true } if _, ok := c.Get(k); ok { @@ -190,22 +320,33 @@ func (c *ResourceConfig) get( if !v.IsValid() { return nil, false } + return v.Interface(), true } return nil, false } + current = v.Interface() case reflect.Slice: previous = current + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. + for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == unknownValue() { + return v, true + } + } + current = cv.Len() } else { i, err := strconv.ParseInt(part, 0, 0) if err != nil { return nil, false } - if i >= int64(cv.Len()) { + if int(i) < 0 || int(i) >= cv.Len() { return nil, false } current = cv.Index(int(i)).Interface() @@ -215,8 +356,10 @@ func (c *ResourceConfig) get( // prefix so were split as path components above. 
actualKey := strings.Join(parts[i-1:], ".") if prevMap, ok := previous.(map[string]interface{}); ok { - return prevMap[actualKey], true + v, ok := prevMap[actualKey] + return v, ok } + return nil, false default: panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) @@ -242,3 +385,16 @@ func (c *ResourceConfig) interpolateForce() { c.Raw = c.raw.RawMap() c.Config = c.raw.Config() } + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == unknownValue() { + w.Unknown = true + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go index da22b232..a64f5d84 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" ) // ResourceAddress is a way of identifying an individual resource (or, @@ -29,6 +30,10 @@ type ResourceAddress struct { // Copy returns a copy of this ResourceAddress func (r *ResourceAddress) Copy() *ResourceAddress { + if r == nil { + return nil + } + n := &ResourceAddress{ Path: make([]string, 0, len(r.Path)), Index: r.Index, @@ -37,9 +42,9 @@ func (r *ResourceAddress) Copy() *ResourceAddress { Type: r.Type, Mode: r.Mode, } - for _, p := range r.Path { - n.Path = append(n.Path, p) - } + + n.Path = append(n.Path, r.Path...) + return n } @@ -85,6 +90,126 @@ func (r *ResourceAddress) String() string { return strings.Join(result, ".") } +// HasResourceSpec returns true if the address has a resource spec, as +// defined in the documentation: +// https://www.terraform.io/docs/internals/resource-addressing.html +// In particular, this returns false if the address contains only +// a module path, thus addressing the entire module. +func (r *ResourceAddress) HasResourceSpec() bool { + return r.Type != "" && r.Name != "" +} + +// WholeModuleAddress returns the resource address that refers to all +// resources in the same module as the receiver address. +func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress { + return &ResourceAddress{ + Path: r.Path, + Index: -1, + InstanceTypeSet: false, + } +} + +// MatchesConfig returns true if the receiver matches the given +// configuration resource within the given configuration module. +// +// Since resource configuration blocks represent all of the instances of +// a multi-instance resource, the index of the address (if any) is not +// considered. +func (r *ResourceAddress) MatchesConfig(mod *module.Tree, rc *config.Resource) bool { + if r.HasResourceSpec() { + if r.Mode != rc.Mode || r.Type != rc.Type || r.Name != rc.Name { + return false + } + } + + addrPath := r.Path + cfgPath := mod.Path() + + // normalize + if len(addrPath) == 0 { + addrPath = nil + } + if len(cfgPath) == 0 { + cfgPath = nil + } + return reflect.DeepEqual(addrPath, cfgPath) +} + +// stateId returns the ID that this resource should be entered with +// in the state. This is also used for diffs. In the future, we'd like to +// move away from this string field so I don't export this. 
+func (r *ResourceAddress) stateId() string { + result := fmt.Sprintf("%s.%s", r.Type, r.Name) + switch r.Mode { + case config.ManagedResourceMode: + // Done + case config.DataResourceMode: + result = fmt.Sprintf("data.%s", result) + default: + panic(fmt.Errorf("unknown resource mode: %s", r.Mode)) + } + if r.Index >= 0 { + result += fmt.Sprintf(".%d", r.Index) + } + + return result +} + +// parseResourceAddressConfig creates a resource address from a config.Resource +func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) { + return &ResourceAddress{ + Type: r.Type, + Name: r.Name, + Index: -1, + InstanceType: TypePrimary, + Mode: r.Mode, + }, nil +} + +// parseResourceAddressInternal parses the somewhat bespoke resource +// identifier used in states and diffs, such as "instance.name.0". +func parseResourceAddressInternal(s string) (*ResourceAddress, error) { + // Split based on ".". Every resource address should have at least two + // elements (type and name). + parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + mode := config.ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + mode = config.DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && mode != config.DataResourceMode { + return nil, fmt.Errorf("Invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + addr := &ResourceAddress{ + Type: parts[0], + Name: parts[1], + Index: -1, + InstanceType: TypePrimary, + Mode: mode, + } + + // If we have more parts, then we have an index. Parse that. + if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err) + } + + addr.Index = int(idx) + } + + return addr, nil +} + func ParseResourceAddress(s string) (*ResourceAddress, error) { matches, err := tokenizeResourceAddress(s) if err != nil { @@ -106,7 +231,10 @@ func ParseResourceAddress(s string) (*ResourceAddress, error) { // not allowed to say "data." without a type following if mode == config.DataResourceMode && matches["type"] == "" { - return nil, fmt.Errorf("must target specific data instance") + return nil, fmt.Errorf( + "invalid resource address %q: must target specific data instance", + s, + ) } return &ResourceAddress{ @@ -120,6 +248,75 @@ func ParseResourceAddress(s string) (*ResourceAddress, error) { }, nil } +// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a +// resource name as described in a module diff. +// +// For historical reasons a different addressing format is used in this +// context. The internal format should not be shown in the UI and instead +// this function should be used to translate to a ResourceAddress and +// then, where appropriate, use the String method to produce a canonical +// resource address string for display in the UI. +// +// The given path slice must be empty (or nil) for the root module, and +// otherwise consist of a sequence of module names traversing down into +// the module tree. If a non-nil path is provided, the caller must not +// modify its underlying array after passing it to this function. 
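parseResourceAddressInternal above accepts the compact "type.name", "type.name.index" and "data."-prefixed forms used in state and diff keys. A cut-down, self-contained sketch of that parsing (hypothetical helper; no InstanceType handling):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitInternalAddr is a simplified take on parseResourceAddressInternal.
func splitInternalAddr(s string) (mode, typ, name string, index int, err error) {
	parts := strings.Split(s, ".")
	mode = "managed"
	if len(parts) > 2 && parts[0] == "data" {
		mode = "data"
		parts = parts[1:]
	}
	if len(parts) < 2 || len(parts) > 3 {
		return "", "", "", 0, fmt.Errorf("invalid internal resource address format: %s", s)
	}
	index = -1 // -1 means "not an indexed instance"
	if len(parts) == 3 {
		if index, err = strconv.Atoi(parts[2]); err != nil {
			return "", "", "", 0, err
		}
	}
	return mode, parts[0], parts[1], index, nil
}

func main() {
	fmt.Println(splitInternalAddr("aws_instance.web.3"))     // managed aws_instance web 3 <nil>
	fmt.Println(splitInternalAddr("data.template_file.foo")) // data template_file foo -1 <nil>
}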
+func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) {
+	addr, err := parseResourceAddressInternal(key)
+	if err != nil {
+		return nil, err
+	}
+	addr.Path = path
+	return addr, nil
+}
+
+// Contains returns true if and only if the given node is contained within
+// the receiver.
+//
+// Containment is defined in terms of the module and resource hierarchy:
+// a resource is contained within its module and any ancestor modules,
+// an indexed resource instance is contained within the unindexed resource, etc.
+func (addr *ResourceAddress) Contains(other *ResourceAddress) bool {
+	ourPath := addr.Path
+	givenPath := other.Path
+	if len(givenPath) < len(ourPath) {
+		return false
+	}
+	for i := range ourPath {
+		if ourPath[i] != givenPath[i] {
+			return false
+		}
+	}
+
+	// If the receiver is a whole-module address then the path prefix
+	// matching is all we need.
+	if !addr.HasResourceSpec() {
+		return true
+	}
+
+	if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode {
+		return false
+	}
+
+	if addr.Index != -1 && addr.Index != other.Index {
+		return false
+	}
+
+	if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) {
+		return false
+	}
+
+	return true
+}
+
+// Equals returns true if the receiver matches the given address.
+//
+// The name of this method is a misnomer, since it doesn't test for exact
+// equality. Instead, it tests that the _specified_ parts of each
+// address match, treating any unspecified parts as wildcards.
+//
+// See also Contains, which takes a more hierarchical approach to comparing
+// addresses.
 func (addr *ResourceAddress) Equals(raw interface{}) bool {
 	other, ok := raw.(*ResourceAddress)
 	if !ok {
@@ -154,6 +351,59 @@ func (addr *ResourceAddress) Equals(raw interface{}) bool {
 		modeMatch
 }
 
+// Less returns true if and only if the receiver should be sorted before
+// the given address when presenting a list of resource addresses to
+// an end-user.
+//
+// This sort uses lexicographic sorting for most components, but uses
+// numeric sort for indices, thus causing index 10 to sort after
+// index 9, rather than after index 1.
+func (addr *ResourceAddress) Less(other *ResourceAddress) bool {
+
+	switch {
+
+	case len(addr.Path) != len(other.Path):
+		return len(addr.Path) < len(other.Path)
+
+	case !reflect.DeepEqual(addr.Path, other.Path):
+		// If the two paths are the same length but don't match, we'll just
+		// cheat and compare the string forms since it's easier than
+		// comparing all of the path segments in turn, and lexicographic
+		// comparison is correct for the module path portion.
+		addrStr := addr.String()
+		otherStr := other.String()
+		return addrStr < otherStr
+
+	case addr.Mode != other.Mode:
+		return addr.Mode == config.DataResourceMode
+
+	case addr.Type != other.Type:
+		return addr.Type < other.Type
+
+	case addr.Name != other.Name:
+		return addr.Name < other.Name
+
+	case addr.Index != other.Index:
+		// Since "Index" is -1 for an un-indexed address, this also conveniently
+		// sorts unindexed addresses before indexed ones, should they both
+		// appear for some reason.
+		return addr.Index < other.Index
+
+	case addr.InstanceTypeSet != other.InstanceTypeSet:
+		return !addr.InstanceTypeSet
+
+	case addr.InstanceType != other.InstanceType:
+		// InstanceType is actually an enum, so this is just an arbitrary
+		// sort based on the enum numeric values, and thus not particularly
+		// meaningful.
+		return addr.InstanceType < other.InstanceType
+
+	default:
+		return false
+
+	}
+}
+
 func ParseResourceIndex(s string) (int, error) {
 	if s == "" {
 		return -1, nil
@@ -196,7 +446,7 @@ func tokenizeResourceAddress(s string) (map[string]string, error) {
 	// string "aws_instance.web.tainted[1]"
 	re := regexp.MustCompile(`\A` +
 		// "module.foo.module.bar" (optional)
-		`(?P<path>(?:module\.[^.]+\.?)*)` +
+		`(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
 		// possibly "data.", if targeting is a data resource
 		`(?P<data_prefix>(?:data\.)?)` +
 		// "aws_instance.web" (optional when module path specified)
@@ -206,14 +456,17 @@ func tokenizeResourceAddress(s string) (map[string]string, error) {
 		// "1" (optional, omission implies: "0")
 		`(?:\[(?P<index>\d+)\])?` +
 		`\z`)
+
 	groupNames := re.SubexpNames()
 	rawMatches := re.FindAllStringSubmatch(s, -1)
 	if len(rawMatches) != 1 {
-		return nil, fmt.Errorf("Problem parsing address: %q", s)
+		return nil, fmt.Errorf("invalid resource address %q", s)
 	}
+
 	matches := make(map[string]string)
 	for i, m := range rawMatches[0] {
 		matches[groupNames[i]] = m
 	}
+
 	return matches, nil
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
index 37cd1d5c..93fd14fc 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -1,13 +1,35 @@
 package terraform
 
+import (
+	"fmt"
+
+	multierror "github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/terraform/plugin/discovery"
+)
+
 // ResourceProvider is an interface that must be implemented by any
 // resource provider: the thing that creates and manages the resources in
 // a Terraform configuration.
+//
+// Important implementation note: All returned pointers, such as
+// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to
+// shared data. Terraform is highly parallel and assumes that this data is safe
+// to read/write in parallel so it must be unique references. Note that it is
+// safe to return arguments as results, however.
 type ResourceProvider interface {
 	/*********************************************************************
 	 * Functions related to the provider
 	 *********************************************************************/
 
+	// GetSchema returns the config schema for the main provider
+	// configuration, as would appear in a "provider" block in the
+	// configuration files.
+	//
+	// Currently not all providers support schema. Callers must therefore
+	// first call Resources and DataSources and ensure that at least one
+	// resource or data source has the SchemaAvailable flag set.
+	GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error)
+
 	// Input is called to ask the provider to ask the user for input
 	// for completing the configuration if necessary.
 	//
@@ -41,6 +63,26 @@ type ResourceProvider interface {
 	// knows how to manage.
 	Resources() []ResourceType
 
+	// Stop is called when the provider should halt any in-flight actions.
+	//
+	// This can be used to make a nicer Ctrl-C experience for Terraform.
+	// Even if this isn't implemented to do anything (just returns nil),
+	// Terraform will still cleanly stop after the currently executing
+	// graph node is complete. However, this API can be used to make more
+	// efficient halts.
+	//
+	// Stop doesn't have to and shouldn't block waiting for in-flight actions
+	// to complete. It should take any action it wants and return immediately
+	// acknowledging it has received the stop request.
Terraform core will + // automatically not make any further API calls to the provider soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + /********************************************************************* * Functions related to individual resources *********************************************************************/ @@ -128,6 +170,18 @@ type ResourceProvider interface { ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) } +// ResourceProviderError may be returned when creating a Context if the +// required providers cannot be satisfied. This error can then be used to +// format a more useful message for the user. +type ResourceProviderError struct { + Errors []error +} + +func (e *ResourceProviderError) Error() string { + // use multierror to format the default output + return multierror.Append(nil, e.Errors...).Error() +} + // ResourceProviderCloser is an interface that providers that can close // connections that aren't needed anymore must implement. type ResourceProviderCloser interface { @@ -138,11 +192,69 @@ type ResourceProviderCloser interface { type ResourceType struct { Name string // Name of the resource, example "instance" (no provider prefix) Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool } // DataSource is a data source that a resource provider implements. type DataSource struct { Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// ResourceProviderResolver is an interface implemented by objects that are +// able to resolve a given set of resource provider version constraints +// into ResourceProviderFactory callbacks. +type ResourceProviderResolver interface { + // Given a constraint map, return a ResourceProviderFactory for each + // requested provider. If some or all of the constraints cannot be + // satisfied, return a non-nil slice of errors describing the problems. + ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) +} + +// ResourceProviderResolverFunc wraps a callback function and turns it into +// a ResourceProviderResolver implementation, for convenience in situations +// where a function and its associated closure are sufficient as a resolver +// implementation. +type ResourceProviderResolverFunc func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) + +// ResolveProviders implements ResourceProviderResolver by calling the +// wrapped function. 
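ResourceProviderResolverFunc, described above and implemented just below, is the classic function-adapter pattern (compare net/http's HandlerFunc). A self-contained sketch with simplified stand-in types, including a fixed-table resolver in the spirit of ResourceProviderResolverFixed:

package main

import "fmt"

// resolver is a stand-in for ResourceProviderResolver.
type resolver interface {
	Resolve(names []string) (map[string]string, []error)
}

// resolverFunc adapts a plain function to the resolver interface, the
// same trick ResourceProviderResolverFunc uses.
type resolverFunc func(names []string) (map[string]string, []error)

func (f resolverFunc) Resolve(names []string) (map[string]string, []error) {
	return f(names)
}

// fixed returns a resolver over a fixed factory table, echoing
// ResourceProviderResolverFixed: unknown names yield errors.
func fixed(factories map[string]string) resolver {
	return resolverFunc(func(names []string) (map[string]string, []error) {
		ret := make(map[string]string, len(names))
		var errs []error
		for _, name := range names {
			if f, ok := factories[name]; ok {
				ret[name] = f
			} else {
				errs = append(errs, fmt.Errorf("provider %q is not available", name))
			}
		}
		return ret, errs
	})
}

func main() {
	r := fixed(map[string]string{"aws": "aws-factory"})
	got, errs := r.Resolve([]string{"aws", "google"})
	fmt.Println(got, errs) // map[aws:aws-factory] [provider "google" is not available]
}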
+func (f ResourceProviderResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { + return f(reqd) +} + +// ResourceProviderResolverFixed returns a ResourceProviderResolver that +// has a fixed set of provider factories provided by the caller. The returned +// resolver ignores version constraints entirely and just returns the given +// factory for each requested provider name. +// +// This function is primarily used in tests, to provide mock providers or +// in-process providers under test. +func ResourceProviderResolverFixed(factories map[string]ResourceProviderFactory) ResourceProviderResolver { + return ResourceProviderResolverFunc(func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) { + ret := make(map[string]ResourceProviderFactory, len(reqd)) + var errs []error + for name := range reqd { + if factory, exists := factories[name]; exists { + ret[name] = factory + } else { + errs = append(errs, fmt.Errorf("provider %q is not available", name)) + } + } + return ret, errs + }) } // ResourceProviderFactory is a function type that creates a new instance @@ -176,3 +288,21 @@ func ProviderHasDataSource(p ResourceProvider, n string) bool { return false } + +// resourceProviderFactories matches available plugins to the given version +// requirements to produce a map of compatible provider plugins if possible, +// or an error if the currently-available plugins are insufficient. +// +// This should be called only with configurations that have passed calls +// to config.Validate(), which ensures that all of the given version +// constraints are valid. It will panic if any invalid constraints are present. +func resourceProviderFactories(resolver ResourceProviderResolver, reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, error) { + ret, errs := resolver.ResolveProviders(reqd) + if errs != nil { + return nil, &ResourceProviderError{ + Errors: errs, + } + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go index 8389fd0a..4000e3d2 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go @@ -1,6 +1,8 @@ package terraform -import "sync" +import ( + "sync" +) // MockResourceProvider implements ResourceProvider but mocks out all the // calls for testing purposes. 
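Every method on the mock below follows the same convention: record that the call happened, prefer an injected Fn callback when one is set, and otherwise return the canned value. A trimmed stand-alone illustration (toy type, not the vendored mock):

package main

import "fmt"

// mockStopper shows the Called/Fn/Return convention the mock uses for
// every method it implements.
type mockStopper struct {
	StopCalled      bool
	StopFn          func() error
	StopReturnError error
}

func (m *mockStopper) Stop() error {
	m.StopCalled = true
	if m.StopFn != nil {
		return m.StopFn()
	}
	return m.StopReturnError
}

func main() {
	m := &mockStopper{StopFn: func() error { return fmt.Errorf("stop failed") }}
	fmt.Println(m.Stop(), m.StopCalled) // stop failed true
}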
@@ -12,6 +14,10 @@ type MockResourceProvider struct { CloseCalled bool CloseError error + GetSchemaCalled bool + GetSchemaRequest *ProviderSchemaRequest + GetSchemaReturn *ProviderSchema + GetSchemaReturnError error InputCalled bool InputInput UIInput InputConfig *ResourceConfig @@ -56,6 +62,9 @@ type MockResourceProvider struct { ReadDataDiffFn func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error) ReadDataDiffReturn *InstanceDiff ReadDataDiffReturnError error + StopCalled bool + StopFn func() error + StopReturnError error DataSourcesCalled bool DataSourcesReturn []DataSource ValidateCalled bool @@ -89,8 +98,19 @@ func (p *MockResourceProvider) Close() error { return p.CloseError } +func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) { + p.Lock() + defer p.Unlock() + + p.GetSchemaCalled = true + p.GetSchemaRequest = req + return p.GetSchemaReturn, p.GetSchemaReturnError +} + func (p *MockResourceProvider) Input( input UIInput, c *ResourceConfig) (*ResourceConfig, error) { + p.Lock() + defer p.Unlock() p.InputCalled = true p.InputInput = input p.InputConfig = c @@ -141,6 +161,18 @@ func (p *MockResourceProvider) Configure(c *ResourceConfig) error { return p.ConfigureReturnError } +func (p *MockResourceProvider) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} + func (p *MockResourceProvider) Apply( info *InstanceInfo, state *InstanceState, @@ -157,7 +189,7 @@ func (p *MockResourceProvider) Apply( return p.ApplyFn(info, state, diff) } - return p.ApplyReturn, p.ApplyReturnError + return p.ApplyReturn.DeepCopy(), p.ApplyReturnError } func (p *MockResourceProvider) Diff( @@ -171,11 +203,12 @@ func (p *MockResourceProvider) Diff( p.DiffInfo = info p.DiffState = state p.DiffDesired = desired + if p.DiffFn != nil { return p.DiffFn(info, state, desired) } - return p.DiffReturn, p.DiffReturnError + return p.DiffReturn.DeepCopy(), p.DiffReturnError } func (p *MockResourceProvider) Refresh( @@ -192,7 +225,7 @@ func (p *MockResourceProvider) Refresh( return p.RefreshFn(info, s) } - return p.RefreshReturn, p.RefreshReturnError + return p.RefreshReturn.DeepCopy(), p.RefreshReturnError } func (p *MockResourceProvider) Resources() []ResourceType { @@ -214,7 +247,15 @@ func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*In return p.ImportStateFn(info, id) } - return p.ImportStateReturn, p.ImportStateReturnError + var result []*InstanceState + if p.ImportStateReturn != nil { + result = make([]*InstanceState, len(p.ImportStateReturn)) + for i, v := range p.ImportStateReturn { + result[i] = v.DeepCopy() + } + } + + return result, p.ImportStateReturnError } func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) { @@ -245,7 +286,7 @@ func (p *MockResourceProvider) ReadDataDiff( return p.ReadDataDiffFn(info, desired) } - return p.ReadDataDiffReturn, p.ReadDataDiffReturnError + return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError } func (p *MockResourceProvider) ReadDataApply( @@ -262,7 +303,7 @@ func (p *MockResourceProvider) ReadDataApply( return p.ReadDataApplyFn(info, d) } - return p.ReadDataApplyReturn, p.ReadDataApplyReturnError + return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError } func (p *MockResourceProvider) DataSources() []DataSource { diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go 
b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go index 3327e300..361ec1ec 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go @@ -21,6 +21,26 @@ type ResourceProvisioner interface { // is provided since provisioners only run after a resource has been // newly created. Apply(UIOutput, *InstanceState, *ResourceConfig) error + + // Stop is called when the provisioner should halt any in-flight actions. + // + // This can be used to make a nicer Ctrl-C experience for Terraform. + // Even if this isn't implemented to do anything (just returns nil), + // Terraform will still cleanly stop after the currently executing + // graph node is complete. However, this API can be used to make more + // efficient halts. + // + // Stop doesn't have to and shouldn't block waiting for in-flight actions + // to complete. It should take any action it wants and return immediately + // acknowledging it has received the stop request. Terraform core will + // automatically not make any further API calls to the provisioner soon + // after Stop is called (technically exactly once the currently executing + // graph nodes are complete). + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error } // ResourceProvisionerCloser is an interface that provisioners that can close diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go index be04e981..f471a518 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go @@ -21,6 +21,10 @@ type MockResourceProvisioner struct { ValidateFn func(c *ResourceConfig) ([]string, []error) ValidateReturnWarns []string ValidateReturnErrors []error + + StopCalled bool + StopFn func() error + StopReturnError error } func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { @@ -40,14 +44,29 @@ func (p *MockResourceProvisioner) Apply( output UIOutput, state *InstanceState, c *ResourceConfig) error { p.Lock() - defer p.Unlock() p.ApplyCalled = true p.ApplyOutput = output p.ApplyState = state p.ApplyConfig = c if p.ApplyFn != nil { - return p.ApplyFn(state, c) + fn := p.ApplyFn + p.Unlock() + return fn(state, c) } + + defer p.Unlock() return p.ApplyReturnError } + +func (p *MockResourceProvisioner) Stop() error { + p.Lock() + defer p.Unlock() + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopReturnError +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/schemas.go b/vendor/github.com/hashicorp/terraform/terraform/schemas.go new file mode 100644 index 00000000..ec46efcf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/schemas.go @@ -0,0 +1,34 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/configschema" +) + +type Schemas struct { + Providers ProviderSchemas +} + +// ProviderSchemas is a map from provider names to provider schemas. +// +// The names in this map are the direct plugin name (e.g. "aws") rather than +// any alias name (e.g. "aws.foo"), since all aliases of a provider share the +// same plugin and therefore the same schema.
+type ProviderSchemas map[string]*ProviderSchema + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. +type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go index e8e52b7a..20f1d8a2 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/semantics.go +++ b/vendor/github.com/hashicorp/terraform/terraform/semantics.go @@ -2,6 +2,7 @@ package terraform import ( "fmt" + "strings" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" @@ -48,25 +49,6 @@ type SemanticChecker interface { Check(*dag.Graph, dag.Vertex) error } -// SemanticCheckModulesExist is an implementation of SemanticChecker that -// verifies that all the modules that are referenced in the graph exist. -type SemanticCheckModulesExist struct{} - -// TODO: test -func (*SemanticCheckModulesExist) Check(g *dag.Graph, v dag.Vertex) error { - mn, ok := v.(*GraphNodeConfigModule) - if !ok { - return nil - } - - if mn.Tree == nil { - return fmt.Errorf( - "module '%s' not found", mn.Module.Name) - } - - return nil -} - // smcUserVariables does all the semantic checks to verify that the // variables given satisfy the configuration itself. func smcUserVariables(c *config.Config, vs map[string]interface{}) []error { @@ -96,6 +78,21 @@ func smcUserVariables(c *config.Config, vs map[string]interface{}) []error { // Check that types match up for name, proposedValue := range vs { + // Check for "map.key" fields. These stopped working with Terraform + // 0.7 but we do this to surface a better error message informing + // the user what happened. + if idx := strings.Index(name, "."); idx > 0 { + key := name[:idx] + if _, ok := cvs[key]; ok { + errs = append(errs, fmt.Errorf( + "%s: Overriding map keys with the format `name.key` is no "+ + "longer allowed. You may still override keys by setting "+ + "`name = { key = value }`. The maps will be merged. 
This "+ + "behavior appeared in 0.7.0.", name)) + continue + } + } + schema, ok := cvs[name] if !ok { continue @@ -108,30 +105,25 @@ func smcUserVariables(c *config.Config, vs map[string]interface{}) []error { switch proposedValue.(type) { case string: continue - default: - errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", - name, declaredType.Printable(), hclTypeName(proposedValue))) } case config.VariableTypeMap: - switch proposedValue.(type) { + switch v := proposedValue.(type) { case map[string]interface{}: continue - default: - errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", - name, declaredType.Printable(), hclTypeName(proposedValue))) + case []map[string]interface{}: + // if we have a list of 1 map, it will get coerced later as needed + if len(v) == 1 { + continue + } } case config.VariableTypeList: switch proposedValue.(type) { case []interface{}: continue - default: - errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", - name, declaredType.Printable(), hclTypeName(proposedValue))) } - default: - errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", - name, declaredType.Printable(), hclTypeName(proposedValue))) } + errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", + name, declaredType.Printable(), hclTypeName(proposedValue))) } // TODO(mitchellh): variables that are unknown diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go index 2ab56ba6..04b14a65 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/state.go @@ -4,10 +4,12 @@ import ( "bufio" "bytes" "encoding/json" + "errors" "fmt" "io" "io/ioutil" "log" + "os" "reflect" "sort" "strconv" @@ -15,10 +17,12 @@ import ( "sync" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" "github.com/hashicorp/go-version" "github.com/hashicorp/terraform/config" "github.com/mitchellh/copystructure" - "github.com/satori/go.uuid" + + tfversion "github.com/hashicorp/terraform/version" ) const ( @@ -78,6 +82,11 @@ type State struct { // pull and push state files from a remote storage endpoint. Remote *RemoteState `json:"remote,omitempty"` + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + // Modules contains all the modules in a breadth-first order Modules []*ModuleState `json:"modules"` @@ -108,6 +117,10 @@ func (s *State) Children(path []string) []*ModuleState { func (s *State) children(path []string) []*ModuleState { result := make([]*ModuleState, 0) for _, m := range s.Modules { + if m == nil { + continue + } + if len(m.Path) != len(path)+1 { continue } @@ -161,6 +174,9 @@ func (s *State) ModuleByPath(path []string) *ModuleState { func (s *State) moduleByPath(path []string) *ModuleState { for _, mod := range s.Modules { + if mod == nil { + continue + } if mod.Path == nil { panic("missing module path") } @@ -213,6 +229,10 @@ func (s *State) moduleOrphans(path []string, c *config.Config) [][]string { // Find the orphans that are nested... 
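To make the type checks above concrete, here is a sketch of variable values that `smcUserVariables` accepts; the variable names and `cfg` (a `*config.Config`) are invented for illustration:

```go
vs := map[string]interface{}{
	"region": "us-east-1",                               // config.VariableTypeString
	"tags":   map[string]interface{}{"env": "prod"},     // config.VariableTypeMap
	"zones":  []interface{}{"us-east-1a", "us-east-1b"}, // config.VariableTypeList

	// A one-element list of maps is also accepted for a map variable;
	// per the comment above, it is coerced later as needed.
	"extra": []map[string]interface{}{{"team": "ops"}},
}
errs := smcUserVariables(cfg, vs)
```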
for _, m := range s.Modules { + if m == nil { + continue + } + // We only want modules that are at least grandchildren if len(m.Path) < len(path)+2 { continue @@ -262,6 +282,24 @@ func (s *State) Empty() bool { return len(s.Modules) == 0 } +// HasResources returns true if the state contains any resources. +// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + // IsRemote returns true if State represents a state that exists and is // remote. func (s *State) IsRemote() bool { @@ -310,6 +348,10 @@ func (s *State) Validate() error { { found := make(map[string]struct{}) for _, ms := range s.Modules { + if ms == nil { + continue + } + key := strings.Join(ms.Path, ".") if _, ok := found[key]; ok { result = multierror.Append(result, fmt.Errorf( @@ -494,6 +536,43 @@ func (s *State) equal(other *State) bool { return true } +// MarshalEqual is similar to Equal but provides a stronger definition of +// "equal", where two states are equal if and only if their serialized form +// is byte-for-byte identical. +// +// This is primarily useful for callers that are trying to save snapshots +// of state to persistent storage, allowing them to detect when a new +// snapshot must be taken. +// +// Note that the serial number and lineage are included in the serialized form, +// so it's the caller's responsibility to properly manage these attributes +// so that this method is only called on two states that have the same +// serial and lineage, unless detecting such differences is desired. +func (s *State) MarshalEqual(other *State) bool { + if s == nil && other == nil { + return true + } else if s == nil || other == nil { + return false + } + + recvBuf := &bytes.Buffer{} + otherBuf := &bytes.Buffer{} + + err := WriteState(s, recvBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + err = WriteState(other, otherBuf) + if err != nil { + // should never happen, since we're writing to a buffer + panic(err) + } + + return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes()) +} + type StateAgeComparison int const ( @@ -545,7 +624,7 @@ func (s *State) CompareAges(other *State) (StateAgeComparison, error) { } // SameLineage returns true only if the state given in argument belongs -// to the same "lineage" of states as the reciever. +// to the same "lineage" of states as the receiver. func (s *State) SameLineage(other *State) bool { s.Lock() defer s.Unlock() @@ -564,6 +643,10 @@ func (s *State) SameLineage(other *State) bool { // DeepCopy performs a deep copy of the state structure and returns // a new structure. func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + copy, err := copystructure.Config{Lock: true}.Copy(s) if err != nil { panic(err) @@ -572,30 +655,6 @@ func (s *State) DeepCopy() *State { return copy.(*State) } -// IncrementSerialMaybe increments the serial number of this state -// if it different from the other state. 
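A sketch of the intended MarshalEqual usage pattern described above: persist a new snapshot only when the serialized form actually changed. `store` is a hypothetical storage API:

```go
if !current.MarshalEqual(lastSaved) {
	var buf bytes.Buffer
	if err := WriteState(current, &buf); err != nil {
		return err
	}
	if err := store.Put(buf.Bytes()); err != nil { // hypothetical storage API
		return err
	}
	lastSaved = current.DeepCopy()
}
```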
-func (s *State) IncrementSerialMaybe(other *State) { - if s == nil { - return - } - if other == nil { - return - } - s.Lock() - defer s.Unlock() - - if s.Serial > other.Serial { - return - } - if other.TFVersion != s.TFVersion || !s.equal(other) { - if other.Serial > s.Serial { - s.Serial = other.Serial - } - - s.Serial++ - } -} - // FromFutureTerraform checks if this state was written by a Terraform // version from the future. func (s *State) FromFutureTerraform() bool { @@ -608,7 +667,7 @@ func (s *State) FromFutureTerraform() bool { } v := version.Must(version.NewVersion(s.TFVersion)) - return SemVersion.LessThan(v) + return tfversion.SemVer.LessThan(v) } func (s *State) Init() { @@ -621,18 +680,22 @@ func (s *State) init() { if s.Version == 0 { s.Version = StateVersion } + if s.moduleByPath(rootModulePath) == nil { s.addModule(rootModulePath) } s.ensureHasLineage() for _, mod := range s.Modules { - mod.init() + if mod != nil { + mod.init() + } } if s.Remote != nil { s.Remote.init() } + } func (s *State) EnsureHasLineage() { @@ -644,7 +707,11 @@ func (s *State) ensureHasLineage() { if s.Lineage == "" { - s.Lineage = uuid.NewV4().String() + lineage, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Errorf("Failed to generate lineage: %v", err)) + } + s.Lineage = lineage log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) } else { log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) @@ -677,6 +744,18 @@ func (s *State) prune() { if s == nil { return } + + // Filter out empty modules. + // A module is always assumed to have a path, and its length isn't always + // bounds checked later on. Modules may be "emptied" during destroy, but we + // never want to store those in the state. + for i := 0; i < len(s.Modules); i++ { + if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { + s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) + i-- + } + } + for _, mod := range s.Modules { mod.prune() } @@ -691,7 +770,9 @@ func (s *State) sort() { // Allow modules to be sorted for _, m := range s.Modules { - m.sort() + if m != nil { + m.sort() + } } } @@ -728,6 +809,43 @@ func (s *State) String() string { return strings.TrimSpace(buf.String()) } +// BackendState stores the configuration to connect to a remote backend. +type BackendState struct { + Type string `json:"type"` // Backend type + Config map[string]interface{} `json:"config"` // Backend raw config + + // Hash is the hash code to uniquely identify the original source + // configuration. We use this to detect when there is a change in + // configuration even when "type" isn't changed. + Hash uint64 `json:"hash"` +} + +// Empty returns true if BackendState has no state. +func (s *BackendState) Empty() bool { + return s == nil || s.Type == "" +} + +// Rehash returns a unique content hash for this backend's configuration +// as a uint64 value. +// The Hash stored in the backend state needs to match the config itself, but +// we need to compare the backend config after it has been combined with all +// options. +// This function must match the implementation used by config.Backend. +func (s *BackendState) Rehash() uint64 { + if s == nil { + return 0 + } + + cfg := config.Backend{ + Type: s.Type, + RawConfig: &config.RawConfig{ + Raw: s.Config, + }, + } + + return cfg.Rehash() +} + // RemoteState is used to track the information about a remote // state store that we push/pull state to.
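For illustration, a sketch (not code from this patch) of how a caller might use the Rehash method above to detect backend configuration drift:

```go
if !st.Backend.Empty() && st.Backend.Rehash() != st.Backend.Hash {
	// The recomputed hash no longer matches the stored hash, so the
	// backend configuration changed since the state was written and a
	// reinitialization would be required.
	log.Printf("[WARN] backend hash mismatch: %d != %d",
		st.Backend.Rehash(), st.Backend.Hash)
}
```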
type RemoteState struct { @@ -866,6 +984,10 @@ type ModuleState struct { // always disjoint, so the path represents a module tree Path []string `json:"path"` + // Locals are kept only transiently in-memory, because we can always + // re-compute them. + Locals map[string]interface{} `json:"-"` + // Outputs declared by the module and maintained for each module // even though only the root module technically needs to be kept. // This allows operators to inspect values at the boundaries. @@ -972,7 +1094,7 @@ func (m *ModuleState) Orphans(c *config.Config) []string { defer m.Unlock() keys := make(map[string]struct{}) - for k, _ := range m.Resources { + for k := range m.Resources { keys[k] = struct{}{} } @@ -980,7 +1102,7 @@ for _, r := range c.Resources { delete(keys, r.Id()) - for k, _ := range keys { + for k := range keys { if strings.HasPrefix(k, r.Id()+".") { delete(keys, k) } @@ -989,7 +1111,32 @@ } result := make([]string, 0, len(keys)) - for k, _ := range keys { + for k := range keys { + result = append(result, k) + } + + return result +} + +// RemovedOutputs returns a list of outputs that are in the State but aren't +// present in the configuration itself. +func (m *ModuleState) RemovedOutputs(c *config.Config) []string { + m.Lock() + defer m.Unlock() + + keys := make(map[string]struct{}) + for k := range m.Outputs { + keys[k] = struct{}{} + } + + if c != nil { + for _, o := range c.Outputs { + delete(keys, o.Name) + } + } + + result := make([]string, 0, len(keys)) + for k := range keys { result = append(result, k) } @@ -1056,11 +1203,12 @@ func (m *ModuleState) prune() { defer m.Unlock() for k, v := range m.Resources { - v.prune() - - if (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { + if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { delete(m.Resources, k) + continue } + + v.prune() } for k, v := range m.Outputs { @@ -1068,6 +1216,8 @@ delete(m.Outputs, k) } } + + m.Dependencies = uniqueStrings(m.Dependencies) } func (m *ModuleState) sort() { @@ -1090,7 +1240,8 @@ func (m *ModuleState) String() string { for name, _ := range m.Resources { names = append(names, name) } - sort.Strings(names) + + sort.Sort(resourceNameSort(names)) for _, k := range names { rs := m.Resources[k] @@ -1130,6 +1281,7 @@ attrKeys = append(attrKeys, ak) } + sort.Strings(attrKeys) for _, ak := range attrKeys { @@ -1160,6 +1312,7 @@ for k, _ := range m.Outputs { ks = append(ks, k) } + sort.Strings(ks) for _, k := range ks { @@ -1191,6 +1344,10 @@ return buf.String() } +func (m *ModuleState) Empty() bool { + return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0 +} + // ResourceStateKey is a structured representation of the key used for the // ModuleState.Resources mapping type ResourceStateKey struct { @@ -1402,10 +1559,6 @@ func (s *ResourceState) init() { if s.Deposed == nil { s.Deposed = make([]*InstanceState, 0) } - - for _, dep := range s.Deposed { - dep.init() - } } func (s *ResourceState) deepcopy() *ResourceState { @@ -1432,8 +1585,9 @@ func (s *ResourceState) prune() { i-- } } - s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) } func (s *ResourceState) sort() { @@ -1471,8 +1625,9 @@ type InstanceState struct { // Meta is a simple K/V map that is
persisted to the State but otherwise // ignored by Terraform core. It's meant to be used for accounting by - // external client code. - Meta map[string]string `json:"meta"` + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` // Tainted is used to mark a resource for recreation. Tainted bool `json:"tainted"` @@ -1491,7 +1646,7 @@ func (s *InstanceState) init() { s.Attributes = make(map[string]string) } if s.Meta == nil { - s.Meta = make(map[string]string) + s.Meta = make(map[string]interface{}) } s.Ephemeral.init() } @@ -1562,13 +1717,24 @@ func (s *InstanceState) Equal(other *InstanceState) bool { if len(s.Meta) != len(other.Meta) { return false } - for k, v := range s.Meta { - otherV, ok := other.Meta[k] - if !ok { - return false + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + // + // Since this can contain numeric values that may change types during + // serialization, let's compare the serialized values. + sMeta, err := json.Marshal(s.Meta) + if err != nil { + // marshaling primitives shouldn't ever error out + panic(err) + } + otherMeta, err := json.Marshal(other.Meta) + if err != nil { + panic(err) } - if v != otherV { + if !bytes.Equal(sMeta, otherMeta) { return false } } @@ -1705,11 +1871,27 @@ func testForV0State(buf *bufio.Reader) error { return nil } +// ErrNoState is returned by ReadState when the io.Reader contains no data +var ErrNoState = errors.New("no state") + // ReadState reads a state structure out of a reader in the format that // was written by WriteState. func ReadState(src io.Reader) (*State, error) { + // check for a nil file specifically, since that produces a platform + // specific error if we try to use it in a bufio.Reader. + if f, ok := src.(*os.File); ok && f == nil { + return nil, ErrNoState + } + buf := bufio.NewReader(src) + if _, err := buf.Peek(1); err != nil { + if err == io.EOF { + return nil, ErrNoState + } + return nil, err + } + if err := testForV0State(buf); err != nil { return nil, err } @@ -1770,7 +1952,7 @@ func ReadState(src io.Reader) (*State, error) { result = v3State default: return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - SemVersion.String(), versionIdentifier.Version) + tfversion.SemVer.String(), versionIdentifier.Version) } // If we reached this place we must have a result set @@ -1778,6 +1960,10 @@ panic("resulting state in load not set, assertion failed") } + // Prune the state when we read it. It's possible to write unpruned states or + // for a user to make a state unpruned (nil-ing a module state, for example).
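The JSON-based Meta comparison above exists because numeric values change Go type across a JSON round-trip; a small self-contained demonstration:

```go
// Assumes encoding/json, bytes, and fmt are imported.
a := map[string]interface{}{"schema_version": 1} // int before serialization
data, _ := json.Marshal(a)

var b map[string]interface{}
json.Unmarshal(data, &b) // "schema_version" is now float64(1)

// reflect.DeepEqual(a, b) would report false, but the serialized
// forms are byte-for-byte identical:
ab, _ := json.Marshal(a)
bb, _ := json.Marshal(b)
fmt.Println(bytes.Equal(ab, bb)) // true
```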
+ result.prune() + // Validate the state file is valid if err := result.Validate(); err != nil { return nil, err @@ -1810,7 +1996,7 @@ func ReadStateV2(jsonBytes []byte) (*State, error) { // version that we don't understand if state.Version > StateVersion { return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - SemVersion.String(), state.Version) + tfversion.SemVer.String(), state.Version) } // Make sure the version is semantic @@ -1826,12 +2012,12 @@ func ReadStateV2(jsonBytes []byte) (*State, error) { } } - // Sort it - state.sort() - // catch any uninitialized fields in the state state.init() + // Sort it + state.sort() + return state, nil } @@ -1845,7 +2031,7 @@ func ReadStateV3(jsonBytes []byte) (*State, error) { // version that we don't understand if state.Version > StateVersion { return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", - SemVersion.String(), state.Version) + tfversion.SemVer.String(), state.Version) } // Make sure the version is semantic @@ -1861,12 +2047,12 @@ func ReadStateV3(jsonBytes []byte) (*State, error) { } } - // Sort it - state.sort() - // catch any uninitialized fields in the state state.init() + // Sort it + state.sort() + // Now we write the state back out to detect any changes in normalization. // If our state is now written out differently, bump the serial number to // prevent conflicts. @@ -1886,12 +2072,17 @@ func ReadStateV3(jsonBytes []byte) (*State, error) { // WriteState writes a state somewhere in a binary format. func WriteState(d *State, dst io.Writer) error { - // Make sure it is sorted - d.sort() + // writing a nil state is a noop. + if d == nil { + return nil + } // make sure we have no uninitialized fields d.init() + // Make sure it is sorted + d.sort() + // Ensure the version is set d.Version = StateVersion @@ -1925,6 +2116,48 @@ func WriteState(d *State, dst io.Writer) error { return nil } +// resourceNameSort implements the sort.Interface to sort name parts lexically for +// strings and numerically for integer indexes.
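A sketch of the caller-side pattern the ErrNoState sentinel above enables (`path` is illustrative):

```go
f, err := os.Open(path)
if err != nil && !os.IsNotExist(err) {
	return nil, err
}
// On a missing file, f is a nil *os.File; the nil-file check in
// ReadState above converts that into ErrNoState rather than a
// platform-specific read error.
st, err := ReadState(f)
if err == ErrNoState {
	st = NewState() // empty but initialized state
	err = nil
}
return st, err
```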
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + // moduleStateSort implements sort.Interface to sort module states type moduleStateSort []*ModuleState @@ -1936,6 +2169,11 @@ func (s moduleStateSort) Less(i, j int) bool { a := s[i] b := s[j] + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + // If the lengths are different, then the shorter one always wins if len(a.Path) != len(b.Path) { return len(a.Path) < len(b.Path) @@ -1949,6 +2187,19 @@ func (s moduleStateSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +// CheckStateVersion returns an error if the state is not compatible with the +// current version of terraform. +func CheckStateVersion(state *State) error { + if state == nil { + return nil + } + + if state.FromFutureTerraform() { + return fmt.Errorf(stateInvalidTerraformVersionErr, state.TFVersion) + } + return nil +} + const stateValidateErrMultiModule = ` Multiple modules with the same path: %s @@ -1957,3 +2208,11 @@ in your state file that point to the same module. This will cause Terraform to behave in unexpected and error prone ways and is invalid. Please back up and modify your state file manually to resolve this. ` + +const stateInvalidTerraformVersionErr = ` +Terraform doesn't allow running any operations against a state +that was written by a future Terraform version. The state is +reporting it is written by Terraform '%s' + +Please run at least that version of Terraform to continue. +` diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go index 688e05d7..11637303 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/state_add.go +++ b/vendor/github.com/hashicorp/terraform/terraform/state_add.go @@ -1,8 +1,6 @@ package terraform -import ( - "fmt" -) +import "fmt" // Add adds the item in the state at the given address.
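The ordering resourceNameSort produces differs from plain lexical sorting when names end in integer indexes; for example (written as in-package code, since the type is unexported):

```go
names := []string{
	"aws_instance.web.10",
	"aws_instance.web.2",
	"aws_instance.db",
}
sort.Sort(resourceNameSort(names))
// Result: aws_instance.db, aws_instance.web.2, aws_instance.web.10
// sort.Strings would instead have put "web.10" before "web.2".
```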
// @@ -34,6 +32,7 @@ import ( // func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error { // Parse the address + toAddr, err := ParseResourceAddress(toAddrRaw) if err != nil { return err @@ -114,6 +113,7 @@ func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw i addrCopy.Type = resourceKey.Type addrCopy.Name = resourceKey.Name addrCopy.Index = resourceKey.Index + addrCopy.Mode = resourceKey.Mode // Perform an add if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil { @@ -333,6 +333,7 @@ func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) { Name: addr.Name, Type: addr.Type, Index: addr.Index, + Mode: addr.Mode, }).String() exists = true resource, ok := mod.Resources[resourceKey] diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go index 1b41a3b7..2dcb11b7 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go +++ b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go @@ -34,7 +34,7 @@ func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) { as[i] = a } - // If we werent given any filters, then we list all + // If we weren't given any filters, then we list all if len(fs) == 0 { as = append(as, &ResourceAddress{Index: -1}) } @@ -85,15 +85,22 @@ func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult { // the modules to find relevant resources. for _, m := range modules { for n, r := range m.Resources { - if f.relevant(a, r) { - // The name in the state contains valuable information. Parse. - key, err := ParseResourceStateKey(n) - if err != nil { - // If we get an error parsing, then just ignore it - // out of the state. - continue - } + // The name in the state contains valuable information. Parse. + key, err := ParseResourceStateKey(n) + if err != nil { + // If we get an error parsing, then just ignore it + // out of the state. + continue + } + // Older states and test fixtures often don't contain the + // type directly on the ResourceState. We add this so StateFilter + // is a bit more robust. 
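For context, a sketch of typical StateFilter usage as exercised by the code above; `st` is assumed to be an existing *State:

```go
filter := &StateFilter{State: st}
results, err := filter.Filter("aws_instance.web")
if err != nil {
	return err
}
for _, r := range results {
	// Each result carries the parsed address and the matched value
	// (*ResourceState, *InstanceState, or *ModuleState).
	log.Printf("[DEBUG] matched %s (%T)", r.Address, r.Value)
}
```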
+ if r.Type == "" { + r.Type = key.Type + } + + if f.relevant(a, r) { if a.Name != "" && a.Name != key.Name { // Name doesn't match continue @@ -243,6 +250,13 @@ func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s StateFilterResultSlice) Less(i, j int) bool { a, b := s[i], s[j] + // if these addresses contain an index, we want to sort by index rather than name + addrA, errA := ParseResourceAddress(a.Address) + addrB, errB := ParseResourceAddress(b.Address) + if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index { + return addrA.Index < addrB.Index + } + // If the addresses are different it is just lexicographic sorting if a.Address != b.Address { return a.Address < b.Address diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go index 03861533..aa13cce8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go +++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go @@ -64,10 +64,19 @@ func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) { return nil, nil } - path, err := copystructure.Copy(old.Path) + pathRaw, err := copystructure.Copy(old.Path) if err != nil { return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) } + path, ok := pathRaw.([]string) + if !ok { + return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") + } + if len(path) == 0 { + // We found some V1 states with a nil path. Assume root and catch + // duplicate path errors later (as part of Validate). + path = rootModulePath + } // Outputs needs upgrading to use the new structure outputs := make(map[string]*OutputState) @@ -94,7 +103,7 @@ func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) { } return &ModuleState{ - Path: path.([]string), + Path: path, Outputs: outputs, Resources: resources, Dependencies: dependencies.([]string), @@ -150,16 +159,22 @@ func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) { if err != nil { return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) } + meta, err := copystructure.Copy(old.Meta) if err != nil { return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) } + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + return &InstanceState{ ID: old.ID, Attributes: attributes.(map[string]string), Ephemeral: *ephemeral, - Meta: meta.(map[string]string), + Meta: newMeta, }, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go index 1fc458d1..e52d35fc 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go +++ b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go @@ -18,7 +18,7 @@ func upgradeStateV2ToV3(old *State) (*State, error) { // Ensure the copied version is v2 before attempting to upgrade if new.Version != 2 { - return nil, fmt.Errorf("Cannot appply v2->v3 state upgrade to " + + return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " + "a state which is not version 2.") } diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go new file mode 100644 index 00000000..3f0418d9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/testing.go @@ -0,0 +1,19 @@ +package 
terraform + +import ( + "os" + "testing" +) + +// TestStateFile writes the given state to the path. +func TestStateFile(t *testing.T, path string, state *State) { + f, err := os.Create(path) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + if err := WriteState(state, f); err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go index ca573694..0e47f208 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go @@ -1,6 +1,8 @@ package terraform import ( + "log" + "github.com/hashicorp/terraform/dag" ) @@ -19,3 +21,37 @@ type GraphTransformer interface { type GraphVertexTransformer interface { Transform(dag.Vertex) (dag.Vertex, error) } + +// GraphTransformIf is a helper function that conditionally returns the +// given GraphTransformer. This is useful for calling a sequence of +// transforms inline without having to split it up into multiple append() calls. +func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer { + if f() { + return then + } + + return nil +} + +type graphTransformerMulti struct { + Transforms []GraphTransformer +} + +func (t *graphTransformerMulti) Transform(g *Graph) error { + for _, t := range t.Transforms { + if err := t.Transform(g); err != nil { + return err + } + log.Printf( + "[TRACE] Graph after step %T:\n\n%s", + t, g.StringWithNodeTypes()) + } + + return nil +} + +// GraphTransformMulti combines multiple graph transformers into a single +// GraphTransformer that runs all the individual graph transformers. +func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer { + return &graphTransformerMulti{Transforms: ts} +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go new file mode 100644 index 00000000..39cf097a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go @@ -0,0 +1,18 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config" +) + +// GraphNodeAttachProvider is an interface that must be implemented by nodes +// that want provider configurations attached. +type GraphNodeAttachProvider interface { + // Must be implemented to determine the path for the configuration + GraphNodeSubPath + + // ProviderName with no module prefix. Example: "aws". + ProviderName() string + + // Sets the configuration + AttachProvider(*config.ProviderConfig) +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go new file mode 100644 index 00000000..f2ee37e5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go @@ -0,0 +1,78 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" +) + +// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes +// that want resource configurations attached.
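A brief sketch of composing the helpers above; `mod` and `g` are assumed to be an existing module tree and graph:

```go
step := GraphTransformMulti(
	&ConfigTransformer{Module: mod},
	&AttachResourceConfigTransformer{Module: mod},
)
if err := step.Transform(g); err != nil {
	return err
}
```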
+type GraphNodeAttachResourceConfig interface { + // ResourceAddr is the address to the resource + ResourceAddr() *ResourceAddress + + // Sets the configuration + AttachResourceConfig(*config.Resource) +} + +// AttachResourceConfigTransformer goes through the graph and attaches +// resource configuration structures to nodes that implement the interfaces +// above. +// +// The attached configuration structures are directly from the configuration. +// If they're going to be modified, a copy should be made. +type AttachResourceConfigTransformer struct { + Module *module.Tree // Module is the root module for the config +} + +func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { + log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...") + + // Go through and find GraphNodeAttachResourceConfig + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachResourceConfig implementations + arn, ok := v.(GraphNodeAttachResourceConfig) + if !ok { + continue + } + + // Determine what we're looking for + addr := arn.ResourceAddr() + log.Printf( + "[TRACE] AttachResourceConfigTransformer: Attach resource "+ + "config request: %s", addr) + + // Get the configuration. + path := normalizeModulePath(addr.Path) + path = path[1:] + tree := t.Module.Child(path) + if tree == nil { + continue + } + + // Go through the resource configs to find the matching config + for _, r := range tree.Config().Resources { + // Get a resource address so we can compare + a, err := parseResourceAddressConfig(r) + if err != nil { + panic(fmt.Sprintf( + "Error parsing config address, this is a bug: %#v", r)) + } + a.Path = addr.Path + + // If this is not the same resource, then continue + if !a.Equals(addr) { + continue + } + + log.Printf("[TRACE] Attaching resource config: %#v", r) + arn.AttachResourceConfig(r) + break + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go new file mode 100644 index 00000000..564ff08f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go @@ -0,0 +1,68 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/dag" +) + +// GraphNodeAttachResourceState is an interface that can be implemented +// to request that a ResourceState is attached to the node. +type GraphNodeAttachResourceState interface { + // The address to the resource for the state + ResourceAddr() *ResourceAddress + + // Sets the state + AttachResourceState(*ResourceState) +} + +// AttachStateTransformer goes through the graph and attaches +// state to nodes that implement the interfaces above.
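For illustration, a hypothetical vertex type satisfying the GraphNodeAttachResourceConfig interface that the transformer above services:

```go
type exampleNode struct {
	addr   *ResourceAddress
	config *config.Resource
}

func (n *exampleNode) ResourceAddr() *ResourceAddress { return n.addr }

func (n *exampleNode) AttachResourceConfig(c *config.Resource) {
	// The attached struct comes straight from the configuration; per the
	// comment above, copy it before modifying.
	n.config = c
}
```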
+type AttachStateTransformer struct { + State *State // State is the root state +} + +func (t *AttachStateTransformer) Transform(g *Graph) error { + // If no state, then nothing to do + if t.State == nil { + log.Printf("[DEBUG] Not attaching any state: state is nil") + return nil + } + + filter := &StateFilter{State: t.State} + for _, v := range g.Vertices() { + // Only care about nodes requesting that state be attached + an, ok := v.(GraphNodeAttachResourceState) + if !ok { + continue + } + addr := an.ResourceAddr() + + // Get the module state + results, err := filter.Filter(addr.String()) + if err != nil { + return err + } + + // Attach the first resource state we get + found := false + for _, result := range results { + if rs, ok := result.Value.(*ResourceState); ok { + log.Printf( + "[DEBUG] Attaching resource state to %q: %#v", + dag.VertexName(v), rs) + an.AttachResourceState(rs) + found = true + break + } + } + + if !found { + log.Printf( + "[DEBUG] Resource state not found for %q: %s", + dag.VertexName(v), addr) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go index bcfa1233..61bce853 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go @@ -3,121 +3,133 @@ package terraform import ( "errors" "fmt" + "log" + "sync" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" ) -// ConfigTransformer is a GraphTransformer that adds the configuration -// to the graph. The module used to configure this transformer must be -// the root module. We'll look up the child module by the Path in the -// Graph. +// ConfigTransformer is a GraphTransformer that adds all the resources +// from the configuration to the graph. +// +// The module used to configure this transformer must be the root module. +// +// Only resources are added to the graph. Variables, outputs, and +// providers must be added via other transforms. +// +// Unlike ConfigTransformerOld, this transformer creates a graph with +// all resources including module resources, rather than creating module +// nodes that are then "flattened". type ConfigTransformer struct { + Concrete ConcreteResourceNodeFunc + + // Module is the module to add resources from. Module *module.Tree + + // Unique will only add resources that aren't already present in the graph. + Unique bool + + // Mode will only add resources that match the given mode + ModeFilter bool + Mode config.ResourceMode + + l sync.Mutex + uniqueMap map[string]struct{} } func (t *ConfigTransformer) Transform(g *Graph) error { - // A module is required and also must be completely loaded. + // Lock since we use some internal state + t.l.Lock() + defer t.l.Unlock() + + // If no module is given, we don't do anything if t.Module == nil { - return errors.New("module must not be nil") + return nil } + + // If the module isn't loaded, that is simply an error if !t.Module.Loaded() { - return errors.New("module must be loaded") + return errors.New("module must be loaded for ConfigTransformer") } - // Get the module we care about - module := t.Module.Child(g.Path[1:]) - if module == nil { - return nil + // Reset the uniqueness map. If we're tracking uniques, then populate + // it with addresses.
+ t.uniqueMap = make(map[string]struct{}) + defer func() { t.uniqueMap = nil }() + if t.Unique { + for _, v := range g.Vertices() { + if rn, ok := v.(GraphNodeResource); ok { + t.uniqueMap[rn.ResourceAddr().String()] = struct{}{} + } + } } - // Get the configuration for this module - config := module.Config() - - // Create the node list we'll use for the graph - nodes := make([]graphNodeConfig, 0, - (len(config.Variables)+ - len(config.ProviderConfigs)+ - len(config.Modules)+ - len(config.Resources)+ - len(config.Outputs))*2) - - // Write all the variables out - for _, v := range config.Variables { - nodes = append(nodes, &GraphNodeConfigVariable{ - Variable: v, - ModuleTree: t.Module, - ModulePath: g.Path, - }) - } + // Start the transformation process + return t.transform(g, t.Module) +} - // Write all the provider configs out - for _, pc := range config.ProviderConfigs { - nodes = append(nodes, &GraphNodeConfigProvider{Provider: pc}) +func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error { + // If no config, do nothing + if m == nil { + return nil } - // Write all the resources out - for _, r := range config.Resources { - nodes = append(nodes, &GraphNodeConfigResource{ - Resource: r, - Path: g.Path, - }) + // Add our resources + if err := t.transformSingle(g, m); err != nil { + return err } - // Write all the modules out - children := module.Children() - for _, m := range config.Modules { - path := make([]string, len(g.Path), len(g.Path)+1) - copy(path, g.Path) - path = append(path, m.Name) - - nodes = append(nodes, &GraphNodeConfigModule{ - Path: path, - Module: m, - Tree: children[m.Name], - }) + // Transform all the children. + for _, c := range m.Children() { + if err := t.transform(g, c); err != nil { + return err + } } - // Write all the outputs out - for _, o := range config.Outputs { - nodes = append(nodes, &GraphNodeConfigOutput{Output: o}) - } + return nil +} - // Err is where the final error value will go if there is one - var err error +func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { + log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path()) - // Build the graph vertices - for _, n := range nodes { - g.Add(n) - } + // Get the configuration for this module + conf := m.Config() - // Build up the dependencies. We have to do this outside of the above - // loop since the nodes need to be in place for us to build the deps. - for _, n := range nodes { - if missing := g.ConnectDependent(n); len(missing) > 0 { - for _, m := range missing { - err = multierror.Append(err, fmt.Errorf( - "%s: missing dependency: %s", n.Name(), m)) - } + // Build the path we're at + path := m.Path() + + // Write all the resources out + for _, r := range conf.Resources { + // Build the resource address + addr, err := parseResourceAddressConfig(r) + if err != nil { + panic(fmt.Sprintf( + "Error parsing config address, this is a bug: %#v", r)) } - } + addr.Path = path - return err -} + // If this is already in our uniqueness map, don't add it again + if _, ok := t.uniqueMap[addr.String()]; ok { + continue + } + + // Remove non-matching modes + if t.ModeFilter && addr.Mode != t.Mode { + continue + } -// varNameForVar returns the VarName value for an interpolated variable. -// This value is compared to the VarName() value for the nodes within the -// graph to build the graph edges. 
-func varNameForVar(raw config.InterpolatedVariable) string { - switch v := raw.(type) { - case *config.ModuleVariable: - return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field) - case *config.ResourceVariable: - return v.ResourceId() - case *config.UserVariable: - return fmt.Sprintf("var.%s", v.Name) - default: - return "" + // Build the abstract node and the concrete one + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + // Add it to the graph + g.Add(node) } + + return nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go new file mode 100644 index 00000000..92f9888d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go @@ -0,0 +1,80 @@ +package terraform + +import ( + "errors" + + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// FlatConfigTransformer is a GraphTransformer that adds the configuration +// to the graph. The module used to configure this transformer must be +// the root module. +// +// This transform adds the nodes but doesn't connect any of the references. +// The ReferenceTransformer should be used for that. +// +// NOTE: In relation to ConfigTransformer: this is a newer generation config +// transformer. It puts the _entire_ config into the graph (there is no +// "flattening" step as before). +type FlatConfigTransformer struct { + Concrete ConcreteResourceNodeFunc // What to turn resources into + + Module *module.Tree +} + +func (t *FlatConfigTransformer) Transform(g *Graph) error { + // If no module, we do nothing + if t.Module == nil { + return nil + } + + // If the module is not loaded, that is an error + if !t.Module.Loaded() { + return errors.New("module must be loaded") + } + + return t.transform(g, t.Module) +} + +func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error { + // If no module, no problem + if m == nil { + return nil + } + + // Transform all the children. + for _, c := range m.Children() { + if err := t.transform(g, c); err != nil { + return err + } + } + + // Get the configuration for this module + config := m.Config() + + // Write all the resources out + for _, r := range config.Resources { + // Grab the address for this resource + addr, err := parseResourceAddressConfig(r) + if err != nil { + return err + } + addr.Path = m.Path() + + // Build the abstract resource. We have the config already so + // we'll just pre-populate that. + abstract := &NodeAbstractResource{ + Addr: addr, + Config: r, + } + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go new file mode 100644 index 00000000..ec412582 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go @@ -0,0 +1,23 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/config" +) + +// varNameForVar returns the VarName value for an interpolated variable. +// This value is compared to the VarName() value for the nodes within the +// graph to build the graph edges. 
+func varNameForVar(raw config.InterpolatedVariable) string { + switch v := raw.(type) { + case *config.ModuleVariable: + return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field) + case *config.ResourceVariable: + return v.ResourceId() + case *config.UserVariable: + return fmt.Sprintf("var.%s", v.Name) + default: + return "" + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go new file mode 100644 index 00000000..83415f35 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go @@ -0,0 +1,28 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/dag" +) + +// CountBoundaryTransformer adds a node that depends on everything else +// so that it runs last in order to clean up the state for nodes that +// are on the "count boundary": "foo.0" when only one exists becomes "foo" +type CountBoundaryTransformer struct{} + +func (t *CountBoundaryTransformer) Transform(g *Graph) error { + node := &NodeCountBoundary{} + g.Add(node) + + // Depends on everything + for _, v := range g.Vertices() { + // Don't connect to ourselves + if v == node { + continue + } + + // Connect! + g.Connect(dag.BasicEdge(node, v)) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go index fa3143c3..87a1f9c9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go @@ -12,6 +12,9 @@ type DeposedTransformer struct { // View, if non-empty, is the ModuleState.View used around the state // to find deposed resources. View string + + // The provider used by the resources which were deposed + ResolvedProvider string } func (t *DeposedTransformer) Transform(g *Graph) error { @@ -33,14 +36,16 @@ func (t *DeposedTransformer) Transform(g *Graph) error { if len(rs.Deposed) == 0 { continue } + deposed := rs.Deposed for i, _ := range deposed { g.Add(&graphNodeDeposedResource{ - Index: i, - ResourceName: k, - ResourceType: rs.Type, - Provider: rs.Provider, + Index: i, + ResourceName: k, + ResourceType: rs.Type, + ProviderName: rs.Provider, + ResolvedProvider: t.ResolvedProvider, }) } } @@ -50,18 +55,23 @@ // graphNodeDeposedResource is the graph vertex representing a deposed resource. type graphNodeDeposedResource struct { - Index int - ResourceName string - ResourceType string - Provider string + Index int + ResourceName string + ResourceType string + ProviderName string + ResolvedProvider string } func (n *graphNodeDeposedResource) Name() string { return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index) } -func (n *graphNodeDeposedResource) ProvidedBy() []string { - return []string{resourceProvider(n.ResourceName, n.Provider)} +func (n *graphNodeDeposedResource) ProvidedBy() string { + return resourceProvider(n.ResourceName, n.ProviderName) +} + +func (n *graphNodeDeposedResource) SetProvider(p string) { + n.ResolvedProvider = p } // GraphNodeEvalable impl.
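A sketch of the provider-resolution handshake on the node above; the field values are illustrative:

```go
n := &graphNodeDeposedResource{
	Index:        0,
	ResourceName: "aws_instance.web",
	ResourceType: "aws_instance",
}
name := n.ProvidedBy() // derived from the resource name, e.g. "aws"
n.SetProvider(name)    // recorded as ResolvedProvider for use in EvalTree
```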
@@ -72,7 +82,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} // Build instance info - info := &InstanceInfo{Id: n.ResourceName, Type: n.ResourceType} + info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType} seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info}) // Refresh the resource @@ -81,7 +91,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalReadStateDeposed{ @@ -98,7 +108,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { &EvalWriteStateDeposed{ Name: n.ResourceName, ResourceType: n.ResourceType, - Provider: n.Provider, + Provider: n.ResolvedProvider, State: &state, Index: n.Index, }, @@ -114,7 +124,7 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalReadStateDeposed{ @@ -127,6 +137,12 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { State: &state, Output: &diff, }, + // Call pre-apply hook + &EvalApplyPre{ + Info: info, + State: &state, + Diff: &diff, + }, &EvalApply{ Info: info, State: &state, @@ -141,10 +157,15 @@ func (n *graphNodeDeposedResource) EvalTree() EvalNode { &EvalWriteStateDeposed{ Name: n.ResourceName, ResourceType: n.ResourceType, - Provider: n.Provider, + Provider: n.ResolvedProvider, State: &state, Index: n.Index, }, + &EvalApplyPost{ + Info: info, + State: &state, + Error: &err, + }, &EvalReturnError{ Error: &err, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy.go deleted file mode 100644 index af8ccc4a..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy.go +++ /dev/null @@ -1,239 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeDestroyable is the interface that nodes that can be destroyed -// must implement. This is used to automatically handle the creation of -// destroy nodes in the graph and the dependency ordering of those destroys. -type GraphNodeDestroyable interface { - // DestroyNode returns the node used for the destroy with the given - // mode. If this returns nil, then a destroy node for that mode - // will not be added. - DestroyNode() GraphNodeDestroy -} - -// GraphNodeDestroy is the interface that must implemented by -// nodes that destroy. -type GraphNodeDestroy interface { - dag.Vertex - - // CreateBeforeDestroy is called to check whether this node - // should be created before it is destroyed. The CreateBeforeDestroy - // transformer uses this information to setup the graph. - CreateBeforeDestroy() bool - - // CreateNode returns the node used for the create side of this - // destroy. This must already exist within the graph. - CreateNode() dag.Vertex -} - -// GraphNodeDestroyPrunable is the interface that can be implemented to -// signal that this node can be pruned depending on state. -type GraphNodeDestroyPrunable interface { - // DestroyInclude is called to check if this node should be included - // with the given state. The state and diff must NOT be modified. - DestroyInclude(*ModuleDiff, *ModuleState) bool -} - -// GraphNodeEdgeInclude can be implemented to not include something -// as an edge within the destroy graph. 
This is usually done because it -// might cause unnecessary cycles. -type GraphNodeDestroyEdgeInclude interface { - DestroyEdgeInclude(dag.Vertex) bool -} - -// DestroyTransformer is a GraphTransformer that creates the destruction -// nodes for things that _might_ be destroyed. -type DestroyTransformer struct { - FullDestroy bool -} - -func (t *DestroyTransformer) Transform(g *Graph) error { - var connect, remove []dag.Edge - nodeToCn := make(map[dag.Vertex]dag.Vertex, len(g.Vertices())) - nodeToDn := make(map[dag.Vertex]dag.Vertex, len(g.Vertices())) - for _, v := range g.Vertices() { - // If it is not a destroyable, we don't care - cn, ok := v.(GraphNodeDestroyable) - if !ok { - continue - } - - // Grab the destroy side of the node and connect it through - n := cn.DestroyNode() - if n == nil { - continue - } - - // Store it - nodeToCn[n] = cn - nodeToDn[cn] = n - - // If the creation node is equal to the destroy node, then - // don't do any of the edge jump rope below. - if n.(interface{}) == cn.(interface{}) { - continue - } - - // Add it to the graph - g.Add(n) - - // Inherit all the edges from the old node - downEdges := g.DownEdges(v).List() - for _, edgeRaw := range downEdges { - // If this thing specifically requests to not be depended on - // by destroy nodes, then don't. - if i, ok := edgeRaw.(GraphNodeDestroyEdgeInclude); ok && - !i.DestroyEdgeInclude(v) { - continue - } - - g.Connect(dag.BasicEdge(n, edgeRaw.(dag.Vertex))) - } - - // Add a new edge to connect the node to be created to - // the destroy node. - connect = append(connect, dag.BasicEdge(v, n)) - } - - // Go through the nodes we added and determine if they depend - // on any nodes with a destroy node. If so, depend on that instead. - for n, _ := range nodeToCn { - for _, downRaw := range g.DownEdges(n).List() { - target := downRaw.(dag.Vertex) - cn2, ok := target.(GraphNodeDestroyable) - if !ok { - continue - } - - newTarget := nodeToDn[cn2] - if newTarget == nil { - continue - } - - // Make the new edge and transpose - connect = append(connect, dag.BasicEdge(newTarget, n)) - - // Remove the old edge - remove = append(remove, dag.BasicEdge(n, target)) - } - } - - // Atomatically add/remove the edges - for _, e := range connect { - g.Connect(e) - } - for _, e := range remove { - g.RemoveEdge(e) - } - - return nil -} - -// CreateBeforeDestroyTransformer is a GraphTransformer that modifies -// the destroys of some nodes so that the creation happens before the -// destroy. -type CreateBeforeDestroyTransformer struct{} - -func (t *CreateBeforeDestroyTransformer) Transform(g *Graph) error { - // We "stage" the edge connections/destroys in these slices so that - // while we're doing the edge transformations (transpositions) in - // the graph, we're not affecting future edge transpositions. These - // slices let us stage ALL the changes that WILL happen so that all - // of the transformations happen atomically. - var connect, destroy []dag.Edge - - for _, v := range g.Vertices() { - // We only care to use the destroy nodes - dn, ok := v.(GraphNodeDestroy) - if !ok { - continue - } - - // If the node doesn't need to create before destroy, then continue - if !dn.CreateBeforeDestroy() { - continue - } - - // Get the creation side of this node - cn := dn.CreateNode() - - // Take all the things which depend on the creation node and - // make them dependencies on the destruction. 
Clarifying this
- // with an example: if you have a web server and a load balancer
- // and the load balancer depends on the web server, then when we
- // do a create before destroy, we want to make sure the steps are:
- //
- // 1.) Create new web server
- // 2.) Update load balancer
- // 3.) Delete old web server
- //
- // This ensures that.
- for _, sourceRaw := range g.UpEdges(cn).List() {
- source := sourceRaw.(dag.Vertex)
-
- // If the graph has a "root" node (one added by a RootTransformer and not
- // just a resource that happens to have no ancestors), we don't want to
- // add any edges to it, because then it ceases to be a root.
- if _, ok := source.(graphNodeRoot); ok {
- continue
- }
-
- connect = append(connect, dag.BasicEdge(dn, source))
- }
-
- // Swap the edge so that the destroy depends on the creation
- // happening...
- connect = append(connect, dag.BasicEdge(dn, cn))
- destroy = append(destroy, dag.BasicEdge(cn, dn))
- }
-
- for _, edge := range connect {
- g.Connect(edge)
- }
- for _, edge := range destroy {
- g.RemoveEdge(edge)
- }
-
- return nil
-}
-
-// PruneDestroyTransformer is a GraphTransformer that removes the destroy
-// nodes that aren't in the diff.
-type PruneDestroyTransformer struct {
- Diff *Diff
- State *State
-}
-
-func (t *PruneDestroyTransformer) Transform(g *Graph) error {
- for _, v := range g.Vertices() {
- // If it is not a destroyer, we don't care
- dn, ok := v.(GraphNodeDestroyPrunable)
- if !ok {
- continue
- }
-
- path := g.Path
- if pn, ok := v.(GraphNodeSubPath); ok {
- path = pn.Path()
- }
-
- var modDiff *ModuleDiff
- var modState *ModuleState
- if t.Diff != nil {
- modDiff = t.Diff.ModuleByPath(path)
- }
- if t.State != nil {
- modState = t.State.ModuleByPath(path)
- }
-
- // Remove it if we should
- if !dn.DestroyInclude(modDiff, modState) {
- g.Remove(v)
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
new file mode 100644
index 00000000..edfb460b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -0,0 +1,257 @@
+package terraform
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyerCBD must be implemented by nodes that might be
+// create-before-destroy destroyers.
+type GraphNodeDestroyerCBD interface {
+ GraphNodeDestroyer
+
+ // CreateBeforeDestroy returns true if this node represents a node
+ // that is doing a CBD.
+ CreateBeforeDestroy() bool
+
+ // ModifyCreateBeforeDestroy is called when the CBD state of a node
+ // is changed dynamically. This can return an error if this isn't
+ // allowed.
+ ModifyCreateBeforeDestroy(bool) error
+}
+
+// CBDEdgeTransformer modifies the edges of CBD nodes that went through
+// the DestroyEdgeTransformer to have the right dependencies. There are
+// two real tasks here:
+//
+// 1. With CBD, the destroy edge is inverted: the destroy depends on
+// the creation.
+//
+// 2. A_d must depend on resources that depend on A. This is to enable
+// the destroy to only happen once nodes that depend on A successfully
+// update to A. Example: adding a web server updates the load balancer
+// before deleting the old web server.
+//
+type CBDEdgeTransformer struct {
+ // Module and State are only needed to look up dependencies in
+ // any way possible. Either can be nil if not available.
+ Module *module.Tree
+ State *State
+}
+
+func (t *CBDEdgeTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
+
+ // Go through and reverse any destroy edges
+ destroyMap := make(map[string][]dag.Vertex)
+ for _, v := range g.Vertices() {
+ dn, ok := v.(GraphNodeDestroyerCBD)
+ if !ok {
+ continue
+ }
+
+ if !dn.CreateBeforeDestroy() {
+ // If there are no CBD ancestors (dependent nodes), then we
+ // do nothing here.
+ if !t.hasCBDAncestor(g, v) {
+ continue
+ }
+
+ // If this isn't naturally a CBD node, this means that an ancestor is
+ // and we need to auto-upgrade this node to CBD. We do this because
+ // a CBD node depending on non-CBD will result in cycles. To avoid this,
+ // we always attempt to upgrade it.
+ if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
+ return fmt.Errorf(
+ "%s: must have create before destroy enabled because "+
+ "a dependent resource has CBD enabled. However, when "+
+ "attempting to automatically do this, an error occurred: %s",
+ dag.VertexName(v), err)
+ }
+ }
+
+ // Find the destroy edge. There should only be one.
+ for _, e := range g.EdgesTo(v) {
+ // Not a destroy edge, ignore it
+ de, ok := e.(*DestroyEdge)
+ if !ok {
+ continue
+ }
+
+ log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
+ dag.VertexName(de.Source()), dag.VertexName(de.Target()))
+
+ // Found it! Invert.
+ g.RemoveEdge(de)
+ g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()})
+ }
+
+ // If the address has an index, we strip that. Our depMap creation
+ // graph doesn't expand counts so we don't currently get _exact_
+ // dependencies. One day when we limit dependencies more exactly
+ // this will have to change. We have a test case covering this
+ // (depNonCBDCountBoth) so it'll be caught.
+ addr := dn.DestroyAddr()
+ if addr.Index >= 0 {
+ addr = addr.Copy() // Copy so that we don't modify any pointers
+ addr.Index = -1
+ }
+
+ // Add this to the list of nodes that we need to fix up
+ // the edges for (step 2 above in the docs).
+ key := addr.String()
+ destroyMap[key] = append(destroyMap[key], v)
+ }
+
+ // If we have no CBD nodes, then our work here is done
+ if len(destroyMap) == 0 {
+ return nil
+ }
+
+ // We have CBD nodes. We now have to move on to the much more difficult
+ // task of connecting dependencies of the creation side of the destroy
+ // to the destruction node. The easiest way to explain this is an example:
+ //
+ // Given a pre-destroy dependence of: A => B
+ // And A has CBD set.
+ //
+ // The resulting graph should be: A => B => A_d
+ //
+ // The key here is that B happens before A is destroyed. This is to
+ // facilitate the primary purpose for CBD: making sure that downstreams
+ // are properly updated to avoid downtime before the resource is destroyed.
+ //
+ // We can't trust that the resource being destroyed or anything that
+ // depends on it is actually in our current graph so we make a new
+ // graph in order to determine those dependencies and add them in.
+ log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...")
+ depMap, err := t.depMap(destroyMap)
+ if err != nil {
+ return err
+ }
+
+ // We now have the mapping of resource addresses to the destroy
+ // nodes they need to depend on. We now go through our own vertices to
+ // find any matching these addresses and make the connection.
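To make the inverted-edge rule above concrete, here is a minimal, self-contained sketch; the edge struct, the cbd set, and invertDestroyEdges are illustration-only stand-ins, not the vendored dag types:

    package main

    import "fmt"

    type edge struct{ from, to string }

    // invertDestroyEdges flips each destroy edge whose destroyer has
    // create-before-destroy set, so "A_d -> A" becomes "A -> A_d"
    // (a sketch of step 1 in the doc comment above; stand-in types).
    func invertDestroyEdges(edges []edge, cbd map[string]bool) []edge {
        out := make([]edge, 0, len(edges))
        for _, e := range edges {
            if cbd[e.from] { // e.from is the destroy node, e.g. "A_d"
                out = append(out, edge{from: e.to, to: e.from})
                continue
            }
            out = append(out, e)
        }
        return out
    }

    func main() {
        got := invertDestroyEdges([]edge{{"A_d", "A"}}, map[string]bool{"A_d": true})
        fmt.Println(got) // [{A A_d}]
    }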
+ for _, v := range g.Vertices() {
+ // We're looking for creators
+ rn, ok := v.(GraphNodeCreator)
+ if !ok {
+ continue
+ }
+
+ // Get the address
+ addr := rn.CreateAddr()
+
+ // If the address has an index, we strip that. Our depMap creation
+ // graph doesn't expand counts so we don't currently get _exact_
+ // dependencies. One day when we limit dependencies more exactly
+ // this will have to change. We have a test case covering this
+ // (depNonCBDCount) so it'll be caught.
+ if addr.Index >= 0 {
+ addr = addr.Copy() // Copy so that we don't modify any pointers
+ addr.Index = -1
+ }
+
+ // If there is nothing this resource should depend on, ignore it
+ key := addr.String()
+ dns, ok := depMap[key]
+ if !ok {
+ continue
+ }
+
+ // We have nodes! Make the connection
+ for _, dn := range dns {
+ log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s",
+ dag.VertexName(dn), dag.VertexName(v))
+ g.Connect(dag.BasicEdge(dn, v))
+ }
+ }
+
+ return nil
+}
+
+func (t *CBDEdgeTransformer) depMap(
+ destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
+ // Build the graph of our config, this ensures that all resources
+ // are present in the graph.
+ g, err := (&BasicGraphBuilder{
+ Steps: []GraphTransformer{
+ &FlatConfigTransformer{Module: t.Module},
+ &AttachResourceConfigTransformer{Module: t.Module},
+ &AttachStateTransformer{State: t.State},
+ &ReferenceTransformer{},
+ },
+ Name: "CBDEdgeTransformer",
+ }).Build(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Using this graph, build the list of destroy nodes that each resource
+ // address should depend on. For example, when we find B, we map the
+ // address of B to A_d in the "depMap" variable below.
+ depMap := make(map[string][]dag.Vertex)
+ for _, v := range g.Vertices() {
+ // We're looking for resources.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ // Get the address
+ addr := rn.ResourceAddr()
+ key := addr.String()
+
+ // Get the destroy nodes that are destroying this resource.
+ // If there aren't any, then we don't need to worry about
+ // any connections.
+ dns, ok := destroyMap[key]
+ if !ok {
+ continue
+ }
+
+ // Get the nodes that depend on this one. In the example above:
+ // finding B in A => B.
+ for _, v := range g.UpEdges(v).List() {
+ // We're looking for resources.
+ rn, ok := v.(GraphNodeResource)
+ if !ok {
+ continue
+ }
+
+ // Keep track of the destroy nodes that this address
+ // needs to depend on.
+ key := rn.ResourceAddr().String()
+ depMap[key] = append(depMap[key], dns...)
+ }
+ }
+
+ return depMap, nil
+}
+
+// hasCBDAncestor returns true if any ancestor (node that depends on this)
+// has CBD set.
+func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
+ s, _ := g.Ancestors(v)
+ if s == nil {
+ return true
+ }
+
+ for _, v := range s.List() {
+ dn, ok := v.(GraphNodeDestroyerCBD)
+ if !ok {
+ continue
+ }
+
+ if dn.CreateBeforeDestroy() {
+ // some ancestor is CreateBeforeDestroy, so we need to follow suit
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
new file mode 100644
index 00000000..a06ff292
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -0,0 +1,267 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config/module"
+ "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyer must be implemented by nodes that destroy resources.
+type GraphNodeDestroyer interface {
+ dag.Vertex
+
+ // ResourceAddr is the address of the resource that is being
+ // destroyed by this node. If this returns nil, then this node
+ // is not destroying anything.
+ DestroyAddr() *ResourceAddress
+}
+
+// GraphNodeCreator must be implemented by nodes that create OR update resources.
+type GraphNodeCreator interface {
+ // ResourceAddr is the address of the resource being created or updated
+ CreateAddr() *ResourceAddress
+}
+
+// DestroyEdgeTransformer is a GraphTransformer that creates the proper
+// references for destroy resources. Destroy resources are more complex
+// in that they must depend on the destruction of resources that
+// in turn depend on the CREATION of the node being destroyed.
+//
+// That is complicated. Visually:
+//
+// B_d -> A_d -> A -> B
+//
+// Notice that A destroy depends on B destroy, while B create depends on
+// A create. They're inverted. This must be done for example because often
+// dependent resources will block parent resources from deleting. Concrete
+// example: VPC with subnets, the VPC can't be deleted while there are
+// still subnets.
+type DestroyEdgeTransformer struct {
+ // These are needed to properly build the graph of dependencies
+ // to determine what a destroy node depends on. Any of these can be nil.
+ Module *module.Tree
+ State *State
+}
+
+func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
+ log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
+
+ // Build a map of what is being destroyed (by address string) to
+ // the list of destroyers. In general there will only be one destroyer
+ // but to make it more robust we support multiple.
+ destroyers := make(map[string][]GraphNodeDestroyer)
+ for _, v := range g.Vertices() {
+ dn, ok := v.(GraphNodeDestroyer)
+ if !ok {
+ continue
+ }
+
+ addr := dn.DestroyAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: %s destroying %q",
+ dag.VertexName(dn), key)
+ destroyers[key] = append(destroyers[key], dn)
+ }
+
+ // If we aren't destroying anything, there will be no edges to make
+ // so just exit early and avoid future work.
+ if len(destroyers) == 0 {
+ return nil
+ }
+
+ // Go through and connect creators to destroyers.
Going along with
+ // our example, this makes: A_d => A
+ for _, v := range g.Vertices() {
+ cn, ok := v.(GraphNodeCreator)
+ if !ok {
+ continue
+ }
+
+ addr := cn.CreateAddr()
+ if addr == nil {
+ continue
+ }
+
+ key := addr.String()
+ ds := destroyers[key]
+ if len(ds) == 0 {
+ continue
+ }
+
+ for _, d := range ds {
+ // For illustrating our example
+ a_d := d.(dag.Vertex)
+ a := v
+
+ log.Printf(
+ "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s",
+ dag.VertexName(a), dag.VertexName(a_d))
+
+ g.Connect(&DestroyEdge{S: a, T: a_d})
+ }
+ }
+
+ // This is strange but is the easiest way to get the dependencies
+ // of a node that is being destroyed. We use another graph to make sure
+ // the resource is in the graph and ask for references. We have to do this
+ // because the node that is being destroyed may NOT be in the graph.
+ //
+ // Example: resource A is force new, then destroy A AND create A are
+ // in the graph. BUT if resource A is just pure destroy, then only
+ // destroy A is in the graph, and create A is not.
+ providerFn := func(a *NodeAbstractProvider) dag.Vertex {
+ return &NodeApplyableProvider{NodeAbstractProvider: a}
+ }
+ steps := []GraphTransformer{
+ // Add the local values
+ &LocalTransformer{Module: t.Module},
+
+ // Add outputs and metadata
+ &OutputTransformer{Module: t.Module},
+ &AttachResourceConfigTransformer{Module: t.Module},
+ &AttachStateTransformer{State: t.State},
+
+ TransformProviders(nil, providerFn, t.Module),
+
+ // Add all the variables. We can depend on resources through
+ // variables due to module parameters, and we need to properly
+ // determine that.
+ &RootVariableTransformer{Module: t.Module},
+ &ModuleVariableTransformer{Module: t.Module},
+
+ &ReferenceTransformer{},
+ }
+
+ // Go through all the nodes being destroyed and create a graph.
+ // The resulting graph is only of things being CREATED. For example,
+ // following our example, the resulting graph would be:
+ //
+ // A, B (with no edges)
+ //
+ var tempG Graph
+ var tempDestroyed []dag.Vertex
+ for d, _ := range destroyers {
+ // d is what is being destroyed. We parse the resource address
+ // it came from; it is a panic if this fails.
+ addr, err := ParseResourceAddress(d)
+ if err != nil {
+ panic(err)
+ }
+
+ // This part is a little bit weird but is the best way to
+ // find the dependencies we need to: build a graph and use the
+ // attach config and state transformers then ask for references.
+ abstract := &NodeAbstractResource{Addr: addr}
+ tempG.Add(abstract)
+ tempDestroyed = append(tempDestroyed, abstract)
+
+ // We also add the destroy version here since the destroy can
+ // depend on things that the creation doesn't (destroy provisioners).
+ destroy := &NodeDestroyResource{NodeAbstractResource: abstract}
+ tempG.Add(destroy)
+ tempDestroyed = append(tempDestroyed, destroy)
+ }
+
+ // Run the graph transforms so we have the information we need to
+ // build references.
+ for _, s := range steps {
+ if err := s.Transform(&tempG); err != nil {
+ return err
+ }
+ }
+
+ log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String())
+
+ // Go through all the nodes in the graph and determine what they
+ // depend on.
+ for _, v := range tempDestroyed { + // Find all ancestors of this to determine the edges we'll depend on + vs, err := tempG.Ancestors(v) + if err != nil { + return err + } + + refs := make([]dag.Vertex, 0, vs.Len()) + for _, raw := range vs.List() { + refs = append(refs, raw.(dag.Vertex)) + } + + refNames := make([]string, len(refs)) + for i, ref := range refs { + refNames[i] = dag.VertexName(ref) + } + log.Printf( + "[TRACE] DestroyEdgeTransformer: creation node %q references %s", + dag.VertexName(v), refNames) + + // If we have no references, then we won't need to do anything + if len(refs) == 0 { + continue + } + + // Get the destroy node for this. In the example of our struct, + // we are currently at B and we're looking for B_d. + rn, ok := v.(GraphNodeResource) + if !ok { + continue + } + + addr := rn.ResourceAddr() + if addr == nil { + continue + } + + dns := destroyers[addr.String()] + + // We have dependencies, check if any are being destroyed + // to build the list of things that we must depend on! + // + // In the example of the struct, if we have: + // + // B_d => A_d => A => B + // + // Then at this point in the algorithm we started with B_d, + // we built B (to get dependencies), and we found A. We're now looking + // to see if A_d exists. + var depDestroyers []dag.Vertex + for _, v := range refs { + rn, ok := v.(GraphNodeResource) + if !ok { + continue + } + + addr := rn.ResourceAddr() + if addr == nil { + continue + } + + key := addr.String() + if ds, ok := destroyers[key]; ok { + for _, d := range ds { + depDestroyers = append(depDestroyers, d.(dag.Vertex)) + log.Printf( + "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s", + key, dag.VertexName(d)) + } + } + } + + // Go through and make the connections. Use the variable + // names "a_d" and "b_d" to reference our example. + for _, a_d := range dns { + for _, b_d := range depDestroyers { + if b_d != a_d { + g.Connect(dag.BasicEdge(b_d, a_d)) + } + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go new file mode 100644 index 00000000..ad46d3c6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go @@ -0,0 +1,86 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// DiffTransformer is a GraphTransformer that adds the elements of +// the diff to the graph. +// +// This transform is used for example by the ApplyGraphBuilder to ensure +// that only resources that are being modified are represented in the graph. +// +// Module and State is still required for the DiffTransformer for annotations +// since the Diff doesn't contain all the information required to build the +// complete graph (such as create-before-destroy information). The graph +// is built based on the diff first, though, ensuring that only resources +// that are being modified are present in the graph. +type DiffTransformer struct { + Concrete ConcreteResourceNodeFunc + + Diff *Diff + Module *module.Tree + State *State +} + +func (t *DiffTransformer) Transform(g *Graph) error { + // If the diff is nil or empty (nil is empty) then do nothing + if t.Diff.Empty() { + return nil + } + + // Go through all the modules in the diff. 
+ log.Printf("[TRACE] DiffTransformer: starting") + var nodes []dag.Vertex + for _, m := range t.Diff.Modules { + log.Printf("[TRACE] DiffTransformer: Module: %s", m) + // TODO: If this is a destroy diff then add a module destroy node + + // Go through all the resources in this module. + for name, inst := range m.Resources { + log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst) + + // We have changes! This is a create or update operation. + // First grab the address so we have a unique way to + // reference this resource. + addr, err := parseResourceAddressInternal(name) + if err != nil { + panic(fmt.Sprintf( + "Error parsing internal name, this is a bug: %q", name)) + } + + // Very important: add the module path for this resource to + // the address. Remove "root" from it. + addr.Path = m.Path[1:] + + // If we're destroying, add the destroy node + if inst.Destroy || inst.GetDestroyDeposed() { + abstract := &NodeAbstractResource{Addr: addr} + g.Add(&NodeDestroyResource{NodeAbstractResource: abstract}) + } + + // If we have changes, then add the applyable version + if len(inst.Attributes) > 0 { + // Add the resource to the graph + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + nodes = append(nodes, node) + } + } + } + + // Add all the nodes to the graph + for _, n := range nodes { + g.Add(n) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go index 3251ea36..982c098b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go @@ -24,7 +24,7 @@ type GraphNodeDynamicExpandable interface { // GraphNodeSubgraph is an interface a node can implement if it has // a larger subgraph that should be walked. type GraphNodeSubgraph interface { - Subgraph() *Graph + Subgraph() dag.Grapher } // ExpandTransform is a transformer that does a subgraph expansion @@ -43,23 +43,6 @@ func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) { } // Expand the subgraph! - log.Printf("[DEBUG] vertex %s: static expanding", dag.VertexName(ev)) + log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev)) return ev.Expand(t.Builder) } - -type GraphNodeBasicSubgraph struct { - NameValue string - Graph *Graph -} - -func (n *GraphNodeBasicSubgraph) Name() string { - return n.NameValue -} - -func (n *GraphNodeBasicSubgraph) Subgraph() *Graph { - return n.Graph -} - -func (n *GraphNodeBasicSubgraph) FlattenGraph() *Graph { - return n.Graph -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_flatten.go b/vendor/github.com/hashicorp/terraform/terraform/transform_flatten.go deleted file mode 100644 index 206bf97b..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_flatten.go +++ /dev/null @@ -1,107 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeFlatGraph must be implemented by nodes that have subgraphs -// that they want flattened into the graph. -type GraphNodeFlatGraph interface { - FlattenGraph() *Graph -} - -// GraphNodeFlattenable must be implemented by all nodes that can be -// flattened. If a FlattenGraph returns any nodes that can't be flattened, -// it will be an error. -// -// If Flatten returns nil for the Vertex along with a nil error, it will -// removed from the graph. 
-type GraphNodeFlattenable interface { - Flatten(path []string) (dag.Vertex, error) -} - -// FlattenTransformer is a transformer that goes through the graph, finds -// subgraphs that can be flattened, and flattens them into this graph, -// removing the prior subgraph node. -type FlattenTransformer struct{} - -func (t *FlattenTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - fn, ok := v.(GraphNodeFlatGraph) - if !ok { - continue - } - - // If we don't want to be flattened, don't do it - subgraph := fn.FlattenGraph() - if subgraph == nil { - continue - } - - // Get all the things that depend on this node. We'll re-connect - // dependents later. We have to copy these here since the UpEdges - // value will be deleted after the Remove below. - dependents := make([]dag.Vertex, 0, 5) - for _, v := range g.UpEdges(v).List() { - dependents = append(dependents, v) - } - - // Remove the old node - g.Remove(v) - - // Go through the subgraph and flatten all the nodes - for _, sv := range subgraph.Vertices() { - // If the vertex already has a subpath then we assume it has - // already been flattened. Ignore it. - if _, ok := sv.(GraphNodeSubPath); ok { - continue - } - - fn, ok := sv.(GraphNodeFlattenable) - if !ok { - return fmt.Errorf( - "unflattenable node: %s %T", - dag.VertexName(sv), sv) - } - - v, err := fn.Flatten(subgraph.Path) - if err != nil { - return fmt.Errorf( - "error flattening %s (%T): %s", - dag.VertexName(sv), sv, err) - } - - if v == nil { - subgraph.Remove(v) - } else { - subgraph.Replace(sv, v) - } - } - - // Now that we've handled any changes to the graph that are - // needed, we can add them all to our graph along with their edges. - for _, sv := range subgraph.Vertices() { - g.Add(sv) - } - for _, se := range subgraph.Edges() { - g.Connect(se) - } - - // Connect the dependencies for all the new nodes that we added. - // This will properly connect variables to their sources, for example. - for _, sv := range subgraph.Vertices() { - g.ConnectDependent(sv) - } - - // Re-connect all the things that dependent on the graph - // we just flattened. This should connect them back into the - // correct nodes if their DependentOn() is setup correctly. - for _, v := range dependents { - g.ConnectDependent(v) - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go new file mode 100644 index 00000000..3673771c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go @@ -0,0 +1,38 @@ +package terraform + +import ( + "fmt" + "strings" +) + +// ImportProviderValidateTransformer is a GraphTransformer that goes through +// the providers in the graph and validates that they only depend on variables. +type ImportProviderValidateTransformer struct{} + +func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + // We only care about providers + pv, ok := v.(GraphNodeProvider) + if !ok { + continue + } + + // We only care about providers that reference things + rn, ok := pv.(GraphNodeReferencer) + if !ok { + continue + } + + for _, ref := range rn.References() { + if !strings.HasPrefix(ref, "var.") { + return fmt.Errorf( + "Provider %q depends on non-var %q. Providers for import can currently\n"+ + "only depend on variables or must be hardcoded. 
You can stop import\n"+ + "from loading configurations by specifying `-config=\"\"`.", + pv.ProviderName(), ref) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go index 389c0464..fcbff653 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go @@ -21,8 +21,9 @@ func (t *ImportStateTransformer) Transform(g *Graph) error { } nodes = append(nodes, &graphNodeImportState{ - Addr: addr, - ID: target.ID, + Addr: addr, + ID: target.ID, + ProviderName: target.Provider, }) } @@ -35,8 +36,10 @@ func (t *ImportStateTransformer) Transform(g *Graph) error { } type graphNodeImportState struct { - Addr *ResourceAddress // Addr is the resource address to import to - ID string // ID is the ID to import as + Addr *ResourceAddress // Addr is the resource address to import to + ID string // ID is the ID to import as + ProviderName string // Provider string + ResolvedProvider string // provider node address states []*InstanceState } @@ -45,8 +48,12 @@ func (n *graphNodeImportState) Name() string { return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID) } -func (n *graphNodeImportState) ProvidedBy() []string { - return []string{resourceProvider(n.Addr.Type, "")} +func (n *graphNodeImportState) ProvidedBy() string { + return resourceProvider(n.Addr.Type, n.ProviderName) +} + +func (n *graphNodeImportState) SetProvider(p string) { + n.ResolvedProvider = p } // GraphNodeSubPath @@ -70,7 +77,7 @@ func (n *graphNodeImportState) EvalTree() EvalNode { return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Name: n.ResolvedProvider, Output: &provider, }, &EvalImportState{ @@ -147,9 +154,11 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { // is safe. for i, state := range n.states { g.Add(&graphNodeImportStateSub{ - Target: addrs[i], - Path_: n.Path(), - State: state, + Target: addrs[i], + Path_: n.Path(), + State: state, + ProviderName: n.ProviderName, + ResolvedProvider: n.ResolvedProvider, }) } @@ -167,9 +176,11 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { // and is part of the subgraph. This node is responsible for refreshing // and adding a resource to the state once it is imported. 
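The single-provider contract introduced above (ProvidedBy returning one string instead of a slice, SetProvider storing the resolved address) can be sketched in isolation; importNode and defaultProvider are stand-ins, and defaultProvider only approximates the resourceProvider helper by taking the type prefix:

    package main

    import (
        "fmt"
        "strings"
    )

    type importNode struct {
        resourceType     string
        providerName     string // optional explicit provider from config
        resolvedProvider string // filled in by the provider transformer
    }

    // ProvidedBy returns a single provider address, mirroring the new
    // interface shape (sketch only).
    func (n *importNode) ProvidedBy() string {
        if n.providerName != "" {
            return n.providerName // explicit, possibly aliased provider
        }
        return defaultProvider(n.resourceType)
    }

    func (n *importNode) SetProvider(p string) { n.resolvedProvider = p }

    // defaultProvider derives a provider name from a resource type, e.g.
    // "aws_instance" => "aws" (an assumption standing in for resourceProvider).
    func defaultProvider(resourceType string) string {
        if i := strings.Index(resourceType, "_"); i > 0 {
            return resourceType[:i]
        }
        return resourceType
    }

    func main() {
        n := &importNode{resourceType: "aws_instance"}
        n.SetProvider(n.ProvidedBy())
        fmt.Println(n.resolvedProvider) // "aws"
    }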
type graphNodeImportStateSub struct { - Target *ResourceAddress - State *InstanceState - Path_ []string + Target *ResourceAddress + State *InstanceState + Path_ []string + ProviderName string + ResolvedProvider string } func (n *graphNodeImportStateSub) Name() string { @@ -212,7 +223,7 @@ func (n *graphNodeImportStateSub) EvalTree() EvalNode { return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: resourceProvider(info.Type, ""), + Name: n.ResolvedProvider, Output: &provider, }, &EvalRefresh{ @@ -229,7 +240,7 @@ func (n *graphNodeImportStateSub) EvalTree() EvalNode { &EvalWriteState{ Name: key.String(), ResourceType: info.Type, - Provider: resourceProvider(info.Type, ""), + Provider: n.ResolvedProvider, State: &state, }, }, diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go new file mode 100644 index 00000000..95ecfc0a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go @@ -0,0 +1,40 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" +) + +// LocalTransformer is a GraphTransformer that adds all the local values +// from the configuration to the graph. +type LocalTransformer struct { + Module *module.Tree +} + +func (t *LocalTransformer) Transform(g *Graph) error { + return t.transformModule(g, t.Module) +} + +func (t *LocalTransformer) transformModule(g *Graph, m *module.Tree) error { + if m == nil { + // Can't have any locals if there's no config + return nil + } + + for _, local := range m.Config().Locals { + node := &NodeLocal{ + PathValue: normalizeModulePath(m.Path()), + Config: local, + } + + g.Add(node) + } + + // Also populate locals for child modules + for _, c := range m.Children() { + if err := t.transformModule(g, c); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module.go deleted file mode 100644 index 609873c4..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_module.go +++ /dev/null @@ -1,62 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/dag" -) - -// ModuleDestroyTransformer is a GraphTransformer that adds a node -// to the graph that will just mark the full module for destroy in -// the destroy scenario. -type ModuleDestroyTransformer struct{} - -func (t *ModuleDestroyTransformer) Transform(g *Graph) error { - // Create the node - n := &graphNodeModuleDestroy{Path: g.Path} - - // Add it to the graph. We don't need any edges because - // it can happen whenever. - g.Add(n) - - return nil -} - -type graphNodeModuleDestroy struct { - Path []string -} - -func (n *graphNodeModuleDestroy) Name() string { - return "plan-destroy" -} - -// GraphNodeEvalable impl. -func (n *graphNodeModuleDestroy) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkPlanDestroy}, - Node: &EvalDiffDestroyModule{Path: n.Path}, - } -} - -// GraphNodeFlattenable impl. 
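The recursive module-tree walk used by LocalTransformer above is the same shape most transformers in this patch follow; a minimal sketch with a stand-in moduleTree type rather than the real *module.Tree:

    package main

    import "fmt"

    // moduleTree is a stand-in for *module.Tree (sketch only).
    type moduleTree struct {
        path     []string
        locals   []string
        children []*moduleTree
    }

    // collectLocals mirrors LocalTransformer.transformModule: visit this
    // module's locals, then recurse into each child module.
    func collectLocals(m *moduleTree, out *[]string) {
        if m == nil {
            return // no config means no locals
        }
        for _, l := range m.locals {
            *out = append(*out, fmt.Sprintf("local.%s in %v", l, m.path))
        }
        for _, c := range m.children {
            collectLocals(c, out)
        }
    }

    func main() {
        root := &moduleTree{path: []string{"root"}, locals: []string{"name"},
            children: []*moduleTree{{path: []string{"root", "net"}, locals: []string{"cidr"}}}}
        var out []string
        collectLocals(root, &out)
        fmt.Println(out)
    }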
-func (n *graphNodeModuleDestroy) Flatten(p []string) (dag.Vertex, error) { - return &graphNodeModuleDestroyFlat{ - graphNodeModuleDestroy: n, - PathValue: p, - }, nil -} - -type graphNodeModuleDestroyFlat struct { - *graphNodeModuleDestroy - - PathValue []string -} - -func (n *graphNodeModuleDestroyFlat) Name() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), n.graphNodeModuleDestroy.Name()) -} - -func (n *graphNodeModuleDestroyFlat) Path() []string { - return n.PathValue -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go new file mode 100644 index 00000000..467950bd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go @@ -0,0 +1,120 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// ModuleVariableTransformer is a GraphTransformer that adds all the variables +// in the configuration to the graph. +// +// This only adds variables that are referenced by other things in the graph. +// If a module variable is not referenced, it won't be added to the graph. +type ModuleVariableTransformer struct { + Module *module.Tree + + DisablePrune bool // True if pruning unreferenced should be disabled +} + +func (t *ModuleVariableTransformer) Transform(g *Graph) error { + return t.transform(g, nil, t.Module) +} + +func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error { + // If no config, no variables + if m == nil { + return nil + } + + // Transform all the children. This must be done BEFORE the transform + // above since child module variables can reference parent module variables. + for _, c := range m.Children() { + if err := t.transform(g, m, c); err != nil { + return err + } + } + + // If we have a parent, we can determine if a module variable is being + // used, so we transform this. + if parent != nil { + if err := t.transformSingle(g, parent, m); err != nil { + return err + } + } + + return nil +} + +func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error { + // If we have no vars, we're done! + vars := m.Config().Variables + if len(vars) == 0 { + log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path()) + return nil + } + + // Look for usage of this module + var mod *config.Module + for _, modUse := range parent.Config().Modules { + if modUse.Name == m.Name() { + mod = modUse + break + } + } + if mod == nil { + log.Printf("[INFO] Module %#v not used, not adding variables", m.Path()) + return nil + } + + // Build the reference map so we can determine if we're referencing things. + refMap := NewReferenceMap(g.Vertices()) + + // Add all variables here + for _, v := range vars { + // Determine the value of the variable. If it isn't in the + // configuration then it was never set and that's not a problem. + var value *config.RawConfig + if raw, ok := mod.RawConfig.Raw[v.Name]; ok { + var err error + value, err = config.NewRawConfig(map[string]interface{}{ + v.Name: raw, + }) + if err != nil { + // This shouldn't happen because it is already in + // a RawConfig above meaning it worked once before. + panic(err) + } + } + + // Build the node. + // + // NOTE: For now this is just an "applyable" variable. 
As we build + // new graph builders for the other operations I suspect we'll + // find a way to parameterize this, require new transforms, etc. + node := &NodeApplyableModuleVariable{ + PathValue: normalizeModulePath(m.Path()), + Config: v, + Value: value, + Module: t.Module, + } + + if !t.DisablePrune { + // If the node is not referenced by anything, then we don't need + // to include it since it won't be used. + if matches := refMap.ReferencedBy(node); len(matches) == 0 { + log.Printf( + "[INFO] Not including %q in graph, nothing depends on it", + dag.VertexName(node)) + continue + } + } + + // Add it! + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_noop.go b/vendor/github.com/hashicorp/terraform/terraform/transform_noop.go deleted file mode 100644 index e36b6193..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_noop.go +++ /dev/null @@ -1,104 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeNoopPrunable can be implemented by nodes that can be -// pruned if they are noops. -type GraphNodeNoopPrunable interface { - Noop(*NoopOpts) bool -} - -// NoopOpts are the options available to determine if your node is a noop. -type NoopOpts struct { - Graph *Graph - Vertex dag.Vertex - Diff *Diff - State *State - ModDiff *ModuleDiff - ModState *ModuleState -} - -// PruneNoopTransformer is a graph transform that prunes nodes that -// consider themselves no-ops. This is done to both simplify the graph -// as well as to remove graph nodes that might otherwise cause problems -// during the graph run. Therefore, this transformer isn't completely -// an optimization step, and can instead be considered critical to -// Terraform operations. -// -// Example of the above case: variables for modules interpolate their values. -// Interpolation will fail on destruction (since attributes are being deleted), -// but variables shouldn't even eval if there is nothing that will consume -// the variable. Therefore, variables can note that they can be omitted -// safely in this case. -// -// The PruneNoopTransformer will prune nodes depth first, and will automatically -// create connect through the dependencies of pruned nodes. For example, -// if we have a graph A => B => C (A depends on B, etc.), and B decides to -// be removed, we'll still be left with A => C; the edge will be properly -// connected. -type PruneNoopTransformer struct { - Diff *Diff - State *State -} - -func (t *PruneNoopTransformer) Transform(g *Graph) error { - // Find the leaves. - leaves := make([]dag.Vertex, 0, 10) - for _, v := range g.Vertices() { - if g.DownEdges(v).Len() == 0 { - leaves = append(leaves, v) - } - } - - // Do a depth first walk from the leaves and remove things. - return g.ReverseDepthFirstWalk(leaves, func(v dag.Vertex, depth int) error { - // We need a prunable - pn, ok := v.(GraphNodeNoopPrunable) - if !ok { - return nil - } - - // Start building the noop opts - path := g.Path - if pn, ok := v.(GraphNodeSubPath); ok { - path = pn.Path() - } - - var modDiff *ModuleDiff - var modState *ModuleState - if t.Diff != nil { - modDiff = t.Diff.ModuleByPath(path) - } - if t.State != nil { - modState = t.State.ModuleByPath(path) - } - - // Determine if its a noop. If it isn't, just return - noop := pn.Noop(&NoopOpts{ - Graph: g, - Vertex: v, - Diff: t.Diff, - State: t.State, - ModDiff: modDiff, - ModState: modState, - }) - if !noop { - return nil - } - - // It is a noop! We first preserve edges. 
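The edge-preserving removal performed below (connect every dependent of the pruned node to every dependency of it) can be shown on its own; a minimal sketch using string vertices instead of dag.Vertex:

    package main

    import "fmt"

    // bridge returns the edges that keep a graph connected when a node v
    // is pruned: each node that depended on v now depends on everything v
    // depended on, so A => v => C collapses to A => C (sketch only).
    func bridge(up, down []string) [][2]string {
        var edges [][2]string
        for _, u := range up {
            for _, d := range down {
                edges = append(edges, [2]string{u, d})
            }
        }
        return edges
    }

    func main() {
        fmt.Println(bridge([]string{"A"}, []string{"C"})) // [[A C]]
    }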
- up := g.UpEdges(v).List() - for _, downV := range g.DownEdges(v).List() { - for _, upV := range up { - g.Connect(dag.BasicEdge(upV, downV)) - } - } - - // Then remove it - g.Remove(v) - - return nil - }) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan.go deleted file mode 100644 index f47f5168..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan.go +++ /dev/null @@ -1,418 +0,0 @@ -package terraform - -import ( - "fmt" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeStateRepresentative is an interface that can be implemented by -// a node to say that it is representing a resource in the state. -type GraphNodeStateRepresentative interface { - StateId() []string -} - -// OrphanTransformer is a GraphTransformer that adds orphans to the -// graph. This transformer adds both resource and module orphans. -type OrphanTransformer struct { - // State is the global state. We require the global state to - // properly find module orphans at our path. - State *State - - // Module is the root module. We'll look up the proper configuration - // using the graph path. - Module *module.Tree - - // View, if non-nil will set a view on the module state. - View string -} - -func (t *OrphanTransformer) Transform(g *Graph) error { - if t.State == nil { - // If the entire state is nil, there can't be any orphans - return nil - } - - // Build up all our state representatives - resourceRep := make(map[string]struct{}) - for _, v := range g.Vertices() { - if sr, ok := v.(GraphNodeStateRepresentative); ok { - for _, k := range sr.StateId() { - resourceRep[k] = struct{}{} - } - } - } - - var config *config.Config - if t.Module != nil { - if module := t.Module.Child(g.Path[1:]); module != nil { - config = module.Config() - } - } - - var resourceVertexes []dag.Vertex - if state := t.State.ModuleByPath(g.Path); state != nil { - // If we have state, then we can have orphan resources - - // If we have a view, get the view - if t.View != "" { - state = state.View(t.View) - } - - resourceOrphans := state.Orphans(config) - - resourceVertexes = make([]dag.Vertex, len(resourceOrphans)) - for i, k := range resourceOrphans { - // If this orphan is represented by some other node somehow, - // then ignore it. - if _, ok := resourceRep[k]; ok { - continue - } - - rs := state.Resources[k] - - rsk, err := ParseResourceStateKey(k) - if err != nil { - return err - } - resourceVertexes[i] = g.Add(&graphNodeOrphanResource{ - Path: g.Path, - ResourceKey: rsk, - Provider: rs.Provider, - dependentOn: rs.Dependencies, - }) - } - } - - // Go over each module orphan and add it to the graph. We store the - // vertexes and states outside so that we can connect dependencies later. - moduleOrphans := t.State.ModuleOrphans(g.Path, config) - moduleVertexes := make([]dag.Vertex, len(moduleOrphans)) - for i, path := range moduleOrphans { - var deps []string - if s := t.State.ModuleByPath(path); s != nil { - deps = s.Dependencies - } - - moduleVertexes[i] = g.Add(&graphNodeOrphanModule{ - Path: path, - dependentOn: deps, - }) - } - - // Now do the dependencies. We do this _after_ adding all the orphan - // nodes above because there are cases in which the orphans themselves - // depend on other orphans. 
- - // Resource dependencies - for _, v := range resourceVertexes { - g.ConnectDependent(v) - } - - // Module dependencies - for _, v := range moduleVertexes { - g.ConnectDependent(v) - } - - return nil -} - -// graphNodeOrphanModule is the graph vertex representing an orphan resource.. -type graphNodeOrphanModule struct { - Path []string - - dependentOn []string -} - -func (n *graphNodeOrphanModule) DependableName() []string { - return []string{n.dependableName()} -} - -func (n *graphNodeOrphanModule) DependentOn() []string { - return n.dependentOn -} - -func (n *graphNodeOrphanModule) Name() string { - return fmt.Sprintf("%s (orphan)", n.dependableName()) -} - -func (n *graphNodeOrphanModule) dependableName() string { - return fmt.Sprintf("module.%s", n.Path[len(n.Path)-1]) -} - -// GraphNodeExpandable -func (n *graphNodeOrphanModule) Expand(b GraphBuilder) (GraphNodeSubgraph, error) { - g, err := b.Build(n.Path) - if err != nil { - return nil, err - } - - return &GraphNodeBasicSubgraph{ - NameValue: n.Name(), - Graph: g, - }, nil -} - -// graphNodeOrphanResource is the graph vertex representing an orphan resource.. -type graphNodeOrphanResource struct { - Path []string - ResourceKey *ResourceStateKey - Provider string - - dependentOn []string -} - -func (n *graphNodeOrphanResource) ConfigType() GraphNodeConfigType { - return GraphNodeConfigTypeResource -} - -func (n *graphNodeOrphanResource) ResourceAddress() *ResourceAddress { - return &ResourceAddress{ - Index: n.ResourceKey.Index, - InstanceType: TypePrimary, - Name: n.ResourceKey.Name, - Path: n.Path[1:], - Type: n.ResourceKey.Type, - Mode: n.ResourceKey.Mode, - } -} - -func (n *graphNodeOrphanResource) DependableName() []string { - return []string{n.dependableName()} -} - -func (n *graphNodeOrphanResource) DependentOn() []string { - return n.dependentOn -} - -func (n *graphNodeOrphanResource) Flatten(p []string) (dag.Vertex, error) { - return &graphNodeOrphanResourceFlat{ - graphNodeOrphanResource: n, - PathValue: p, - }, nil -} - -func (n *graphNodeOrphanResource) Name() string { - return fmt.Sprintf("%s (orphan)", n.ResourceKey) -} - -func (n *graphNodeOrphanResource) ProvidedBy() []string { - return []string{resourceProvider(n.ResourceKey.Type, n.Provider)} -} - -// GraphNodeEvalable impl. 
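Conceptually, orphan detection throughout this patch reduces to a state-versus-config set difference; a minimal sketch with plain string keys standing in for resource state keys:

    package main

    import "fmt"

    // orphans mirrors the comparison the orphan transformers perform:
    // anything present in state but absent from configuration is an orphan
    // (sketch; the real code compares state keys against *config.Config).
    func orphans(state map[string]bool, config map[string]bool) []string {
        var out []string
        for key := range state {
            if !config[key] {
                out = append(out, key)
            }
        }
        return out
    }

    func main() {
        state := map[string]bool{"aws_instance.a": true, "aws_instance.b": true}
        cfg := map[string]bool{"aws_instance.a": true}
        fmt.Println(orphans(state, cfg)) // [aws_instance.b]
    }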
-func (n *graphNodeOrphanResource) EvalTree() EvalNode { - - seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} - - // Build instance info - info := &InstanceInfo{Id: n.ResourceKey.String(), Type: n.ResourceKey.Type} - seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info}) - - // Each resource mode has its own lifecycle - switch n.ResourceKey.Mode { - case config.ManagedResourceMode: - seq.Nodes = append( - seq.Nodes, - n.managedResourceEvalNodes(info)..., - ) - case config.DataResourceMode: - seq.Nodes = append( - seq.Nodes, - n.dataResourceEvalNodes(info)..., - ) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.ResourceKey.Mode)) - } - - return seq -} - -func (n *graphNodeOrphanResource) managedResourceEvalNodes(info *InstanceInfo) []EvalNode { - var provider ResourceProvider - var state *InstanceState - - nodes := make([]EvalNode, 0, 3) - - // Refresh the resource - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: n.ResourceKey.String(), - Output: &state, - }, - &EvalRefresh{ - Info: info, - Provider: &provider, - State: &state, - Output: &state, - }, - &EvalWriteState{ - Name: n.ResourceKey.String(), - ResourceType: n.ResourceKey.Type, - Provider: n.Provider, - Dependencies: n.DependentOn(), - State: &state, - }, - }, - }, - }) - - // Diff the resource - var diff *InstanceDiff - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkPlan, walkPlanDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalReadState{ - Name: n.ResourceKey.String(), - Output: &state, - }, - &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, - }, - &EvalWriteDiff{ - Name: n.ResourceKey.String(), - Diff: &diff, - }, - }, - }, - }) - - // Apply - var err error - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalReadDiff{ - Name: n.ResourceKey.String(), - Diff: &diff, - }, - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: n.ResourceKey.String(), - Output: &state, - }, - &EvalApply{ - Info: info, - State: &state, - Diff: &diff, - Provider: &provider, - Output: &state, - Error: &err, - }, - &EvalWriteState{ - Name: n.ResourceKey.String(), - ResourceType: n.ResourceKey.Type, - Provider: n.Provider, - Dependencies: n.DependentOn(), - State: &state, - }, - &EvalApplyPost{ - Info: info, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - }, - }) - - return nodes -} - -func (n *graphNodeOrphanResource) dataResourceEvalNodes(info *InstanceInfo) []EvalNode { - nodes := make([]EvalNode, 0, 3) - - // This will remain nil, since we don't retain states for orphaned - // data resources. - var state *InstanceState - - // On both refresh and apply we just drop our state altogether, - // since the config resource validation pass will have proven that the - // resources remaining in the configuration don't need it. 
- nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh, walkApply}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalWriteState{ - Name: n.ResourceKey.String(), - ResourceType: n.ResourceKey.Type, - Provider: n.Provider, - Dependencies: n.DependentOn(), - State: &state, // state is nil - }, - }, - }, - }) - - return nodes -} - -func (n *graphNodeOrphanResource) dependableName() string { - return n.ResourceKey.String() -} - -// GraphNodeDestroyable impl. -func (n *graphNodeOrphanResource) DestroyNode() GraphNodeDestroy { - return n -} - -// GraphNodeDestroy impl. -func (n *graphNodeOrphanResource) CreateBeforeDestroy() bool { - return false -} - -func (n *graphNodeOrphanResource) CreateNode() dag.Vertex { - return n -} - -// Same as graphNodeOrphanResource, but for flattening -type graphNodeOrphanResourceFlat struct { - *graphNodeOrphanResource - - PathValue []string -} - -func (n *graphNodeOrphanResourceFlat) Name() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), n.graphNodeOrphanResource.Name()) -} - -func (n *graphNodeOrphanResourceFlat) Path() []string { - return n.PathValue -} - -// GraphNodeDestroyable impl. -func (n *graphNodeOrphanResourceFlat) DestroyNode() GraphNodeDestroy { - return n -} - -// GraphNodeDestroy impl. -func (n *graphNodeOrphanResourceFlat) CreateBeforeDestroy() bool { - return false -} - -func (n *graphNodeOrphanResourceFlat) CreateNode() dag.Vertex { - return n -} - -func (n *graphNodeOrphanResourceFlat) ProvidedBy() []string { - return modulePrefixList( - n.graphNodeOrphanResource.ProvidedBy(), - modulePrefixStr(n.PathValue)) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go new file mode 100644 index 00000000..b256a25b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go @@ -0,0 +1,110 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/dag" +) + +// OrphanResourceCountTransformer is a GraphTransformer that adds orphans +// for an expanded count to the graph. The determination of this depends +// on the count argument given. +// +// Orphans are found by comparing the count to what is found in the state. +// This transform assumes that if an element in the state is within the count +// bounds given, that it is not an orphan. +type OrphanResourceCountTransformer struct { + Concrete ConcreteResourceNodeFunc + + Count int // Actual count of the resource + Addr *ResourceAddress // Addr of the resource to look for orphans + State *State // Full global state +} + +func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { + log.Printf("[TRACE] OrphanResourceCount: Starting...") + + // Grab the module in the state just for this resource address + ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path)) + if ms == nil { + // If no state, there can't be orphans + return nil + } + + orphanIndex := -1 + if t.Count == 1 { + orphanIndex = 0 + } + + // Go through the orphans and add them all to the state + for key, _ := range ms.Resources { + // Build the address + addr, err := parseResourceAddressInternal(key) + if err != nil { + return err + } + addr.Path = ms.Path[1:] + + // Copy the address for comparison. If we aren't looking at + // the same resource, then just ignore it. 
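The count bounds check at the heart of this transformer can be isolated as follows; a self-contained sketch that deliberately omits the -1/0 special case handled below:

    package main

    import "fmt"

    // countOrphans returns the state indexes that fall outside the
    // configured count, mirroring the bounds check in
    // OrphanResourceCountTransformer (sketch; the real code also resolves
    // the -1 versus 0 key collision).
    func countOrphans(count int, stateIndexes []int) []int {
        var orphans []int
        for _, idx := range stateIndexes {
            if idx >= count {
                orphans = append(orphans, idx)
            }
        }
        return orphans
    }

    func main() {
        // count was lowered from 4 to 2: indexes 2 and 3 become orphans.
        fmt.Println(countOrphans(2, []int{0, 1, 2, 3})) // [2 3]
    }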
+ addrCopy := addr.Copy()
+ addrCopy.Index = -1
+ if !addrCopy.Equals(t.Addr) {
+ continue
+ }
+
+ log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr)
+
+ idx := addr.Index
+
+ // If we have zero and the index here is 0 or 1, then we
+ // change the index to a high number so that we treat it as
+ // an orphan.
+ if t.Count <= 0 && idx <= 0 {
+ idx = t.Count + 1
+ }
+
+ // If we have a count greater than 0 and we're at the zero index,
+ // we do a special case check to see if our state also has a
+ // -1 index value. If so, this is an orphan because our rules are
+ // that if both a -1 and 0 are in the state, the 0 is destroyed.
+ if t.Count > 0 && idx == orphanIndex {
+ // This is a piece of cleverness (beware), but it's simple:
+ // if orphanIndex is 0, then check -1, else check 0.
+ checkIndex := (orphanIndex + 1) * -1
+
+ key := &ResourceStateKey{
+ Name: addr.Name,
+ Type: addr.Type,
+ Mode: addr.Mode,
+ Index: checkIndex,
+ }
+
+ if _, ok := ms.Resources[key.String()]; ok {
+ // We have a -1 index, too. Make an arbitrarily high
+ // index so that we always mark this as an orphan.
+ log.Printf(
+ "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d",
+ addr, orphanIndex)
+ idx = t.Count + 1
+ }
+ }
+
+ // If the index is within the count bounds, it is not an orphan
+ if idx < t.Count {
+ continue
+ }
+
+ // Build the abstract node and the concrete one
+ abstract := &NodeAbstractResource{Addr: addr}
+ var node dag.Vertex = abstract
+ if f := t.Concrete; f != nil {
+ node = f(abstract)
+ }
+
+ // Add it to the graph
+ g.Add(node)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
new file mode 100644
index 00000000..aea2bd0e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
@@ -0,0 +1,53 @@
+package terraform
+
+import (
+ "log"
+
+ "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/terraform/config/module"
+)
+
+// OrphanOutputTransformer finds the outputs that aren't present
+// in the given config that are in the state and adds them to the graph
+// for deletion.
+type OrphanOutputTransformer struct {
+ Module *module.Tree // Root module
+ State *State // State is the root state
+}
+
+func (t *OrphanOutputTransformer) Transform(g *Graph) error {
+ if t.State == nil {
+ log.Printf("[DEBUG] No state, no orphan outputs")
+ return nil
+ }
+
+ for _, ms := range t.State.Modules {
+ if err := t.transform(g, ms); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *OrphanOutputTransformer) transform(g *Graph, ms *ModuleState) error {
+ if ms == nil {
+ return nil
+ }
+
+ path := normalizeModulePath(ms.Path)
+
+ // Get the config for this path, which is nil if the entire module has been
+ // removed.
+ var c *config.Config + if m := t.Module.Child(path[1:]); m != nil { + c = m.Config() + } + + // add all the orphaned outputs to the graph + for _, n := range ms.RemovedOutputs(c) { + g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path}) + + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go new file mode 100644 index 00000000..e42d3c84 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go @@ -0,0 +1,78 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/dag" +) + +// OrphanResourceTransformer is a GraphTransformer that adds resource +// orphans to the graph. A resource orphan is a resource that is +// represented in the state but not in the configuration. +// +// This only adds orphans that have no representation at all in the +// configuration. +type OrphanResourceTransformer struct { + Concrete ConcreteResourceNodeFunc + + // State is the global state. We require the global state to + // properly find module orphans at our path. + State *State + + // Module is the root module. We'll look up the proper configuration + // using the graph path. + Module *module.Tree +} + +func (t *OrphanResourceTransformer) Transform(g *Graph) error { + if t.State == nil { + // If the entire state is nil, there can't be any orphans + return nil + } + + // Go through the modules and for each module transform in order + // to add the orphan. + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err + } + } + + return nil +} + +func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error { + if ms == nil { + return nil + } + + // Get the configuration for this path. The configuration might be + // nil if the module was removed from the configuration. This is okay, + // this just means that every resource is an orphan. + var c *config.Config + if m := t.Module.Child(ms.Path[1:]); m != nil { + c = m.Config() + } + + // Go through the orphans and add them all to the state + for _, key := range ms.Orphans(c) { + // Build the abstract resource + addr, err := parseResourceAddressInternal(key) + if err != nil { + return err + } + addr.Path = ms.Path[1:] + + // Build the abstract node and the concrete one + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + // Add it to the graph + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go index d3e839ce..faa25e41 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go @@ -1,98 +1,95 @@ package terraform import ( - "fmt" + "log" + "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" ) -// GraphNodeOutput is an interface that nodes that are outputs must -// implement. The OutputName returned is the name of the output key -// that they manage. -type GraphNodeOutput interface { - OutputName() string +// OutputTransformer is a GraphTransformer that adds all the outputs +// in the configuration to the graph. 
+// +// This is done for the apply graph builder even if dependent nodes +// aren't changing since there is no downside: the state will be available +// even if the dependent items aren't changing. +type OutputTransformer struct { + Module *module.Tree } -// AddOutputOrphanTransformer is a transformer that adds output orphans -// to the graph. Output orphans are outputs that are no longer in the -// configuration and therefore need to be removed from the state. -type AddOutputOrphanTransformer struct { - State *State +func (t *OutputTransformer) Transform(g *Graph) error { + return t.transform(g, t.Module) } -func (t *AddOutputOrphanTransformer) Transform(g *Graph) error { - // Get the state for this module. If we have no state, we have no orphans - state := t.State.ModuleByPath(g.Path) - if state == nil { +func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error { + // If no config, no outputs + if m == nil { return nil } - // Create the set of outputs we do have in the graph - found := make(map[string]struct{}) - for _, v := range g.Vertices() { - on, ok := v.(GraphNodeOutput) - if !ok { - continue + // Transform all the children. We must do this first because + // we can reference module outputs and they must show up in the + // reference map. + for _, c := range m.Children() { + if err := t.transform(g, c); err != nil { + return err } + } - found[on.OutputName()] = struct{}{} + // If we have no outputs, we're done! + os := m.Config().Outputs + if len(os) == 0 { + return nil } - // Go over all the outputs. If we don't have a graph node for it, - // create it. It doesn't need to depend on anything, since its just - // setting it empty. - for k, _ := range state.Outputs { - if _, ok := found[k]; ok { - continue + // Add all outputs here + for _, o := range os { + node := &NodeApplyableOutput{ + PathValue: normalizeModulePath(m.Path()), + Config: o, } - g.Add(&graphNodeOrphanOutput{OutputName: k}) + // Add it! + g.Add(node) } return nil } -type graphNodeOrphanOutput struct { - OutputName string +// DestroyOutputTransformer is a GraphTransformer that adds nodes to delete +// outputs during destroy. We need to do this to ensure that no stale outputs +// are ever left in the state. +type DestroyOutputTransformer struct { } -func (n *graphNodeOrphanOutput) Name() string { - return fmt.Sprintf("output.%s (orphan)", n.OutputName) -} - -func (n *graphNodeOrphanOutput) EvalTree() EvalNode { - return &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy, walkRefresh}, - Node: &EvalDeleteOutput{ - Name: n.OutputName, - }, - } -} +func (t *DestroyOutputTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + output, ok := v.(*NodeApplyableOutput) + if !ok { + continue + } -// GraphNodeFlattenable impl. 
-func (n *graphNodeOrphanOutput) Flatten(p []string) (dag.Vertex, error) {
-    return &graphNodeOrphanOutputFlat{
-        graphNodeOrphanOutput: n,
-        PathValue:             p,
-    }, nil
-}
+        // create the destroy node for this output
+        node := &NodeDestroyableOutput{
+            PathValue: output.PathValue,
+            Config:    output.Config,
+        }
 
-type graphNodeOrphanOutputFlat struct {
-    *graphNodeOrphanOutput
+        log.Printf("[TRACE] creating %s", node.Name())
+        g.Add(node)
 
-    PathValue []string
-}
+        deps, err := g.Descendents(v)
+        if err != nil {
+            return err
+        }
 
-func (n *graphNodeOrphanOutputFlat) Name() string {
-    return fmt.Sprintf(
-        "%s.%s", modulePrefixStr(n.PathValue), n.graphNodeOrphanOutput.Name())
-}
+        // the destroy node must depend on the eval node
+        deps.Add(v)
 
-func (n *graphNodeOrphanOutputFlat) EvalTree() EvalNode {
-    return &EvalOpFilter{
-        Ops: []walkOperation{walkApply, walkDestroy, walkRefresh},
-        Node: &EvalDeleteOutput{
-            Name: n.OutputName,
-        },
+        for _, d := range deps.List() {
+            log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d))
+            g.Connect(dag.BasicEdge(node, d))
+        }
     }
+    return nil
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
index cbb56dec..c4772b40 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -1,22 +1,46 @@
 package terraform
 
 import (
+    "errors"
     "fmt"
     "log"
     "strings"
 
     "github.com/hashicorp/go-multierror"
     "github.com/hashicorp/terraform/config"
+    "github.com/hashicorp/terraform/config/module"
     "github.com/hashicorp/terraform/dag"
-    "github.com/hashicorp/terraform/dot"
 )
 
+func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer {
+    return GraphTransformMulti(
+        // Add providers from the config
+        &ProviderConfigTransformer{
+            Module:    mod,
+            Providers: providers,
+            Concrete:  concrete,
+        },
+        // Add any remaining missing providers
+        &MissingProviderTransformer{
+            Providers: providers,
+            Concrete:  concrete,
+        },
+        // Connect the providers
+        &ProviderTransformer{},
+        // Remove unused providers and proxies
+        &PruneProviderTransformer{},
+        // Connect providers to their parent provider nodes
+        &ParentProviderTransformer{},
+    )
+}
+
 // GraphNodeProvider is an interface that nodes that can be a provider
-// must implement. The ProviderName returned is the name of the provider
-// they satisfy.
+// must implement.
+// ProviderName returns the name of the provider this node satisfies.
+// Name returns the full name of the provider in the config.
 type GraphNodeProvider interface {
     ProviderName() string
-    ProviderConfig() *config.RawConfig
+    Name() string
 }
 
 // GraphNodeCloseProvider is an interface that nodes that can be a close
@@ -28,58 +52,12 @@ type GraphNodeCloseProvider interface {
 
 // GraphNodeProviderConsumer is an interface that nodes that require
 // a provider must implement. ProvidedBy must return the name of the provider
-// to use.
+// to use. This may be a provider by type, type.alias, or a fully resolved
+// provider name.
 type GraphNodeProviderConsumer interface {
-    ProvidedBy() []string
-}
-
-// DisableProviderTransformer "disables" any providers that are only
-// depended on by modules.
-type DisableProviderTransformer struct{}
-
-func (t *DisableProviderTransformer) Transform(g *Graph) error {
-    // Since we're comparing against edges, we need to make sure we connect
-    g.ConnectDependents()
-
-    for _, v := range g.Vertices() {
-        // We only care about providers
-        pn, ok := v.(GraphNodeProvider)
-        if !ok || pn.ProviderName() == "" {
-            continue
-        }
-
-        // Go through all the up-edges (things that depend on this
-        // provider) and if any is not a module, then ignore this node.
-        nonModule := false
-        for _, sourceRaw := range g.UpEdges(v).List() {
-            source := sourceRaw.(dag.Vertex)
-            cn, ok := source.(graphNodeConfig)
-            if !ok {
-                nonModule = true
-                break
-            }
-
-            if cn.ConfigType() != GraphNodeConfigTypeModule {
-                nonModule = true
-                break
-            }
-        }
-        if nonModule {
-            // We found something that depends on this provider that
-            // isn't a module, so skip it.
-            continue
-        }
-
-        // Disable the provider by replacing it with a "disabled" provider
-        disabled := &graphNodeDisabledProvider{GraphNodeProvider: pn}
-        if !g.Replace(v, disabled) {
-            panic(fmt.Sprintf(
-                "vertex disappeared from under us: %s",
-                dag.VertexName(v)))
-        }
-    }
-
-    return nil
+    ProvidedBy() string
+    // Set the resolved provider address for this resource.
+    SetProvider(string)
 }
 
 // ProviderTransformer is a GraphTransformer that maps resources to
@@ -93,18 +71,52 @@ func (t *ProviderTransformer) Transform(g *Graph) error {
     m := providerVertexMap(g)
     for _, v := range g.Vertices() {
         if pv, ok := v.(GraphNodeProviderConsumer); ok {
-            for _, p := range pv.ProvidedBy() {
-                target := m[providerMapKey(p, pv)]
-                if target == nil {
-                    println(fmt.Sprintf("%#v\n\n%#v", m, providerMapKey(p, pv)))
-                    err = multierror.Append(err, fmt.Errorf(
-                        "%s: provider %s couldn't be found",
-                        dag.VertexName(v), p))
-                    continue
+            p := pv.ProvidedBy()
+
+            key := providerMapKey(p, pv)
+            target := m[key]
+
+            sp, ok := pv.(GraphNodeSubPath)
+            if !ok && target == nil {
+                // no target, and no path to walk up
+                err = multierror.Append(err, fmt.Errorf(
+                    "%s: provider %s couldn't be found",
+                    dag.VertexName(v), p))
+                break
+            }
+
+            // if we don't have a provider at this level, walk up the path looking for one
+            for i := 1; target == nil; i++ {
+                path := normalizeModulePath(sp.Path())
+                if len(path) < i {
+                    break
                 }
-                g.Connect(dag.BasicEdge(v, target))
+                key = ResolveProviderName(p, path[:len(path)-i])
+                target = m[key]
+                if target != nil {
+                    break
+                }
+            }
+
+            if target == nil {
+                err = multierror.Append(err, fmt.Errorf(
+                    "%s: configuration for %s is not present; a provider configuration block is required for all operations",
+                    dag.VertexName(v), p,
+                ))
+                break
+            }
+
+            // see if this is an inherited provider
+            if p, ok := target.(*graphNodeProxyProvider); ok {
+                g.Remove(p)
+                target = p.Target()
+                key = target.(GraphNodeProvider).Name()
             }
+
+            log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key)
+            pv.SetProvider(key)
+            g.Connect(dag.BasicEdge(v, target))
         }
     }
 
@@ -119,36 +131,32 @@ type CloseProviderTransformer struct{}
 
 func (t *CloseProviderTransformer) Transform(g *Graph) error {
     pm := providerVertexMap(g)
-    cpm := closeProviderVertexMap(g)
+    cpm := make(map[string]*graphNodeCloseProvider)
     var err error
 
-    for _, v := range g.Vertices() {
-        if pv, ok := v.(GraphNodeProviderConsumer); ok {
-            for _, p := range pv.ProvidedBy() {
-                key := p
-                source := cpm[key]
-
-                if source == nil {
-                    // Create a new graphNodeCloseProvider and add it to the graph
-                    source = &graphNodeCloseProvider{ProviderNameValue: p}
-                    g.Add(source)
-
-                    // Close node needs to depend on provider
-                    provider, ok := pm[key]
-                    if !ok {
-                        err = multierror.Append(err, fmt.Errorf(
-                            "%s: provider %s couldn't be found for closing",
-                            dag.VertexName(v), p))
-                        continue
-                    }
-                    g.Connect(dag.BasicEdge(source, provider))
-
-                    // Make sure we also add the new graphNodeCloseProvider to the map
-                    // so we don't create and add any duplicate graphNodeCloseProviders.
-                    cpm[key] = source
-                }
-
-                // Close node depends on all nodes provided by the provider
-                g.Connect(dag.BasicEdge(source, v))
+    for _, v := range pm {
+        p := v.(GraphNodeProvider)
+
+        // get the close provider of this type if we already created it
+        closer := cpm[p.Name()]
+
+        if closer == nil {
+            // create a closer for this provider type
+            closer = &graphNodeCloseProvider{ProviderNameValue: p.Name()}
+            g.Add(closer)
+            cpm[p.Name()] = closer
+        }
+
+        // Close node depends on the provider itself.
+        // This is added unconditionally, so it will connect to all instances
+        // of the provider. Extra edges will be removed by transitive
+        // reduction.
+        g.Connect(dag.BasicEdge(closer, p))
+
+        // connect all the provider's resources to the close node
+        for _, s := range g.UpEdges(p).List() {
+            if _, ok := s.(GraphNodeProviderConsumer); ok {
+                g.Connect(dag.BasicEdge(closer, s))
             }
         }
     }
@@ -156,108 +164,126 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error {
     return err
 }
 
-// MissingProviderTransformer is a GraphTransformer that adds nodes
-// for missing providers into the graph. Specifically, it creates provider
-// configuration nodes for all the providers that we support. These are
-// pruned later during an optimization pass.
+// MissingProviderTransformer is a GraphTransformer that adds nodes for all
+// required providers into the graph. Specifically, it creates provider
+// configuration nodes for all the providers that we support. These are pruned
+// later during an optimization pass.
 type MissingProviderTransformer struct {
     // Providers is the list of providers we support.
     Providers []string
+
+    // Concrete, if set, overrides how the providers are made.
+    Concrete ConcreteProviderNodeFunc
 }
 
 func (t *MissingProviderTransformer) Transform(g *Graph) error {
-    // Create a set of our supported providers
-    supported := make(map[string]struct{}, len(t.Providers))
-    for _, v := range t.Providers {
-        supported[v] = struct{}{}
+    // Initialize factory
+    if t.Concrete == nil {
+        t.Concrete = func(a *NodeAbstractProvider) dag.Vertex {
+            return a
+        }
     }
 
-    // Get the map of providers we already have in our graph
+    var err error
     m := providerVertexMap(g)
-
-    // Go through all the provider consumers and make sure we add
-    // that provider if it is missing. We use a for loop here instead
-    // of "range" since we'll modify check as we go to add more to check.
-    check := g.Vertices()
-    for i := 0; i < len(check); i++ {
-        v := check[i]
-
+    for _, v := range g.Vertices() {
         pv, ok := v.(GraphNodeProviderConsumer)
         if !ok {
            continue
        }
 
-        // If this node has a subpath, then we use that as a prefix
-        // into our map to check for an existing provider.
-        var path []string
-        if sp, ok := pv.(GraphNodeSubPath); ok {
-            raw := normalizeModulePath(sp.Path())
-            if len(raw) > len(rootModulePath) {
-                path = raw
-            }
+        p := pv.ProvidedBy()
+        // this may be the resolved provider from the state, so we need to get
+        // the base provider name.
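For clarity, a small sketch of what the SplitAfter call below yields (the provider names are invented for illustration):

    // strings.SplitAfter keeps the separator on the left-hand piece:
    parts := strings.SplitAfter("module.child.provider.aws", "provider.")
    // parts == []string{"module.child.provider.", "aws"}
    p := parts[len(parts)-1] // "aws", the base provider type
    // A plain, unresolved name passes through unchanged:
    // strings.SplitAfter("aws", "provider.") == []string{"aws"}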
+        parts := strings.SplitAfter(p, "provider.")
+        p = parts[len(parts)-1]
+
+        key := ResolveProviderName(p, nil)
+        provider := m[key]
+
+        // we already have it
+        if provider != nil {
+            continue
         }
 
-        for _, p := range pv.ProvidedBy() {
-            key := providerMapKey(p, pv)
-            if _, ok := m[key]; ok {
-                // This provider already exists as a configure node
-                continue
-            }
+        // we don't implicitly create aliased providers
+        if strings.Contains(p, ".") {
+            log.Println("[DEBUG] not adding missing provider alias:", p)
+            continue
+        }
 
-            // If the provider has an alias in it, we just want the type
-            ptype := p
-            if idx := strings.IndexRune(p, '.'); idx != -1 {
-                ptype = p[:idx]
-            }
+        log.Println("[DEBUG] adding missing provider:", p)
 
-            if _, ok := supported[ptype]; !ok {
-                // If we don't support the provider type, skip it.
-                // Validation later will catch this as an error.
-                continue
-            }
+        // create the missing top-level provider
+        provider = t.Concrete(&NodeAbstractProvider{
+            NameValue: p,
+        }).(dag.Vertex)
 
-            // Add the missing provider node to the graph
-            raw := &graphNodeProvider{ProviderNameValue: p}
-            var v dag.Vertex = raw
-            if len(path) > 0 {
-                var err error
-                v, err = raw.Flatten(path)
-                if err != nil {
-                    return err
-                }
+        m[key] = g.Add(provider)
+    }
+
+    return err
+}
+
+// ParentProviderTransformer connects provider nodes to their parents.
+//
+// This works by finding nodes that are both GraphNodeProviders and
+// GraphNodeSubPath. It then connects the providers to their parent
+// path. The parent provider is always at the root level.
+type ParentProviderTransformer struct{}
+
+func (t *ParentProviderTransformer) Transform(g *Graph) error {
+    pm := providerVertexMap(g)
+    for _, v := range g.Vertices() {
+        // Only care about providers
+        pn, ok := v.(GraphNodeProvider)
+        if !ok || pn.ProviderName() == "" {
+            continue
+        }
 
-                // We'll need the parent provider as well, so let's
-                // add a dummy node to check to make sure that we add
-                // that parent provider.
-                check = append(check, &graphNodeProviderConsumerDummy{
-                    ProviderValue: p,
-                    PathValue:     path[:len(path)-1],
-                })
+        // Also require a subpath; if there is no subpath then we
+        // can't have a parent.
+        if pn, ok := v.(GraphNodeSubPath); ok {
+            if len(normalizeModulePath(pn.Path())) <= 1 {
+                continue
             }
+        }
 
-            m[key] = g.Add(v)
+        // this provider may be disabled, but we can only get its name from
+        // the ProviderName string
+        name := ResolveProviderName(strings.SplitN(pn.ProviderName(), " ", 2)[0], nil)
+        parent := pm[name]
+        if parent != nil {
+            g.Connect(dag.BasicEdge(v, parent))
         }
-    }
 
+    }
     return nil
 }
 
-// PruneProviderTransformer is a GraphTransformer that prunes all the
-// providers that aren't needed from the graph. A provider is unneeded if
-// no resource or module is using that provider.
+// PruneProviderTransformer removes any providers that are not actually used by
+// anything, and provider proxies. This avoids the provider being initialized
+// and configured. This both saves resources and avoids errors since
+// configuration may imply initialization which may require auth.
 type PruneProviderTransformer struct{}
 
 func (t *PruneProviderTransformer) Transform(g *Graph) error {
     for _, v := range g.Vertices() {
-        // We only care about the providers
-        if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" {
+        // We only care about providers
+        pn, ok := v.(GraphNodeProvider)
+        if !ok || pn.ProviderName() == "" {
             continue
         }
-
-        // Does anything depend on this? If not, then prune it.
-        if s := g.UpEdges(v); s.Len() == 0 {
-            if nv, ok := v.(dag.NamedVertex); ok {
-                log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name())
-            }
+
+        // ProxyProviders will have up edges, but we're now done with them in the graph
+        if _, ok := v.(*graphNodeProxyProvider); ok {
+            log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v))
+            g.Remove(v)
+        }
+
+        // Remove providers with no dependencies.
+        if g.UpEdges(v).Len() == 0 {
+            log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v))
             g.Remove(v)
         }
     }
@@ -268,22 +294,26 @@ func (t *PruneProviderTransformer) Transform(g *Graph) error {
 // providerMapKey is a helper that gives us the key to use for the
 // maps returned by things such as providerVertexMap.
 func providerMapKey(k string, v dag.Vertex) string {
-    pathPrefix := ""
-    if sp, ok := v.(GraphNodeSubPath); ok {
-        raw := normalizeModulePath(sp.Path())
-        if len(raw) > len(rootModulePath) {
-            pathPrefix = modulePrefixStr(raw) + "."
-        }
+    if strings.Contains(k, "provider.") {
+        // this is already resolved
+        return k
     }
 
-    return pathPrefix + k
+    // resolve the fully qualified provider name using the vertex's module path
+    var path []string
+    if sp, ok := v.(GraphNodeSubPath); ok {
+        path = normalizeModulePath(sp.Path())
+    }
+    return ResolveProviderName(k, path)
 }
 
 func providerVertexMap(g *Graph) map[string]dag.Vertex {
     m := make(map[string]dag.Vertex)
     for _, v := range g.Vertices() {
         if pv, ok := v.(GraphNodeProvider); ok {
-            m[pv.ProviderName()] = v
+            // TODO: The Name may have meta info, like " (disabled)"
+            name := strings.SplitN(pv.Name(), " ", 2)[0]
+            m[name] = v
         }
     }
 
@@ -301,250 +331,276 @@ func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
     return m
 }
 
-type graphNodeDisabledProvider struct {
-    GraphNodeProvider
+type graphNodeCloseProvider struct {
+    ProviderNameValue string
+}
+
+func (n *graphNodeCloseProvider) Name() string {
+    return n.ProviderNameValue + " (close)"
 }
 
 // GraphNodeEvalable impl.
-func (n *graphNodeDisabledProvider) EvalTree() EvalNode {
-    var resourceConfig *ResourceConfig
-
-    return &EvalOpFilter{
-        Ops: []walkOperation{walkInput, walkValidate, walkRefresh, walkPlan, walkApply, walkDestroy},
-        Node: &EvalSequence{
-            Nodes: []EvalNode{
-                &EvalInterpolate{
-                    Config: n.ProviderConfig(),
-                    Output: &resourceConfig,
-                },
-                &EvalBuildProviderConfig{
-                    Provider: n.ProviderName(),
-                    Config:   &resourceConfig,
-                    Output:   &resourceConfig,
-                },
-                &EvalSetProviderConfig{
-                    Provider: n.ProviderName(),
-                    Config:   &resourceConfig,
-                },
-            },
-        },
-    }
+func (n *graphNodeCloseProvider) EvalTree() EvalNode {
+    return CloseProviderEvalTree(n.ProviderNameValue)
 }
 
-// GraphNodeFlattenable impl.
-func (n *graphNodeDisabledProvider) Flatten(p []string) (dag.Vertex, error) {
-    return &graphNodeDisabledProviderFlat{
-        graphNodeDisabledProvider: n,
-        PathValue:                 p,
-    }, nil
+// GraphNodeDependable impl.
+func (n *graphNodeCloseProvider) DependableName() []string {
+    return []string{n.Name()}
 }
 
-func (n *graphNodeDisabledProvider) Name() string {
-    return fmt.Sprintf("%s (disabled)", dag.VertexName(n.GraphNodeProvider))
+func (n *graphNodeCloseProvider) CloseProviderName() string {
+    return n.ProviderNameValue
 }
 
 // GraphNodeDotter impl.
-func (n *graphNodeDisabledProvider) DotNode(name string, opts *GraphDotOpts) *dot.Node {
-    return dot.NewNode(name, map[string]string{
-        "label": n.Name(),
-        "shape": "diamond",
-    })
+func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+    if !opts.Verbose {
+        return nil
+    }
+    return &dag.DotNode{
+        Name: name,
+        Attrs: map[string]string{
+            "label": n.Name(),
+            "shape": "diamond",
+        },
+    }
 }
 
-// GraphNodeDotterOrigin impl.
-func (n *graphNodeDisabledProvider) DotOrigin() bool {
+// RemovableIfNotTargeted
+func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
+    // We need to add this so that this node will be removed if
+    // it isn't targeted or a dependency of a target.
     return true
 }
 
-// GraphNodeDependable impl.
-func (n *graphNodeDisabledProvider) DependableName() []string {
-    return []string{"provider." + n.ProviderName()}
+// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to
+// store the name and value of a provider node for inheritance between modules.
+// These nodes are only used to store the data while loading the provider
+// configurations, and are removed after all the resources have been connected
+// to their providers.
+type graphNodeProxyProvider struct {
+    nameValue string
+    path      []string
+    target    GraphNodeProvider
 }
 
-// GraphNodeProvider impl.
-func (n *graphNodeDisabledProvider) ProviderName() string {
-    return n.GraphNodeProvider.ProviderName()
+func (n *graphNodeProxyProvider) ProviderName() string {
+    return n.Target().ProviderName()
 }
 
-// GraphNodeProvider impl.
-func (n *graphNodeDisabledProvider) ProviderConfig() *config.RawConfig {
-    return n.GraphNodeProvider.ProviderConfig()
+func (n *graphNodeProxyProvider) Name() string {
+    return ResolveProviderName(n.nameValue, n.path)
 }
 
-// Same as graphNodeDisabledProvider, but for flattening
-type graphNodeDisabledProviderFlat struct {
-    *graphNodeDisabledProvider
-
-    PathValue []string
+// find the concrete provider instance
+func (n *graphNodeProxyProvider) Target() GraphNodeProvider {
+    switch t := n.target.(type) {
+    case *graphNodeProxyProvider:
+        return t.Target()
+    default:
+        return n.target
+    }
 }
 
-func (n *graphNodeDisabledProviderFlat) Name() string {
-    return fmt.Sprintf(
-        "%s.%s", modulePrefixStr(n.PathValue), n.graphNodeDisabledProvider.Name())
-}
+// ProviderConfigTransformer adds all provider nodes from the configuration and
+// attaches the configs.
+type ProviderConfigTransformer struct {
+    Providers []string
+    Concrete  ConcreteProviderNodeFunc
 
-func (n *graphNodeDisabledProviderFlat) Path() []string {
-    return n.PathValue
-}
+    // each provider node is stored here so that the proxy nodes can look up
+    // their targets by name.
+    providers map[string]GraphNodeProvider
+    // record providers that can be overridden with a proxy
+    proxiable map[string]bool
 
-func (n *graphNodeDisabledProviderFlat) ProviderName() string {
-    return fmt.Sprintf(
-        "%s.%s", modulePrefixStr(n.PathValue),
-        n.graphNodeDisabledProvider.ProviderName())
+    // Module is the module to add providers from.
+    Module *module.Tree
 }
 
-// GraphNodeDependable impl.
-func (n *graphNodeDisabledProviderFlat) DependableName() []string {
-    return modulePrefixList(
-        n.graphNodeDisabledProvider.DependableName(),
-        modulePrefixStr(n.PathValue))
-}
+func (t *ProviderConfigTransformer) Transform(g *Graph) error {
+    // If no module is given, we don't do anything
+    if t.Module == nil {
+        return nil
+    }
 
-func (n *graphNodeDisabledProviderFlat) DependentOn() []string {
-    var result []string
+    // If the module isn't loaded, that is simply an error
+    if !t.Module.Loaded() {
+        return errors.New("module must be loaded for ProviderConfigTransformer")
+    }
+
+    t.providers = make(map[string]GraphNodeProvider)
+    t.proxiable = make(map[string]bool)
 
-    // If we're in a module, then depend on our parent's provider
-    if len(n.PathValue) > 1 {
-        prefix := modulePrefixStr(n.PathValue[:len(n.PathValue)-1])
-        result = modulePrefixList(
-            n.graphNodeDisabledProvider.DependableName(), prefix)
+    // Start the transformation process
+    if err := t.transform(g, t.Module); err != nil {
+        return err
     }
 
-    return result
+    // finally attach the configs to the new nodes
+    return t.attachProviderConfigs(g)
 }
 
-type graphNodeCloseProvider struct {
-    ProviderNameValue string
-}
+func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error {
+    // If no config, do nothing
+    if m == nil {
+        return nil
+    }
 
-func (n *graphNodeCloseProvider) Name() string {
-    return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue)
-}
+    // Add our providers
+    if err := t.transformSingle(g, m); err != nil {
+        return err
+    }
 
-// GraphNodeEvalable impl.
-func (n *graphNodeCloseProvider) EvalTree() EvalNode {
-    return CloseProviderEvalTree(n.ProviderNameValue)
+    // Transform all the children.
+    for _, c := range m.Children() {
+        if err := t.transform(g, c); err != nil {
+            return err
+        }
+    }
+    return nil
 }
 
-// GraphNodeDependable impl.
-func (n *graphNodeCloseProvider) DependableName() []string {
-    return []string{n.Name()}
-}
+func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error {
+    log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path())
 
-func (n *graphNodeCloseProvider) CloseProviderName() string {
-    return n.ProviderNameValue
-}
+    // Get the configuration for this module
+    conf := m.Config()
 
-// GraphNodeDotter impl.
-func (n *graphNodeCloseProvider) DotNode(name string, opts *GraphDotOpts) *dot.Node {
-    if !opts.Verbose {
-        return nil
+    // Build the path we're at
+    path := m.Path()
+    if len(path) > 0 {
+        path = append([]string{RootModuleName}, path...)
     }
-    return dot.NewNode(name, map[string]string{
-        "label": n.Name(),
-        "shape": "diamond",
-    })
-}
 
-type graphNodeProvider struct {
-    ProviderNameValue string
-}
+    // add all providers from the configuration
+    for _, p := range conf.ProviderConfigs {
+        name := p.Name
+        if p.Alias != "" {
+            name += "." + p.Alias
+        }
 
-func (n *graphNodeProvider) Name() string {
-    return fmt.Sprintf("provider.%s", n.ProviderNameValue)
-}
+        v := t.Concrete(&NodeAbstractProvider{
+            NameValue: name,
+            PathValue: path,
+        })
 
-// GraphNodeEvalable impl.
-func (n *graphNodeProvider) EvalTree() EvalNode {
-    return ProviderEvalTree(n.ProviderNameValue, nil)
-}
+        // Add it to the graph
+        g.Add(v)
+        fullName := ResolveProviderName(name, path)
+        t.providers[fullName] = v.(GraphNodeProvider)
+        t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0
+    }
 
-// GraphNodeDependable impl.
-func (n *graphNodeProvider) DependableName() []string { - return []string{n.Name()} + // Now replace the provider nodes with proxy nodes if a provider was being + // passed in, and create implicit proxies if there was no config. Any extra + // proxies will be removed in the prune step. + return t.addProxyProviders(g, m) } -func (n *graphNodeProvider) ProviderName() string { - return n.ProviderNameValue -} +func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error { + path := m.Path() -func (n *graphNodeProvider) ProviderConfig() *config.RawConfig { - return nil -} + // can't add proxies at the root + if len(path) == 0 { + return nil + } -// GraphNodeDotter impl. -func (n *graphNodeProvider) DotNode(name string, opts *GraphDotOpts) *dot.Node { - return dot.NewNode(name, map[string]string{ - "label": n.Name(), - "shape": "diamond", - }) -} + parentPath := path[:len(path)-1] + parent := t.Module.Child(parentPath) + if parent == nil { + return nil + } -// GraphNodeDotterOrigin impl. -func (n *graphNodeProvider) DotOrigin() bool { - return true -} + var parentCfg *config.Module + for _, mod := range parent.Config().Modules { + if mod.Name == m.Name() { + parentCfg = mod + break + } + } -// GraphNodeFlattenable impl. -func (n *graphNodeProvider) Flatten(p []string) (dag.Vertex, error) { - return &graphNodeProviderFlat{ - graphNodeProvider: n, - PathValue: p, - }, nil -} + if parentCfg == nil { + // this can't really happen during normal execution. + return fmt.Errorf("parent module config not found for %s", m.Name()) + } -// Same as graphNodeMissingProvider, but for flattening -type graphNodeProviderFlat struct { - *graphNodeProvider + // Go through all the providers the parent is passing in, and add proxies to + // the parent provider nodes. + for name, parentName := range parentCfg.Providers { + fullName := ResolveProviderName(name, path) + fullParentName := ResolveProviderName(parentName, parentPath) - PathValue []string -} + parentProvider := t.providers[fullParentName] -func (n *graphNodeProviderFlat) Name() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), n.graphNodeProvider.Name()) -} + if parentProvider == nil { + return fmt.Errorf("missing provider %s", fullParentName) + } -func (n *graphNodeProviderFlat) Path() []string { - return n.PathValue -} + proxy := &graphNodeProxyProvider{ + nameValue: name, + path: path, + target: parentProvider, + } -func (n *graphNodeProviderFlat) ProviderName() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), - n.graphNodeProvider.ProviderName()) -} + concreteProvider := t.providers[fullName] -// GraphNodeDependable impl. -func (n *graphNodeProviderFlat) DependableName() []string { - return []string{n.Name()} -} + // replace the concrete node with the provider passed in + if concreteProvider != nil && t.proxiable[fullName] { + g.Replace(concreteProvider, proxy) + t.providers[fullName] = proxy + continue + } -func (n *graphNodeProviderFlat) DependentOn() []string { - var result []string + // aliased providers can't be implicitly passed in + if strings.Contains(name, ".") { + continue + } - // If we're in a module, then depend on our parent's provider - if len(n.PathValue) > 1 { - prefix := modulePrefixStr(n.PathValue[:len(n.PathValue)-1]) - result = modulePrefixList(n.graphNodeProvider.DependableName(), prefix) + // There was no concrete provider, so add this as an implicit provider. + // The extra proxy will be pruned later if it's unused. 
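To make the proxy wiring concrete, a hedged sketch (the module and provider names are invented; the key format follows ResolveProviderName as used above):

    // A parent passing a provider into this module, e.g.
    //
    //     module "child" { providers = { "aws" = "aws.west" } }
    //
    // reaches the loop above as name="aws", parentName="aws.west", giving
    // roughly fullName="module.child.provider.aws" and
    // fullParentName="provider.aws.west", so the child's unconfigured "aws"
    // resolves to the parent's aws.west configuration through the proxy.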
+ g.Add(proxy) + t.providers[fullName] = proxy } - - return result + return nil } -// graphNodeProviderConsumerDummy is a struct that never enters the real -// graph (though it could to no ill effect). It implements -// GraphNodeProviderConsumer and GraphNodeSubpath as a way to force -// certain transformations. -type graphNodeProviderConsumerDummy struct { - ProviderValue string - PathValue []string -} +func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachProvider implementations + apn, ok := v.(GraphNodeAttachProvider) + if !ok { + continue + } -func (n *graphNodeProviderConsumerDummy) Path() []string { - return n.PathValue -} + // Determine what we're looking for + path := normalizeModulePath(apn.Path())[1:] + name := apn.ProviderName() + log.Printf("[TRACE] Attach provider request: %#v %s", path, name) + + // Get the configuration. + tree := t.Module.Child(path) + if tree == nil { + continue + } + + // Go through the provider configs to find the matching config + for _, p := range tree.Config().ProviderConfigs { + // Build the name, which is "name.alias" if an alias exists + current := p.Name + if p.Alias != "" { + current += "." + p.Alias + } -func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string { - return []string{n.ProviderValue} + // If the configs match then attach! + if current == name { + log.Printf("[TRACE] Attaching provider config: %#v", p) + apn.AttachProvider(p) + break + } + } + } + + return nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go index 2d86275d..f49d8241 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go @@ -40,14 +40,15 @@ func (t *ProvisionerTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProvisionerConsumer); ok { for _, p := range pv.ProvisionedBy() { - if m[p] == nil { + key := provisionerMapKey(p, pv) + if m[key] == nil { err = multierror.Append(err, fmt.Errorf( "%s: provisioner %s couldn't be found", dag.VertexName(v), p)) continue } - g.Connect(dag.BasicEdge(v, m[p])) + g.Connect(dag.BasicEdge(v, m[key])) } } } @@ -80,8 +81,21 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error { continue } + // If this node has a subpath, then we use that as a prefix + // into our map to check for an existing provider. + var path []string + if sp, ok := pv.(GraphNodeSubPath); ok { + raw := normalizeModulePath(sp.Path()) + if len(raw) > len(rootModulePath) { + path = raw + } + } + for _, p := range pv.ProvisionedBy() { - if _, ok := m[p]; ok { + // Build the key for storing in the map + key := provisionerMapKey(p, pv) + + if _, ok := m[key]; ok { // This provisioner already exists as a configure node continue } @@ -92,8 +106,14 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error { continue } + // Build the vertex + var newV dag.Vertex = &NodeProvisioner{ + NameValue: p, + PathValue: path, + } + // Add the missing provisioner node to the graph - m[p] = g.Add(&graphNodeProvisioner{ProvisionerNameValue: p}) + m[key] = g.Add(newV) } } @@ -131,11 +151,26 @@ func (t *CloseProvisionerTransformer) Transform(g *Graph) error { return nil } +// provisionerMapKey is a helper that gives us the key to use for the +// maps returned by things such as provisionerVertexMap. 
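As a concrete (hypothetical) illustration of the keys this helper produces:

    // provisionerMapKey("local-exec", v) for a root-module resource:
    //     "local-exec"
    // for a resource in module "app" (path ["root", "app"]):
    //     "module.app.local-exec"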
+func provisionerMapKey(k string, v dag.Vertex) string { + pathPrefix := "" + if sp, ok := v.(GraphNodeSubPath); ok { + raw := normalizeModulePath(sp.Path()) + if len(raw) > len(rootModulePath) { + pathPrefix = modulePrefixStr(raw) + "." + } + } + + return pathPrefix + k +} + func provisionerVertexMap(g *Graph) map[string]dag.Vertex { m := make(map[string]dag.Vertex) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProvisioner); ok { - m[pv.ProvisionerName()] = v + key := provisionerMapKey(pv.ProvisionerName(), v) + m[key] = v } } @@ -169,50 +204,3 @@ func (n *graphNodeCloseProvisioner) EvalTree() EvalNode { func (n *graphNodeCloseProvisioner) CloseProvisionerName() string { return n.ProvisionerNameValue } - -type graphNodeProvisioner struct { - ProvisionerNameValue string -} - -func (n *graphNodeProvisioner) Name() string { - return fmt.Sprintf("provisioner.%s", n.ProvisionerNameValue) -} - -// GraphNodeEvalable impl. -func (n *graphNodeProvisioner) EvalTree() EvalNode { - return &EvalInitProvisioner{Name: n.ProvisionerNameValue} -} - -func (n *graphNodeProvisioner) ProvisionerName() string { - return n.ProvisionerNameValue -} - -// GraphNodeFlattenable impl. -func (n *graphNodeProvisioner) Flatten(p []string) (dag.Vertex, error) { - return &graphNodeProvisionerFlat{ - graphNodeProvisioner: n, - PathValue: p, - }, nil -} - -// Same as graphNodeMissingProvisioner, but for flattening -type graphNodeProvisionerFlat struct { - *graphNodeProvisioner - - PathValue []string -} - -func (n *graphNodeProvisionerFlat) Name() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), n.graphNodeProvisioner.Name()) -} - -func (n *graphNodeProvisionerFlat) Path() []string { - return n.PathValue -} - -func (n *graphNodeProvisionerFlat) ProvisionerName() string { - return fmt.Sprintf( - "%s.%s", modulePrefixStr(n.PathValue), - n.graphNodeProvisioner.ProvisionerName()) -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_proxy.go b/vendor/github.com/hashicorp/terraform/terraform/transform_proxy.go deleted file mode 100644 index db7b34ed..00000000 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_proxy.go +++ /dev/null @@ -1,62 +0,0 @@ -package terraform - -import ( - "github.com/hashicorp/terraform/dag" -) - -// GraphNodeProxy must be implemented by nodes that are proxies. -// -// A node that is a proxy says that anything that depends on this -// node (the proxy), should also copy all the things that the proxy -// itself depends on. Example: -// -// A => proxy => C -// -// Should transform into (two edges): -// -// A => proxy => C -// A => C -// -// The purpose for this is because some transforms only look at direct -// edge connections and the proxy generally isn't meaningful in those -// situations, so we should complete all the edges. -type GraphNodeProxy interface { - Proxy() bool -} - -// ProxyTransformer is a transformer that goes through the graph, finds -// vertices that are marked as proxies, and connects through their -// dependents. See above for what a proxy is. -type ProxyTransformer struct{} - -func (t *ProxyTransformer) Transform(g *Graph) error { - for _, v := range g.Vertices() { - pn, ok := v.(GraphNodeProxy) - if !ok { - continue - } - - // If we don't want to be proxies, don't do it - if !pn.Proxy() { - continue - } - - // Connect all the things that depend on this to things that - // we depend on as the proxy. See docs for GraphNodeProxy for - // a visual explanation. 
-        for _, s := range g.UpEdges(v).List() {
-            for _, t := range g.DownEdges(v).List() {
-                g.Connect(GraphProxyEdge{
-                    Edge: dag.BasicEdge(s, t),
-                })
-            }
-        }
-    }
-
-    return nil
-}
-
-// GraphProxyEdge is the edge that is used for proxied edges.
-type GraphProxyEdge struct {
-    dag.Edge
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
new file mode 100644
index 00000000..be8c7f96
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -0,0 +1,403 @@
+package terraform
+
+import (
+    "fmt"
+    "log"
+    "strings"
+
+    "github.com/hashicorp/terraform/config"
+    "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeReferenceable must be implemented by any node that represents
+// a Terraform thing that can be referenced (resource, module, etc.).
+//
+// Even if the thing has no name, this should return an empty list. By
+// implementing this and returning a non-nil result, you say that this CAN
+// be referenced and other methods of referencing may still be possible (such
+// as by path!)
+type GraphNodeReferenceable interface {
+    // ReferenceableName is the name by which this can be referenced.
+    // This can be either just the type, or include the field. Example:
+    // "aws_instance.bar" or "aws_instance.bar.id".
+    ReferenceableName() []string
+}
+
+// GraphNodeReferencer must be implemented by nodes that reference other
+// Terraform items and therefore depend on them.
+type GraphNodeReferencer interface {
+    // References are the list of things that this node references. This
+    // can include fields or just the type, just like GraphNodeReferenceable
+    // above.
+    References() []string
+}
+
+// GraphNodeReferenceGlobal is an interface that can optionally be
+// implemented. If ReferenceGlobal returns true, then the References()
+// and ReferenceableName() must be _fully qualified_ with "module.foo.bar"
+// etc.
+//
+// This allows a node to reference and be referenced by a specific name
+// that may cross module boundaries. This can be very dangerous so use
+// this wisely.
+//
+// The primary use case for this is module boundaries (variables coming in).
+type GraphNodeReferenceGlobal interface {
+    // Set to true to signal that references and name are fully
+    // qualified. See the above docs for more information.
+    ReferenceGlobal() bool
+}
+
+// ReferenceTransformer is a GraphTransformer that connects all the
+// nodes that reference each other in order to form the proper ordering.
+type ReferenceTransformer struct{}
+
+func (t *ReferenceTransformer) Transform(g *Graph) error {
+    // Build a reference map so we can efficiently look up the references
+    vs := g.Vertices()
+    m := NewReferenceMap(vs)
+
+    // Find the things that reference things and connect them
+    for _, v := range vs {
+        parents, _ := m.References(v)
+        parentsDbg := make([]string, len(parents))
+        for i, v := range parents {
+            parentsDbg[i] = dag.VertexName(v)
+        }
+        log.Printf(
+            "[DEBUG] ReferenceTransformer: %q references: %v",
+            dag.VertexName(v), parentsDbg)
+
+        for _, parent := range parents {
+            g.Connect(dag.BasicEdge(v, parent))
+        }
+    }
+
+    return nil
+}
+
+// DestroyValueReferenceTransformer is a GraphTransformer that reverses the
+// edges for locals and outputs that depend on other nodes which will be
+// removed during destroy. If a destroy node is evaluated before the local or
+// output value, it will be removed from the state, and the later interpolation
+// will fail.
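A sketch of the edge flip this transformer performs (the vertex names are invented):

    // Before: local.name  --->  aws_instance.web (destroy)
    // After:  aws_instance.web (destroy)  --->  local.name
    // via g.RemoveEdge(e) and g.Connect(&DestroyEdge{S: target, T: v}),
    // so the value is evaluated before the resource it reads is destroyed.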
+type DestroyValueReferenceTransformer struct{}
+
+func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
+    vs := g.Vertices()
+    for _, v := range vs {
+        switch v.(type) {
+        case *NodeApplyableOutput, *NodeLocal:
+            // OK
+        default:
+            continue
+        }
+
+        // reverse any outgoing edges so that the value is evaluated first.
+        for _, e := range g.EdgesFrom(v) {
+            target := e.Target()
+
+            // only destroy nodes will be evaluated in reverse
+            if _, ok := target.(GraphNodeDestroyer); !ok {
+                continue
+            }
+
+            log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
+
+            g.RemoveEdge(e)
+            g.Connect(&DestroyEdge{S: target, T: v})
+        }
+    }
+
+    return nil
+}
+
+// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
+// output values which are not referenced in the graph. Since outputs and
+// locals always need to be evaluated, if they reference a resource that is not
+// available in the state the interpolation could fail.
+type PruneUnusedValuesTransformer struct{}
+
+func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
+    // this might need multiple runs in order to ensure that pruning a value
+    // doesn't affect a previously checked value.
+    for removed := 0; ; removed = 0 {
+        for _, v := range g.Vertices() {
+            switch v.(type) {
+            case *NodeApplyableOutput, *NodeLocal:
+                // OK
+            default:
+                continue
+            }
+
+            dependants := g.UpEdges(v)
+
+            switch dependants.Len() {
+            case 0:
+                // nothing at all depends on this
+                g.Remove(v)
+                removed++
+            case 1:
+                // because an output's destroy node always depends on the output,
+                // we need to check for the case of a single destroy node.
+                d := dependants.List()[0]
+                if _, ok := d.(*NodeDestroyableOutput); ok {
+                    g.Remove(v)
+                    removed++
+                }
+            }
+        }
+        if removed == 0 {
+            break
+        }
+    }
+
+    return nil
+}
+
+// ReferenceMap is a structure that can be used to efficiently check
+// for references on a graph.
+type ReferenceMap struct {
+    // m is the mapping of referenceable name to list of vertices that
+    // implement that name. This is built on initialization.
+    references   map[string][]dag.Vertex
+    referencedBy map[string][]dag.Vertex
+}
+
+// References returns the list of vertices that this vertex
+// references along with any missing references.
+func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
+    rn, ok := v.(GraphNodeReferencer)
+    if !ok {
+        return nil, nil
+    }
+
+    var matches []dag.Vertex
+    var missing []string
+    prefix := m.prefix(v)
+
+    for _, ns := range rn.References() {
+        found := false
+        for _, n := range strings.Split(ns, "/") {
+            n = prefix + n
+            parents, ok := m.references[n]
+            if !ok {
+                continue
+            }
+
+            // Mark that we found a match
+            found = true
+
+            for _, p := range parents {
+                // don't include self-references
+                if p == v {
+                    continue
+                }
+                matches = append(matches, p)
+            }
+
+            break
+        }
+
+        if !found {
+            missing = append(missing, ns)
+        }
+    }
+
+    return matches, missing
+}
+
+// ReferencedBy returns the list of vertices that reference the
+// vertex passed in.
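An illustration of the "/"-separated alternatives consumed by References above (the resource name is invented):

    // A reference such as "aws_instance.web.0/aws_instance.web.N" is tried
    // left to right: first the indexed instance "aws_instance.web.0", then
    // the unexpanded "aws_instance.web.N"; the first name present in the
    // reference map wins and the remaining alternatives are skipped.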
+func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
+    rn, ok := v.(GraphNodeReferenceable)
+    if !ok {
+        return nil
+    }
+
+    var matches []dag.Vertex
+    prefix := m.prefix(v)
+    for _, n := range rn.ReferenceableName() {
+        n = prefix + n
+        children, ok := m.referencedBy[n]
+        if !ok {
+            continue
+        }
+
+        // Make sure this isn't a self reference, which isn't included
+        selfRef := false
+        for _, p := range children {
+            if p == v {
+                selfRef = true
+                break
+            }
+        }
+        if selfRef {
+            continue
+        }
+
+        matches = append(matches, children...)
+    }
+
+    return matches
+}
+
+func (m *ReferenceMap) prefix(v dag.Vertex) string {
+    // If the node is stating it is already fully qualified then
+    // we don't have to create the prefix!
+    if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() {
+        return ""
+    }
+
+    // Create the prefix based on the path
+    var prefix string
+    if pn, ok := v.(GraphNodeSubPath); ok {
+        if path := normalizeModulePath(pn.Path()); len(path) > 1 {
+            prefix = modulePrefixStr(path) + "."
+        }
+    }
+
+    return prefix
+}
+
+// NewReferenceMap is used to create a new reference map for the
+// given set of vertices.
+func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
+    var m ReferenceMap
+
+    // Build the lookup table
+    refMap := make(map[string][]dag.Vertex)
+    for _, v := range vs {
+        // We're only looking for referenceable nodes
+        rn, ok := v.(GraphNodeReferenceable)
+        if !ok {
+            continue
+        }
+
+        // Go through and cache them
+        prefix := m.prefix(v)
+        for _, n := range rn.ReferenceableName() {
+            n = prefix + n
+            refMap[n] = append(refMap[n], v)
+        }
+
+        // If there is a path, it is always referenceable by that. For
+        // example, if this is a referenceable thing at path []string{"foo"},
+        // then it can be referenced at "module.foo"
+        if pn, ok := v.(GraphNodeSubPath); ok {
+            for _, p := range ReferenceModulePath(pn.Path()) {
+                refMap[p] = append(refMap[p], v)
+            }
+        }
+    }
+
+    // Build the lookup table for referenced by
+    refByMap := make(map[string][]dag.Vertex)
+    for _, v := range vs {
+        // We're only looking for nodes that reference things
+        rn, ok := v.(GraphNodeReferencer)
+        if !ok {
+            continue
+        }
+
+        // Go through and cache them
+        prefix := m.prefix(v)
+        for _, n := range rn.References() {
+            n = prefix + n
+            refByMap[n] = append(refByMap[n], v)
+        }
+    }
+
+    m.references = refMap
+    m.referencedBy = refByMap
+    return &m
+}
+
+// ReferenceModulePath returns the reference name for a module path. The path
+// "foo" would return "module.foo". If this is a deeply nested module, it will
+// be every parent as well. For example: ["foo", "bar"] would return both
+// "module.foo" and "module.foo.module.bar"
+func ReferenceModulePath(p []string) []string {
+    p = normalizeModulePath(p)
+    if len(p) == 1 {
+        // Root, no name
+        return nil
+    }
+
+    result := make([]string, 0, len(p)-1)
+    for i := len(p); i > 1; i-- {
+        result = append(result, modulePrefixStr(p[:i]))
+    }
+
+    return result
+}
+
+// ReferencesFromConfig returns the references that a configuration has
+// based on the interpolated variables in a configuration.
+func ReferencesFromConfig(c *config.RawConfig) []string {
+    var result []string
+    for _, v := range c.Variables {
+        if r := ReferenceFromInterpolatedVar(v); len(r) > 0 {
+            result = append(result, r...)
+        }
+    }
+
+    return result
+}
+
+// ReferenceFromInterpolatedVar returns the reference from this variable,
+// or nil if there is no reference.
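The mapping implemented below can be summarized with a few examples (all the interpolations are invented for illustration):

    // "${module.net.vpc_id}"     -> "module.net.output.vpc_id"
    // "${aws_instance.web.*.id}" -> "aws_instance.web.*"
    // "${aws_instance.web.1.id}" -> "aws_instance.web.1/aws_instance.web.N"
    // "${var.region}"            -> "var.region"
    // "${local.prefix}"          -> "local.prefix"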
+func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
+    switch v := v.(type) {
+    case *config.ModuleVariable:
+        return []string{fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)}
+    case *config.ResourceVariable:
+        id := v.ResourceId()
+
+        // If we have a multi-reference (splat), then we depend on ALL
+        // resources with this type/name.
+        if v.Multi && v.Index == -1 {
+            return []string{fmt.Sprintf("%s.*", id)}
+        }
+
+        // Otherwise, we depend on a specific index.
+        idx := v.Index
+        if !v.Multi || v.Index == -1 {
+            idx = 0
+        }
+
+        // Depend on the index, as well as "N" which represents the
+        // un-expanded set of resources.
+        return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)}
+    case *config.UserVariable:
+        return []string{fmt.Sprintf("var.%s", v.Name)}
+    case *config.LocalVariable:
+        return []string{fmt.Sprintf("local.%s", v.Name)}
+    default:
+        return nil
+    }
+}
+
+func modulePrefixStr(p []string) string {
+    // strip "root"
+    if len(p) > 0 && p[0] == rootModulePath[0] {
+        p = p[1:]
+    }
+
+    parts := make([]string, 0, len(p)*2)
+    for _, p := range p {
+        parts = append(parts, "module", p)
+    }
+
+    return strings.Join(parts, ".")
+}
+
+func modulePrefixList(result []string, prefix string) []string {
+    if prefix != "" {
+        for i, v := range result {
+            result[i] = fmt.Sprintf("%s.%s", prefix, v)
+        }
+    }
+
+    return result
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go
new file mode 100644
index 00000000..2e05edba
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go
@@ -0,0 +1,32 @@
+package terraform
+
+import (
+    "log"
+
+    "github.com/hashicorp/terraform/config/module"
+)
+
+// RemovedModuleTransformer implements GraphTransformer to add nodes indicating
+// when a module was removed from the configuration.
+type RemovedModuleTransformer struct {
+    Module *module.Tree // root module
+    State  *State
+}
+
+func (t *RemovedModuleTransformer) Transform(g *Graph) error {
+    // nothing to remove if there's no state!
+    if t.State == nil {
+        return nil
+    }
+
+    for _, m := range t.State.Modules {
+        c := t.Module.Child(m.Path[1:])
+        if c != nil {
+            continue
+        }
+
+        log.Printf("[DEBUG] module %s no longer in config\n", modulePrefixStr(m.Path))
+        g.Add(&NodeModuleRemoved{PathValue: m.Path})
+    }
+    return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource.go
deleted file mode 100644
index 23a23f3c..00000000
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource.go
+++ /dev/null
@@ -1,953 +0,0 @@
-package terraform
-
-import (
-    "fmt"
-    "strings"
-
-    "github.com/hashicorp/terraform/config"
-    "github.com/hashicorp/terraform/dag"
-)
-
-// ResourceCountTransformer is a GraphTransformer that expands the count
-// out for a specific resource.
-type ResourceCountTransformer struct {
-    Resource *config.Resource
-    Destroy  bool
-    Targets  []ResourceAddress
-}
-
-func (t *ResourceCountTransformer) Transform(g *Graph) error {
-    // Expand the resource count
-    count, err := t.Resource.Count()
-    if err != nil {
-        return err
-    }
-
-    // Don't allow the count to be negative
-    if count < 0 {
-        return fmt.Errorf("negative count: %d", count)
-    }
-
-    // For each count, build and add the node
-    nodes := make([]dag.Vertex, 0, count)
-    for i := 0; i < count; i++ {
-        // Set the index.
If our count is 1 we special case it so that - // we handle the "resource.0" and "resource" boundary properly. - index := i - if count == 1 { - index = -1 - } - - // Save the node for later so we can do connections. Make the - // proper node depending on if we're just a destroy node or if - // were a regular node. - var node dag.Vertex = &graphNodeExpandedResource{ - Index: index, - Resource: t.Resource, - Path: g.Path, - } - if t.Destroy { - node = &graphNodeExpandedResourceDestroy{ - graphNodeExpandedResource: node.(*graphNodeExpandedResource), - } - } - - // Skip nodes if targeting excludes them - if !t.nodeIsTargeted(node) { - continue - } - - // Add the node now - nodes = append(nodes, node) - g.Add(node) - } - - // Make the dependency connections - for _, n := range nodes { - // Connect the dependents. We ignore the return value for missing - // dependents since that should've been caught at a higher level. - g.ConnectDependent(n) - } - - return nil -} - -func (t *ResourceCountTransformer) nodeIsTargeted(node dag.Vertex) bool { - // no targets specified, everything stays in the graph - if len(t.Targets) == 0 { - return true - } - addressable, ok := node.(GraphNodeAddressable) - if !ok { - return false - } - - addr := addressable.ResourceAddress() - for _, targetAddr := range t.Targets { - if targetAddr.Equals(addr) { - return true - } - } - return false -} - -type graphNodeExpandedResource struct { - Index int - Resource *config.Resource - Path []string -} - -func (n *graphNodeExpandedResource) Name() string { - if n.Index == -1 { - return n.Resource.Id() - } - - return fmt.Sprintf("%s #%d", n.Resource.Id(), n.Index) -} - -// GraphNodeAddressable impl. -func (n *graphNodeExpandedResource) ResourceAddress() *ResourceAddress { - // We want this to report the logical index properly, so we must undo the - // special case from the expand - index := n.Index - if index == -1 { - index = 0 - } - return &ResourceAddress{ - Path: n.Path[1:], - Index: index, - InstanceType: TypePrimary, - Name: n.Resource.Name, - Type: n.Resource.Type, - Mode: n.Resource.Mode, - } -} - -// graphNodeConfig impl. -func (n *graphNodeExpandedResource) ConfigType() GraphNodeConfigType { - return GraphNodeConfigTypeResource -} - -// GraphNodeDependable impl. -func (n *graphNodeExpandedResource) DependableName() []string { - return []string{ - n.Resource.Id(), - n.stateId(), - } -} - -// GraphNodeDependent impl. -func (n *graphNodeExpandedResource) DependentOn() []string { - configNode := &GraphNodeConfigResource{Resource: n.Resource} - result := configNode.DependentOn() - - // Walk the variables to find any count-specific variables we depend on. - configNode.VarWalk(func(v config.InterpolatedVariable) { - rv, ok := v.(*config.ResourceVariable) - if !ok { - return - } - - // We only want ourselves - if rv.ResourceId() != n.Resource.Id() { - return - } - - // If this isn't a multi-access (which shouldn't be allowed but - // is verified elsewhere), then we depend on the specific count - // of this resource, ignoring ourself (which again should be - // validated elsewhere). 
- if rv.Index > -1 { - id := fmt.Sprintf("%s.%d", rv.ResourceId(), rv.Index) - if id != n.stateId() && id != n.stateId()+".0" { - result = append(result, id) - } - } - }) - - return result -} - -// GraphNodeProviderConsumer -func (n *graphNodeExpandedResource) ProvidedBy() []string { - return []string{resourceProvider(n.Resource.Type, n.Resource.Provider)} -} - -func (n *graphNodeExpandedResource) StateDependencies() []string { - depsRaw := n.DependentOn() - deps := make([]string, 0, len(depsRaw)) - for _, d := range depsRaw { - // Ignore any variable dependencies - if strings.HasPrefix(d, "var.") { - continue - } - - // This is sad. The dependencies are currently in the format of - // "module.foo.bar" (the full field). This strips the field off. - if strings.HasPrefix(d, "module.") { - parts := strings.SplitN(d, ".", 3) - d = strings.Join(parts[0:2], ".") - } - deps = append(deps, d) - } - - return deps -} - -// GraphNodeEvalable impl. -func (n *graphNodeExpandedResource) EvalTree() EvalNode { - var provider ResourceProvider - var resourceConfig *ResourceConfig - - // Build the resource. If we aren't part of a multi-resource, then - // we still consider ourselves as count index zero. - index := n.Index - if index < 0 { - index = 0 - } - resource := &Resource{ - Name: n.Resource.Name, - Type: n.Resource.Type, - CountIndex: index, - } - - seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} - - // Validate the resource - vseq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} - vseq.Nodes = append(vseq.Nodes, &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }) - vseq.Nodes = append(vseq.Nodes, &EvalInterpolate{ - Config: n.Resource.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }) - vseq.Nodes = append(vseq.Nodes, &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Resource.Name, - ResourceType: n.Resource.Type, - ResourceMode: n.Resource.Mode, - }) - - // Validate all the provisioners - for _, p := range n.Resource.Provisioners { - var provisioner ResourceProvisioner - vseq.Nodes = append(vseq.Nodes, &EvalGetProvisioner{ - Name: p.Type, - Output: &provisioner, - }, &EvalInterpolate{ - Config: p.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }, &EvalValidateProvisioner{ - Provisioner: &provisioner, - Config: &resourceConfig, - }) - } - - // Add the validation operations - seq.Nodes = append(seq.Nodes, &EvalOpFilter{ - Ops: []walkOperation{walkValidate}, - Node: vseq, - }) - - // Build instance info - info := n.instanceInfo() - seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info}) - - // Each resource mode has its own lifecycle - switch n.Resource.Mode { - case config.ManagedResourceMode: - seq.Nodes = append( - seq.Nodes, - n.managedResourceEvalNodes(resource, info, resourceConfig)..., - ) - case config.DataResourceMode: - seq.Nodes = append( - seq.Nodes, - n.dataResourceEvalNodes(resource, info, resourceConfig)..., - ) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Resource.Mode)) - } - - return seq -} - -func (n *graphNodeExpandedResource) managedResourceEvalNodes(resource *Resource, info *InstanceInfo, resourceConfig *ResourceConfig) []EvalNode { - var diff *InstanceDiff - var provider ResourceProvider - var state *InstanceState - - nodes := make([]EvalNode, 0, 5) - - // Refresh the resource - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: 
&provider, - }, - &EvalReadState{ - Name: n.stateId(), - Output: &state, - }, - &EvalRefresh{ - Info: info, - Provider: &provider, - State: &state, - Output: &state, - }, - &EvalWriteState{ - Name: n.stateId(), - ResourceType: n.Resource.Type, - Provider: n.Resource.Provider, - Dependencies: n.StateDependencies(), - State: &state, - }, - }, - }, - }) - - // Diff the resource - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkPlan}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.Resource.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }, - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - // Re-run validation to catch any errors we missed, e.g. type - // mismatches on computed values. - &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Resource.Name, - ResourceType: n.Resource.Type, - ResourceMode: n.Resource.Mode, - IgnoreWarnings: true, - }, - &EvalReadState{ - Name: n.stateId(), - Output: &state, - }, - &EvalDiff{ - Info: info, - Config: &resourceConfig, - Resource: n.Resource, - Provider: &provider, - State: &state, - OutputDiff: &diff, - OutputState: &state, - }, - &EvalCheckPreventDestroy{ - Resource: n.Resource, - Diff: &diff, - }, - &EvalWriteState{ - Name: n.stateId(), - ResourceType: n.Resource.Type, - Provider: n.Resource.Provider, - Dependencies: n.StateDependencies(), - State: &state, - }, - &EvalWriteDiff{ - Name: n.stateId(), - Diff: &diff, - }, - }, - }, - }) - - // Diff the resource for destruction - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkPlanDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - &EvalReadState{ - Name: n.stateId(), - Output: &state, - }, - &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, - }, - &EvalCheckPreventDestroy{ - Resource: n.Resource, - Diff: &diff, - }, - &EvalWriteDiff{ - Name: n.stateId(), - Diff: &diff, - }, - }, - }, - }) - - // Apply - var diffApply *InstanceDiff - var err error - var createNew bool - var createBeforeDestroyEnabled bool - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - // Get the saved diff for apply - &EvalReadDiff{ - Name: n.stateId(), - Diff: &diffApply, - }, - - // We don't want to do any destroys - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil { - return true, EvalEarlyExitError{} - } - - if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 { - return true, EvalEarlyExitError{} - } - - diffApply.SetDestroy(false) - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - destroy := false - if diffApply != nil { - destroy = diffApply.GetDestroy() || diffApply.RequiresNew() - } - - createBeforeDestroyEnabled = - n.Resource.Lifecycle.CreateBeforeDestroy && - destroy - - return createBeforeDestroyEnabled, nil - }, - Then: &EvalDeposeState{ - Name: n.stateId(), - }, - }, - - &EvalInterpolate{ - Config: n.Resource.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }, - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: n.stateId(), - Output: &state, - }, - // Re-run validation to catch any errors we missed, e.g. type - // mismatches on computed values. 
- &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Resource.Name, - ResourceType: n.Resource.Type, - ResourceMode: n.Resource.Mode, - IgnoreWarnings: true, - }, - &EvalDiff{ - Info: info, - Config: &resourceConfig, - Resource: n.Resource, - Provider: &provider, - Diff: &diffApply, - State: &state, - OutputDiff: &diffApply, - }, - - // Get the saved diff - &EvalReadDiff{ - Name: n.stateId(), - Diff: &diff, - }, - - // Compare the diffs - &EvalCompareDiff{ - Info: info, - One: &diff, - Two: &diffApply, - }, - - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: n.stateId(), - Output: &state, - }, - &EvalApply{ - Info: info, - State: &state, - Diff: &diffApply, - Provider: &provider, - Output: &state, - Error: &err, - CreateNew: &createNew, - }, - &EvalWriteState{ - Name: n.stateId(), - ResourceType: n.Resource.Type, - Provider: n.Resource.Provider, - Dependencies: n.StateDependencies(), - State: &state, - }, - &EvalApplyProvisioners{ - Info: info, - State: &state, - Resource: n.Resource, - InterpResource: resource, - CreateNew: &createNew, - Error: &err, - }, - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return createBeforeDestroyEnabled && err != nil, nil - }, - Then: &EvalUndeposeState{ - Name: n.stateId(), - State: &state, - }, - Else: &EvalWriteState{ - Name: n.stateId(), - ResourceType: n.Resource.Type, - Provider: n.Resource.Provider, - Dependencies: n.StateDependencies(), - State: &state, - }, - }, - - // We clear the diff out here so that future nodes - // don't see a diff that is already complete. There - // is no longer a diff! - &EvalWriteDiff{ - Name: n.stateId(), - Diff: nil, - }, - - &EvalApplyPost{ - Info: info, - State: &state, - Error: &err, - }, - &EvalUpdateStateHook{}, - }, - }, - }) - - return nodes -} - -func (n *graphNodeExpandedResource) dataResourceEvalNodes(resource *Resource, info *InstanceInfo, resourceConfig *ResourceConfig) []EvalNode { - //var diff *InstanceDiff - var provider ResourceProvider - var config *ResourceConfig - var diff *InstanceDiff - var state *InstanceState - - nodes := make([]EvalNode, 0, 5) - - // Refresh the resource - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkRefresh}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - - // Always destroy the existing state first, since we must - // make sure that values from a previous read will not - // get interpolated if we end up needing to defer our - // loading until apply time. - &EvalWriteState{ - Name: n.stateId(), - ResourceType: n.Resource.Type, - Provider: n.Resource.Provider, - Dependencies: n.StateDependencies(), - State: &state, // state is nil here - }, - - &EvalInterpolate{ - Config: n.Resource.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - - // The rest of this pass can proceed only if there are no - // computed values in our config. - // (If there are, we'll deal with this during the plan and - // apply phases.) - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 { - return true, EvalEarlyExitError{} - } - - return true, nil - }, - Then: EvalNoop{}, - }, - - // The remainder of this pass is the same as running - // a "plan" pass immediately followed by an "apply" pass, - // populating the state early so it'll be available to - // provider configurations that need this data during - // refresh/plan. 
- - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - - &EvalReadDataDiff{ - Info: info, - Config: &config, - Provider: &provider, - Output: &diff, - OutputState: &state, - }, - - &EvalReadDataApply{ - Info: info, - Diff: &diff, - Provider: &provider, - Output: &state, - }, - - &EvalWriteState{ - Name: n.stateId(), - ResourceType: n.Resource.Type, - Provider: n.Resource.Provider, - Dependencies: n.StateDependencies(), - State: &state, - }, - - &EvalUpdateStateHook{}, - }, - }, - }) - - // Diff the resource - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkPlan}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - - &EvalReadState{ - Name: n.stateId(), - Output: &state, - }, - - // We need to re-interpolate the config here because some - // of the attributes may have become computed during - // earlier planning, due to other resources having - // "requires new resource" diffs. - &EvalInterpolate{ - Config: n.Resource.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0 - - // If the configuration is complete and we - // already have a state then we don't need to - // do any further work during apply, because we - // already populated the state during refresh. - if !computed && state != nil { - return true, EvalEarlyExitError{} - } - - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - - &EvalReadDataDiff{ - Info: info, - Config: &config, - Provider: &provider, - Output: &diff, - OutputState: &state, - }, - - &EvalWriteState{ - Name: n.stateId(), - ResourceType: n.Resource.Type, - Provider: n.Resource.Provider, - Dependencies: n.StateDependencies(), - State: &state, - }, - - &EvalWriteDiff{ - Name: n.stateId(), - Diff: &diff, - }, - }, - }, - }) - - // Diff the resource for destruction - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkPlanDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - - &EvalReadState{ - Name: n.stateId(), - Output: &state, - }, - - // Since EvalDiffDestroy doesn't interact with the - // provider at all, we can safely share the same - // implementation for data vs. managed resources. - &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, - }, - - &EvalWriteDiff{ - Name: n.stateId(), - Diff: &diff, - }, - }, - }, - }) - - // Apply - nodes = append(nodes, &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - // Get the saved diff for apply - &EvalReadDiff{ - Name: n.stateId(), - Diff: &diff, - }, - - // Stop here if we don't actually have a diff - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diff == nil { - return true, EvalEarlyExitError{} - } - - if diff.GetAttributesLen() == 0 { - return true, EvalEarlyExitError{} - } - - return true, nil - }, - Then: EvalNoop{}, - }, - - // We need to re-interpolate the config here, rather than - // just using the diff's values directly, because we've - // potentially learned more variable values during the - // apply pass that weren't known when the diff was produced. - &EvalInterpolate{ - Config: n.Resource.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - - // Make a new diff with our newly-interpolated config. 
- &EvalReadDataDiff{ - Info: info, - Config: &config, - Previous: &diff, - Provider: &provider, - Output: &diff, - }, - - &EvalReadDataApply{ - Info: info, - Diff: &diff, - Provider: &provider, - Output: &state, - }, - - &EvalWriteState{ - Name: n.stateId(), - ResourceType: n.Resource.Type, - Provider: n.Resource.Provider, - Dependencies: n.StateDependencies(), - State: &state, - }, - - // Clear the diff now that we've applied it, so - // later nodes won't see a diff that's now a no-op. - &EvalWriteDiff{ - Name: n.stateId(), - Diff: nil, - }, - - &EvalUpdateStateHook{}, - }, - }, - }) - - return nodes -} - -// instanceInfo is used for EvalTree. -func (n *graphNodeExpandedResource) instanceInfo() *InstanceInfo { - return &InstanceInfo{Id: n.stateId(), Type: n.Resource.Type} -} - -// stateId is the name used for the state key -func (n *graphNodeExpandedResource) stateId() string { - if n.Index == -1 { - return n.Resource.Id() - } - - return fmt.Sprintf("%s.%d", n.Resource.Id(), n.Index) -} - -// GraphNodeStateRepresentative impl. -func (n *graphNodeExpandedResource) StateId() []string { - return []string{n.stateId()} -} - -// graphNodeExpandedResourceDestroy represents an expanded resource that -// is to be destroyed. -type graphNodeExpandedResourceDestroy struct { - *graphNodeExpandedResource -} - -func (n *graphNodeExpandedResourceDestroy) Name() string { - return fmt.Sprintf("%s (destroy)", n.graphNodeExpandedResource.Name()) -} - -// graphNodeConfig impl. -func (n *graphNodeExpandedResourceDestroy) ConfigType() GraphNodeConfigType { - return GraphNodeConfigTypeResource -} - -// GraphNodeEvalable impl. -func (n *graphNodeExpandedResourceDestroy) EvalTree() EvalNode { - info := n.instanceInfo() - - var diffApply *InstanceDiff - var provider ResourceProvider - var state *InstanceState - var err error - return &EvalOpFilter{ - Ops: []walkOperation{walkApply, walkDestroy}, - Node: &EvalSequence{ - Nodes: []EvalNode{ - // Get the saved diff for apply - &EvalReadDiff{ - Name: n.stateId(), - Diff: &diffApply, - }, - - // Filter the diff so we only get the destroy - &EvalFilterDiff{ - Diff: &diffApply, - Output: &diffApply, - Destroy: true, - }, - - // If we're not destroying, then compare diffs - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply != nil && diffApply.GetDestroy() { - return true, nil - } - - return true, EvalEarlyExitError{} - }, - Then: EvalNoop{}, - }, - - // Load the instance info so we have the module path set - &EvalInstanceInfo{Info: info}, - - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: n.stateId(), - Output: &state, - }, - &EvalRequireState{ - State: &state, - }, - // Make sure we handle data sources properly. 
- &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if n.Resource.Mode == config.DataResourceMode { - return true, nil - } - - return false, nil - }, - - Then: &EvalReadDataApply{ - Info: info, - Diff: &diffApply, - Provider: &provider, - Output: &state, - }, - Else: &EvalApply{ - Info: info, - State: &state, - Diff: &diffApply, - Provider: &provider, - Output: &state, - Error: &err, - }, - }, - &EvalWriteState{ - Name: n.stateId(), - ResourceType: n.Resource.Type, - Provider: n.Resource.Provider, - Dependencies: n.StateDependencies(), - State: &state, - }, - &EvalApplyPost{ - Info: info, - State: &state, - Error: &err, - }, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go new file mode 100644 index 00000000..e528b37b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go @@ -0,0 +1,53 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/dag" +) + +// ResourceCountTransformer is a GraphTransformer that expands the count +// out for a specific resource. +// +// This assumes that the count is already interpolated. +type ResourceCountTransformer struct { + Concrete ConcreteResourceNodeFunc + + Count int + Addr *ResourceAddress +} + +func (t *ResourceCountTransformer) Transform(g *Graph) error { + // Don't allow the count to be negative + if t.Count < 0 { + return fmt.Errorf("negative count: %d", t.Count) + } + + // For each count, build and add the node + for i := 0; i < t.Count; i++ { + // Set the index. If our count is 1 we special case it so that + // we handle the "resource.0" and "resource" boundary properly. + index := i + if t.Count == 1 { + index = -1 + } + + // Build the resource address + addr := t.Addr.Copy() + addr.Index = index + + // Build the abstract node and the concrete one + abstract := &NodeAbstractResource{ + Addr: addr, + } + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + // Add it to the graph + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go index 7a422b82..aee053d1 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go @@ -36,7 +36,3 @@ type graphNodeRoot struct{} func (n graphNodeRoot) Name() string { return rootNodeName } - -func (n graphNodeRoot) Flatten(p []string) (dag.Vertex, error) { - return n, nil -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go new file mode 100644 index 00000000..471cd746 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go @@ -0,0 +1,65 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/dag" +) + +// StateTransformer is a GraphTransformer that adds the elements of +// the state to the graph. +// +// This transform is used for example by the DestroyPlanGraphBuilder to ensure +// that only resources that are in the state are represented in the graph. 
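The ResourceCountTransformer added above keeps the pre-0.12 index convention: a count of 1 collapses to index -1 so a single instance keeps the bare state key rather than a ".0" suffix. A minimal standalone sketch of that convention (stateKey and expand are illustrative helpers, not part of the patch):

```go
package main

import "fmt"

// stateKey is a hypothetical stand-in for the stateId logic in the removed
// graphNodeExpandedResource code: index -1 means "no index suffix".
func stateKey(id string, index int) string {
	if index == -1 {
		return id
	}
	return fmt.Sprintf("%s.%d", id, index)
}

// expand mirrors the loop in ResourceCountTransformer.Transform.
func expand(id string, count int) []string {
	keys := make([]string, 0, count)
	for i := 0; i < count; i++ {
		index := i
		if count == 1 {
			// Special case so the "resource" / "resource.0" boundary is
			// handled consistently.
			index = -1
		}
		keys = append(keys, stateKey(id, index))
	}
	return keys
}

func main() {
	fmt.Println(expand("aws_instance.web", 1)) // [aws_instance.web]
	fmt.Println(expand("aws_instance.web", 3)) // [aws_instance.web.0 aws_instance.web.1 aws_instance.web.2]
}
```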
+type StateTransformer struct { + Concrete ConcreteResourceNodeFunc + + State *State +} + +func (t *StateTransformer) Transform(g *Graph) error { + // If the state is nil or empty (nil is empty) then do nothing + if t.State.Empty() { + return nil + } + + // Go through all the modules in the diff. + log.Printf("[TRACE] StateTransformer: starting") + var nodes []dag.Vertex + for _, ms := range t.State.Modules { + log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path) + + // Go through all the resources in this module. + for name, rs := range ms.Resources { + log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs) + + // Parse the resource address from the state key + addr, err := parseResourceAddressInternal(name) + if err != nil { + panic(fmt.Sprintf( + "Error parsing internal name, this is a bug: %q", name)) + } + + // Very important: add the module path for this resource to + // the address. Remove "root" from it. + addr.Path = ms.Path[1:] + + // Add the resource to the graph + abstract := &NodeAbstractResource{Addr: addr} + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + nodes = append(nodes, node) + } + } + + // Add all the nodes to the graph + for _, n := range nodes { + g.Add(n) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go index 4e99badd..af6defe3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go @@ -6,6 +6,30 @@ import ( "github.com/hashicorp/terraform/dag" ) +// GraphNodeTargetable is an interface for graph nodes to implement when they +// need to be told about incoming targets. This is useful for nodes that need +// to respect targets as they dynamically expand. Note that the list of targets +// provided will contain every target given, and each implementing graph +// node must filter this list to targets considered relevant. +type GraphNodeTargetable interface { + SetTargets([]ResourceAddress) +} + +// GraphNodeTargetDownstream is an interface for graph nodes that need to +// remain present under targeting if any of their dependencies are targeted. +// TargetDownstream is called with the set of vertices that are direct +// dependencies for the node, and it should return true if the node must remain +// in the graph in support of those dependencies. +// +// This is used in situations where the dependency edges are representing an +// ordering relationship but the dependency must still be visited if its +// dependencies are visited. This is true for outputs, for example, since +// they must get updated if any of their dependent resources get updated, +// which would not normally be true if one of their dependencies were targeted. +type GraphNodeTargetDownstream interface { + TargetDownstream(targeted, untargeted *dag.Set) bool +} + // TargetsTransformer is a GraphTransformer that, when the user specifies a // list of resources to target, limits the graph to only those resources and // their dependencies. @@ -17,6 +41,12 @@ type TargetsTransformer struct { // that already have the targets parsed ParsedTargets []ResourceAddress + // If set, the index portions of resource addresses will be ignored + // for comparison. This is used when transforming a graph where + // counted resources have not yet been expanded, since otherwise + // the unexpanded nodes (which never have indices) would not match.
+ IgnoreIndices bool + // Set to true when we're in a `terraform destroy` or a // `terraform plan -destroy` Destroy bool @@ -28,8 +58,10 @@ func (t *TargetsTransformer) Transform(g *Graph) error { if err != nil { return err } + t.ParsedTargets = addrs } + if len(t.ParsedTargets) > 0 { targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets) if err != nil { @@ -38,18 +70,21 @@ func (t *TargetsTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { removable := false - if _, ok := v.(GraphNodeAddressable); ok { + if _, ok := v.(GraphNodeResource); ok { removable = true } + if vr, ok := v.(RemovableIfNotTargeted); ok { removable = vr.RemoveIfNotTargeted() } + if removable && !targetedNodes.Include(v) { log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v)) g.Remove(v) } } } + return nil } @@ -62,6 +97,7 @@ func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) { } addrs[i] = *ta } + return addrs, nil } @@ -71,7 +107,10 @@ func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) { func (t *TargetsTransformer) selectTargetedNodes( g *Graph, addrs []ResourceAddress) (*dag.Set, error) { targetedNodes := new(dag.Set) - for _, v := range g.Vertices() { + + vertices := g.Vertices() + + for _, v := range vertices { if t.nodeIsTarget(v, addrs) { targetedNodes.Add(v) @@ -98,21 +137,128 @@ func (t *TargetsTransformer) selectTargetedNodes( } } } - return targetedNodes, nil + return t.addDependencies(targetedNodes, g) +} + +func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (*dag.Set, error) { + // Handle nodes that need to be included if their dependencies are included. + // This requires multiple passes since we need to catch transitive + // dependencies if and only if they are via other nodes that also + // support TargetDownstream. For example: + // output -> output -> targeted-resource: both outputs need to be targeted + // output -> non-targeted-resource -> targeted-resource: output not targeted + // + // We'll keep looping until we stop targeting more nodes. + queue := targetedNodes.List() + for len(queue) > 0 { + vertices := queue + queue = nil // ready to append for next iteration if necessary + for _, v := range vertices { + dependers := g.UpEdges(v) + if dependers == nil { + // indicates that there are no up edges for this node, so + // we have nothing to do here. + continue + } + + dependers = dependers.Filter(func(dv interface{}) bool { + _, ok := dv.(GraphNodeTargetDownstream) + return ok + }) + + if dependers.Len() == 0 { + continue + } + + for _, dv := range dependers.List() { + if targetedNodes.Include(dv) { + // Already present, so nothing to do + continue + } + + // We'll give the node some information about what it's + // depending on in case that informs its decision about whether + // it is safe to be targeted. + deps := g.DownEdges(v) + + depsTargeted := deps.Intersection(targetedNodes) + depsUntargeted := deps.Difference(depsTargeted) + + if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) { + targetedNodes.Add(dv) + // Need to visit this node on the next pass to see if it + // has any transitive dependers. + queue = append(queue, dv) + } + } + } + } + + return targetedNodes.Filter(func(dv interface{}) bool { + return filterPartialOutputs(dv, targetedNodes, g) + }), nil +} + +// Outputs may have been included transitively, but if any of their +// dependencies have been pruned they won't be resolvable.
+// If nothing depends on the output, and the output is missing any +// dependencies, remove it from the graph. +// This essentially maintains the previous behavior where interpolation in +// outputs would fail silently, but can now surface errors where the output +// is required. +func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool { + // should this just be done with TargetDownstream? + if _, ok := v.(*NodeApplyableOutput); !ok { + return true + } + + dependers := g.UpEdges(v) + for _, d := range dependers.List() { + if _, ok := d.(*NodeCountBoundary); ok { + continue + } + + if !targetedNodes.Include(d) { + // this one is going to be removed, so it doesn't count + continue + } + + // as soon as we see a real dependency, we mark this as + // non-removable + return true + } + + depends := g.DownEdges(v) + + for _, d := range depends.List() { + if !targetedNodes.Include(d) { + log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph", + dag.VertexName(v), dag.VertexName(d)) + return false + } + } + return true } func (t *TargetsTransformer) nodeIsTarget( v dag.Vertex, addrs []ResourceAddress) bool { - r, ok := v.(GraphNodeAddressable) + r, ok := v.(GraphNodeResource) if !ok { return false } - addr := r.ResourceAddress() + + addr := r.ResourceAddr() for _, targetAddr := range addrs { - if targetAddr.Equals(addr) { + if t.IgnoreIndices { + // targetAddr is not a pointer, so we can safely mutate it without + // interfering with references elsewhere. + targetAddr.Index = -1 + } + if targetAddr.Contains(addr) { return true } } + return false } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go new file mode 100644 index 00000000..b31e2c76 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go @@ -0,0 +1,40 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/config/module" +) + +// RootVariableTransformer is a GraphTransformer that adds all the root +// variables to the graph. +// +// Root variables are currently no-ops but they must be added to the +// graph since downstream things that depend on them must be able to +// reach them. +type RootVariableTransformer struct { + Module *module.Tree +} + +func (t *RootVariableTransformer) Transform(g *Graph) error { + // If no config, no variables + if t.Module == nil { + return nil + } + + // If we have no vars, we're done! + vars := t.Module.Config().Variables + if len(vars) == 0 { + return nil + } + + // Add all variables here + for _, v := range vars { + node := &NodeRootVariable{ + Config: v, + } + + // Add it! + g.Add(node) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go index 7852bc42..d828c921 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go @@ -1,13 +1,18 @@ package terraform +import "sync" + // MockUIOutput is an implementation of UIOutput that can be used for tests. 
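The IgnoreIndices handling in nodeIsTarget above is easiest to see with toy addresses. A hedged sketch using a simplified stand-in for ResourceAddress (the real Contains logic also covers module paths and resource modes):

```go
package main

import "fmt"

// addr is a toy stand-in for terraform's ResourceAddress; only the fields
// needed for this illustration are included.
type addr struct {
	Type, Name string
	Index      int // -1 means "no index"
}

// contains mirrors the index semantics used by nodeIsTarget: a target index
// of -1 matches any index on the candidate node.
func contains(target, node addr) bool {
	if target.Type != node.Type || target.Name != node.Name {
		return false
	}
	return target.Index == -1 || target.Index == node.Index
}

func main() {
	target := addr{Type: "aws_instance", Name: "web", Index: 2}
	node := addr{Type: "aws_instance", Name: "web", Index: -1} // unexpanded

	// Without IgnoreIndices the unexpanded node would not match.
	fmt.Println(contains(target, node)) // false

	// With IgnoreIndices the transformer clears the target's index first.
	target.Index = -1
	fmt.Println(contains(target, node)) // true
}
```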
type MockUIOutput struct { + sync.Mutex OutputCalled bool OutputMessage string OutputFn func(string) } func (o *MockUIOutput) Output(v string) { + o.Lock() + defer o.Unlock() o.OutputCalled = true o.OutputMessage = v if o.OutputFn != nil { diff --git a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go new file mode 100644 index 00000000..a42613e8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go @@ -0,0 +1,13 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/httpclient" +) + +// Generate a UserAgent string +// +// Deprecated: Use httpclient.UserAgentString if you are setting your +// own User-Agent header. +func UserAgentString() string { + return httpclient.UserAgentString() +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go index d1ca197e..752241af 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/util.go +++ b/vendor/github.com/hashicorp/terraform/terraform/util.go @@ -1,7 +1,9 @@ package terraform import ( - "strings" + "sort" + + "github.com/hashicorp/terraform/config" ) // Semaphore is a wrapper around a channel to provide @@ -46,18 +48,8 @@ func (s Semaphore) Release() { } } -// resourceProvider returns the provider name for the given type. -func resourceProvider(t, alias string) string { - if alias != "" { - return alias - } - - idx := strings.IndexRune(t, '_') - if idx == -1 { - return "" - } - - return t[:idx] +func resourceProvider(resourceType, explicitProvider string) string { + return config.ResourceProviderFullName(resourceType, explicitProvider) } // strSliceContains checks if a given string is contained in a slice @@ -70,3 +62,20 @@ func strSliceContains(haystack []string, needle string) bool { } return false } + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go index 95b607fd..300f2adb 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/variables.go +++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/helper/hilmapstructure" ) // Variables returns the fully loaded set of variables to use with @@ -89,7 +90,9 @@ func Variables( switch varType { case config.VariableTypeMap: - varSetMap(result, k, varVal) + if err := varSetMap(result, k, varVal); err != nil { + return nil, err + } default: result[k] = varVal } @@ -104,10 +107,27 @@ func Variables( } switch schema.Type() { + case config.VariableTypeList: + result[k] = v case config.VariableTypeMap: - varSetMap(result, k, v) + if err := varSetMap(result, k, v); err != nil { + return nil, err + } + case config.VariableTypeString: + // Convert to a string and set. We don't catch any errors + // here because the validation step later should catch + // any type errors. 
+ var strVal string + if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { + result[k] = strVal + } else { + result[k] = v + } default: - result[k] = v + panic(fmt.Sprintf( + "Unhandled var type: %T\n\n"+ + "THIS IS A BUG. Please report it.", + schema.Type())) } } } @@ -118,16 +138,16 @@ func Variables( // varSetMap sets or merges the map in "v" with the key "k" in the // "current" set of variables. This is just a private function to remove // duplicate logic in Variables -func varSetMap(current map[string]interface{}, k string, v interface{}) { +func varSetMap(current map[string]interface{}, k string, v interface{}) error { existing, ok := current[k] if !ok { current[k] = v - return + return nil } existingMap, ok := existing.(map[string]interface{}) if !ok { - panic(fmt.Sprintf("%s is not a map, this is a bug in Terraform.", k)) + panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k)) } switch typedV := v.(type) { @@ -140,6 +160,7 @@ func varSetMap(current map[string]interface{}, k string, v interface{}) { existingMap[newKey] = newVal } default: - panic(fmt.Sprintf("%s is not a map, this is a bug in Terraform.", k)) + return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v)) } + return nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go index ffd49bbb..ac730154 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/version.go +++ b/vendor/github.com/hashicorp/terraform/terraform/version.go @@ -1,31 +1,10 @@ package terraform import ( - "fmt" - - "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/version" ) -// The main version number that is being run at the moment. -const Version = "0.7.4" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" - -// SemVersion is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. -var SemVersion = version.Must(version.NewVersion(Version)) - -// VersionHeader is the header name used to send the current terraform version -// in http requests. -const VersionHeader = "Terraform-Version" - +// TODO: update providers to use the version package directly func VersionString() string { - if VersionPrerelease != "" { - return fmt.Sprintf("%s-%s", Version, VersionPrerelease) - } - return Version + return version.String() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go new file mode 100644 index 00000000..1f430457 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go @@ -0,0 +1,71 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/module" + + tfversion "github.com/hashicorp/terraform/version" +) + +// CheckRequiredVersion verifies that any version requirements specified by +// the configuration are met. +// +// This checks the root module as well as any additional version requirements +// from child modules. +// +// This is tested in context_test.go. 
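CheckRequiredVersion below leans on go-version constraint checking. A minimal sketch of that mechanism; the constraint string and version are illustrative:

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// required_version as it might appear in a terraform {} block.
	cs, err := version.NewConstraint(">= 0.11.0, < 0.12.0")
	if err != nil {
		panic(err) // syntax error in the constraint string
	}

	// The currently running version, analogous to tfversion.SemVer.
	running := version.Must(version.NewVersion("0.11.10"))

	fmt.Println(cs.Check(running)) // true
}
```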
+func CheckRequiredVersion(m *module.Tree) error { + // Check any children + for _, c := range m.Children() { + if err := CheckRequiredVersion(c); err != nil { + return err + } + } + + var tf *config.Terraform + if c := m.Config(); c != nil { + tf = c.Terraform + } + + // If there is no Terraform config or the required version isn't set, + // we move on. + if tf == nil || tf.RequiredVersion == "" { + return nil + } + + // Path for errors + module := "root" + if path := normalizeModulePath(m.Path()); len(path) > 1 { + module = modulePrefixStr(path) + } + + // Check this version requirement of this module + cs, err := version.NewConstraint(tf.RequiredVersion) + if err != nil { + return fmt.Errorf( + "%s: terraform.required_version %q syntax error: %s", + module, + tf.RequiredVersion, err) + } + + if !cs.Check(tfversion.SemVer) { + return fmt.Errorf( + "The currently running version of Terraform doesn't meet the\n"+ + "version requirements explicitly specified by the configuration.\n"+ + "Please use the required version or update the configuration.\n"+ + "Note that version requirements are usually set for a reason, so\n"+ + "we recommend verifying with whoever set the version requirements\n"+ + "prior to making any manual changes.\n\n"+ + " Module: %s\n"+ + " Required version: %s\n"+ + " Current version: %s", + module, + tf.RequiredVersion, + tfversion.SemVer) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go index 8fb33d7b..4cfc528e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go @@ -1,8 +1,8 @@ -// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT +// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT. 
package terraform -import "fmt" +import "strconv" const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" @@ -10,7 +10,7 @@ var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96} func (i walkOperation) String() string { if i >= walkOperation(len(_walkOperation_index)-1) { - return fmt.Sprintf("walkOperation(%d)", i) + return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" } return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go new file mode 100644 index 00000000..2c23f76a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go @@ -0,0 +1,26 @@ +package tfdiags + +type Diagnostic interface { + Severity() Severity + Description() Description + Source() Source +} + +type Severity rune + +//go:generate stringer -type=Severity + +const ( + Error Severity = 'E' + Warning Severity = 'W' +) + +type Description struct { + Summary string + Detail string +} + +type Source struct { + Subject *SourceRange + Context *SourceRange +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go new file mode 100644 index 00000000..667ba809 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go @@ -0,0 +1,181 @@ +package tfdiags + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/errwrap" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl2/hcl" +) + +// Diagnostics is a list of diagnostics. Diagnostics is intended to be used +// where a Go "error" might normally be used, allowing richer information +// to be conveyed (more context, support for warnings). +// +// A nil Diagnostics is a valid, empty diagnostics list, thus allowing +// heap allocation to be avoided in the common case where there are no +// diagnostics to report at all. +type Diagnostics []Diagnostic + +// Append is the main interface for constructing Diagnostics lists, taking +// an existing list (which may be nil) and appending the new objects to it +// after normalizing them to be implementations of Diagnostic. +// +// The usual pattern for a function that natively "speaks" diagnostics is: +// +// // Create a nil Diagnostics at the start of the function +// var diags diag.Diagnostics +// +// // At later points, build on it if errors / warnings occur: +// foo, err := DoSomethingRisky() +// if err != nil { +// diags = diags.Append(err) +// } +// +// // Eventually return the result and diagnostics in place of error +// return result, diags +// +// Append accepts a variety of different diagnostic-like types, including +// native Go errors and HCL diagnostics. It also knows how to unwrap +// a multierror.Error into separate error diagnostics. It can be passed +// another Diagnostics to concatenate the two lists. If given something +// it cannot handle, this function will panic. +func (diags Diagnostics) Append(new ...interface{}) Diagnostics { + for _, item := range new { + if item == nil { + continue + } + + switch ti := item.(type) { + case Diagnostic: + diags = append(diags, ti) + case Diagnostics: + diags = append(diags, ti...) 
// flatten + case diagnosticsAsError: + diags = diags.Append(ti.Diagnostics) // unwrap + case hcl.Diagnostics: + for _, hclDiag := range ti { + diags = append(diags, hclDiagnostic{hclDiag}) + } + case *hcl.Diagnostic: + diags = append(diags, hclDiagnostic{ti}) + case *multierror.Error: + for _, err := range ti.Errors { + diags = append(diags, nativeError{err}) + } + case error: + switch { + case errwrap.ContainsType(ti, Diagnostics(nil)): + // If we have an errwrap wrapper with a Diagnostics hiding + // inside then we'll unpick it here to get access to the + // individual diagnostics. + diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil))) + case errwrap.ContainsType(ti, hcl.Diagnostics(nil)): + // Likewise, if we have HCL diagnostics we'll unpick that too. + diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil))) + default: + diags = append(diags, nativeError{ti}) + } + default: + panic(fmt.Errorf("can't construct diagnostic(s) from %T", item)) + } + } + + // Given the above, we should never end up with a non-nil empty slice + // here, but we'll make sure of that so callers can rely on empty == nil + if len(diags) == 0 { + return nil + } + + return diags +} + +// HasErrors returns true if any of the diagnostics in the list have +// a severity of Error. +func (diags Diagnostics) HasErrors() bool { + for _, diag := range diags { + if diag.Severity() == Error { + return true + } + } + return false +} + +// ForRPC returns a version of the receiver that has been simplified so that +// it is friendly to RPC protocols. +// +// Currently this means that it can be serialized with encoding/gob and +// subsequently re-inflated. It may later grow to include other serialization +// formats. +// +// Note that this loses information about the original objects used to +// construct the diagnostics, so e.g. the errwrap API will not work as +// expected on an error-wrapped Diagnostics that came from ForRPC. +func (diags Diagnostics) ForRPC() Diagnostics { + ret := make(Diagnostics, len(diags)) + for i := range diags { + ret[i] = makeRPCFriendlyDiag(diags[i]) + } + return ret +} + +// Err flattens a diagnostics list into a single Go error, or to nil +// if the diagnostics list does not include any error-level diagnostics. +// +// This can be used to smuggle diagnostics through an API that deals in +// native errors, but unfortunately it will lose naked warnings (warnings +// that aren't accompanied by at least one error) since such APIs have no +// mechanism through which to report these. +// +// return result, diags.Error() +func (diags Diagnostics) Err() error { + if !diags.HasErrors() { + return nil + } + return diagnosticsAsError{diags} +} + +type diagnosticsAsError struct { + Diagnostics +} + +func (dae diagnosticsAsError) Error() string { + diags := dae.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. 
+ return "no errors" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + for _, diag := range dae.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped +// diagnostics object can be picked apart by errwrap-aware code. +func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go new file mode 100644 index 00000000..c427879e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go @@ -0,0 +1,16 @@ +// Package tfdiags is a utility package for representing errors and +// warnings in a manner that allows us to produce good messages for the +// user. +// +// "diag" is short for "diagnostics", and is meant as a general word for +// feedback to a user about potential or actual problems. +// +// A design goal for this package is for it to be able to provide rich +// messaging where possible but to also be pragmatic about dealing with +// generic errors produced by system components that _can't_ provide +// such rich messaging. As a consequence, the main types in this package -- +// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" +// over an error channel and then be unpacked at the other end, so that +// error diagnostics (at least) can transit through APIs that are not +// aware of this package. 
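Putting the tfdiags API together, a short hedged sketch of the pattern the package comment above describes: accumulate diagnostics with Append, then smuggle them through an error-only API with Err (the warning is lost in that conversion, as documented):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	var diags tfdiags.Diagnostics

	// Append normalizes warnings, native errors, HCL diagnostics, etc.
	diags = diags.Append(tfdiags.SimpleWarning("deprecated argument"))
	diags = diags.Append(errors.New("something went wrong"))

	fmt.Println(diags.HasErrors()) // true

	// Err flattens the list into a single error for error-only APIs.
	if err := diags.Err(); err != nil {
		fmt.Println(err) // something went wrong
	}
}
```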
+package tfdiags diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/error.go b/vendor/github.com/hashicorp/terraform/tfdiags/error.go new file mode 100644 index 00000000..35edc304 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/error.go @@ -0,0 +1,23 @@ +package tfdiags + +// nativeError is a Diagnostic implementation that wraps a normal Go error +type nativeError struct { + err error +} + +var _ Diagnostic = nativeError{} + +func (e nativeError) Severity() Severity { + return Error +} + +func (e nativeError) Description() Description { + return Description{ + Summary: e.err.Error(), + } +} + +func (e nativeError) Source() Source { + // No source information available for a native error + return Source{} +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go new file mode 100644 index 00000000..24851f4d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go @@ -0,0 +1,77 @@ +package tfdiags + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// hclDiagnostic is a Diagnostic implementation that wraps an HCL Diagnostic +type hclDiagnostic struct { + diag *hcl.Diagnostic +} + +var _ Diagnostic = hclDiagnostic{} + +func (d hclDiagnostic) Severity() Severity { + switch d.diag.Severity { + case hcl.DiagWarning: + return Warning + default: + return Error + } +} + +func (d hclDiagnostic) Description() Description { + return Description{ + Summary: d.diag.Summary, + Detail: d.diag.Detail, + } +} + +func (d hclDiagnostic) Source() Source { + var ret Source + if d.diag.Subject != nil { + rng := SourceRangeFromHCL(*d.diag.Subject) + ret.Subject = &rng + } + if d.diag.Context != nil { + rng := SourceRangeFromHCL(*d.diag.Context) + ret.Context = &rng + } + return ret +} + +// SourceRangeFromHCL constructs a SourceRange from the corresponding range +// type within the HCL package. +func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { + return SourceRange{ + Filename: hclRange.Filename, + Start: SourcePos{ + Line: hclRange.Start.Line, + Column: hclRange.Start.Column, + Byte: hclRange.Start.Byte, + }, + End: SourcePos{ + Line: hclRange.End.Line, + Column: hclRange.End.Column, + Byte: hclRange.End.Byte, + }, + } +} + +// ToHCL constructs an HCL Range from the receiving SourceRange. This is the +// opposite of SourceRangeFromHCL. +func (r SourceRange) ToHCL() hcl.Range { + return hcl.Range{ + Filename: r.Filename, + Start: hcl.Pos{ + Line: r.Start.Line, + Column: r.Start.Column, + Byte: r.Start.Byte, + }, + End: hcl.Pos{ + Line: r.End.Line, + Column: r.End.Column, + Byte: r.End.Byte, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go new file mode 100644 index 00000000..6cc95cc2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go @@ -0,0 +1,53 @@ +package tfdiags + +import ( + "encoding/gob" +) + +type rpcFriendlyDiag struct { + Severity_ Severity + Summary_ string + Detail_ string + Subject_ *SourceRange + Context_ *SourceRange +} + +// makeRPCFriendlyDiag transforms a given diagnostic so that it is more +// friendly to RPC. +// +// In particular, it currently returns an object that can be serialized and +// later re-inflated using gob. This definition may grow to include other +// serializations later.
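makeRPCFriendlyDiag below exists so a Diagnostics list survives a gob round trip (the concrete type is registered in the package's init). A hedged sketch of that round trip, assuming the tfdiags package is importable as vendored here:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	var diags tfdiags.Diagnostics
	diags = diags.Append(errors.New("boom"))

	// ForRPC rewrites each diagnostic as a gob-serializable value.
	rpcDiags := diags.ForRPC()

	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(rpcDiags); err != nil {
		panic(err)
	}

	var decoded tfdiags.Diagnostics
	if err := gob.NewDecoder(&buf).Decode(&decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded[0].Description().Summary) // boom
}
```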
+func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { + desc := diag.Description() + source := diag.Source() + return &rpcFriendlyDiag{ + Severity_: diag.Severity(), + Summary_: desc.Summary, + Detail_: desc.Detail, + Subject_: source.Subject, + Context_: source.Context, + } +} + +func (d *rpcFriendlyDiag) Severity() Severity { + return d.Severity_ +} + +func (d *rpcFriendlyDiag) Description() Description { + return Description{ + Summary: d.Summary_, + Detail: d.Detail_, + } +} + +func (d *rpcFriendlyDiag) Source() Source { + return Source{ + Subject: d.Subject_, + Context: d.Context_, + } +} + +func init() { + gob.Register((*rpcFriendlyDiag)(nil)) +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go new file mode 100644 index 00000000..0b1249bb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go @@ -0,0 +1,21 @@ +// Code generated by "stringer -type=Severity"; DO NOT EDIT. + +package tfdiags + +import "strconv" + +const ( + _Severity_name_0 = "Error" + _Severity_name_1 = "Warning" +) + +func (i Severity) String() string { + switch { + case i == 69: + return _Severity_name_0 + case i == 87: + return _Severity_name_1 + default: + return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go new file mode 100644 index 00000000..fb3ac989 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go @@ -0,0 +1,25 @@ +package tfdiags + +type simpleWarning string + +var _ Diagnostic = simpleWarning("") + +// SimpleWarning constructs a simple (summary-only) warning diagnostic. +func SimpleWarning(msg string) Diagnostic { + return simpleWarning(msg) +} + +func (e simpleWarning) Severity() Severity { + return Warning +} + +func (e simpleWarning) Description() Description { + return Description{ + Summary: string(e), + } +} + +func (e simpleWarning) Source() Source { + // No source information available for a native error + return Source{} +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go new file mode 100644 index 00000000..3031168d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go @@ -0,0 +1,35 @@ +package tfdiags + +import ( + "fmt" + "os" + "path/filepath" +) + +type SourceRange struct { + Filename string + Start, End SourcePos +} + +type SourcePos struct { + Line, Column, Byte int +} + +// StartString returns a string representation of the start of the range, +// including the filename and the line and column numbers. +func (r SourceRange) StartString() string { + filename := r.Filename + + // We'll try to relative-ize our filename here so it's less verbose + // in the common case of being in the current working directory. If not, + // we'll just show the full path. 
+ wd, err := os.Getwd() + if err == nil { + relFn, err := filepath.Rel(wd, filename) + if err == nil { + filename = relFn + } + } + + return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column) +} diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go new file mode 100644 index 00000000..e3180820 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/version/version.go @@ -0,0 +1,36 @@ +// The version package provides a location to set the release versions for all +// packages to consume, without creating import cycles. +// +// This package should not import any other terraform packages. +package version + +import ( + "fmt" + + version "github.com/hashicorp/go-version" +) + +// The main version number that is being run at the moment. +const Version = "0.11.10" + +// A pre-release marker for the version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +var Prerelease = "" + +// SemVer is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. +var SemVer = version.Must(version.NewVersion(Version)) + +// Header is the header name used to send the current terraform version +// in http requests. +const Header = "Terraform-Version" + +// String returns the complete version string, including prerelease +func String() string { + if Prerelease != "" { + return fmt.Sprintf("%s-%s", Version, Prerelease) + } + return Version +} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 00000000..531fcc11 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 00000000..1f980775 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,9 @@ +language: go + +sudo: false + +go: + - 1.4 + +install: go get -v -t ./... +script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile index ad17bf00..a828d284 100644 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -35,7 +35,7 @@ buildfuzz: go-fuzz-build github.com/jmespath/go-jmespath/fuzz fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata bench: go test -bench . 
-cpuprofile cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go index 8a3f2ef0..9b7cd89b 100644 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "math" + "reflect" "sort" "strconv" "strings" @@ -124,197 +125,197 @@ type functionCaller struct { func newFunctionCaller() *functionCaller { caller := &functionCaller{} caller.functionTable = map[string]functionEntry{ - "length": functionEntry{ + "length": { name: "length", arguments: []argSpec{ - argSpec{types: []jpType{jpString, jpArray, jpObject}}, + {types: []jpType{jpString, jpArray, jpObject}}, }, handler: jpfLength, }, - "starts_with": functionEntry{ + "starts_with": { name: "starts_with", arguments: []argSpec{ - argSpec{types: []jpType{jpString}}, - argSpec{types: []jpType{jpString}}, + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, }, handler: jpfStartsWith, }, - "abs": functionEntry{ + "abs": { name: "abs", arguments: []argSpec{ - argSpec{types: []jpType{jpNumber}}, + {types: []jpType{jpNumber}}, }, handler: jpfAbs, }, - "avg": functionEntry{ + "avg": { name: "avg", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber}}, + {types: []jpType{jpArrayNumber}}, }, handler: jpfAvg, }, - "ceil": functionEntry{ + "ceil": { name: "ceil", arguments: []argSpec{ - argSpec{types: []jpType{jpNumber}}, + {types: []jpType{jpNumber}}, }, handler: jpfCeil, }, - "contains": functionEntry{ + "contains": { name: "contains", arguments: []argSpec{ - argSpec{types: []jpType{jpArray, jpString}}, - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, }, handler: jpfContains, }, - "ends_with": functionEntry{ + "ends_with": { name: "ends_with", arguments: []argSpec{ - argSpec{types: []jpType{jpString}}, - argSpec{types: []jpType{jpString}}, + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, }, handler: jpfEndsWith, }, - "floor": functionEntry{ + "floor": { name: "floor", arguments: []argSpec{ - argSpec{types: []jpType{jpNumber}}, + {types: []jpType{jpNumber}}, }, handler: jpfFloor, }, - "map": functionEntry{ + "map": { name: "amp", arguments: []argSpec{ - argSpec{types: []jpType{jpExpref}}, - argSpec{types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, }, handler: jpfMap, hasExpRef: true, }, - "max": functionEntry{ + "max": { name: "max", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + {types: []jpType{jpArrayNumber, jpArrayString}}, }, handler: jpfMax, }, - "merge": functionEntry{ + "merge": { name: "merge", arguments: []argSpec{ - argSpec{types: []jpType{jpObject}, variadic: true}, + {types: []jpType{jpObject}, variadic: true}, }, handler: jpfMerge, }, - "max_by": functionEntry{ + "max_by": { name: "max_by", arguments: []argSpec{ - argSpec{types: []jpType{jpArray}}, - argSpec{types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, }, handler: jpfMaxBy, hasExpRef: true, }, - "sum": functionEntry{ + "sum": { name: "sum", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber}}, + {types: []jpType{jpArrayNumber}}, }, handler: jpfSum, }, - "min": functionEntry{ + "min": { name: "min", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, + {types: []jpType{jpArrayNumber, jpArrayString}}, }, handler: jpfMin, }, - "min_by": functionEntry{ + "min_by": { name: "min_by", 
arguments: []argSpec{ - argSpec{types: []jpType{jpArray}}, - argSpec{types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, }, handler: jpfMinBy, hasExpRef: true, }, - "type": functionEntry{ + "type": { name: "type", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfType, }, - "keys": functionEntry{ + "keys": { name: "keys", arguments: []argSpec{ - argSpec{types: []jpType{jpObject}}, + {types: []jpType{jpObject}}, }, handler: jpfKeys, }, - "values": functionEntry{ + "values": { name: "values", arguments: []argSpec{ - argSpec{types: []jpType{jpObject}}, + {types: []jpType{jpObject}}, }, handler: jpfValues, }, - "sort": functionEntry{ + "sort": { name: "sort", arguments: []argSpec{ - argSpec{types: []jpType{jpArrayString, jpArrayNumber}}, + {types: []jpType{jpArrayString, jpArrayNumber}}, }, handler: jpfSort, }, - "sort_by": functionEntry{ + "sort_by": { name: "sort_by", arguments: []argSpec{ - argSpec{types: []jpType{jpArray}}, - argSpec{types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, }, handler: jpfSortBy, hasExpRef: true, }, - "join": functionEntry{ + "join": { name: "join", arguments: []argSpec{ - argSpec{types: []jpType{jpString}}, - argSpec{types: []jpType{jpArrayString}}, + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, }, handler: jpfJoin, }, - "reverse": functionEntry{ + "reverse": { name: "reverse", arguments: []argSpec{ - argSpec{types: []jpType{jpArray, jpString}}, + {types: []jpType{jpArray, jpString}}, }, handler: jpfReverse, }, - "to_array": functionEntry{ + "to_array": { name: "to_array", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfToArray, }, - "to_string": functionEntry{ + "to_string": { name: "to_string", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfToString, }, - "to_number": functionEntry{ + "to_number": { name: "to_number", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}}, + {types: []jpType{jpAny}}, }, handler: jpfToNumber, }, - "not_null": functionEntry{ + "not_null": { name: "not_null", arguments: []argSpec{ - argSpec{types: []jpType{jpAny}, variadic: true}, + {types: []jpType{jpAny}, variadic: true}, }, handler: jpfNotNull, }, @@ -357,7 +358,7 @@ func (a *argSpec) typeCheck(arg interface{}) error { return nil } case jpArray: - if _, ok := arg.([]interface{}); ok { + if isSliceType(arg) { return nil } case jpObject: @@ -409,8 +410,9 @@ func jpfLength(arguments []interface{}) (interface{}, error) { arg := arguments[0] if c, ok := arg.(string); ok { return float64(utf8.RuneCountInString(c)), nil - } else if c, ok := arg.([]interface{}); ok { - return float64(len(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil } else if c, ok := arg.(map[string]interface{}); ok { return float64(len(c)), nil } diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go index c8f4bceb..1240a175 100644 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -353,7 +353,7 @@ func (p *Parser) nud(token token) (ASTNode, error) { case tFlatten: left := ASTNode{ nodeType: ASTFlatten, - children: []ASTNode{ASTNode{nodeType: ASTIdentity}}, + children: []ASTNode{{nodeType: ASTIdentity}}, } right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) if err != nil { @@ -378,7 +378,7 @@ 
func (p *Parser) nud(token token) (ASTNode, error) { } return ASTNode{ nodeType: ASTProjection, - children: []ASTNode{ASTNode{nodeType: ASTIdentity}, right}, + children: []ASTNode{{nodeType: ASTIdentity}, right}, }, nil } else { return p.parseMultiSelectList() diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 00000000..5597e026 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - tip + +os: + - linux + - osx + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 00000000..65dc692b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
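Before moving into the go-isatty files: the go-jmespath hunks above replace the concrete `[]interface{}` type assertions in `typeCheck` and `jpfLength` with an `isSliceType` check plus `reflect.ValueOf(arg).Len()`, so `length()` and array argument checks now accept any Go slice kind rather than only `[]interface{}`. The helper itself sits outside the hunk; below is a minimal sketch of what such a reflection-based check looks like (the name `isSliceType` is taken from the diff, but the body here is an illustration, not the vendored source):

```go
package main

import (
	"fmt"
	"reflect"
)

// isSliceType reports whether v is backed by any slice kind;
// []interface{}, []string, and []int all qualify.
func isSliceType(v interface{}) bool {
	if v == nil {
		return false
	}
	return reflect.TypeOf(v).Kind() == reflect.Slice
}

func main() {
	fmt.Println(isSliceType([]interface{}{1, 2})) // true
	fmt.Println(isSliceType([]string{"a", "b"}))  // true; a plain []interface{} assertion would reject this
	fmt.Println(isSliceType("not a slice"))       // false
}
```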
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 00000000..1e69004b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 00000000..17d4f90e --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements an interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go new file mode 100644 index 00000000..9584a988 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go @@ -0,0 +1,15 @@ +// +build appengine + +package isatty + +// IsTerminal returns true if the file descriptor is a terminal, which +// is always false on appengine classic, a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false in this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 00000000..42f2514d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go new file mode 100644 index 00000000..7384cf99 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine,!ppc64,!ppc64le + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go new file mode 100644 index 00000000..44e5d213 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go @@ -0,0 +1,19 @@ +// +build linux +// +build ppc64 ppc64le + +package isatty + +import ( + "unsafe" + + syscall "golang.org/x/sys/unix" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 00000000..9d8b4a59 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,10 @@ +// +build !windows +// +build !appengine + +package isatty + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. This is always false in this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 00000000..1f0c6bf5 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 00000000..af51cbca --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,94 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + fileNameInfo uintptr = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// isCygwinPipeName reports whether the pipe name is one used by a cygwin/msys2 pty.
+// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && token[0] != `\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + return false + } + + // Cygwin/msys's pty is a pipe. + ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mitchellh/cli/.travis.yml b/vendor/github.com/mitchellh/cli/.travis.yml new file mode 100644 index 00000000..974234b1 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/.travis.yml @@ -0,0 +1,13 @@ +sudo: false + +language: go + +go: + - 1.8 + - 1.9 + +branches: + only: + - master + +script: make updatedeps test testrace diff --git a/vendor/github.com/mitchellh/cli/LICENSE b/vendor/github.com/mitchellh/cli/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/mitchellh/cli/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b.
any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/mitchellh/cli/Makefile b/vendor/github.com/mitchellh/cli/Makefile new file mode 100644 index 00000000..4874b008 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/Makefile @@ -0,0 +1,20 @@ +TEST?=./... + +default: test + +# test runs the test suite and vets the code +test: + go list $(TEST) | xargs -n1 go test -timeout=60s -parallel=10 $(TESTARGS) + +# testrace runs the race checker +testrace: + go list $(TEST) | xargs -n1 go test -race $(TESTARGS) + +# updatedeps installs all the dependencies to run and build +updatedeps: + go list ./... \ + | xargs go list -f '{{ join .Deps "\n" }}{{ printf "\n" }}{{ join .TestImports "\n" }}' \ + | grep -v github.com/mitchellh/cli \ + | xargs go get -f -u -v + +.PHONY: test testrace updatedeps diff --git a/vendor/github.com/mitchellh/cli/README.md b/vendor/github.com/mitchellh/cli/README.md new file mode 100644 index 00000000..8f02cdd0 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/README.md @@ -0,0 +1,67 @@ +# Go CLI Library [![GoDoc](https://godoc.org/github.com/mitchellh/cli?status.png)](https://godoc.org/github.com/mitchellh/cli) + +cli is a library for implementing powerful command-line interfaces in Go. +cli is the library that powers the CLI for +[Packer](https://github.com/mitchellh/packer), +[Serf](https://github.com/hashicorp/serf), +[Consul](https://github.com/hashicorp/consul), +[Vault](https://github.com/hashicorp/vault), +[Terraform](https://github.com/hashicorp/terraform), and +[Nomad](https://github.com/hashicorp/nomad). + +## Features + +* Easy sub-command based CLIs: `cli foo`, `cli bar`, etc. + +* Support for nested subcommands such as `cli foo bar`. + +* Optional support for default subcommands so `cli` does something + other than error. + +* Support for shell autocompletion of subcommands, flags, and arguments + with callbacks in Go. You don't need to write any shell code. + +* Automatic help generation for listing subcommands + +* Automatic help flag recognition of `-h`, `--help`, etc. + +* Automatic version flag recognition of `-v`, `--version`. + +* Helpers for interacting with the terminal, such as outputting information, + asking for input, etc. These are optional, you can always interact with the + terminal however you choose. + +* Use of Go interfaces/types makes augmenting various parts of the library a + piece of cake. + +## Example + +Below is a simple example of creating and running a CLI + +```go +package main + +import ( + "log" + "os" + + "github.com/mitchellh/cli" +) + +func main() { + c := cli.NewCLI("app", "1.0.0") + c.Args = os.Args[1:] + c.Commands = map[string]cli.CommandFactory{ + "foo": fooCommandFactory, + "bar": barCommandFactory, + } + + exitStatus, err := c.Run() + if err != nil { + log.Println(err) + } + + os.Exit(exitStatus) +} +``` + diff --git a/vendor/github.com/mitchellh/cli/autocomplete.go b/vendor/github.com/mitchellh/cli/autocomplete.go new file mode 100644 index 00000000..3bec6258 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/autocomplete.go @@ -0,0 +1,43 @@ +package cli + +import ( + "github.com/posener/complete/cmd/install" +) + +// autocompleteInstaller is an interface to be implemented to perform the +// autocomplete installation and uninstallation with a CLI. 
+// +// This interface is not exported because it only exists for unit tests +// to be able to test that the installation is called properly. +type autocompleteInstaller interface { + Install(string) error + Uninstall(string) error +} + +// realAutocompleteInstaller uses the real install package to do the +// install/uninstall. +type realAutocompleteInstaller struct{} + +func (i *realAutocompleteInstaller) Install(cmd string) error { + return install.Install(cmd) +} + +func (i *realAutocompleteInstaller) Uninstall(cmd string) error { + return install.Uninstall(cmd) +} + +// mockAutocompleteInstaller is used for tests to record the install/uninstall. +type mockAutocompleteInstaller struct { + InstallCalled bool + UninstallCalled bool +} + +func (i *mockAutocompleteInstaller) Install(cmd string) error { + i.InstallCalled = true + return nil +} + +func (i *mockAutocompleteInstaller) Uninstall(cmd string) error { + i.UninstallCalled = true + return nil +} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go new file mode 100644 index 00000000..a25a5822 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/cli.go @@ -0,0 +1,715 @@ +package cli + +import ( + "fmt" + "io" + "os" + "regexp" + "sort" + "strings" + "sync" + "text/template" + + "github.com/armon/go-radix" + "github.com/posener/complete" +) + +// CLI contains the state necessary to run subcommands and parse the +// command line arguments. +// +// CLI also supports nested subcommands, such as "cli foo bar". To use +// nested subcommands, the key in the Commands mapping below contains the +// full subcommand. In this example, it would be "foo bar". +// +// If you use a CLI with nested subcommands, some semantics change due to +// ambiguities: +// +// * We use longest prefix matching to find a matching subcommand. This +// means if you register "foo bar" and the user executes "cli foo qux", +// the "foo" command will be executed with the arg "qux". It is up to +// you to handle these args. One option is to just return the special +// help return code `RunResultHelp` to display help and exit. +// +// * The help flag "-h" or "-help" will look at all args to determine +// the help function. For example: "otto apps list -h" will show the +// help for "apps list" but "otto apps -h" will show it for "apps". +// In the normal CLI, only the first subcommand is used. +// +// * The help flag will list any subcommands that a command takes +// as well as the command's help itself. If there are no subcommands, +// it will note this. If the CLI itself has no subcommands, this entire +// section is omitted. +// +// * Any parent commands that don't exist are automatically created as +// no-op commands that just show help for other subcommands. For example, +// if you only register "foo bar", then "foo" is automatically created. +// +type CLI struct { + // Args is the list of command-line arguments received excluding + // the name of the app. For example, if the command "./cli foo bar" + // was invoked, then Args should be []string{"foo", "bar"}. + Args []string + + // Commands is a mapping of subcommand names to a factory function + // for creating that Command implementation. If there is a command + // with a blank string "", then it will be used as the default command + // if no subcommand is specified. + // + // If the key has a space in it, this will create a nested subcommand. + // For example, if the key is "foo bar", then to access it our CLI + // must be accessed with "./cli foo bar". 
See the docs for CLI for + // notes on how this changes some other behavior of the CLI as well. + // + // The factory should be as cheap as possible, ideally only allocating + // a struct. The factory may be called multiple times in the course + // of a command execution and certain events such as help require the + // instantiation of all commands. Expensive initialization should be + // deferred to function calls within the interface implementation. + Commands map[string]CommandFactory + + // HiddenCommands is a list of commands that are "hidden". Hidden + // commands are not given to the help function callback and do not + // show up in autocomplete. The values in the slice should be equivalent + // to the keys in the command map. + HiddenCommands []string + + // Name defines the name of the CLI. + Name string + + // Version of the CLI. + Version string + + // Autocomplete enables or disables subcommand auto-completion support. + // This is enabled by default when NewCLI is called. Otherwise, this + // must be enabled explicitly. + // + // Autocomplete requires the "Name" option to be set on CLI. This name + // should be set exactly to the binary name that is autocompleted. + // + // Autocompletion is supported via the github.com/posener/complete + // library. This library supports both bash and zsh. To add support + // for other shells, please see that library. + // + // AutocompleteInstall and AutocompleteUninstall are the global flag + // names for installing and uninstalling the autocompletion handlers + // for the user's shell. The flag should omit the hyphen(s) in front of + // the value. Both single and double hyphens will automatically be supported + // for the flag name. These default to `autocomplete-install` and + // `autocomplete-uninstall` respectively. + // + // AutocompleteNoDefaultFlags is a boolean which controls if the default auto- + // complete flags like -help and -version are added to the output. + // + // AutocompleteGlobalFlags are a mapping of global flags for + // autocompletion. The help and version flags are automatically added. + Autocomplete bool + AutocompleteInstall string + AutocompleteUninstall string + AutocompleteNoDefaultFlags bool + AutocompleteGlobalFlags complete.Flags + autocompleteInstaller autocompleteInstaller // For tests + + // HelpFunc and HelpWriter are used to output help information, if + // requested. + // + // HelpFunc is the function called to generate the generic help + // text that is shown if help must be shown for the CLI that doesn't + // pertain to a specific command. + // + // HelpWriter is the Writer where the help text is outputted to. If + // not specified, it will default to Stderr. + HelpFunc HelpFunc + HelpWriter io.Writer + + //--------------------------------------------------------------- + // Internal fields set automatically + + once sync.Once + autocomplete *complete.Complete + commandTree *radix.Tree + commandNested bool + commandHidden map[string]struct{} + subcommand string + subcommandArgs []string + topFlags []string + + // These are true when special global flags are set. We can/should + // probably use a bitset for this one day. + isHelp bool + isVersion bool + isAutocompleteInstall bool + isAutocompleteUninstall bool +} + +// NewCLI returns a new CLI instance with sensible defaults.
+func NewCLI(app, version string) *CLI { + return &CLI{ + Name: app, + Version: version, + HelpFunc: BasicHelpFunc(app), + Autocomplete: true, + } +} + +// IsHelp returns whether or not the help flag is present within the +// arguments. +func (c *CLI) IsHelp() bool { + c.once.Do(c.init) + return c.isHelp +} + +// IsVersion returns whether or not the version flag is present within the +// arguments. +func (c *CLI) IsVersion() bool { + c.once.Do(c.init) + return c.isVersion +} + +// Run runs the actual CLI based on the arguments given. +func (c *CLI) Run() (int, error) { + c.once.Do(c.init) + + // If this is an autocompletion request, satisfy it. This must be called + // first before anything else since it's possible to be autocompleting + // -help or -version or other flags and we want to show completions + // and not actually write the help or version. + if c.Autocomplete && c.autocomplete.Complete() { + return 0, nil + } + + // Just show the version and exit if instructed. + if c.IsVersion() && c.Version != "" { + c.HelpWriter.Write([]byte(c.Version + "\n")) + return 0, nil + } + + // Just print the help when only '-h' or '--help' is passed. + if c.IsHelp() && c.Subcommand() == "" { + c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.Subcommand())) + "\n")) + return 0, nil + } + + // If we're attempting to install or uninstall autocomplete then handle it + if c.Autocomplete { + // Autocomplete requires the "Name" to be set so that we know what + // command to set up the autocomplete on. + if c.Name == "" { + return 1, fmt.Errorf( + "internal error: CLI.Name must be specified for autocomplete to work") + } + + // If both install and uninstall flags are specified, then error + if c.isAutocompleteInstall && c.isAutocompleteUninstall { + return 1, fmt.Errorf( + "Either the autocomplete install or uninstall flag may " + + "be specified, but not both.") + } + + // If the install flag is specified, perform the install or uninstall + if c.isAutocompleteInstall { + if err := c.autocompleteInstaller.Install(c.Name); err != nil { + return 1, err + } + + return 0, nil + } + + if c.isAutocompleteUninstall { + if err := c.autocompleteInstaller.Uninstall(c.Name); err != nil { + return 1, err + } + + return 0, nil + } + } + + // Attempt to get the factory function for creating the command + // implementation. If the command is invalid or blank, it is an error. + raw, ok := c.commandTree.Get(c.Subcommand()) + if !ok { + c.HelpWriter.Write([]byte(c.HelpFunc(c.helpCommands(c.subcommandParent())) + "\n")) + return 127, nil + } + + command, err := raw.(CommandFactory)() + if err != nil { + return 1, err + } + + // If we've been instructed to just print the help, then print it + if c.IsHelp() { + c.commandHelp(command) + return 0, nil + } + + // If there is an invalid flag, then error + if len(c.topFlags) > 0 { + c.HelpWriter.Write([]byte( + "Invalid flags before the subcommand. If these flags are for\n" + + "the subcommand, please put them after the subcommand.\n\n")) + c.commandHelp(command) + return 1, nil + } + + code := command.Run(c.SubcommandArgs()) + if code == RunResultHelp { + // Requesting help + c.commandHelp(command) + return 1, nil + } + + return code, nil +} + +// Subcommand returns the subcommand that the CLI would execute. For +// example, a CLI from "--version version --help" would return a Subcommand +// of "version". +func (c *CLI) Subcommand() string { + c.once.Do(c.init) + return c.subcommand +} + +// SubcommandArgs returns the arguments that will be passed to the +// subcommand.
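The `Subcommand` and `SubcommandArgs` accessors documented above split the raw argument list around the longest registered command prefix (the mechanics live in `processArgs` further down in this file). A small editorial sketch of the expected behavior, using the package's own `MockCommand` so the example is self-contained:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/cli"
)

func main() {
	factory := func() (cli.Command, error) {
		// MockCommand is exported by this package for test/demo use.
		return &cli.MockCommand{SynopsisText: "demo"}, nil
	}

	c := cli.NewCLI("app", "1.0.0")
	c.Args = []string{"foo", "bar", "-verbose"}
	c.Commands = map[string]cli.CommandFactory{
		"foo":     factory,
		"foo bar": factory, // nested command, invoked as "app foo bar"
	}

	// Longest-prefix matching picks the nested command and leaves
	// the rest of the argument list for it.
	fmt.Println(c.Subcommand())     // "foo bar"
	fmt.Println(c.SubcommandArgs()) // [-verbose]
}
```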
+func (c *CLI) SubcommandArgs() []string { + c.once.Do(c.init) + return c.subcommandArgs +} + +// subcommandParent returns the parent of this subcommand, if there is one. +// If there isn't one, "" is returned. +func (c *CLI) subcommandParent() string { + // Get the subcommand; if it is "" already, just return + sub := c.Subcommand() + if sub == "" { + return sub + } + + // Clear any trailing spaces and find the last space + sub = strings.TrimRight(sub, " ") + idx := strings.LastIndex(sub, " ") + + if idx == -1 { + // No space means our parent is root + return "" + } + + return sub[:idx] +} + +func (c *CLI) init() { + if c.HelpFunc == nil { + c.HelpFunc = BasicHelpFunc("app") + + if c.Name != "" { + c.HelpFunc = BasicHelpFunc(c.Name) + } + } + + if c.HelpWriter == nil { + c.HelpWriter = os.Stderr + } + + // Build our hidden commands + if len(c.HiddenCommands) > 0 { + c.commandHidden = make(map[string]struct{}) + for _, h := range c.HiddenCommands { + c.commandHidden[h] = struct{}{} + } + } + + // Build our command tree + c.commandTree = radix.New() + c.commandNested = false + for k, v := range c.Commands { + k = strings.TrimSpace(k) + c.commandTree.Insert(k, v) + if strings.ContainsRune(k, ' ') { + c.commandNested = true + } + } + + // Go through the keys and fill in any missing parent commands + if c.commandNested { + var walkFn radix.WalkFn + toInsert := make(map[string]struct{}) + walkFn = func(k string, raw interface{}) bool { + idx := strings.LastIndex(k, " ") + if idx == -1 { + // If there is no space, just ignore top level commands + return false + } + + // Trim up to that space so we can get the expected parent + k = k[:idx] + if _, ok := c.commandTree.Get(k); ok { + // Yay we have the parent! + return false + } + + // We're missing the parent, so let's insert this + toInsert[k] = struct{}{} + + // Call the walk function recursively so we check this one too + return walkFn(k, nil) + } + + // Walk! + c.commandTree.Walk(walkFn) + + // Insert any that we're missing + for k := range toInsert { + var f CommandFactory = func() (Command, error) { + return &MockCommand{ + HelpText: "This command is accessed by using one of the subcommands below.", + RunResult: RunResultHelp, + }, nil + } + + c.commandTree.Insert(k, f) + } + } + + // Set up autocomplete if we have it enabled. We have to do this after + // the command tree is set up so we can use the radix tree to easily find + // all subcommands. + if c.Autocomplete { + c.initAutocomplete() + } + + // Process the args + c.processArgs() +} + +func (c *CLI) initAutocomplete() { + if c.AutocompleteInstall == "" { + c.AutocompleteInstall = defaultAutocompleteInstall + } + + if c.AutocompleteUninstall == "" { + c.AutocompleteUninstall = defaultAutocompleteUninstall + } + + if c.autocompleteInstaller == nil { + c.autocompleteInstaller = &realAutocompleteInstaller{} + } + + // Build the root command + cmd := c.initAutocompleteSub("") + + // For the root, we add the global flags to the "Flags". This way + // they don't show up on every command. + if !c.AutocompleteNoDefaultFlags { + cmd.Flags = map[string]complete.Predictor{ + "-" + c.AutocompleteInstall: complete.PredictNothing, + "-" + c.AutocompleteUninstall: complete.PredictNothing, + "-help": complete.PredictNothing, + "-version": complete.PredictNothing, + } + } + cmd.GlobalFlags = c.AutocompleteGlobalFlags + + c.autocomplete = complete.New(c.Name, cmd) +} + +// initAutocompleteSub creates the complete.Command for a subcommand with +// the given prefix.
This will continue recursively for all subcommands. +// The prefix "" (empty string) can be used for the root command. +func (c *CLI) initAutocompleteSub(prefix string) complete.Command { + var cmd complete.Command + walkFn := func(k string, raw interface{}) bool { + // Keep track of the full key so that we can nest further if necessary + fullKey := k + + if len(prefix) > 0 { + // If we have a prefix, trim the prefix + 1 (for the space) + // Example: turns "sub one" to "one" with prefix "sub" + k = k[len(prefix)+1:] + } + + if idx := strings.Index(k, " "); idx >= 0 { + // If there is a space, we trim up to the space. This turns + // "sub sub2 sub3" into "sub". The prefix trim above will + // trim our current depth properly. + k = k[:idx] + } + + if _, ok := cmd.Sub[k]; ok { + // If we already tracked this subcommand then ignore + return false + } + + // If the command is hidden, don't record it at all + if _, ok := c.commandHidden[fullKey]; ok { + return false + } + + if cmd.Sub == nil { + cmd.Sub = complete.Commands(make(map[string]complete.Command)) + } + subCmd := c.initAutocompleteSub(fullKey) + + // Instantiate the command so that we can check if the command is + // a CommandAutocomplete implementation. If there is an error + // creating the command, we just ignore it since that will be caught + // later. + impl, err := raw.(CommandFactory)() + if err != nil { + impl = nil + } + + // Check if it implements CommandAutocomplete. If so, set up the autocomplete + if c, ok := impl.(CommandAutocomplete); ok { + subCmd.Args = c.AutocompleteArgs() + subCmd.Flags = c.AutocompleteFlags() + } + + cmd.Sub[k] = subCmd + return false + } + + walkPrefix := prefix + if walkPrefix != "" { + walkPrefix += " " + } + + c.commandTree.WalkPrefix(walkPrefix, walkFn) + return cmd +} + +func (c *CLI) commandHelp(command Command) { + // Get the template to use + tpl := strings.TrimSpace(defaultHelpTemplate) + if t, ok := command.(CommandHelpTemplate); ok { + tpl = t.HelpTemplate() + } + if !strings.HasSuffix(tpl, "\n") { + tpl += "\n" + } + + // Parse it + t, err := template.New("root").Parse(tpl) + if err != nil { + t = template.Must(template.New("root").Parse(fmt.Sprintf( + "Internal error!
Failed to parse command help template: %s\n", err))) + } + + // Template data + data := map[string]interface{}{ + "Name": c.Name, + "Help": command.Help(), + } + + // Build subcommand list if we have it + var subcommandsTpl []map[string]interface{} + if c.commandNested { + // Get the matching keys + subcommands := c.helpCommands(c.Subcommand()) + keys := make([]string, 0, len(subcommands)) + for k := range subcommands { + keys = append(keys, k) + } + + // Sort the keys + sort.Strings(keys) + + // Figure out the padding length + var longest int + for _, k := range keys { + if v := len(k); v > longest { + longest = v + } + } + + // Go through and create their structures + subcommandsTpl = make([]map[string]interface{}, 0, len(subcommands)) + for _, k := range keys { + // Get the command + raw, ok := subcommands[k] + if !ok { + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Error getting subcommand %q", k))) + } + sub, err := raw() + if err != nil { + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Error instantiating %q: %s", k, err))) + } + + // Find the last space and make sure we only include that last part + name := k + if idx := strings.LastIndex(k, " "); idx > -1 { + name = name[idx+1:] + } + + subcommandsTpl = append(subcommandsTpl, map[string]interface{}{ + "Name": name, + "NameAligned": name + strings.Repeat(" ", longest-len(k)), + "Help": sub.Help(), + "Synopsis": sub.Synopsis(), + }) + } + } + data["Subcommands"] = subcommandsTpl + + // Write + err = t.Execute(c.HelpWriter, data) + if err == nil { + return + } + + // An error, just output... + c.HelpWriter.Write([]byte(fmt.Sprintf( + "Internal error rendering help: %s", err))) +} + +// helpCommands returns the subcommands for the HelpFunc argument. +// This will only contain immediate subcommands. +func (c *CLI) helpCommands(prefix string) map[string]CommandFactory { + // If our prefix isn't empty, make sure it ends in ' ' + if prefix != "" && prefix[len(prefix)-1] != ' ' { + prefix += " " + } + + // Get all the subkeys of this command + var keys []string + c.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool { + // Ignore any sub-sub keys, i.e. "foo bar baz" when we want "foo bar" + if !strings.Contains(k[len(prefix):], " ") { + keys = append(keys, k) + } + + return false + }) + + // For each of the keys return that in the map + result := make(map[string]CommandFactory, len(keys)) + for _, k := range keys { + raw, ok := c.commandTree.Get(k) + if !ok { + // We just got it via WalkPrefix above, so we just panic + panic("not found: " + k) + } + + // If this is a hidden command, don't show it + if _, ok := c.commandHidden[k]; ok { + continue + } + + result[k] = raw.(CommandFactory) + } + + return result +} + +func (c *CLI) processArgs() { + for i, arg := range c.Args { + if arg == "--" { + break + } + + // Check for help flags. + if arg == "-h" || arg == "-help" || arg == "--help" { + c.isHelp = true + continue + } + + // Check for autocomplete flags + if c.Autocomplete { + if arg == "-"+c.AutocompleteInstall || arg == "--"+c.AutocompleteInstall { + c.isAutocompleteInstall = true + continue + } + + if arg == "-"+c.AutocompleteUninstall || arg == "--"+c.AutocompleteUninstall { + c.isAutocompleteUninstall = true + continue + } + } + + if c.subcommand == "" { + // Check for version flags if not in a subcommand. + if arg == "-v" || arg == "-version" || arg == "--version" { + c.isVersion = true + continue + } + + if arg != "" && arg[0] == '-' { + // Record the arg... 
+ c.topFlags = append(c.topFlags, arg) + } + } + + // If we didn't find a subcommand yet and this is the first non-flag + // argument, then this is our subcommand. + if c.subcommand == "" && arg != "" && arg[0] != '-' { + c.subcommand = arg + if c.commandNested { + // If the command has a space in it, then it is invalid. + // Set a blank command so that it fails. + if strings.ContainsRune(arg, ' ') { + c.subcommand = "" + return + } + + // Determine the argument we look at to end subcommands. + // We look at all arguments until one has a space. This + // disallows commands like: ./cli foo "bar baz". An argument + // with a space is always an argument. + j := 0 + for k, v := range c.Args[i:] { + if strings.ContainsRune(v, ' ') { + break + } + + j = i + k + 1 + } + + // Nested CLI, the subcommand is actually the entire + // arg list up to a flag that is still a valid subcommand. + searchKey := strings.Join(c.Args[i:j], " ") + k, _, ok := c.commandTree.LongestPrefix(searchKey) + if ok { + // k could be a prefix that doesn't contain the full + // command such as "foo" instead of "foobar", so we + // need to verify that we have an entire key. To do that, + // we look for an ending in a space or an end of string. + reVerify := regexp.MustCompile(regexp.QuoteMeta(k) + `( |$)`) + if reVerify.MatchString(searchKey) { + c.subcommand = k + i += strings.Count(k, " ") + } + } + } + + // The remaining args are the subcommand arguments + c.subcommandArgs = c.Args[i+1:] + } + } + + // If we never found a subcommand and support a default command, then + // switch to using that. + if c.subcommand == "" { + if _, ok := c.Commands[""]; ok { + args := c.topFlags + args = append(args, c.subcommandArgs...) + c.topFlags = nil + c.subcommandArgs = args + } + } +} + +// defaultAutocompleteInstall and defaultAutocompleteUninstall are the +// default values for the autocomplete install and uninstall flags. +const defaultAutocompleteInstall = "autocomplete-install" +const defaultAutocompleteUninstall = "autocomplete-uninstall" + +const defaultHelpTemplate = ` +{{.Help}}{{if gt (len .Subcommands) 0}} + +Subcommands: +{{- range $value := .Subcommands }} + {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }} +{{- end }} +` diff --git a/vendor/github.com/mitchellh/cli/command.go b/vendor/github.com/mitchellh/cli/command.go new file mode 100644 index 00000000..bed11faf --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command.go @@ -0,0 +1,67 @@ +package cli + +import ( + "github.com/posener/complete" +) + +const ( + // RunResultHelp is a value that can be returned from Run to signal + // to the CLI to render the help output. + RunResultHelp = -18511 +) + +// A command is a runnable sub-command of a CLI. +type Command interface { + // Help should return long-form help text that includes the command-line + // usage, a brief few sentences explaining the function of the command, + // and the complete list of flags the command accepts. + Help() string + + // Run should run the actual command with the given CLI instance and + // command-line arguments. It should return the exit status when it is + // finished. + // + // There are a handful of special exit codes this can return documented + // above that change behavior. + Run(args []string) int + + // Synopsis should return a one-line, short synopsis of the command. + // This should be less than 50 characters ideally. + Synopsis() string +} + +// CommandAutocomplete is an extension of Command that enables fine-grained +// autocompletion.
Subcommand autocompletion will work even if this interface +// is not implemented. By implementing this interface, more advanced +// autocompletion is enabled. +type CommandAutocomplete interface { + // AutocompleteArgs returns the argument predictor for this command. + // If argument completion is not supported, this should return + // complete.PredictNothing. + AutocompleteArgs() complete.Predictor + + // AutocompleteFlags returns a mapping of supported flags and autocomplete + // options for this command. The map key for the Flags map should be the + // complete flag such as "-foo" or "--foo". + AutocompleteFlags() complete.Flags +} + +// CommandHelpTemplate is an extension of Command that also has a function +// for returning a template for the help rather than the help itself. In +// this scenario, both Help and HelpTemplate should be implemented. +// +// If CommandHelpTemplate isn't implemented, the Help is output as-is. +type CommandHelpTemplate interface { + // HelpTemplate is the template in text/template format to use for + // displaying the Help. The keys available are: + // + // * ".Help" - The help text itself + // * ".Subcommands" + // + HelpTemplate() string +} + +// CommandFactory is a type of function that is a factory for commands. +// We need a factory because we may need to setup some state on the +// struct that implements the command itself. +type CommandFactory func() (Command, error) diff --git a/vendor/github.com/mitchellh/cli/command_mock.go b/vendor/github.com/mitchellh/cli/command_mock.go new file mode 100644 index 00000000..7a584b7e --- /dev/null +++ b/vendor/github.com/mitchellh/cli/command_mock.go @@ -0,0 +1,63 @@ +package cli + +import ( + "github.com/posener/complete" +) + +// MockCommand is an implementation of Command that can be used for tests. +// It is publicly exported from this package in case you want to use it +// externally. +type MockCommand struct { + // Settable + HelpText string + RunResult int + SynopsisText string + + // Set by the command + RunCalled bool + RunArgs []string +} + +func (c *MockCommand) Help() string { + return c.HelpText +} + +func (c *MockCommand) Run(args []string) int { + c.RunCalled = true + c.RunArgs = args + + return c.RunResult +} + +func (c *MockCommand) Synopsis() string { + return c.SynopsisText +} + +// MockCommandAutocomplete is an implementation of CommandAutocomplete. +type MockCommandAutocomplete struct { + MockCommand + + // Settable + AutocompleteArgsValue complete.Predictor + AutocompleteFlagsValue complete.Flags +} + +func (c *MockCommandAutocomplete) AutocompleteArgs() complete.Predictor { + return c.AutocompleteArgsValue +} + +func (c *MockCommandAutocomplete) AutocompleteFlags() complete.Flags { + return c.AutocompleteFlagsValue +} + +// MockCommandHelpTemplate is an implementation of CommandHelpTemplate. +type MockCommandHelpTemplate struct { + MockCommand + + // Settable + HelpTemplateText string +} + +func (c *MockCommandHelpTemplate) HelpTemplate() string { + return c.HelpTemplateText +} diff --git a/vendor/github.com/mitchellh/cli/help.go b/vendor/github.com/mitchellh/cli/help.go new file mode 100644 index 00000000..f5ca58f5 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/help.go @@ -0,0 +1,79 @@ +package cli + +import ( + "bytes" + "fmt" + "log" + "sort" + "strings" +) + +// HelpFunc is the type of the function that is responsible for generating +// the help output when the CLI must show the general help text. 
+type HelpFunc func(map[string]CommandFactory) string + +// BasicHelpFunc generates some basic help output that is usually good enough +// for most CLI applications. +func BasicHelpFunc(app string) HelpFunc { + return func(commands map[string]CommandFactory) string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf( + "Usage: %s [--version] [--help] <command> [<args>]\n\n", + app)) + buf.WriteString("Available commands are:\n") + + // Get the list of keys so we can sort them, and also get the maximum + // key length so they can be aligned properly. + keys := make([]string, 0, len(commands)) + maxKeyLen := 0 + for key := range commands { + if len(key) > maxKeyLen { + maxKeyLen = len(key) + } + + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + commandFunc, ok := commands[key] + if !ok { + // This should never happen since we JUST built the list of + // keys. + panic("command not found: " + key) + } + + command, err := commandFunc() + if err != nil { + log.Printf("[ERR] cli: Command '%s' failed to load: %s", + key, err) + continue + } + + key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key))) + buf.WriteString(fmt.Sprintf(" %s %s\n", key, command.Synopsis())) + } + + return buf.String() + } +} + +// FilteredHelpFunc will filter the commands to only include the keys +// in the include parameter. +func FilteredHelpFunc(include []string, f HelpFunc) HelpFunc { + return func(commands map[string]CommandFactory) string { + set := make(map[string]struct{}) + for _, k := range include { + set[k] = struct{}{} + } + + filtered := make(map[string]CommandFactory) + for k, f := range commands { + if _, ok := set[k]; ok { + filtered[k] = f + } + } + + return f(filtered) + } +} diff --git a/vendor/github.com/mitchellh/cli/ui.go b/vendor/github.com/mitchellh/cli/ui.go new file mode 100644 index 00000000..a2d6f94f --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui.go @@ -0,0 +1,187 @@ +package cli + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "os/signal" + "strings" + + "github.com/bgentry/speakeasy" + "github.com/mattn/go-isatty" +) + +// Ui is an interface for interacting with the terminal, or "interface" +// of a CLI. This abstraction doesn't have to be used, but helps provide +// a simple, layerable way to manage user interactions. +type Ui interface { + // Ask asks the user for input using the given query. The response is + // returned as the given string, or an error. + Ask(string) (string, error) + + // AskSecret asks the user for input using the given query, but does not echo + // the keystrokes to the terminal. + AskSecret(string) (string, error) + + // Output is called for normal standard output. + Output(string) + + // Info is called for information related to the previous output. + // In general this may be the exact same as Output, but this gives + // Ui implementors some flexibility with output formats. + Info(string) + + // Error is used for any error messages that might appear on standard + // error. + Error(string) + + // Warn is used for any warning messages that might appear on standard + // error. + Warn(string) +} + +// BasicUi is an implementation of Ui that just outputs to the given +// writer. This UI is not threadsafe by default, but you can wrap it +// in a ConcurrentUi to make it safe.
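As the `BasicUi` comment above notes, its writes are not synchronized; the `ConcurrentUi` wrapper added later in this patch serializes every call with a mutex. A brief editorial sketch of wrapping one in the other:

```go
package main

import (
	"os"

	"github.com/mitchellh/cli"
)

func main() {
	// BasicUi alone is not safe for concurrent use...
	base := &cli.BasicUi{
		Reader:      os.Stdin,
		Writer:      os.Stdout,
		ErrorWriter: os.Stderr,
	}

	// ...so wrap it: ConcurrentUi guards each Ui call with a mutex.
	ui := &cli.ConcurrentUi{Ui: base}

	ui.Output("safe to call from multiple goroutines")
	ui.Warn("warnings are routed to the error writer by BasicUi")
}
```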
+type BasicUi struct { + Reader io.Reader + Writer io.Writer + ErrorWriter io.Writer +} + +func (u *BasicUi) Ask(query string) (string, error) { + return u.ask(query, false) +} + +func (u *BasicUi) AskSecret(query string) (string, error) { + return u.ask(query, true) +} + +func (u *BasicUi) ask(query string, secret bool) (string, error) { + if _, err := fmt.Fprint(u.Writer, query+" "); err != nil { + return "", err + } + + // Register for interrupts so that we can catch it and immediately + // return... + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer signal.Stop(sigCh) + + // Ask for input in a go-routine so that we can ignore it. + errCh := make(chan error, 1) + lineCh := make(chan string, 1) + go func() { + var line string + var err error + if secret && isatty.IsTerminal(os.Stdin.Fd()) { + line, err = speakeasy.Ask("") + } else { + r := bufio.NewReader(u.Reader) + line, err = r.ReadString('\n') + } + if err != nil { + errCh <- err + return + } + + lineCh <- strings.TrimRight(line, "\r\n") + }() + + select { + case err := <-errCh: + return "", err + case line := <-lineCh: + return line, nil + case <-sigCh: + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(u.Writer) + + return "", errors.New("interrupted") + } +} + +func (u *BasicUi) Error(message string) { + w := u.Writer + if u.ErrorWriter != nil { + w = u.ErrorWriter + } + + fmt.Fprint(w, message) + fmt.Fprint(w, "\n") +} + +func (u *BasicUi) Info(message string) { + u.Output(message) +} + +func (u *BasicUi) Output(message string) { + fmt.Fprint(u.Writer, message) + fmt.Fprint(u.Writer, "\n") +} + +func (u *BasicUi) Warn(message string) { + u.Error(message) +} + +// PrefixedUi is an implementation of Ui that prefixes messages. +type PrefixedUi struct { + AskPrefix string + AskSecretPrefix string + OutputPrefix string + InfoPrefix string + ErrorPrefix string + WarnPrefix string + Ui Ui +} + +func (u *PrefixedUi) Ask(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskPrefix, query) + } + + return u.Ui.Ask(query) +} + +func (u *PrefixedUi) AskSecret(query string) (string, error) { + if query != "" { + query = fmt.Sprintf("%s%s", u.AskSecretPrefix, query) + } + + return u.Ui.AskSecret(query) +} + +func (u *PrefixedUi) Error(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.ErrorPrefix, message) + } + + u.Ui.Error(message) +} + +func (u *PrefixedUi) Info(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.InfoPrefix, message) + } + + u.Ui.Info(message) +} + +func (u *PrefixedUi) Output(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.OutputPrefix, message) + } + + u.Ui.Output(message) +} + +func (u *PrefixedUi) Warn(message string) { + if message != "" { + message = fmt.Sprintf("%s%s", u.WarnPrefix, message) + } + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_colored.go b/vendor/github.com/mitchellh/cli/ui_colored.go new file mode 100644 index 00000000..e3d5131d --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_colored.go @@ -0,0 +1,69 @@ +package cli + +import ( + "fmt" +) + +// UiColor is a posix shell color code to use. +type UiColor struct { + Code int + Bold bool +} + +// A list of colors that are useful. These are all non-bolded by default. 
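+//
+// They are typically paired with the ColoredUi type below (a sketch,
+// where base is any existing Ui):
+//
+//	ui := &ColoredUi{
+//		ErrorColor: UiColorRed,
+//		WarnColor:  UiColorYellow,
+//		Ui:         base,
+//	}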
+var ( + UiColorNone UiColor = UiColor{-1, false} + UiColorRed = UiColor{31, false} + UiColorGreen = UiColor{32, false} + UiColorYellow = UiColor{33, false} + UiColorBlue = UiColor{34, false} + UiColorMagenta = UiColor{35, false} + UiColorCyan = UiColor{36, false} +) + +// ColoredUi is a Ui implementation that colors its output according +// to the given color schemes for the given type of output. +type ColoredUi struct { + OutputColor UiColor + InfoColor UiColor + ErrorColor UiColor + WarnColor UiColor + Ui Ui +} + +func (u *ColoredUi) Ask(query string) (string, error) { + return u.Ui.Ask(u.colorize(query, u.OutputColor)) +} + +func (u *ColoredUi) AskSecret(query string) (string, error) { + return u.Ui.AskSecret(u.colorize(query, u.OutputColor)) +} + +func (u *ColoredUi) Output(message string) { + u.Ui.Output(u.colorize(message, u.OutputColor)) +} + +func (u *ColoredUi) Info(message string) { + u.Ui.Info(u.colorize(message, u.InfoColor)) +} + +func (u *ColoredUi) Error(message string) { + u.Ui.Error(u.colorize(message, u.ErrorColor)) +} + +func (u *ColoredUi) Warn(message string) { + u.Ui.Warn(u.colorize(message, u.WarnColor)) +} + +func (u *ColoredUi) colorize(message string, color UiColor) string { + if color.Code == -1 { + return message + } + + attr := 0 + if color.Bold { + attr = 1 + } + + return fmt.Sprintf("\033[%d;%dm%s\033[0m", attr, color.Code, message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_concurrent.go b/vendor/github.com/mitchellh/cli/ui_concurrent.go new file mode 100644 index 00000000..b4f4dbfa --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_concurrent.go @@ -0,0 +1,54 @@ +package cli + +import ( + "sync" +) + +// ConcurrentUi is a wrapper around a Ui interface (and implements that +// interface) making the underlying Ui concurrency safe. +type ConcurrentUi struct { + Ui Ui + l sync.Mutex +} + +func (u *ConcurrentUi) Ask(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.Ask(query) +} + +func (u *ConcurrentUi) AskSecret(query string) (string, error) { + u.l.Lock() + defer u.l.Unlock() + + return u.Ui.AskSecret(query) +} + +func (u *ConcurrentUi) Error(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Error(message) +} + +func (u *ConcurrentUi) Info(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Info(message) +} + +func (u *ConcurrentUi) Output(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Output(message) +} + +func (u *ConcurrentUi) Warn(message string) { + u.l.Lock() + defer u.l.Unlock() + + u.Ui.Warn(message) +} diff --git a/vendor/github.com/mitchellh/cli/ui_mock.go b/vendor/github.com/mitchellh/cli/ui_mock.go new file mode 100644 index 00000000..0bfe0a19 --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_mock.go @@ -0,0 +1,111 @@ +package cli + +import ( + "bytes" + "fmt" + "io" + "sync" +) + +// NewMockUi returns a fully initialized MockUi instance +// which is safe for concurrent use. +func NewMockUi() *MockUi { + m := new(MockUi) + m.once.Do(m.init) + return m +} + +// MockUi is a mock UI that is used for tests and is exported publicly +// for use in external tests if needed as well. Do not instantite this +// directly since the buffers will be initialized on the first write. If +// there is no write then you will get a nil panic. Please use the +// NewMockUi() constructor function instead. 
You can fix your code with +// +// sed -i -e 's/new(cli.MockUi)/cli.NewMockUi()/g' *_test.go +type MockUi struct { + InputReader io.Reader + ErrorWriter *syncBuffer + OutputWriter *syncBuffer + + once sync.Once +} + +func (u *MockUi) Ask(query string) (string, error) { + u.once.Do(u.init) + + var result string + fmt.Fprint(u.OutputWriter, query) + if _, err := fmt.Fscanln(u.InputReader, &result); err != nil { + return "", err + } + + return result, nil +} + +func (u *MockUi) AskSecret(query string) (string, error) { + return u.Ask(query) +} + +func (u *MockUi) Error(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) Info(message string) { + u.Output(message) +} + +func (u *MockUi) Output(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.OutputWriter, message) + fmt.Fprint(u.OutputWriter, "\n") +} + +func (u *MockUi) Warn(message string) { + u.once.Do(u.init) + + fmt.Fprint(u.ErrorWriter, message) + fmt.Fprint(u.ErrorWriter, "\n") +} + +func (u *MockUi) init() { + u.ErrorWriter = new(syncBuffer) + u.OutputWriter = new(syncBuffer) +} + +type syncBuffer struct { + sync.RWMutex + b bytes.Buffer +} + +func (b *syncBuffer) Write(data []byte) (int, error) { + b.Lock() + defer b.Unlock() + return b.b.Write(data) +} + +func (b *syncBuffer) Read(data []byte) (int, error) { + b.RLock() + defer b.RUnlock() + return b.b.Read(data) +} + +func (b *syncBuffer) Reset() { + b.Lock() + b.b.Reset() + b.Unlock() +} + +func (b *syncBuffer) String() string { + return string(b.Bytes()) +} + +func (b *syncBuffer) Bytes() []byte { + b.RLock() + data := b.b.Bytes() + b.RUnlock() + return data +} diff --git a/vendor/github.com/mitchellh/cli/ui_writer.go b/vendor/github.com/mitchellh/cli/ui_writer.go new file mode 100644 index 00000000..1e1db3cf --- /dev/null +++ b/vendor/github.com/mitchellh/cli/ui_writer.go @@ -0,0 +1,18 @@ +package cli + +// UiWriter is an io.Writer implementation that can be used with +// loggers that writes every line of log output data to a Ui at the +// Info level. +type UiWriter struct { + Ui Ui +} + +func (w *UiWriter) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + p = p[:n-1] + } + + w.Ui.Info(string(p)) + return n, nil +} diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml new file mode 100644 index 00000000..d7b9589a --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.7 + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go index 831c37de..14043525 100644 --- a/vendor/github.com/mitchellh/copystructure/copystructure.go +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -28,6 +28,18 @@ type CopierFunc func(interface{}) (interface{}, error) // this map as well as to Copy in a mutex. var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc) +// Must is a helper that wraps a call to a function returning +// (interface{}, error) and panics if the error is non-nil. It is intended +// for use in variable initializations and should only be used when a copy +// error should be a crashing case. 
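+//
+// For example (a sketch, assuming the package-level Copy function and a
+// hypothetical baseTags map):
+//
+//	var defaultTags = Must(Copy(baseTags)).(map[string]string)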
+func Must(v interface{}, err error) interface{} { + if err != nil { + panic("copy error: " + err.Error()) + } + + return v +} + var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true") type Config struct { @@ -70,6 +82,14 @@ func (c Config) Copy(v interface{}) (interface{}, error) { return result, nil } +// Return the key used to index interfaces types we've seen. Store the number +// of pointers in the upper 32bits, and the depth in the lower 32bits. This is +// easy to calculate, easy to match a key with our current depth, and we don't +// need to deal with initializing and cleaning up nested maps or slices. +func ifaceKey(pointers, depth int) uint64 { + return uint64(pointers)<<32 | uint64(depth) +} + type walker struct { Result interface{} @@ -77,7 +97,16 @@ type walker struct { ignoreDepth int vals []reflect.Value cs []reflect.Value - ps []bool + + // This stores the number of pointers we've walked over, indexed by depth. + ps []int + + // If an interface is indirected by a pointer, we need to know the type of + // interface to create when creating the new value. Store the interface + // types here, indexed by both the walk depth and the number of pointers + // already seen at that depth. Use ifaceKey to calculate the proper uint64 + // value. + ifaceTypes map[uint64]reflect.Type // any locks we've taken, indexed by depth locks []sync.Locker @@ -93,6 +122,10 @@ func (w *walker) Enter(l reflectwalk.Location) error { w.locks = append(w.locks, nil) } + for len(w.ps) < w.depth+1 { + w.ps = append(w.ps, 0) + } + return nil } @@ -103,6 +136,16 @@ func (w *walker) Exit(l reflectwalk.Location) error { defer locker.Unlock() } + // clear out pointers and interfaces as we exit the stack + w.ps[w.depth] = 0 + + for k := range w.ifaceTypes { + mask := uint64(^uint32(0)) + if k&mask == uint64(w.depth) { + delete(w.ifaceTypes, k) + } + } + w.depth-- if w.ignoreDepth > w.depth { w.ignoreDepth = 0 @@ -113,9 +156,13 @@ func (w *walker) Exit(l reflectwalk.Location) error { } switch l { + case reflectwalk.Array: + fallthrough case reflectwalk.Map: fallthrough case reflectwalk.Slice: + w.replacePointerMaybe() + // Pop map off our container w.cs = w.cs[:len(w.cs)-1] case reflectwalk.MapValue: @@ -128,16 +175,27 @@ func (w *walker) Exit(l reflectwalk.Location) error { // or in this case never adds it. We need to create a properly typed // zero value so that this key can be set. if !mv.IsValid() { - mv = reflect.Zero(m.Type().Elem()) + mv = reflect.Zero(m.Elem().Type().Elem()) + } + m.Elem().SetMapIndex(mk, mv) + case reflectwalk.ArrayElem: + // Pop off the value and the index and set it on the array + v := w.valPop() + i := w.valPop().Interface().(int) + if v.IsValid() { + a := w.cs[len(w.cs)-1] + ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call + if ae.CanSet() { + ae.Set(v) + } } - m.SetMapIndex(mk, mv) case reflectwalk.SliceElem: // Pop off the value and the index and set it on the slice v := w.valPop() + i := w.valPop().Interface().(int) if v.IsValid() { - i := w.valPop().Interface().(int) s := w.cs[len(w.cs)-1] - se := s.Index(i) + se := s.Elem().Index(i) if se.CanSet() { se.Set(v) } @@ -154,6 +212,7 @@ func (w *walker) Exit(l reflectwalk.Location) error { if v.IsValid() { s := w.cs[len(w.cs)-1] sf := reflect.Indirect(s).FieldByName(f.Name) + if sf.CanSet() { sf.Set(v) } @@ -176,9 +235,9 @@ func (w *walker) Map(m reflect.Value) error { // Create the map. 
If the map itself is nil, then just make a nil map var newMap reflect.Value if m.IsNil() { - newMap = reflect.Indirect(reflect.New(m.Type())) + newMap = reflect.New(m.Type()) } else { - newMap = reflect.MakeMap(m.Type()) + newMap = wrapPtr(reflect.MakeMap(m.Type())) } w.cs = append(w.cs, newMap) @@ -191,20 +250,28 @@ func (w *walker) MapElem(m, k, v reflect.Value) error { } func (w *walker) PointerEnter(v bool) error { - if w.ignoring() { - return nil + if v { + w.ps[w.depth]++ } + return nil +} - w.ps = append(w.ps, v) +func (w *walker) PointerExit(v bool) error { + if v { + w.ps[w.depth]-- + } return nil } -func (w *walker) PointerExit(bool) error { - if w.ignoring() { +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { return nil } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } - w.ps = w.ps[:len(w.ps)-1] + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() return nil } @@ -219,7 +286,7 @@ func (w *walker) Primitive(v reflect.Value) error { var newV reflect.Value if v.IsValid() && v.CanInterface() { newV = reflect.New(v.Type()) - reflect.Indirect(newV).Set(v) + newV.Elem().Set(v) } w.valPush(newV) @@ -235,9 +302,9 @@ func (w *walker) Slice(s reflect.Value) error { var newS reflect.Value if s.IsNil() { - newS = reflect.Indirect(reflect.New(s.Type())) + newS = reflect.New(s.Type()) } else { - newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap()) + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) } w.cs = append(w.cs, newS) @@ -257,6 +324,31 @@ func (w *walker) SliceElem(i int, elem reflect.Value) error { return nil } +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + func (w *walker) Struct(s reflect.Value) error { if w.ignoring() { return nil @@ -274,7 +366,10 @@ func (w *walker) Struct(s reflect.Value) error { return err } - v = reflect.ValueOf(dup) + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) } else { // No copier, we copy ourselves and allow reflectwalk to guide // us deeper into the structure for copying. @@ -295,18 +390,29 @@ func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { return nil } + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. + if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + // Push the field onto the stack, we'll handle it when we exit // the struct field in Exit... w.valPush(reflect.ValueOf(f)) return nil } +// ignore causes the walker to ignore any more values until we exit this on +func (w *walker) ignore() { + w.ignoreDepth = w.depth +} + func (w *walker) ignoring() bool { return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth } func (w *walker) pointerPeek() bool { - return w.ps[len(w.ps)-1] + return w.ps[w.depth] > 0 } func (w *walker) valPop() reflect.Value { @@ -338,7 +444,40 @@ func (w *walker) replacePointerMaybe() { // we need to push that onto the stack. 
if !w.pointerPeek() { w.valPush(reflect.Indirect(w.valPop())) + return + } + + v := w.valPop() + + // If the expected type is a pointer to an interface of any depth, + // such as *interface{}, **interface{}, etc., then we need to convert + // the value "v" from *CONCRETE to *interface{} so types match for + // Set. + // + // Example if v is type *Foo where Foo is a struct, v would become + // *interface{} instead. This only happens if we have an interface expectation + // at this depth. + // + // For more info, see GH-16 + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface { + y := reflect.New(iType) // Create *interface{} + y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced) + v = y // v is now typed *interface{} (where *v = Foo) + } + + for i := 1; i < w.ps[w.depth]; i++ { + if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok { + iface := reflect.New(iType).Elem() + iface.Set(v) + v = iface + } + + p := reflect.New(v.Type()) + p.Elem().Set(v) + v = p } + + w.valPush(v) } // if this value is a Locker, lock it and add it to the locks slice @@ -396,3 +535,14 @@ func (w *walker) lock(v reflect.Value) { locker.Lock() w.locks[w.depth] = locker } + +// wrapPtr is a helper that takes v and always make it *v. copystructure +// stores things internally as pointers until the last moment before unwrapping +func wrapPtr(v reflect.Value) reflect.Value { + if !v.IsValid() { + return v + } + vPtr := reflect.New(v.Type()) + vPtr.Elem().Set(v) + return vPtr +} diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go index 6944957d..47e1f9ef 100644 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -87,7 +87,7 @@ func dirUnix() (string, error) { cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) cmd.Stdout = &stdout if err := cmd.Run(); err != nil { - // If "getent" is missing, ignore it + // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
if err != exec.ErrNotFound { return "", err } @@ -118,6 +118,11 @@ func dirUnix() (string, error) { } func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + drive := os.Getenv("HOMEDRIVE") path := os.Getenv("HOMEPATH") home := drive + path diff --git a/vendor/github.com/mitchellh/go-testing-interface/.travis.yml b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml new file mode 100644 index 00000000..928d000e --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.8 + - 1.x + - tip + +script: + - go test + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/mitchellh/go-testing-interface/LICENSE b/vendor/github.com/mitchellh/go-testing-interface/LICENSE new file mode 100644 index 00000000..a3866a29 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-testing-interface/README.md b/vendor/github.com/mitchellh/go-testing-interface/README.md new file mode 100644 index 00000000..26781bba --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/README.md @@ -0,0 +1,52 @@ +# go-testing-interface + +go-testing-interface is a Go library that exports an interface that +`*testing.T` implements as well as a runtime version you can use in its +place. + +The purpose of this library is so that you can export test helpers as a +public API without depending on the "testing" package, since you can't +create a `*testing.T` struct manually. This lets you, for example, use the +public testing APIs to generate mock data at runtime, rather than just at +test time. + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/go-testing-interface). + +Given a test helper written using `go-testing-interface` like this: + + import "github.com/mitchellh/go-testing-interface" + + func TestHelper(t testing.T) { + t.Fatal("I failed") + } + +You can call the test helper in a real test easily: + + import "testing" + + func TestThing(t *testing.T) { + TestHelper(t) + } + +You can also call the test helper at runtime if needed: + + import "github.com/mitchellh/go-testing-interface" + + func main() { + TestHelper(&testing.RuntimeT{}) + } + +## Why?! 
+ +**Why would I call a test helper that takes a *testing.T at runtime?** + +You probably shouldn't. The only use case I've seen (and I've had) for this +is to implement a "dev mode" for a service where the test helpers are used +to populate mock data, create a mock DB, perhaps run service dependencies +in-memory, etc. + +Outside of a "dev mode", I've never seen a use case for this and I think +there shouldn't be one since the point of the `testing.T` interface is that +you can fail immediately. diff --git a/vendor/github.com/mitchellh/go-testing-interface/go.mod b/vendor/github.com/mitchellh/go-testing-interface/go.mod new file mode 100644 index 00000000..062796de --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-testing-interface diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go new file mode 100644 index 00000000..204afb42 --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -0,0 +1,84 @@ +// +build !go1.9 + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. Name and Skip methods are +// unimplemented noops. 
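+//
+// For example (a sketch; TestHelper is the hypothetical helper shown in
+// the README):
+//
+//	rt := new(RuntimeT)
+//	TestHelper(rt)
+//	if rt.Failed() {
+//		// react to the failure here
+//	}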
+type RuntimeT struct { + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.Fail() +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) + t.FailNow() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Skip(args ...interface{}) {} +func (t *RuntimeT) SkipNow() {} +func (t *RuntimeT) Skipf(format string, args ...interface{}) {} +func (t *RuntimeT) Skipped() bool { return false } diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go new file mode 100644 index 00000000..31b42cad --- /dev/null +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -0,0 +1,108 @@ +// +build go1.9 + +// NOTE: This is a temporary copy of testing.go for Go 1.9 with the addition +// of "Helper" to the T interface. Go 1.9 at the time of typing is in RC +// and is set for release shortly. We'll support this on master as the default +// as soon as 1.9 is released. + +package testing + +import ( + "fmt" + "log" +) + +// T is the interface that mimics the standard library *testing.T. +// +// In unit tests you can just pass a *testing.T struct. At runtime, outside +// of tests, you can pass in a RuntimeT struct from this package. +type T interface { + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() + FailNow() + Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Log(args ...interface{}) + Logf(format string, args ...interface{}) + Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool + Helper() +} + +// RuntimeT implements T and can be instantiated and run at runtime to +// mimic *testing.T behavior. Unlike *testing.T, this will simply panic +// for calls to Fatal. For calls to Error, you'll have to check the errors +// list to determine whether to exit yourself. +type RuntimeT struct { + skipped bool + failed bool +} + +func (t *RuntimeT) Error(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) + t.Fail() +} + +func (t *RuntimeT) Errorf(format string, args ...interface{}) { + log.Printf(format, args...) + t.Fail() +} + +func (t *RuntimeT) Fail() { + t.failed = true +} + +func (t *RuntimeT) FailNow() { + panic("testing.T failed, see logs for output (if any)") +} + +func (t *RuntimeT) Failed() bool { + return t.failed +} + +func (t *RuntimeT) Fatal(args ...interface{}) { + log.Print(args...) + t.FailNow() +} + +func (t *RuntimeT) Fatalf(format string, args ...interface{}) { + log.Printf(format, args...) 
+ t.FailNow() +} + +func (t *RuntimeT) Log(args ...interface{}) { + log.Println(fmt.Sprintln(args...)) +} + +func (t *RuntimeT) Logf(format string, args ...interface{}) { + log.Println(fmt.Sprintf(format, args...)) +} + +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} + +func (t *RuntimeT) Helper() {} diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md new file mode 100644 index 00000000..22985159 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md new file mode 100644 index 00000000..60ae3117 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/README.md @@ -0,0 +1,39 @@ +# go-wordwrap + +`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that +automatically wraps words into multiple lines. The primary use case for this +is in formatting CLI output, but of course word wrapping is a generally useful +thing to do. + +## Installation and Usage + +Install using `go get github.com/mitchellh/go-wordwrap`. + +Full documentation is available at +http://godoc.org/github.com/mitchellh/go-wordwrap + +Below is an example of its usage ignoring errors: + +```go +wrapped := wordwrap.WrapString("foo bar baz", 3) +fmt.Println(wrapped) +``` + +Would output: + +``` +foo +bar +baz +``` + +## Word Wrap Algorithm + +This library doesn't use any clever algorithm for word wrapping. The wrapping +is actually very naive: whenever there is whitespace or an explicit linebreak. +The goal of this library is for word wrapping CLI output, so the input is +typically pretty well controlled human language. Because of this, the naive +approach typically works just fine. + +In the future, we'd like to make the algorithm more advanced. We would do +so without breaking the API. 
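+
+One caveat worth noting (an illustration, not from the upstream docs): since
+breaks only happen at whitespace, a single word longer than the limit is left
+intact and that line exceeds the limit:
+
+```go
+wrapped := wordwrap.WrapString("a verylongword", 4)
+fmt.Println(wrapped)
+```
+
+Would output:
+
+```
+a
+verylongword
+```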
diff --git a/vendor/github.com/mitchellh/go-wordwrap/go.mod b/vendor/github.com/mitchellh/go-wordwrap/go.mod new file mode 100644 index 00000000..2ae411b2 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-wordwrap diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go new file mode 100644 index 00000000..ac67205b --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -0,0 +1,73 @@ +package wordwrap + +import ( + "bytes" + "unicode" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) > lim { + current = 0 + } else { + current += uint(spaceBuf.Len()) + spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + } else { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + + spaceBuf.WriteRune(char) + } else { + + wordBuf.WriteRune(char) + + if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE new file mode 100644 index 00000000..a3866a29 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md new file mode 100644 index 00000000..7d0de5bf --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/README.md @@ -0,0 +1,61 @@ +# hashstructure + +hashstructure is a Go library for creating a unique hash value +for arbitrary values in Go. + +This can be used to key values in a hash (for use in a map, set, etc.) +that are complex. The most common use case is comparing two values without +sending data across the network, caching values locally (de-dup), and so on. + +## Features + + * Hash any arbitrary Go value, including complex types. + + * Tag a struct field to ignore it and not affect the hash value. + + * Tag a slice type struct field to treat it as a set where ordering + doesn't affect the hash code but the field itself is still taken into + account to create the hash value. + + * Optionally specify a custom hash function to optimize for speed, collision + avoidance for your data set, etc. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/hashstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). + +A quick code example is shown below: + + + type ComplexStruct struct { + Name string + Age uint + Metadata map[string]interface{} + } + + v := ComplexStruct{ + Name: "mitchellh", + Age: 64, + Metadata: map[string]interface{}{ + "car": true, + "location": "California", + "siblings": []string{"Bob", "John"}, + }, + } + + hash, err := hashstructure.Hash(v, nil) + if err != nil { + panic(err) + } + + fmt.Printf("%d", hash) + // Output: + // 2307517237273902113 diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go new file mode 100644 index 00000000..6f586fa7 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go @@ -0,0 +1,323 @@ +package hashstructure + +import ( + "encoding/binary" + "fmt" + "hash" + "hash/fnv" + "reflect" +) + +// HashOptions are options that are available for hashing. +type HashOptions struct { + // Hasher is the hash function to use. If this isn't set, it will + // default to FNV. + Hasher hash.Hash64 + + // TagName is the struct tag to look at when hashing the structure. + // By default this is "hash". + TagName string +} + +// Hash returns the hash value of an arbitrary value. +// +// If opts is nil, then default options will be used. See HashOptions +// for the default values. +// +// Notes on the value: +// +// * Unexported fields on structs are ignored and do not affect the +// hash value. +// +// * Adding an exported field to a struct with the zero value will change +// the hash value. +// +// For structs, the hashing can be controlled using tags. For example: +// +// struct { +// Name string +// UUID string `hash:"ignore"` +// } +// +// The available tag values are: +// +// * "ignore" - The field will be ignored and not affect the hash code. +// +// * "set" - The field will be treated as a set, where ordering doesn't +// affect the hash code. This only works for slices. 
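+//
+// For example, a struct using both tags might look like this (a sketch):
+//
+//	type Person struct {
+//		Name    string
+//		Friends []string `hash:"set"`
+//		UUID    string   `hash:"ignore"`
+//	}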
+// +func Hash(v interface{}, opts *HashOptions) (uint64, error) { + // Create default options + if opts == nil { + opts = &HashOptions{} + } + if opts.Hasher == nil { + opts.Hasher = fnv.New64() + } + if opts.TagName == "" { + opts.TagName = "hash" + } + + // Reset the hash + opts.Hasher.Reset() + + // Create our walker and walk the structure + w := &walker{ + h: opts.Hasher, + tag: opts.TagName, + } + return w.visit(reflect.ValueOf(v), nil) +} + +type walker struct { + h hash.Hash64 + tag string +} + +type visitOpts struct { + // Flags are a bitmask of flags to affect behavior of this visit + Flags visitFlag + + // Information about the struct containing this field + Struct interface{} + StructField string +} + +func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { + // Loop since these can be wrapped in multiple layers of pointers + // and interfaces. + for { + // If we have an interface, dereference it. We have to do this up + // here because it might be a nil in there and the check below must + // catch that. + if v.Kind() == reflect.Interface { + v = v.Elem() + continue + } + + if v.Kind() == reflect.Ptr { + v = reflect.Indirect(v) + continue + } + + break + } + + // If it is nil, treat it like a zero. + if !v.IsValid() { + var tmp int8 + v = reflect.ValueOf(tmp) + } + + // Binary writing can use raw ints, we have to convert to + // a sized-int, we'll choose the largest... + switch v.Kind() { + case reflect.Int: + v = reflect.ValueOf(int64(v.Int())) + case reflect.Uint: + v = reflect.ValueOf(uint64(v.Uint())) + case reflect.Bool: + var tmp int8 + if v.Bool() { + tmp = 1 + } + v = reflect.ValueOf(tmp) + } + + k := v.Kind() + + // We can shortcut numeric values by directly binary writing them + if k >= reflect.Int && k <= reflect.Complex64 { + // A direct hash calculation + w.h.Reset() + err := binary.Write(w.h, binary.LittleEndian, v.Interface()) + return w.h.Sum64(), err + } + + switch k { + case reflect.Array: + var h uint64 + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + h = hashUpdateOrdered(w.h, h, current) + } + + return h, nil + + case reflect.Map: + var includeMap IncludableMap + if opts != nil && opts.Struct != nil { + if v, ok := opts.Struct.(IncludableMap); ok { + includeMap = v + } + } + + // Build the hash for the map. We do this by XOR-ing all the key + // and value hashes. This makes it deterministic despite ordering. 
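+		// XOR is commutative and associative, so the final value is
+		// independent of the map iteration order.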
+ var h uint64 + for _, k := range v.MapKeys() { + v := v.MapIndex(k) + if includeMap != nil { + incl, err := includeMap.HashIncludeMap( + opts.StructField, k.Interface(), v.Interface()) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + kh, err := w.visit(k, nil) + if err != nil { + return 0, err + } + vh, err := w.visit(v, nil) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + + return h, nil + + case reflect.Struct: + var include Includable + parent := v.Interface() + if impl, ok := parent.(Includable); ok { + include = impl + } + + t := v.Type() + h, err := w.visit(reflect.ValueOf(t.Name()), nil) + if err != nil { + return 0, err + } + + l := v.NumField() + for i := 0; i < l; i++ { + if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + var f visitFlag + fieldType := t.Field(i) + if fieldType.PkgPath != "" { + // Unexported + continue + } + + tag := fieldType.Tag.Get(w.tag) + if tag == "ignore" { + // Ignore this field + continue + } + + // Check if we implement includable and check it + if include != nil { + incl, err := include.HashInclude(fieldType.Name, v) + if err != nil { + return 0, err + } + if !incl { + continue + } + } + + switch tag { + case "set": + f |= visitFlagSet + } + + kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil) + if err != nil { + return 0, err + } + + vh, err := w.visit(v, &visitOpts{ + Flags: f, + Struct: parent, + StructField: fieldType.Name, + }) + if err != nil { + return 0, err + } + + fieldHash := hashUpdateOrdered(w.h, kh, vh) + h = hashUpdateUnordered(h, fieldHash) + } + } + + return h, nil + + case reflect.Slice: + // We have two behaviors here. If it isn't a set, then we just + // visit all the elements. If it is a set, then we do a deterministic + // hash code. + var h uint64 + var set bool + if opts != nil { + set = (opts.Flags & visitFlagSet) != 0 + } + l := v.Len() + for i := 0; i < l; i++ { + current, err := w.visit(v.Index(i), nil) + if err != nil { + return 0, err + } + + if set { + h = hashUpdateUnordered(h, current) + } else { + h = hashUpdateOrdered(w.h, h, current) + } + } + + return h, nil + + case reflect.String: + // Directly hash + w.h.Reset() + _, err := w.h.Write([]byte(v.String())) + return w.h.Sum64(), err + + default: + return 0, fmt.Errorf("unknown kind to hash: %s", k) + } + + return 0, nil +} + +func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 { + // For ordered updates, use a real hash function + h.Reset() + + // We just panic if the binary writes fail because we are writing + // an int64 which should never be fail-able. + e1 := binary.Write(h, binary.LittleEndian, a) + e2 := binary.Write(h, binary.LittleEndian, b) + if e1 != nil { + panic(e1) + } + if e2 != nil { + panic(e2) + } + + return h.Sum64() +} + +func hashUpdateUnordered(a, b uint64) uint64 { + return a ^ b +} + +// visitFlag is used as a bitmask for affecting visit behavior +type visitFlag uint + +const ( + visitFlagInvalid visitFlag = iota + visitFlagSet = iota << 1 +) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go new file mode 100644 index 00000000..b6289c0b --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/include.go @@ -0,0 +1,15 @@ +package hashstructure + +// Includable is an interface that can optionally be implemented by +// a struct. It will be called for each field in the struct to check whether +// it should be included in the hash. 
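+//
+// For example, a hypothetical type could exclude a field at runtime
+// (a sketch):
+//
+//	func (p Person) HashInclude(field string, v interface{}) (bool, error) {
+//		return field != "Password", nil
+//	}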
+type Includable interface { + HashInclude(field string, v interface{}) (bool, error) +} + +// IncludableMap is an interface that can optionally be implemented by +// a struct. It will be called when a map-type field is found to ask the +// struct if the map item should be included in the hash. +type IncludableMap interface { + HashIncludeMap(field string, k, v interface{}) (bool, error) +} diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml new file mode 100644 index 00000000..7f3fe9a9 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.4 + +script: + - go test diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index aa91f76c..115ae67c 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -72,7 +72,10 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { } // Modify the from kind to be correct with the new data - f = reflect.ValueOf(data).Type() + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } } return data, nil diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 40be5116..6dee0ef0 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -1,5 +1,5 @@ // The mapstructure package exposes functionality to convert an -// abitrary map[string]interface{} into a native Go structure. +// arbitrary map[string]interface{} into a native Go structure. // // The Go structure can be arbitrarily complex, containing slices, // other structs, etc. and the decoder will properly decode nested @@ -8,6 +8,7 @@ package mapstructure import ( + "encoding/json" "errors" "fmt" "reflect" @@ -67,6 +68,10 @@ type DecoderConfig struct { // FALSE, false, False. Anything else is an error) // - empty array = empty map and vice versa // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. // WeaklyTypedInput bool @@ -200,7 +205,7 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error d.config.DecodeHook, dataVal.Type(), val.Type(), data) if err != nil { - return err + return fmt.Errorf("error decoding '%s': %s", name, err) } } @@ -227,6 +232,8 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error err = d.decodePtr(name, data, val) case reflect.Slice: err = d.decodeSlice(name, data, val) + case reflect.Func: + err = d.decodeFunc(name, data, val) default: // If we reached this point then we weren't able to decode it return fmt.Errorf("%s: unsupported type: %s", name, dataKind) @@ -245,6 +252,10 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error // value to "data" of that type. 
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + dataValType := dataVal.Type() if !dataValType.AssignableTo(val.Type()) { return fmt.Errorf( @@ -301,6 +312,7 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -322,6 +334,14 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er } else { return fmt.Errorf("cannot parse '%s' as int: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s'", @@ -408,6 +428,7 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { dataVal := reflect.ValueOf(data) dataKind := getKind(dataVal) + dataType := dataVal.Type() switch { case dataKind == reflect.Int: @@ -429,6 +450,14 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) } else { return fmt.Errorf("cannot parse '%s' as float: %s", name, err) } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) default: return fmt.Errorf( "'%s' expected type '%s', got unconvertible type '%s'", @@ -456,15 +485,30 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er // Check input type dataVal := reflect.Indirect(reflect.ValueOf(data)) if dataVal.Kind() != reflect.Map { - // Accept empty array/slice instead of an empty map in weakly typed mode - if d.config.WeaklyTypedInput && - (dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) && - dataVal.Len() == 0 { - val.Set(valMap) - return nil - } else { - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + // In weak mode, we accept a slice of maps as an input... + if d.config.WeaklyTypedInput { + switch dataVal.Kind() { + case reflect.Array, reflect.Slice: + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil + } } + + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) } // Accumulate errors @@ -507,7 +551,12 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er // into that. Then set the value of the pointer to this type. 
valType := val.Type() valElemType := valType.Elem() - realVal := reflect.New(valElemType) + + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { return err } @@ -516,6 +565,19 @@ func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) er return nil } +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + val.Set(dataVal) + return nil +} + func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { dataVal := reflect.Indirect(reflect.ValueOf(data)) dataValKind := dataVal.Kind() @@ -523,26 +585,44 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) valElemType := valType.Elem() sliceType := reflect.SliceOf(valElemType) - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - // Accept empty map instead of array/slice in weakly typed mode - if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } else { + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. + default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + return fmt.Errorf( "'%s': source data must be an array or slice, got %s", name, dataValKind) + } - } - // Make a new slice to hold our result, same size as the original data. - valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + // Make a new slice to hold our result, same size as the original data. + valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } // Accumulate any errors errors := make([]string, 0) for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } currentField := valSlice.Index(i) fieldName := fmt.Sprintf("%s[%d]", name, i) @@ -607,17 +687,10 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) structs = structs[1:] structType := structVal.Type() + for i := 0; i < structType.NumField(); i++ { fieldType := structType.Field(i) - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind)) - continue - } - } + fieldKind := fieldType.Type.Kind() // If "squash" is specified in the tag, we squash the field down. 
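+		// For example (a sketch of the tag syntax):
+		//
+		//	type Friend struct {
+		//		Person `mapstructure:",squash"`
+		//	}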
squash := false @@ -630,7 +703,12 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) } if squash { - structs = append(structs, val.FieldByName(fieldType.Name)) + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, val.FieldByName(fieldType.Name)) + } continue } @@ -653,7 +731,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) if !rawMapVal.IsValid() { // Do a slower search by iterating over each key and // doing case-insensitive search. - for dataValKey, _ := range dataValKeys { + for dataValKey := range dataValKeys { mK, ok := dataValKey.Interface().(string) if !ok { // Not a string key @@ -701,7 +779,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey, _ := range dataValKeysUnused { + for rawKey := range dataValKeysUnused { keys = append(keys, rawKey.(string)) } sort.Strings(keys) @@ -716,7 +794,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) // Add the unused keys to the list of unused keys if we're tracking metadata if d.config.Metadata != nil { - for rawKey, _ := range dataValKeysUnused { + for rawKey := range dataValKeysUnused { key := rawKey.(string) if name != "" { key = fmt.Sprintf("%s.%s", name, key) diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go index 7c59d764..6a7f1761 100644 --- a/vendor/github.com/mitchellh/reflectwalk/location.go +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -11,6 +11,8 @@ const ( MapValue Slice SliceElem + Array + ArrayElem Struct StructField WalkLoc diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go index d3cfe854..70760cf4 100644 --- a/vendor/github.com/mitchellh/reflectwalk/location_string.go +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -1,15 +1,15 @@ -// generated by stringer -type=Location location.go; DO NOT EDIT +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. 
package reflectwalk import "fmt" -const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc" +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" -var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59} +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} func (i Location) String() string { - if i+1 >= Location(len(_Location_index)) { + if i >= Location(len(_Location_index)-1) { return fmt.Sprintf("Location(%d)", i) } return _Location_name[_Location_index[i]:_Location_index[i+1]] diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go index 1f206659..d7ab7b6d 100644 --- a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -5,6 +5,7 @@ package reflectwalk import ( + "errors" "reflect" ) @@ -18,6 +19,12 @@ type PrimitiveWalker interface { Primitive(reflect.Value) error } +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. +type InterfaceWalker interface { + Interface(reflect.Value) error +} + // MapWalker implementations are able to handle individual elements // found within a map structure. type MapWalker interface { @@ -32,6 +39,13 @@ type SliceWalker interface { SliceElem(int, reflect.Value) error } +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + // StructWalker is an interface that has methods that are called for // structs when a Walk is done. type StructWalker interface { @@ -55,6 +69,14 @@ type PointerWalker interface { PointerExit(bool) error } +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + // Walk takes an arbitrary value and an interface and traverses the // value, calling callbacks on the interface if they are supported. // The interface should implement one or more of the walker interfaces @@ -79,23 +101,63 @@ func Walk(data, walker interface{}) (err error) { func walk(v reflect.Value, w interface{}) (err error) { // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. 
pointer := false - if v.Kind() == reflect.Ptr { - pointer = true - v = reflect.Indirect(v) - } - if pw, ok := w.(PointerWalker); ok { - if err = pw.PointerEnter(pointer); err != nil { - return + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() } - defer func() { - if err != nil { + if pointerV.Kind() == reflect.Ptr { + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { return } - err = pw.PointerExit(pointer) - }() + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break } // We preserve the original value here because if it is an interface @@ -125,6 +187,9 @@ func walk(v reflect.Value, w interface{}) (err error) { case reflect.Struct: err = walkStruct(v, w) return + case reflect.Array: + err = walkArray(v, w) + return default: panic("unsupported type: " + k.String()) } @@ -232,42 +297,99 @@ func walkSlice(v reflect.Value, w interface{}) (err error) { return nil } -func walkStruct(v reflect.Value, w interface{}) (err error) { - ew, ewok := w.(EnterExitWalker) - if ewok { - ew.Enter(Struct) +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) } - if sw, ok := w.(StructWalker); ok { - if err = sw.Struct(v); err != nil { - return + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err } } - vt := v.Type() - for i := 0; i < vt.NumField(); i++ { - sf := vt.Field(i) - f := v.FieldByIndex([]int{i}) + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) - if sw, ok := w.(StructWalker); ok { - err = sw.StructField(sf, f) - if err != nil { - return + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err } } ew, ok := w.(EnterExitWalker) if ok { - ew.Enter(StructField) + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } - err = walk(f, w) + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } if err != nil { return } + } - if ok { - ew.Exit(StructField) + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } } } diff --git a/vendor/github.com/posener/complete/.gitignore b/vendor/github.com/posener/complete/.gitignore new file mode 100644 index 00000000..293955f9 --- /dev/null +++ b/vendor/github.com/posener/complete/.gitignore @@ -0,0 +1,4 @@ +.idea +coverage.txt 
+gocomplete/gocomplete +example/self/self diff --git a/vendor/github.com/posener/complete/.travis.yml b/vendor/github.com/posener/complete/.travis.yml new file mode 100644 index 00000000..2fae9454 --- /dev/null +++ b/vendor/github.com/posener/complete/.travis.yml @@ -0,0 +1,16 @@ +language: go +sudo: false +go: + - 1.11 + - 1.10.x + - 1.9 + - 1.8 + +before_install: + - go get -u -t ./... + +script: + - GO111MODULE=on ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/posener/complete/LICENSE.txt b/vendor/github.com/posener/complete/LICENSE.txt new file mode 100644 index 00000000..16249b4a --- /dev/null +++ b/vendor/github.com/posener/complete/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License + +Copyright (c) 2017 Eyal Posener + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/posener/complete/args.go b/vendor/github.com/posener/complete/args.go new file mode 100644 index 00000000..17ab2c6d --- /dev/null +++ b/vendor/github.com/posener/complete/args.go @@ -0,0 +1,111 @@ +package complete + +import ( + "os" + "path/filepath" + "strings" + "unicode" +) + +// Args describes command line arguments +type Args struct { + // All lists of all arguments in command line (not including the command itself) + All []string + // Completed lists of all completed arguments in command line, + // If the last one is still being typed - no space after it, + // it won't appear in this list of arguments. + Completed []string + // Last argument in command line, the one being typed, if the last + // character in the command line is a space, this argument will be empty, + // otherwise this would be the last word. + Last string + // LastCompleted is the last argument that was fully typed. + // If the last character in the command line is space, this would be the + // last word, otherwise, it would be the word before that. + LastCompleted string +} + +// Directory gives the directory of the current written +// last argument if it represents a file name being written. +// in case that it is not, we fall back to the current directory. 
+func (a Args) Directory() string { + if info, err := os.Stat(a.Last); err == nil && info.IsDir() { + return fixPathForm(a.Last, a.Last) + } + dir := filepath.Dir(a.Last) + if info, err := os.Stat(dir); err != nil || !info.IsDir() { + return "./" + } + return fixPathForm(a.Last, dir) +} + +func newArgs(line string) Args { + var ( + all []string + completed []string + ) + parts := splitFields(line) + if len(parts) > 0 { + all = parts[1:] + completed = removeLast(parts[1:]) + } + return Args{ + All: all, + Completed: completed, + Last: last(parts), + LastCompleted: last(completed), + } +} + +// splitFields returns a list of fields from the given command line. +// If the last character is space, it appends an empty field in the end +// indicating that the field before it was completed. +// If the last field is of the form "a=b", it splits it to two fields: "a", "b", +// So it can be completed. +func splitFields(line string) []string { + parts := strings.Fields(line) + + // Add empty field if the last field was completed. + if len(line) > 0 && unicode.IsSpace(rune(line[len(line)-1])) { + parts = append(parts, "") + } + + // Treat the last field if it is of the form "a=b" + parts = splitLastEqual(parts) + return parts +} + +func splitLastEqual(line []string) []string { + if len(line) == 0 { + return line + } + parts := strings.Split(line[len(line)-1], "=") + return append(line[:len(line)-1], parts...) +} + +func (a Args) from(i int) Args { + if i > len(a.All) { + i = len(a.All) + } + a.All = a.All[i:] + + if i > len(a.Completed) { + i = len(a.Completed) + } + a.Completed = a.Completed[i:] + return a +} + +func removeLast(a []string) []string { + if len(a) > 0 { + return a[:len(a)-1] + } + return a +} + +func last(args []string) string { + if len(args) == 0 { + return "" + } + return args[len(args)-1] +} diff --git a/vendor/github.com/posener/complete/cmd/cmd.go b/vendor/github.com/posener/complete/cmd/cmd.go new file mode 100644 index 00000000..b99fe529 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/cmd.go @@ -0,0 +1,128 @@ +// Package cmd used for command line options for the complete tool +package cmd + +import ( + "errors" + "flag" + "fmt" + "os" + "strings" + + "github.com/posener/complete/cmd/install" +) + +// CLI for command line +type CLI struct { + Name string + InstallName string + UninstallName string + + install bool + uninstall bool + yes bool +} + +const ( + defaultInstallName = "install" + defaultUninstallName = "uninstall" +) + +// Run is used when running complete in command line mode. +// this is used when the complete is not completing words, but to +// install it or uninstall it. +func (f *CLI) Run() bool { + err := f.validate() + if err != nil { + os.Stderr.WriteString(err.Error() + "\n") + os.Exit(1) + } + + switch { + case f.install: + f.prompt() + err = install.Install(f.Name) + case f.uninstall: + f.prompt() + err = install.Uninstall(f.Name) + default: + // non of the action flags matched, + // returning false should make the real program execute + return false + } + + if err != nil { + fmt.Printf("%s failed! %s\n", f.action(), err) + os.Exit(3) + } + fmt.Println("Done!") + return true +} + +// prompt use for approval +// exit if approval was not given +func (f *CLI) prompt() { + defer fmt.Println(f.action() + "ing...") + if f.yes { + return + } + fmt.Printf("%s completion for %s? 
", f.action(), f.Name) + var answer string + fmt.Scanln(&answer) + + switch strings.ToLower(answer) { + case "y", "yes": + return + default: + fmt.Println("Cancelling...") + os.Exit(1) + } +} + +// AddFlags adds the CLI flags to the flag set. +// If flags is nil, the default command line flags will be taken. +// Pass non-empty strings as installName and uninstallName to override the default +// flag names. +func (f *CLI) AddFlags(flags *flag.FlagSet) { + if flags == nil { + flags = flag.CommandLine + } + + if f.InstallName == "" { + f.InstallName = defaultInstallName + } + if f.UninstallName == "" { + f.UninstallName = defaultUninstallName + } + + if flags.Lookup(f.InstallName) == nil { + flags.BoolVar(&f.install, f.InstallName, false, + fmt.Sprintf("Install completion for %s command", f.Name)) + } + if flags.Lookup(f.UninstallName) == nil { + flags.BoolVar(&f.uninstall, f.UninstallName, false, + fmt.Sprintf("Uninstall completion for %s command", f.Name)) + } + if flags.Lookup("y") == nil { + flags.BoolVar(&f.yes, "y", false, "Don't prompt user for typing 'yes' when installing completion") + } +} + +// validate the CLI +func (f *CLI) validate() error { + if f.install && f.uninstall { + return errors.New("Install and uninstall are mutually exclusive") + } + return nil +} + +// action name according to the CLI values. +func (f *CLI) action() string { + switch { + case f.install: + return "Install" + case f.uninstall: + return "Uninstall" + default: + return "unknown" + } +} diff --git a/vendor/github.com/posener/complete/cmd/install/bash.go b/vendor/github.com/posener/complete/cmd/install/bash.go new file mode 100644 index 00000000..a287f998 --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/bash.go @@ -0,0 +1,32 @@ +package install + +import "fmt" + +// (un)install in bash +// basically adds/remove from .bashrc: +// +// complete -C +type bash struct { + rc string +} + +func (b bash) Install(cmd, bin string) error { + completeCmd := b.cmd(cmd, bin) + if lineInFile(b.rc, completeCmd) { + return fmt.Errorf("already installed in %s", b.rc) + } + return appendToFile(b.rc, completeCmd) +} + +func (b bash) Uninstall(cmd, bin string) error { + completeCmd := b.cmd(cmd, bin) + if !lineInFile(b.rc, completeCmd) { + return fmt.Errorf("does not installed in %s", b.rc) + } + + return removeFromFile(b.rc, completeCmd) +} + +func (bash) cmd(cmd, bin string) string { + return fmt.Sprintf("complete -C %s %s", bin, cmd) +} diff --git a/vendor/github.com/posener/complete/cmd/install/fish.go b/vendor/github.com/posener/complete/cmd/install/fish.go new file mode 100644 index 00000000..6467196b --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/fish.go @@ -0,0 +1,56 @@ +package install + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "text/template" +) + +// (un)install in fish + +type fish struct { + configDir string +} + +func (f fish) Install(cmd, bin string) error { + completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) + completeCmd, err := f.cmd(cmd, bin) + if err != nil { + return err + } + if _, err := os.Stat(completionFile); err == nil { + return fmt.Errorf("already installed at %s", completionFile) + } + + return createFile(completionFile, completeCmd) +} + +func (f fish) Uninstall(cmd, bin string) error { + completionFile := filepath.Join(f.configDir, "completions", fmt.Sprintf("%s.fish", cmd)) + if _, err := os.Stat(completionFile); err != nil { + return fmt.Errorf("does not installed in %s", f.configDir) + } + + return 
os.Remove(completionFile) +} + +func (f fish) cmd(cmd, bin string) (string, error) { + var buf bytes.Buffer + params := struct{ Cmd, Bin string }{cmd, bin} + tmpl := template.Must(template.New("cmd").Parse(` +function __complete_{{.Cmd}} + set -lx COMP_LINE (string join ' ' (commandline -o)) + test (commandline -ct) = "" + and set COMP_LINE "$COMP_LINE " + {{.Bin}} +end +complete -c {{.Cmd}} -a "(__complete_{{.Cmd}})" +`)) + err := tmpl.Execute(&buf, params) + if err != nil { + return "", err + } + return buf.String(), nil +} diff --git a/vendor/github.com/posener/complete/cmd/install/install.go b/vendor/github.com/posener/complete/cmd/install/install.go new file mode 100644 index 00000000..dfa1963b --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/install.go @@ -0,0 +1,119 @@ +package install + +import ( + "errors" + "os" + "os/user" + "path/filepath" + + "github.com/hashicorp/go-multierror" +) + +type installer interface { + Install(cmd, bin string) error + Uninstall(cmd, bin string) error +} + +// Install complete command given: +// cmd: is the command name +func Install(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not find any shells to install") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Install(cmd, bin) + if errI != nil { + err = multierror.Append(err, errI) + } + } + + return err +} + +// Uninstall complete command given: +// cmd: is the command name +func Uninstall(cmd string) error { + is := installers() + if len(is) == 0 { + return errors.New("Did not find any shells to uninstall") + } + bin, err := getBinaryPath() + if err != nil { + return err + } + + for _, i := range is { + errI := i.Uninstall(cmd, bin) + if errI != nil { + err = multierror.Append(err, errI) + } + } + + return err +} + +func installers() (i []installer) { + for _, rc := range [...]string{".bashrc", ".bash_profile", ".bash_login", ".profile"} { + if f := rcFile(rc); f != "" { + i = append(i, bash{f}) + break + } + } + if f := rcFile(".zshrc"); f != "" { + i = append(i, zsh{f}) + } + if d := fishConfigDir(); d != "" { + i = append(i, fish{d}) + } + return +} + +func fishConfigDir() string { + configDir := filepath.Join(getConfigHomePath(), "fish") + if configDir == "" { + return "" + } + if info, err := os.Stat(configDir); err != nil || !info.IsDir() { + return "" + } + return configDir +} + +func getConfigHomePath() string { + u, err := user.Current() + if err != nil { + return "" + } + + configHome := os.Getenv("XDG_CONFIG_HOME") + if configHome == "" { + return filepath.Join(u.HomeDir, ".config") + } + return configHome +} + +func getBinaryPath() (string, error) { + bin, err := os.Executable() + if err != nil { + return "", err + } + return filepath.Abs(bin) +} + +func rcFile(name string) string { + u, err := user.Current() + if err != nil { + return "" + } + path := filepath.Join(u.HomeDir, name) + if _, err := os.Stat(path); err != nil { + return "" + } + return path +} diff --git a/vendor/github.com/posener/complete/cmd/install/utils.go b/vendor/github.com/posener/complete/cmd/install/utils.go new file mode 100644 index 00000000..d34ac8ca --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/utils.go @@ -0,0 +1,140 @@ +package install + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +func lineInFile(name string, lookFor string) bool { + f, err := os.Open(name) + if err != nil { + return false + } + defer f.Close() + r := bufio.NewReader(f) 
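+	// ReadLine may return a long line in fragments (isPrefix == true), so
+	// fragments are collected in prefix until the full line is assembled.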
+ prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + return false + } + if err != nil { + return false + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) + if string(line) == lookFor { + return true + } + prefix = prefix[:0] + } +} + +func createFile(name string, content string) error { + // make sure file directory exists + if err := os.MkdirAll(filepath.Dir(name), 0775); err != nil { + return err + } + + // create the file + f, err := os.Create(name) + if err != nil { + return err + } + defer f.Close() + + // write file content + _, err = f.WriteString(fmt.Sprintf("%s\n", content)) + return err +} + +func appendToFile(name string, content string) error { + f, err := os.OpenFile(name, os.O_RDWR|os.O_APPEND, 0) + if err != nil { + return err + } + defer f.Close() + _, err = f.WriteString(fmt.Sprintf("\n%s\n", content)) + return err +} + +func removeFromFile(name string, content string) error { + backup := name + ".bck" + err := copyFile(name, backup) + if err != nil { + return err + } + temp, err := removeContentToTempFile(name, content) + if err != nil { + return err + } + + err = copyFile(temp, name) + if err != nil { + return err + } + + return os.Remove(backup) +} + +func removeContentToTempFile(name, content string) (string, error) { + rf, err := os.Open(name) + if err != nil { + return "", err + } + defer rf.Close() + wf, err := ioutil.TempFile("/tmp", "complete-") + if err != nil { + return "", err + } + defer wf.Close() + + r := bufio.NewReader(rf) + prefix := []byte{} + for { + line, isPrefix, err := r.ReadLine() + if err == io.EOF { + break + } + if err != nil { + return "", err + } + if isPrefix { + prefix = append(prefix, line...) + continue + } + line = append(prefix, line...) 
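+		// Skip lines that exactly match the content being removed; every
+		// other line is copied through to the temporary file.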
+ str := string(line) + if str == content { + continue + } + _, err = wf.WriteString(str + "\n") + if err != nil { + return "", err + } + prefix = prefix[:0] + } + return wf.Name(), nil +} + +func copyFile(src string, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + _, err = io.Copy(out, in) + return err +} diff --git a/vendor/github.com/posener/complete/cmd/install/zsh.go b/vendor/github.com/posener/complete/cmd/install/zsh.go new file mode 100644 index 00000000..a625f53c --- /dev/null +++ b/vendor/github.com/posener/complete/cmd/install/zsh.go @@ -0,0 +1,39 @@ +package install + +import "fmt" + +// (un)install in zsh +// basically adds/remove from .zshrc: +// +// autoload -U +X bashcompinit && bashcompinit" +// complete -C +type zsh struct { + rc string +} + +func (z zsh) Install(cmd, bin string) error { + completeCmd := z.cmd(cmd, bin) + if lineInFile(z.rc, completeCmd) { + return fmt.Errorf("already installed in %s", z.rc) + } + + bashCompInit := "autoload -U +X bashcompinit && bashcompinit" + if !lineInFile(z.rc, bashCompInit) { + completeCmd = bashCompInit + "\n" + completeCmd + } + + return appendToFile(z.rc, completeCmd) +} + +func (z zsh) Uninstall(cmd, bin string) error { + completeCmd := z.cmd(cmd, bin) + if !lineInFile(z.rc, completeCmd) { + return fmt.Errorf("does not installed in %s", z.rc) + } + + return removeFromFile(z.rc, completeCmd) +} + +func (zsh) cmd(cmd, bin string) string { + return fmt.Sprintf("complete -o nospace -C %s %s", bin, cmd) +} diff --git a/vendor/github.com/posener/complete/command.go b/vendor/github.com/posener/complete/command.go new file mode 100644 index 00000000..82d37d52 --- /dev/null +++ b/vendor/github.com/posener/complete/command.go @@ -0,0 +1,111 @@ +package complete + +// Command represents a command line +// It holds the data that enables auto completion of command line +// Command can also be a sub command. +type Command struct { + // Sub is map of sub commands of the current command + // The key refer to the sub command name, and the value is it's + // Command descriptive struct. + Sub Commands + + // Flags is a map of flags that the command accepts. + // The key is the flag name, and the value is it's predictions. + Flags Flags + + // GlobalFlags is a map of flags that the command accepts. + // Global flags that can appear also after a sub command. + GlobalFlags Flags + + // Args are extra arguments that the command accepts, those who are + // given without any flag before. + Args Predictor +} + +// Predict returns all possible predictions for args according to the command struct +func (c *Command) Predict(a Args) []string { + options, _ := c.predict(a) + return options +} + +// Commands is the type of Sub member, it maps a command name to a command struct +type Commands map[string]Command + +// Predict completion of sub command names names according to command line arguments +func (c Commands) Predict(a Args) (prediction []string) { + for sub := range c { + prediction = append(prediction, sub) + } + return +} + +// Flags is the type Flags of the Flags member, it maps a flag name to the flag predictions. 
+type Flags map[string]Predictor + +// Predict completion of flags names according to command line arguments +func (f Flags) Predict(a Args) (prediction []string) { + for flag := range f { + // If the flag starts with a hyphen, we avoid emitting the prediction + // unless the last typed arg contains a hyphen as well. + flagHyphenStart := len(flag) != 0 && flag[0] == '-' + lastHyphenStart := len(a.Last) != 0 && a.Last[0] == '-' + if flagHyphenStart && !lastHyphenStart { + continue + } + prediction = append(prediction, flag) + } + return +} + +// predict options +// only is set to true if no more options are allowed to be returned +// those are in cases of special flag that has specific completion arguments, +// and other flags or sub commands can't come after it. +func (c *Command) predict(a Args) (options []string, only bool) { + + // search sub commands for predictions first + subCommandFound := false + for i, arg := range a.Completed { + if cmd, ok := c.Sub[arg]; ok { + subCommandFound = true + + // recursive call for sub command + options, only = cmd.predict(a.from(i)) + if only { + return + } + + // We matched so stop searching. Continuing to search can accidentally + // match a subcommand with current set of commands, see issue #46. + break + } + } + + // if last completed word is a global flag that we need to complete + if predictor, ok := c.GlobalFlags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to global flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.GlobalFlags.Predict(a)...) + + // if a sub command was entered, we won't add the parent command + // completions and we return here. + if subCommandFound { + return + } + + // if last completed word is a command flag that we need to complete + if predictor, ok := c.Flags[a.LastCompleted]; ok && predictor != nil { + Log("Predicting according to flag %s", a.LastCompleted) + return predictor.Predict(a), true + } + + options = append(options, c.Sub.Predict(a)...) + options = append(options, c.Flags.Predict(a)...) + if c.Args != nil { + options = append(options, c.Args.Predict(a)...) + } + + return +} diff --git a/vendor/github.com/posener/complete/complete.go b/vendor/github.com/posener/complete/complete.go new file mode 100644 index 00000000..725c4deb --- /dev/null +++ b/vendor/github.com/posener/complete/complete.go @@ -0,0 +1,109 @@ +// Package complete provides a tool for bash writing bash completion in go. +// +// Writing bash completion scripts is a hard work. This package provides an easy way +// to create bash completion scripts for any command, and also an easy way to install/uninstall +// the completion of the command. +package complete + +import ( + "flag" + "fmt" + "io" + "os" + "strconv" + + "github.com/posener/complete/cmd" + "github.com/posener/complete/match" +) + +const ( + envLine = "COMP_LINE" + envPoint = "COMP_POINT" + envDebug = "COMP_DEBUG" +) + +// Complete structs define completion for a command with CLI options +type Complete struct { + Command Command + cmd.CLI + Out io.Writer +} + +// New creates a new complete command. +// name is the name of command we want to auto complete. +// IMPORTANT: it must be the same name - if the auto complete +// completes the 'go' command, name must be equal to "go". +// command is the struct of the command completion. 
+func New(name string, command Command) *Complete { + return &Complete{ + Command: command, + CLI: cmd.CLI{Name: name}, + Out: os.Stdout, + } +} + +// Run runs the completion and add installation flags beforehand. +// The flags are added to the main flag CommandLine variable. +func (c *Complete) Run() bool { + c.AddFlags(nil) + flag.Parse() + return c.Complete() +} + +// Complete a command from completion line in environment variable, +// and print out the complete options. +// returns success if the completion ran or if the cli matched +// any of the given flags, false otherwise +// For installation: it assumes that flags were added and parsed before +// it was called. +func (c *Complete) Complete() bool { + line, point, ok := getEnv() + if !ok { + // make sure flags parsed, + // in case they were not added in the main program + return c.CLI.Run() + } + + if point >= 0 && point < len(line) { + line = line[:point] + } + + Log("Completing phrase: %s", line) + a := newArgs(line) + Log("Completing last field: %s", a.Last) + options := c.Command.Predict(a) + Log("Options: %s", options) + + // filter only options that match the last argument + matches := []string{} + for _, option := range options { + if match.Prefix(option, a.Last) { + matches = append(matches, option) + } + } + Log("Matches: %s", matches) + c.output(matches) + return true +} + +func getEnv() (line string, point int, ok bool) { + line = os.Getenv(envLine) + if line == "" { + return + } + point, err := strconv.Atoi(os.Getenv(envPoint)) + if err != nil { + // If failed parsing point for some reason, set it to point + // on the end of the line. + Log("Failed parsing point %s: %v", os.Getenv(envPoint), err) + point = len(line) + } + return line, point, true +} + +func (c *Complete) output(options []string) { + // stdout of program defines the complete options + for _, option := range options { + fmt.Fprintln(c.Out, option) + } +} diff --git a/vendor/github.com/posener/complete/go.mod b/vendor/github.com/posener/complete/go.mod new file mode 100644 index 00000000..fef0c440 --- /dev/null +++ b/vendor/github.com/posener/complete/go.mod @@ -0,0 +1,3 @@ +module github.com/posener/complete + +require github.com/hashicorp/go-multierror v1.0.0 diff --git a/vendor/github.com/posener/complete/go.sum b/vendor/github.com/posener/complete/go.sum new file mode 100644 index 00000000..d2f13301 --- /dev/null +++ b/vendor/github.com/posener/complete/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= diff --git a/vendor/github.com/posener/complete/log.go b/vendor/github.com/posener/complete/log.go new file mode 100644 index 00000000..c3029556 --- /dev/null +++ b/vendor/github.com/posener/complete/log.go @@ -0,0 +1,22 @@ +package complete + +import ( + "io/ioutil" + "log" + "os" +) + +// Log is used for debugging purposes +// since complete is running on tab completion, it is nice to +// have logs to the stderr (when writing your own completer) +// to write logs, set the COMP_DEBUG environment variable and +// use complete.Log in the complete program +var Log = getLogger() + +func getLogger() func(format string, args ...interface{}) { + var logfile = ioutil.Discard + if os.Getenv(envDebug) != "" { + logfile = os.Stderr 
+ } + return log.New(logfile, "complete ", log.Flags()).Printf +} diff --git a/vendor/github.com/posener/complete/match/file.go b/vendor/github.com/posener/complete/match/file.go new file mode 100644 index 00000000..051171e8 --- /dev/null +++ b/vendor/github.com/posener/complete/match/file.go @@ -0,0 +1,19 @@ +package match + +import "strings" + +// File returns true if prefix can match the file +func File(file, prefix string) bool { + // special case for current directory completion + if file == "./" && (prefix == "." || prefix == "") { + return true + } + if prefix == "." && strings.HasPrefix(file, ".") { + return true + } + + file = strings.TrimPrefix(file, "./") + prefix = strings.TrimPrefix(prefix, "./") + + return strings.HasPrefix(file, prefix) +} diff --git a/vendor/github.com/posener/complete/match/match.go b/vendor/github.com/posener/complete/match/match.go new file mode 100644 index 00000000..812fcac9 --- /dev/null +++ b/vendor/github.com/posener/complete/match/match.go @@ -0,0 +1,6 @@ +package match + +// Match matches two strings +// it is used for comparing a term to the last typed +// word, the prefix, and see if it is a possible auto complete option. +type Match func(term, prefix string) bool diff --git a/vendor/github.com/posener/complete/match/prefix.go b/vendor/github.com/posener/complete/match/prefix.go new file mode 100644 index 00000000..9a01ba63 --- /dev/null +++ b/vendor/github.com/posener/complete/match/prefix.go @@ -0,0 +1,9 @@ +package match + +import "strings" + +// Prefix is a simple Matcher, if the word is it's prefix, there is a match +// Match returns true if a has the prefix as prefix +func Prefix(long, prefix string) bool { + return strings.HasPrefix(long, prefix) +} diff --git a/vendor/github.com/posener/complete/predict.go b/vendor/github.com/posener/complete/predict.go new file mode 100644 index 00000000..82070632 --- /dev/null +++ b/vendor/github.com/posener/complete/predict.go @@ -0,0 +1,41 @@ +package complete + +// Predictor implements a predict method, in which given +// command line arguments returns a list of options it predicts. +type Predictor interface { + Predict(Args) []string +} + +// PredictOr unions two predicate functions, so that the result predicate +// returns the union of their predication +func PredictOr(predictors ...Predictor) Predictor { + return PredictFunc(func(a Args) (prediction []string) { + for _, p := range predictors { + if p == nil { + continue + } + prediction = append(prediction, p.Predict(a)...) + } + return + }) +} + +// PredictFunc determines what terms can follow a command or a flag +// It is used for auto completion, given last - the last word in the already +// in the command line, what words can complete it. +type PredictFunc func(Args) []string + +// Predict invokes the predict function and implements the Predictor interface +func (p PredictFunc) Predict(a Args) []string { + if p == nil { + return nil + } + return p(a) +} + +// PredictNothing does not expect anything after. +var PredictNothing Predictor + +// PredictAnything expects something, but nothing particular, such as a number +// or arbitrary name. 
+var PredictAnything = PredictFunc(func(Args) []string { return nil }) diff --git a/vendor/github.com/posener/complete/predict_files.go b/vendor/github.com/posener/complete/predict_files.go new file mode 100644 index 00000000..c8adf7e8 --- /dev/null +++ b/vendor/github.com/posener/complete/predict_files.go @@ -0,0 +1,108 @@ +package complete + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/posener/complete/match" +) + +// PredictDirs will search for directories in the given started to be typed +// path, if no path was started to be typed, it will complete to directories +// in the current working directory. +func PredictDirs(pattern string) Predictor { + return files(pattern, false) +} + +// PredictFiles will search for files matching the given pattern in the started to +// be typed path, if no path was started to be typed, it will complete to files that +// match the pattern in the current working directory. +// To match any file, use "*" as pattern. To match go files use "*.go", and so on. +func PredictFiles(pattern string) Predictor { + return files(pattern, true) +} + +func files(pattern string, allowFiles bool) PredictFunc { + + // search for files according to arguments, + // if only one directory has matched the result, search recursively into + // this directory to give more results. + return func(a Args) (prediction []string) { + prediction = predictFiles(a, pattern, allowFiles) + + // if the number of prediction is not 1, we either have many results or + // have no results, so we return it. + if len(prediction) != 1 { + return + } + + // only try deeper, if the one item is a directory + if stat, err := os.Stat(prediction[0]); err != nil || !stat.IsDir() { + return + } + + a.Last = prediction[0] + return predictFiles(a, pattern, allowFiles) + } +} + +func predictFiles(a Args, pattern string, allowFiles bool) []string { + if strings.HasSuffix(a.Last, "/..") { + return nil + } + + dir := a.Directory() + files := listFiles(dir, pattern, allowFiles) + + // add dir if match + files = append(files, dir) + + return PredictFilesSet(files).Predict(a) +} + +// PredictFilesSet predict according to file rules to a given set of file names +func PredictFilesSet(files []string) PredictFunc { + return func(a Args) (prediction []string) { + // add all matching files to prediction + for _, f := range files { + f = fixPathForm(a.Last, f) + + // test matching of file to the argument + if match.File(f, a.Last) { + prediction = append(prediction, f) + } + } + return + } +} + +func listFiles(dir, pattern string, allowFiles bool) []string { + // set of all file names + m := map[string]bool{} + + // list files + if files, err := filepath.Glob(filepath.Join(dir, pattern)); err == nil { + for _, f := range files { + if stat, err := os.Stat(f); err != nil || stat.IsDir() || allowFiles { + m[f] = true + } + } + } + + // list directories + if dirs, err := ioutil.ReadDir(dir); err == nil { + for _, d := range dirs { + if d.IsDir() { + m[filepath.Join(dir, d.Name())] = true + } + } + } + + list := make([]string, 0, len(m)) + for k := range m { + list = append(list, k) + } + return list +} diff --git a/vendor/github.com/posener/complete/predict_set.go b/vendor/github.com/posener/complete/predict_set.go new file mode 100644 index 00000000..fa4a34ae --- /dev/null +++ b/vendor/github.com/posener/complete/predict_set.go @@ -0,0 +1,12 @@ +package complete + +// PredictSet expects specific set of terms, given in the options argument. 
+func PredictSet(options ...string) Predictor {
+	return predictSet(options)
+}
+
+type predictSet []string
+
+func (p predictSet) Predict(a Args) []string {
+	return p
+}
diff --git a/vendor/github.com/posener/complete/readme.md b/vendor/github.com/posener/complete/readme.md
new file mode 100644
index 00000000..6d757ef8
--- /dev/null
+++ b/vendor/github.com/posener/complete/readme.md
@@ -0,0 +1,118 @@
+# complete
+
+A tool for writing bash completion in Go, and bash completion for the go command line.
+
+[![Build Status](https://travis-ci.org/posener/complete.svg?branch=master)](https://travis-ci.org/posener/complete)
+[![codecov](https://codecov.io/gh/posener/complete/branch/master/graph/badge.svg)](https://codecov.io/gh/posener/complete)
+[![golangci](https://golangci.com/badges/github.com/posener/complete.svg)](https://golangci.com/r/github.com/posener/complete)
+[![GoDoc](https://godoc.org/github.com/posener/complete?status.svg)](http://godoc.org/github.com/posener/complete)
+[![Go Report Card](https://goreportcard.com/badge/github.com/posener/complete)](https://goreportcard.com/report/github.com/posener/complete)
+
+Writing bash completion scripts is hard work. This package provides an easy way
+to create bash completion scripts for any command, and also an easy way to install/uninstall
+the completion of the command.
+
+## go command bash completion
+
+In [gocomplete](./gocomplete) there is an example of bash completion for the `go` command line.
+
+This is an example that uses the `complete` package on the `go` command - the `complete` package
+can also be used to implement any completions, see [Usage](#usage).
+
+### Install
+
+1. Type in your shell:
+```
+go get -u github.com/posener/complete/gocomplete
+gocomplete -install
+```
+
+2. Restart your shell
+
+Uninstall by `gocomplete -uninstall`
+
+### Features
+
+- Complete the `go` command, including sub commands and all flags.
+- Complete package names or `.go` files when necessary.
+- Complete test names after the `-run` flag.
+
+## complete package
+
+Supported shells:
+
+- [x] bash
+- [x] zsh
+- [x] fish
+
+### Usage
+
+Assuming you have a program called `run` and you want bash completion
+for it, meaning that if you type `run`, then a space, then press the `Tab` key,
+the shell will suggest relevant completion options.
+
+In that case, we will create a Go program called `runcomplete`, with a
+`func main()`, that implements the completion of the `run`
+program. Once `runcomplete` is compiled to a binary, we can run
+`runcomplete -install`, which adds all the bash completion
+options for `run` to our shell.
+
+So here it is:
+
+```go
+import "github.com/posener/complete"
+
+func main() {
+
+	// create a Command object, that represents the command we want
+	// to complete.
+	run := complete.Command{
+
+		// Sub defines a list of sub commands of the program,
+		// this is recursive, since every command is of type command also.
+		Sub: complete.Commands{
+
+			// add a build sub command
+			"build": complete.Command {
+
+				// define flags of the build sub command
+				Flags: complete.Flags{
+					// build sub command has a flag '-cpus', which
+					// expects the number of cpus after it. In that case
+					// anything could complete this flag.
+					"-cpus": complete.PredictAnything,
+				},
+			},
+		},
+
+		// define flags of the 'run' main command
+		Flags: complete.Flags{
+			// a flag -o, which expects a file ending with .out after
+			// it; the tab completion will auto complete for files matching
+			// the given pattern.
+			"-o": complete.PredictFiles("*.out"),
+		},
+
+		// define global flags of the 'run' main command
+		// those will also show up when a sub command was entered in the
+		// command line
+		GlobalFlags: complete.Flags{
+
+			// a flag '-h' which does not expect anything after it
+			"-h": complete.PredictNothing,
+		},
+	}
+
+	// run the command completion, as part of the main() function.
+	// this triggers the autocompletion when needed.
+	// the name must be exactly the same as the binary that we want to complete.
+	complete.New("run", run).Run()
+}
+```
+
+### Self completing program
+
+If the program that we want to complete is itself written in Go, we
+can make it self completing.
+
+Here is an [example](./example/self/main.go)
diff --git a/vendor/github.com/posener/complete/test.sh b/vendor/github.com/posener/complete/test.sh
new file mode 100755
index 00000000..56bfcf15
--- /dev/null
+++ b/vendor/github.com/posener/complete/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+    go test -v -race -coverprofile=profile.out -covermode=atomic $d
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
\ No newline at end of file
diff --git a/vendor/github.com/posener/complete/utils.go b/vendor/github.com/posener/complete/utils.go
new file mode 100644
index 00000000..58b8b792
--- /dev/null
+++ b/vendor/github.com/posener/complete/utils.go
@@ -0,0 +1,46 @@
+package complete
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// fixPathForm changes a file name to a relative name
+func fixPathForm(last string, file string) string {
+	// get working directory for relative name
+	workDir, err := os.Getwd()
+	if err != nil {
+		return file
+	}
+
+	abs, err := filepath.Abs(file)
+	if err != nil {
+		return file
+	}
+
+	// if last is absolute, return path as absolute
+	if filepath.IsAbs(last) {
+		return fixDirPath(abs)
+	}
+
+	rel, err := filepath.Rel(workDir, abs)
+	if err != nil {
+		return file
+	}
+
+	// fix ./ prefix of path
+	if rel != "." && strings.HasPrefix(last, ".") {
+		rel = "./" + rel
+	}
+
+	return fixDirPath(rel)
+}
+
+func fixDirPath(path string) string {
+	info, err := os.Stat(path)
+	if err == nil && info.IsDir() && !strings.HasSuffix(path, "/") {
+		path += "/"
+	}
+	return path
+}
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
deleted file mode 100644
index 6a1fb910..00000000
--- a/vendor/github.com/satori/go.uuid/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013-2015 by Maxim Bublis
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md deleted file mode 100644 index 48d4937f..00000000 --- a/vendor/github.com/satori/go.uuid/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# UUID package for Go language - -[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid) -[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid) - -This package provides pure Go implementation of Universally Unique Identifier (UUID). Supported both creation and parsing of UUIDs. - -With 100% test coverage and benchmarks out of box. - -Supported versions: -* Version 1, based on timestamp and MAC address (RFC 4122) -* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1) -* Version 3, based on MD5 hashing (RFC 4122) -* Version 4, based on random numbers (RFC 4122) -* Version 5, based on SHA-1 hashing (RFC 4122) - -## Installation - -Use the `go` command: - - $ go get github.com/satori/go.uuid - -## Requirements - -UUID package requires any stable version of Go Programming Language. - -It is tested against following versions of Go: 1.0-1.5 - -## Example - -```go -package main - -import ( - "fmt" - "github.com/satori/go.uuid" -) - -func main() { - // Creating UUID Version 4 - u1 := uuid.NewV4() - fmt.Printf("UUIDv4: %s\n", u1) - - // Parsing UUID from string input - u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - if err != nil { - fmt.Printf("Something gone wrong: %s", err) - } - fmt.Printf("Successfully parsed: %s", u2) -} -``` - -## Documentation - -[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project. - -## Links -* [RFC 4122](http://tools.ietf.org/html/rfc4122) -* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) - -## Copyright - -Copyright (C) 2013-2015 by Maxim Bublis . - -UUID package released under MIT License. -See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go deleted file mode 100644 index 03841d86..00000000 --- a/vendor/github.com/satori/go.uuid/uuid.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright (C) 2013-2015 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Package uuid provides implementation of Universally Unique Identifier (UUID). -// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and -// version 2 (as specified in DCE 1.1). -package uuid - -import ( - "bytes" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "database/sql/driver" - "encoding/binary" - "encoding/hex" - "fmt" - "hash" - "net" - "os" - "sync" - "time" -) - -// UUID layout variants. -const ( - VariantNCS = iota - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// UUID DCE domains. -const ( - DomainPerson = iota - DomainGroup - DomainOrg -) - -// Difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). -const epochStart = 122192928000000000 - -// Used in string method conversion -const dash byte = '-' - -// UUID v1/v2 storage. -var ( - storageMutex sync.Mutex - storageOnce sync.Once - epochFunc = unixTimeFunc - clockSequence uint16 - lastTime uint64 - hardwareAddr [6]byte - posixUID = uint32(os.Getuid()) - posixGID = uint32(os.Getgid()) -) - -// String parse helpers. -var ( - urnPrefix = []byte("urn:uuid:") - byteGroups = []int{8, 4, 4, 4, 12} -) - -func initClockSequence() { - buf := make([]byte, 2) - safeRandom(buf) - clockSequence = binary.BigEndian.Uint16(buf) -} - -func initHardwareAddr() { - interfaces, err := net.Interfaces() - if err == nil { - for _, iface := range interfaces { - if len(iface.HardwareAddr) >= 6 { - copy(hardwareAddr[:], iface.HardwareAddr) - return - } - } - } - - // Initialize hardwareAddr randomly in case - // of real network interfaces absence - safeRandom(hardwareAddr[:]) - - // Set multicast bit as recommended in RFC 4122 - hardwareAddr[0] |= 0x01 -} - -func initStorage() { - initClockSequence() - initHardwareAddr() -} - -func safeRandom(dest []byte) { - if _, err := rand.Read(dest); err != nil { - panic(err) - } -} - -// Returns difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and current time. -// This is default epoch calculation function. -func unixTimeFunc() uint64 { - return epochStart + uint64(time.Now().UnixNano()/100) -} - -// UUID representation compliant with specification -// described in RFC 4122. -type UUID [16]byte - -// The nil UUID is special form of UUID that is specified to have all -// 128 bits set to zero. -var Nil = UUID{} - -// Predefined namespace UUIDs. -var ( - NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8") -) - -// And returns result of binary AND of two UUIDs. -func And(u1 UUID, u2 UUID) UUID { - u := UUID{} - for i := 0; i < 16; i++ { - u[i] = u1[i] & u2[i] - } - return u -} - -// Or returns result of binary OR of two UUIDs. -func Or(u1 UUID, u2 UUID) UUID { - u := UUID{} - for i := 0; i < 16; i++ { - u[i] = u1[i] | u2[i] - } - return u -} - -// Equal returns true if u1 and u2 equals, otherwise returns false. -func Equal(u1 UUID, u2 UUID) bool { - return bytes.Equal(u1[:], u2[:]) -} - -// Version returns algorithm version used to generate UUID. 
-func (u UUID) Version() uint { - return uint(u[6] >> 4) -} - -// Variant returns UUID layout variant. -func (u UUID) Variant() uint { - switch { - case (u[8] & 0x80) == 0x00: - return VariantNCS - case (u[8]&0xc0)|0x80 == 0x80: - return VariantRFC4122 - case (u[8]&0xe0)|0xc0 == 0xc0: - return VariantMicrosoft - } - return VariantFuture -} - -// Bytes returns bytes slice representation of UUID. -func (u UUID) Bytes() []byte { - return u[:] -} - -// Returns canonical string representation of UUID: -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. -func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = dash - hex.Encode(buf[9:13], u[4:6]) - buf[13] = dash - hex.Encode(buf[14:18], u[6:8]) - buf[18] = dash - hex.Encode(buf[19:23], u[8:10]) - buf[23] = dash - hex.Encode(buf[24:], u[10:]) - - return string(buf) -} - -// SetVersion sets version bits. -func (u *UUID) SetVersion(v byte) { - u[6] = (u[6] & 0x0f) | (v << 4) -} - -// SetVariant sets variant bits as described in RFC 4122. -func (u *UUID) SetVariant() { - u[8] = (u[8] & 0xbf) | 0x80 -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The encoding is the same as returned by String. -func (u UUID) MarshalText() (text []byte, err error) { - text = []byte(u.String()) - return -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Following formats are supported: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -func (u *UUID) UnmarshalText(text []byte) (err error) { - if len(text) < 32 { - err = fmt.Errorf("uuid: invalid UUID string: %s", text) - return - } - - if bytes.Equal(text[:9], urnPrefix) { - text = text[9:] - } else if text[0] == '{' { - text = text[1:] - } - - b := u[:] - - for _, byteGroup := range byteGroups { - if text[0] == '-' { - text = text[1:] - } - - _, err = hex.Decode(b[:byteGroup/2], text[:byteGroup]) - - if err != nil { - return - } - - text = text[byteGroup:] - b = b[byteGroup/2:] - } - - return -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (u UUID) MarshalBinary() (data []byte, err error) { - data = u.Bytes() - return -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It will return error if the slice isn't 16 bytes long. -func (u *UUID) UnmarshalBinary(data []byte) (err error) { - if len(data) != 16 { - err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) - return - } - copy(u[:], data) - - return -} - -// Value implements the driver.Valuer interface. -func (u UUID) Value() (driver.Value, error) { - return u.String(), nil -} - -// Scan implements the sql.Scanner interface. -// A 16-byte slice is handled by UnmarshalBinary, while -// a longer byte slice or a string is handled by UnmarshalText. -func (u *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - if len(src) == 16 { - return u.UnmarshalBinary(src) - } - return u.UnmarshalText(src) - - case string: - return u.UnmarshalText([]byte(src)) - } - - return fmt.Errorf("uuid: cannot convert %T to UUID", src) -} - -// FromBytes returns UUID converted from raw byte slice input. -// It will return error if the slice isn't 16 bytes long. -func FromBytes(input []byte) (u UUID, err error) { - err = u.UnmarshalBinary(input) - return -} - -// FromBytesOrNil returns UUID converted from raw byte slice input. -// Same behavior as FromBytes, but returns a Nil UUID on error. 
-func FromBytesOrNil(input []byte) UUID { - uuid, err := FromBytes(input) - if err != nil { - return Nil - } - return uuid -} - -// FromString returns UUID parsed from string input. -// Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (u UUID, err error) { - err = u.UnmarshalText([]byte(input)) - return -} - -// FromStringOrNil returns UUID parsed from string input. -// Same behavior as FromString, but returns a Nil UUID on error. -func FromStringOrNil(input string) UUID { - uuid, err := FromString(input) - if err != nil { - return Nil - } - return uuid -} - -// Returns UUID v1/v2 storage state. -// Returns epoch timestamp, clock sequence, and hardware address. -func getStorage() (uint64, uint16, []byte) { - storageOnce.Do(initStorage) - - storageMutex.Lock() - defer storageMutex.Unlock() - - timeNow := epochFunc() - // Clock changed backwards since last UUID generation. - // Should increase clock sequence. - if timeNow <= lastTime { - clockSequence++ - } - lastTime = timeNow - - return timeNow, clockSequence, hardwareAddr[:] -} - -// NewV1 returns UUID based on current timestamp and MAC address. -func NewV1() UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := getStorage() - - binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - - copy(u[10:], hardwareAddr) - - u.SetVersion(1) - u.SetVariant() - - return u -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. -func NewV2(domain byte) UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := getStorage() - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[0:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[0:], posixGID) - } - - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - u[9] = domain - - copy(u[10:], hardwareAddr) - - u.SetVersion(2) - u.SetVariant() - - return u -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func NewV3(ns UUID, name string) UUID { - u := newFromHash(md5.New(), ns, name) - u.SetVersion(3) - u.SetVariant() - - return u -} - -// NewV4 returns random generated UUID. -func NewV4() UUID { - u := UUID{} - safeRandom(u[:]) - u.SetVersion(4) - u.SetVariant() - - return u -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. -func NewV5(ns UUID, name string) UUID { - u := newFromHash(sha1.New(), ns, name) - u.SetVersion(5) - u.SetVariant() - - return u -} - -// Returns UUID based on hashing of namespace UUID and name. 
-func newFromHash(h hash.Hash, ns UUID, name string) UUID { - u := UUID{} - h.Write(ns[:]) - h.Write([]byte(name)) - copy(u[:], h.Sum(nil)) - - return u -} diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore new file mode 100644 index 00000000..e3c2fc2f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/.gitignore @@ -0,0 +1,25 @@ +# .gitignore + +TODO.html +README.html + +lzma/writer.txt +lzma/reader.txt + +cmd/gxz/gxz +cmd/xb/xb + +# test executables +*.test + +# profile files +*.out + +# vim swap file +.*.swp + +# executables on windows +*.exe + +# default compression test file +enwik8* diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 00000000..58ebdc16 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2016 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md new file mode 100644 index 00000000..0a2dc828 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -0,0 +1,73 @@ +# Package xz + +This Go language package supports the reading and writing of xz +compressed streams. It includes also a gxz command for compressing and +decompressing data. The package is completely written in Go and doesn't +have any dependency on any C code. + +The package is currently under development. There might be bugs and APIs +are not considered stable. At this time the package cannot compete with +the xz tool regarding compression speed and size. The algorithms there +have been developed over a long time and are highly optimized. However +there are a number of improvements planned and I'm very optimistic about +parallel compression and decompression. Stay tuned! + +## Using the API + +The following example program shows how to use the API. 
+
+```go
+package main
+
+import (
+	"bytes"
+	"io"
+	"log"
+	"os"
+
+	"github.com/ulikunitz/xz"
+)
+
+func main() {
+	const text = "The quick brown fox jumps over the lazy dog.\n"
+	var buf bytes.Buffer
+	// compress text
+	w, err := xz.NewWriter(&buf)
+	if err != nil {
+		log.Fatalf("xz.NewWriter error %s", err)
+	}
+	if _, err := io.WriteString(w, text); err != nil {
+		log.Fatalf("WriteString error %s", err)
+	}
+	if err := w.Close(); err != nil {
+		log.Fatalf("w.Close error %s", err)
+	}
+	// decompress buffer and write output to stdout
+	r, err := xz.NewReader(&buf)
+	if err != nil {
+		log.Fatalf("NewReader error %s", err)
+	}
+	if _, err = io.Copy(os.Stdout, r); err != nil {
+		log.Fatalf("io.Copy error %s", err)
+	}
+}
+```
+
+## Using the gxz compression tool
+
+The package includes a gxz command line utility for compression and
+decompression.
+
+Use the following command for installation:
+
+    $ go get github.com/ulikunitz/xz/cmd/gxz
+
+To test it, call the following command.
+
+    $ gxz bigfile
+
+After some time a much smaller file bigfile.xz will replace bigfile.
+To decompress it, use the following command.
+
+    $ gxz -d bigfile.xz
+
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
new file mode 100644
index 00000000..c10e51b9
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -0,0 +1,319 @@
+# TODO list
+
+## Release v0.6
+
+1. Review encoder and check for lzma improvements under xz.
+2. Fix binary tree matcher.
+3. Compare compression ratio with xz tool using comparable parameters
+   and optimize parameters.
+4. Do some optimizations
+   - rename operation action and make it a simple type of size 8
+   - make maxMatches, wordSize parameters
+   - stop searching after a certain length is found (parameter sweetLen)
+
+## Release v0.7
+
+1. Optimize code
+2. Do statistical analysis to get linear presets.
+3. Test sync.Pool compatibility for xz and lzma Writer and Reader
+4. Fuzz optimized code.
+
+## Release v0.8
+
+1. Support parallel goroutines for writing and reading xz files.
+2. Support a ReaderAt interface for xz files with small block sizes.
+3. Improve compatibility between gxz and xz
+4. Provide manual page for gxz
+
+## Release v0.9
+
+1. Improve documentation
+2. Fuzz again
+
+## Release v1.0
+
+1. Fully functioning gxz
+2. Add godoc URL to README.md (godoc.org)
+3. Resolve all issues.
+4. Define release candidates.
+5. Public announcement.
+
+## Package lzma
+
+### Release v0.6
+
+- Rewrite Encoder into a simple greedy one-op-at-a-time encoder
+  including
+  + simple scan at the dictionary head for the same byte
+  + use the killer byte (requiring matches to get longer, the first
+    test should be the byte that would make the match longer)
+
+## Optimizations
+
+- There may be a lot of false sharing in lzma.State; check whether this
+  can be improved by reorganizing the internal structure of it.
+- Check whether batching encoding and decoding improves speed.
+
+### DAG optimizations
+
+- Use full buffer to create minimal bit-length above range encoder.
+- Might be too slow (see v0.4)
+
+### Different match finders
+
+- hashes with 2, 3 characters in addition to 4 characters
+- binary trees with 2-7 characters (uint64 as key, use uint32 as
+  pointers into an array)
+- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
+  into an array with bit-stealing for the colors)
+
+## Release Procedure
+
+- execute goch -l for all packages; probably with lower param like 0.5.
+- check orthography with gospell
+- Write release notes in doc/relnotes.
+- Update README.md
+- xb copyright . in xz directory to ensure all new files have Copyright
+  header
+- VERSION=<version> go generate github.com/ulikunitz/xz/... to update
+  version files
+- Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
+- Update TODO.md - write short log entry
+- git checkout master && git merge dev
+- git tag -a <version>
+- git push
+
+## Log
+
+### 2018-10-28
+
+Release v0.5.5 fixes issue #19, which reported unexpected ErrLimit
+outputs.
+
+### 2017-06-05
+
+Release v0.5.4 fixes issue #15, another problem with the padding size
+check for the xz block header. I removed the check completely.
+
+### 2017-02-15
+
+Release v0.5.3 fixes issue #12 regarding the decompression of an empty
+XZ stream. Many thanks to Tomasz Kłak, who reported the issue.
+
+### 2016-12-02
+
+Release v0.5.2 became necessary to allow the decoding of xz files with
+4-byte padding in the block header. Many thanks to Greg, who reported
+the issue.
+
+### 2016-07-23
+
+Release v0.5.1 became necessary to fix problems with 32-bit platforms.
+Many thanks to Bruno Brigas, who reported the issue.
+
+### 2016-07-04
+
+Release v0.5 provides improvements to the compressor and provides support for
+the decompression of xz files with multiple xz streams.
+
+### 2016-01-31
+
+Another compression rate increase by checking the byte at the length of
+the best match first, before checking the whole prefix (a short sketch
+appears at the end of this log excerpt). This makes the compressor even
+faster. We now have a large time budget to beat the compression ratio
+of the xz tool. For enwik8 we now have over 40 seconds to reduce the
+compressed file size by another 7 MiB.
+
+### 2016-01-30
+
+I simplified the encoder. Speed and compression rate increased
+dramatically. A high compression rate also affects the decompression
+speed. The approach of using the buffer and optimizing for operation
+compression rate has not been successful. Going for the maximum length
+appears to be the best approach.
+
+### 2016-01-28
+
+The release v0.4 is ready. It provides a working xz implementation,
+which is rather slow, but works and is interoperable with the xz tool.
+It is an important milestone.
+
+### 2016-01-10
+
+I have the first working implementation of an xz reader and writer. I'm
+happy about reaching this milestone.
+
+### 2015-12-02
+
+I'm now ready to implement xz because I have a working LZMA2
+implementation. I decided today that v0.4 will use the slow encoder
+using the operations buffer to be able to go back, if I intend to do so.
+
+### 2015-10-21
+
+I have restarted the work on the library. While trying to implement
+LZMA2, I discovered that I need to resimplify the encoder and decoder
+functions. The option approach is too complicated. Using a limited byte
+writer, not caring about written bytes at all, and not trying to handle
+uncompressed data simplifies the LZMA encoder and decoder much.
+Processing uncompressed data and handling limits is a feature of the
+LZMA2 format, not of LZMA.
+
+I learned an interesting method from the LZO format. If the last copy is
+too far away they move the head by 2 bytes instead of 1 byte to reduce
+processing times.
+
+### 2015-08-26
+
+I have now reimplemented the lzma package. The code is reasonably fast,
+but can still be optimized. The next step is to implement LZMA2 and then
+xz.
+
+### 2015-07-05
+
+Created release v0.3. The version is the foundation for a full xz
+implementation that is the target of v0.4.
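+A minimal sketch of the decisive-byte idea from the 2016-01-31 entry
+above; longerMatch and its signature are illustrative only, not the
+actual encoder code:
+
+```go
+package sketch
+
+import "bytes"
+
+// longerMatch reports whether cand matches data for more than n bytes,
+// where n is the length of the best match so far. The byte at index n
+// is tested first, because only it can disqualify cand cheaply.
+func longerMatch(data, cand []byte, n int) bool {
+	if len(cand) <= n || len(data) <= n {
+		return false
+	}
+	if cand[n] != data[n] { // decisive byte differs: cand cannot win
+		return false
+	}
+	return bytes.Equal(cand[:n+1], data[:n+1])
+}
+```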
+
+### 2015-06-11
+
+The gflag package has been developed because I couldn't use flag and
+pflag for fully compatible support of gzip's and lzma's options. It
+now seems to work quite nicely.
+
+### 2015-06-05
+
+The overflow issue was interesting to research; however, Henry S. Warren
+Jr.'s Hacker's Delight was very helpful as usual and explained the issue
+perfectly. Fefe's information on his website was based on the C FAQ and
+quite bad, because it didn't address the issue of -MININT == MININT.
+
+### 2015-06-04
+
+It has been a productive day. I improved the interface of lzma.Reader
+and lzma.Writer and fixed the error handling.
+
+### 2015-06-01
+
+By computing the bit length of the LZMA operations I was able to
+improve the greedy algorithm implementation. By using an 8 MByte buffer
+the compression rate was not as good as for xz but already better than
+the gzip default.
+
+Compression is currently slow, but this is something we will be able to
+improve over time.
+
+### 2015-05-26
+
+Checked the license of ogier/pflag. The lzmago binary should include
+the license terms for the pflag library.
+
+I added the endorsement clause as used by Google for the Go sources to
+the LICENSE file.
+
+### 2015-05-22
+
+The package lzb now contains the basic implementation for creating or
+reading LZMA byte streams. It allows support for implementing the
+DAG-shortest-path algorithm for the compression function.
+
+### 2015-04-23
+
+Yesterday I completed the lzbase classes. I'm a little bit concerned
+that using the components may require too much code, but on the other
+hand there is a lot of flexibility.
+
+### 2015-04-22
+
+Implemented Reader and Writer during the Bayern game against Porto. The
+second half gave me enough time.
+
+### 2015-04-21
+
+While showering this morning I discovered that the design for OpEncoder
+and OpDecoder doesn't work, because encoding/decoding might depend on
+the current status of the dictionary. This is not exactly the right way
+to start the day.
+
+Therefore we need to keep the Reader and Writer design. This time around
+we simplify it by ignoring size limits. These can be added by wrappers
+around the Reader and Writer interfaces. The Parameters type isn't
+needed anymore.
+
+However, I will implement a ReaderState and WriterState type to use
+static typing to ensure the right State object is combined with the
+right lzbase.Reader and lzbase.Writer.
+
+As a start I have implemented ReaderState and WriterState to ensure
+that ReaderState is only used by readers and WriterState only by
+writers.
+
+### 2015-04-20
+
+Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
+
+### 2015-04-08
+
+Came up with a new simplified design for lzbase. I have already
+implemented the type State that replaces OpCodec.
+
+### 2015-04-06
+
+The new lzma package is now fully usable and lzmago is using it now. The
+old lzma package has been completely removed.
+
+### 2015-04-05
+
+Implemented lzma.Reader and tested it.
+
+### 2015-04-04
+
+Implemented baseReader by adapting code from lzma.Reader.
+
+### 2015-04-03
+
+The opCodec was copied yesterday to lzma2. opCodec has a high
+number of dependencies on other files in lzma2. Therefore I had to copy
+almost all files from lzma.
+
+### 2015-03-31
+
+Removed only a TODO item.
+
+However, in Francesco Campoy's presentation "Go for Javaneros
+(Javaïstes?)" there is the idea that, using an embedded field E, all the
+methods of E will be defined on T. If E is an interface, T satisfies E.
+
+https://talks.golang.org/2014/go4java.slide#51
+
+I have never used this, but it seems to be a cool idea.
+
+### 2015-03-30
+
+Finished the type writerDict and wrote a simple test.
+
+### 2015-03-25
+
+I started to implement the writerDict.
+
+### 2015-03-24
+
+After thinking long about the LZMA2 code and several false starts, I
+now have a plan to create a self-sufficient lzma2 package that supports
+the classic LZMA format as well as LZMA2. The core idea is to support a
+baseReader and baseWriter type that support the basic LZMA stream
+without any headers. Both types must support the reuse of dictionaries
+and the opCodec.
+
+### 2015-01-10
+
+1. Implemented simple lzmago tool
+2. Tested tool against large 4.4G file
+   - compression worked correctly; tested decompression with lzma
+   - decompression hits a full buffer condition
+3. Fixed a bug in the compressor and wrote a test for it
+4. Executed full cycle for 4.4 GB file; performance can be improved ;-)
+
+### 2015-01-11
+
+- Release v0.2 because of the working LZMA encoder and decoder
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
new file mode 100644
index 00000000..fadc1a59
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -0,0 +1,74 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"io"
+)
+
+// putUint32LE puts the little-endian representation of x into the first
+// four bytes of p.
+func putUint32LE(p []byte, x uint32) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the little-endian representation of x into the first
+// eight bytes of p.
+func putUint64LE(p []byte, x uint64) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+	p[4] = byte(x >> 32)
+	p[5] = byte(x >> 40)
+	p[6] = byte(x >> 48)
+	p[7] = byte(x >> 56)
+}
+
+// uint32LE converts a little-endian representation to a uint32 value.
+func uint32LE(p []byte) uint32 {
+	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
+		uint32(p[3])<<24
+}
+
+// putUvarint puts a uvarint representation of x into the byte slice.
+func putUvarint(p []byte, x uint64) int {
+	i := 0
+	for x >= 0x80 {
+		p[i] = byte(x) | 0x80
+		x >>= 7
+		i++
+	}
+	p[i] = byte(x)
+	return i + 1
+}
+
+// errOverflowU64 indicates an overflow of the 64-bit unsigned integer.
+var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
+
+// readUvarint reads a uvarint from the given byte reader.
+func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
+	var s uint
+	i := 0
+	for {
+		b, err := r.ReadByte()
+		if err != nil {
+			return x, i, err
+		}
+		i++
+		if b < 0x80 {
+			if i > 10 || i == 10 && b > 1 {
+				return x, i, errOverflowU64
+			}
+			return x | uint64(b)<<s, i, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+}
diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/format.go
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// allZeros checks whether the given byte slice contains only zeros.
+func allZeros(p []byte) bool {
+	for _, c := range p {
+		if c != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// padLen returns the length of the padding required for the given
+// argument.
+func padLen(n int64) int {
+	k := int(n % 4)
+	if k > 0 {
+		k = 4 - k
+	}
+	return k
+}
+
+/*** Header ***/
+
+// headerMagic stores the magic bytes for the header.
+var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
+
+// HeaderLen provides the length of the xz file header.
+const HeaderLen = 12
+
+// Constants for the checksum methods supported by xz.
+const (
+	CRC32  byte = 0x1
+	CRC64       = 0x4
+	SHA256      = 0xa
+)
+
+// errInvalidFlags indicates that flags are invalid.
+var errInvalidFlags = errors.New("xz: invalid flags")
+
+// verifyFlags returns the error errInvalidFlags if the value is
+// invalid.
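+// For example, flags 0x1 selects CRC-32 block checksums; 0x0 (no
+// checksum) and all other values are rejected by this implementation.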
+func verifyFlags(flags byte) error { + switch flags { + case CRC32, CRC64, SHA256: + return nil + default: + return errInvalidFlags + } +} + +// flagstrings maps flag values to strings. +var flagstrings = map[byte]string{ + CRC32: "CRC-32", + CRC64: "CRC-64", + SHA256: "SHA-256", +} + +// flagString returns the string representation for the given flags. +func flagString(flags byte) string { + s, ok := flagstrings[flags] + if !ok { + return "invalid" + } + return s +} + +// newHashFunc returns a function that creates hash instances for the +// hash method encoded in flags. +func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { + switch flags { + case CRC32: + newHash = newCRC32 + case CRC64: + newHash = newCRC64 + case SHA256: + newHash = sha256.New + default: + err = errInvalidFlags + } + return +} + +// header provides the actual content of the xz file header: the flags. +type header struct { + flags byte +} + +// Errors returned by readHeader. +var errHeaderMagic = errors.New("xz: invalid header magic bytes") + +// ValidHeader checks whether data is a correct xz file header. The +// length of data must be HeaderLen. +func ValidHeader(data []byte) bool { + var h header + err := h.UnmarshalBinary(data) + return err == nil +} + +// String returns a string representation of the flags. +func (h header) String() string { + return flagString(h.flags) +} + +// UnmarshalBinary reads header from the provided data slice. +func (h *header) UnmarshalBinary(data []byte) error { + // header length + if len(data) != HeaderLen { + return errors.New("xz: wrong file header length") + } + + // magic header + if !bytes.Equal(headerMagic, data[:6]) { + return errHeaderMagic + } + + // checksum + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + if uint32LE(data[8:]) != crc.Sum32() { + return errors.New("xz: invalid checksum for file header") + } + + // stream flags + if data[6] != 0 { + return errInvalidFlags + } + flags := data[7] + if err := verifyFlags(flags); err != nil { + return err + } + + h.flags = flags + return nil +} + +// MarshalBinary generates the xz file header. +func (h *header) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(h.flags); err != nil { + return nil, err + } + + data = make([]byte, 12) + copy(data, headerMagic) + data[7] = h.flags + + crc := crc32.NewIEEE() + crc.Write(data[6:8]) + putUint32LE(data[8:], crc.Sum32()) + + return data, nil +} + +/*** Footer ***/ + +// footerLen defines the length of the footer. +const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. 
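+// The 12-byte layout produced below is: a CRC-32 over bytes 4-9, the
+// encoded backward size, a zero byte followed by the stream flags, and
+// the magic bytes 'YZ'.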
+func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. +func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. +func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. 
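+// A field that is not present is reported as -1, matching the
+// convention used by blockHeader.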
+func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. 
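+// The layout written below is: size byte, flag byte, the optional
+// uvarint sizes, the filter descriptors, zero padding to a multiple of
+// four bytes and a trailing CRC-32.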
+func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. +type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +// writeFilters writes the filters. 
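+// Only the binary filter descriptors are written; the filter count is
+// carried in the block header flags, not here.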
+func writeFilters(w io.Writer, filters []filter) (n int, err error) { + for _, f := range filters { + p, err := f.MarshalBinary() + if err != nil { + return n, err + } + k, err := w.Write(p) + n += k + if err != nil { + return n, err + } + } + return n, nil +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. +func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
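+// The body layout parsed below is: uvarint record count, the records,
+// zero padding to a four-byte boundary and a CRC-32 computed over
+// everything including the indicator byte.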
+func readIndexBody(r io.Reader) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz new file mode 100644 index 0000000000000000000000000000000000000000..4b820bd5a16e83fe5db4fb315639a4337f862483 GIT binary patch literal 104 zcmexsUKJ6=z`*kC+7>q^21Q0O1_p)_{ill=8FWH2QWXkIGn2Cwl8W-n^AytZD-^Oy za|?dFO$zmVVdxt0+m!4eq- E0K@hlng9R* literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 00000000..a3288787 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. +func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. 
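+// Each byte is mapped to a fixed random 64-bit value, so equal bytes
+// contribute identical terms to the rolling hash.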
+var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 
0x0d419e8b325914a3, 0x320f160f42c25e40,
+	0x00710d647a51fe7a, 0x3c947692330aed60,
+	0x9288aa280d355a7a, 0xa1806a9b791d1696,
+	0x5d60e38496763da1, 0x6c69e22e613fd0f4,
+	0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba,
+	0x460c17992cbaece1, 0xf7822c5444d3297f,
+	0x344a9790c69b74aa, 0xb80a42e6cae09dce,
+	0x1b1361eaf2b1e757, 0xd84c1e758e236f01,
+	0x88e0b7be347627cc, 0x45246009b7a99490,
+	0x8011c6dd3fe50472, 0xc341d682bffb99d7,
+	0x2511be93808e2d15, 0xd5bc13d7fd739840,
+	0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157,
+	0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0,
+	0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc,
+	0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e,
+	0xa559cce0d9199aac, 0xde39d47ef3723380,
+	0xe5b69d848ce42e35, 0xefa24296f8e79f52,
+	0x70190b59db9a5afc, 0x26f166cdb211e7bf,
+	0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017,
+	0xb9059b05e9420d90, 0x2f0da855c9388754,
+	0x611d5e9ab77949cc, 0x2912038ac01163f4,
+	0x0231df50402b2fba, 0x45660fc4f3245f58,
+	0xb91cc97c7c8dac50, 0xb72d2aafe4953427,
+	0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2,
+	0x1310e1c1a48d21c3, 0xad48a7810cdd8544,
+	0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de,
+	0xe70cfc8fe1ee9626, 0xef4711b0d8dda442,
+	0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93,
+	0x9b37db9d0335a39c, 0x494b6f870f5cfebc,
+	0x6d1b3c1149dda943, 0x372c943a518c1093,
+	0xad27af45e77c09c4, 0x3b6f92b646044604,
+	0xac2917909f5fcf4f, 0x2069a60e977e5557,
+	0x353a469e71014de5, 0x24be356281f55c15,
+	0x2b6d710ba8e9adea, 0x404ad1751c749c29,
+	0xed7311bf23d7f185, 0xba4f6976b4acc43e,
+	0x32d7198d2bc39000, 0xee667019014d6e01,
+	0x494ef3e128d14c83, 0x1f95a152baecd6be,
+	0x201648dff1f483a5, 0x68c28550c8384af6,
+	0x5fc834a6824a7f48, 0x7cd06cb7365eaf28,
+	0xd82bbd95e9b30909, 0x234f0d1694c53f6d,
+	0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e,
+	0xf8f6b97f5585080a, 0x74236084be57b95b,
+	0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b,
+	0x4378ffe93e1528c5, 0x94ca92a17118e2d2,
+}
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go
new file mode 100644
index 00000000..f99ec220
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go
@@ -0,0 +1,14 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package hash provides rolling hashes.
+
+Rolling hashes have to be used for maintaining the positions of n-byte
+sequences in the dictionary buffer.
+
+The package currently provides the Rabin-Karp rolling hash and a Cyclic
+Polynomial hash. Both support the Hashes function through the Roller
+interface.
+*/
+package hash
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
new file mode 100644
index 00000000..58635b11
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
@@ -0,0 +1,66 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hash
+
+// A is the default constant for the Rabin-Karp rolling hash. This is a
+// random prime.
+const A = 0x97b548add41d5da1
+
+// RabinKarp supports the computation of a rolling hash.
+type RabinKarp struct {
+	A uint64
+	// a^n
+	aOldest uint64
+	h       uint64
+	p       []byte
+	i       int
+}
+
+// NewRabinKarp creates a new RabinKarp value. The argument n defines the
+// length of the byte sequence to be hashed. The default constant A will
+// be used.
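+// A typical use (names are illustrative):
+//
+//	r := NewRabinKarp(4)
+//	h := Hashes(r, data) // h[i] hashes data[i : i+4]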
+func NewRabinKarp(n int) *RabinKarp {
+	return NewRabinKarpConst(n, A)
+}
+
+// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the
+// length of the byte sequence to be hashed. The argument a provides the
+// constant used to compute the hash.
+func NewRabinKarpConst(n int, a uint64) *RabinKarp {
+	if n <= 0 {
+		panic("number of bytes n must be positive")
+	}
+	aOldest := uint64(1)
+	// There are faster methods. For the small n required by the LZMA
+	// compressor O(n) is sufficient.
+	for i := 0; i < n; i++ {
+		aOldest *= a
+	}
+	return &RabinKarp{
+		A: a, aOldest: aOldest,
+		p: make([]byte, 0, n),
+	}
+}
+
+// Len returns the length of the byte sequence.
+func (r *RabinKarp) Len() int {
+	return cap(r.p)
+}
+
+// RollByte computes the hash after x has been added.
+func (r *RabinKarp) RollByte(x byte) uint64 {
+	if len(r.p) < cap(r.p) {
+		r.h += uint64(x)
+		r.h *= r.A
+		r.p = append(r.p, x)
+	} else {
+		r.h -= uint64(r.p[r.i]) * r.aOldest
+		r.h += uint64(x)
+		r.h *= r.A
+		r.p[r.i] = x
+		r.i = (r.i + 1) % cap(r.p)
+	}
+	return r.h
+}
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
new file mode 100644
index 00000000..ab6a19ca
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
@@ -0,0 +1,29 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hash
+
+// Roller provides an interface for rolling hashes. The hash value will become
+// valid after RollByte has been called Len times.
+type Roller interface {
+	Len() int
+	RollByte(x byte) uint64
+}
+
+// Hashes computes all hash values for the byte slice p. Note that the state
+// of the roller is changed.
+func Hashes(r Roller, p []byte) []uint64 {
+	n := r.Len()
+	if len(p) < n {
+		return nil
+	}
+	h := make([]uint64, len(p)-n+1)
+	for i := 0; i < n-1; i++ {
+		r.RollByte(p[i])
+	}
+	for i := range h {
+		h[i] = r.RollByte(p[i+n-1])
+	}
+	return h
+}
diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
new file mode 100644
index 00000000..0ba45e8f
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
@@ -0,0 +1,457 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xlog provides a simple logging package that allows disabling
+// certain message categories. It defines a type, Logger, with multiple
+// methods for formatting output. The package also has a predefined
+// 'standard' Logger accessible through the helper functions
+// Print[f|ln], Fatal[f|ln], Panic[f|ln], Warn[f|ln] and Debug[f|ln],
+// which are easier to use than creating a Logger manually. That logger
+// writes to standard error and prints the date and time of each logged
+// message, which can be configured using the function SetFlags.
+//
+// The Fatal functions call os.Exit(1) after the message is output;
+// the flags can suppress the output but not the exit. The Panic
+// functions likewise call panic after writing the log message unless
+// output is suppressed.
+package xlog
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sync"
+	"time"
+)
+
+// The flags define what information is prefixed to each log entry
+// generated by the Logger. The Lno* versions allow the suppression of
+// specific output.
The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +// +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) + if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) 
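+		// at this point buf holds the full documented header,
+		// e.g. "2009-01-23 01:23:23.123123 d.go:23: "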
+ } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) 
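+	// os.Exit below runs even when Lnofatal suppresses the output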
+ os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) 
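+	// note: Lstdflags includes Lnodebug, so the standard logger
+	// suppresses this output by default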
+} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 00000000..a781bd19 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,523 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bufio" + "errors" + "fmt" + "io" + "unicode" +) + +// node represents a node in the binary tree. +type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. 
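+// Because newBinTree caps the capacity below 1<<32-1, the all-ones
+// value can never collide with a real node index.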
+const null uint32 = 1<<32 - 1
+
+// newBinTree initializes the binTree structure. The capacity defines
+// the size of the buffer and defines the maximum distance for which
+// matches will be found.
+func newBinTree(capacity int) (t *binTree, err error) {
+	if capacity < 1 {
+		return nil, errors.New(
+			"newBinTree: capacity must be larger than zero")
+	}
+	if int64(capacity) >= int64(null) {
+		return nil, errors.New(
+			"newBinTree: capacity must be less than 2^32-1")
+	}
+	t = &binTree{
+		node: make([]node, capacity),
+		hoff: -int64(wordLen),
+		root: null,
+		data: make([]byte, maxMatchLen),
+	}
+	return t, nil
+}
+
+func (t *binTree) SetDict(d *encoderDict) { t.dict = d }
+
+// WriteByte writes a single byte into the binary tree.
+func (t *binTree) WriteByte(c byte) error {
+	t.x = (t.x << 8) | uint32(c)
+	t.hoff++
+	if t.hoff < 0 {
+		return nil
+	}
+	v := t.front
+	if int64(v) < t.hoff {
+		// We are overwriting old nodes stored in the tree.
+		t.remove(v)
+	}
+	t.node[v].x = t.x
+	t.add(v)
+	t.front++
+	if int64(t.front) >= int64(len(t.node)) {
+		t.front = 0
+	}
+	return nil
+}
+
+// Write writes a sequence of bytes into the binTree structure.
+func (t *binTree) Write(p []byte) (n int, err error) {
+	for _, c := range p {
+		t.WriteByte(c)
+	}
+	return len(p), nil
+}
+
+// add puts the node v into the tree. The node must not already be part
+// of the tree.
+func (t *binTree) add(v uint32) {
+	vn := &t.node[v]
+	// Set left and right to null indices.
+	vn.l, vn.r = null, null
+	// If the binary tree is empty, make v the root.
+	if t.root == null {
+		t.root = v
+		vn.p = null
+		return
+	}
+	x := vn.x
+	p := t.root
+	// Search for the right leaf link and add the new node.
+	for {
+		pn := &t.node[p]
+		if x <= pn.x {
+			if pn.l == null {
+				pn.l = v
+				vn.p = p
+				return
+			}
+			p = pn.l
+		} else {
+			if pn.r == null {
+				pn.r = v
+				vn.p = p
+				return
+			}
+			p = pn.r
+		}
+	}
+}
+
+// parent returns the parent node index of v and a pointer to the link
+// referencing v in the parent.
+func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) {
+	if t.root == v {
+		return null, &t.root
+	}
+	p = t.node[v].p
+	if t.node[p].l == v {
+		ptr = &t.node[p].l
+	} else {
+		ptr = &t.node[p].r
+	}
+	return
+}
+
+// Remove node v.
+func (t *binTree) remove(v uint32) {
+	vn := &t.node[v]
+	p, ptr := t.parent(v)
+	l, r := vn.l, vn.r
+	if l == null {
+		// Move the right child up.
+		*ptr = r
+		if r != null {
+			t.node[r].p = p
+		}
+		return
+	}
+	if r == null {
+		// Move the left child up.
+		*ptr = l
+		t.node[l].p = p
+		return
+	}
+
+	// Search for the in-order predecessor u.
+	un := &t.node[l]
+	ur := un.r
+	if ur == null {
+		// The in-order predecessor is l. Move it up.
+		un.r = r
+		t.node[r].p = l
+		un.p = p
+		*ptr = l
+		return
+	}
+	var u uint32
+	for {
+		// Look for the max value in the tree where l is root.
+		u = ur
+		ur = t.node[u].r
+		if ur == null {
+			break
+		}
+	}
+	// replace u with ul
+	un = &t.node[u]
+	ul := un.l
+	up := un.p
+	t.node[up].r = ul
+	if ul != null {
+		t.node[ul].p = up
+	}
+
+	// replace v by u
+	un.l, un.r = l, r
+	t.node[l].p = u
+	t.node[r].p = u
+	*ptr = u
+	un.p = p
+}
+
+// search looks for the node that has the value x or for the nodes that
+// bracket it. The node highest in the tree with the value x will be
+// returned. All other nodes with the same value live in the left
+// subtree of the returned node.
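+// Example (illustrative): in a tree holding the values {2, 5, 9},
+// searching x=7 returns the nodes for 5 and 9; an exact hit returns
+// the same node twice.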
+func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. +func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
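+//
+// A debugging sketch, assuming a populated tree t (os.Stderr stands in
+// for any io.Writer):
+//
+//	if err := t.dump(os.Stderr); err != nil {
+//		// handle the write error
+//	}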
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 00000000..e9bab019 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,45 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
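+//
+// A worked example of the de Bruijn lookup for x == 8 (0b1000):
+//
+//	x & -x           // 8: isolates the lowest set bit
+//	8 * ntz32Const   // 0x26bb28f8
+//	0x26bb28f8 >> 27 // 4, and ntz32Table[4] == 3 == ntz32(8)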
+var ntz32Table = [32]int8{ + 0, 1, 2, 24, 3, 19, 6, 25, + 22, 4, 20, 10, 16, 7, 12, 26, + 31, 23, 18, 5, 21, 9, 15, 11, + 30, 17, 8, 14, 29, 13, 28, 27, +} + +// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. +func ntz32(x uint32) int { + if x == 0 { + return 32 + } + x = (x & -x) * ntz32Const + return int(ntz32Table[x>>27]) +} + +// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. +func nlz32(x uint32) int { + // Smear left most bit to the right + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + // Use ntz mechanism to calculate nlz. + x++ + if x == 0 { + return 0 + } + x *= ntz32Const + return 32 - int(ntz32Table[x>>27]) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go new file mode 100644 index 00000000..5350d814 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -0,0 +1,39 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// breader provides the ReadByte function for a Reader. It doesn't read +// more data from the reader than absolutely necessary. +type breader struct { + io.Reader + // helper slice to save allocations + p []byte +} + +// ByteReader converts an io.Reader into an io.ByteReader. +func ByteReader(r io.Reader) io.ByteReader { + br, ok := r.(io.ByteReader) + if !ok { + return &breader{r, make([]byte, 1)} + } + return br +} + +// ReadByte read byte function. +func (r *breader) ReadByte() (c byte, err error) { + n, err := r.Reader.Read(r.p) + if n < 1 { + if err == nil { + err = errors.New("breader.ReadByte: no data") + } + return 0, err + } + return r.p[0], nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go new file mode 100644 index 00000000..50e0b6d5 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -0,0 +1,171 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" +) + +// buffer provides a circular buffer of bytes. If the front index equals +// the rear index the buffer is empty. As a consequence front cannot be +// equal rear for a full buffer. So a full buffer has a length that is +// one byte less the the length of the data slice. +type buffer struct { + data []byte + front int + rear int +} + +// newBuffer creates a buffer with the given size. +func newBuffer(size int) *buffer { + return &buffer{data: make([]byte, size+1)} +} + +// Cap returns the capacity of the buffer. +func (b *buffer) Cap() int { + return len(b.data) - 1 +} + +// Resets the buffer. The front and rear index are set to zero. +func (b *buffer) Reset() { + b.front = 0 + b.rear = 0 +} + +// Buffered returns the number of bytes buffered. +func (b *buffer) Buffered() int { + delta := b.front - b.rear + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// Available returns the number of bytes available for writing. +func (b *buffer) Available() int { + delta := b.rear - 1 - b.front + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// addIndex adds a non-negative integer to the index i and returns the +// resulting index. The function takes care of wrapping the index as +// well as potential overflow situations. 
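+//
+// A worked example, assuming len(b.data) == 8, i == 6 and n == 5:
+//
+//	i += n - len(b.data) // 6 + (5-8) == 3, i.e. (6+5) mod 8
+//	// subtracting len(b.data) first keeps i+n from overflowing int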
+func (b *buffer) addIndex(i int, n int) int {
+	// subtraction of len(b.data) prevents overflow
+	i += n - len(b.data)
+	if i < 0 {
+		i += len(b.data)
+	}
+	return i
+}
+
+// Read reads bytes from the buffer into p and returns the number of
+// bytes read. The function never returns an error but might return less
+// data than requested.
+func (b *buffer) Read(p []byte) (n int, err error) {
+	n, err = b.Peek(p)
+	b.rear = b.addIndex(b.rear, n)
+	return n, err
+}
+
+// Peek reads bytes from the buffer into p without changing the buffer.
+// Peek will never return an error but might return less data than
+// requested.
+func (b *buffer) Peek(p []byte) (n int, err error) {
+	m := b.Buffered()
+	n = len(p)
+	if m < n {
+		n = m
+		p = p[:n]
+	}
+	k := copy(p, b.data[b.rear:])
+	if k < n {
+		copy(p[k:], b.data)
+	}
+	return n, nil
+}
+
+// Discard skips the next n bytes to read from the buffer, returning the
+// number of bytes discarded.
+//
+// If Discard skips fewer than n bytes, it returns an error.
+func (b *buffer) Discard(n int) (discarded int, err error) {
+	if n < 0 {
+		return 0, errors.New("buffer.Discard: negative argument")
+	}
+	m := b.Buffered()
+	if m < n {
+		n = m
+		err = errors.New(
+			"buffer.Discard: discarded fewer bytes than requested")
+	}
+	b.rear = b.addIndex(b.rear, n)
+	return n, err
+}
+
+// ErrNoSpace indicates that there is insufficient space for the Write
+// operation.
+var ErrNoSpace = errors.New("insufficient space")
+
+// Write puts data into the buffer. If fewer bytes than requested are
+// written, ErrNoSpace is returned.
+func (b *buffer) Write(p []byte) (n int, err error) {
+	m := b.Available()
+	n = len(p)
+	if m < n {
+		n = m
+		p = p[:m]
+		err = ErrNoSpace
+	}
+	k := copy(b.data[b.front:], p)
+	if k < n {
+		copy(b.data, p[k:])
+	}
+	b.front = b.addIndex(b.front, n)
+	return n, err
+}
+
+// WriteByte writes a single byte into the buffer. The error ErrNoSpace
+// is returned if there is no space in the buffer for a single byte.
+func (b *buffer) WriteByte(c byte) error {
+	if b.Available() < 1 {
+		return ErrNoSpace
+	}
+	b.data[b.front] = c
+	b.front = b.addIndex(b.front, 1)
+	return nil
+}
+
+// prefixLen returns the length of the common prefix of a and b.
+func prefixLen(a, b []byte) int {
+	if len(a) > len(b) {
+		a, b = b, a
+	}
+	for i, c := range a {
+		if b[i] != c {
+			return i
+		}
+	}
+	return len(a)
+}
+
+// matchLen returns the length of the common prefix for the given
+// distance from the rear and the byte slice p.
+func (b *buffer) matchLen(distance int, p []byte) int {
+	var n int
+	i := b.rear - distance
+	if i < 0 {
+		if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
+			return n
+		}
+		p = p[n:]
+		i = 0
+	}
+	n += prefixLen(p, b.data[i:])
+	return n
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
new file mode 100644
index 00000000..a3696ba0
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
@@ -0,0 +1,37 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"io"
+)
+
+// ErrLimit indicates that the limit of the LimitedByteWriter has been
+// reached.
+var ErrLimit = errors.New("limit reached")
+
+// LimitedByteWriter provides a byte writer that can be written until a
+// limit is reached. The field N provides the number of remaining
+// bytes.
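+//
+// A usage sketch, assuming a bytes.Buffer as the underlying
+// io.ByteWriter:
+//
+//	var buf bytes.Buffer
+//	w := &LimitedByteWriter{BW: &buf, N: 4}
+//	for i := 0; i < 8; i++ {
+//		if err := w.WriteByte('x'); err == ErrLimit {
+//			break // buf now holds "xxxx"
+//		}
+//	}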
+type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 00000000..16e14db3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
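+//
+// A sketch of the decision tree the decoded bits select from (rep[k]
+// are the four most recently used match distances):
+//
+//	isMatch == 0                   -> literal
+//	isRep == 0                     -> match with a newly coded distance
+//	isRepG0 == 0, isRepG0Long == 0 -> short rep: length 1 at rep[0]
+//	isRepG0 == 0, isRepG0Long == 1 -> rep match at rep[0]
+//	isRepG1 == 0                   -> rep match at rep[1]
+//	isRepG2 == 0                   -> rep match at rep[2]
+//	otherwise                      -> rep match at rep[3]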
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 00000000..564a12b8 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,135 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. 
If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. +func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } + +// Buffered returns the number of bytes currently buffered in the +// decoder dictionary. +func (d *decoderDict) buffered() int { return d.buf.Buffered() } + +// Peek gets data from the buffer without advancing the rear index. +func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 00000000..e08eb989 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,49 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "fmt" + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// makeDirectCodec creates a directCodec. The function panics if the number of +// bits is not in the range [1,32]. +func makeDirectCodec(bits int) directCodec { + if !(1 <= bits && bits <= 32) { + panic(fmt.Errorf("bits=%d out of range", bits)) + } + return directCodec(bits) +} + +// Bits returns the number of bits supported by this codec. +func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. 
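+//
+// A worked example, assuming dc == directCodec(3) and v == 0b101:
+//
+//	for i := 2; i >= 0; i-- {
+//		e.DirectEncodeBit(v >> uint(i)) // only the low bit matters
+//	}
+//	// emits the bit sequence 1, 0, 1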
+func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. +func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 00000000..b053a2dc --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,156 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 + // maximum position slot + maxPosSlot = 63 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// distBits returns the number of bits required to encode dist. +func distBits(dist uint32) int { + if dist < startPosModel { + return 6 + } + // slot s > 3, dist d + // s = 2(bits(d)-1) + bit(d, bits(d)-2) + // s>>1 = bits(d)-1 + // bits(d) = 32-nlz32(d) + // s>>1=31-nlz32(d) + // n = 5 + (s>>1) = 36 - nlz32(d) + return 36 - nlz32(dist) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. 
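+//
+// A worked example of the slot computation below, assuming dist == 96
+// (0b110_0000):
+//
+//	bits    = 30 - nlz32(96)                  // 5
+//	posSlot = 4 - 2 + bits<<1 + (96>>bits)&1  // 13
+//
+// Slot 13 is below endPosModel, so the remaining low bits of the
+// distance are encoded with the tree codec posModel[13-startPosModel].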
+func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. +func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 00000000..fe1900a6 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 16 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. 
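+//
+// A construction sketch, assuming an io.ByteWriter bw and a *state st
+// initialized elsewhere (dictCap and bufSize are illustrative values):
+//
+//	m, _ := newHashTable(dictCap, 4)               // matcher over 4-byte words
+//	dict, _ := newEncoderDict(dictCap, bufSize, m)
+//	e, err := newEncoder(bw, st, dict, eosMarker)  // write EOS marker on Close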
+func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. +func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + 
if err != nil { + return err + } + if b == 1 { + e.state.rep[3] = e.state.rep[2] + } + e.state.rep[2] = e.state.rep[1] + } + e.state.rep[1] = e.state.rep[0] + e.state.rep[0] = dist + } + e.state.updateStateRep() + return e.state.repLenCodec.Encode(e.re, n, posState) +} + +// writeOp writes a single operation to the range encoder. The function +// checks whether there is enough space available to close the LZMA +// stream. +func (e *encoder) writeOp(op operation) error { + if e.re.Available() < int64(e.margin) { + return ErrLimit + } + switch x := op.(type) { + case lit: + return e.writeLiteral(x) + case match: + return e.writeMatch(x) + default: + panic("unexpected operation") + } +} + +// compress compressed data from the dictionary buffer. If the flag all +// is set, all data in the dictionary buffer will be compressed. The +// function returns ErrLimit if the underlying writer has reached its +// limit. +func (e *encoder) compress(flags compressFlags) error { + n := 0 + if flags&all == 0 { + n = maxMatchLen - 1 + } + d := e.dict + m := d.m + for d.Buffered() > n { + op := m.NextOp(e.state.rep) + if err := e.writeOp(op); err != nil { + return err + } + d.Discard(op.Len()) + } + return nil +} + +// eosMatch is a pseudo operation that indicates the end of the stream. +var eosMatch = match{distance: maxDistance, n: minMatchLen} + +// Close terminates the LZMA stream. If requested the end-of-stream +// marker will be written. If the byte writer limit has been or will be +// reached during compression of the remaining data in the buffer the +// LZMA stream will be closed and data will remain in the buffer. +func (e *encoder) Close() error { + err := e.compress(all) + if err != nil && err != ErrLimit { + return err + } + if e.marker { + if err := e.writeMatch(eosMatch); err != nil { + return err + } + } + err = e.re.Close() + return err +} + +// Compressed returns the number bytes of the input data that been +// compressed. +func (e *encoder) Compressed() int64 { + return e.dict.Pos() - e.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go new file mode 100644 index 00000000..9d0fbc70 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -0,0 +1,149 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// matcher is an interface that supports the identification of the next +// operation. +type matcher interface { + io.Writer + SetDict(d *encoderDict) + NextOp(rep [4]uint32) operation +} + +// encoderDict provides the dictionary of the encoder. It includes an +// addtional buffer atop of the actual dictionary. +type encoderDict struct { + buf buffer + m matcher + head int64 + capacity int + // preallocated array + data [maxMatchLen]byte +} + +// newEncoderDict creates the encoder dictionary. The argument bufSize +// defines the size of the additional buffer. +func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New( + "lzma: dictionary capacity out of range") + } + if bufSize < 1 { + return nil, errors.New( + "lzma: buffer size must be larger than zero") + } + d = &encoderDict{ + buf: *newBuffer(dictCap + bufSize), + capacity: dictCap, + m: m, + } + m.SetDict(d) + return d, nil +} + +// Discard discards n bytes. 
Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. +func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 0000000000000000000000000000000000000000..5edad633266eb5173a7c39761dc8b9e71efbfe80 GIT binary patch literal 67 zcma!LU}#|Y4+RWbQXGqzRntCtR~%i$`d{za%}WYWYfXMUl6~Q5_UjH?=5CuO0w(I5 UuQ#VXelz{mI_3ZW`W7$%0HEw6g#Z8m literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 00000000..d786a974 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. 
+const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. +const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. +var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. +type hashTable struct { + dict *encoderDict + // actual hash table + t []int64 + // circular list data with the offset to the next word + data []uint32 + front int + // mask for computing the index for the hash table + mask uint64 + // hash offset; initial value is -int64(wordLen) + hoff int64 + // length of the hashed word + wordLen int + // hash roller for computing the hash values for the Write + // method + wr hash.Roller + // hash roller for computing arbitrary hashes + hr hash.Roller + // preallocated slices + p [maxMatches]int64 + distances [maxMatches + shortDists]int +} + +// hashTableExponent derives the hash table exponent from the dictionary +// capacity. +func hashTableExponent(n uint32) int { + e := 30 - nlz32(n) + switch { + case e < minTableExponent: + e = minTableExponent + case e > maxTableExponent: + e = maxTableExponent + } + return e +} + +// newHashTable creates a new hash table for words of length wordLen +func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { + if !(0 < capacity) { + return nil, errors.New( + "newHashTable: capacity must not be negative") + } + exp := hashTableExponent(uint32(capacity)) + if !(1 <= wordLen && wordLen <= 4) { + return nil, errors.New("newHashTable: " + + "argument wordLen out of range") + } + n := 1 << uint(exp) + if n <= 0 { + panic("newHashTable: exponent is too large") + } + t = &hashTable{ + t: make([]int64, n), + data: make([]uint32, capacity), + mask: (uint64(1) << uint(exp)) - 1, + hoff: -int64(wordLen), + wordLen: wordLen, + wr: newRoller(wordLen), + hr: newRoller(wordLen), + } + return t, nil +} + +func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } + +// buffered returns the number of bytes that are currently hashed. +func (t *hashTable) buffered() int { + n := t.hoff + 1 + switch { + case n <= 0: + return 0 + case n >= int64(len(t.data)): + return len(t.data) + } + return int(n) +} + +// addIndex adds n to an index ensuring that is stays inside the +// circular buffer for the hash chain. +func (t *hashTable) addIndex(i, n int) int { + i += n - len(t.data) + if i < 0 { + i += len(t.data) + } + return i +} + +// putDelta puts the delta instance at the current front of the circular +// chain buffer. +func (t *hashTable) putDelta(delta uint32) { + t.data[t.front] = delta + t.front = t.addIndex(t.front, 1) +} + +// putEntry puts a new entry into the hash table. If there is already a +// value stored it is moved into the circular chain buffer. 
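+//
+// A worked example of the delta chain, assuming the same hash h was
+// already entered at position 100 and is now entered at position 130:
+//
+//	t.t[h&t.mask] == 131 // latest position + 1
+//	// the chain-buffer slot for position 130 stores delta == 30,
+//	// linking back to position 100; delta == 0 terminates the chain.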
+func (t *hashTable) putEntry(h uint64, pos int64) { + if pos < 0 { + return + } + i := h & t.mask + old := t.t[i] - 1 + t.t[i] = pos + 1 + var delta int64 + if old >= 0 { + delta = pos - old + if delta > 1<<32-1 || delta > int64(t.buffered()) { + delta = 0 + } + } + t.putDelta(uint32(delta)) +} + +// WriteByte converts a single byte into a hash and puts them into the hash +// table. +func (t *hashTable) WriteByte(b byte) error { + h := t.wr.RollByte(b) + t.hoff++ + t.putEntry(h, t.hoff) + return nil +} + +// Write converts the bytes provided into hash tables and stores the +// abbreviated offsets into the hash table. The method will never return an +// error. +func (t *hashTable) Write(p []byte) (n int, err error) { + for _, b := range p { + // WriteByte doesn't generate an error. + t.WriteByte(b) + } + return len(p), nil +} + +// getMatches the matches for a specific hash. The functions returns the +// number of positions found. +// +// TODO: Make a getDistances because that we are actually interested in. +func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { + if t.hoff < 0 || len(positions) == 0 { + return 0 + } + buffered := t.buffered() + tailPos := t.hoff + 1 - int64(buffered) + rear := t.front - buffered + if rear >= 0 { + rear -= len(t.data) + } + // get the slot for the hash + pos := t.t[h&t.mask] - 1 + delta := pos - tailPos + for { + if delta < 0 { + return n + } + positions[n] = tailPos + delta + n++ + if n >= len(positions) { + return n + } + i := rear + int(delta) + if i < 0 { + i += len(t.data) + } + u := t.data[i] + if u == 0 { + return n + } + delta -= int64(u) + } +} + +// hash computes the rolling hash for the word stored in p. For correct +// results its length must be equal to t.wordLen. +func (t *hashTable) hash(p []byte) uint64 { + var h uint64 + for _, b := range p { + h = t.hr.RollByte(b) + } + return h +} + +// Matches fills the positions slice with potential matches. The +// functions returns the number of positions filled into positions. The +// byte slice p must have word length of the hash table. +func (t *hashTable) Matches(p []byte, positions []int64) int { + if len(p) != t.wordLen { + panic(fmt.Errorf( + "byte slice must have length %d", t.wordLen)) + } + h := t.hash(p) + return t.getMatches(h, positions) +} + +// NextOp identifies the next operation using the hash table. +// +// TODO: Use all repetitions to find matches. +func (t *hashTable) NextOp(rep [4]uint32) operation { + // get positions + data := t.dict.data[:maxMatchLen] + n, _ := t.dict.buf.Peek(data) + data = data[:n] + var p []int64 + if n < t.wordLen { + p = t.p[:0] + } else { + p = t.p[:maxMatches] + n = t.Matches(data[:t.wordLen], p) + p = p[:n] + } + + // convert positions in potential distances + head := t.dict.head + dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) + for _, pos := range p { + dis := int(head - pos) + if dis > shortDists { + dists = append(dists, dis) + } + } + + // check distances + var m match + dictLen := t.dict.DictLen() + for _, dist := range dists { + if dist > dictLen { + continue + } + + // Here comes a trick. We are only interested in matches + // that are longer than the matches we have been found + // before. So before we test the whole byte sequence at + // the given distance, we test the first byte that would + // make the match longer. If it doesn't match the byte + // to match, we don't to care any longer. 
+ i := t.dict.buf.rear - dist + m.n + if i < 0 { + i += len(t.dict.buf.data) + } + if t.dict.buf.data[i] != data[m.n] { + // We can't get a longer match. Jump to the next + // distance. + continue + } + + n := t.dict.buf.matchLen(dist, data) + switch n { + case 0: + continue + case 1: + if uint32(dist-minDistance) != rep[0] { + continue + } + } + if n > m.n { + m = match{int64(dist), n} + if n == len(data) { + // No better match will be found. + break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 00000000..bc708969 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,167 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// uint32LE reads an uint32 integer from a byte slice +func uint32LE(b []byte) uint32 { + x := uint32(b[3]) << 24 + x |= uint32(b[2]) << 16 + x |= uint32(b[1]) << 8 + x |= uint32(b[0]) + return x +} + +// uint64LE converts the uint64 value stored as little endian to an uint64 +// value. +func uint64LE(b []byte) uint64 { + x := uint64(b[7]) << 56 + x |= uint64(b[6]) << 48 + x |= uint64(b[5]) << 40 + x |= uint64(b[4]) << 32 + x |= uint64(b[3]) << 24 + x |= uint64(b[2]) << 16 + x |= uint64(b[1]) << 8 + x |= uint64(b[0]) + return x +} + +// putUint32LE puts an uint32 integer into a byte slice that must have at least +// a length of 4 bytes. +func putUint32LE(b []byte, x uint32) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) +} + +// putUint64LE puts the uint64 value into the byte slice as little endian +// value. The byte slice b must have at least place for 8 bytes. +func putUint64LE(b []byte, x uint64) { + b[0] = byte(x) + b[1] = byte(x >> 8) + b[2] = byte(x >> 16) + b[3] = byte(x >> 24) + b[4] = byte(x >> 32) + b[5] = byte(x >> 40) + b[6] = byte(x >> 48) + b[7] = byte(x >> 56) +} + +// noHeaderSize defines the value of the length field in the LZMA header. +const noHeaderSize uint64 = 1<<64 - 1 + +// HeaderLen provides the length of the LZMA file header. +const HeaderLen = 13 + +// header represents the header of an LZMA file. +type header struct { + properties Properties + dictCap int + // uncompressed size; negative value if no size is given + size int64 +} + +// marshalBinary marshals the header. +func (h *header) marshalBinary() (data []byte, err error) { + if err = h.properties.verify(); err != nil { + return nil, err + } + if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) { + return nil, fmt.Errorf("lzma: DictCap %d out of range", + h.dictCap) + } + + data = make([]byte, 13) + + // property byte + data[0] = h.properties.Code() + + // dictionary capacity + putUint32LE(data[1:5], uint32(h.dictCap)) + + // uncompressed size + var s uint64 + if h.size > 0 { + s = uint64(h.size) + } else { + s = noHeaderSize + } + putUint64LE(data[5:], s) + + return data, nil +} + +// unmarshalBinary unmarshals the header. 
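+//
+// A worked example of the 13-byte layout, assuming the classic
+// properties byte 0x5d and an 8 MiB dictionary:
+//
+//	5d 00 00 80 00 ff ff ff ff ff ff ff ff
+//
+//	data[0]    0x5d: Properties code 93 (lc=3, lp=0, pb=2)
+//	data[1:5]  little-endian dictCap 0x00800000 (8 MiB)
+//	data[5:13] all 0xff: noHeaderSize, so h.size becomes -1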
+func (h *header) unmarshalBinary(data []byte) error {
+	if len(data) != HeaderLen {
+		return errors.New("lzma.unmarshalBinary: data has wrong length")
+	}
+
+	// properties
+	var err error
+	if h.properties, err = PropertiesForCode(data[0]); err != nil {
+		return err
+	}
+
+	// dictionary capacity
+	h.dictCap = int(uint32LE(data[1:]))
+	if h.dictCap < 0 {
+		return errors.New(
+			"LZMA header: dictionary capacity exceeds maximum " +
+				"integer")
+	}
+
+	// uncompressed size
+	s := uint64LE(data[5:])
+	if s == noHeaderSize {
+		h.size = -1
+	} else {
+		h.size = int64(s)
+		if h.size < 0 {
+			return errors.New(
+				"LZMA header: uncompressed size " +
+					"out of int64 range")
+		}
+	}
+
+	return nil
+}
+
+// validDictCap checks whether the dictionary capacity is correct. This
+// is used to weed out wrong file headers.
+func validDictCap(dictcap int) bool {
+	if int64(dictcap) == MaxDictCap {
+		return true
+	}
+	for n := uint(10); n < 32; n++ {
+		if dictcap == 1<<n {
+			return true
+		}
+		if dictcap == 1<<n+1<<(n-1) {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidHeader checks for a valid LZMA file header. It allows only
+// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
+// there is an explicit size it must not exceed 256 GiB. The length of
+// the data argument must be HeaderLen.
+func ValidHeader(data []byte) bool {
+	var h header
+	if err := h.unmarshalBinary(data); err != nil {
+		return false
+	}
+	if !validDictCap(h.dictCap) {
+		return false
+	}
+	return h.size < 0 || h.size <= 1<<38
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go
new file mode 100644
index 00000000..ac6a71a5
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go
@@ -0,0 +1,398 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+const (
+	// maximum size of compressed data in a chunk
+	maxCompressed = 1 << 16
+	// maximum size of uncompressed data in a chunk
+	maxUncompressed = 1 << 21
+)
+
+// chunkType represents the type of an LZMA2 chunk. Note that this
+// value is an internal representation and not an actual encoding of a
+// LZMA2 chunk header.
+type chunkType byte
+
+// Possible values for the chunk type.
+const (
+	// end of stream
+	cEOS chunkType = iota
+	// uncompressed; reset dictionary
+	cUD
+	// uncompressed; no reset of dictionary
+	cU
+	// LZMA compressed; no reset
+	cL
+	// LZMA compressed; reset state
+	cLR
+	// LZMA compressed; reset state; new property value
+	cLRN
+	// LZMA compressed; reset state; new property value; reset dictionary
+	cLRND
+)
+
+// chunkTypeStrings provides a string representation for the chunk types.
+var chunkTypeStrings = [...]string{
+	cEOS:  "EOS",
+	cU:    "U",
+	cUD:   "UD",
+	cL:    "L",
+	cLR:   "LR",
+	cLRN:  "LRN",
+	cLRND: "LRND",
+}
+
+// String returns a string representation of the chunk type.
+func (c chunkType) String() string {
+	if !(cEOS <= c && c <= cLRND) {
+		return "unknown"
+	}
+	return chunkTypeStrings[c]
+}
+
+// Actual encodings for the chunk types in the header byte. Note that
+// the high uncompressed size bits are stored in the header byte
+// additionally.
+const (
+	hEOS  = 0
+	hUD   = 1
+	hU    = 2
+	hL    = 1 << 7
+	hLR   = 1<<7 | 1<<5
+	hLRN  = 1<<7 | 1<<6
+	hLRND = 1<<7 | 1<<6 | 1<<5
+)
+
+// errHeaderByte indicates an unsupported value for the chunk header
+// byte. These bytes start the variable-length chunk header.
+var errHeaderByte = errors.New("lzma: unsupported chunk header byte")
+
+// headerChunkType converts the header byte into a chunk type. It
+// ignores the uncompressed size bits in the chunk header byte.
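+//
+// A worked example, assuming the header byte h == 0xe4:
+//
+//	h&hL != 0        // an LZMA-compressed chunk
+//	h&hLRND == hLRND // chunk type cLRND
+//	h &^ hLRND       // 0x04: the high uncompressed-size bits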
+func headerChunkType(h byte) (c chunkType, err error) { + if h&hL == 0 { + // no compression + switch h { + case hEOS: + c = cEOS + case hUD: + c = cUD + case hU: + c = cU + default: + return 0, errHeaderByte + } + return + } + switch h & hLRND { + case hL: + c = cL + case hLR: + c = cLR + case hLRN: + c = cLRN + case hLRND: + c = cLRND + default: + return 0, errHeaderByte + } + return +} + +// uncompressedHeaderLen provides the length of an uncompressed header +const uncompressedHeaderLen = 3 + +// headerLen returns the length of the LZMA2 header for a given chunk +// type. +func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. +func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. +func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. +func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. 
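+//
+// A worked example, assuming p == []byte{0x12, 0x34}:
+//
+//	uint16BE(p)            // 0x1234
+//	putUint16BE(q, 0x1234) // stores 0x12, 0x34 into q[0], q[1]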
+func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. +func putUint16BE(p []byte, x uint16) { + p[0] = byte(x >> 8) + p[1] = byte(x) +} + +// chunkState is used to manage the state of the chunks +type chunkState byte + +// start and stop define the initial and terminating state of the chunk +// state +const ( + start chunkState = 'S' + stop = 'T' +) + +// errors for the chunk state handling +var ( + errChunkType = errors.New("lzma: unexpected chunk type") + errState = errors.New("lzma: wrong chunk state") +) + +// next transitions state based on chunk type input +func (c *chunkState) next(ctype chunkType) error { + switch *c { + // start state + case 'S': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cLRND: + *c = 'L' + default: + return errChunkType + } + // normal LZMA mode + case 'L': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + *c = 'U' + case cL, cLR, cLRN, cLRND: + break + default: + return errChunkType + } + // reset required + case 'R': + switch ctype { + case cEOS: + *c = 'T' + case cUD, cU: + break + case cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // uncompressed + case 'U': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + break + case cL, cLR, cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // terminal state + case 'T': + return errChunkType + default: + return errState + } + return nil +} + +// defaultChunkType returns the default chunk type for each chunk state. +func (c chunkState) defaultChunkType() chunkType { + switch c { + case 'S': + return cLRND + case 'L', 'U': + return cL + case 'R': + return cLRN + default: + // no error + return cEOS + } +} + +// maxDictCap defines the maximum dictionary capacity supported by the +// LZMA2 dictionary capacity encoding. +const maxDictCap = 1<<32 - 1 + +// maxDictCapCode defines the maximum dictionary capacity code. +const maxDictCapCode = 40 + +// The function decodes the dictionary capacity byte, but doesn't change +// for the correct range of the given byte. +func decodeDictCap(c byte) int64 { + return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) +} + +// DecodeDictCap decodes the encoded dictionary capacity. The function +// returns an error if the code is out of range. +func DecodeDictCap(c byte) (n int64, err error) { + if c >= maxDictCapCode { + if c == maxDictCapCode { + return maxDictCap, nil + } + return 0, errors.New("lzma: invalid dictionary size code") + } + return decodeDictCap(c), nil +} + +// EncodeDictCap encodes a dictionary capacity. The function returns the +// code for the capacity that is greater or equal n. If n exceeds the +// maximum support dictionary capacity, the maximum value is returned. +func EncodeDictCap(n int64) byte { + a, b := byte(0), byte(40) + for a < b { + c := a + (b-a)>>1 + m := decodeDictCap(c) + if n <= m { + if n == m { + return c + } + b = c + } else { + a = c + 1 + } + } + return a +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go new file mode 100644 index 00000000..e5177309 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -0,0 +1,129 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
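A standalone sketch of the dictionary-capacity code decoded by decodeDictCap above; it just replays the (2 | c&1) << (11 + c>>1) formula for the first few codes.

```go
package main

import "fmt"

func main() {
	// codes alternate between 2^k and 3*2^(k-1): 4 KiB, 6 KiB, 8 KiB, ...
	for c := byte(0); c < 6; c++ {
		size := (2 | int64(c)&1) << (11 + (c>>1)&0x1f)
		fmt.Printf("code %d -> %d bytes\n", c, size)
	}
}
```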
+
+package lzma
+
+import "errors"
+
+// maxPosBits defines the number of bits of the position value that are
+// used to compute the posState value. The value is used to select the
+// tree codec for length encoding and decoding.
+const maxPosBits = 4
+
+// minMatchLen and maxMatchLen give the minimum and maximum values for
+// encoding and decoding length values. minMatchLen is also used as base
+// for the encoded length values.
+const (
+	minMatchLen = 2
+	maxMatchLen = minMatchLen + 16 + 256 - 1
+)
+
+// lengthCodec supports the encoding of the length value.
+type lengthCodec struct {
+	choice [2]prob
+	low    [1 << maxPosBits]treeCodec
+	mid    [1 << maxPosBits]treeCodec
+	high   treeCodec
+}
+
+// deepcopy initializes the lc value as deep copy of the source value.
+func (lc *lengthCodec) deepcopy(src *lengthCodec) {
+	if lc == src {
+		return
+	}
+	lc.choice = src.choice
+	for i := range lc.low {
+		lc.low[i].deepcopy(&src.low[i])
+	}
+	for i := range lc.mid {
+		lc.mid[i].deepcopy(&src.mid[i])
+	}
+	lc.high.deepcopy(&src.high)
+}
+
+// init initializes a new length codec.
+func (lc *lengthCodec) init() {
+	for i := range lc.choice {
+		lc.choice[i] = probInit
+	}
+	for i := range lc.low {
+		lc.low[i] = makeTreeCodec(3)
+	}
+	for i := range lc.mid {
+		lc.mid[i] = makeTreeCodec(3)
+	}
+	lc.high = makeTreeCodec(8)
+}
+
+// lBits gives the number of bits used for the encoding of the l value
+// provided to the range encoder.
+func lBits(l uint32) int {
+	switch {
+	case l < 8:
+		return 4
+	case l < 16:
+		return 5
+	default:
+		return 10
+	}
+}
+
+// Encode encodes the length offset. The length offset l can be computed
+// by subtracting minMatchLen (2) from the actual length.
+//
+//	l = length - minMatchLen
+//
+func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32,
+) (err error) {
+	if l > maxMatchLen-minMatchLen {
+		return errors.New("lengthCodec.Encode: l out of range")
+	}
+	if l < 8 {
+		if err = lc.choice[0].Encode(e, 0); err != nil {
+			return
+		}
+		return lc.low[posState].Encode(e, l)
+	}
+	if err = lc.choice[0].Encode(e, 1); err != nil {
+		return
+	}
+	if l < 16 {
+		if err = lc.choice[1].Encode(e, 0); err != nil {
+			return
+		}
+		return lc.mid[posState].Encode(e, l-8)
+	}
+	if err = lc.choice[1].Encode(e, 1); err != nil {
+		return
+	}
+	if err = lc.high.Encode(e, l-16); err != nil {
+		return
+	}
+	return nil
+}
+
+// Decode reads the length offset. Add minMatchLen to the length offset
+// l to compute the actual length.
+func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32,
+) (l uint32, err error) {
+	var b uint32
+	if b, err = lc.choice[0].Decode(d); err != nil {
+		return
+	}
+	if b == 0 {
+		l, err = lc.low[posState].Decode(d)
+		return
+	}
+	if b, err = lc.choice[1].Decode(d); err != nil {
+		return
+	}
+	if b == 0 {
+		l, err = lc.mid[posState].Decode(d)
+		l += 8
+		return
+	}
+	l, err = lc.high.Decode(d)
+	l += 16
+	return
+}
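A standalone sketch of the three-way length split used by lengthCodec above; slot is a made-up name that replays the low/mid/high selection on the length offset l = length - minMatchLen.

```go
package main

import "fmt"

func slot(length int) string {
	l := length - 2 // minMatchLen == 2
	switch {
	case l < 8:
		return "low" // 3-bit tree per posState
	case l < 16:
		return "mid" // 3-bit tree per posState
	default:
		return "high" // shared 8-bit tree
	}
}

func main() {
	for _, n := range []int{2, 9, 10, 18, 273} {
		fmt.Printf("match length %3d -> %s tree\n", n, slot(n))
	}
}
```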
diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
new file mode 100644
index 00000000..c949d6eb
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go
@@ -0,0 +1,132 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// literalCodec supports the encoding of literals. It provides 768 probability
+// values per literal state. The upper 512 probabilities are used with the
+// context of a match bit.
+type literalCodec struct {
+	probs []prob
+}
+
+// deepcopy initializes literal codec c as a deep copy of the source.
+func (c *literalCodec) deepcopy(src *literalCodec) {
+	if c == src {
+		return
+	}
+	c.probs = make([]prob, len(src.probs))
+	copy(c.probs, src.probs)
+}
+
+// init initializes the literal codec.
+func (c *literalCodec) init(lc, lp int) {
+	switch {
+	case !(minLC <= lc && lc <= maxLC):
+		panic("lc out of range")
+	case !(minLP <= lp && lp <= maxLP):
+		panic("lp out of range")
+	}
+	c.probs = make([]prob, 0x300<<uint(lc+lp))
+	for i := range c.probs {
+		c.probs[i] = probInit
+	}
+}
+
+// Encode encodes the byte s using a range encoder as well as the current LZMA
+// encoder state, a match byte and the literal state.
+func (c *literalCodec) Encode(e *rangeEncoder, s byte,
+	state uint32, match byte, litState uint32,
+) (err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	r := uint32(s)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			bit := (r >> 7) & 1
+			r <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			if err = probs[i].Encode(e, bit); err != nil {
+				return
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit := (r >> 7) & 1
+		r <<= 1
+		if err = probs[symbol].Encode(e, bit); err != nil {
+			return
+		}
+		symbol = (symbol << 1) | bit
+	}
+	return nil
+}
+
+// Decode decodes a literal byte using the range decoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Decode(d *rangeDecoder,
+	state uint32, match byte, litState uint32,
+) (s byte, err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			bit, err := d.DecodeBit(&probs[i])
+			if err != nil {
+				return 0, err
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit, err := d.DecodeBit(&probs[symbol])
+		if err != nil {
+			return 0, err
+		}
+		symbol = (symbol << 1) | bit
+	}
+	s = byte(symbol - 0x100)
+	return s, nil
+}
+
+// minLC and maxLC define the range for LC values.
+const (
+	minLC = 0
+	maxLC = 8
+)
+
+// minLP and maxLP define the range for LP values.
+const (
+	minLP = 0
+	maxLP = 4
+)
+
+// minState and maxState define a range for the state values stored in
+// the State values.
+const (
+	minState = 0
+	maxState = 11
+)
diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 00000000..4a244eb1
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// MatchAlgorithm identifies an algorithm to find matches in the
+// dictionary.
+type MatchAlgorithm byte
+
+// Supported matcher algorithms.
+const (
+	HashTable4 MatchAlgorithm = iota
+	BinaryTree
+)
+
+// maStrings are used by the String method.
+var maStrings = map[MatchAlgorithm]string{
+	HashTable4: "HashTable4",
+	BinaryTree: "BinaryTree",
+}
+
+// String returns a string representation of the Matcher.
+func (a MatchAlgorithm) String() string {
+	if s, ok := maStrings[a]; ok {
+		return s
+	}
+	return "unknown"
+}
+
+var errUnsupportedMatchAlgorithm = errors.New(
+	"lzma: unsupported match algorithm value")
+
+// verify checks whether the matcher value is supported.
+func (a MatchAlgorithm) verify() error { + if _, ok := maStrings[a]; !ok { + return errUnsupportedMatchAlgorithm + } + return nil +} + +func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) { + switch a { + case HashTable4: + return newHashTable(dictCap, 4) + case BinaryTree: + return newBinTree(dictCap) + } + return nil, errUnsupportedMatchAlgorithm +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go new file mode 100644 index 00000000..733bb99d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -0,0 +1,80 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "unicode" +) + +// operation represents an operation on the dictionary during encoding or +// decoding. +type operation interface { + Len() int +} + +// rep represents a repetition at the given distance and the given length +type match struct { + // supports all possible distance values, including the eos marker + distance int64 + // length + n int +} + +// verify checks whether the match is valid. If that is not the case an +// error is returned. +func (m match) verify() error { + if !(minDistance <= m.distance && m.distance <= maxDistance) { + return errors.New("distance out of range") + } + if !(1 <= m.n && m.n <= maxMatchLen) { + return errors.New("length out of range") + } + return nil +} + +// l return the l-value for the match, which is the difference of length +// n and 2. +func (m match) l() uint32 { + return uint32(m.n - minMatchLen) +} + +// dist returns the dist value for the match, which is one less of the +// distance stored in the match. +func (m match) dist() uint32 { + return uint32(m.distance - minDistance) +} + +// Len returns the number of bytes matched. +func (m match) Len() int { + return m.n +} + +// String returns a string representation for the repetition. +func (m match) String() string { + return fmt.Sprintf("M{%d,%d}", m.distance, m.n) +} + +// lit represents a single byte literal. +type lit struct { + b byte +} + +// Len returns 1 for the single byte literal. +func (l lit) Len() int { + return 1 +} + +// String returns a string representation for the literal. +func (l lit) String() string { + var c byte + if unicode.IsPrint(rune(l.b)) { + c = l.b + } else { + c = '.' + } + return fmt.Sprintf("L{%c/%02x}", c, l.b) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go new file mode 100644 index 00000000..24d50ec6 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -0,0 +1,53 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// movebits defines the number of bits used for the updates of probability +// values. +const movebits = 5 + +// probbits defines the number of bits of a probability value. +const probbits = 11 + +// probInit defines 0.5 as initial value for prob values. +const probInit prob = 1 << (probbits - 1) + +// Type prob represents probabilities. The type can also be used to encode and +// decode single bits. +type prob uint16 + +// Dec decreases the probability. The decrease is proportional to the +// probability value. +func (p *prob) dec() { + *p -= *p >> movebits +} + +// Inc increases the probability. 
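A standalone sketch of the adaptive update that dec above (and inc, just below) apply to a probability; the constants replay movebits = 5 and probbits = 11.

```go
package main

import "fmt"

func main() {
	p := uint16(1 << 10) // probInit: probability one half of a zero bit
	for i := 0; i < 4; i++ {
		// inc: move 1/32 of the remaining distance toward 1<<probbits
		p += ((1 << 11) - p) >> 5
		fmt.Println(p) // drifts toward 2048 as zero bits are observed
	}
}
```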
The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 00000000..23418e25 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. +const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. +func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 00000000..6361c5e7 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,248 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. The cache value is used to handle +// overflows. 
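The one-byte properties code can be round-tripped with a standalone sketch that replays the Code and PropertiesForCode arithmetic above; 0x5d is the code for the default {LC: 3, LP: 0, PB: 2} found at the start of most .lzma files.

```go
package main

import "fmt"

func main() {
	lc, lp, pb := 3, 0, 2
	code := (pb*5+lp)*9 + lc
	fmt.Printf("code: %#02x\n", code) // 0x5d
	// decode it again, as PropertiesForCode does
	fmt.Println("LC:", code%9, "LP:", code/9%5, "PB:", code/9/5)
}
```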
+type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. +func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. +func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. +func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. +func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// init initializes the range decoder, by reading from the byte reader. +func (d *rangeDecoder) init() error { + d.nrange = 0xffffffff + d.code = 0 + + b, err := d.br.ReadByte() + if err != nil { + return err + } + if b != 0 { + return errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return err + } + } + + if d.code >= d.nrange { + return errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return nil +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. 
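A standalone sketch of the split that EncodeBit above performs on the current range; the numbers replay bound = (range >> probbits) * p for a probability of one half.

```go
package main

import "fmt"

func main() {
	nrange := uint32(0xffffffff)
	p := uint32(1 << 10) // probability one half
	bound := (nrange >> 11) * p
	// bit 0 keeps the lower part of the range, bit 1 the upper part
	fmt.Printf("bit 0 keeps [0, %#x), bit 1 keeps [%#x, %#x)\n",
		bound, bound, nrange)
}
```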
+func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. +func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 00000000..2ef3dcaa --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,100 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and doesn't rely on any external +// library. +package lzma + +import ( + "errors" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. 
+type Reader struct { + lzma io.Reader + h header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// NewReader creates a new reader for an LZMA stream in the classic +// format. The function reads and verifies the the header of the LZMA +// stream. +func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(lzma, data); err != nil { + if err == io.EOF { + return nil, errors.New("lzma: unexpected EOF") + } + return nil, err + } + r = &Reader{lzma: lzma} + if err = r.h.unmarshalBinary(data); err != nil { + return nil, err + } + if r.h.dictCap < MinDictCap { + return nil, errors.New("lzma: dictionary capacity too small") + } + dictCap := r.h.dictCap + if c.DictCap > dictCap { + dictCap = c.DictCap + } + + state := newState(r.h.properties) + dict, err := newDecoderDict(dictCap) + if err != nil { + return nil, err + } + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) + if err != nil { + return nil, err + } + return r, nil +} + +// EOSMarker indicates that an EOS marker has been encountered. +func (r *Reader) EOSMarker() bool { + return r.d.eosMarker +} + +// Read returns uncompressed data. +func (r *Reader) Read(p []byte) (n int, err error) { + return r.d.Read(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go new file mode 100644 index 00000000..a55cfaa4 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -0,0 +1,232 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" + + "github.com/ulikunitz/xz/internal/xlog" +) + +// Reader2Config stores the parameters for the LZMA2 reader. +// format. +type Reader2Config struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *Reader2Config) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero configuration values +// will be replaced by default values. +func (c *Reader2Config) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader2 supports the reading of LZMA2 chunk sequences. Note that the +// first chunk should have a dictionary reset and the first compressed +// chunk a properties reset. The chunk sequence may not be terminated by +// an end-of-stream chunk. +type Reader2 struct { + r io.Reader + err error + + dict *decoderDict + ur *uncompressedReader + decoder *decoder + chunkReader io.Reader + + cstate chunkState + ctype chunkType +} + +// NewReader2 creates a reader for an LZMA2 chunk sequence. +func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + return Reader2Config{}.NewReader2(lzma2) +} + +// NewReader2 creates an LZMA2 reader using the given configuration. 
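A hypothetical usage sketch for the classic-format Reader above; the input file name is made up.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	f, err := os.Open("data.lzma") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	r, err := lzma.NewReader(f) // reads and checks the 13-byte header
	if err != nil {
		log.Fatal(err)
	}
	if _, err = io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```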
+func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader2{r: lzma2, cstate: start} + r.dict, err = newDecoderDict(c.DictCap) + if err != nil { + return nil, err + } + if err = r.startChunk(); err != nil { + r.err = err + } + return r, nil +} + +// uncompressed tests whether the chunk type specifies an uncompressed +// chunk. +func uncompressed(ctype chunkType) bool { + return ctype == cU || ctype == cUD +} + +// startChunk parses a new chunk. +func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. 
+func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 00000000..50235105 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,151 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. +type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// initState initializes the state. +func initState(s *state, p Properties) { + *s = state{Properties: p} + s.Reset() +} + +// newState creates a new state from the give Properties. +func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. +func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. 
+func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. +func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 00000000..504b3d78 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// treeCodec encodes or decodes values with a fixed bit size. It is using a +// tree of probability value. The root of the tree is the most-significant bit. +type treeCodec struct { + probTree +} + +// makeTreeCodec makes a tree codec. The bits value must be inside the range +// [1,32]. +func makeTreeCodec(bits int) treeCodec { + return treeCodec{makeProbTree(bits)} +} + +// deepcopy initializes tc as a deep copy of the source. +func (tc *treeCodec) deepcopy(src *treeCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// Encode uses the range encoder to encode a fixed-bit-size value. +func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) { + m := uint32(1) + for i := int(tc.bits) - 1; i >= 0; i-- { + b := (v >> uint(i)) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors may +// be caused by the range decoder. +func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) { + m := uint32(1) + for j := 0; j < int(tc.bits); j++ { + b, err := d.DecodeBit(&tc.probs[m]) + if err != nil { + return 0, err + } + m = (m << 1) | b + } + return m - (1 << uint(tc.bits)), nil +} + +// treeReverseCodec is another tree codec, where the least-significant bit is +// the start of the probability tree. +type treeReverseCodec struct { + probTree +} + +// deepcopy initializes the treeReverseCodec as a deep copy of the +// source. +func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) { + tc.probTree.deepcopy(&src.probTree) +} + +// makeTreeReverseCodec creates treeReverseCodec value. The bits argument must +// be in the range [1,32]. +func makeTreeReverseCodec(bits int) treeReverseCodec { + return treeReverseCodec{makeProbTree(bits)} +} + +// Encode uses range encoder to encode a fixed-bit-size value. The range +// encoder may cause errors. +func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) { + m := uint32(1) + for i := uint(0); i < uint(tc.bits); i++ { + b := (v >> i) & 1 + if err := e.EncodeBit(b, &tc.probs[m]); err != nil { + return err + } + m = (m << 1) | b + } + return nil +} + +// Decodes uses the range decoder to decode a fixed-bit-size value. Errors +// returned by the range decoder will be returned. 
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := uint(0); j < uint(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+		v |= b << j
+	}
+	return v, nil
+}
+
+// probTree stores enough probability values to be used by the treeEncode and
+// treeDecode methods of the range coder types.
+type probTree struct {
+	probs []prob
+	bits  byte
+}
+
+// deepcopy initializes the probTree value as a deep copy of the source.
+func (t *probTree) deepcopy(src *probTree) {
+	if t == src {
+		return
+	}
+	t.probs = make([]prob, len(src.probs))
+	copy(t.probs, src.probs)
+	t.bits = src.bits
+}
+
+// makeProbTree initializes a probTree structure.
+func makeProbTree(bits int) probTree {
+	if !(1 <= bits && bits <= 32) {
+		panic("bits outside of range [1,32]")
+	}
+	t := probTree{
+		bits:  byte(bits),
+		probs: make([]prob, 1<<uint(bits)),
+	}
+	for i := range t.probs {
+		t.probs[i] = probInit
+	}
+	return t
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"bufio"
+	"errors"
+	"io"
+)
+
+// WriterConfig defines the configuration parameters for a Writer.
+type WriterConfig struct {
+	// The properties for the encoding. If it is nil the value
+	// {LC: 3, LP: 0, PB: 2} will be chosen.
+	Properties *Properties
+	// The capacity of the dictionary. If DictCap is zero, the value
+	// 8 MiB will be chosen.
+	DictCap int
+	// Size of the lookahead buffer; value 0 indicates default size
+	// 4096
+	BufSize int
+	// Match algorithm
+	Matcher MatchAlgorithm
+	// SizeInHeader indicates that the header will contain an
+	// explicit size.
+	SizeInHeader bool
+	// Size of the data to be encoded. A positive value will imply
+	// SizeInHeader.
+	Size int64
+	// EOSMarker requests whether the EOS marker needs to be
+	// written.
+	EOSMarker bool
+}
+
+// fill replaces zero values with default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.Size > 0 {
+		c.SizeInHeader = true
+	}
+	if !c.SizeInHeader {
+		c.EOSMarker = true
+	}
+}
+
+// Verify checks WriterConfig for errors. Verify will replace zero
+// values with default values.
+func (c *WriterConfig) Verify() error {
+	c.fill()
+	var err error
+	if c == nil {
+		return errors.New("lzma: WriterConfig is nil")
+	}
+	if c.Properties == nil {
+		return errors.New("lzma: WriterConfig has no Properties set")
+	}
+	if err = c.Properties.verify(); err != nil {
+		return err
+	}
+	if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) {
+		return errors.New("lzma: dictionary capacity is out of range")
+	}
+	if !(maxMatchLen <= c.BufSize) {
+		return errors.New("lzma: lookahead buffer size too small")
+	}
+	if c.SizeInHeader {
+		if c.Size < 0 {
+			return errors.New("lzma: negative size not supported")
+		}
+	} else if !c.EOSMarker {
+		return errors.New("lzma: EOS marker is required")
+	}
+	if err = c.Matcher.verify(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// header returns the header structure for this configuration.
+func (c *WriterConfig) header() header {
+	h := header{
+		properties: *c.Properties,
+		dictCap:    c.DictCap,
+		size:       -1,
+	}
+	if c.SizeInHeader {
+		h.size = c.Size
+	}
+	return h
+}
+
+// Writer writes an LZMA stream in the classic format.
+type Writer struct {
+	h   header
+	bw  io.ByteWriter
+	buf *bufio.Writer
+	e   *encoder
+}
+
+// NewWriter creates a new LZMA writer for the classic format. The
+// method will write the header to the underlying stream.
+func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer{h: c.header()}
+
+	var ok bool
+	w.bw, ok = lzma.(io.ByteWriter)
+	if !ok {
+		w.buf = bufio.NewWriter(lzma)
+		w.bw = w.buf
+	}
+	state := newState(w.h.properties)
+	m, err := c.Matcher.new(w.h.dictCap)
+	if err != nil {
+		return nil, err
+	}
+	dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m)
+	if err != nil {
+		return nil, err
+	}
+	var flags encoderFlags
+	if c.EOSMarker {
+		flags = eosMarker
+	}
+	if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil {
+		return nil, err
+	}
+
+	if err = w.writeHeader(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// NewWriter creates a new LZMA writer using the classic format. The
+// function writes the header to the underlying stream.
+func NewWriter(lzma io.Writer) (w *Writer, err error) {
+	return WriterConfig{}.NewWriter(lzma)
+}
+
+// writeHeader writes the LZMA header into the stream.
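A hypothetical usage sketch for the Writer defined above, compressing stdin to stdout with the default configuration (8 MiB dictionary, EOS marker, no size in the header).

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	w, err := lzma.NewWriter(os.Stdout) // writes the 13-byte header
	if err != nil {
		log.Fatal(err)
	}
	if _, err = io.Copy(w, os.Stdin); err != nil {
		log.Fatal(err)
	}
	// Close flushes the encoder and, here, writes the EOS marker.
	if err = w.Close(); err != nil {
		log.Fatal(err)
	}
}
```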
+func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.size >= 0 { + m := w.h.size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. +func (w *Writer) Close() error { + if w.h.size >= 0 { + n := w.e.Compressed() + int64(w.e.dict.Buffered()) + if n != w.h.size { + return errSize + } + } + err := w.e.Close() + if w.buf != nil { + ferr := w.buf.Flush() + if err == nil { + err = ferr + } + } + return err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go new file mode 100644 index 00000000..7c1afe15 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -0,0 +1,305 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bytes" + "errors" + "io" +) + +// Writer2Config is used to create a Writer2 using parameters. +type Writer2Config struct { + // The properties for the encoding. If the it is nil the value + // {LC: 3, LP: 0, PB: 2} will be chosen. + Properties *Properties + // The capacity of the dictionary. If DictCap is zero, the value + // 8 MiB will be chosen. + DictCap int + // Size of the lookahead buffer; value 0 indicates default size + // 4096 + BufSize int + // Match algorithm + Matcher MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *Writer2Config) fill() { + if c.Properties == nil { + c.Properties = &Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } +} + +// Verify checks the Writer2Config for correctness. Zero values will be +// replaced by default values. +func (c *Writer2Config) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.Properties.LC+c.Properties.LP > 4 { + return errors.New("lzma: sum of lc and lp exceeds 4") + } + if err = c.Matcher.verify(); err != nil { + return err + } + return nil +} + +// Writer2 supports the creation of an LZMA2 stream. But note that +// written data is buffered, so call Flush or Close to write data to the +// underlying writer. The Close method writes the end-of-stream marker +// to the stream. So you may be able to concatenate the output of two +// writers as long the output of the first writer has only been flushed +// but not closed. +// +// Any change to the fields Properties, DictCap must be done before the +// first call to Write, Flush or Close. 
+type Writer2 struct { + w io.Writer + + start *state + encoder *encoder + + cstate chunkState + ctype chunkType + + buf bytes.Buffer + lbw LimitedByteWriter +} + +// NewWriter2 creates an LZMA2 chunk sequence writer with the default +// parameters and options. +func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + return Writer2Config{}.NewWriter2(lzma2) +} + +// NewWriter2 creates a new LZMA2 writer using the given configuration. +func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. +func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. 
+func (w *Writer2) writeCompressedChunk() error {
+	if w.ctype == cU || w.ctype == cUD {
+		panic("chunk type uncompressed")
+	}
+
+	u := w.encoder.Compressed()
+	if u <= 0 {
+		return errors.New("writeCompressedChunk: empty chunk")
+	}
+	if u > maxUncompressed {
+		panic("overrun of uncompressed data limit")
+	}
+	c := w.buf.Len()
+	if c <= 0 {
+		panic("no compressed data")
+	}
+	if c > maxCompressed {
+		panic("overrun of compressed data limit")
+	}
+	header := chunkHeader{
+		ctype:        w.ctype,
+		uncompressed: uint32(u - 1),
+		compressed:   uint16(c - 1),
+		props:        w.encoder.state.Properties,
+	}
+	hdata, err := header.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.w.Write(hdata); err != nil {
+		return err
+	}
+	_, err = io.Copy(w.w, &w.buf)
+	return err
+}
+
+// writeChunk writes a single chunk to the underlying writer.
+func (w *Writer2) writeChunk() error {
+	u := int(uncompressedHeaderLen + w.encoder.Compressed())
+	c := headerLen(w.ctype) + w.buf.Len()
+	if u < c {
+		return w.writeUncompressedChunk()
+	}
+	return w.writeCompressedChunk()
+}
+
+// flushChunk terminates the current chunk. The encoder will be reset
+// to support the next chunk.
+func (w *Writer2) flushChunk() error {
+	if w.written() == 0 {
+		return nil
+	}
+	var err error
+	if err = w.encoder.Close(); err != nil {
+		return err
+	}
+	if err = w.writeChunk(); err != nil {
+		return err
+	}
+	w.buf.Reset()
+	w.lbw.N = maxCompressed
+	if err = w.encoder.Reopen(&w.lbw); err != nil {
+		return err
+	}
+	if err = w.cstate.next(w.ctype); err != nil {
+		return err
+	}
+	w.ctype = w.cstate.defaultChunkType()
+	w.start = cloneState(w.encoder.state)
+	return nil
+}
+
+// Flush writes all buffered data out to the underlying stream. This
+// could result in multiple chunks being created.
+func (w *Writer2) Flush() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	for w.written() > 0 {
+		if err := w.flushChunk(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Close terminates the LZMA2 stream with an EOS chunk.
+func (w *Writer2) Close() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	// write zero byte EOS chunk
+	_, err := w.w.Write([]byte{0})
+	if err != nil {
+		return err
+	}
+	w.cstate = stop
+	return nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go
new file mode 100644
index 00000000..69cf5f7c
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go
@@ -0,0 +1,117 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// LZMA filter constants.
+const (
+	lzmaFilterID  = 0x21
+	lzmaFilterLen = 3
+)
+
+// lzmaFilter declares the LZMA2 filter information stored in an xz
+// block header.
+type lzmaFilter struct {
+	dictCap int64
+}
+
+// String returns a representation of the LZMA filter.
+func (f lzmaFilter) String() string {
+	return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
+}
+
+// id returns the ID for the LZMA2 filter.
+func (f lzmaFilter) id() uint64 { return lzmaFilterID }
+
+// MarshalBinary converts the lzmaFilter into its encoded representation.
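A hypothetical round-trip sketch for Writer2 above together with Reader2, passing a short buffer through a raw LZMA2 chunk stream.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/ulikunitz/xz/lzma"
)

func main() {
	var buf bytes.Buffer
	w, err := lzma.NewWriter2(&buf)
	if err != nil {
		log.Fatal(err)
	}
	if _, err = w.Write([]byte("hello, lzma2")); err != nil {
		log.Fatal(err)
	}
	// Close terminates the stream with a zero-byte EOS chunk.
	if err = w.Close(); err != nil {
		log.Fatal(err)
	}
	r, err := lzma.NewReader2(&buf)
	if err != nil {
		log.Fatal(err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", out)
}
```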
+func (f lzmaFilter) MarshalBinary() (data []byte, err error) { + c := lzma.EncodeDictCap(f.dictCap) + return []byte{lzmaFilterID, 1, c}, nil +} + +// UnmarshalBinary unmarshals the given data representation of the LZMA2 +// filter. +func (f *lzmaFilter) UnmarshalBinary(data []byte) error { + if len(data) != lzmaFilterLen { + return errors.New("xz: data for LZMA2 filter has wrong length") + } + if data[0] != lzmaFilterID { + return errors.New("xz: wrong LZMA2 filter id") + } + if data[1] != 1 { + return errors.New("xz: wrong LZMA2 filter size") + } + dc, err := lzma.DecodeDictCap(data[2]) + if err != nil { + return errors.New("xz: wrong LZMA2 dictionary size property") + } + + f.dictCap = dc + return nil +} + +// reader creates a new reader for the LZMA2 filter. +func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader, + err error) { + + config := new(lzma.Reader2Config) + if c != nil { + config.DictCap = c.DictCap + } + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fr, err = config.NewReader2(r) + if err != nil { + return nil, err + } + return fr, nil +} + +// writeCloser creates a io.WriteCloser for the LZMA2 filter. +func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig, +) (fw io.WriteCloser, err error) { + config := new(lzma.Writer2Config) + if c != nil { + *config = lzma.Writer2Config{ + Properties: c.Properties, + DictCap: c.DictCap, + BufSize: c.BufSize, + Matcher: c.Matcher, + } + } + + dc := int(f.dictCap) + if dc < 1 { + return nil, errors.New("xz: LZMA2 filter parameter " + + "dictionary capacity overflow") + } + if dc > config.DictCap { + config.DictCap = dc + } + + fw, err = config.NewWriter2(w) + if err != nil { + return nil, err + } + return fw, nil +} + +// last returns true, because an LZMA2 filter must be the last filter in +// the filter list. +func (f lzmaFilter) last() bool { return true } diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs new file mode 100755 index 00000000..a8c612ce --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/make-docs @@ -0,0 +1,5 @@ +#!/bin/sh + +set -x +pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md +pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go new file mode 100644 index 00000000..0634c6bc --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -0,0 +1,373 @@ +// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xz supports the compression and decompression of xz files. It +// supports version 1.0.4 of the specification without the non-LZMA2 +// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt +package xz + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + + "github.com/ulikunitz/xz/internal/xlog" + "github.com/ulikunitz/xz/lzma" +) + +// ReaderConfig defines the parameters for the xz reader. The +// SingleStream parameter requests the reader to assume that the +// underlying stream contains only a single stream. +type ReaderConfig struct { + DictCap int + SingleStream bool +} + +// fill replaces all zero values with their default values. 
+func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader parameters for Validity. Zero values will be +// replaced by default values. +func (c *ReaderConfig) Verify() error { + if c == nil { + return errors.New("xz: reader parameters are nil") + } + lc := lzma.Reader2Config{DictCap: c.DictCap} + if err := lc.Verify(); err != nil { + return err + } + return nil +} + +// Reader supports the reading of one or multiple xz streams. +type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. +func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. +func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// errIndex indicates an error with the xz file index. +var errIndex = errors.New("xz: error in xz file index") + +// readTail reads the index body and the xz footer. 
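A hypothetical usage sketch for the xz Reader above; the input file name is made up.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	f, err := os.Open("data.xz") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	// NewReader checks the stream header and handles multiple
	// concatenated streams including padding.
	r, err := xz.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	if _, err = io.Copy(os.Stdout, r); err != nil {
		log.Fatal(err)
	}
}
```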
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + if len(index) != len(r.index) { + return fmt.Errorf("xz: index length is %d; want %d", + len(index), len(r.index)) + } + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader + err error +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + br.r = io.TeeReader(fr, br.hash) + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// errBlockSize indicates that the size of the block in the block header +// is wrong. +var errBlockSize = errors.New("xz: wrong uncompressed size for block") + +// Read reads data from the block. 
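+// As an explanatory note (not upstream documentation): the byte counts
+// are checked against any sizes declared in the block header, and at
+// the end of the block the zero padding and the checksum are verified
+// before io.EOF is returned.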
+func (br *blockReader) Read(p []byte) (n int, err error) {
+	n, err = br.r.Read(p)
+	br.n += int64(n)
+
+	u := br.header.uncompressedSize
+	if u >= 0 && br.uncompressedSize() > u {
+		return n, errors.New("xz: wrong uncompressed size for block")
+	}
+	c := br.header.compressedSize
+	if c >= 0 && br.compressedSize() > c {
+		return n, errors.New("xz: wrong compressed size for block")
+	}
+	if err != io.EOF {
+		return n, err
+	}
+	if br.uncompressedSize() < u || br.compressedSize() < c {
+		return n, io.ErrUnexpectedEOF
+	}
+
+	s := br.hash.Size()
+	k := padLen(br.lxz.n)
+	q := make([]byte, k+s, k+2*s)
+	if _, err = io.ReadFull(br.lxz.r, q); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return n, err
+	}
+	if !allZeros(q[:k]) {
+		return n, errors.New("xz: non-zero block padding")
+	}
+	checkSum := q[k:]
+	computedSum := br.hash.Sum(checkSum[s:])
+	if !bytes.Equal(checkSum, computedSum) {
+		return n, errors.New("xz: checksum error for block")
+	}
+	return n, io.EOF
+}
+
+func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader,
+	err error) {
+
+	if err = verifyFilters(f); err != nil {
+		return nil, err
+	}
+
+	fr = r
+	for i := len(f) - 1; i >= 0; i-- {
+		fr, err = f[i].reader(fr, c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return fr, nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go
new file mode 100644
index 00000000..c126f709
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/writer.go
@@ -0,0 +1,386 @@
+// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"hash"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// WriterConfig describes the parameters for an xz writer.
+type WriterConfig struct {
+	Properties *lzma.Properties
+	DictCap    int
+	BufSize    int
+	BlockSize  int64
+	// checksum method: CRC32, CRC64 or SHA256
+	CheckSum byte
+	// match algorithm
+	Matcher lzma.MatchAlgorithm
+}
+
+// fill replaces zero values with default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.BlockSize == 0 {
+		c.BlockSize = maxInt64
+	}
+	if c.CheckSum == 0 {
+		c.CheckSum = CRC64
+	}
+}
+
+// Verify checks the configuration for errors. Zero values will be
+// replaced by default values.
+func (c *WriterConfig) Verify() error {
+	if c == nil {
+		return errors.New("xz: writer configuration is nil")
+	}
+	c.fill()
+	lc := lzma.Writer2Config{
+		Properties: c.Properties,
+		DictCap:    c.DictCap,
+		BufSize:    c.BufSize,
+		Matcher:    c.Matcher,
+	}
+	if err := lc.Verify(); err != nil {
+		return err
+	}
+	if c.BlockSize <= 0 {
+		return errors.New("xz: block size out of range")
+	}
+	if err := verifyFlags(c.CheckSum); err != nil {
+		return err
+	}
+	return nil
+}
+
+// filters creates the filter list for the given parameters.
+func (c *WriterConfig) filters() []filter {
+	return []filter{&lzmaFilter{int64(c.DictCap)}}
+}
+
+// maxInt64 defines the maximum 64-bit signed integer.
+const maxInt64 = 1<<63 - 1
+
+// verifyFilters checks the filter list for the length and the right
+// sequence of filters.
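+//
+// Illustrative example (not upstream documentation): the single-element
+// filter list produced by WriterConfig.filters satisfies these rules,
+// because the LZMA2 filter reports last() == true.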
+func verifyFilters(f []filter) error {
+	if len(f) == 0 {
+		return errors.New("xz: no filters")
+	}
+	if len(f) > 4 {
+		return errors.New("xz: more than four filters")
+	}
+	for _, g := range f[:len(f)-1] {
+		if g.last() {
+			return errors.New("xz: last filter is not last")
+		}
+	}
+	if !f[len(f)-1].last() {
+		return errors.New("xz: wrong last filter")
+	}
+	return nil
+}
+
+// newFilterWriteCloser converts a filter list into a WriteCloser that
+// can be used by a blockWriter.
+func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) {
+	if err = verifyFilters(f); err != nil {
+		return nil, err
+	}
+	fw = nopWriteCloser(w)
+	for i := len(f) - 1; i >= 0; i-- {
+		fw, err = f[i].writeCloser(fw, c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return fw, nil
+}
+
+// nopWCloser implements a WriteCloser with a Close method not doing
+// anything.
+type nopWCloser struct {
+	io.Writer
+}
+
+// Close returns nil and doesn't do anything else.
+func (c nopWCloser) Close() error {
+	return nil
+}
+
+// nopWriteCloser converts the Writer into a WriteCloser with a Close
+// function that does nothing besides returning nil.
+func nopWriteCloser(w io.Writer) io.WriteCloser {
+	return nopWCloser{w}
+}
+
+// Writer compresses data written to it. It is an io.WriteCloser.
+type Writer struct {
+	WriterConfig
+
+	xz      io.Writer
+	bw      *blockWriter
+	newHash func() hash.Hash
+	h       header
+	index   []record
+	closed  bool
+}
+
+// newBlockWriter creates a new block writer and writes the header out.
+func (w *Writer) newBlockWriter() error {
+	var err error
+	w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash())
+	if err != nil {
+		return err
+	}
+	if err = w.bw.writeHeader(w.xz); err != nil {
+		return err
+	}
+	return nil
+}
+
+// closeBlockWriter closes a block writer and records the sizes in the
+// index.
+func (w *Writer) closeBlockWriter() error {
+	var err error
+	if err = w.bw.Close(); err != nil {
+		return err
+	}
+	w.index = append(w.index, w.bw.record())
+	return nil
+}
+
+// NewWriter creates a new xz writer using default parameters.
+func NewWriter(xz io.Writer) (w *Writer, err error) {
+	return WriterConfig{}.NewWriter(xz)
+}
+
+// NewWriter creates a new Writer using the given configuration parameters.
+func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer{
+		WriterConfig: c,
+		xz:           xz,
+		h:            header{c.CheckSum},
+		index:        make([]record, 0, 4),
+	}
+	if w.newHash, err = newHashFunc(c.CheckSum); err != nil {
+		return nil, err
+	}
+	data, err := w.h.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+	if _, err = xz.Write(data); err != nil {
+		return nil, err
+	}
+	if err = w.newBlockWriter(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// Write compresses the uncompressed data provided.
+func (w *Writer) Write(p []byte) (n int, err error) {
+	if w.closed {
+		return 0, errClosed
+	}
+	for {
+		k, err := w.bw.Write(p[n:])
+		n += k
+		if err != errNoSpace {
+			return n, err
+		}
+		if err = w.closeBlockWriter(); err != nil {
+			return n, err
+		}
+		if err = w.newBlockWriter(); err != nil {
+			return n, err
+		}
+	}
+}
+
+// Close closes the writer and adds the footer to the Writer. Close
+// doesn't close the underlying writer.
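+//
+// A minimal usage sketch (illustrative only, not part of the vendored
+// sources):
+//
+//	var buf bytes.Buffer
+//	w, err := NewWriter(&buf)
+//	if err != nil {
+//		// handle error
+//	}
+//	if _, err := w.Write([]byte("hello")); err != nil {
+//		// handle error
+//	}
+//	if err := w.Close(); err != nil {
+//		// handle error
+//	}
+//	// buf now contains a complete xz stream readable by NewReader.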
+func (w *Writer) Close() error {
+	if w.closed {
+		return errClosed
+	}
+	w.closed = true
+	var err error
+	if err = w.closeBlockWriter(); err != nil {
+		return err
+	}
+
+	f := footer{flags: w.h.flags}
+	if f.indexSize, err = writeIndex(w.xz, w.index); err != nil {
+		return err
+	}
+	data, err := f.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.xz.Write(data); err != nil {
+		return err
+	}
+	return nil
+}
+
+// countingWriter is a writer that counts all data written to it.
+type countingWriter struct {
+	w io.Writer
+	n int64
+}
+
+// Write writes data to the countingWriter.
+func (cw *countingWriter) Write(p []byte) (n int, err error) {
+	n, err = cw.w.Write(p)
+	cw.n += int64(n)
+	if err == nil && cw.n < 0 {
+		return n, errors.New("xz: counter overflow")
+	}
+	return
+}
+
+// blockWriter writes a single block.
+type blockWriter struct {
+	cxz countingWriter
+	// mw combines io.WriteCloser w and the hash.
+	mw        io.Writer
+	w         io.WriteCloser
+	n         int64
+	blockSize int64
+	closed    bool
+	headerLen int
+
+	filters []filter
+	hash    hash.Hash
+}
+
+// newBlockWriter creates a new block writer.
+func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) {
+	bw = &blockWriter{
+		cxz:       countingWriter{w: xz},
+		blockSize: c.BlockSize,
+		filters:   c.filters(),
+		hash:      hash,
+	}
+	bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters)
+	if err != nil {
+		return nil, err
+	}
+	bw.mw = io.MultiWriter(bw.w, bw.hash)
+	return bw, nil
+}
+
+// writeHeader writes the header. If the function is called after Close
+// the compressedSize and uncompressedSize fields will be filled.
+func (bw *blockWriter) writeHeader(w io.Writer) error {
+	h := blockHeader{
+		compressedSize:   -1,
+		uncompressedSize: -1,
+		filters:          bw.filters,
+	}
+	if bw.closed {
+		h.compressedSize = bw.compressedSize()
+		h.uncompressedSize = bw.uncompressedSize()
+	}
+	data, err := h.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(data); err != nil {
+		return err
+	}
+	bw.headerLen = len(data)
+	return nil
+}
+
+// compressedSize returns the amount of data written to the underlying
+// stream.
+func (bw *blockWriter) compressedSize() int64 {
+	return bw.cxz.n
+}
+
+// uncompressedSize returns the amount of data written to the
+// blockWriter.
+func (bw *blockWriter) uncompressedSize() int64 {
+	return bw.n
+}
+
+// unpaddedSize returns the sum of the header length, the compressed
+// size of the block and the hash size.
+func (bw *blockWriter) unpaddedSize() int64 {
+	if bw.headerLen <= 0 {
+		panic("xz: block header not written")
+	}
+	n := int64(bw.headerLen)
+	n += bw.compressedSize()
+	n += int64(bw.hash.Size())
+	return n
+}
+
+// record returns the record for the current stream. Call Close before
+// calling this method.
+func (bw *blockWriter) record() record {
+	return record{bw.unpaddedSize(), bw.uncompressedSize()}
+}
+
+var errClosed = errors.New("xz: writer already closed")
+
+var errNoSpace = errors.New("xz: no space")
+
+// Write writes uncompressed data to the block writer.
+func (bw *blockWriter) Write(p []byte) (n int, err error) {
+	if bw.closed {
+		return 0, errClosed
+	}
+
+	t := bw.blockSize - bw.n
+	if int64(len(p)) > t {
+		err = errNoSpace
+		p = p[:t]
+	}
+
+	var werr error
+	n, werr = bw.mw.Write(p)
+	bw.n += int64(n)
+	if werr != nil {
+		return n, werr
+	}
+	return n, err
+}
+
+// Close closes the writer.
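+// As an explanatory note (not upstream documentation): Close flushes
+// the filter chain and then writes the block padding (padLen zero
+// bytes) followed by the hash sum of the uncompressed data.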
+func (bw *blockWriter) Close() error { + if bw.closed { + return errClosed + } + bw.closed = true + if err := bw.w.Close(); err != nil { + return err + } + s := bw.hash.Size() + k := padLen(bw.cxz.n) + p := make([]byte, k+s) + bw.hash.Sum(p[k:k]) + if _, err := bw.cxz.w.Write(p); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/zclconf/go-cty/LICENSE b/vendor/github.com/zclconf/go-cty/LICENSE new file mode 100644 index 00000000..d6503b55 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017-2018 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/zclconf/go-cty/cty/capsule.go b/vendor/github.com/zclconf/go-cty/cty/capsule.go new file mode 100644 index 00000000..d273d148 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/capsule.go @@ -0,0 +1,89 @@ +package cty + +import ( + "fmt" + "reflect" +) + +type capsuleType struct { + typeImplSigil + Name string + GoType reflect.Type +} + +func (t *capsuleType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(*capsuleType); ok { + // capsule types compare by pointer identity + return otherP == t + } + return false +} + +func (t *capsuleType) FriendlyName(mode friendlyTypeNameMode) string { + return t.Name +} + +func (t *capsuleType) GoString() string { + // To get a useful representation of our native type requires some + // shenanigans. + victimVal := reflect.Zero(t.GoType) + return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface()) +} + +// Capsule creates a new Capsule type. +// +// A Capsule type is a special type that can be used to transport arbitrary +// Go native values of a given type through the cty type system. A language +// that uses cty as its type system might, for example, provide functions +// that return capsule-typed values and then other functions that operate +// on those values. +// +// From cty's perspective, Capsule types have a few interesting characteristics, +// described in the following paragraphs. +// +// Each capsule type has an associated Go native type that it is able to +// transport. Capsule types compare by identity, so each call to the +// Capsule function creates an entirely-distinct cty Type, even if two calls +// use the same native type. +// +// Each capsule-typed value contains a pointer to a value of the given native +// type. 
A capsule-typed value supports no operations except equality, and
+// equality is implemented by pointer identity of the encapsulated pointer.
+//
+// The given name is used as the new type's "friendly name". This can be any
+// string in principle, but will usually be a short, all-lowercase name aimed
+// at users of the embedding language (i.e. not mentioning Go-specific details)
+// and will ideally not create ambiguity with any predefined cty type.
+//
+// Capsule types are never introduced by any standard cty operation, so a
+// calling application opts in to including them within its own type system
+// by creating them and introducing them via its own functions. At that point,
+// the application is responsible for dealing with any capsule-typed values
+// that might be returned.
+func Capsule(name string, nativeType reflect.Type) Type {
+	return Type{
+		&capsuleType{
+			Name:   name,
+			GoType: nativeType,
+		},
+	}
+}
+
+// IsCapsuleType returns true if this type is a capsule type, as created
+// by cty.Capsule.
+func (t Type) IsCapsuleType() bool {
+	_, ok := t.typeImpl.(*capsuleType)
+	return ok
+}
+
+// EncapsulatedType returns the encapsulated native type of a capsule type,
+// or panics if the receiver is not a Capsule type.
+//
+// Use IsCapsuleType to determine if this method is safe to call.
+func (t Type) EncapsulatedType() reflect.Type {
+	impl, ok := t.typeImpl.(*capsuleType)
+	if !ok {
+		panic("not a capsule type")
+	}
+	return impl.GoType
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/collection.go b/vendor/github.com/zclconf/go-cty/cty/collection.go
new file mode 100644
index 00000000..ab3919b1
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/collection.go
@@ -0,0 +1,34 @@
+package cty
+
+import (
+	"errors"
+)
+
+type collectionTypeImpl interface {
+	ElementType() Type
+}
+
+// IsCollectionType returns true if the given type supports the operations
+// that are defined for all collection types.
+func (t Type) IsCollectionType() bool {
+	_, ok := t.typeImpl.(collectionTypeImpl)
+	return ok
+}
+
+// ElementType returns the element type of the receiver if it is a collection
+// type, or panics if it is not. Use IsCollectionType first to test whether
+// this method will succeed.
+func (t Type) ElementType() Type {
+	if ct, ok := t.typeImpl.(collectionTypeImpl); ok {
+		return ct.ElementType()
+	}
+	panic(errors.New("not a collection type"))
+}
+
+// ElementCallback is a callback type used for iterating over elements of
+// collections and attributes of objects.
+//
+// The types of key and value depend on what type is being iterated over.
+// Return true to stop iterating after the current element, or false to
+// continue iterating.
+type ElementCallback func(key Value, val Value) (stop bool)
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
new file mode 100644
index 00000000..d84f6ac1
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/compare_types.go
@@ -0,0 +1,165 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// compareTypes implements a preference order for unification.
+//
+// The result of this method is not useful for anything other than unification
+// preferences, since it assumes that the caller will verify that any suggested
+// conversion is actually possible and it is thus able to make certain
+// optimistic assumptions.
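+//
+// For example (an illustrative note, not upstream documentation):
+// compareTypes(cty.String, cty.Number) returns -1 because String is
+// preferred (every primitive value can be represented as a string),
+// while compareTypes(cty.DynamicPseudoType, cty.String) returns 1
+// because DynamicPseudoType always has the lowest preference.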
+func compareTypes(a cty.Type, b cty.Type) int { + + // DynamicPseudoType always has lowest preference, because anything can + // convert to it (it acts as a placeholder for "any type") and we want + // to optimistically assume that any dynamics will converge on matching + // their neighbors. + if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType { + if a != cty.DynamicPseudoType { + return -1 + } + if b != cty.DynamicPseudoType { + return 1 + } + return 0 + } + + if a.IsPrimitiveType() && b.IsPrimitiveType() { + // String is a supertype of all primitive types, because we can + // represent all primitive values as specially-formatted strings. + if a == cty.String || b == cty.String { + if a != cty.String { + return 1 + } + if b != cty.String { + return -1 + } + return 0 + } + } + + if a.IsListType() && b.IsListType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsSetType() && b.IsSetType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsMapType() && b.IsMapType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + + // From this point on we may have swapped the two items in order to + // simplify our cases. Therefore any non-zero return after this point + // must be multiplied by "swap" to potentially invert the return value + // if needed. + swap := 1 + switch { + case a.IsTupleType() && b.IsListType(): + fallthrough + case a.IsObjectType() && b.IsMapType(): + fallthrough + case a.IsSetType() && b.IsTupleType(): + fallthrough + case a.IsSetType() && b.IsListType(): + a, b = b, a + swap = -1 + } + + if b.IsSetType() && (a.IsTupleType() || a.IsListType()) { + // We'll just optimistically assume that the element types are + // unifyable/convertible, and let a second recursive pass + // figure out how to make that so. + return -1 * swap + } + + if a.IsListType() && b.IsTupleType() { + // We'll just optimistically assume that the tuple's element types + // can be unified into something compatible with the list's element + // type. + return -1 * swap + } + + if a.IsMapType() && b.IsObjectType() { + // We'll just optimistically assume that the object's attribute types + // can be unified into something compatible with the map's element + // type. + return -1 * swap + } + + // For object and tuple types, comparing two types doesn't really tell + // the whole story because it may be possible to construct a new type C + // that is the supertype of both A and B by unifying each attribute/element + // separately. That possibility is handled by Unify as a follow-up if + // type sorting is insufficient to produce a valid result. + // + // Here we will take care of the simple possibilities where no new type + // is needed. 
+ if a.IsObjectType() && b.IsObjectType() { + atysA := a.AttributeTypes() + atysB := b.AttributeTypes() + + if len(atysA) != len(atysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for k := range atysA { + if _, has := atysB[k]; !has { + return 0 + } + + cmp := compareTypes(atysA[k], atysB[k]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + if a.IsTupleType() && b.IsTupleType() { + etysA := a.TupleElementTypes() + etysB := b.TupleElementTypes() + + if len(etysA) != len(etysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for i := range etysA { + cmp := compareTypes(etysA[i], etysB[i]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + + return 0 +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go new file mode 100644 index 00000000..c773b522 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go @@ -0,0 +1,136 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversion is an internal variant of Conversion that carries around +// a cty.Path to be used in error responses. +type conversion func(cty.Value, cty.Path) (cty.Value, error) + +func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { + conv := getConversionKnown(in, out, unsafe) + if conv == nil { + return nil + } + + // Wrap the conversion in some standard checks that we don't want to + // have to repeat in every conversion function. + return func(in cty.Value, path cty.Path) (cty.Value, error) { + if !in.IsKnown() { + return cty.UnknownVal(out), nil + } + if in.IsNull() { + // We'll pass through nulls, albeit type converted, and let + // the caller deal with whatever handling they want to do in + // case null values are considered valid in some applications. + return cty.NullVal(out), nil + } + + return conv(in, path) + } +} + +func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { + switch { + + case out == cty.DynamicPseudoType: + // Conversion *to* DynamicPseudoType means that the caller wishes + // to allow any type in this position, so we'll produce a do-nothing + // conversion that just passes through the value as-is. + return dynamicPassthrough + + case unsafe && in == cty.DynamicPseudoType: + // Conversion *from* DynamicPseudoType means that we have a value + // whose type isn't yet known during type checking. For these we will + // assume that conversion will succeed and deal with any errors that + // result (which is why we can only do this when "unsafe" is set). 
+		return dynamicFixup(out)
+
+	case in.IsPrimitiveType() && out.IsPrimitiveType():
+		conv := primitiveConversionsSafe[in][out]
+		if conv != nil {
+			return conv
+		}
+		if unsafe {
+			return primitiveConversionsUnsafe[in][out]
+		}
+		return nil
+
+	case out.IsObjectType() && in.IsObjectType():
+		return conversionObjectToObject(in, out, unsafe)
+
+	case out.IsListType() && (in.IsListType() || in.IsSetType()):
+		inEty := in.ElementType()
+		outEty := out.ElementType()
+		if inEty.Equals(outEty) {
+			// This indicates that we're converting from a list or set to
+			// a list of the same element type, so we don't need an
+			// element converter.
+			return conversionCollectionToList(outEty, nil)
+		}
+
+		convEty := getConversion(inEty, outEty, unsafe)
+		if convEty == nil {
+			return nil
+		}
+		return conversionCollectionToList(outEty, convEty)
+
+	case out.IsSetType() && (in.IsListType() || in.IsSetType()):
+		if in.IsListType() && !unsafe {
+			// Conversion from list to set is unsafe because it will lose
+			// information: the ordering will not be preserved, and any
+			// duplicate elements will be conflated.
+			return nil
+		}
+		inEty := in.ElementType()
+		outEty := out.ElementType()
+		convEty := getConversion(inEty, outEty, unsafe)
+		if inEty.Equals(outEty) {
+			// This indicates that we're converting from a list or set to
+			// a set of the same element type, so we don't need an
+			// element converter.
+			return conversionCollectionToSet(outEty, nil)
+		}
+
+		if convEty == nil {
+			return nil
+		}
+		return conversionCollectionToSet(outEty, convEty)
+
+	case out.IsMapType() && in.IsMapType():
+		inEty := in.ElementType()
+		outEty := out.ElementType()
+		convEty := getConversion(inEty, outEty, unsafe)
+		if convEty == nil {
+			return nil
+		}
+		return conversionCollectionToMap(outEty, convEty)
+
+	case out.IsListType() && in.IsTupleType():
+		outEty := out.ElementType()
+		return conversionTupleToList(in, outEty, unsafe)
+
+	case out.IsSetType() && in.IsTupleType():
+		outEty := out.ElementType()
+		return conversionTupleToSet(in, outEty, unsafe)
+
+	case out.IsMapType() && in.IsObjectType():
+		outEty := out.ElementType()
+		return conversionObjectToMap(in, outEty, unsafe)
+
+	default:
+		return nil
+
+	}
+}
+
+// retConversion wraps a conversion (internal type) so it can be returned
+// as a Conversion (public type).
+func retConversion(conv conversion) Conversion {
+	if conv == nil {
+		return nil
+	}
+
+	return func(in cty.Value) (cty.Value, error) {
+		return conv(in, cty.Path(nil))
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
new file mode 100644
index 00000000..c2ac14ec
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go
@@ -0,0 +1,340 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// conversionCollectionToList returns a conversion that will apply the given
+// conversion to all of the elements of a collection (something that supports
+// ForEachElement and LengthInt) and then returns the result as a list.
+//
+// "conv" can be nil if the elements are expected to already be of the
+// correct type and just need to be re-wrapped into a list. (For example,
+// if we're converting from a set into a list of the same element type.)
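+//
+// Illustrative example (not upstream documentation): converting a
+// cty.Set(cty.String) value to cty.List(cty.String) uses
+// conversionCollectionToList(cty.String, nil), since only the outer
+// collection kind changes.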
+func conversionCollectionToList(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(elems), nil + } +} + +// conversionCollectionToSet returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a set. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a set. (For example, +// if we're converting from a list into a set of the same element type.) +func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(elems), nil + } +} + +// conversionCollectionToMap returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a map. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a map. +func conversionCollectionToMap(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, 0) + path = append(path, nil) + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + keyStr, err := Convert(key, cty.String) + if err != nil { + // Should never happen, because keys can only be numbers or + // strings and both can convert to string. + return cty.DynamicVal, path.NewErrorf("cannot convert key type %s to string for map", key.Type().FriendlyName()) + } + + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + elems[keyStr.AsString()] = val + } + + if len(elems) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(elems), nil + } +} + +// conversionTupleToSet returns a conversion that will take a value of the +// given tuple type and return a set of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. 
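+//
+// Illustrative example (not upstream documentation): converting a
+// tuple of [cty.String, cty.Number] to cty.Set(cty.DynamicPseudoType)
+// lets unify choose cty.String as the element type, and the Number
+// element is then converted with the safe number-to-string conversion.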
+func conversionTupleToSet(tupleType cty.Type, setEty cty.Type, unsafe bool) conversion {
+	tupleEtys := tupleType.TupleElementTypes()
+
+	if len(tupleEtys) == 0 {
+		// Empty tuple short-circuit
+		return func(val cty.Value, path cty.Path) (cty.Value, error) {
+			return cty.SetValEmpty(setEty), nil
+		}
+	}
+
+	if setEty == cty.DynamicPseudoType {
+		// This is a special case where the caller wants us to find
+		// a suitable single type that all elements can convert to, if
+		// possible.
+		setEty, _ = unify(tupleEtys, unsafe)
+		if setEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make([]conversion, len(tupleEtys))
+	for i, tupleEty := range tupleEtys {
+		if tupleEty.Equals(setEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[i] = getConversion(tupleEty, setEty, unsafe)
+		if elemConvs[i] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make([]cty.Value, 0, len(elemConvs))
+		path = append(path, nil)
+		i := int64(0)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, val := it.Element()
+			var err error
+
+			path[len(path)-1] = cty.IndexStep{
+				Key: cty.NumberIntVal(i),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems = append(elems, val)
+
+			i++
+		}
+
+		return cty.SetVal(elems), nil
+	}
+}
+
+// conversionTupleToList returns a conversion that will take a value of the
+// given tuple type and return a list of the given element type.
+//
+// Will panic if the given tupleType isn't actually a tuple type.
+func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion {
+	tupleEtys := tupleType.TupleElementTypes()
+
+	if len(tupleEtys) == 0 {
+		// Empty tuple short-circuit
+		return func(val cty.Value, path cty.Path) (cty.Value, error) {
+			return cty.ListValEmpty(listEty), nil
+		}
+	}
+
+	if listEty == cty.DynamicPseudoType {
+		// This is a special case where the caller wants us to find
+		// a suitable single type that all elements can convert to, if
+		// possible.
+		listEty, _ = unify(tupleEtys, unsafe)
+		if listEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make([]conversion, len(tupleEtys))
+	for i, tupleEty := range tupleEtys {
+		if tupleEty.Equals(listEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
+		if elemConvs[i] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make([]cty.Value, 0, len(elemConvs))
+		path = append(path, nil)
+		i := int64(0)
+		it := val.ElementIterator()
+		for it.Next() {
+			_, val := it.Element()
+			var err error
+
+			path[len(path)-1] = cty.IndexStep{
+				Key: cty.NumberIntVal(i),
+			}
+
+			conv := elemConvs[i]
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems = append(elems, val)
+
+			i++
+		}
+
+		return cty.ListVal(elems), nil
+	}
+}
+
+// conversionObjectToMap returns a conversion that will take a value of the
+// given object type and return a map of the given element type.
+//
+// Will panic if the given objectType isn't actually an object type.
+func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion {
+	objectAtys := objectType.AttributeTypes()
+
+	if len(objectAtys) == 0 {
+		// Empty object short-circuit
+		return func(val cty.Value, path cty.Path) (cty.Value, error) {
+			return cty.MapValEmpty(mapEty), nil
+		}
+	}
+
+	if mapEty == cty.DynamicPseudoType {
+		// This is a special case where the caller wants us to find
+		// a suitable single type that all elements can convert to, if
+		// possible.
+		objectAtysList := make([]cty.Type, 0, len(objectAtys))
+		for _, aty := range objectAtys {
+			objectAtysList = append(objectAtysList, aty)
+		}
+		mapEty, _ = unify(objectAtysList, unsafe)
+		if mapEty == cty.NilType {
+			return nil
+		}
+	}
+
+	elemConvs := make(map[string]conversion, len(objectAtys))
+	for name, objectAty := range objectAtys {
+		if objectAty.Equals(mapEty) {
+			// no conversion required
+			continue
+		}
+
+		elemConvs[name] = getConversion(objectAty, mapEty, unsafe)
+		if elemConvs[name] == nil {
+			// If any of our element conversions are impossible, then our
+			// whole conversion is impossible.
+			return nil
+		}
+	}
+
+	// If we fall out here then a conversion is possible, using the
+	// element conversions in elemConvs
+	return func(val cty.Value, path cty.Path) (cty.Value, error) {
+		elems := make(map[string]cty.Value, len(elemConvs))
+		path = append(path, nil)
+		it := val.ElementIterator()
+		for it.Next() {
+			name, val := it.Element()
+			var err error
+
+			path[len(path)-1] = cty.IndexStep{
+				Key: name,
+			}
+
+			conv := elemConvs[name.AsString()]
+			if conv != nil {
+				val, err = conv(val, path)
+				if err != nil {
+					return cty.NilVal, err
+				}
+			}
+			elems[name.AsString()] = val
+		}
+
+		return cty.MapVal(elems), nil
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
new file mode 100644
index 00000000..4d19cf6c
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
@@ -0,0 +1,33 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// dynamicFixup deals with just-in-time conversions of values that were
+// input-typed as cty.DynamicPseudoType during analysis, ensuring that
+// we end up with the desired output type once the value is known, or
+// failing with an error if that is not possible.
+//
+// This is in the spirit of the cty philosophy of optimistically assuming that
+// DynamicPseudoType values will become the intended value eventually, and
+// dealing with any inconsistencies during final evaluation.
+func dynamicFixup(wantType cty.Type) conversion {
+	return func(in cty.Value, path cty.Path) (cty.Value, error) {
+		ret, err := Convert(in, wantType)
+		if err != nil {
+			// Re-wrap this error so that the returned path is relative
+			// to the caller's original value, rather than relative to our
+			// conversion value here.
+			return cty.NilVal, path.NewError(err)
+		}
+		return ret, nil
+	}
+}
+
+// dynamicPassthrough is an identity conversion that is used when the
+// target type is DynamicPseudoType, indicating that the caller doesn't care
+// which type is returned.
+func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { + return in, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go new file mode 100644 index 00000000..62dabb8d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_object.go @@ -0,0 +1,76 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// conversionObjectToObject returns a conversion that will make the input +// object type conform to the output object type, if possible. +// +// Conversion is possible only if the output type is a subset of the input +// type, meaning that each attribute of the output type has a corresponding +// attribute in the input type where a recursive conversion is available. +// +// Shallow object conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit the above definition of "subset". +func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion { + inAtys := in.AttributeTypes() + outAtys := out.AttributeTypes() + attrConvs := make(map[string]conversion) + + for name, outAty := range outAtys { + inAty, exists := inAtys[name] + if !exists { + // No conversion is available, then. + return nil + } + + if inAty.Equals(outAty) { + // No conversion needed, but we'll still record the attribute + // in our map for later reference. + attrConvs[name] = nil + continue + } + + attrConvs[name] = getConversion(inAty, outAty, unsafe) + if attrConvs[name] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. + return nil + } + } + + // If we get here then a conversion is possible, using the attribute + // conversions given in attrConvs. 
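+	// Note (explanatory, not from the upstream sources): the closure
+	// below iterates the *input* value's attributes, and any attribute
+	// that has no entry in attrConvs (because the output type doesn't
+	// include it) is skipped, which is how a wider object converts to a
+	// narrower one.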
+ return func(val cty.Value, path cty.Path) (cty.Value, error) { + attrVals := make(map[string]cty.Value, len(attrConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + for it := val.ElementIterator(); it.Next(); { + nameVal, val := it.Element() + var err error + + name := nameVal.AsString() + *pathStep = cty.GetAttrStep{ + Name: name, + } + + conv, exists := attrConvs[name] + if !exists { + continue + } + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + attrVals[name] = val + } + + return cty.ObjectVal(attrVals), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go new file mode 100644 index 00000000..e563ee34 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_primitive.go @@ -0,0 +1,50 @@ +package convert + +import ( + "math/big" + + "github.com/zclconf/go-cty/cty" +) + +var stringTrue = cty.StringVal("true") +var stringFalse = cty.StringVal("false") + +var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ + cty.Number: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + f := val.AsBigFloat() + return cty.StringVal(f.Text('f', -1)), nil + }, + }, + cty.Bool: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + if val.True() { + return stringTrue, nil + } else { + return stringFalse, nil + } + }, + }, +} + +var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ + cty.String: { + cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { + f, _, err := big.ParseFloat(val.AsString(), 10, 512, big.ToNearestEven) + if err != nil { + return cty.NilVal, path.NewErrorf("a number is required") + } + return cty.NumberVal(f), nil + }, + cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { + switch val.AsString() { + case "true", "1": + return cty.True, nil + case "false", "0": + return cty.False, nil + default: + return cty.NilVal, path.NewErrorf("a bool is required") + } + }, + }, +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/doc.go b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go new file mode 100644 index 00000000..2037299b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/doc.go @@ -0,0 +1,15 @@ +// Package convert contains some routines for converting between cty types. +// The intent of providing this package is to encourage applications using +// cty to have consistent type conversion behavior for maximal interoperability +// when Values pass from one application to another. +// +// The conversions are categorized into two categories. "Safe" conversions are +// ones that are guaranteed to succeed if given a non-null value of the +// appropriate source type. "Unsafe" conversions, on the other hand, are valid +// for only a subset of input values, and thus may fail with an error when +// called for values outside of that valid subset. +// +// The functions whose names end in Unsafe support all of the conversions that +// are supported by the corresponding functions whose names do not have that +// suffix, and then additional unsafe conversions as well. 
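+//
+// A minimal usage sketch (illustrative only, not part of the vendored
+// sources):
+//
+//	v, err := convert.Convert(cty.StringVal("5"), cty.Number)
+//	// err is nil and v is a cty.Number value; by contrast,
+//	// convert.GetConversion(cty.String, cty.Number) returns nil
+//	// because only an unsafe conversion exists in that direction.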
+package convert
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go b/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go
new file mode 100644
index 00000000..88a4a251
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/mismatch_msg.go
@@ -0,0 +1,135 @@
+package convert
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// MismatchMessage is a helper to return an English-language description of
+// the differences between got and want, phrased as a reason why got does
+// not conform to want.
+//
+// This function does not itself attempt conversion, and so it should generally
+// be used only after a conversion has failed, to report the conversion failure
+// to an English-speaking user. The result will be confusing if got is actually
+// conforming to or convertible to want.
+//
+// The shorthand helper function Convert uses this function internally to
+// produce its error messages, so callers of that function do not need to
+// also use MismatchMessage.
+//
+// This function is similar to Type.TestConformance, but it is tailored to
+// describing conversion failures and so the messages it generates relate
+// specifically to the conversion rules implemented in this package.
+func MismatchMessage(got, want cty.Type) string {
+	switch {
+
+	case got.IsObjectType() && want.IsObjectType():
+		// If both types are object types then we may be able to say something
+		// about their respective attributes.
+		return mismatchMessageObjects(got, want)
+
+	case got.IsTupleType() && want.IsListType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from tuple to list failed then it's because we couldn't
+		// find a common type to convert all of the tuple elements to.
+		return "all list elements must have the same type"
+
+	case got.IsTupleType() && want.IsSetType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from tuple to set failed then it's because we couldn't
+		// find a common type to convert all of the tuple elements to.
+		return "all set elements must have the same type"
+
+	case got.IsObjectType() && want.IsMapType() && want.ElementType() == cty.DynamicPseudoType:
+		// If conversion from object to map failed then it's because we couldn't
+		// find a common type to convert all of the object attributes to.
+		return "all map elements must have the same type"
+
+	default:
+		// If we have nothing better to say, we'll just state what was required.
+		return want.FriendlyNameForConstraint() + " required"
+	}
+}
+
+func mismatchMessageObjects(got, want cty.Type) string {
+	// Per our conversion rules, "got" is allowed to be a superset of "want",
+	// and so we'll produce error messages here under that assumption.
+	gotAtys := got.AttributeTypes()
+	wantAtys := want.AttributeTypes()
+
+	// If we find missing attributes then we'll report those in preference,
+	// but if not then we will report a maximum of one non-conforming
+	// attribute, just to keep our messages relatively terse.
+	// We'll also prefer to report a recursive type error from an _unsafe_
+	// conversion over a safe one, because these are subjectively more
+	// "serious".
+	var missingAttrs []string
+	var unsafeMismatchAttr string
+	var safeMismatchAttr string
+
+	for name, wantAty := range wantAtys {
+		gotAty, exists := gotAtys[name]
+		if !exists {
+			missingAttrs = append(missingAttrs, name)
+			continue
+		}
+
+		// We'll now try to convert these attributes in isolation and
+		// see if we have a nested conversion error to report.
+ // We'll try an unsafe conversion first, and then fall back on + // safe if unsafe is possible. + + // If we already have an unsafe mismatch attr error then we won't bother + // hunting for another one. + if unsafeMismatchAttr != "" { + continue + } + if conv := GetConversionUnsafe(gotAty, wantAty); conv == nil { + unsafeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + + // If we already have a safe mismatch attr error then we won't bother + // hunting for another one. + if safeMismatchAttr != "" { + continue + } + if conv := GetConversion(gotAty, wantAty); conv == nil { + safeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + } + + // We should now have collected at least one problem. If we have more than + // one then we'll use our preference order to decide what is most important + // to report. + switch { + + case len(missingAttrs) != 0: + switch len(missingAttrs) { + case 1: + return fmt.Sprintf("attribute %q is required", missingAttrs[0]) + case 2: + return fmt.Sprintf("attributes %q and %q are required", missingAttrs[0], missingAttrs[1]) + default: + sort.Strings(missingAttrs) + var buf bytes.Buffer + for _, name := range missingAttrs[:len(missingAttrs)-1] { + fmt.Fprintf(&buf, "%q, ", name) + } + fmt.Fprintf(&buf, "and %q", missingAttrs[len(missingAttrs)-1]) + return fmt.Sprintf("attributes %s are required", buf.Bytes()) + } + + case unsafeMismatchAttr != "": + return unsafeMismatchAttr + + case safeMismatchAttr != "": + return safeMismatchAttr + + default: + // We should never get here, but if we do then we'll return + // just a generic message. + return "incorrect object attributes" + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/public.go b/vendor/github.com/zclconf/go-cty/cty/convert/public.go new file mode 100644 index 00000000..af19bdc5 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/public.go @@ -0,0 +1,83 @@ +package convert + +import ( + "errors" + + "github.com/zclconf/go-cty/cty" +) + +// This file contains the public interface of this package, which is intended +// to be a small, convenient interface designed for easy integration into +// a hypothetical language type checker and interpreter. + +// Conversion is a named function type representing a conversion from a +// value of one type to a value of another type. +// +// The source type for a conversion is always the source type given to +// the function that returned the Conversion, but there is no way to recover +// that from a Conversion value itself. If a Conversion is given a value +// that is not of its expected type (with the exception of DynamicPseudoType, +// which is always supported) then the function may panic or produce undefined +// results. +type Conversion func(in cty.Value) (out cty.Value, err error) + +// GetConversion returns a Conversion between the given in and out Types if +// a safe one is available, or returns nil otherwise. +func GetConversion(in cty.Type, out cty.Type) Conversion { + return retConversion(getConversion(in, out, false)) +} + +// GetConversionUnsafe returns a Conversion between the given in and out Types +// if either a safe or unsafe one is available, or returns nil otherwise. 
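+//
+// For example (an illustrative note, not upstream documentation):
+// GetConversionUnsafe(cty.String, cty.Bool) is non-nil, since strings
+// like "true" and "false" can be parsed, whereas GetConversion returns
+// nil for the same pair because the parse can fail for other strings.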
+func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion {
+	return retConversion(getConversion(in, out, true))
+}
+
+// Convert returns the result of converting the given value to the given type
+// if a safe or unsafe conversion is available, or returns an error if such a
+// conversion is impossible.
+//
+// This is a convenience wrapper around calling GetConversionUnsafe and then
+// immediately passing the given value to the resulting function.
+func Convert(in cty.Value, want cty.Type) (cty.Value, error) {
+	if in.Type().Equals(want) {
+		return in, nil
+	}
+
+	conv := GetConversionUnsafe(in.Type(), want)
+	if conv == nil {
+		return cty.NilVal, errors.New(MismatchMessage(in.Type(), want))
+	}
+	return conv(in)
+}
+
+// Unify attempts to find the most general type that can be converted from
+// all of the given types. If this is possible, that type is returned along
+// with a slice of necessary conversions for some of the given types.
+//
+// If no common supertype can be found, this function returns cty.NilType and
+// a nil slice.
+//
+// If a common supertype *can* be found, the returned slice will always be
+// non-nil and will contain a non-nil conversion for each given type that
+// needs to be converted, with indices corresponding to the input slice.
+// Any given type that does *not* need conversion (because it is already of
+// the appropriate type) will have a nil Conversion.
+//
+// cty.DynamicPseudoType is, as usual, a special case. If the given type list
+// contains a mixture of dynamic and non-dynamic types, the dynamic types are
+// disregarded for type selection and a conversion is returned for them that
+// will attempt a late conversion of the given value to the target type,
+// failing with a conversion error if the eventual concrete type is not
+// compatible. If *all* given types are DynamicPseudoType, or in the
+// degenerate case of an empty slice of types, the returned type is itself
+// cty.DynamicPseudoType and no conversions are attempted.
+func Unify(types []cty.Type) (cty.Type, []Conversion) {
+	return unify(types, false)
+}
+
+// UnifyUnsafe is the same as Unify except that it may return unsafe
+// conversions in situations where a safe conversion isn't also available.
+func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) {
+	return unify(types, true)
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go
new file mode 100644
index 00000000..b7769106
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/sort_types.go
@@ -0,0 +1,69 @@
+package convert
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// sortTypes produces an ordering of the given types that serves as a
+// preference order for the result of unification of the given types.
+// The return value is a slice of indices into the given slice, and will
+// thus always be the same length as the given slice.
+//
+// The goal is that the most general of the given types will appear first
+// in the ordering. If there are uncomparable pairs of types in the list
+// then they will appear in an undefined order, and the unification pass
+// will presumably then fail.
+func sortTypes(tys []cty.Type) []int {
+	l := len(tys)
+
+	// First we build a graph whose edges represent "more general than",
+	// which we will then do a topological sort of.
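+	//
+	// For example (an illustrative note, not upstream documentation):
+	// for tys = []cty.Type{cty.Number, cty.String} the only edge runs
+	// from String to Number, so the result is []int{1, 0}, placing the
+	// more general String first.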
+ edges := make([][]int, l) + for i := 0; i < (l - 1); i++ { + for j := i + 1; j < l; j++ { + cmp := compareTypes(tys[i], tys[j]) + switch { + case cmp < 0: + edges[i] = append(edges[i], j) + case cmp > 0: + edges[j] = append(edges[j], i) + } + } + } + + // Compute the in-degree of each node + inDegree := make([]int, l) + for _, outs := range edges { + for _, j := range outs { + inDegree[j]++ + } + } + + // The array backing our result will double as our queue for visiting + // the nodes, with the queue slice moving along this array until it + // is empty and positioned at the end of the array. Thus our visiting + // order is also our result order. + result := make([]int, l) + queue := result[0:0] + + // Initialize the queue with any item of in-degree 0, preserving + // their relative order. + for i, n := range inDegree { + if n == 0 { + queue = append(queue, i) + } + } + + for len(queue) != 0 { + i := queue[0] + queue = queue[1:] + for _, j := range edges[i] { + inDegree[j]-- + if inDegree[j] == 0 { + queue = append(queue, j) + } + } + } + + return result +} diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/unify.go b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go new file mode 100644 index 00000000..bd6736b4 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/convert/unify.go @@ -0,0 +1,66 @@ +package convert + +import ( + "github.com/zclconf/go-cty/cty" +) + +// The current unify implementation is somewhat inefficient, but we accept this +// under the assumption that it will generally be used with small numbers of +// types and with types of reasonable complexity. However, it does have a +// "happy path" where all of the given types are equal. +// +// This function is likely to have poor performance in cases where any given +// types are very complex (lots of deeply-nested structures) or if the list +// of types itself is very large. In particular, it will walk the nested type +// structure under the given types several times, especially when given a +// list of types for which unification is not possible, since each permutation +// will be tried to determine that result. +func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + if len(types) == 0 { + // Degenerate case + return cty.NilType, nil + } + + prefOrder := sortTypes(types) + + // sortTypes gives us an order where earlier items are preferable as + // our result type. We'll now walk through these and choose the first + // one we encounter for which conversions exist for all source types. + conversions := make([]Conversion, len(types)) +Preferences: + for _, wantTypeIdx := range prefOrder { + wantType := types[wantTypeIdx] + for i, tryType := range types { + if i == wantTypeIdx { + // Don't need to convert our wanted type to itself + conversions[i] = nil + continue + } + + if tryType.Equals(wantType) { + conversions[i] = nil + continue + } + + if unsafe { + conversions[i] = GetConversionUnsafe(tryType, wantType) + } else { + conversions[i] = GetConversion(tryType, wantType) + } + + if conversions[i] == nil { + // wantType is not a suitable unification type, so we'll + // try the next one in our preference order. + continue Preferences + } + } + + return wantType, conversions + } + + // TODO: For structural types, try to invent a new type that they + // can all be unified to, by unifying their respective attributes. 
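+	//
+	// For example (an illustrative note, not upstream documentation):
+	// unifying cty.Object(map[string]cty.Type{"a": cty.String}) with
+	// cty.Object(map[string]cty.Type{"b": cty.String}) ends up here,
+	// because neither object type can convert to the other.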
+ + // If we fall out here, no unification is possible + return cty.NilType, nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/doc.go b/vendor/github.com/zclconf/go-cty/cty/doc.go new file mode 100644 index 00000000..d31f0547 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/doc.go @@ -0,0 +1,18 @@ +// Package cty (pronounced see-tie) provides some infrastructure for a type +// system that might be useful for applications that need to represent +// configuration values provided by the user whose types are not known +// at compile time, particularly if the calling application also allows +// such values to be used in expressions. +// +// The type system consists of primitive types Number, String and Bool, as +// well as List and Map collection types and Object types that can have +// arbitrarily-typed sets of attributes. +// +// A set of operations is defined on these types, which is accessible via +// the wrapper struct Value, which annotates the raw, internal representation +// of a value with its corresponding type. +// +// This package is oriented towards being a building block for configuration +// languages used to bootstrap an application. It is not optimized for use +// in tight loops where CPU time or memory pressure are a concern. +package cty diff --git a/vendor/github.com/zclconf/go-cty/cty/element_iterator.go b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go new file mode 100644 index 00000000..0bf84c77 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/element_iterator.go @@ -0,0 +1,191 @@ +package cty + +import ( + "sort" + + "github.com/zclconf/go-cty/cty/set" +) + +// ElementIterator is the interface type returned by Value.ElementIterator to +// allow the caller to iterate over elements of a collection-typed value. +// +// Its usage pattern is as follows: +// +// it := val.ElementIterator() +// for it.Next() { +// key, val := it.Element() +// // ... +// } +type ElementIterator interface { + Next() bool + Element() (key Value, value Value) +} + +func canElementIterator(val Value) bool { + switch { + case val.ty.IsListType(): + return true + case val.ty.IsMapType(): + return true + case val.ty.IsSetType(): + return true + case val.ty.IsTupleType(): + return true + case val.ty.IsObjectType(): + return true + default: + return false + } +} + +func elementIterator(val Value) ElementIterator { + switch { + case val.ty.IsListType(): + return &listElementIterator{ + ety: val.ty.ElementType(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsMapType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same input map. + rawMap := val.v.(map[string]interface{}) + keys := make([]string, 0, len(rawMap)) + for key := range rawMap { + keys = append(keys, key) + } + sort.Strings(keys) + + return &mapElementIterator{ + ety: val.ty.ElementType(), + vals: rawMap, + keys: keys, + idx: -1, + } + case val.ty.IsSetType(): + rawSet := val.v.(set.Set) + return &setElementIterator{ + ety: val.ty.ElementType(), + setIt: rawSet.Iterator(), + } + case val.ty.IsTupleType(): + return &tupleElementIterator{ + etys: val.ty.TupleElementTypes(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsObjectType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same object type. 
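The lexicographical-ordering guarantee above can be seen in a small hedged sketch (editorial example): iterating a map value should visit keys in sorted order regardless of how the map literal was written.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	m := cty.MapVal(map[string]cty.Value{
		"b": cty.NumberIntVal(2),
		"a": cty.NumberIntVal(1),
	})
	it := m.ElementIterator()
	for it.Next() {
		k, v := it.Element()
		fmt.Println(k.AsString(), v.AsBigFloat()) // expected: "a 1", then "b 2"
	}
}
```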
+	atys := val.ty.AttributeTypes()
+	keys := make([]string, 0, len(atys))
+	for key := range atys {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	return &objectElementIterator{
+		atys:      atys,
+		vals:      val.v.(map[string]interface{}),
+		attrNames: keys,
+		idx:       -1,
+	}
+	default:
+		panic("attempt to iterate on non-collection, non-tuple type")
+	}
+}
+
+type listElementIterator struct {
+	ety  Type
+	vals []interface{}
+	idx  int
+}
+
+func (it *listElementIterator) Element() (Value, Value) {
+	i := it.idx
+	return NumberIntVal(int64(i)), Value{
+		ty: it.ety,
+		v:  it.vals[i],
+	}
+}
+
+func (it *listElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.vals)
+}
+
+type mapElementIterator struct {
+	ety  Type
+	vals map[string]interface{}
+	keys []string
+	idx  int
+}
+
+func (it *mapElementIterator) Element() (Value, Value) {
+	key := it.keys[it.idx]
+	return StringVal(key), Value{
+		ty: it.ety,
+		v:  it.vals[key],
+	}
+}
+
+func (it *mapElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.keys)
+}
+
+type setElementIterator struct {
+	ety   Type
+	setIt *set.Iterator
+}
+
+func (it *setElementIterator) Element() (Value, Value) {
+	val := Value{
+		ty: it.ety,
+		v:  it.setIt.Value(),
+	}
+	return val, val
+}
+
+func (it *setElementIterator) Next() bool {
+	return it.setIt.Next()
+}
+
+type tupleElementIterator struct {
+	etys []Type
+	vals []interface{}
+	idx  int
+}
+
+func (it *tupleElementIterator) Element() (Value, Value) {
+	i := it.idx
+	return NumberIntVal(int64(i)), Value{
+		ty: it.etys[i],
+		v:  it.vals[i],
+	}
+}
+
+func (it *tupleElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.vals)
+}
+
+type objectElementIterator struct {
+	atys      map[string]Type
+	vals      map[string]interface{}
+	attrNames []string
+	idx       int
+}
+
+func (it *objectElementIterator) Element() (Value, Value) {
+	key := it.attrNames[it.idx]
+	return StringVal(key), Value{
+		ty: it.atys[key],
+		v:  it.vals[key],
+	}
+}
+
+func (it *objectElementIterator) Next() bool {
+	it.idx++
+	return it.idx < len(it.attrNames)
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/error.go b/vendor/github.com/zclconf/go-cty/cty/error.go
new file mode 100644
index 00000000..dd139f72
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/error.go
@@ -0,0 +1,55 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// PathError is a specialization of error that represents where in a
+// potentially-deep data structure an error occurred, using a Path.
+type PathError struct {
+	error
+	Path Path
+}
+
+func errorf(path Path, f string, args ...interface{}) error {
+	// We need to copy the Path because often our caller builds it by
+	// continually mutating the same underlying buffer.
+	sPath := make(Path, len(path))
+	copy(sPath, path)
+	return PathError{
+		error: fmt.Errorf(f, args...),
+		Path:  sPath,
+	}
+}
+
+// NewErrorf creates a new PathError for the current path by passing the
+// given format and arguments to fmt.Errorf and then wrapping the result
+// similarly to NewError.
+func (p Path) NewErrorf(f string, args ...interface{}) error {
+	return errorf(p, f, args...)
+}
+
+// NewError creates a new PathError for the current path, wrapping the given
+// error.
+func (p Path) NewError(err error) error {
+	// If we're being asked to wrap an existing PathError then our new
+	// PathError will be the concatenation of the two paths, ensuring
+	// that we still get a single flat PathError that's thus easier for
+	// callers to deal with.
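A hedged sketch of how the Path error helpers in error.go are meant to be used (editorial example; Path's Index and GetAttr builders live elsewhere in the cty package):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Build a path pointing at list index 2, attribute "name", then
	// attach an error message to that location.
	path := cty.Path{}.Index(cty.NumberIntVal(2)).GetAttr("name")
	err := path.NewErrorf("a string is required")

	// Callers recover the location via a type assertion.
	if perr, ok := err.(cty.PathError); ok {
		fmt.Println(len(perr.Path), perr.Error()) // expected: 2 a string is required
	}
}
```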
+	perr, wrappingPath := err.(PathError)
+	pathLen := len(p)
+	if wrappingPath {
+		pathLen = pathLen + len(perr.Path)
+	}
+
+	sPath := make(Path, pathLen)
+	copy(sPath, p)
+	if wrappingPath {
+		copy(sPath[len(p):], perr.Path)
+	}
+
+	return PathError{
+		error: err,
+		Path:  sPath,
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/argument.go b/vendor/github.com/zclconf/go-cty/cty/function/argument.go
new file mode 100644
index 00000000..bfd30157
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/argument.go
@@ -0,0 +1,50 @@
+package function
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Parameter represents a parameter to a function.
+type Parameter struct {
+	// Name is an optional name for the argument. This package ignores this
+	// value, but callers may use it for documentation, etc.
+	Name string
+
+	// A type that any argument for this parameter must conform to.
+	// cty.DynamicPseudoType can be used, either at top-level or nested
+	// in a parameterized type, to indicate that any type should be
+	// permitted, to allow the definition of type-generic functions.
+	Type cty.Type
+
+	// If AllowNull is set then null values may be passed into this
+	// argument's slot in both the type-check function and the implementation
+	// function. If not set, such values are rejected by the built-in
+	// checking rules.
+	AllowNull bool
+
+	// If AllowUnknown is set then unknown values may be passed into this
+	// argument's slot in the implementation function. If not set, any
+	// unknown values will cause the function to immediately return
+	// an unknown value without calling the implementation function, thus
+	// freeing the function implementer from dealing with this case.
+	AllowUnknown bool
+
+	// If AllowDynamicType is set then DynamicVal may be passed into this
+	// argument's slot in the implementation function. If not set, any
+	// dynamic values will cause the function to immediately return
+	// DynamicVal without calling the implementation function, thus
+	// freeing the function implementer from dealing with this case.
+	//
+	// Note that DynamicVal is also unknown, so in order to receive dynamic
+	// *values* it is also necessary to set AllowUnknown.
+	//
+	// However, it is valid to set AllowDynamicType without AllowUnknown, in
+	// which case a dynamic value may be passed to the type checking function
+	// but will not make it to the *implementation* function. Instead, an
+	// unknown value of the type returned by the type-check function will be
+	// returned. This is suggested for functions that have a static return
+	// type since it allows the return value to be typed even if the input
+	// values are not, thus improving the type-check accuracy of derived
+	// values.
+	AllowDynamicType bool
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/doc.go
new file mode 100644
index 00000000..393b3110
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/doc.go
@@ -0,0 +1,6 @@
+// Package function builds on the functionality of cty by modeling functions
+// that operate on cty Values.
+//
+// Functions are, at their core, Go anonymous functions. However, this package
+// wraps them with utility functions for parameter type checking, etc.
+package function diff --git a/vendor/github.com/zclconf/go-cty/cty/function/error.go b/vendor/github.com/zclconf/go-cty/cty/function/error.go new file mode 100644 index 00000000..2b567799 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/error.go @@ -0,0 +1,50 @@ +package function + +import ( + "fmt" + "runtime/debug" +) + +// ArgError represents an error with one of the arguments in a call. The +// attribute Index represents the zero-based index of the argument in question. +// +// Its error *may* be a cty.PathError, in which case the error actually +// pertains to a nested value within the data structure passed as the argument. +type ArgError struct { + error + Index int +} + +func NewArgErrorf(i int, f string, args ...interface{}) error { + return ArgError{ + error: fmt.Errorf(f, args...), + Index: i, + } +} + +func NewArgError(i int, err error) error { + return ArgError{ + error: err, + Index: i, + } +} + +// PanicError indicates that a panic occurred while executing either a +// function's type or implementation function. This is captured and wrapped +// into a normal error so that callers (expected to be language runtimes) +// are freed from having to deal with panics in buggy functions. +type PanicError struct { + Value interface{} + Stack []byte +} + +func errorForPanic(val interface{}) error { + return PanicError{ + Value: val, + Stack: debug.Stack(), + } +} + +func (e PanicError) Error() string { + return fmt.Sprintf("panic in function implementation: %s\n%s", e.Value, e.Stack) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/function.go b/vendor/github.com/zclconf/go-cty/cty/function/function.go new file mode 100644 index 00000000..9e8bf337 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/function.go @@ -0,0 +1,291 @@ +package function + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// Function represents a function. This is the main type in this package. +type Function struct { + spec *Spec +} + +// Spec is the specification of a function, used to instantiate +// a new Function. +type Spec struct { + // Params is a description of the positional parameters for the function. + // The standard checking logic rejects any calls that do not provide + // arguments conforming to this definition, freeing the function + // implementer from dealing with such inconsistencies. + Params []Parameter + + // VarParam is an optional specification of additional "varargs" the + // function accepts. If this is non-nil then callers may provide an + // arbitrary number of additional arguments (after those matching with + // the fixed parameters in Params) that conform to the given specification, + // which will appear as additional values in the slices of values + // provided to the type and implementation functions. + VarParam *Parameter + + // Type is the TypeFunc that decides the return type of the function + // given its arguments, which may be Unknown. See the documentation + // of TypeFunc for more information. + // + // Use StaticReturnType if the function's return type does not vary + // depending on its arguments. + Type TypeFunc + + // Impl is the ImplFunc that implements the function's behavior. + // + // Functions are expected to behave as pure functions, and not create + // any visible side-effects. + // + // If a TypeFunc is also provided, the value returned from Impl *must* + // conform to the type it returns, or a call to the function will panic. 
+ Impl ImplFunc +} + +// New creates a new function with the given specification. +// +// After passing a Spec to this function, the caller must no longer read from +// or mutate it. +func New(spec *Spec) Function { + f := Function{ + spec: spec, + } + return f +} + +// TypeFunc is a callback type for determining the return type of a function +// given its arguments. +// +// Any of the values passed to this function may be unknown, even if the +// parameters are not configured to accept unknowns. +// +// If any of the given values are *not* unknown, the TypeFunc may use the +// values for pre-validation and for choosing the return type. For example, +// a hypothetical JSON-unmarshalling function could return +// cty.DynamicPseudoType if the given JSON string is unknown, but return +// a concrete type based on the JSON structure if the JSON string is already +// known. +type TypeFunc func(args []cty.Value) (cty.Type, error) + +// ImplFunc is a callback type for the main implementation of a function. +// +// "args" are the values for the arguments, and this slice will always be at +// least as long as the argument definition slice for the function. +// +// "retType" is the type returned from the Type callback, included as a +// convenience to avoid the need to re-compute the return type for generic +// functions whose return type is a function of the arguments. +type ImplFunc func(args []cty.Value, retType cty.Type) (cty.Value, error) + +// StaticReturnType returns a TypeFunc that always returns the given type. +// +// This is provided as a convenience for defining a function whose return +// type does not depend on the argument types. +func StaticReturnType(ty cty.Type) TypeFunc { + return func([]cty.Value) (cty.Type, error) { + return ty, nil + } +} + +// ReturnType returns the return type of a function given a set of candidate +// argument types, or returns an error if the given types are unacceptable. +// +// If the caller already knows values for at least some of the arguments +// it can be better to call ReturnTypeForValues, since certain functions may +// determine their return types from their values and return DynamicVal if +// the values are unknown. +func (f Function) ReturnType(argTypes []cty.Type) (cty.Type, error) { + vals := make([]cty.Value, len(argTypes)) + for i, ty := range argTypes { + vals[i] = cty.UnknownVal(ty) + } + return f.ReturnTypeForValues(vals) +} + +// ReturnTypeForValues is similar to ReturnType but can be used if the caller +// already knows the values of some or all of the arguments, in which case +// the function may be able to determine a more definite result if its +// return type depends on the argument *values*. +// +// For any arguments whose values are not known, pass an Unknown value of +// the appropriate type. 
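Putting Spec, Parameter, StaticReturnType, and Impl together, a minimal hedged sketch of defining and invoking a function (editorial example; cty.Value's Add method does the arithmetic, and Call/Proxy are defined further down in this file):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

var addFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{Name: "a", Type: cty.Number},
		{Name: "b", Type: cty.Number},
	},
	Type: function.StaticReturnType(cty.Number),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		return args[0].Add(args[1]), nil
	},
})

func main() {
	v, err := addFunc.Call([]cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)})
	fmt.Printf("%#v %v\n", v, err) // expected: cty.NumberIntVal(3) <nil>

	// Proxy offers a variadic convenience wrapper over Call.
	v, _ = addFunc.Proxy()(cty.NumberIntVal(4), cty.NumberIntVal(5))
	fmt.Printf("%#v\n", v) // expected: cty.NumberIntVal(9)
}
```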
+func (f Function) ReturnTypeForValues(args []cty.Value) (ty cty.Type, err error) { + var posArgs []cty.Value + var varArgs []cty.Value + + if f.spec.VarParam == nil { + if len(args) != len(f.spec.Params) { + return cty.Type{}, fmt.Errorf( + "wrong number of arguments (%d required; %d given)", + len(f.spec.Params), len(args), + ) + } + + posArgs = args + varArgs = nil + } else { + if len(args) < len(f.spec.Params) { + return cty.Type{}, fmt.Errorf( + "wrong number of arguments (at least %d required; %d given)", + len(f.spec.Params), len(args), + ) + } + + posArgs = args[0:len(f.spec.Params)] + varArgs = args[len(f.spec.Params):] + } + + for i, spec := range f.spec.Params { + val := posArgs[i] + + if val.IsNull() && !spec.AllowNull { + return cty.Type{}, NewArgErrorf(i, "argument must not be null") + } + + // AllowUnknown is ignored for type-checking, since we expect to be + // able to type check with unknown values. We *do* still need to deal + // with DynamicPseudoType here though, since the Type function might + // not be ready to deal with that. + + if val.Type() == cty.DynamicPseudoType { + if !spec.AllowDynamicType { + return cty.DynamicPseudoType, nil + } + } else if errs := val.Type().TestConformance(spec.Type); errs != nil { + // For now we'll just return the first error in the set, since + // we don't have a good way to return the whole list here. + // Would be good to do something better at some point... + return cty.Type{}, NewArgError(i, errs[0]) + } + } + + if varArgs != nil { + spec := f.spec.VarParam + for i, val := range varArgs { + realI := i + len(posArgs) + + if val.IsNull() && !spec.AllowNull { + return cty.Type{}, NewArgErrorf(realI, "argument must not be null") + } + + if val.Type() == cty.DynamicPseudoType { + if !spec.AllowDynamicType { + return cty.DynamicPseudoType, nil + } + } else if errs := val.Type().TestConformance(spec.Type); errs != nil { + // For now we'll just return the first error in the set, since + // we don't have a good way to return the whole list here. + // Would be good to do something better at some point... + return cty.Type{}, NewArgError(i, errs[0]) + } + } + } + + // Intercept any panics from the function and return them as normal errors, + // so a calling language runtime doesn't need to deal with panics. + defer func() { + if r := recover(); r != nil { + ty = cty.NilType + err = errorForPanic(r) + } + }() + + return f.spec.Type(args) +} + +// Call actually calls the function with the given arguments, which must +// conform to the function's parameter specification or an error will be +// returned. +func (f Function) Call(args []cty.Value) (val cty.Value, err error) { + expectedType, err := f.ReturnTypeForValues(args) + if err != nil { + return cty.NilVal, err + } + + // Type checking already dealt with most situations relating to our + // parameter specification, but we still need to deal with unknown + // values. + posArgs := args[:len(f.spec.Params)] + varArgs := args[len(f.spec.Params):] + + for i, spec := range f.spec.Params { + val := posArgs[i] + + if !val.IsKnown() && !spec.AllowUnknown { + return cty.UnknownVal(expectedType), nil + } + } + + if f.spec.VarParam != nil { + spec := f.spec.VarParam + for _, val := range varArgs { + if !val.IsKnown() && !spec.AllowUnknown { + return cty.UnknownVal(expectedType), nil + } + } + } + + var retVal cty.Value + { + // Intercept any panics from the function and return them as normal errors, + // so a calling language runtime doesn't need to deal with panics. 
+ defer func() { + if r := recover(); r != nil { + val = cty.NilVal + err = errorForPanic(r) + } + }() + + retVal, err = f.spec.Impl(args, expectedType) + if err != nil { + return cty.NilVal, err + } + } + + // Returned value must conform to what the Type function expected, to + // protect callers from having to deal with inconsistencies. + if errs := retVal.Type().TestConformance(expectedType); errs != nil { + panic(fmt.Errorf( + "returned value %#v does not conform to expected return type %#v: %s", + retVal, expectedType, errs[0], + )) + } + + return retVal, nil +} + +// ProxyFunc the type returned by the method Function.Proxy. +type ProxyFunc func(args ...cty.Value) (cty.Value, error) + +// Proxy returns a function that can be called with cty.Value arguments +// to run the function. This is provided as a convenience for when using +// a function directly within Go code. +func (f Function) Proxy() ProxyFunc { + return func(args ...cty.Value) (cty.Value, error) { + return f.Call(args) + } +} + +// Params returns information about the function's fixed positional parameters. +// This does not include information about any variadic arguments accepted; +// for that, call VarParam. +func (f Function) Params() []Parameter { + new := make([]Parameter, len(f.spec.Params)) + copy(new, f.spec.Params) + return new +} + +// VarParam returns information about the variadic arguments the function +// expects, or nil if the function is not variadic. +func (f Function) VarParam() *Parameter { + if f.spec.VarParam == nil { + return nil + } + + ret := *f.spec.VarParam + return &ret +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go new file mode 100644 index 00000000..a473d0ec --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bool.go @@ -0,0 +1,73 @@ +package stdlib + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var NotFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "val", + Type: cty.Bool, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Not(), nil + }, +}) + +var AndFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Bool, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Bool, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].And(args[1]), nil + }, +}) + +var OrFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Bool, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Bool, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Or(args[1]), nil + }, +}) + +// Not returns the logical complement of the given boolean value. +func Not(num cty.Value) (cty.Value, error) { + return NotFunc.Call([]cty.Value{num}) +} + +// And returns true if and only if both of the given boolean values are true. +func And(a, b cty.Value) (cty.Value, error) { + return AndFunc.Call([]cty.Value{a, b}) +} + +// Or returns true if either of the given boolean values are true. 
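A hedged sketch of the three Boolean wrappers in use (editorial example; Or itself is defined immediately below):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	v, _ := stdlib.Not(cty.True)
	fmt.Printf("%#v\n", v) // expected: cty.False

	v, _ = stdlib.And(cty.True, cty.False)
	fmt.Printf("%#v\n", v) // expected: cty.False

	v, _ = stdlib.Or(cty.True, cty.False)
	fmt.Printf("%#v\n", v) // expected: cty.True
}
```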
+func Or(a, b cty.Value) (cty.Value, error) { + return OrFunc.Call([]cty.Value{a, b}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go new file mode 100644 index 00000000..a132e0cd --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/bytes.go @@ -0,0 +1,112 @@ +package stdlib + +import ( + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// Bytes is a capsule type that can be used with the binary functions to +// support applications that need to support raw buffers in addition to +// UTF-8 strings. +var Bytes = cty.Capsule("bytes", reflect.TypeOf([]byte(nil))) + +// BytesVal creates a new Bytes value from the given buffer, which must be +// non-nil or this function will panic. +// +// Once a byte slice has been wrapped in a Bytes capsule, its underlying array +// must be considered immutable. +func BytesVal(buf []byte) cty.Value { + if buf == nil { + panic("can't make Bytes value from nil slice") + } + + return cty.CapsuleVal(Bytes, &buf) +} + +// BytesLen is a Function that returns the length of the buffer encapsulated +// in a Bytes value. +var BytesLenFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "buf", + Type: Bytes, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bufPtr := args[0].EncapsulatedValue().(*[]byte) + return cty.NumberIntVal(int64(len(*bufPtr))), nil + }, +}) + +// BytesSlice is a Function that returns a slice of the given Bytes value. +var BytesSliceFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "buf", + Type: Bytes, + AllowDynamicType: true, + }, + { + Name: "offset", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "length", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(Bytes), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + bufPtr := args[0].EncapsulatedValue().(*[]byte) + + var offset, length int + + var err error + err = gocty.FromCtyValue(args[1], &offset) + if err != nil { + return cty.NilVal, err + } + err = gocty.FromCtyValue(args[2], &length) + if err != nil { + return cty.NilVal, err + } + + if offset < 0 || length < 0 { + return cty.NilVal, fmt.Errorf("offset and length must be non-negative") + } + + if offset > len(*bufPtr) { + return cty.NilVal, fmt.Errorf( + "offset %d is greater than total buffer length %d", + offset, len(*bufPtr), + ) + } + + end := offset + length + + if end > len(*bufPtr) { + return cty.NilVal, fmt.Errorf( + "offset %d + length %d is greater than total buffer length %d", + offset, length, len(*bufPtr), + ) + } + + return BytesVal((*bufPtr)[offset:end]), nil + }, +}) + +func BytesLen(buf cty.Value) (cty.Value, error) { + return BytesLenFunc.Call([]cty.Value{buf}) +} + +func BytesSlice(buf cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) { + return BytesSliceFunc.Call([]cty.Value{buf, offset, length}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go new file mode 100644 index 00000000..967ba03c --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/collection.go @@ -0,0 +1,140 @@ +package stdlib + +import ( + "fmt" + + 
"github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +var HasIndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + { + Name: "key", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy == cty.DynamicPseudoType) { + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + return cty.Bool, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].HasIndex(args[1]), nil + }, +}) + +var IndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + }, + { + Name: "key", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + key := args[1] + keyTy := key.Type() + switch { + case collTy.IsTupleType(): + if keyTy != cty.Number && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for tuple must be number") + } + if !key.IsKnown() { + return cty.DynamicPseudoType, nil + } + var idx int + err := gocty.FromCtyValue(key, &idx) + if err != nil { + return cty.NilType, fmt.Errorf("invalid key for tuple: %s", err) + } + + etys := collTy.TupleElementTypes() + + if idx >= len(etys) || idx < 0 { + return cty.NilType, fmt.Errorf("key must be between 0 and %d inclusive", len(etys)) + } + + return etys[idx], nil + + case collTy.IsListType(): + if keyTy != cty.Number && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for list must be number") + } + + return collTy.ElementType(), nil + + case collTy.IsMapType(): + if keyTy != cty.String && keyTy != cty.DynamicPseudoType { + return cty.NilType, fmt.Errorf("key for map must be string") + } + + return collTy.ElementType(), nil + + default: + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + has, err := HasIndex(args[0], args[1]) + if err != nil { + return cty.NilVal, err + } + if has.False() { // safe because collection and key are guaranteed known here + return cty.NilVal, fmt.Errorf("invalid index") + } + + return args[0].Index(args[1]), nil + }, +}) + +var LengthFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "collection", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + collTy := args[0].Type() + if !(collTy.IsTupleType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType) { + return cty.NilType, fmt.Errorf("collection must be a list, a map or a tuple") + } + return cty.Number, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Length(), nil + }, +}) + +// HasIndex determines whether the given collection can be indexed with the +// given key. 
+func HasIndex(collection cty.Value, key cty.Value) (cty.Value, error) { + return HasIndexFunc.Call([]cty.Value{collection, key}) +} + +// Index returns an element from the given collection using the given key, +// or returns an error if there is no element for the given key. +func Index(collection cty.Value, key cty.Value) (cty.Value, error) { + return IndexFunc.Call([]cty.Value{collection, key}) +} + +// Length returns the number of elements in the given collection. +func Length(collection cty.Value) (cty.Value, error) { + return LengthFunc.Call([]cty.Value{collection}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go new file mode 100644 index 00000000..5070a5ad --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/csv.go @@ -0,0 +1,93 @@ +package stdlib + +import ( + "encoding/csv" + "fmt" + "io" + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var CSVDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + str := args[0] + if !str.IsKnown() { + return cty.DynamicPseudoType, nil + } + + r := strings.NewReader(str.AsString()) + cr := csv.NewReader(r) + headers, err := cr.Read() + if err == io.EOF { + return cty.DynamicPseudoType, fmt.Errorf("missing header line") + } + if err != nil { + return cty.DynamicPseudoType, err + } + + atys := make(map[string]cty.Type, len(headers)) + for _, name := range headers { + if _, exists := atys[name]; exists { + return cty.DynamicPseudoType, fmt.Errorf("duplicate column name %q", name) + } + atys[name] = cty.String + } + return cty.List(cty.Object(atys)), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + ety := retType.ElementType() + atys := ety.AttributeTypes() + str := args[0] + r := strings.NewReader(str.AsString()) + cr := csv.NewReader(r) + cr.FieldsPerRecord = len(atys) + + // Read the header row first, since that'll tell us which indices + // map to which attribute names. + headers, err := cr.Read() + if err != nil { + return cty.DynamicVal, err + } + + var rows []cty.Value + for { + cols, err := cr.Read() + if err == io.EOF { + break + } + if err != nil { + return cty.DynamicVal, err + } + + vals := make(map[string]cty.Value, len(cols)) + for i, str := range cols { + name := headers[i] + vals[name] = cty.StringVal(str) + } + rows = append(rows, cty.ObjectVal(vals)) + } + + if len(rows) == 0 { + return cty.ListValEmpty(ety), nil + } + return cty.ListVal(rows), nil + }, +}) + +// CSVDecode parses the given CSV (RFC 4180) string and, if it is valid, +// returns a list of objects representing the rows. +// +// The result is always a list of some object type. The first row of the +// input is used to determine the object attributes, and subsequent rows +// determine the values of those attributes. 
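A hedged sketch combining the collection helpers above with CSVDecode, whose wrapper follows (editorial example; the expected values assume the header-derived object type described in the CSVDecode doc comment):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	rows, err := stdlib.CSVDecode(cty.StringVal("name,size\nfoo,1\nbar,2\n"))
	if err != nil {
		panic(err)
	}

	n, _ := stdlib.Length(rows)
	fmt.Printf("%#v\n", n) // expected: cty.NumberIntVal(2)

	has, _ := stdlib.HasIndex(rows, cty.NumberIntVal(0))
	fmt.Printf("%#v\n", has) // expected: cty.True

	row, _ := stdlib.Index(rows, cty.NumberIntVal(0))
	fmt.Printf("%#v\n", row) // expected: object with name="foo", size="1"
}
```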
+func CSVDecode(str cty.Value) (cty.Value, error) { + return CSVDecodeFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go new file mode 100644 index 00000000..aa15b7bd --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/datetime.go @@ -0,0 +1,385 @@ +package stdlib + +import ( + "bufio" + "bytes" + "fmt" + "strings" + "time" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var FormatDateFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + { + Name: "time", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + formatStr := args[0].AsString() + timeStr := args[1].AsString() + t, err := parseTimestamp(timeStr) + if err != nil { + return cty.DynamicVal, function.NewArgError(1, err) + } + + var buf bytes.Buffer + sc := bufio.NewScanner(strings.NewReader(formatStr)) + sc.Split(splitDateFormat) + const esc = '\'' + for sc.Scan() { + tok := sc.Bytes() + + // The leading byte signals the token type + switch { + case tok[0] == esc: + if tok[len(tok)-1] != esc || len(tok) == 1 { + return cty.DynamicVal, function.NewArgErrorf(0, "unterminated literal '") + } + if len(tok) == 2 { + // Must be a single escaped quote, '' + buf.WriteByte(esc) + } else { + // The content (until a closing esc) is printed out verbatim + // except that we must un-double any double-esc escapes in + // the middle of the string. + raw := tok[1 : len(tok)-1] + for i := 0; i < len(raw); i++ { + buf.WriteByte(raw[i]) + if raw[i] == esc { + i++ // skip the escaped quote + } + } + } + + case startsDateFormatVerb(tok[0]): + switch tok[0] { + case 'Y': + y := t.Year() + switch len(tok) { + case 2: + fmt.Fprintf(&buf, "%02d", y%100) + case 4: + fmt.Fprintf(&buf, "%04d", y) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: year must either be \"YY\" or \"YYYY\"", tok) + } + case 'M': + m := t.Month() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", m) + case 2: + fmt.Fprintf(&buf, "%02d", m) + case 3: + buf.WriteString(m.String()[:3]) + case 4: + buf.WriteString(m.String()) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: month must be \"M\", \"MM\", \"MMM\", or \"MMMM\"", tok) + } + case 'D': + d := t.Day() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", d) + case 2: + fmt.Fprintf(&buf, "%02d", d) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: day of month must either be \"D\" or \"DD\"", tok) + } + case 'E': + d := t.Weekday() + switch len(tok) { + case 3: + buf.WriteString(d.String()[:3]) + case 4: + buf.WriteString(d.String()) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: day of week must either be \"EEE\" or \"EEEE\"", tok) + } + case 'h': + h := t.Hour() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", h) + case 2: + fmt.Fprintf(&buf, "%02d", h) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: 24-hour must either be \"h\" or \"hh\"", tok) + } + case 'H': + h := t.Hour() % 12 + if h == 0 { + h = 12 + } + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", h) + case 2: + fmt.Fprintf(&buf, "%02d", h) + default: + return cty.DynamicVal, function.NewArgErrorf(0, 
"invalid date format verb %q: 12-hour must either be \"H\" or \"HH\"", tok) + } + case 'A', 'a': + if len(tok) != 2 { + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: must be \"%s%s\"", tok, tok[0:1], tok[0:1]) + } + upper := tok[0] == 'A' + switch t.Hour() / 12 { + case 0: + if upper { + buf.WriteString("AM") + } else { + buf.WriteString("am") + } + case 1: + if upper { + buf.WriteString("PM") + } else { + buf.WriteString("pm") + } + } + case 'm': + m := t.Minute() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", m) + case 2: + fmt.Fprintf(&buf, "%02d", m) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: minute must either be \"m\" or \"mm\"", tok) + } + case 's': + s := t.Second() + switch len(tok) { + case 1: + fmt.Fprintf(&buf, "%d", s) + case 2: + fmt.Fprintf(&buf, "%02d", s) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: second must either be \"s\" or \"ss\"", tok) + } + case 'Z': + // We'll just lean on Go's own formatter for this one, since + // the necessary information is unexported. + switch len(tok) { + case 1: + buf.WriteString(t.Format("Z07:00")) + case 3: + str := t.Format("-0700") + switch str { + case "+0000": + buf.WriteString("UTC") + default: + buf.WriteString(str) + } + case 4: + buf.WriteString(t.Format("-0700")) + case 5: + buf.WriteString(t.Format("-07:00")) + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q: timezone must be Z, ZZZZ, or ZZZZZ", tok) + } + default: + return cty.DynamicVal, function.NewArgErrorf(0, "invalid date format verb %q", tok) + } + + default: + // Any other starting character indicates a literal sequence + buf.Write(tok) + } + } + + return cty.StringVal(buf.String()), nil + }, +}) + +// FormatDate reformats a timestamp given in RFC3339 syntax into another time +// syntax defined by a given format string. +// +// The format string uses letter mnemonics to represent portions of the +// timestamp, with repetition signifying length variants of each portion. +// Single quote characters ' can be used to quote sequences of literal letters +// that should not be interpreted as formatting mnemonics. +// +// The full set of supported mnemonic sequences is listed below: +// +// YY Year modulo 100 zero-padded to two digits, like "06". +// YYYY Four (or more) digit year, like "2006". +// M Month number, like "1" for January. +// MM Month number zero-padded to two digits, like "01". +// MMM English month name abbreviated to three letters, like "Jan". +// MMMM English month name unabbreviated, like "January". +// D Day of month number, like "2". +// DD Day of month number zero-padded to two digits, like "02". +// EEE English day of week name abbreviated to three letters, like "Mon". +// EEEE English day of week name unabbreviated, like "Monday". +// h 24-hour number, like "2". +// hh 24-hour number zero-padded to two digits, like "02". +// H 12-hour number, like "2". +// HH 12-hour number zero-padded to two digits, like "02". +// AA Hour AM/PM marker in uppercase, like "AM". +// aa Hour AM/PM marker in lowercase, like "am". +// m Minute within hour, like "5". +// mm Minute within hour zero-padded to two digits, like "05". +// s Second within minute, like "9". +// ss Second within minute zero-padded to two digits, like "09". +// ZZZZ Timezone offset with just sign and digit, like "-0800". +// ZZZZZ Timezone offset with colon separating hours and minutes, like "-08:00". 
+// Z Like ZZZZZ but with a special case "Z" for UTC.
+// ZZZ Like ZZZZ but with a special case "UTC" for UTC.
+//
+// The format syntax is optimized mainly for generating machine-oriented
+// timestamps rather than human-oriented timestamps; the English language
+// portions of the output reflect the use of English names in a number of
+// machine-readable date formatting standards. For presentation to humans,
+// a locale-aware time formatter (not included in this package) is a better
+// choice.
+//
+// The format syntax is not compatible with that of any other language, but
+// is optimized so that patterns for common standard date formats can be
+// recognized quickly even by a reader unfamiliar with the format syntax.
+func FormatDate(format cty.Value, timestamp cty.Value) (cty.Value, error) {
+	return FormatDateFunc.Call([]cty.Value{format, timestamp})
+}
+
+func parseTimestamp(ts string) (time.Time, error) {
+	t, err := time.Parse(time.RFC3339, ts)
+	if err != nil {
+		switch err := err.(type) {
+		case *time.ParseError:
+			// If err is a time.ParseError then its string representation is not
+			// appropriate since it relies on details of Go's strange date format
+			// representation, which a caller of our functions is not expected
+			// to be familiar with.
+			//
+			// Therefore we do some light transformation to get a more suitable
+			// error that should make more sense to our callers. These are
+			// still not awesome error messages, but at least they refer to
+			// the timestamp portions by name rather than by Go's example
+			// values.
+			if err.LayoutElem == "" && err.ValueElem == "" && err.Message != "" {
+				// For some reason err.Message is populated with a ": " prefix
+				// by the time package.
+				return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp%s", err.Message)
+			}
+			var what string
+			switch err.LayoutElem {
+			case "2006":
+				what = "year"
+			case "01":
+				what = "month"
+			case "02":
+				what = "day of month"
+			case "15":
+				what = "hour"
+			case "04":
+				what = "minute"
+			case "05":
+				what = "second"
+			case "Z07:00":
+				what = "UTC offset"
+			case "T":
+				return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: missing required time introducer 'T'")
+			case ":", "-":
+				if err.ValueElem == "" {
+					return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string where %q is expected", err.LayoutElem)
+				} else {
+					return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: found %q where %q is expected", err.ValueElem, err.LayoutElem)
+				}
+			default:
+				// Should never get here, because time.RFC3339 includes only the
+				// above portions, but since that might change in future we'll
+				// be robust here.
+				what = "timestamp segment"
+			}
+			if err.ValueElem == "" {
+				return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: end of string before %s", what)
+			} else {
+				return time.Time{}, fmt.Errorf("not a valid RFC3339 timestamp: cannot use %q as %s", err.ValueElem, what)
+			}
+		}
+		return time.Time{}, err
+	}
+	return t, nil
+}
+
+// splitDateFormat is a bufio.SplitFunc used to tokenize a date format.
+func splitDateFormat(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if len(data) == 0 {
+		return 0, nil, nil
+	}
+
+	const esc = '\''
+
+	switch {
+
+	case data[0] == esc:
+		// If we have another quote immediately after then this is a single
+		// escaped escape.
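A hedged sketch of FormatDate using the mnemonics documented above (editorial example; the quoted 'at' is a literal per the single-quote escaping rules):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	v, err := stdlib.FormatDate(
		cty.StringVal("DD MMM YYYY 'at' hh:mm"),
		cty.StringVal("2018-11-11T21:18:16-05:00"),
	)
	fmt.Printf("%#v %v\n", v, err)
	// expected: cty.StringVal("11 Nov 2018 at 21:18") <nil>
}
```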
+ if len(data) > 1 && data[1] == esc { + return 2, data[:2], nil + } + + // Beginning of quoted sequence, so we will seek forward until we find + // the closing quote, ignoring escaped quotes along the way. + for i := 1; i < len(data); i++ { + if data[i] == esc { + if (i + 1) == len(data) { + // We need at least one more byte to decide if this is an + // escape or a terminator. + return 0, nil, nil + } + if data[i+1] == esc { + i++ // doubled-up quotes are an escape sequence + continue + } + // We've found the closing quote + return i + 1, data[:i+1], nil + } + } + // If we fall out here then we need more bytes to find the end, + // unless we're already at the end with an unclosed quote. + if atEOF { + return len(data), data, nil + } + return 0, nil, nil + + case startsDateFormatVerb(data[0]): + rep := data[0] + for i := 1; i < len(data); i++ { + if data[i] != rep { + return i, data[:i], nil + } + } + if atEOF { + return len(data), data, nil + } + // We need more data to decide if we've found the end + return 0, nil, nil + + default: + for i := 1; i < len(data); i++ { + if data[i] == esc || startsDateFormatVerb(data[i]) { + return i, data[:i], nil + } + } + // We might not actually be at the end of a literal sequence, + // but that doesn't matter since we'll concat them back together + // anyway. + return len(data), data, nil + } +} + +func startsDateFormatVerb(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go new file mode 100644 index 00000000..cfb613e5 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/doc.go @@ -0,0 +1,13 @@ +// Package stdlib is a collection of cty functions that are expected to be +// generally useful, and are thus factored out into this shared library in +// the hope that cty-using applications will have consistent behavior when +// using these functions. +// +// See the parent package "function" for more information on the purpose +// and usage of cty functions. +// +// This package contains both Go functions, which provide convenient access +// to call the functions from Go code, and the Function objects themselves. +// The latter follow the naming scheme of appending "Func" to the end of +// the function name. 
+package stdlib diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go new file mode 100644 index 00000000..fb24f204 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format.go @@ -0,0 +1,496 @@ +package stdlib + +import ( + "bytes" + "fmt" + "math/big" + "strings" + + "github.com/apparentlymart/go-textseg/textseg" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/json" +) + +//go:generate ragel -Z format_fsm.rl +//go:generate gofmt -w format_fsm.go + +var FormatFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "args", + Type: cty.DynamicPseudoType, + AllowNull: true, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + for _, arg := range args[1:] { + if !arg.IsWhollyKnown() { + // We require all nested values to be known because the only + // thing we can do for a collection/structural type is print + // it as JSON and that requires it to be wholly known. + return cty.UnknownVal(cty.String), nil + } + } + str, err := formatFSM(args[0].AsString(), args[1:]) + return cty.StringVal(str), err + }, +}) + +var FormatListFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "format", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "args", + Type: cty.DynamicPseudoType, + AllowNull: true, + AllowUnknown: true, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + fmtVal := args[0] + args = args[1:] + + if len(args) == 0 { + // With no arguments, this function is equivalent to Format, but + // returning a single-element list result. + result, err := Format(fmtVal, args...) + return cty.ListVal([]cty.Value{result}), err + } + + fmtStr := fmtVal.AsString() + + // Each of our arguments will be dealt with either as an iterator + // or as a single value. Iterators are used for sequence-type values + // (lists, sets, tuples) while everything else is treated as a + // single value. The sequences we iterate over are required to be + // all the same length. + iterLen := -1 + lenChooser := -1 + iterators := make([]cty.ElementIterator, len(args)) + singleVals := make([]cty.Value, len(args)) + for i, arg := range args { + argTy := arg.Type() + switch { + case (argTy.IsListType() || argTy.IsSetType() || argTy.IsTupleType()) && !arg.IsNull(): + thisLen := arg.LengthInt() + if iterLen == -1 { + iterLen = thisLen + lenChooser = i + } else { + if thisLen != iterLen { + return cty.NullVal(cty.List(cty.String)), function.NewArgErrorf( + i+1, + "argument %d has length %d, which is inconsistent with argument %d of length %d", + i+1, thisLen, + lenChooser+1, iterLen, + ) + } + } + iterators[i] = arg.ElementIterator() + default: + singleVals[i] = arg + } + } + + if iterLen == 0 { + // If our sequences are all empty then our result must be empty. + return cty.ListValEmpty(cty.String), nil + } + + if iterLen == -1 { + // If we didn't encounter any iterables at all then we're going + // to just do one iteration with items from singleVals. 
+ iterLen = 1 + } + + ret := make([]cty.Value, 0, iterLen) + fmtArgs := make([]cty.Value, len(iterators)) + Results: + for iterIdx := 0; iterIdx < iterLen; iterIdx++ { + + // Construct our arguments for a single format call + for i := range fmtArgs { + switch { + case iterators[i] != nil: + iterator := iterators[i] + iterator.Next() + _, val := iterator.Element() + fmtArgs[i] = val + default: + fmtArgs[i] = singleVals[i] + } + + // If any of the arguments to this call would be unknown then + // this particular result is unknown, but we'll keep going + // to see if any other iterations can produce known values. + if !fmtArgs[i].IsWhollyKnown() { + // We require all nested values to be known because the only + // thing we can do for a collection/structural type is print + // it as JSON and that requires it to be wholly known. + ret = append(ret, cty.UnknownVal(cty.String)) + continue Results + } + } + + str, err := formatFSM(fmtStr, fmtArgs) + if err != nil { + return cty.NullVal(cty.List(cty.String)), fmt.Errorf( + "error on format iteration %d: %s", iterIdx, err, + ) + } + + ret = append(ret, cty.StringVal(str)) + } + + return cty.ListVal(ret), nil + }, +}) + +// Format produces a string representation of zero or more values using a +// format string similar to the "printf" function in C. +// +// It supports the following "verbs": +// +// %% Literal percent sign, consuming no value +// %v A default formatting of the value based on type, as described below. +// %#v JSON serialization of the value +// %t Converts to boolean and then produces "true" or "false" +// %b Converts to number, requires integer, produces binary representation +// %d Converts to number, requires integer, produces decimal representation +// %o Converts to number, requires integer, produces octal representation +// %x Converts to number, requires integer, produces hexadecimal representation +// with lowercase letters +// %X Like %x but with uppercase letters +// %e Converts to number, produces scientific notation like -1.234456e+78 +// %E Like %e but with an uppercase "E" representing the exponent +// %f Converts to number, produces decimal representation with fractional +// part but no exponent, like 123.456 +// %g %e for large exponents or %f otherwise +// %G %E for large exponents or %f otherwise +// %s Converts to string and produces the string's characters +// %q Converts to string and produces JSON-quoted string representation, +// like %v. +// +// The default format selections made by %v are: +// +// string %s +// number %g +// bool %t +// other %#v +// +// Null values produce the literal keyword "null" for %v and %#v, and produce +// an error otherwise. +// +// Width is specified by an optional decimal number immediately preceding the +// verb letter. If absent, the width is whatever is necessary to represent the +// value. Precision is specified after the (optional) width by a period +// followed by a decimal number. If no period is present, a default precision +// is used. A period with no following number is invalid. +// For examples: +// +// %f default width, default precision +// %9f width 9, default precision +// %.2f default width, precision 2 +// %9.2f width 9, precision 2 +// +// Width and precision are measured in unicode characters (grapheme clusters). +// +// For most values, width is the minimum number of characters to output, +// padding the formatted form with spaces if necessary. 
+// +// For strings, precision limits the length of the input to be formatted (not +// the size of the output), truncating if necessary. +// +// For numbers, width sets the minimum width of the field and precision sets +// the number of places after the decimal, if appropriate, except that for +// %g/%G precision sets the total number of significant digits. +// +// The following additional symbols can be used immediately after the percent +// introducer as flags: +// +// (a space) leave a space where the sign would be if number is positive +// + Include a sign for a number even if it is positive (numeric only) +// - Pad with spaces on the left rather than the right +// 0 Pad with zeros rather than spaces. +// +// Flag characters are ignored for verbs that do not support them. +// +// By default, % sequences consume successive arguments starting with the first. +// Introducing a [n] sequence immediately before the verb letter, where n is a +// decimal integer, explicitly chooses a particular value argument by its +// one-based index. Subsequent calls without an explicit index will then +// proceed with n+1, n+2, etc. +// +// An error is produced if the format string calls for an impossible conversion +// or accesses more values than are given. An error is produced also for +// an unsupported format verb. +func Format(format cty.Value, vals ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, 0, len(vals)+1) + args = append(args, format) + args = append(args, vals...) + return FormatFunc.Call(args) +} + +// FormatList applies the same formatting behavior as Format, but accepts +// a mixture of list and non-list values as arguments. Any list arguments +// passed must have the same length, which dictates the length of the +// resulting list. +// +// Any non-list arguments are used repeatedly for each iteration over the +// list arguments. The list arguments are iterated in order by key, so +// corresponding items are formatted together. +func FormatList(format cty.Value, vals ...cty.Value) (cty.Value, error) { + args := make([]cty.Value, 0, len(vals)+1) + args = append(args, format) + args = append(args, vals...) + return FormatListFunc.Call(args) +} + +type formatVerb struct { + Raw string + Offset int + + ArgNum int + Mode rune + + Zero bool + Sharp bool + Plus bool + Minus bool + Space bool + + HasPrec bool + Prec int + + HasWidth bool + Width int +} + +// formatAppend is called by formatFSM (generated by format_fsm.rl) for each +// formatting sequence that is encountered. +func formatAppend(verb *formatVerb, buf *bytes.Buffer, args []cty.Value) error { + argIdx := verb.ArgNum - 1 + if argIdx >= len(args) { + return fmt.Errorf( + "not enough arguments for %q at %d: need index %d but have %d total", + verb.Raw, verb.Offset, + verb.ArgNum, len(args), + ) + } + arg := args[argIdx] + + if verb.Mode != 'v' && arg.IsNull() { + return fmt.Errorf("unsupported value for %q at %d: null value cannot be formatted", verb.Raw, verb.Offset) + } + + // Normalize to make some things easier for downstream formatters + if !verb.HasWidth { + verb.Width = -1 + } + if !verb.HasPrec { + verb.Prec = -1 + } + + // For our first pass we'll ensure the verb is supported and then fan + // out to other functions based on what conversion is needed. 
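A hedged sketch of the Format and FormatList wrappers documented above (editorial example; FormatList reuses non-list arguments on every iteration):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function/stdlib"
)

func main() {
	v, _ := stdlib.Format(
		cty.StringVal("%s is %d years old"),
		cty.StringVal("Ermintrude"),
		cty.NumberIntVal(32),
	)
	fmt.Printf("%#v\n", v) // expected: cty.StringVal("Ermintrude is 32 years old")

	l, _ := stdlib.FormatList(
		cty.StringVal("%s=%d"),
		cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
		cty.NumberIntVal(1),
	)
	fmt.Printf("%#v\n", l) // expected: list of "a=1" and "b=1"
}
```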
+ switch verb.Mode { + + case 'v': + return formatAppendAsIs(verb, buf, arg) + + case 't': + return formatAppendBool(verb, buf, arg) + + case 'b', 'd', 'o', 'x', 'X', 'e', 'E', 'f', 'g', 'G': + return formatAppendNumber(verb, buf, arg) + + case 's', 'q': + return formatAppendString(verb, buf, arg) + + default: + return fmt.Errorf("unsupported format verb %q in %q at offset %d", verb.Mode, verb.Raw, verb.Offset) + } +} + +func formatAppendAsIs(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + + if !verb.Sharp && !arg.IsNull() { + // Unless the caller overrode it with the sharp flag, we'll try some + // specialized formats before we fall back on JSON. + switch arg.Type() { + case cty.String: + fmted := arg.AsString() + fmted = formatPadWidth(verb, fmted) + buf.WriteString(fmted) + return nil + case cty.Number: + bf := arg.AsBigFloat() + fmted := bf.Text('g', -1) + fmted = formatPadWidth(verb, fmted) + buf.WriteString(fmted) + return nil + } + } + + jb, err := json.Marshal(arg, arg.Type()) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + fmted := formatPadWidth(verb, string(jb)) + buf.WriteString(fmted) + + return nil +} + +func formatAppendBool(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.Bool) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + if arg.True() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + return nil +} + +func formatAppendNumber(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.Number) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + switch verb.Mode { + case 'b', 'd', 'o', 'x', 'X': + return formatAppendInteger(verb, buf, arg) + default: + bf := arg.AsBigFloat() + + // For floats our format syntax is a subset of Go's, so it's + // safe for us to just lean on the existing Go implementation. + fmtstr := formatStripIndexSegment(verb.Raw) + fmted := fmt.Sprintf(fmtstr, bf) + buf.WriteString(fmted) + return nil + } +} + +func formatAppendInteger(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + bf := arg.AsBigFloat() + bi, acc := bf.Int(nil) + if acc != big.Exact { + return fmt.Errorf("unsupported value for %q at %d: an integer is required", verb.Raw, verb.Offset) + } + + // For integers our format syntax is a subset of Go's, so it's + // safe for us to just lean on the existing Go implementation. + fmtstr := formatStripIndexSegment(verb.Raw) + fmted := fmt.Sprintf(fmtstr, bi) + buf.WriteString(fmted) + return nil +} + +func formatAppendString(verb *formatVerb, buf *bytes.Buffer, arg cty.Value) error { + var err error + arg, err = convert.Convert(arg, cty.String) + if err != nil { + return fmt.Errorf("unsupported value for %q at %d: %s", verb.Raw, verb.Offset, err) + } + + // We _cannot_ directly use the Go fmt.Sprintf implementation for strings + // because it measures widths and precisions in runes rather than grapheme + // clusters. 
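The rune-versus-grapheme-cluster distinction matters for combining characters; a hedged sketch using the same textseg calls as the surrounding code (editorial example):

```go
package main

import (
	"fmt"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	// "e" followed by a combining acute accent: two runes, but a single
	// grapheme cluster, so width/precision should count it as one.
	s := "e\u0301"
	fmt.Println(len([]rune(s))) // 2

	n, _ := textseg.TokenCount([]byte(s), textseg.ScanGraphemeClusters)
	fmt.Println(n) // expected: 1
}
```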
+ + str := arg.AsString() + if verb.Prec > 0 { + strB := []byte(str) + pos := 0 + wanted := verb.Prec + for i := 0; i < wanted; i++ { + next := strB[pos:] + if len(next) == 0 { + // ran out of characters before we hit our max width + break + } + d, _, _ := textseg.ScanGraphemeClusters(strB[pos:], true) + pos += d + } + str = str[:pos] + } + + switch verb.Mode { + case 's': + fmted := formatPadWidth(verb, str) + buf.WriteString(fmted) + case 'q': + jb, err := json.Marshal(cty.StringVal(str), cty.String) + if err != nil { + // Should never happen, since we know this is a known, non-null string + panic(fmt.Errorf("failed to marshal %#v as JSON: %s", arg, err)) + } + fmted := formatPadWidth(verb, string(jb)) + buf.WriteString(fmted) + default: + // Should never happen because formatAppend should've already validated + panic(fmt.Errorf("invalid string formatting mode %q", verb.Mode)) + } + return nil +} + +func formatPadWidth(verb *formatVerb, fmted string) string { + if verb.Width < 0 { + return fmted + } + + // Safe to ignore errors because ScanGraphemeClusters cannot produce errors + givenLen, _ := textseg.TokenCount([]byte(fmted), textseg.ScanGraphemeClusters) + wantLen := verb.Width + if givenLen >= wantLen { + return fmted + } + + padLen := wantLen - givenLen + padChar := " " + if verb.Zero { + padChar = "0" + } + pads := strings.Repeat(padChar, padLen) + + if verb.Minus { + return fmted + pads + } + return pads + fmted +} + +// formatStripIndexSegment strips out any [nnn] segment present in a verb +// string so that we can pass it through to Go's fmt.Sprintf with a single +// argument. This is used in cases where we're just leaning on Go's formatter +// because it's a superset of ours. +func formatStripIndexSegment(rawVerb string) string { + // We assume the string has already been validated here, since we should + // only be using this function with strings that were accepted by our + // scanner in formatFSM. + start := strings.Index(rawVerb, "[") + end := strings.Index(rawVerb, "]") + if start == -1 || end == -1 { + return rawVerb + } + + return rawVerb[:start] + rawVerb[end+1:] +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go new file mode 100644 index 00000000..1dc1f461 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.go @@ -0,0 +1,358 @@ +// line 1 "format_fsm.rl" +// This file is generated from format_fsm.rl. DO NOT EDIT. 
+ +// line 5 "format_fsm.rl" + +package stdlib + +import ( + "bytes" + "fmt" + "unicode/utf8" + + "github.com/zclconf/go-cty/cty" +) + +// line 20 "format_fsm.go" +var _formatfsm_actions []byte = []byte{ + 0, 1, 0, 1, 1, 1, 2, 1, 4, + 1, 5, 1, 6, 1, 7, 1, 8, + 1, 9, 1, 10, 1, 11, 1, 14, + 1, 16, 1, 17, 1, 18, 2, 3, + 4, 2, 12, 10, 2, 12, 16, 2, + 12, 18, 2, 13, 14, 2, 15, 10, + 2, 15, 18, +} + +var _formatfsm_key_offsets []byte = []byte{ + 0, 0, 14, 27, 34, 36, 39, 43, + 51, +} + +var _formatfsm_trans_keys []byte = []byte{ + 32, 35, 37, 43, 45, 46, 48, 91, + 49, 57, 65, 90, 97, 122, 32, 35, + 43, 45, 46, 48, 91, 49, 57, 65, + 90, 97, 122, 91, 48, 57, 65, 90, + 97, 122, 49, 57, 93, 48, 57, 65, + 90, 97, 122, 46, 91, 48, 57, 65, + 90, 97, 122, 37, +} + +var _formatfsm_single_lengths []byte = []byte{ + 0, 8, 7, 1, 0, 1, 0, 2, + 1, +} + +var _formatfsm_range_lengths []byte = []byte{ + 0, 3, 3, 3, 1, 1, 2, 3, + 0, +} + +var _formatfsm_index_offsets []byte = []byte{ + 0, 0, 12, 23, 28, 30, 33, 36, + 42, +} + +var _formatfsm_indicies []byte = []byte{ + 1, 2, 3, 4, 5, 6, 7, 10, + 8, 9, 9, 0, 1, 2, 4, 5, + 6, 7, 10, 8, 9, 9, 0, 13, + 11, 12, 12, 0, 14, 0, 15, 14, + 0, 9, 9, 0, 16, 19, 17, 18, + 18, 0, 20, 3, +} + +var _formatfsm_trans_targs []byte = []byte{ + 0, 2, 2, 8, 2, 2, 3, 2, + 7, 8, 4, 3, 8, 4, 5, 6, + 3, 7, 8, 4, 1, +} + +var _formatfsm_trans_actions []byte = []byte{ + 7, 17, 9, 3, 15, 13, 25, 11, + 43, 29, 19, 27, 49, 46, 21, 0, + 37, 23, 40, 34, 1, +} + +var _formatfsm_eof_actions []byte = []byte{ + 0, 31, 31, 31, 31, 31, 31, 31, + 5, +} + +const formatfsm_start int = 8 +const formatfsm_first_final int = 8 +const formatfsm_error int = 0 + +const formatfsm_en_main int = 8 + +// line 19 "format_fsm.rl" + +func formatFSM(format string, a []cty.Value) (string, error) { + var buf bytes.Buffer + data := format + nextArg := 1 // arg numbers are 1-based + var verb formatVerb + + // line 153 "format_fsm.rl" + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + cs := 0 // current state (will be initialized by ragel-generated code) + ts := 0 + te := 0 + eof := pe + + // Keep Go compiler happy even if generated code doesn't use these + _ = ts + _ = te + _ = eof + + // line 121 "format_fsm.go" + { + cs = formatfsm_start + } + + // line 126 "format_fsm.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } + _resume: + _keys = int(_formatfsm_key_offsets[cs]) + _trans = int(_formatfsm_index_offsets[cs]) + + _klen = int(_formatfsm_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _formatfsm_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _formatfsm_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_formatfsm_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _formatfsm_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _formatfsm_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans 
= int(_formatfsm_indicies[_trans]) + cs = int(_formatfsm_trans_targs[_trans]) + + if _formatfsm_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_formatfsm_trans_actions[_trans]) + _nacts = uint(_formatfsm_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _formatfsm_actions[_acts-1] { + case 0: + // line 29 "format_fsm.rl" + + verb = formatVerb{ + ArgNum: nextArg, + Prec: -1, + Width: -1, + } + ts = p + + case 1: + // line 38 "format_fsm.rl" + + buf.WriteByte(data[p]) + + case 4: + // line 49 "format_fsm.rl" + + // We'll try to slurp a whole UTF-8 sequence here, to give the user + // better feedback. + r, _ := utf8.DecodeRuneInString(data[p:]) + return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) + + case 5: + // line 56 "format_fsm.rl" + + verb.Sharp = true + + case 6: + // line 59 "format_fsm.rl" + + verb.Zero = true + + case 7: + // line 62 "format_fsm.rl" + + verb.Minus = true + + case 8: + // line 65 "format_fsm.rl" + + verb.Plus = true + + case 9: + // line 68 "format_fsm.rl" + + verb.Space = true + + case 10: + // line 72 "format_fsm.rl" + + verb.ArgNum = 0 + + case 11: + // line 75 "format_fsm.rl" + + verb.ArgNum = (10 * verb.ArgNum) + (int(data[p]) - '0') + + case 12: + // line 79 "format_fsm.rl" + + verb.HasWidth = true + + case 13: + // line 82 "format_fsm.rl" + + verb.Width = 0 + + case 14: + // line 85 "format_fsm.rl" + + verb.Width = (10 * verb.Width) + (int(data[p]) - '0') + + case 15: + // line 89 "format_fsm.rl" + + verb.HasPrec = true + + case 16: + // line 92 "format_fsm.rl" + + verb.Prec = 0 + + case 17: + // line 95 "format_fsm.rl" + + verb.Prec = (10 * verb.Prec) + (int(data[p]) - '0') + + case 18: + // line 99 "format_fsm.rl" + + verb.Mode = rune(data[p]) + te = p + 1 + verb.Raw = data[ts:te] + verb.Offset = ts + + err := formatAppend(&verb, &buf, a) + if err != nil { + return buf.String(), err + } + nextArg = verb.ArgNum + 1 + + // line 324 "format_fsm.go" + } + } + + _again: + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + __acts := _formatfsm_eof_actions[cs] + __nacts := uint(_formatfsm_actions[__acts]) + __acts++ + for ; __nacts > 0; __nacts-- { + __acts++ + switch _formatfsm_actions[__acts-1] { + case 2: + // line 42 "format_fsm.rl" + + case 3: + // line 45 "format_fsm.rl" + + return buf.String(), fmt.Errorf("invalid format string starting at offset %d", p) + + case 4: + // line 49 "format_fsm.rl" + + // We'll try to slurp a whole UTF-8 sequence here, to give the user + // better feedback. + r, _ := utf8.DecodeRuneInString(data[p:]) + return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) + + // line 363 "format_fsm.go" + } + } + } + + _out: + { + } + } + + // line 171 "format_fsm.rl" + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // flag it anyway rather than just losing data from the end. 
+ if cs < formatfsm_first_final { + return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p) + } + + return buf.String(), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl new file mode 100644 index 00000000..dbaa91c6 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/format_fsm.rl @@ -0,0 +1,182 @@ +// This file is generated from format_fsm.rl. DO NOT EDIT. +%%{ + # (except you are actually in scan_tokens.rl here, so edit away!) + machine formatfsm; +}%% + +package stdlib + +import ( + "bytes" + "fmt" + "unicode/utf8" + + "github.com/zclconf/go-cty/cty" +) + +%%{ + write data; +}%% + +func formatFSM(format string, a []cty.Value) (string, error) { + var buf bytes.Buffer + data := format + nextArg := 1 // arg numbers are 1-based + var verb formatVerb + + %%{ + + action begin { + verb = formatVerb{ + ArgNum: nextArg, + Prec: -1, + Width: -1, + } + ts = p + } + + action emit { + buf.WriteByte(fc); + } + + action finish_ok { + } + + action finish_err { + return buf.String(), fmt.Errorf("invalid format string starting at offset %d", p) + } + + action err_char { + // We'll try to slurp a whole UTF-8 sequence here, to give the user + // better feedback. + r, _ := utf8.DecodeRuneInString(data[p:]) + return buf.String(), fmt.Errorf("unrecognized format character %q at offset %d", r, p) + } + + action flag_sharp { + verb.Sharp = true + } + action flag_zero { + verb.Zero = true + } + action flag_minus { + verb.Minus = true + } + action flag_plus { + verb.Plus = true + } + action flag_space { + verb.Space = true + } + + action argidx_reset { + verb.ArgNum = 0 + } + action argidx_num { + verb.ArgNum = (10 * verb.ArgNum) + (int(fc) - '0') + } + + action has_width { + verb.HasWidth = true + } + action width_reset { + verb.Width = 0 + } + action width_num { + verb.Width = (10 * verb.Width) + (int(fc) - '0') + } + + action has_prec { + verb.HasPrec = true + } + action prec_reset { + verb.Prec = 0 + } + action prec_num { + verb.Prec = (10 * verb.Prec) + (int(fc) - '0') + } + + action mode { + verb.Mode = rune(fc) + te = p+1 + verb.Raw = data[ts:te] + verb.Offset = ts + + err := formatAppend(&verb, &buf, a) + if err != nil { + return buf.String(), err + } + nextArg = verb.ArgNum + 1 + } + + # a number that isn't zero and doesn't have a leading zero + num = [1-9] [0-9]*; + + flags = ( + '0' @flag_zero | + '#' @flag_sharp | + '-' @flag_minus | + '+' @flag_plus | + ' ' @flag_space + )*; + + argidx = (( + '[' (num $argidx_num) ']' + ) >argidx_reset)?; + + width = ( + ( num $width_num ) >width_reset %has_width + )?; + + precision = ( + ('.' 
( digit* $prec_num )) >prec_reset %has_prec + )?; + + # We accept any letter here, but will be more picky in formatAppend + mode = ('a'..'z' | 'A'..'Z') @mode; + + fmt_verb = ( + '%' @begin + flags + width + precision + argidx + mode + ); + + main := ( + [^%] @emit | + '%%' @emit | + fmt_verb + )* @/finish_err %/finish_ok $!err_char; + + }%% + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + cs := 0 // current state (will be initialized by ragel-generated code) + ts := 0 + te := 0 + eof := pe + + // Keep Go compiler happy even if generated code doesn't use these + _ = ts + _ = te + _ = eof + + %%{ + write init; + write exec; + }%% + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // flag it anyway rather than just losing data from the end. + if cs < formatfsm_first_final { + return buf.String(), fmt.Errorf("extraneous characters beginning at offset %d", p) + } + + return buf.String(), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go new file mode 100644 index 00000000..6b31f266 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/general.go @@ -0,0 +1,107 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +var EqualFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + { + Name: "b", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Equals(args[1]), nil + }, +}) + +var NotEqualFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + { + Name: "b", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + return args[0].Equals(args[1]).Not(), nil + }, +}) + +var CoalesceFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + argTypes := make([]cty.Type, len(args)) + for i, val := range args { + argTypes[i] = val.Type() + } + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, fmt.Errorf("all arguments must have the same type") + } + return retType, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, argVal := range args { + if !argVal.IsKnown() { + return cty.UnknownVal(retType), nil + } + if argVal.IsNull() { + continue + } + + return convert.Convert(argVal, retType) + } + return cty.NilVal, fmt.Errorf("no non-null arguments") + }, +}) + +// Equal determines whether the two given values are equal, returning a +// bool 
value. +func Equal(a cty.Value, b cty.Value) (cty.Value, error) { + return EqualFunc.Call([]cty.Value{a, b}) +} + +// NotEqual is the opposite of Equal. +func NotEqual(a cty.Value, b cty.Value) (cty.Value, error) { + return NotEqualFunc.Call([]cty.Value{a, b}) +} + +// Coalesce returns the first of the given arguments that is not null. If +// all arguments are null, an error is produced. +func Coalesce(vals ...cty.Value) (cty.Value, error) { + return CoalesceFunc.Call(vals) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go new file mode 100644 index 00000000..07901c65 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/json.go @@ -0,0 +1,72 @@ +package stdlib + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/json" +) + +var JSONEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "val", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + val := args[0] + if !val.IsWhollyKnown() { + // We can't serialize unknowns, so if the value is unknown or + // contains any _nested_ unknowns then our result must be + // unknown. + return cty.UnknownVal(retType), nil + } + + buf, err := json.Marshal(val, val.Type()) + if err != nil { + return cty.NilVal, err + } + + return cty.StringVal(string(buf)), nil + }, +}) + +var JSONDecodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + str := args[0] + if !str.IsKnown() { + return cty.DynamicPseudoType, nil + } + + buf := []byte(str.AsString()) + return json.ImpliedType(buf) + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + buf := []byte(args[0].AsString()) + return json.Unmarshal(buf, retType) + }, +}) + +// JSONEncode returns a JSON serialization of the given value. +func JSONEncode(val cty.Value) (cty.Value, error) { + return JSONEncodeFunc.Call([]cty.Value{val}) +} + +// JSONDecode parses the given JSON string and, if it is valid, returns the +// value it represents. +// +// Note that applying JSONDecode to the result of JSONEncode may not produce +// an identically-typed result, since JSON encoding is lossy for cty Types. +// The resulting value will consist only of primitive types, object types, and +// tuple types. 
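+//
+// As a hypothetical round-trip example:
+//
+//     s, _ := JSONEncode(cty.MapVal(map[string]cty.Value{"a": cty.StringVal("b")}))
+//     // s is cty.StringVal(`{"a":"b"}`)
+//     v, _ := JSONDecode(s)
+//     // v is an object-typed value, not a map-typed one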
+func JSONDecode(str cty.Value) (cty.Value, error) { + return JSONDecodeFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go new file mode 100644 index 00000000..bd9b2e51 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/number.go @@ -0,0 +1,428 @@ +package stdlib + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +var AbsoluteFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "num", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return args[0].Absolute(), nil + }, +}) + +var AddFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Add can panic if the input values are opposing infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't compute sum of opposing infinities") + } else { + // not a panic we recognize + panic(r) + } + } + }() + return args[0].Add(args[1]), nil + }, +}) + +var SubtractFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Sub can panic if the input values are infinities, + // so we must catch that here in order to remain within + // the cty Function abstraction. + defer func() { + if r := recover(); r != nil { + if _, ok := r.(big.ErrNaN); ok { + ret = cty.NilVal + err = fmt.Errorf("can't subtract infinity from itself") + } else { + // not a panic we recognize + panic(r) + } + } + }() + return args[0].Subtract(args[1]), nil + }, +}) + +var MultiplyFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "a", + Type: cty.Number, + AllowDynamicType: true, + }, + { + Name: "b", + Type: cty.Number, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + // big.Float.Mul can panic if the input values are both zero or both + // infinity, so we must catch that here in order to remain within + // the cty Function abstraction. 
+		defer func() {
+			if r := recover(); r != nil {
+				if _, ok := r.(big.ErrNaN); ok {
+					ret = cty.NilVal
+					err = fmt.Errorf("can't multiply zero by infinity")
+				} else {
+					// not a panic we recognize
+					panic(r)
+				}
+			}
+		}()
+
+		return args[0].Multiply(args[1]), nil
+	},
+})
+
+var DivideFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		// big.Float.Quo can panic if the input values are both zero or both
+		// infinity, so we must catch that here in order to remain within
+		// the cty Function abstraction.
+		defer func() {
+			if r := recover(); r != nil {
+				if _, ok := r.(big.ErrNaN); ok {
+					ret = cty.NilVal
+					err = fmt.Errorf("can't divide zero by zero or infinity by infinity")
+				} else {
+					// not a panic we recognize
+					panic(r)
+				}
+			}
+		}()
+
+		return args[0].Divide(args[1]), nil
+	},
+})
+
+var ModuloFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		// The big.Float operations underlying Modulo can panic for certain
+		// combinations of zero and infinity, so we must catch that here in
+		// order to remain within the cty Function abstraction.
+		defer func() {
+			if r := recover(); r != nil {
+				if _, ok := r.(big.ErrNaN); ok {
+					ret = cty.NilVal
+					err = fmt.Errorf("can't use modulo with zero and infinity")
+				} else {
+					// not a panic we recognize
+					panic(r)
+				}
+			}
+		}()
+
+		return args[0].Modulo(args[1]), nil
+	},
+})
+
+var GreaterThanFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return args[0].GreaterThan(args[1]), nil
+	},
+})
+
+var GreaterThanOrEqualToFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return args[0].GreaterThanOrEqualTo(args[1]), nil
+	},
+})
+
+var LessThanFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return args[0].LessThan(args[1]), nil
+	},
+})
+
+var LessThanOrEqualToFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return args[0].LessThanOrEqualTo(args[1]), nil
+	},
+})
+
+var NegateFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "num",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return args[0].Negate(), nil
+	},
+})
+
+var MinFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	VarParam: &function.Parameter{
+		Name:             "numbers",
+		Type:             cty.Number,
+		AllowDynamicType: true,
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		if len(args) == 0 {
+			return cty.NilVal, fmt.Errorf("must pass at least one number")
+		}
+
+		min := cty.PositiveInfinity
+		for _, num := range args {
+			if num.LessThan(min).True() {
+				min = num
+			}
+		}
+
+		return min, nil
+	},
+})
+
+var MaxFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	VarParam: &function.Parameter{
+		Name:             "numbers",
+		Type:             cty.Number,
+		AllowDynamicType: true,
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		if len(args) == 0 {
+			return cty.NilVal, fmt.Errorf("must pass at least one number")
+		}
+
+		max := cty.NegativeInfinity
+		for _, num := range args {
+			if num.GreaterThan(max).True() {
+				max = num
+			}
+		}
+
+		return max, nil
+	},
+})
+
+var IntFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "num",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		bf := args[0].AsBigFloat()
+		if bf.IsInt() {
+			return args[0], nil
+		}
+		bi, _ := bf.Int(nil)
+		bf = (&big.Float{}).SetInt(bi)
+		return cty.NumberVal(bf), nil
+	},
+})
+
+// Absolute returns the magnitude of the given number, without its sign.
+// That is, it turns negative values into positive values.
+func Absolute(num cty.Value) (cty.Value, error) {
+	return AbsoluteFunc.Call([]cty.Value{num})
+}
+
+// Add returns the sum of the two given numbers.
+func Add(a cty.Value, b cty.Value) (cty.Value, error) {
+	return AddFunc.Call([]cty.Value{a, b})
+}
+
+// Subtract returns the difference between the two given numbers.
+func Subtract(a cty.Value, b cty.Value) (cty.Value, error) {
+	return SubtractFunc.Call([]cty.Value{a, b})
+}
+
+// Multiply returns the product of the two given numbers.
+func Multiply(a cty.Value, b cty.Value) (cty.Value, error) {
+	return MultiplyFunc.Call([]cty.Value{a, b})
+}
+
+// Divide returns a divided by b, where both a and b are numbers.
+func Divide(a cty.Value, b cty.Value) (cty.Value, error) {
+	return DivideFunc.Call([]cty.Value{a, b})
+}
+
+// Negate returns the given number multiplied by -1.
+func Negate(num cty.Value) (cty.Value, error) {
+	return NegateFunc.Call([]cty.Value{num})
+}
+
+// LessThan returns true if a is less than b.
+func LessThan(a cty.Value, b cty.Value) (cty.Value, error) {
+	return LessThanFunc.Call([]cty.Value{a, b})
+}
+
+// LessThanOrEqualTo returns true if a is less than or equal to b.
+func LessThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) {
+	return LessThanOrEqualToFunc.Call([]cty.Value{a, b})
+}
+
+// GreaterThan returns true if a is greater than b.
+func GreaterThan(a cty.Value, b cty.Value) (cty.Value, error) {
+	return GreaterThanFunc.Call([]cty.Value{a, b})
+}
+
+// GreaterThanOrEqualTo returns true if a is greater than or equal to b.
+func GreaterThanOrEqualTo(a cty.Value, b cty.Value) (cty.Value, error) { + return GreaterThanOrEqualToFunc.Call([]cty.Value{a, b}) +} + +// Modulo returns the remainder of a divided by b under integer division, +// where both a and b are numbers. +func Modulo(a cty.Value, b cty.Value) (cty.Value, error) { + return ModuloFunc.Call([]cty.Value{a, b}) +} + +// Min returns the minimum number from the given numbers. +func Min(numbers ...cty.Value) (cty.Value, error) { + return MinFunc.Call(numbers) +} + +// Max returns the maximum number from the given numbers. +func Max(numbers ...cty.Value) (cty.Value, error) { + return MaxFunc.Call(numbers) +} + +// Int removes the fractional component of the given number returning an +// integer representing the whole number component, rounding towards zero. +// For example, -1.5 becomes -1. +// +// If an infinity is passed to Int, an error is returned. +func Int(num cty.Value) (cty.Value, error) { + if num == cty.PositiveInfinity || num == cty.NegativeInfinity { + return cty.NilVal, fmt.Errorf("can't truncate infinity to an integer") + } + return IntFunc.Call([]cty.Value{num}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go new file mode 100644 index 00000000..e2c77c5d --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/sequence.go @@ -0,0 +1,130 @@ +package stdlib + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +var ConcatFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "seqs", + Type: cty.DynamicPseudoType, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) == 0 { + return cty.NilType, fmt.Errorf("at least one argument is required") + } + + if args[0].Type().IsListType() { + // Possibly we're going to return a list, if all of our other + // args are also lists and we can find a common element type. + tys := make([]cty.Type, len(args)) + for i, val := range args { + ty := val.Type() + if !ty.IsListType() { + tys = nil + break + } + tys[i] = ty + } + + if tys != nil { + commonType, _ := convert.UnifyUnsafe(tys) + if commonType != cty.NilType { + return commonType, nil + } + } + } + + etys := make([]cty.Type, 0, len(args)) + for i, val := range args { + ety := val.Type() + switch { + case ety.IsTupleType(): + etys = append(etys, ety.TupleElementTypes()...) + case ety.IsListType(): + if !val.IsKnown() { + // We need to know the list to count its elements to + // build our tuple type, so any concat of an unknown + // list can't be typed yet. + return cty.DynamicPseudoType, nil + } + + l := val.LengthInt() + subEty := ety.ElementType() + for j := 0; j < l; j++ { + etys = append(etys, subEty) + } + default: + return cty.NilType, function.NewArgErrorf( + i, "all arguments must be lists or tuples; got %s", + ety.FriendlyName(), + ) + } + } + return cty.Tuple(etys), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + switch { + case retType.IsListType(): + // If retType is a list type then we know that all of the + // given values will be lists and that they will either be of + // retType or of something we can convert to retType. 
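+			// For example (hypothetical types): concatenating two
+			// cty.List(cty.String) arguments unifies to a
+			// cty.List(cty.String) retType, so each argument converts
+			// cleanly; mixing in a tuple argument would instead have
+			// produced a tuple retType and taken the branch below.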
+			vals := make([]cty.Value, 0, len(args))
+			for i, list := range args {
+				list, err = convert.Convert(list, retType)
+				if err != nil {
+					// Conversion might fail because we used UnifyUnsafe
+					// to choose our return type.
+					return cty.NilVal, function.NewArgError(i, err)
+				}
+
+				it := list.ElementIterator()
+				for it.Next() {
+					_, v := it.Element()
+					vals = append(vals, v)
+				}
+			}
+			if len(vals) == 0 {
+				return cty.ListValEmpty(retType.ElementType()), nil
+			}
+
+			return cty.ListVal(vals), nil
+		case retType.IsTupleType():
+			// If retType is a tuple type then we could have a mixture of
+			// lists and tuples but we know they all have known values
+			// (because our params don't AllowUnknown) and we know that
+			// concatenating them all together will produce a tuple of
+			// retType because of the work we did in the Type function above.
+			vals := make([]cty.Value, 0, len(args))
+
+			for _, seq := range args {
+				// Both lists and tuples support ElementIterator, so this is easy.
+				it := seq.ElementIterator()
+				for it.Next() {
+					_, v := it.Element()
+					vals = append(vals, v)
+				}
+			}
+
+			return cty.TupleVal(vals), nil
+		default:
+			// should never happen if Type is working correctly above
+			panic("unsupported return type")
+		}
+	},
+})
+
+// Concat takes one or more sequences (lists or tuples) and returns the single
+// sequence that results from concatenating them together in order.
+//
+// If all of the given sequences are lists of the same element type then the
+// result is a list of that type. Otherwise, the result is of a tuple type
+// constructed from the given sequence types.
+func Concat(seqs ...cty.Value) (cty.Value, error) {
+	return ConcatFunc.Call(seqs)
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go
new file mode 100644
index 00000000..100078fd
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/set.go
@@ -0,0 +1,195 @@
+package stdlib
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty/convert"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+var SetHasElementFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "elem",
+			Type:             cty.DynamicPseudoType,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.Bool),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return args[0].HasElement(args[1]), nil
+	},
+})
+
+var SetUnionFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "first_set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name:             "other_sets",
+		Type:             cty.Set(cty.DynamicPseudoType),
+		AllowDynamicType: true,
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.Union(s2)
+	}),
+})
+
+var SetIntersectionFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "first_set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name:             "other_sets",
+		Type:             cty.Set(cty.DynamicPseudoType),
+		AllowDynamicType: true,
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.Intersection(s2)
+	}),
+})
+
+var SetSubtractFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "a",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "b",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		return s1.Subtract(s2)
+	}),
+})
+
+var SetSymmetricDifferenceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name:             "first_set",
+			Type:             cty.Set(cty.DynamicPseudoType),
+			AllowDynamicType: true,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name:             "other_sets",
+		Type:             cty.Set(cty.DynamicPseudoType),
+		AllowDynamicType: true,
+	},
+	Type: setOperationReturnType,
+	Impl: setOperationImpl(func(s1, s2 cty.ValueSet) cty.ValueSet {
+		// The original code reused s1.Subtract(s2) here, which computes a
+		// plain subtraction rather than a symmetric difference. This
+		// assumes cty.ValueSet's SymmetricDifference method, provided
+		// alongside Union, Intersection and Subtract.
+		return s1.SymmetricDifference(s2)
+	}),
+})
+
+// SetHasElement determines whether the given set contains the given value as an
+// element.
+func SetHasElement(set cty.Value, elem cty.Value) (cty.Value, error) {
+	return SetHasElementFunc.Call([]cty.Value{set, elem})
+}
+
+// SetUnion returns a new set containing all of the elements from the given
+// sets, which must have element types that can all be converted to some
+// common type using the standard type unification rules. If conversion
+// is not possible, an error is returned.
+//
+// The union operation is performed after type conversion, which may result
+// in some previously-distinct values being conflated.
+//
+// At least one set must be provided.
+func SetUnion(sets ...cty.Value) (cty.Value, error) {
+	return SetUnionFunc.Call(sets)
+}
+
+// SetIntersection returns a new set containing the elements that exist
+// in all of the given sets, which must have element types that can all be
+// converted to some common type using the standard type unification rules.
+// If conversion is not possible, an error is returned.
+//
+// The intersection operation is performed after type conversion, which may
+// result in some previously-distinct values being conflated.
+//
+// At least one set must be provided.
+func SetIntersection(sets ...cty.Value) (cty.Value, error) {
+	return SetIntersectionFunc.Call(sets)
+}
+
+// SetSubtract returns a new set containing the elements from the
+// first set that are not present in the second set. The sets must have
+// element types that can both be converted to some common type using the
+// standard type unification rules. If conversion is not possible, an error
+// is returned.
+//
+// The subtract operation is performed after type conversion, which may
+// result in some previously-distinct values being conflated.
+func SetSubtract(a, b cty.Value) (cty.Value, error) {
+	return SetSubtractFunc.Call([]cty.Value{a, b})
+}
+
+// SetSymmetricDifference returns a new set containing elements that appear
+// in any of the given sets but not in more than one of them. The sets must
+// have element types that can all be converted to some common type using the
+// standard type unification rules. If conversion is not possible, an error
+// is returned.
+//
+// The difference operation is performed after type conversion, which may
+// result in some previously-distinct values being conflated.
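+//
+// For example (hypothetical sets): the symmetric difference of {1, 2} and
+// {2, 3} is {1, 3}, since 2 appears in both sets.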
+func SetSymmetricDifference(sets ...cty.Value) (cty.Value, error) { + return SetSymmetricDifferenceFunc.Call(sets) +} + +func setOperationReturnType(args []cty.Value) (ret cty.Type, err error) { + var etys []cty.Type + for _, arg := range args { + etys = append(etys, arg.Type().ElementType()) + } + newEty, _ := convert.UnifyUnsafe(etys) + if newEty == cty.NilType { + return cty.NilType, fmt.Errorf("given sets must all have compatible element types") + } + return cty.Set(newEty), nil +} + +func setOperationImpl(f func(s1, s2 cty.ValueSet) cty.ValueSet) function.ImplFunc { + return func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + first := args[0] + first, err = convert.Convert(first, retType) + if err != nil { + return cty.NilVal, function.NewArgError(0, err) + } + + set := first.AsValueSet() + for i, arg := range args[1:] { + arg, err := convert.Convert(arg, retType) + if err != nil { + return cty.NilVal, function.NewArgError(i+1, err) + } + + argSet := arg.AsValueSet() + set = f(set, argSet) + } + return cty.SetValFromValueSet(set), nil + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go new file mode 100644 index 00000000..d7c89fa8 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/stdlib/string.go @@ -0,0 +1,234 @@ +package stdlib + +import ( + "strings" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" + "github.com/apparentlymart/go-textseg/textseg" +) + +var UpperFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + out := strings.ToUpper(in) + return cty.StringVal(out), nil + }, +}) + +var LowerFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + out := strings.ToLower(in) + return cty.StringVal(out), nil + }, +}) + +var ReverseFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := []byte(args[0].AsString()) + out := make([]byte, len(in)) + pos := len(out) + + inB := []byte(in) + for i := 0; i < len(in); { + d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) + cluster := in[i : i+d] + pos -= len(cluster) + copy(out[pos:], cluster) + i += d + } + + return cty.StringVal(string(out)), nil + }, +}) + +var StrlenFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + AllowDynamicType: true, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + l := 0 + + inB := []byte(in) + for i := 0; i < len(in); { + d, _, _ := textseg.ScanGraphemeClusters(inB[i:], true) + l++ + i += d + } + + return cty.NumberIntVal(int64(l)), nil + }, +}) + +var SubstrFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + 
AllowDynamicType: true,
+		},
+		{
+			Name:             "offset",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+		{
+			Name:             "length",
+			Type:             cty.Number,
+			AllowDynamicType: true,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		in := []byte(args[0].AsString())
+		var offset, length int
+
+		var err error
+		err = gocty.FromCtyValue(args[1], &offset)
+		if err != nil {
+			return cty.NilVal, err
+		}
+		err = gocty.FromCtyValue(args[2], &length)
+		if err != nil {
+			return cty.NilVal, err
+		}
+
+		if offset < 0 {
+			totalLenNum, err := Strlen(args[0])
+			if err != nil {
+				// should never happen
+				panic("Strlen returned an error")
+			}
+
+			var totalLen int
+			err = gocty.FromCtyValue(totalLenNum, &totalLen)
+			if err != nil {
+				// should never happen
+				panic("Strlen returned a non-int number")
+			}
+
+			offset += totalLen
+		}
+
+		sub := in
+		pos := 0
+		var i int
+
+		// First we'll seek forward to our offset
+		if offset > 0 {
+			for i = 0; i < len(sub); {
+				d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true)
+				i += d
+				pos++
+				if pos == offset {
+					break
+				}
+				if i >= len(in) {
+					return cty.StringVal(""), nil
+				}
+			}
+
+			sub = sub[i:]
+		}
+
+		if length < 0 {
+			// Taking the remainder of the string is a fast path since
+			// we can just return the rest of the buffer verbatim.
+			return cty.StringVal(string(sub)), nil
+		}
+
+		// Otherwise we need to start seeking forward again until we
+		// reach the length we want.
+		pos = 0
+		for i = 0; i < len(sub); {
+			d, _, _ := textseg.ScanGraphemeClusters(sub[i:], true)
+			i += d
+			pos++
+			if pos == length {
+				break
+			}
+		}
+
+		sub = sub[:i]
+
+		return cty.StringVal(string(sub)), nil
+	},
+})
+
+// Upper is a Function that converts a given string to uppercase.
+func Upper(str cty.Value) (cty.Value, error) {
+	return UpperFunc.Call([]cty.Value{str})
+}
+
+// Lower is a Function that converts a given string to lowercase.
+func Lower(str cty.Value) (cty.Value, error) {
+	return LowerFunc.Call([]cty.Value{str})
+}
+
+// Reverse is a Function that reverses the order of the characters in the
+// given string.
+//
+// As usual, "character" for the sake of this function is a grapheme cluster,
+// so combining diacritics (for example) will be considered together as a
+// single character.
+func Reverse(str cty.Value) (cty.Value, error) {
+	return ReverseFunc.Call([]cty.Value{str})
+}
+
+// Strlen is a Function that returns the length of the given string in
+// characters.
+//
+// As usual, "character" for the sake of this function is a grapheme cluster,
+// so combining diacritics (for example) will be considered together as a
+// single character.
+func Strlen(str cty.Value) (cty.Value, error) {
+	return StrlenFunc.Call([]cty.Value{str})
+}
+
+// Substr is a Function that extracts a sequence of characters from another
+// string and creates a new string.
+//
+// As usual, "character" for the sake of this function is a grapheme cluster,
+// so combining diacritics (for example) will be considered together as a
+// single character.
+//
+// The "offset" index may be negative, in which case it is relative to the
+// end of the given string.
+//
+// The "length" may be -1, in which case the remainder of the string after
+// the given offset will be returned.
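+//
+// For example (hypothetical arguments):
+//
+//     v, _ := Substr(cty.StringVal("hello"), cty.NumberIntVal(1), cty.NumberIntVal(3))
+//     // v is cty.StringVal("ell")
+//     v, _ = Substr(cty.StringVal("hello"), cty.NumberIntVal(-2), cty.NumberIntVal(-1))
+//     // v is cty.StringVal("lo")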
+func Substr(str cty.Value, offset cty.Value, length cty.Value) (cty.Value, error) { + return SubstrFunc.Call([]cty.Value{str, offset, length}) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go b/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go new file mode 100644 index 00000000..3495550a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/function/unpredictable.go @@ -0,0 +1,31 @@ +package function + +import ( + "github.com/zclconf/go-cty/cty" +) + +// Unpredictable wraps a given function such that it retains the same arguments +// and type checking behavior but will return an unknown value when called. +// +// It is recommended that most functions be "pure", which is to say that they +// will always produce the same value given particular input. However, +// sometimes it is necessary to offer functions whose behavior depends on +// some external state, such as reading a file or determining the current time. +// In such cases, an unpredictable wrapper might be used to stand in for +// the function during some sort of prior "checking" phase in order to delay +// the actual effect until later. +// +// While Unpredictable can support a function that isn't pure in its +// implementation, it still expects a function to be pure in its type checking +// behavior, except for the special case of returning cty.DynamicPseudoType +// if it is not yet able to predict its return value based on current argument +// information. +func Unpredictable(f Function) Function { + newSpec := *f.spec // shallow copy + newSpec.Impl = unpredictableImpl + return New(&newSpec) +} + +func unpredictableImpl(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.UnknownVal(retType), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gob.go b/vendor/github.com/zclconf/go-cty/cty/gob.go new file mode 100644 index 00000000..a77dace2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gob.go @@ -0,0 +1,125 @@ +package cty + +import ( + "bytes" + "encoding/gob" + "fmt" + "math/big" +) + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Values to be included in structures encoded with encoding/gob. +// +// Currently it is not possible to represent values of capsule types in gob, +// because the types themselves cannot be represented. +func (val Value) GobEncode() ([]byte, error) { + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + + gv := gobValue{ + Version: 0, + Ty: val.ty, + V: val.v, + } + + err := enc.Encode(gv) + if err != nil { + return nil, fmt.Errorf("error encoding cty.Value: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is an implementation of the gob.GobDecoder interface, which +// inverts the operation performed by GobEncode. See the documentation of +// GobEncode for considerations when using cty.Value instances with gob. +func (val *Value) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gv gobValue + err := dec.Decode(&gv) + if err != nil { + return fmt.Errorf("error decoding cty.Value: %s", err) + } + if gv.Version != 0 { + return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version) + } + + // big.Float seems to, for some reason, lose its "pointerness" when we + // round-trip it, so we'll fix that here. 
+	if bf, ok := gv.V.(big.Float); ok {
+		gv.V = &bf
+	}
+
+	val.ty = gv.Ty
+	val.v = gv.V
+
+	return nil
+}
+
+// GobEncode is an implementation of the gob.GobEncoder interface, which
+// allows Types to be included in structures encoded with encoding/gob.
+//
+// Currently it is not possible to represent capsule types in gob.
+func (t Type) GobEncode() ([]byte, error) {
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+
+	gt := gobType{
+		Version: 0,
+		Impl:    t.typeImpl,
+	}
+
+	err := enc.Encode(gt)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding cty.Type: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is an implementation of the gob.GobDecoder interface, which
+// reverses the encoding performed by GobEncode to allow types to be recovered
+// from gob buffers.
+func (t *Type) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gt gobType
+	err := dec.Decode(&gt)
+	if err != nil {
+		return fmt.Errorf("error decoding cty.Type: %s", err)
+	}
+	if gt.Version != 0 {
+		return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version)
+	}
+
+	t.typeImpl = gt.Impl
+
+	return nil
+}
+
+// Capsule types cannot currently be gob-encoded, because they rely on pointer
+// equality and we have no way to recover the original pointer on decode.
+func (t *capsuleType) GobEncode() ([]byte, error) {
+	return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName))
+}
+
+func (t *capsuleType) GobDecode() ([]byte, error) {
+	return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName))
+}
+
+type gobValue struct {
+	Version int
+	Ty      Type
+	V       interface{}
+}
+
+type gobType struct {
+	Version int
+	Impl    typeImpl
+}
+
+type gobCapsuleTypeImpl struct {
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go b/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
new file mode 100644
index 00000000..a5177d22
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gocty/doc.go
@@ -0,0 +1,7 @@
+// Package gocty deals with converting between cty Values and native go
+// values.
+//
+// It operates under a similar principle to the encoding/json and
+// encoding/xml packages in the standard library, using reflection to
+// populate native Go data structures from cty values and vice-versa.
+package gocty
diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go
new file mode 100644
index 00000000..94ffd2fb
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/gocty/helpers.go
@@ -0,0 +1,43 @@
+package gocty
+
+import (
+	"math/big"
+	"reflect"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+var valueType = reflect.TypeOf(cty.Value{})
+var typeType = reflect.TypeOf(cty.Type{})
+
+var setType = reflect.TypeOf(set.Set{})
+
+var bigFloatType = reflect.TypeOf(big.Float{})
+var bigIntType = reflect.TypeOf(big.Int{})
+
+var emptyInterfaceType = reflect.TypeOf(interface{}(nil))
+
+var stringType = reflect.TypeOf("")
+
+// structTagIndices interrogates the fields of the given type (which must
+// be a struct type, or we'll panic) and returns a map from the cty
+// attribute names declared via struct tags to the indices of the
+// fields holding those tags.
+//
+// This function will panic if two fields within the struct are tagged with
+// the same cty attribute name.
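+//
+// For example, a hypothetical struct such as
+//
+//     type example struct {
+//         Name string `cty:"name"`
+//         Age  int    `cty:"age"`
+//     }
+//
+// would yield map[string]int{"name": 0, "age": 1}.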
+func structTagIndices(st reflect.Type) map[string]int { + ct := st.NumField() + ret := make(map[string]int, ct) + + for i := 0; i < ct; i++ { + field := st.Field(i) + attrName := field.Tag.Get("cty") + if attrName != "" { + ret[attrName] = i + } + } + + return ret +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/in.go b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go new file mode 100644 index 00000000..642501b2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/in.go @@ -0,0 +1,528 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/set" +) + +// ToCtyValue produces a cty.Value from a Go value. The result will conform +// to the given type, or an error will be returned if this is not possible. +// +// The target type serves as a hint to resolve ambiguities in the mapping. +// For example, the Go type set.Set tells us that the value is a set but +// does not describe the set's element type. This also allows for convenient +// conversions, such as populating a set from a slice rather than having to +// first explicitly instantiate a set.Set. +// +// The audience of this function is assumed to be the developers of Go code +// that is integrating with cty, and thus the error messages it returns are +// presented from Go's perspective. These messages are thus not appropriate +// for display to end-users. An error returned from ToCtyValue represents a +// bug in the calling program, not user error. +func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) { + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time toCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given Type is. 
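+	// As a hypothetical example, converting a Go slice to a cty list:
+	//
+	//     v, err := ToCtyValue([]string{"a", "b"}, cty.List(cty.String))
+	//
+	// is expected to yield a cty.ListVal of cty.StringVal("a") and
+	// cty.StringVal("b").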
+ path := make(cty.Path, 0) + return toCtyValue(reflect.ValueOf(val), ty, path) +} + +func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) { + + switch ty { + case cty.Bool: + return toCtyBool(val, path) + case cty.Number: + return toCtyNumber(val, path) + case cty.String: + return toCtyString(val, path) + case cty.DynamicPseudoType: + return toCtyDynamic(val, path) + } + + switch { + case ty.IsListType(): + return toCtyList(val, ty.ElementType(), path) + case ty.IsMapType(): + return toCtyMap(val, ty.ElementType(), path) + case ty.IsSetType(): + return toCtySet(val, ty.ElementType(), path) + case ty.IsObjectType(): + return toCtyObject(val, ty.AttributeTypes(), path) + case ty.IsTupleType(): + return toCtyTuple(val, ty.TupleElementTypes(), path) + case ty.IsCapsuleType(): + return toCtyCapsule(val, ty, path) + } + + // We should never fall out here + return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty) +} + +func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Bool), nil + } + + switch val.Kind() { + + case reflect.Bool: + return cty.BoolVal(val.Bool()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind()) + + } + +} + +func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Number), nil + } + + switch val.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.NumberIntVal(val.Int()), nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.NumberUIntVal(val.Uint()), nil + + case reflect.Float32, reflect.Float64: + return cty.NumberFloatVal(val.Float()), nil + + case reflect.Struct: + if val.Type().AssignableTo(bigIntType) { + bigInt := val.Interface().(big.Int) + bigFloat := (&big.Float{}).SetInt(&bigInt) + val = reflect.ValueOf(*bigFloat) + } + + if val.Type().AssignableTo(bigFloatType) { + bigFloat := val.Interface().(big.Float) + return cty.NumberVal(&bigFloat), nil + } + + fallthrough + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind()) + + } + +} + +func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.String), nil + } + + switch val.Kind() { + + case reflect.String: + return cty.StringVal(val.String()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind()) + + } + +} + +func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.List(ety)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.List(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.ListValEmpty(ety), nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. 
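+		// Each element conversion below records its position as a
+		// cty.IndexStep so that an error for, say, element 2 carries a
+		// path identifying exactly which element failed.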
+ path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, val.Len()) + for i := range vals { + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ListVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety)) + + } +} + +func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Map(ety)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Map(ety)), nil + } + + if val.Len() == 0 { + return cty.MapValEmpty(ety), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make(map[string]cty.Value, val.Len()) + for _, kv := range val.MapKeys() { + k := kv.String() + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.MapVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety)) + + } +} + +func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Set(ety)), nil + } + + var vals []cty.Value + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Set(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, val.Len()) + for i := range vals { + var err error + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + case reflect.Struct: + + if !val.Type().AssignableTo(setType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety)) + } + + rawSet := val.Interface().(set.Set) + inVals := rawSet.Values() + + if len(inVals) == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, len(inVals)) + for i := range inVals { + var err error + vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety)) + + } + + return cty.SetVal(vals), nil +} + +func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, 
path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + haveKeys := make(map[string]struct{}, val.Len()) + for _, kv := range val.MapKeys() { + haveKeys[kv.String()] = struct{}{} + } + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + var err error + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if _, have := haveKeys[k]; !have { + vals[k] = cty.NullVal(at) + continue + } + + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + case reflect.Struct: + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + attrFields := structTagIndices(val.Type()) + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if fieldIdx, have := attrFields[k]; have { + var err error + vals[k], err = toCtyValue(val.Field(fieldIdx), at, path) + if err != nil { + return cty.NilVal, err + } + } else { + vals[k] = cty.NullVal(at) + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes)) + + } +} + +func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + if val.Len() != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + case reflect.Struct: + fieldCount := val.Type().NumField() + if fieldCount != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. 
+ path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Field(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes)) + + } +} + +func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(capsuleType), nil + } + + if val.Kind() != reflect.Ptr { + if !val.CanAddr() { + return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType) + } + + val = val.Addr() + } + + if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) { + return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType) + } + + return cty.CapsuleVal(capsuleType, val.Interface()), nil +} + +func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.DynamicPseudoType), nil + } + + switch val.Kind() { + + case reflect.Struct: + if !val.Type().AssignableTo(valueType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type()) + } + + return val.Interface().(cty.Value), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind()) + + } + +} + +// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three +// possible outcomes: +// +// - Given value isn't a pointer, so it's just returned as-is. +// - Given value is a non-nil pointer, in which case it is dereferenced +// and the result returned. +// - Given value is a nil pointer, in which case an invalid value is returned. +// +// For nested pointer types, like **int, they are all dereferenced in turn +// until a non-pointer value is found, or until a nil pointer is encountered. +func toCtyUnwrapPointer(val reflect.Value) reflect.Value { + for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { + if val.IsNil() { + return reflect.Value{} + } + + val = val.Elem() + } + + return val +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/out.go b/vendor/github.com/zclconf/go-cty/cty/gocty/out.go new file mode 100644 index 00000000..99b65a76 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/out.go @@ -0,0 +1,705 @@ +package gocty + +import ( + "math/big" + "reflect" + + "math" + + "github.com/zclconf/go-cty/cty" +) + +// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer, +// using a fixed set of conversion rules. +// +// This function considers its audience to be the creator of the cty Value +// given, and thus the error messages it generates are (unlike with ToCtyValue) +// presented in cty terminology that is generally appropriate to return to +// end-users in applications where cty data structures are built from +// user-provided configuration. 
In particular this means that if incorrect +// target types are provided by the calling application the resulting error +// messages are likely to be confusing, since we assume that the given target +// type is correct and the cty.Value is where the error lies. +// +// If an error is returned, the target data structure may have been partially +// populated, but the degree to which this is true is an implementation +// detail that the calling application should not rely on. +// +// The function will panic if given a non-pointer as the Go value target, +// since that is considered to be a bug in the calling program. +func FromCtyValue(val cty.Value, target interface{}) error { + tVal := reflect.ValueOf(target) + if tVal.Kind() != reflect.Ptr { + panic("target value is not a pointer") + } + if tVal.IsNil() { + panic("target value is nil pointer") + } + + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time fromCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given cty.Value is. + path := make(cty.Path, 0) + return fromCtyValue(val, tVal, path) +} + +func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error { + ty := val.Type() + + deepTarget := fromCtyPopulatePtr(target, false) + + // If we're decoding into a cty.Value then we just pass through the + // value as-is, to enable partial decoding. This is the only situation + // where unknown values are permitted. + if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) { + deepTarget.Set(reflect.ValueOf(val)) + return nil + } + + // Lists and maps can be nil without indirection, but everything else + // requires a pointer and we set it immediately to nil. + // We also make an exception for capsule types because we want to handle + // pointers specially for these. + // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while + // other types can assume no nulls after this point.) + if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() { + target = fromCtyPopulatePtr(target, true) + if target.Kind() != reflect.Ptr { + return path.NewErrorf("null value is not allowed") + } + + target.Set(reflect.Zero(target.Type())) + return nil + } + + target = deepTarget + + if !val.IsKnown() { + return path.NewErrorf("value must be known") + } + + switch ty { + case cty.Bool: + return fromCtyBool(val, target, path) + case cty.Number: + return fromCtyNumber(val, target, path) + case cty.String: + return fromCtyString(val, target, path) + } + + switch { + case ty.IsListType(): + return fromCtyList(val, target, path) + case ty.IsMapType(): + return fromCtyMap(val, target, path) + case ty.IsSetType(): + return fromCtySet(val, target, path) + case ty.IsObjectType(): + return fromCtyObject(val, target, path) + case ty.IsTupleType(): + return fromCtyTuple(val, target, path) + case ty.IsCapsuleType(): + return fromCtyCapsule(val, target, path) + } + + // We should never fall out here; reaching here indicates a bug in this + // function. 
+ return path.NewErrorf("unsupported source type %#v", ty) +} + +func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Bool: + if val.True() { + target.Set(reflect.ValueOf(true)) + } else { + target.Set(reflect.ValueOf(false)) + } + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error { + bf := val.AsBigFloat() + + switch target.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fromCtyNumberInt(bf, target, path) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fromCtyNumberUInt(bf, target, path) + + case reflect.Float32, reflect.Float64: + return fromCtyNumberFloat(bf, target, path) + + case reflect.Struct: + return fromCtyNumberBig(bf, target, path) + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var min int64 + var max int64 + switch target.Type().Bits() { + case 8: + min = math.MinInt8 + max = math.MaxInt8 + case 16: + min = math.MinInt16 + max = math.MaxInt16 + case 32: + min = math.MinInt32 + max = math.MaxInt32 + case 64: + min = math.MinInt64 + max = math.MaxInt64 + default: + panic("weird number of bits in target int") + } + + iv, accuracy := bf.Int64() + if accuracy != big.Exact || iv < min || iv > max { + return path.NewErrorf("value must be a whole number, between %d and %d", min, max) + } + + target.Set(reflect.ValueOf(iv).Convert(target.Type())) + + return nil +} + +func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var max uint64 + switch target.Type().Bits() { + case 8: + max = math.MaxUint8 + case 16: + max = math.MaxUint16 + case 32: + max = math.MaxUint32 + case 64: + max = math.MaxUint64 + default: + panic("weird number of bits in target uint") + } + + iv, accuracy := bf.Uint64() + if accuracy != big.Exact || iv > max { + return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max) + } + + target.Set(reflect.ValueOf(iv).Convert(target.Type())) + + return nil +} + +func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.Float32: + fv, accuracy := bf.Float32() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. + if math.IsInf(float64(fv), 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat32, math.MaxFloat32) + } + } + target.Set(reflect.ValueOf(fv)) + return nil + case reflect.Float64: + fv, accuracy := bf.Float64() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. 
+ if math.IsInf(fv, 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64) + } + } + target.Set(reflect.ValueOf(fv)) + return nil + default: + panic("unsupported kind of float") + } +} + +func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error { + switch { + + case bigFloatType.AssignableTo(target.Type()): + // Easy! + target.Set(reflect.ValueOf(bf).Elem()) + return nil + + case bigIntType.AssignableTo(target.Type()): + bi, accuracy := bf.Int(nil) + if accuracy != big.Exact { + return path.NewErrorf("value must be a whole number") + } + target.Set(reflect.ValueOf(bi).Elem()) + return nil + + default: + return likelyRequiredTypesError(path, target) + } +} + +func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.String: + target.Set(reflect.ValueOf(val.AsString())) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a list of length %d", target.Len()) + } + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Map: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + tv := reflect.MakeMap(target.Type()) + et := target.Type().Elem() + + path = append(path, nil) + + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + ks := key.AsString() + + targetElem := reflect.New(et) + err = fromCtyValue(val, targetElem, path) + + tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem()) + + return err != nil + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + i := 0 + var 
err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a set of length %d", target.Len()) + } + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + return nil + + // TODO: decode into set.Set instance + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + attrTypes := val.Type().AttributeTypes() + targetFields := structTagIndices(target.Type()) + + path = append(path, nil) + + for k, i := range targetFields { + if _, exists := attrTypes[k]; !exists { + // If the field in question isn't able to represent nil, + // that's an error. + fk := target.Field(i).Kind() + switch fk { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: + // okay + default: + return path.NewErrorf("missing required attribute %q", k) + } + } + } + + for k := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + fieldIdx, exists := targetFields[k] + if !exists { + return path.NewErrorf("unsupported attribute %q", k) + } + + ev := val.GetAttr(k) + + targetField := target.Field(fieldIdx) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + elemTypes := val.Type().TupleElementTypes() + fieldCount := target.Type().NumField() + + if fieldCount != len(elemTypes) { + return path.NewErrorf("a tuple of %d elements is required", fieldCount) + } + + path = append(path, nil) + + for i := range elemTypes { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + ev := val.Index(cty.NumberIntVal(int64(i))) + + targetField := target.Field(i) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error { + + if target.Kind() == reflect.Ptr { + // Walk through indirection until we get to the last pointer, + // which we might set to null below. + target = fromCtyPopulatePtr(target, true) + + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + // Since a capsule contains a pointer to an object, we'll preserve + // that pointer on the way out and thus allow the caller to recover + // the original object, rather than a copy of it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Elem().Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. 
This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + target.Set(reflect.ValueOf(val.EncapsulatedValue())) + + return nil + } else { + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + // If our target isn't a pointer then we will attempt to copy + // the encapsulated value into it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + // We know that EncapsulatedValue is always a pointer, so we + // can safely call .Elem on its reflect.Value. + target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem()) + + return nil + } + +} + +// fromCtyPopulatePtr recognizes when target is a pointer type and allocates +// a value to assign to that pointer, which it returns. +// +// If the given value has multiple levels of indirection, like **int, these +// will be processed in turn so that the return value is guaranteed to be +// a non-pointer. +// +// As an exception, if decodingNull is true then the returned value will be +// the final level of pointer, if any, so that the caller can assign it +// as nil to represent a null value. If the given target value is not a pointer +// at all then the returned value will be just the given target, so the caller +// must test if the returned value is a pointer before trying to assign nil +// to it. +func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value { + for { + if target.Kind() == reflect.Interface && !target.IsNil() { + e := target.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + target = e + } + } + + if target.Kind() != reflect.Ptr { + break + } + + // Stop early if we're decodingNull and we've found our last indirection + if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() { + break + } + + if target.IsNil() { + target.Set(reflect.New(target.Type().Elem())) + } + + target = target.Elem() + } + return target +} + +// likelyRequiredTypesError returns an error that states which types are +// acceptable by making some assumptions about what types we support for +// each target Go kind. It's not a precise science but it allows us to return +// an error message that is cty-user-oriented rather than Go-oriented. +// +// Generally these error messages should be a matter of last resort, since +// the calling application should be validating user-provided value types +// before decoding anyway. 
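+//
+// As a hypothetical sketch of the behavior described above (the variable
+// name n is illustrative only), decoding a cty string into an int target
+// yields a message phrased in cty terms rather than Go terms:
+//
+//     var n int
+//     err := FromCtyValue(cty.StringVal("hello"), &n)
+//     // err reports "number value is required", because the int target
+//     // implies that a cty.Number was expected.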
+func likelyRequiredTypesError(path cty.Path, target reflect.Value) error { + switch target.Kind() { + + case reflect.Bool: + return path.NewErrorf("bool value is required") + + case reflect.String: + return path.NewErrorf("string value is required") + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fallthrough + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fallthrough + case reflect.Float32, reflect.Float64: + return path.NewErrorf("number value is required") + + case reflect.Slice, reflect.Array: + return path.NewErrorf("list or set value is required") + + case reflect.Map: + return path.NewErrorf("map or object value is required") + + case reflect.Struct: + switch { + + case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType): + return path.NewErrorf("number value is required") + + case target.Type().AssignableTo(setType): + return path.NewErrorf("set or list value is required") + + default: + return path.NewErrorf("object or tuple value is required") + + } + + default: + // We should avoid getting into this path, since this error + // message is rather useless. + return path.NewErrorf("incorrect type") + + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go new file mode 100644 index 00000000..ce4c8f1e --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/gocty/type_implied.go @@ -0,0 +1,108 @@ +package gocty + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts +// to find a suitable cty.Type instance that could be used for a conversion +// with ToCtyValue. +// +// This allows -- for simple situations at least -- types to be defined just +// once in Go and the cty types derived from the Go types, but in the process +// it makes some assumptions that may be undesirable so applications are +// encouraged to build their cty types directly if exacting control is +// required. +// +// Not all Go types can be represented as cty types, so an error may be +// returned which is usually considered to be a bug in the calling program. +// In particular, ImpliedType will never use capsule types in its returned +// type, because it cannot know the capsule types supported by the calling +// program. 
+func ImpliedType(gv interface{}) (cty.Type, error) { + rt := reflect.TypeOf(gv) + var path cty.Path + return impliedType(rt, path) +} + +func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) { + switch rt.Kind() { + + case reflect.Ptr: + return impliedType(rt.Elem(), path) + + // Primitive types + case reflect.Bool: + return cty.Bool, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.Number, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.Number, nil + case reflect.Float32, reflect.Float64: + return cty.Number, nil + case reflect.String: + return cty.String, nil + + // Collection types + case reflect.Slice: + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.List(ety), nil + case reflect.Map: + if !stringType.AssignableTo(rt.Key()) { + return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt) + } + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.Map(ety), nil + + // Structural types + case reflect.Struct: + return impliedStructType(rt, path) + + default: + return cty.NilType, path.NewErrorf("no cty.Type for %s", rt) + } +} + +func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if valueType.AssignableTo(rt) { + // Special case: cty.Value represents cty.DynamicPseudoType, for + // type conformance checking. + return cty.DynamicPseudoType, nil + } + + fieldIdxs := structTagIndices(rt) + if len(fieldIdxs) == 0 { + return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt) + } + + atys := make(map[string]cty.Type, len(fieldIdxs)) + + { + // Temporary extension of path for attributes + path := append(path, nil) + + for k, fi := range fieldIdxs { + path[len(path)-1] = cty.GetAttrStep{Name: k} + + ft := rt.Field(fi).Type + aty, err := impliedType(ft, path) + if err != nil { + return cty.NilType, err + } + + atys[k] = aty + } + } + + return cty.Object(atys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/helper.go b/vendor/github.com/zclconf/go-cty/cty/helper.go new file mode 100644 index 00000000..1b88e9fa --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/helper.go @@ -0,0 +1,99 @@ +package cty + +import ( + "fmt" +) + +// anyUnknown is a helper to easily check if a set of values contains any +// unknowns, for operations that short-circuit to return unknown in that case. +func anyUnknown(values ...Value) bool { + for _, val := range values { + if val.v == unknown { + return true + } + } + return false +} + +// typeCheck tests whether all of the given values belong to the given type. +// If the given types are a mixture of the given type and the dynamic +// pseudo-type then a short-circuit dynamic value is returned. If the given +// values are all of the correct type but at least one is unknown then +// a short-circuit unknown value is returned. If any other types appear then +// an error is returned. Otherwise (finally!) the result is nil, nil. 
+func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) {
+	hasDynamic := false
+	hasUnknown := false
+
+	for i, val := range values {
+		if val.ty == DynamicPseudoType {
+			hasDynamic = true
+			continue
+		}
+
+		if !val.Type().Equals(required) {
+			return nil, fmt.Errorf(
+				"type mismatch: want %s but value %d is %s",
+				required.FriendlyName(),
+				i, val.ty.FriendlyName(),
+			)
+		}
+
+		if val.v == unknown {
+			hasUnknown = true
+		}
+	}
+
+	if hasDynamic {
+		return &DynamicVal, nil
+	}
+
+	if hasUnknown {
+		ret := UnknownVal(ret)
+		return &ret, nil
+	}
+
+	return nil, nil
+}
+
+// mustTypeCheck is a wrapper around typeCheck that immediately panics if
+// any error is returned.
+func mustTypeCheck(required Type, ret Type, values ...Value) *Value {
+	shortCircuit, err := typeCheck(required, ret, values...)
+	if err != nil {
+		panic(err)
+	}
+	return shortCircuit
+}
+
+// forceShortCircuitType takes the return value from mustTypeCheck and
+// replaces it with an unknown of the given type if the original value was
+// DynamicVal.
+//
+// This is useful for operations that are specified to always return a
+// particular type, since then a dynamic result can safely be "upgraded" to
+// a strongly-typed unknown, which then allows subsequent operations to
+// be actually type-checked.
+//
+// It is safe to use this only if the operation in question is defined as
+// returning either a value of the given type or panicking, since we know
+// then that subsequent operations won't run if the operation panics.
+//
+// If the given short-circuit value is *not* DynamicVal then it must be
+// of the given type, or this function will panic.
+func forceShortCircuitType(shortCircuit *Value, ty Type) *Value {
+	if shortCircuit == nil {
+		return nil
+	}
+
+	if shortCircuit.ty == DynamicPseudoType {
+		ret := UnknownVal(ty)
+		return &ret
+	}
+
+	if !shortCircuit.ty.Equals(ty) {
+		panic("forceShortCircuitType got value of wrong type")
+	}
+
+	return shortCircuit
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json.go b/vendor/github.com/zclconf/go-cty/cty/json.go
new file mode 100644
index 00000000..c421a62e
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json.go
@@ -0,0 +1,176 @@
+package cty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+// MarshalJSON is an implementation of json.Marshaler that allows Type
+// instances to be serialized as JSON.
+//
+// All standard types can be serialized, but capsule types cannot since there
+// is no way to automatically recover the original pointer and capsule types
+// compare by equality.
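+//
+// For example, under this encoding:
+//
+//     cty.String             // serializes as: "string"
+//     cty.List(cty.Bool)     // serializes as: ["list","bool"]
+//     cty.DynamicPseudoType  // serializes as: "dynamic"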
+func (t Type) MarshalJSON() ([]byte, error) { + switch impl := t.typeImpl.(type) { + case primitiveType: + switch impl.Kind { + case primitiveTypeBool: + return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil + case primitiveTypeNumber: + return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil + case primitiveTypeString: + return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil + default: + panic("unknown primitive type kind") + } + case typeList, typeMap, typeSet: + buf := &bytes.Buffer{} + etyJSON, err := t.ElementType().MarshalJSON() + if err != nil { + return nil, err + } + buf.WriteRune('[') + switch impl.(type) { + case typeList: + buf.WriteString(`"list"`) + case typeMap: + buf.WriteString(`"map"`) + case typeSet: + buf.WriteString(`"set"`) + } + buf.WriteRune(',') + buf.Write(etyJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeObject: + buf := &bytes.Buffer{} + atysJSON, err := json.Marshal(t.AttributeTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["object",`) + buf.Write(atysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeTuple: + buf := &bytes.Buffer{} + etysJSON, err := json.Marshal(t.TupleElementTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["tuple",`) + buf.Write(etysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case pseudoTypeDynamic: + return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil + case *capsuleType: + return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName()) + default: + // should never happen + panic("unknown type implementation") + } +} + +// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of +// MarshalJSON for information on the limitations of JSON serialization of +// types. +func (t *Type) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + *t = Bool + case "number": + *t = Number + case "string": + *t = String + case "dynamic": + *t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = List(ety) + case "map": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Map(ety) + case "set": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Set(ety) + case "object": + var atys map[string]Type + err = dec.Decode(&atys) + if err != nil { + return err + } + *t = Object(atys) + case "tuple": + var etys []Type + err = dec.Decode(&etys) + if err != nil { + return err + } + *t = Tuple(etys) + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/doc.go 
b/vendor/github.com/zclconf/go-cty/cty/json/doc.go
new file mode 100644
index 00000000..8916513d
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/doc.go
@@ -0,0 +1,11 @@
+// Package json provides functions for serializing cty types and values in
+// JSON format, and for decoding them again.
+//
+// Since the cty type system is a superset of the JSON type system,
+// round-tripping through JSON is lossy unless type information is provided
+// both at encoding time and decoding time. Callers of this package are
+// therefore encouraged to define their expected structure as a cty.Type
+// and pass it in consistently both when encoding and when decoding, though
+// default (type-lossy) behavior is provided for situations where the precise
+// representation of the data is not significant.
+package json
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/marshal.go b/vendor/github.com/zclconf/go-cty/cty/json/marshal.go
new file mode 100644
index 00000000..f7bea1a2
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/marshal.go
@@ -0,0 +1,189 @@
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"sort"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error {
+	// If we're going to decode as DynamicPseudoType then we need to save
+	// dynamic type information to recover the real type.
+	if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType {
+		return marshalDynamic(val, path, b)
+	}
+
+	if val.IsNull() {
+		b.WriteString("null")
+		return nil
+	}
+
+	if !val.IsKnown() {
+		return path.NewErrorf("value is not known")
+	}
+
+	// The caller should've guaranteed that the given val is conformant with
+	// the given type t, so we'll proceed under that assumption here.
+ + switch { + case t.IsPrimitiveType(): + switch t { + case cty.String: + json, err := json.Marshal(val.AsString()) + if err != nil { + return path.NewErrorf("failed to serialize value: %s", err) + } + b.Write(json) + return nil + case cty.Number: + if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) { + return path.NewErrorf("cannot serialize infinity as JSON") + } + b.WriteString(val.AsBigFloat().Text('f', -1)) + return nil + case cty.Bool: + if val.True() { + b.WriteString("true") + } else { + b.WriteString("false") + } + return nil + default: + panic("unsupported primitive type") + } + case t.IsListType(), t.IsSetType(): + b.WriteRune('[') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune(']') + return nil + case t.IsMapType(): + b.WriteRune('{') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune('}') + return nil + case t.IsTupleType(): + b.WriteRune('[') + etys := t.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + for it.Next() { + if i > 0 { + b.WriteRune(',') + } + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + i++ + } + b.WriteRune(']') + return nil + case t.IsObjectType(): + b.WriteRune('{') + atys := t.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + for i, k := range names { + aty := atys[k] + if i > 0 { + b.WriteRune(',') + } + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(av, aty, path, b) + if err != nil { + return err + } + } + b.WriteRune('}') + return nil + case t.IsCapsuleType(): + rawVal := val.EncapsulatedValue() + jsonVal, err := json.Marshal(rawVal) + if err != nil { + return path.NewError(err) + } + b.Write(jsonVal) + return nil + default: + // should never happen + return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
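+//
+// For example, cty.BoolVal(true) conformed to cty.DynamicPseudoType is
+// written as the wrapper object:
+//
+//     {"value":true,"type":"bool"}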
+func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error {
+	typeJSON, err := MarshalType(val.Type())
+	if err != nil {
+		return path.NewErrorf("failed to serialize type: %s", err)
+	}
+	b.WriteString(`{"value":`)
+	err = marshal(val, val.Type(), path, b)
+	if err != nil {
+		return path.NewErrorf("failed to serialize value: %s", err)
+	}
+	b.WriteString(`,"type":`)
+	b.Write(typeJSON)
+	b.WriteRune('}')
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/simple.go b/vendor/github.com/zclconf/go-cty/cty/json/simple.go
new file mode 100644
index 00000000..507c9cc2
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/simple.go
@@ -0,0 +1,41 @@
+package json
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// SimpleJSONValue is a wrapper around cty.Value that adds implementations of
+// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic
+// encoding and decoding of values.
+//
+// The Marshal and Unmarshal functions both take extra type information to
+// inform the encoding and decoding process so that all of the cty types
+// can be represented even though JSON's type system is a subset.
+//
+// SimpleJSONValue instead takes the approach of discarding the value's type
+// information and then deriving a new type from the stored structure when
+// decoding. This results in the same data being returned but not necessarily
+// with exactly the same type.
+//
+// For information on how types are inferred when decoding, see the
+// documentation of the function ImpliedType.
+type SimpleJSONValue struct {
+	cty.Value
+}
+
+// MarshalJSON is an implementation of json.Marshaler. See the documentation
+// of SimpleJSONValue for more information.
+func (v SimpleJSONValue) MarshalJSON() ([]byte, error) {
+	return Marshal(v.Value, v.Type())
+}
+
+// UnmarshalJSON is an implementation of json.Unmarshaler. See the
+// documentation of SimpleJSONValue for more information.
+func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error {
+	t, err := ImpliedType(buf)
+	if err != nil {
+		return err
+	}
+	v.Value, err = Unmarshal(buf, t)
+	return err
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type.go b/vendor/github.com/zclconf/go-cty/cty/json/type.go
new file mode 100644
index 00000000..9131c6c7
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/type.go
@@ -0,0 +1,23 @@
+package json
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// MarshalType returns a JSON serialization of the given type.
+//
+// This is just a thin wrapper around t.MarshalJSON, for symmetry with
+// UnmarshalType.
+func MarshalType(t cty.Type) ([]byte, error) {
+	return t.MarshalJSON()
+}
+
+// UnmarshalType decodes a JSON serialization of the given type as produced
+// by either Type.MarshalJSON or MarshalType.
+//
+// This is a convenience wrapper around Type.UnmarshalJSON.
+func UnmarshalType(buf []byte) (cty.Type, error) {
+	var t cty.Type
+	err := t.UnmarshalJSON(buf)
+	return t, err
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go
new file mode 100644
index 00000000..1a973066
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/type_implied.go
@@ -0,0 +1,171 @@
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+// ImpliedType returns the cty Type implied by the structure of the given
+// JSON-compliant buffer. This function implements the default type mapping
+// behavior used when decoding arbitrary JSON without explicit cty Type
+// information.
+//
+// The rules are as follows:
+//
+// JSON strings, numbers and bools map to their equivalent primitive type in
+// cty.
+//
+// JSON objects map to cty object types, with the attributes defined by the
+// object keys and the types of their values.
+//
+// JSON arrays map to cty tuple types, with the elements defined by the
+// types of the array members.
+//
+// Any nulls are typed as DynamicPseudoType, so callers of this function
+// must be prepared to deal with this. Callers that do not wish to deal with
+// dynamic typing should not use this function and should instead describe
+// their required types explicitly with a cty.Type instance when decoding.
+//
+// Any JSON syntax errors will be returned as an error, and the type will
+// be the invalid value cty.NilType.
+func ImpliedType(buf []byte) (cty.Type, error) {
+	r := bytes.NewReader(buf)
+	dec := json.NewDecoder(r)
+	dec.UseNumber()
+
+	ty, err := impliedType(dec)
+	if err != nil {
+		return cty.NilType, err
+	}
+
+	if dec.More() {
+		return cty.NilType, fmt.Errorf("extraneous data after JSON object")
+	}
+
+	return ty, nil
+}
+
+func impliedType(dec *json.Decoder) (cty.Type, error) {
+	tok, err := dec.Token()
+	if err != nil {
+		return cty.NilType, err
+	}
+
+	return impliedTypeForTok(tok, dec)
+}
+
+func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) {
+	if tok == nil {
+		return cty.DynamicPseudoType, nil
+	}
+
+	switch ttok := tok.(type) {
+	case bool:
+		return cty.Bool, nil
+
+	case json.Number:
+		return cty.Number, nil
+
+	case string:
+		return cty.String, nil
+
+	case json.Delim:
+
+		switch rune(ttok) {
+		case '{':
+			return impliedObjectType(dec)
+		case '[':
+			return impliedTupleType(dec)
+		default:
+			return cty.NilType, fmt.Errorf("unexpected token %q", ttok)
+		}
+
+	default:
+		return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok)
+	}
+}
+
+func impliedObjectType(dec *json.Decoder) (cty.Type, error) {
+	// By the time we get in here, we've already consumed the { delimiter
+	// and so our next token should be the first object key.
+
+	var atys map[string]cty.Type
+
+	for {
+		// Read the object key first
+		tok, err := dec.Token()
+		if err != nil {
+			return cty.NilType, err
+		}
+
+		if ttok, ok := tok.(json.Delim); ok {
+			if rune(ttok) != '}' {
+				return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok)
+			}
+			break
+		}
+
+		key, ok := tok.(string)
+		if !ok {
+			return cty.NilType, fmt.Errorf("expected string but found %T", tok)
+		}
+
+		// Now read the value
+		tok, err = dec.Token()
+		if err != nil {
+			return cty.NilType, err
+		}
+
+		aty, err := impliedTypeForTok(tok, dec)
+		if err != nil {
+			return cty.NilType, err
+		}
+
+		if atys == nil {
+			atys = make(map[string]cty.Type)
+		}
+		atys[key] = aty
+	}
+
+	if len(atys) == 0 {
+		return cty.EmptyObject, nil
+	}
+
+	return cty.Object(atys), nil
+}
+
+func impliedTupleType(dec *json.Decoder) (cty.Type, error) {
+	// By the time we get in here, we've already consumed the [ delimiter
+	// and so our next token should be the first value.
+ + var etys []cty.Type + + for { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) != ']' { + return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok) + } + break + } + + ety, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + etys = append(etys, ety) + } + + if len(etys) == 0 { + return cty.EmptyTuple, nil + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go new file mode 100644 index 00000000..155f0b8a --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/json/unmarshal.go @@ -0,0 +1,459 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return cty.NilVal, path.NewError(err) + } + + if tok == nil { + return cty.NullVal(t), nil + } + + if t == cty.DynamicPseudoType { + return unmarshalDynamic(buf, path) + } + + switch { + case t.IsPrimitiveType(): + val, err := unmarshalPrimitive(tok, t, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case t.IsListType(): + return unmarshalList(buf, t.ElementType(), path) + case t.IsSetType(): + return unmarshalSet(buf, t.ElementType(), path) + case t.IsMapType(): + return unmarshalMap(buf, t.ElementType(), path) + case t.IsTupleType(): + return unmarshalTuple(buf, t.TupleElementTypes(), path) + case t.IsObjectType(): + return unmarshalObject(buf, t.AttributeTypes(), path) + case t.IsCapsuleType(): + return unmarshalCapsule(buf, t, path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName()) + } +} + +func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) { + + switch t { + case cty.Bool: + switch v := tok.(type) { + case bool: + return cty.BoolVal(v), nil + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("bool is required") + } + case cty.Number: + if v, ok := tok.(json.Number); ok { + tok = string(v) + } + switch v := tok.(type) { + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("number is required") + } + case cty.String: + switch v := tok.(type) { + case string: + return cty.StringVal(v), nil + case json.Number: + return cty.StringVal(string(v)), nil + case bool: + val, err := convert.Convert(cty.BoolVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("string is required") + } + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int64 + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(idx), + } + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, 
path.NewErrorf("failed to read list value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(ety), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read set value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(cty.String), + } + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map key: %s", err) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int + + for dec.More() { + if idx >= len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("too many tuple elements (need %d)", len(etys)) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(idx)), + } + ety := etys[idx] + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) != len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys)) + } + + if len(vals) == 0 { + return cty.EmptyTupleVal, nil + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + 
vals := make(map[string]cty.Value) + + { + objPath := path // some errors report from the object's perspective + path := append(path, nil) // path to a specific attribute + + for dec.More() { + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object key: %s", err) + } + + aty, ok := atys[k] + if !ok { + return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k) + } + + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object value: %s", err) + } + + el, err := unmarshal(rawVal, aty, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + // Make sure we have a value for every attribute + for k, aty := range atys { + if _, exists := vals[k]; !exists { + vals[k] = cty.NullVal(aty) + } + } + + if len(vals) == 0 { + return cty.EmptyObjectVal, nil + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + rawType := t.EncapsulatedType() + ptrPtr := reflect.New(reflect.PtrTo(rawType)) + ptrPtr.Elem().Set(reflect.New(rawType)) + ptr := ptrPtr.Elem().Interface() + err := json.Unmarshal(buf, ptr) + if err != nil { + return cty.NilVal, path.NewError(err) + } + + return cty.CapsuleVal(t, ptr), nil +} + +func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + var t cty.Type + var valBody []byte // defer actual decoding until we know the type + + for dec.More() { + var err error + + key, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err) + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err) + } + + switch key { + case "type": + err := json.Unmarshal(rawVal, &t) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err) + } + case "value": + valBody = rawVal + default: + return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key) + } + + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if t == cty.NilType { + return cty.NilVal, path.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value") + } + + val, err := Unmarshal([]byte(valBody), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil +} + +func requireDelim(dec *json.Decoder, d rune) error { + tok, err := dec.Token() + if err != nil { + return err + } + + if tok != json.Delim(d) { + return fmt.Errorf("missing expected %c", d) + } + + return nil +} + +func requireObjectKey(dec *json.Decoder) (string, error) { + tok, err := dec.Token() + if err != nil { + return "", err + } + if s, ok := tok.(string); ok { + return s, nil + } + return "", fmt.Errorf("missing expected object key") +} + +func readRawValue(dec *json.Decoder) ([]byte, error) { + var rawVal json.RawMessage + err := dec.Decode(&rawVal) + if err != nil { + return nil, err + } + return []byte(rawVal), nil +} + +func bufDecoder(buf []byte) *json.Decoder 
{
+	r := bytes.NewReader(buf)
+	dec := json.NewDecoder(r)
+	dec.UseNumber()
+	return dec
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/json/value.go b/vendor/github.com/zclconf/go-cty/cty/json/value.go
new file mode 100644
index 00000000..f2f7dd56
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/json/value.go
@@ -0,0 +1,65 @@
+package json
+
+import (
+	"bytes"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// Marshal produces a JSON representation of the given value that can later
+// be decoded into a value of the given type.
+//
+// A type is specified separately to allow for the given type to include
+// cty.DynamicPseudoType to represent situations where any type is permitted
+// and so type information must be included to allow recovery of the stored
+// structure when decoding.
+//
+// The given type will also be used to attempt automatic conversions of any
+// non-conformant types in the given value, although this will not always
+// be possible. If the value cannot be made to be conformant then an error is
+// returned, which may be a cty.PathError.
+//
+// Capsule-typed values can be marshalled, but with some caveats. Since
+// capsule values are compared by pointer equality, it is impossible to recover
+// a value that will compare equal to the original value. Additionally,
+// it's not possible to JSON-serialize the capsule type itself, so it's not
+// valid to use capsule types within parts of the value that are conformed to
+// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as
+// the encapsulated type itself is serializable with the Marshal function
+// in encoding/json.
+func Marshal(val cty.Value, t cty.Type) ([]byte, error) {
+	errs := val.Type().TestConformance(t)
+	if errs != nil {
+		// Attempt a conversion
+		var err error
+		val, err = convert.Convert(val, t)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// From this point onward, val can be assumed to be conforming to t.
+
+	buf := &bytes.Buffer{}
+	var path cty.Path
+	err := marshal(val, t, path, buf)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// Unmarshal decodes a JSON representation of the given value into a cty Value
+// conforming to the given type.
+//
+// While decoding, type conversions will be done where possible to make
+// the result conformant even if the types given in JSON are not exactly
+// correct. If conversion isn't possible then an error is returned, which
+// may be a cty.PathError.
+func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) {
+	var path cty.Path
+	return unmarshal(buf, t, path)
+}
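For orientation while reviewing, here is a minimal sketch (not part of the vendored patch; the object type and attribute names are invented) of the round trip that Marshal and Unmarshal provide. Passing cty.DynamicPseudoType as the type instead would produce the {"type":...,"value":...} wrapper that unmarshalDynamic above decodes.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name":  cty.String,
		"count": cty.Number,
	})
	val := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("web"),
		"count": cty.NumberIntVal(3),
	})

	buf, err := ctyjson.Marshal(val, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // something like {"count":3,"name":"web"}

	got, err := ctyjson.Unmarshal(buf, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.RawEquals(val)) // expected: true
}
```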
diff --git a/vendor/github.com/zclconf/go-cty/cty/list_type.go b/vendor/github.com/zclconf/go-cty/cty/list_type.go
new file mode 100644
index 00000000..2ef02a12
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/list_type.go
@@ -0,0 +1,74 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// typeList instances represent specific list types. Each distinct ElementType
+// creates a distinct, non-equal list type.
+type typeList struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// List creates a list type with the given element Type.
+//
+// List types are CollectionType implementations.
+func List(elem Type) Type {
+	return Type{
+		typeList{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a list whose element type is
+// equal to that of the receiver.
+func (t typeList) Equals(other Type) bool {
+	ot, isList := other.typeImpl.(typeList)
+	if !isList {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeList) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "list of " + elemName
+}
+
+func (t typeList) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeList) GoString() string {
+	return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT)
+}
+
+// IsListType returns true if the given type is a list type, regardless of its
+// element type.
+func (t Type) IsListType() bool {
+	_, ok := t.typeImpl.(typeList)
+	return ok
+}
+
+// ListElementType is a convenience method that checks if the given type is
+// a list type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.ListElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) ListElementType() *Type {
+	if lt, ok := t.typeImpl.(typeList); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
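The nil-pointer convention documented on ListElementType (and mirrored by the Map and Set variants below) supports a compact branch-then-dereference pattern. A small illustrative sketch, with invented values:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	t := cty.List(cty.String)
	// et is non-nil only when t is actually a list type.
	if et := t.ListElementType(); et != nil {
		fmt.Println("list of", et.FriendlyName()) // list of string
	}
}
```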
diff --git a/vendor/github.com/zclconf/go-cty/cty/map_type.go b/vendor/github.com/zclconf/go-cty/cty/map_type.go
new file mode 100644
index 00000000..82d36c62
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/map_type.go
@@ -0,0 +1,74 @@
+package cty
+
+import (
+	"fmt"
+)
+
+// typeMap instances represent specific map types. Each distinct ElementType
+// creates a distinct, non-equal map type.
+type typeMap struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// Map creates a map type with the given element Type.
+//
+// Map types are CollectionType implementations.
+func Map(elem Type) Type {
+	return Type{
+		typeMap{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a map whose element type is
+// equal to that of the receiver.
+func (t typeMap) Equals(other Type) bool {
+	ot, isMap := other.typeImpl.(typeMap)
+	if !isMap {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeMap) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "map of " + elemName
+}
+
+func (t typeMap) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeMap) GoString() string {
+	return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT)
+}
+
+// IsMapType returns true if the given type is a map type, regardless of its
+// element type.
+func (t Type) IsMapType() bool {
+	_, ok := t.typeImpl.(typeMap)
+	return ok
+}
+
+// MapElementType is a convenience method that checks if the given type is
+// a map type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.MapElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) MapElementType() *Type {
+	if lt, ok := t.typeImpl.(typeMap); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/null.go b/vendor/github.com/zclconf/go-cty/cty/null.go
new file mode 100644
index 00000000..d58d0287
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/null.go
@@ -0,0 +1,14 @@
+package cty
+
+// NullVal returns a null value of the given type. A null can be created of any
+// type, but operations on such values will always panic. Calling applications
+// are encouraged to use nulls only sparingly, particularly when user-provided
+// expressions are to be evaluated, since the presence of nulls creates a
+// much higher chance of evaluation errors that can't be caught by a type
+// checker.
+func NullVal(t Type) Value {
+	return Value{
+		ty: t,
+		v:  nil,
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/object_type.go b/vendor/github.com/zclconf/go-cty/cty/object_type.go
new file mode 100644
index 00000000..187d3875
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/object_type.go
@@ -0,0 +1,135 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeObject struct {
+	typeImplSigil
+	AttrTypes map[string]Type
+}
+
+// Object creates an object type with the given attribute types.
+//
+// After a map is passed to this function the caller must no longer access it,
+// since ownership is transferred to this library.
+func Object(attrTypes map[string]Type) Type {
+	attrTypesNorm := make(map[string]Type, len(attrTypes))
+	for k, v := range attrTypes {
+		attrTypesNorm[NormalizeString(k)] = v
+	}
+
+	return Type{
+		typeObject{
+			AttrTypes: attrTypesNorm,
+		},
+	}
+}
+
+func (t typeObject) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeObject); ok {
+		if len(t.AttrTypes) != len(ot.AttrTypes) {
+			// Fast path: if we don't have the same number of attributes
+			// then we can't possibly be equal. This also avoids the need
+			// to test attributes in both directions below, since we know
+			// there can't be extras in "other".
+			return false
+		}
+
+		for attr, ty := range t.AttrTypes {
+			oty, ok := ot.AttrTypes[attr]
+			if !ok {
+				return false
+			}
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeObject) FriendlyName(mode friendlyTypeNameMode) string {
+	// There isn't really a friendly way to write an object type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// an object type FriendlyName in its entirety. For example, could
+	// produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "object"
+}
+
+func (t typeObject) GoString() string {
+	if len(t.AttrTypes) == 0 {
+		return "cty.EmptyObject"
+	}
+	return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes)
+}
+
+// EmptyObject is a shorthand for Object(map[string]Type{}), to more
+// easily talk about the empty object type.
+var EmptyObject Type
+
+// EmptyObjectVal is the only possible non-null, non-unknown value of type
+// EmptyObject.
+var EmptyObjectVal Value
+
+func init() {
+	EmptyObject = Object(map[string]Type{})
+	EmptyObjectVal = Value{
+		ty: EmptyObject,
+		v:  map[string]interface{}{},
+	}
+}
+
+// IsObjectType returns true if the given type is an object type, regardless
+// of its element type.
+func (t Type) IsObjectType() bool {
+	_, ok := t.typeImpl.(typeObject)
+	return ok
+}
+
+// HasAttribute returns true if the receiver has an attribute with the given
+// name, regardless of its type. Will panic if the receiver isn't an object
+// type; use IsObjectType to determine whether this operation will succeed.
+func (t Type) HasAttribute(name string) bool {
+	name = NormalizeString(name)
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		_, hasAttr := ot.AttrTypes[name]
+		return hasAttr
+	}
+	panic("HasAttribute on non-object Type")
+}
+
+// AttributeType returns the type of the attribute with the given name. Will
+// panic if the receiver is not an object type (use IsObjectType to confirm)
+// or if the object type has no such attribute (use HasAttribute to confirm).
+func (t Type) AttributeType(name string) Type {
+	name = NormalizeString(name)
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		aty, hasAttr := ot.AttrTypes[name]
+		if !hasAttr {
+			panic("no such attribute")
+		}
+		return aty
+	}
+	panic("AttributeType on non-object Type")
+}
+
+// AttributeTypes returns a map from attribute names to their associated
+// types. Will panic if the receiver is not an object type (use IsObjectType
+// to confirm).
+//
+// The returned map is part of the internal state of the type, and is provided
+// for read access only. It is forbidden for any caller to modify the returned
+// map. For many purposes the attribute-related methods of Value are more
+// appropriate and more convenient to use.
+func (t Type) AttributeTypes() map[string]Type {
+	if ot, ok := t.typeImpl.(typeObject); ok {
+		return ot.AttrTypes
+	}
+	panic("AttributeTypes on non-object Type")
+}
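A short sketch (invented, not part of the patch) of the object-type introspection methods defined above, showing the HasAttribute guard that the panicking AttributeType expects callers to use:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	t := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"tags": cty.Map(cty.String),
	})
	// Guard with IsObjectType/HasAttribute before calling AttributeType,
	// which panics on a missing attribute or a non-object type.
	if t.IsObjectType() && t.HasAttribute("name") {
		fmt.Println(t.AttributeType("name").FriendlyName()) // string
	}
}
```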
diff --git a/vendor/github.com/zclconf/go-cty/cty/path.go b/vendor/github.com/zclconf/go-cty/cty/path.go
new file mode 100644
index 00000000..69bc4819
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/path.go
@@ -0,0 +1,190 @@
+package cty
+
+import (
+	"errors"
+	"fmt"
+)
+
+// A Path is a sequence of operations to locate a nested value within a
+// data structure.
+//
+// The empty Path represents the given item. Any PathSteps within represent
+// taking a single step down into a data structure.
+//
+// Path has some convenience methods for gradually constructing a path,
+// but callers can also feel free to just produce a slice of PathStep manually
+// and convert to this type, which may be more appropriate in environments
+// where memory pressure is a concern.
+//
+// Although a Path is technically mutable, by convention callers should not
+// mutate a path once it has been built and passed to some other subsystem.
+// Instead, use Copy and then mutate the copy before using it.
+type Path []PathStep
+
+// PathStep represents a single step down into a data structure, as part
+// of a Path. PathStep is a closed interface, meaning that the only
+// permitted implementations are those within this package.
+type PathStep interface {
+	pathStepSigil() pathStepImpl
+	Apply(Value) (Value, error)
+}
+
+// embed pathStepImpl into a struct to declare it a PathStep implementation
type pathStepImpl struct{}
+
+func (p pathStepImpl) pathStepSigil() pathStepImpl {
+	return p
+}
+
+// Index returns a new Path that is the receiver with an IndexStep appended
+// to the end.
+//
+// This is provided as a convenient way to construct paths, but each call
+// will create garbage so it should not be used where memory pressure is a
+// concern.
+func (p Path) Index(v Value) Path {
+	ret := make(Path, len(p)+1)
+	copy(ret, p)
+	ret[len(p)] = IndexStep{
+		Key: v,
+	}
+	return ret
+}
+
+// GetAttr returns a new Path that is the receiver with a GetAttrStep appended
+// to the end.
+//
+// This is provided as a convenient way to construct paths, but each call
+// will create garbage so it should not be used where memory pressure is a
+// concern.
+func (p Path) GetAttr(name string) Path {
+	ret := make(Path, len(p)+1)
+	copy(ret, p)
+	ret[len(p)] = GetAttrStep{
+		Name: name,
+	}
+	return ret
+}
+
+// Apply applies each of the steps in turn to successive values starting with
+// the given value, and returns the result. If any step returns an error,
+// the whole operation returns an error.
+func (p Path) Apply(val Value) (Value, error) {
+	var err error
+	for i, step := range p {
+		val, err = step.Apply(val)
+		if err != nil {
+			return NilVal, fmt.Errorf("at step %d: %s", i, err)
+		}
+	}
+	return val, nil
+}
+
+// LastStep applies the given path up to the last step and then returns
+// the resulting value and the final step.
+//
+// This is useful when dealing with assignment operations, since in that
+// case the *value* of the last step is not important (and may not, in fact,
+// be present at all) and we care only about its location.
+//
+// Since LastStep applies all steps except the last, it will return errors
+// for those steps in the same way as Apply does.
+//
+// If the path has *no* steps then the returned PathStep will be nil,
+// representing that any operation should be applied directly to the
+// given value.
+func (p Path) LastStep(val Value) (Value, PathStep, error) {
+	var err error
+
+	if len(p) == 0 {
+		return val, nil, nil
+	}
+
+	journey := p[:len(p)-1]
+	val, err = journey.Apply(val)
+	if err != nil {
+		return NilVal, nil, err
+	}
+	return val, p[len(p)-1], nil
+}
+
+// Copy makes a shallow copy of the receiver. Often when paths are passed to
+// caller code they come with the constraint that they are valid only until
+// the caller returns, due to how they are constructed internally. Callers
+// can use Copy to conveniently produce a copy of the value that _they_ control
+// the validity of.
+func (p Path) Copy() Path {
+	ret := make(Path, len(p))
+	copy(ret, p)
+	return ret
+}
+
+// IndexStep is a Step implementation representing applying the index operation
+// to a value, which must be of either a list, map, or set type.
+//
+// When describing a path through a *type* rather than a concrete value,
+// the Key may be an unknown value, indicating that the step applies to
+// *any* key of the given type.
+//
+// When indexing into a set, the Key is actually the element being accessed
+// itself, since in sets elements are their own identity.
+type IndexStep struct {
+	pathStepImpl
+	Key Value
+}
+
+// Apply returns the value resulting from indexing the given value with
+// our key value.
+func (s IndexStep) Apply(val Value) (Value, error) {
+	switch s.Key.Type() {
+	case Number:
+		if !val.Type().IsListType() {
+			return NilVal, errors.New("not a list type")
+		}
+	case String:
+		if !val.Type().IsMapType() {
+			return NilVal, errors.New("not a map type")
+		}
+	default:
+		return NilVal, errors.New("key value not number or string")
+	}
+
+	has := val.HasIndex(s.Key)
+	if !has.IsKnown() {
+		return UnknownVal(val.Type().ElementType()), nil
+	}
+	if !has.True() {
+		return NilVal, errors.New("value does not have given index key")
+	}
+
+	return val.Index(s.Key), nil
+}
+
+func (s IndexStep) GoString() string {
+	return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key)
+}
+
+// GetAttrStep is a Step implementation representing retrieving an attribute
+// from a value, which must be of an object type.
+type GetAttrStep struct {
+	pathStepImpl
+	Name string
+}
+
+// Apply returns the value of our named attribute from the given value, which
+// must be of an object type that has a value of that name.
+func (s GetAttrStep) Apply(val Value) (Value, error) {
+	if !val.Type().IsObjectType() {
+		return NilVal, errors.New("not an object type")
+	}
+
+	if !val.Type().HasAttribute(s.Name) {
+		return NilVal, fmt.Errorf("object has no attribute %q", s.Name)
+	}
+
+	return val.GetAttr(s.Name), nil
+}
+
+func (s GetAttrStep) GoString() string {
+	return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name)
+}
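To illustrate how GetAttrStep and IndexStep compose, here is a hypothetical path construction and application; the attribute name and element values are invented:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"ports": cty.ListVal([]cty.Value{
			cty.NumberIntVal(80),
			cty.NumberIntVal(443),
		}),
	})

	// .ports[1], built with the garbage-creating convenience methods.
	path := cty.Path{}.GetAttr("ports").Index(cty.NumberIntVal(1))

	got, err := path.Apply(val)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.AsBigFloat()) // 443
}
```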
diff --git a/vendor/github.com/zclconf/go-cty/cty/path_set.go b/vendor/github.com/zclconf/go-cty/cty/path_set.go
new file mode 100644
index 00000000..f1c892b9
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/path_set.go
@@ -0,0 +1,198 @@
+package cty
+
+import (
+	"fmt"
+	"hash/crc64"
+
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+// PathSet represents a set of Path objects. This can be used, for example,
+// to talk about a subset of paths within a value that meet some criteria,
+// without directly modifying the values at those paths.
+type PathSet struct {
+	set set.Set
+}
+
+// NewPathSet creates and returns a PathSet, with initial contents optionally
+// set by the given arguments.
+func NewPathSet(paths ...Path) PathSet {
+	ret := PathSet{
+		set: set.NewSet(pathSetRules{}),
+	}
+
+	for _, path := range paths {
+		ret.Add(path)
+	}
+
+	return ret
+}
+
+// Add inserts a single given path into the set.
+//
+// Paths are immutable after construction by convention. It is particularly
+// important not to mutate a path after it has been placed into a PathSet.
+// If a Path is mutated while in a set, behavior is undefined.
+func (s PathSet) Add(path Path) {
+	s.set.Add(path)
+}
+
+// AddAllSteps is like Add but it also adds all of the steps leading to
+// the given path.
+//
+// For example, if given a path representing "foo.bar", it will add both
+// "foo" and "bar".
+func (s PathSet) AddAllSteps(path Path) {
+	for i := 1; i <= len(path); i++ {
+		s.Add(path[:i])
+	}
+}
+
+// Has returns true if the given path is in the receiving set.
+func (s PathSet) Has(path Path) bool {
+	return s.set.Has(path)
+}
+
+// List makes and returns a slice of all of the paths in the receiving set,
+// in an undefined but consistent order.
+func (s PathSet) List() []Path {
+	if s.Empty() {
+		return nil
+	}
+	ret := make([]Path, 0, s.set.Length())
+	for it := s.set.Iterator(); it.Next(); {
+		ret = append(ret, it.Value().(Path))
+	}
+	return ret
+}
+
+// Remove modifies the receiving set to no longer include the given path.
+// If the given path was already absent, this is a no-op.
+func (s PathSet) Remove(path Path) {
+	s.set.Remove(path)
+}
+
+// Empty returns true if the length of the receiving set is zero.
+func (s PathSet) Empty() bool {
+	return s.set.Length() == 0
+}
+
+// Union returns a new set whose contents are the union of the receiver and
+// the given other set.
+func (s PathSet) Union(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.Union(other.set),
+	}
+}
+
+// Intersection returns a new set whose contents are the intersection of the
+// receiver and the given other set.
+func (s PathSet) Intersection(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.Intersection(other.set),
+	}
+}
+
+// Subtract returns a new set whose contents are those from the receiver with
+// any elements of the other given set subtracted.
+func (s PathSet) Subtract(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.Subtract(other.set),
+	}
+}
+
+// SymmetricDifference returns a new set whose contents are the symmetric
+// difference of the receiver and the given other set.
+func (s PathSet) SymmetricDifference(other PathSet) PathSet {
+	return PathSet{
+		set: s.set.SymmetricDifference(other.set),
+	}
+}
+
+// Equal returns true if and only if both the receiver and the given other
+// set contain exactly the same paths.
+func (s PathSet) Equal(other PathSet) bool {
+	if s.set.Length() != other.set.Length() {
+		return false
+	}
+	// Now we know the lengths are the same we only need to test in one
+	// direction whether everything in one is in the other.
+	for it := s.set.Iterator(); it.Next(); {
+		if !other.set.Has(it.Value()) {
+			return false
+		}
+	}
+	return true
+}
+
+var crc64Table = crc64.MakeTable(crc64.ISO)
+
+var indexStepPlaceholder = []byte("#")
+
+// pathSetRules is an implementation of set.Rules from the set package,
+// used internally within PathSet.
+type pathSetRules struct {
+}
+
+func (r pathSetRules) Hash(v interface{}) int {
+	path := v.(Path)
+	hash := crc64.New(crc64Table)
+
+	for _, rawStep := range path {
+		switch step := rawStep.(type) {
+		case GetAttrStep:
+			// (this creates some garbage converting the string name to a
+			// []byte, but that's okay since cty is not designed to be
+			// used in tight loops under memory pressure.)
+			hash.Write([]byte(step.Name))
+		default:
+			// For any other step type we just append a predefined value,
+			// which means that e.g. all indexes into a given collection will
+			// hash to the same value but we assume that collections are
+			// small and thus this won't hurt too much.
+			hash.Write(indexStepPlaceholder)
+		}
+	}
+
+	// We discard half of the hash on 32-bit platforms; collisions just make
+	// our lookups take marginally longer, so not a big deal.
+	return int(hash.Sum64())
+}
+
+func (r pathSetRules) Equivalent(a, b interface{}) bool {
+	aPath := a.(Path)
+	bPath := b.(Path)
+
+	if len(aPath) != len(bPath) {
+		return false
+	}
+
+	for i := range aPath {
+		switch aStep := aPath[i].(type) {
+		case GetAttrStep:
+			bStep, ok := bPath[i].(GetAttrStep)
+			if !ok {
+				return false
+			}
+
+			if aStep.Name != bStep.Name {
+				return false
+			}
+		case IndexStep:
+			bStep, ok := bPath[i].(IndexStep)
+			if !ok {
+				return false
+			}
+
+			eq := aStep.Key.Equals(bStep.Key)
+			if !eq.IsKnown() || eq.False() {
+				return false
+			}
+		default:
+			// Should never happen, since we document PathStep as a closed type.
+			panic(fmt.Errorf("unsupported step type %T", aStep))
+		}
+	}
+
+	return true
+}
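A brief hypothetical sketch of PathSet in use, showing how AddAllSteps records every prefix of a path (the attribute names are invented):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	p := cty.Path{}.GetAttr("network").GetAttr("cidr")

	s := cty.NewPathSet()
	s.AddAllSteps(p) // adds both "network" and "network.cidr"

	fmt.Println(s.Has(p))                             // true
	fmt.Println(s.Has(cty.Path{}.GetAttr("network"))) // true
}
```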
diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go
new file mode 100644
index 00000000..7b3d1196
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go
@@ -0,0 +1,122 @@
+package cty
+
+import "math/big"
+
+// primitiveType is the hidden implementation of the various primitive types
+// that are exposed as variables in this package.
+type primitiveType struct {
+	typeImplSigil
+	Kind primitiveTypeKind
+}
+
+type primitiveTypeKind byte
+
+const (
+	primitiveTypeBool   primitiveTypeKind = 'B'
+	primitiveTypeNumber primitiveTypeKind = 'N'
+	primitiveTypeString primitiveTypeKind = 'S'
+)
+
+func (t primitiveType) Equals(other Type) bool {
+	if otherP, ok := other.typeImpl.(primitiveType); ok {
+		return otherP.Kind == t.Kind
+	}
+	return false
+}
+
+func (t primitiveType) FriendlyName(mode friendlyTypeNameMode) string {
+	switch t.Kind {
+	case primitiveTypeBool:
+		return "bool"
+	case primitiveTypeNumber:
+		return "number"
+	case primitiveTypeString:
+		return "string"
+	default:
+		// should never happen
+		panic("invalid primitive type")
+	}
+}
+
+func (t primitiveType) GoString() string {
+	switch t.Kind {
+	case primitiveTypeBool:
+		return "cty.Bool"
+	case primitiveTypeNumber:
+		return "cty.Number"
+	case primitiveTypeString:
+		return "cty.String"
+	default:
+		// should never happen
+		panic("invalid primitive type")
+	}
+}
+
+// Number is the numeric type. Number values are arbitrary-precision
+// decimal numbers, which can then be converted into Go's various numeric
+// types only if they are in the appropriate range.
+var Number Type
+
+// String is the string type. String values are sequences of unicode codepoints
+// encoded internally as UTF-8.
+var String Type
+
+// Bool is the boolean type. The two values of this type are True and False.
+var Bool Type
+
+// True is the truthy value of type Bool
+var True Value
+
+// False is the falsey value of type Bool
+var False Value
+
+// Zero is a number value representing exactly zero.
+var Zero Value
+
+// PositiveInfinity is a Number value representing positive infinity
+var PositiveInfinity Value
+
+// NegativeInfinity is a Number value representing negative infinity
+var NegativeInfinity Value
+
+func init() {
+	Number = Type{
+		primitiveType{Kind: primitiveTypeNumber},
+	}
+	String = Type{
+		primitiveType{Kind: primitiveTypeString},
+	}
+	Bool = Type{
+		primitiveType{Kind: primitiveTypeBool},
+	}
+	True = Value{
+		ty: Bool,
+		v:  true,
+	}
+	False = Value{
+		ty: Bool,
+		v:  false,
+	}
+	Zero = Value{
+		ty: Number,
+		v:  big.NewFloat(0),
+	}
+	PositiveInfinity = Value{
+		ty: Number,
+		v:  (&big.Float{}).SetInf(false),
+	}
+	NegativeInfinity = Value{
+		ty: Number,
+		v:  (&big.Float{}).SetInf(true),
+	}
+}
+
+// IsPrimitiveType returns true if and only if the receiver is a primitive
+// type, which means it's either number, string, or bool. Any two primitive
+// types can be safely compared for equality using the standard == operator
+// without panic, which is not a guarantee that holds for all types. Primitive
+// types can therefore also be used in switch statements.
+func (t Type) IsPrimitiveType() bool {
+	_, ok := t.typeImpl.(primitiveType)
+	return ok
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set/gob.go b/vendor/github.com/zclconf/go-cty/cty/set/gob.go
new file mode 100644
index 00000000..da2978f6
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set/gob.go
@@ -0,0 +1,76 @@
+package set
+
+import (
+	"bytes"
+	"encoding/gob"
+	"fmt"
+)
+
+// GobEncode is an implementation of the interface gob.GobEncoder, allowing
+// sets to be included in structures encoded via gob.
+//
+// The set rules are included in the serialized value, so the caller must
+// register its concrete rules type with gob.Register before using a
+// set in a gob, and possibly also implement GobEncode/GobDecode to customize
+// how any parameters are persisted.
+//
+// The set elements are also included, so if they are of non-primitive types
+// they too must be registered with gob.
+//
+// If the produced gob values will persist for a long time, the caller must
+// ensure compatibility of the rules implementation. In particular, if the
+// definition of element equivalence changes between encoding and decoding
+// then two distinct stored elements may be considered equivalent on decoding,
+// causing the recovered set to have fewer elements than when it was stored.
+func (s Set) GobEncode() ([]byte, error) {
+	gs := gobSet{
+		Version: 0,
+		Rules:   s.rules,
+		Values:  s.Values(),
+	}
+
+	buf := &bytes.Buffer{}
+	enc := gob.NewEncoder(buf)
+	err := enc.Encode(gs)
+	if err != nil {
+		return nil, fmt.Errorf("error encoding set.Set: %s", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+// GobDecode is the opposite of GobEncode. See GobEncode for information
+// on the requirements for and caveats of including set values in gobs.
+func (s *Set) GobDecode(buf []byte) error {
+	r := bytes.NewReader(buf)
+	dec := gob.NewDecoder(r)
+
+	var gs gobSet
+	err := dec.Decode(&gs)
+	if err != nil {
+		return fmt.Errorf("error decoding set.Set: %s", err)
+	}
+	if gs.Version != 0 {
+		return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version)
+	}
+
+	victim := NewSetFromSlice(gs.Rules, gs.Values)
+	s.vals = victim.vals
+	s.rules = victim.rules
+	return nil
+}
+
+type gobSet struct {
+	Version int
+	Rules   Rules
+
+	// The bucket-based representation is for efficient in-memory access, but
+	// for serialization it's enough to just retain the values themselves,
+	// which we can re-bucket using the rules (which may have changed!) when
+	// we re-inflate.
+ Values []interface{} +} + +func init() { + gob.Register([]interface{}(nil)) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/iterator.go b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go new file mode 100644 index 00000000..f15498e2 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/iterator.go @@ -0,0 +1,36 @@ +package set + +type Iterator struct { + bucketIds []int + vals map[int][]interface{} + bucketIdx int + valIdx int +} + +func (it *Iterator) Value() interface{} { + return it.currentBucket()[it.valIdx] +} + +func (it *Iterator) Next() bool { + if it.bucketIdx == -1 { + // init + if len(it.bucketIds) == 0 { + return false + } + + it.valIdx = 0 + it.bucketIdx = 0 + return true + } + + it.valIdx++ + if it.valIdx >= len(it.currentBucket()) { + it.valIdx = 0 + it.bucketIdx++ + } + return it.bucketIdx < len(it.bucketIds) +} + +func (it *Iterator) currentBucket() []interface{} { + return it.vals[it.bucketIds[it.bucketIdx]] +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/ops.go b/vendor/github.com/zclconf/go-cty/cty/set/ops.go new file mode 100644 index 00000000..726e7077 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/ops.go @@ -0,0 +1,199 @@ +package set + +import ( + "sort" +) + +// Add inserts the given value into the receiving Set. +// +// This mutates the set in-place. This operation is not thread-safe. +func (s Set) Add(val interface{}) { + hv := s.rules.Hash(val) + if _, ok := s.vals[hv]; !ok { + s.vals[hv] = make([]interface{}, 0, 1) + } + bucket := s.vals[hv] + + // See if an equivalent value is already present + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return + } + } + + s.vals[hv] = append(bucket, val) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s Set) Remove(val interface{}) { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return + } + + for i, ev := range bucket { + if s.rules.Equivalent(val, ev) { + newBucket := make([]interface{}, 0, len(bucket)-1) + newBucket = append(newBucket, bucket[:i]...) + newBucket = append(newBucket, bucket[i+1:]...) + if len(newBucket) > 0 { + s.vals[hv] = newBucket + } else { + delete(s.vals, hv) + } + return + } + } +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s Set) Has(val interface{}) bool { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return false + } + + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return true + } + } + return false +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s Set) Copy() Set { + ret := NewSet(s.rules) + for k, v := range s.vals { + ret.vals[k] = v + } + return ret +} + +// Iterator returns an iterator over values in the set, in an undefined order +// that callers should not depend on. +// +// The pattern for using the returned iterator is: +// +// it := set.Iterator() +// for it.Next() { +// val := it.Value() +// // ... +// } +// +// Once an iterator has been created for a set, the set *must not* be mutated +// until the iterator is no longer in use. +func (s Set) Iterator() *Iterator { + // Sort the bucketIds to ensure that we always traverse in a + // consistent order. 
+ bucketIds := make([]int, 0, len(s.vals)) + for id := range s.vals { + bucketIds = append(bucketIds, id) + } + sort.Ints(bucketIds) + + return &Iterator{ + bucketIds: bucketIds, + vals: s.vals, + bucketIdx: -1, + } +} + +// EachValue calls the given callback once for each value in the set, in an +// undefined order that callers should not depend on. +func (s Set) EachValue(cb func(interface{})) { + it := s.Iterator() + for it.Next() { + cb(it.Value()) + } +} + +// Values returns a slice of all of the values in the set in no particular +// order. This is just a wrapper around EachValue that accumulates the results +// in a slice for caller convenience. +// +// The returned slice will be nil if there are no values in the set. +func (s Set) Values() []interface{} { + var ret []interface{} + s.EachValue(func(v interface{}) { + ret = append(ret, v) + }) + return ret +} + +// Length returns the number of values in the set. +func (s Set) Length() int { + var count int + for _, bucket := range s.vals { + count = count + len(bucket) + } + return count +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same rules, or +// else this function will panic. +func (s1 Set) Union(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + rs.Add(v) + }) + s2.EachValue(func(v interface{}) { + rs.Add(v) + }) + return rs +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Intersection(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Subtract(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same rules, or else this function will +// panic. +func (s1 Set) SymmetricDifference(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + rs.Add(v) + } + }) + return rs +} diff --git a/vendor/github.com/zclconf/go-cty/cty/set/rules.go b/vendor/github.com/zclconf/go-cty/cty/set/rules.go new file mode 100644 index 00000000..7200184b --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/set/rules.go @@ -0,0 +1,25 @@ +package set + +// Rules represents the operations that define membership for a Set. +// +// Each Set has a Rules instance, whose methods must satisfy the interface +// contracts given below for any value that will be added to the set. +type Rules interface { + // Hash returns an int that somewhat-uniquely identifies the given value. + // + // A good hash function will minimize collisions for values that will be + // added to the set, though collisions *are* permitted. Collisions will + // simply reduce the efficiency of operations on the set. 
+	Hash(interface{}) int
+
+	// Equivalent returns true if and only if the two values are considered
+	// equivalent for the sake of set membership. Two values that are
+	// equivalent cannot exist in the set at the same time, and if two
+	// equivalent values are added it is undefined which one will be
+	// returned when enumerating all of the set members.
+	//
+	// Two values that are equivalent *must* result in the same hash value,
+	// though it is *not* required that two values with the same hash value
+	// be equivalent.
+	Equivalent(interface{}, interface{}) bool
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set/set.go b/vendor/github.com/zclconf/go-cty/cty/set/set.go
new file mode 100644
index 00000000..b4fb316f
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set/set.go
@@ -0,0 +1,62 @@
+package set
+
+import (
+	"fmt"
+)
+
+// Set is an implementation of the concept of a set: a collection where all
+// values are conceptually either in or out of the set, but the members are
+// not ordered.
+//
+// This type primarily exists to be the internal type of sets in cty, but
+// it is considered to be at the same level of abstraction as Go's built in
+// slice and map collection types, and so should make no cty-specific
+// assumptions.
+//
+// Set operations are not thread safe. It is the caller's responsibility to
+// provide mutex guarantees where necessary.
+//
+// Set operations are not optimized to minimize memory pressure. Mutating
+// a set will generally create garbage and so should perhaps be avoided in
+// tight loops where memory pressure is a concern.
+type Set struct {
+	vals  map[int][]interface{}
+	rules Rules
+}
+
+// NewSet returns an empty set with the membership rules given.
+func NewSet(rules Rules) Set {
+	return Set{
+		vals:  map[int][]interface{}{},
+		rules: rules,
+	}
+}
+
+func NewSetFromSlice(rules Rules, vals []interface{}) Set {
+	s := NewSet(rules)
+	for _, v := range vals {
+		s.Add(v)
+	}
+	return s
+}
+
+func sameRules(s1 Set, s2 Set) bool {
+	return s1.rules == s2.rules
+}
+
+func mustHaveSameRules(s1 Set, s2 Set) {
+	if !sameRules(s1, s2) {
+		panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules))
+	}
+}
+
+// HasRules returns true if and only if the receiving set has the given rules
+// instance as its rules.
+func (s Set) HasRules(rules Rules) bool {
+	return s.rules == rules
+}
+
+// Rules returns the receiving set's rules instance.
+func (s Set) Rules() Rules {
+	return s.rules
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set_helper.go b/vendor/github.com/zclconf/go-cty/cty/set_helper.go
new file mode 100644
index 00000000..a88ddaff
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set_helper.go
@@ -0,0 +1,126 @@
+package cty
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+// ValueSet is to cty.Set what []cty.Value is to cty.List and
+// map[string]cty.Value is to cty.Map. It's provided to allow callers a
+// convenient interface for manipulating sets before wrapping them in cty.Set
+// values using cty.SetValFromValueSet.
+//
+// Unlike value slices and value maps, ValueSet instances have a single
+// homogeneous element type because that is a requirement of the underlying
+// set implementation, which uses the element type to select a suitable
+// hashing function.
+//
+// Set mutations are not concurrency-safe.
+type ValueSet struct {
+	// ValueSet is just a thin wrapper around a set.Set with our value-oriented
+	// "rules" applied.
We do this so that the caller can work in terms of + // cty.Value objects even though the set internals use the raw values. + s set.Set +} + +// NewValueSet creates and returns a new ValueSet with the given element type. +func NewValueSet(ety Type) ValueSet { + return newValueSet(set.NewSet(setRules{Type: ety})) +} + +func newValueSet(s set.Set) ValueSet { + return ValueSet{ + s: s, + } +} + +// ElementType returns the element type for the receiving ValueSet. +func (s ValueSet) ElementType() Type { + return s.s.Rules().(setRules).Type +} + +// Add inserts the given value into the receiving set. +func (s ValueSet) Add(v Value) { + s.requireElementType(v) + s.s.Add(v.v) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s ValueSet) Remove(v Value) { + s.requireElementType(v) + s.s.Remove(v.v) +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s ValueSet) Has(v Value) bool { + s.requireElementType(v) + return s.s.Has(v.v) +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s ValueSet) Copy() ValueSet { + return newValueSet(s.s.Copy()) +} + +// Length returns the number of values in the set. +func (s ValueSet) Length() int { + return s.s.Length() +} + +// Values returns a slice of all of the values in the set in no particular +// order. +func (s ValueSet) Values() []Value { + l := s.s.Length() + if l == 0 { + return nil + } + ret := make([]Value, 0, l) + ety := s.ElementType() + for it := s.s.Iterator(); it.Next(); { + ret = append(ret, Value{ + ty: ety, + v: it.Value(), + }) + } + return ret +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same element type, +// or else this function will panic. +func (s ValueSet) Union(other ValueSet) ValueSet { + return newValueSet(s.s.Union(other.s)) +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Intersection(other ValueSet) ValueSet { + return newValueSet(s.s.Intersection(other.s)) +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Subtract(other ValueSet) ValueSet { + return newValueSet(s.s.Subtract(other.s)) +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same element type, or else this function +// will panic. +func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet { + return newValueSet(s.s.SymmetricDifference(other.s)) +} + +// requireElementType panics if the given value is not of the set's element type. 
+func (s ValueSet) requireElementType(v Value) {
+	if !v.Type().Equals(s.ElementType()) {
+		panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType()))
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/set_internals.go b/vendor/github.com/zclconf/go-cty/cty/set_internals.go
new file mode 100644
index 00000000..1d7a731a
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set_internals.go
@@ -0,0 +1,158 @@
+package cty
+
+import (
+	"bytes"
+	"fmt"
+	"hash/crc32"
+	"math/big"
+	"sort"
+)
+
+// setRules provides a Rules implementation for the ./set package that
+// respects the equality rules for cty values of the given type.
+//
+// This implementation expects that values added to the set will be
+// valid internal values for the given Type, which is to say that wrapping
+// the given value in a Value struct along with the ruleset's type should
+// produce a valid, working Value.
+type setRules struct {
+	Type Type
+}
+
+// Hash returns a hash value for the receiver that can be used for equality
+// checks where some inaccuracy is tolerable.
+//
+// The hash function is value-type-specific, so it is not meaningful to compare
+// hash results for values of different types.
+//
+// This function is not safe to use for security-related applications, since
+// the hash used is not strong enough.
+func (val Value) Hash() int {
+	hashBytes := makeSetHashBytes(val)
+	return int(crc32.ChecksumIEEE(hashBytes))
+}
+
+func (r setRules) Hash(v interface{}) int {
+	return Value{
+		ty: r.Type,
+		v:  v,
+	}.Hash()
+}
+
+func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool {
+	v1v := Value{
+		ty: r.Type,
+		v:  v1,
+	}
+	v2v := Value{
+		ty: r.Type,
+		v:  v2,
+	}
+
+	eqv := v1v.Equals(v2v)
+
+	// By comparing the result to true we ensure that an Unknown result,
+	// which will result if either value is unknown, will be considered
+	// as non-equivalent. Two unknown values are not equivalent for the
+	// sake of set membership.
+	return eqv.v == true
+}
+
+func makeSetHashBytes(val Value) []byte {
+	var buf bytes.Buffer
+	appendSetHashBytes(val, &buf)
+	return buf.Bytes()
+}
+
+func appendSetHashBytes(val Value, buf *bytes.Buffer) {
+	// Exactly what bytes we generate here don't matter as long as the following
+	// constraints hold:
+	// - Unknown and null values all generate distinct strings from
+	//   each other and from any normal value of the given type.
+	// - The delimiter used to separate items in a compound structure can
+	//   never appear literally in any of its elements.
+	// Since we don't support heterogeneous lists we don't need to worry about
+	// collisions between values of different types, apart from
+	// DynamicPseudoType.
+	// If in practice we *do* get a collision then it's not a big deal because
+	// the Equivalent function will still distinguish values, but set
+	// performance will be best if we are able to produce a distinct string
+	// for each distinct value, unknown values notwithstanding.
+	if !val.IsKnown() {
+		buf.WriteRune('?')
+		return
+	}
+	if val.IsNull() {
+		buf.WriteRune('~')
+		return
+	}
+
+	switch val.ty {
+	case Number:
+		buf.WriteString(val.v.(*big.Float).String())
+		return
+	case Bool:
+		if val.v.(bool) {
+			buf.WriteRune('T')
+		} else {
+			buf.WriteRune('F')
+		}
+		return
+	case String:
+		buf.WriteString(fmt.Sprintf("%q", val.v.(string)))
+		return
+	}
+
+	if val.ty.IsMapType() {
+		buf.WriteRune('{')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(keyVal, buf)
+			buf.WriteRune(':')
+			appendSetHashBytes(elementVal, buf)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune('}')
+		return
+	}
+
+	if val.ty.IsListType() || val.ty.IsSetType() {
+		buf.WriteRune('[')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(elementVal, buf)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune(']')
+		return
+	}
+
+	if val.ty.IsObjectType() {
+		buf.WriteRune('<')
+		attrNames := make([]string, 0, len(val.ty.AttributeTypes()))
+		for attrName := range val.ty.AttributeTypes() {
+			attrNames = append(attrNames, attrName)
+		}
+		sort.Strings(attrNames)
+		for _, attrName := range attrNames {
+			appendSetHashBytes(val.GetAttr(attrName), buf)
+			buf.WriteRune(';')
+		}
+		buf.WriteRune('>')
+		return
+	}
+
+	if val.ty.IsTupleType() {
+		buf.WriteRune('<')
+		val.ForEachElement(func(keyVal, elementVal Value) bool {
+			appendSetHashBytes(elementVal, buf)
+			buf.WriteRune(';')
+			return false
+		})
+		buf.WriteRune('>')
+		return
+	}
+
+	// should never get down here
+	panic("unsupported type in set hash")
+}
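setRules above is one concrete Rules implementation; as a sketch of the contract (a deliberately weak Hash that tolerates collisions, with Equivalent as the tie-breaker), here is a hypothetical Rules for plain strings using the vendored set package. The type and its hashing scheme are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty/set"
)

// stringRules is a hypothetical Rules implementation for plain strings.
type stringRules struct{}

func (stringRules) Hash(v interface{}) int {
	s := v.(string)
	if len(s) == 0 {
		return 0
	}
	// Weak hash: bucket by first byte. Collisions are permitted; they only
	// reduce efficiency, since Equivalent still distinguishes values.
	return int(s[0])
}

func (stringRules) Equivalent(a, b interface{}) bool {
	return a.(string) == b.(string)
}

func main() {
	s := set.NewSet(stringRules{})
	s.Add("alpha")
	s.Add("apple") // same bucket as "alpha", kept distinct by Equivalent
	s.Add("alpha") // duplicate, ignored
	fmt.Println(s.Length()) // 2
}
```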
diff --git a/vendor/github.com/zclconf/go-cty/cty/set_type.go b/vendor/github.com/zclconf/go-cty/cty/set_type.go
new file mode 100644
index 00000000..cbc3706f
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/set_type.go
@@ -0,0 +1,72 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeSet struct {
+	typeImplSigil
+	ElementTypeT Type
+}
+
+// Set creates a set type with the given element Type.
+//
+// Set types are CollectionType implementations.
+func Set(elem Type) Type {
+	return Type{
+		typeSet{
+			ElementTypeT: elem,
+		},
+	}
+}
+
+// Equals returns true if the other Type is a set whose element type is
+// equal to that of the receiver.
+func (t typeSet) Equals(other Type) bool {
+	ot, isSet := other.typeImpl.(typeSet)
+	if !isSet {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeSet) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "set of " + elemName
+}
+
+func (t typeSet) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeSet) GoString() string {
+	return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT)
+}
+
+// IsSetType returns true if the given type is a set type, regardless of its
+// element type.
+func (t Type) IsSetType() bool {
+	_, ok := t.typeImpl.(typeSet)
+	return ok
+}
+
+// SetElementType is a convenience method that checks if the given type is
+// a set type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.SetElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) SetElementType() *Type {
+	if lt, ok := t.typeImpl.(typeSet); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/tuple_type.go b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go
new file mode 100644
index 00000000..798cacd6
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/tuple_type.go
@@ -0,0 +1,121 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeTuple struct {
+	typeImplSigil
+	ElemTypes []Type
+}
+
+// Tuple creates a tuple type with the given element types.
+//
+// After a slice is passed to this function the caller must no longer access
+// the underlying array, since ownership is transferred to this library.
+func Tuple(elemTypes []Type) Type {
+	return Type{
+		typeTuple{
+			ElemTypes: elemTypes,
+		},
+	}
+}
+
+func (t typeTuple) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeTuple); ok {
+		if len(t.ElemTypes) != len(ot.ElemTypes) {
+			// Fast path: if we don't have the same number of elements
+			// then we can't possibly be equal.
+			return false
+		}
+
+		for i, ty := range t.ElemTypes {
+			oty := ot.ElemTypes[i]
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeTuple) FriendlyName(mode friendlyTypeNameMode) string {
+	// There isn't really a friendly way to write a tuple type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// a tuple type FriendlyName in its entirety. For example, could
+	// produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "tuple"
+}
+
+func (t typeTuple) GoString() string {
+	if len(t.ElemTypes) == 0 {
+		return "cty.EmptyTuple"
+	}
+	return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes)
+}
+
+// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about
+// the empty tuple type.
+var EmptyTuple Type
+
+// EmptyTupleVal is the only possible non-null, non-unknown value of type
+// EmptyTuple.
+var EmptyTupleVal Value
+
+func init() {
+	EmptyTuple = Tuple([]Type{})
+	EmptyTupleVal = Value{
+		ty: EmptyTuple,
+		v:  []interface{}{},
+	}
+}
+
+// IsTupleType returns true if the given type is a tuple type, regardless
+// of its element type.
+func (t Type) IsTupleType() bool {
+	_, ok := t.typeImpl.(typeTuple)
+	return ok
+}
+
+// Length returns the number of elements of the receiving tuple type.
+// Will panic if the receiver isn't a tuple type; use IsTupleType to determine
+// whether this operation will succeed.
+func (t Type) Length() int {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return len(ot.ElemTypes)
+	}
+	panic("Length on non-tuple Type")
+}
+
+// TupleElementType returns the type of the element with the given index. Will
+// panic if the receiver is not a tuple type (use IsTupleType to confirm)
+// or if the index is out of range (use Length to confirm).
+func (t Type) TupleElementType(idx int) Type {
+	if ot, ok := t.typeImpl.(typeTuple); ok {
+		return ot.ElemTypes[idx]
+	}
+	panic("TupleElementType on non-tuple Type")
+}
+
+// TupleElementTypes returns a slice of the receiving tuple type's element
+// types. Will panic if the receiver is not a tuple type (use IsTupleType
+// to confirm).
+// +// The returned slice is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the +// underlying array. For many purposes the element-related methods of Value +// are more appropriate and more convenient to use. +func (t Type) TupleElementTypes() []Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes + } + panic("TupleElementTypes on non-tuple Type") +} diff --git a/vendor/github.com/zclconf/go-cty/cty/type.go b/vendor/github.com/zclconf/go-cty/cty/type.go new file mode 100644 index 00000000..730cb986 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/type.go @@ -0,0 +1,120 @@ +package cty + +// Type represents value types within the type system. +// +// This is a closed interface type, meaning that only the concrete +// implementations provided within this package are considered valid. +type Type struct { + typeImpl +} + +type typeImpl interface { + // isTypeImpl is a do-nothing method that exists only to express + // that a type is an implementation of typeImpl. + isTypeImpl() typeImplSigil + + // Equals returns true if the other given Type exactly equals the + // receiver Type. + Equals(other Type) bool + + // FriendlyName returns a human-friendly *English* name for the given + // type. + FriendlyName(mode friendlyTypeNameMode) string + + // GoString implements the GoStringer interface from package fmt. + GoString() string +} + +// Base implementation of Type to embed into concrete implementations +// to signal that they are implementations of Type. +type typeImplSigil struct{} + +func (t typeImplSigil) isTypeImpl() typeImplSigil { + return typeImplSigil{} +} + +// Equals returns true if the other given Type exactly equals the receiver +// type. +func (t Type) Equals(other Type) bool { + return t.typeImpl.Equals(other) +} + +// FriendlyName returns a human-friendly *English* name for the given type. +func (t Type) FriendlyName() string { + return t.typeImpl.FriendlyName(friendlyTypeName) +} + +// FriendlyNameForConstraint is similar to FriendlyName except that the +// result is specialized for describing type _constraints_ rather than types +// themselves. This is more appropriate when reporting that a particular value +// does not conform to an expected type constraint. +// +// In particular, this function uses the term "any type" to refer to +// cty.DynamicPseudoType, rather than "dynamic" as returned by FriendlyName. +func (t Type) FriendlyNameForConstraint() string { + return t.typeImpl.FriendlyName(friendlyTypeConstraintName) +} + +// friendlyNameMode is an internal combination of the various FriendlyName* +// variants that just directly takes a mode, for easy passthrough for +// recursive name construction. +func (t Type) friendlyNameMode(mode friendlyTypeNameMode) string { + return t.typeImpl.FriendlyName(mode) +} + +// GoString returns a string approximating how the receiver type would be +// expressed in Go source code. +func (t Type) GoString() string { + if t.typeImpl == nil { + return "cty.NilType" + } + + return t.typeImpl.GoString() +} + +// NilType is an invalid type used when a function is returning an error +// and has no useful type to return. It should not be used and any methods +// called on it will panic. +var NilType = Type{} + +// HasDynamicTypes returns true either if the receiver is itself +// DynamicPseudoType or if it is a compound type whose descendent elements +// are DynamicPseudoType. 
+func (t Type) HasDynamicTypes() bool {
+	switch {
+	case t == DynamicPseudoType:
+		return true
+	case t.IsPrimitiveType():
+		return false
+	case t.IsCollectionType():
+		return false
+	case t.IsObjectType():
+		attrTypes := t.AttributeTypes()
+		for _, at := range attrTypes {
+			if at.HasDynamicTypes() {
+				return true
+			}
+		}
+		return false
+	case t.IsTupleType():
+		elemTypes := t.TupleElementTypes()
+		for _, et := range elemTypes {
+			if et.HasDynamicTypes() {
+				return true
+			}
+		}
+		return false
+	case t.IsCapsuleType():
+		return false
+	default:
+		// Should never happen, since above should be exhaustive
		panic("HasDynamicTypes does not support the given type")
+	}
+}
+
+type friendlyTypeNameMode rune
+
+const (
+	friendlyTypeName           friendlyTypeNameMode = 'N'
+	friendlyTypeConstraintName friendlyTypeNameMode = 'C'
+)
diff --git a/vendor/github.com/zclconf/go-cty/cty/type_conform.go b/vendor/github.com/zclconf/go-cty/cty/type_conform.go
new file mode 100644
index 00000000..b417dc79
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/type_conform.go
@@ -0,0 +1,142 @@
+package cty
+
+// TestConformance recursively walks the receiver and the given other type and
+// returns nil if the receiver *conforms* to the given type.
+//
+// Type conformance is similar to type equality but has one crucial difference:
+// DynamicPseudoType can be used within the given type to represent that
+// *any* type is allowed.
+//
+// If any non-conformities are found, the returned slice will be non-nil and
+// contain at least one error value. It will be nil if the type is entirely
+// conformant.
+//
+// Note that the special behavior of DynamicPseudoType is the *only* exception
+// to normal type equality. Calling applications may wish to apply their own
+// automatic conversion logic to the given data structure to create a more
+// liberal notion of conformance to a type.
+//
+// Returned errors are usually (but not always) PathError instances that
+// indicate where in the structure the error was found. If a returned error
+// is of that type then the error message is written for (English-speaking)
+// end-users working within the cty type system, not mentioning any Go-oriented
+// implementation details.
+func (t Type) TestConformance(other Type) []error {
+	path := make(Path, 0)
+	var errs []error
+	testConformance(t, other, path, &errs)
+	return errs
+}
+
+func testConformance(given Type, want Type, path Path, errs *[]error) {
+	if want.Equals(DynamicPseudoType) {
+		// anything goes!
+		return
+	}
+
+	if given.Equals(want) {
+		// Any equal types are always conformant
+		return
+	}
+
+	// The remainder of this function is concerned with detecting
+	// and reporting the specific non-conformance, since we wouldn't
+	// have got here if the types were not divergent.
+	// We treat compound structures as special so that we can report
+	// specifically what is non-conforming, rather than simply returning
+	// the entire type names and letting the user puzzle it out.
+
+	if given.IsObjectType() && want.IsObjectType() {
+		givenAttrs := given.AttributeTypes()
+		wantAttrs := want.AttributeTypes()
+
+		if len(givenAttrs) != len(wantAttrs) {
+			// Something is missing from one of them.
+ for k := range givenAttrs { + if _, exists := wantAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "unsupported attribute %q", k), + ) + } + } + for k := range wantAttrs { + if _, exists := givenAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "missing required attribute %q", k), + ) + } + } + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for k, wantAttrType := range wantAttrs { + if givenAttrType, exists := givenAttrs[k]; exists { + path[pathIdx] = GetAttrStep{Name: k} + testConformance(givenAttrType, wantAttrType, path, errs) + } + } + + path = path[0:pathIdx] + + return + } + + if given.IsTupleType() && want.IsTupleType() { + givenElems := given.TupleElementTypes() + wantElems := want.TupleElementTypes() + + if len(givenElems) != len(wantElems) { + *errs = append( + *errs, + errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)), + ) + return + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for i, wantElemType := range wantElems { + givenElemType := givenElems[i] + path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))} + testConformance(givenElemType, wantElemType, path, errs) + } + + path = path[0:pathIdx] + + return + } + + if given.IsListType() && want.IsListType() { + path = append(path, IndexStep{Key: UnknownVal(Number)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsMapType() && want.IsMapType() { + path = append(path, IndexStep{Key: UnknownVal(String)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsSetType() && want.IsSetType() { + path = append(path, IndexStep{Key: UnknownVal(given.ElementType())}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + *errs = append( + *errs, + errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()), + ) +} diff --git a/vendor/github.com/zclconf/go-cty/cty/types_to_register.go b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go new file mode 100644 index 00000000..e1e220aa --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go @@ -0,0 +1,57 @@ +package cty + +import ( + "encoding/gob" + "fmt" + "math/big" + "strings" + + "github.com/zclconf/go-cty/cty/set" +) + +// InternalTypesToRegister is a slice of values that covers all of the +// internal types used in the representation of cty.Type and cty.Value +// across all cty Types. +// +// This is intended to be used to register these types with encoding +// packages that require registration of types used in interfaces, such as +// encoding/gob, thus allowing cty types and values to be included in streams +// created from those packages. However, registering with gob is not necessary +// since that is done automatically as a side-effect of importing this package. +// +// Callers should not do anything with the values here except pass them on +// verbatim to a registration function. +// +// If the calling application uses Capsule types that wrap local structs either +// directly or indirectly, these structs may also need to be registered in +// order to support encoding and decoding of values of these types. That is the +// responsibility of the calling application. 
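+//
+// A non-normative usage sketch (someEncoder and its Register method are
+// hypothetical, standing in for any encoder with a gob-like registry):
+//
+//	for _, tv := range cty.InternalTypesToRegister {
+//		someEncoder.Register(tv)
+//	}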
+var InternalTypesToRegister []interface{}
+
+func init() {
+	InternalTypesToRegister = []interface{}{
+		primitiveType{},
+		typeList{},
+		typeMap{},
+		typeObject{},
+		typeSet{},
+		setRules{},
+		set.Set{},
+		typeTuple{},
+		big.Float{},
+		capsuleType{},
+		[]interface{}(nil),
+		map[string]interface{}(nil),
+	}
+
+	// Register these with gob here, rather than in gob.go, to ensure
+	// that this will always happen after we build the above.
+	for _, tv := range InternalTypesToRegister {
+		typeName := fmt.Sprintf("%T", tv)
+		if strings.HasPrefix(typeName, "cty.") {
+			gob.RegisterName(fmt.Sprintf("github.com/zclconf/go-cty/%s", typeName), tv)
+		} else {
+			gob.Register(tv)
+		}
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown.go b/vendor/github.com/zclconf/go-cty/cty/unknown.go
new file mode 100644
index 00000000..e54179eb
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/unknown.go
@@ -0,0 +1,84 @@
+package cty
+
+// unknownType is the placeholder type used for the sigil value representing
+// "Unknown", to make it unambiguously distinct from any other possible value.
+type unknownType struct {
+}
+
+// unknown is the sigil value used internally to represent "Unknown".
+var unknown interface{} = &unknownType{}
+
+// UnknownVal returns a Value that represents an unknown value of the given
+// type. Unknown values can be used to represent a value that is
+// not yet known. Its meaning is undefined in cty, but it could be used by
+// a calling application to allow partial evaluation.
+//
+// Unknown values of any type can be created. All operations on
+// Unknown values themselves return Unknown.
+func UnknownVal(t Type) Value {
+	return Value{
+		ty: t,
+		v:  unknown,
+	}
+}
+
+func (t unknownType) GoString() string {
+	// This is the stringification of our internal unknown marker. The
+	// stringification of the public representation of unknowns is in
+	// Value.GoString.
+	return "cty.unknown"
+}
+
+type pseudoTypeDynamic struct {
+	typeImplSigil
+}
+
+// DynamicPseudoType represents the dynamic pseudo-type.
+//
+// This type can represent situations where a type is not yet known. Its
+// meaning is undefined in cty, but it could be used by a calling
+// application to allow expression type checking with some types not yet known.
+// For example, the application might optimistically permit any operation on
+// values of this type in type checking, allowing a partial type-check result,
+// and then repeat the check when more information is known to get the
+// final, concrete type.
+//
+// It is a pseudo-type because it is used only as a sigil to the calling
+// application. "Unknown" is the only valid value of this pseudo-type, so
+// operations on values of this type will always short-circuit as per
+// the rules for that special value.
+var DynamicPseudoType Type
+
+func (t pseudoTypeDynamic) Equals(other Type) bool {
+	_, ok := other.typeImpl.(pseudoTypeDynamic)
+	return ok
+}
+
+func (t pseudoTypeDynamic) FriendlyName(mode friendlyTypeNameMode) string {
+	switch mode {
+	case friendlyTypeConstraintName:
+		return "any type"
+	default:
+		return "dynamic"
+	}
+}
+
+func (t pseudoTypeDynamic) GoString() string {
+	return "cty.DynamicPseudoType"
+}
+
+// DynamicVal is the only valid value of the pseudo-type dynamic.
+// This value can be used as a placeholder where a value or expression's
+// type and value are both unknown, thus allowing partial evaluation. See
+// the docs for DynamicPseudoType for more information.
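+//
+// A non-normative sketch: an evaluator that cannot yet resolve a variable
+// might yield DynamicVal so that checking can continue.
+//
+//	v := cty.DynamicVal
+//	v.Type() == cty.DynamicPseudoType // true
+//	v.IsKnown()                       // false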
+var DynamicVal Value + +func init() { + DynamicPseudoType = Type{ + pseudoTypeDynamic{}, + } + DynamicVal = Value{ + ty: DynamicPseudoType, + v: unknown, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go b/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go new file mode 100644 index 00000000..ba926475 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/unknown_as_null.go @@ -0,0 +1,64 @@ +package cty + +// UnknownAsNull returns a value of the same type as the given value but +// with any unknown values (including nested values) replaced with null +// values of the same type. +// +// This can be useful if a result is to be serialized in a format that can't +// represent unknowns, such as JSON, as long as the caller does not need to +// retain the unknown value information. +func UnknownAsNull(val Value) Value { + ty := val.Type() + switch { + case val.IsNull(): + return val + case !val.IsKnown(): + return NullVal(ty) + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make([]Value, 0, length) + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, UnknownAsNull(v)) + } + switch { + case ty.IsListType(): + return ListVal(vals) + case ty.IsTupleType(): + return TupleVal(vals) + default: + return SetVal(vals) + } + case ty.IsMapType() || ty.IsObjectType(): + var length int + switch { + case ty.IsMapType(): + length = val.LengthInt() + default: + length = len(val.Type().AttributeTypes()) + } + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make(map[string]Value, length) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + vals[k.AsString()] = UnknownAsNull(v) + } + switch { + case ty.IsMapType(): + return MapVal(vals) + default: + return ObjectVal(vals) + } + } + + return val +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value.go b/vendor/github.com/zclconf/go-cty/cty/value.go new file mode 100644 index 00000000..80cb8f76 --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value.go @@ -0,0 +1,98 @@ +package cty + +// Value represents a value of a particular type, and is the interface by +// which operations are executed on typed values. +// +// Value has two different classes of method. Operation methods stay entirely +// within the type system (methods accept and return Value instances) and +// are intended for use in implementing a language in terms of cty, while +// integration methods either enter or leave the type system, working with +// native Go values. Operation methods are guaranteed to support all of the +// expected short-circuit behavior for unknown and dynamic values, while +// integration methods may not. +// +// The philosophy for the operations API is that it's the caller's +// responsibility to ensure that the given types and values satisfy the +// specified invariants during a separate type check, so that the caller is +// able to return errors to its user from the application's own perspective. +// +// Consequently the design of these methods assumes such checks have already +// been done and panics if any invariants turn out not to be satisfied. These +// panic errors are not intended to be handled, but rather indicate a bug in +// the calling application that should be fixed with more checks prior to +// executing operations. 
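+//
+// For example, calling Add on two cty.String values panics, so a calling
+// language is expected to type-check the operands and report its own error
+// before invoking Add.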
+//
+// A related consequence of this philosophy is that no automatic type
+// conversions are done. If a method specifies that its argument must be a
+// number then it's the caller's responsibility to do that conversion before
+// the call, thus allowing the application to have more constrained conversion
+// rules than are offered by the built-in converter where necessary.
+type Value struct {
+	ty Type
+	v  interface{}
+}
+
+// Type returns the type of the value.
+func (val Value) Type() Type {
+	return val.ty
+}
+
+// IsKnown returns true if the value is known. That is, if it is not
+// the result of the unknown value constructor UnknownVal(...), and is not
+// the result of an operation on another unknown value.
+//
+// Unknown values are only produced either directly or as a result of
+// operating on other unknown values, and so an application that never
+// introduces Unknown values can be guaranteed to never receive any either.
+func (val Value) IsKnown() bool {
+	return val.v != unknown
+}
+
+// IsNull returns true if the value is null. Values of any type can be
+// null, but any operations on a null value will panic. No operation ever
+// produces null, so an application that never introduces Null values can
+// be guaranteed to never receive any either.
+func (val Value) IsNull() bool {
+	return val.v == nil
+}
+
+// NilVal is an invalid Value that can be used as a placeholder when returning
+// with an error from a function that returns (Value, error).
+//
+// NilVal is *not* a valid Value and so no operations may be performed on it.
+// Any attempt to use it will result in a panic.
+//
+// This should not be confused with the idea of a Null value, as returned by
+// NullVal. NilVal is a nil within the *Go* type system, and is invalid in
+// the cty type system. Null values *do* exist in the cty type system.
+var NilVal = Value{
+	ty: Type{typeImpl: nil},
+	v:  nil,
+}
+
+// IsWhollyKnown is an extension of IsKnown that also recursively checks
+// inside collections and structures to see if there are any nested unknown
+// values.
+func (val Value) IsWhollyKnown() bool {
+	if !val.IsKnown() {
+		return false
+	}
+
+	if val.IsNull() {
+		// Can't recurse into a null, so we're done
+		return true
+	}
+
+	switch {
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			_, ev := it.Element()
+			if !ev.IsWhollyKnown() {
+				return false
+			}
+		}
+		return true
+	default:
+		return true
+	}
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/value_init.go b/vendor/github.com/zclconf/go-cty/cty/value_init.go
new file mode 100644
index 00000000..495a83e6
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/value_init.go
@@ -0,0 +1,276 @@
+package cty
+
+import (
+	"fmt"
+	"math/big"
+	"reflect"
+
+	"golang.org/x/text/unicode/norm"
+
+	"github.com/zclconf/go-cty/cty/set"
+)
+
+// BoolVal returns a Value of type Bool whose internal value is the given
+// bool.
+func BoolVal(v bool) Value {
+	return Value{
+		ty: Bool,
+		v:  v,
+	}
+}
+
+// NumberVal returns a Value of type Number whose internal value is the given
+// big.Float. The returned value becomes the owner of the big.Float object,
+// and so it's forbidden for the caller to mutate the object after it's
+// wrapped in this way.
+func NumberVal(v *big.Float) Value {
+	return Value{
+		ty: Number,
+		v:  v,
+	}
+}
+
+// NumberIntVal returns a Value of type Number whose internal value is equal
+// to the given integer.
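+//
+// A non-normative usage sketch:
+//
+//	n := cty.NumberIntVal(42)
+//	n.Equals(cty.NumberIntVal(42)) // cty.True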
+func NumberIntVal(v int64) Value { + return NumberVal(new(big.Float).SetInt64(v)) +} + +// NumberUIntVal returns a Value of type Number whose internal value is equal +// to the given unsigned integer. +func NumberUIntVal(v uint64) Value { + return NumberVal(new(big.Float).SetUint64(v)) +} + +// NumberFloatVal returns a Value of type Number whose internal value is +// equal to the given float. +func NumberFloatVal(v float64) Value { + return NumberVal(new(big.Float).SetFloat64(v)) +} + +// StringVal returns a Value of type String whose internal value is the +// given string. +// +// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and +// they are NFC-normalized on entry into the world of cty values. +// +// If the given string is not valid UTF-8 then behavior of string operations +// is undefined. +func StringVal(v string) Value { + return Value{ + ty: String, + v: NormalizeString(v), + } +} + +// NormalizeString applies the same normalization that cty applies when +// constructing string values. +// +// A return value from this function can be meaningfully compared byte-for-byte +// with a Value.AsString result. +func NormalizeString(s string) string { + return norm.NFC.String(s) +} + +// ObjectVal returns a Value of an object type whose structure is defined +// by the key names and value types in the given map. +func ObjectVal(attrs map[string]Value) Value { + attrTypes := make(map[string]Type, len(attrs)) + attrVals := make(map[string]interface{}, len(attrs)) + + for attr, val := range attrs { + attr = NormalizeString(attr) + attrTypes[attr] = val.ty + attrVals[attr] = val.v + } + + return Value{ + ty: Object(attrTypes), + v: attrVals, + } +} + +// TupleVal returns a Value of a tuple type whose element types are +// defined by the value types in the given slice. +func TupleVal(elems []Value) Value { + elemTypes := make([]Type, len(elems)) + elemVals := make([]interface{}, len(elems)) + + for i, val := range elems { + elemTypes[i] = val.ty + elemVals[i] = val.v + } + + return Value{ + ty: Tuple(elemTypes), + v: elemVals, + } +} + +// ListVal returns a Value of list type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also ListValEmpty.) +func ListVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call ListVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent list element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + return Value{ + ty: List(elementType), + v: rawList, + } +} + +// ListValEmpty returns an empty list of the given element type. +func ListValEmpty(element Type) Value { + return Value{ + ty: List(element), + v: []interface{}{}, + } +} + +// MapVal returns a Value of a map type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. 
It will panic also +// if the given map is empty, since then the element type cannot be inferred. +// (See also MapValEmpty.) +func MapVal(vals map[string]Value) Value { + if len(vals) == 0 { + panic("must not call MapVal with empty map") + } + elementType := DynamicPseudoType + rawMap := make(map[string]interface{}, len(vals)) + + for key, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent map element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawMap[NormalizeString(key)] = val.v + } + + return Value{ + ty: Map(elementType), + v: rawMap, + } +} + +// MapValEmpty returns an empty map of the given element type. +func MapValEmpty(element Type) Value { + return Value{ + ty: Map(element), + v: map[string]interface{}{}, + } +} + +// SetVal returns a Value of set type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also SetValEmpty.) +func SetVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call SetVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent set element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + rawVal := set.NewSetFromSlice(setRules{elementType}, rawList) + + return Value{ + ty: Set(elementType), + v: rawVal, + } +} + +// SetValFromValueSet returns a Value of set type based on an already-constructed +// ValueSet. +// +// The element type of the returned value is the element type of the given +// set. +func SetValFromValueSet(s ValueSet) Value { + ety := s.ElementType() + rawVal := s.s.Copy() // copy so caller can't mutate what we wrap + + return Value{ + ty: Set(ety), + v: rawVal, + } +} + +// SetValEmpty returns an empty set of the given element type. +func SetValEmpty(element Type) Value { + return Value{ + ty: Set(element), + v: set.NewSet(setRules{element}), + } +} + +// CapsuleVal creates a value of the given capsule type using the given +// wrapVal, which must be a pointer to a value of the capsule type's native +// type. +// +// This function will panic if the given type is not a capsule type, if +// the given wrapVal is not compatible with the given capsule type, or if +// wrapVal is not a pointer. 
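+//
+// A non-normative usage sketch (the "widget" capsule type and the Widget
+// struct are hypothetical):
+//
+//	widgetCapsule := cty.Capsule("widget", reflect.TypeOf(Widget{}))
+//	v := cty.CapsuleVal(widgetCapsule, &Widget{Name: "sprocket"})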
+func CapsuleVal(ty Type, wrapVal interface{}) Value { + if !ty.IsCapsuleType() { + panic("not a capsule type") + } + + wv := reflect.ValueOf(wrapVal) + if wv.Kind() != reflect.Ptr { + panic("wrapVal is not a pointer") + } + + it := ty.typeImpl.(*capsuleType).GoType + if !wv.Type().Elem().AssignableTo(it) { + panic("wrapVal target is not compatible with the given capsule type") + } + + return Value{ + ty: ty, + v: wrapVal, + } +} diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go new file mode 100644 index 00000000..ff540b7f --- /dev/null +++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go @@ -0,0 +1,1074 @@ +package cty + +import ( + "fmt" + "math/big" + + "reflect" + + "github.com/zclconf/go-cty/cty/set" +) + +func (val Value) GoString() string { + if val == NilVal { + return "cty.NilVal" + } + + if val.IsNull() { + return fmt.Sprintf("cty.NullVal(%#v)", val.ty) + } + if val == DynamicVal { // is unknown, so must be before the IsKnown check below + return "cty.DynamicVal" + } + if !val.IsKnown() { + return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty) + } + + // By the time we reach here we've dealt with all of the exceptions around + // unknowns and nulls, so we're guaranteed that the values are the + // canonical internal representation of the given type. + + switch val.ty { + case Bool: + if val.v.(bool) { + return "cty.True" + } else { + return "cty.False" + } + case Number: + fv := val.v.(*big.Float) + // We'll try to use NumberIntVal or NumberFloatVal if we can, since + // the fully-general initializer call is pretty ugly-looking. + if fv.IsInt() { + return fmt.Sprintf("cty.NumberIntVal(%#v)", fv) + } + if rfv, accuracy := fv.Float64(); accuracy == big.Exact { + return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv) + } + return fmt.Sprintf("cty.NumberVal(new(big.Float).Parse(\"%#v\", 10))", fv) + case String: + return fmt.Sprintf("cty.StringVal(%#v)", val.v) + } + + switch { + case val.ty.IsSetType(): + vals := val.v.(set.Set).Values() + if vals == nil || len(vals) == 0 { + return fmt.Sprintf("cty.SetValEmpty()") + } else { + return fmt.Sprintf("cty.SetVal(%#v)", vals) + } + case val.ty.IsCapsuleType(): + return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v) + } + + // Default exposes implementation details, so should actually cover + // all of the cases above for good caller UX. + return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v) +} + +// Equals returns True if the receiver and the given other value have the +// same type and are exactly equal in value. +// +// The usual short-circuit rules apply, so the result can be unknown or typed +// as dynamic if either of the given values are. Use RawEquals to compare +// if two values are equal *ignoring* the short-circuit rules. 
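+//
+// A non-normative sketch of the behavior:
+//
+//	cty.StringVal("a").Equals(cty.StringVal("a"))         // cty.True
+//	cty.StringVal("a").Equals(cty.StringVal("b"))         // cty.False
+//	cty.StringVal("a").Equals(cty.UnknownVal(cty.String)) // cty.UnknownVal(cty.Bool)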
+func (val Value) Equals(other Value) Value { + if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() { + return UnknownVal(Bool) + } + + if !val.ty.Equals(other.ty) { + return BoolVal(false) + } + + if !(val.IsKnown() && other.IsKnown()) { + return UnknownVal(Bool) + } + + if val.IsNull() || other.IsNull() { + if val.IsNull() && other.IsNull() { + return BoolVal(true) + } + return BoolVal(false) + } + + ty := val.ty + result := false + + switch { + case ty == Number: + result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0 + case ty == Bool: + result = val.v.(bool) == other.v.(bool) + case ty == String: + // Simple equality is safe because we NFC-normalize strings as they + // enter our world from StringVal, and so we can assume strings are + // always in normal form. + result = val.v.(string) == other.v.(string) + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + result = true + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + result = true + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + result = true + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + equal := true + + // Note that by our definition of sets it's never possible for two + // sets that contain unknown values (directly or indicrectly) to + // ever be equal, even if they are otherwise identical. + + // FIXME: iterating both lists and checking each item is not the + // ideal implementation here, but it works with the primitives we + // have in the set implementation. Perhaps the set implementation + // can provide its own equality test later. + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + equal = false + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + equal = false + } + }) + + result = equal + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + result = true + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + result = false + break + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsCapsuleType(): + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. 
+ return BoolVal(val.v == other.v) + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in Equals", ty)) + } + + return BoolVal(result) +} + +// NotEqual is a shorthand for Equals followed by Not. +func (val Value) NotEqual(other Value) Value { + return val.Equals(other).Not() +} + +// True returns true if the receiver is True, false if False, and panics if +// the receiver is not of type Bool. +// +// This is a helper function to help write application logic that works with +// values, rather than a first-class operation. It does not work with unknown +// or null values. For more robust handling with unknown value +// short-circuiting, use val.Equals(cty.True). +func (val Value) True() bool { + if val.ty != Bool { + panic("not bool") + } + return val.Equals(True).v.(bool) +} + +// False is the opposite of True. +func (val Value) False() bool { + return !val.True() +} + +// RawEquals returns true if and only if the two given values have the same +// type and equal value, ignoring the usual short-circuit rules about +// unknowns and dynamic types. +// +// This method is more appropriate for testing than for real use, since it +// skips over usual semantics around unknowns but as a consequence allows +// testing the result of another operation that is expected to return unknown. +// It returns a primitive Go bool rather than a Value to remind us that it +// is not a first-class value operation. +func (val Value) RawEquals(other Value) bool { + if !val.ty.Equals(other.ty) { + return false + } + if (!val.IsKnown()) && (!other.IsKnown()) { + return true + } + if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { + return false + } + if val.IsNull() && other.IsNull() { + return true + } + if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) { + return false + } + if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType { + return true + } + + ty := val.ty + switch { + case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType: + return val.Equals(other).True() + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + + // Since we're intentionally ignoring our rule that two unknowns + // are never equal, we can cheat here. + // (This isn't 100% right since e.g. it will fail if the set contains + // numbers that are infinite, which DeepEqual can't compare properly. + // We're accepting that limitation for simplicity here, since this + // function is here primarily for testing.) 
+ return reflect.DeepEqual(s1, s2) + + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + return false + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsCapsuleType(): + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return val.v == other.v + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty)) + } +} + +// Add returns the sum of the receiver and the given other value. Both values +// must be numbers; this method will panic if not. +func (val Value) Add(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Add(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Subtract returns receiver minus the given other value. Both values must be +// numbers; this method will panic if not. +func (val Value) Subtract(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + return val.Add(other.Negate()) +} + +// Negate returns the numeric negative of the receiver, which must be a number. +// This method will panic when given a value of any other type. +func (val Value) Negate() Value { + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float).Neg(val.v.(*big.Float)) + return NumberVal(ret) +} + +// Multiply returns the product of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +func (val Value) Multiply(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Mul(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Divide returns the quotient of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// If both values are zero or infinity, this function will panic with +// an instance of big.ErrNaN. 
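+//
+// A non-normative sketch of the behavior:
+//
+//	cty.NumberIntVal(7).Divide(cty.NumberIntVal(2)) // cty.NumberFloatVal(3.5)
+//	cty.NumberIntVal(1).Divide(cty.Zero)            // cty.PositiveInfinity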
+func (val Value) Divide(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Quo(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Modulo returns the remainder of an integer division of the receiver and +// the given other value. Both values must be numbers; this method will panic +// if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// This operation is primarily here for use with nonzero natural numbers. +// Modulo with "other" as a non-natural number gets somewhat philosophical, +// and this function takes a position on what that should mean, but callers +// may wish to disallow such things outright or implement their own modulo +// if they disagree with the interpretation used here. +func (val Value) Modulo(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + // We cheat a bit here with infinities, just abusing the Multiply operation + // to get an infinite result of the correct sign. + if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity { + return val.Multiply(other) + } + + if other.RawEquals(Zero) { + return val + } + + // FIXME: This is a bit clumsy. Should come back later and see if there's a + // more straightforward way to do this. + rat := val.Divide(other) + ratFloorInt := &big.Int{} + rat.v.(*big.Float).Int(ratFloorInt) + work := (&big.Float{}).SetInt(ratFloorInt) + work.Mul(other.v.(*big.Float), work) + work.Sub(val.v.(*big.Float), work) + + return NumberVal(work) +} + +// Absolute returns the absolute (signless) value of the receiver, which must +// be a number or this method will panic. +func (val Value) Absolute() Value { + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := (&big.Float{}).Abs(val.v.(*big.Float)) + return NumberVal(ret) +} + +// GetAttr returns the value of the given attribute of the receiver, which +// must be of an object type that has an attribute of the given name. +// This method will panic if the receiver type is not compatible. +// +// The method will also panic if the given attribute name is not defined +// for the value's type. Use the attribute-related methods on Type to +// check for the validity of an attribute before trying to use it. +// +// This method may be called on a value whose type is DynamicPseudoType, +// in which case the result will also be DynamicVal. 
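+//
+// A non-normative usage sketch:
+//
+//	obj := cty.ObjectVal(map[string]cty.Value{
+//		"name": cty.StringVal("a"),
+//	})
+//	obj.GetAttr("name") // cty.StringVal("a")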
+func (val Value) GetAttr(name string) Value {
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	if !val.ty.IsObjectType() {
+		panic("value is not an object")
+	}
+
+	name = NormalizeString(name)
+	if !val.ty.HasAttribute(name) {
+		panic("value has no attribute of that name")
+	}
+
+	attrType := val.ty.AttributeType(name)
+
+	if !val.IsKnown() {
+		return UnknownVal(attrType)
+	}
+
+	return Value{
+		ty: attrType,
+		v:  val.v.(map[string]interface{})[name],
+	}
+}
+
+// Index returns the value of an element of the receiver, which must have
+// either a list, map or tuple type. This method will panic if the receiver
+// type is not compatible.
+//
+// The key value must be the correct type for the receiving collection: a
+// number if the collection is a list or tuple, or a string if it is a map.
+// In the case of a list or tuple, the given number must be convertible to int
+// or this method will panic. The key may alternatively be of
+// DynamicPseudoType, in which case the result itself is an unknown of the
+// collection's element type.
+//
+// The result is of the receiver collection's element type, or in the case
+// of a tuple the type of the specific element index requested.
+//
+// This method may be called on a value whose type is DynamicPseudoType,
+// in which case the result will also be DynamicVal.
+func (val Value) Index(key Value) Value {
+	if val.ty == DynamicPseudoType {
+		return DynamicVal
+	}
+
+	switch {
+	case val.Type().IsListType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != Number {
+			panic("element key for list must be number")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for list must be non-negative integer")
+		}
+
+		return Value{
+			ty: elty,
+			v:  val.v.([]interface{})[index],
+		}
+	case val.Type().IsMapType():
+		elty := val.Type().ElementType()
+		if key.Type() == DynamicPseudoType {
+			return UnknownVal(elty)
+		}
+
+		if key.Type() != String {
+			panic("element key for map must be string")
+		}
+		if !key.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		if !val.IsKnown() {
+			return UnknownVal(elty)
+		}
+
+		keyStr := key.v.(string)
+
+		return Value{
+			ty: elty,
+			v:  val.v.(map[string]interface{})[keyStr],
+		}
+	case val.Type().IsTupleType():
+		if key.Type() == DynamicPseudoType {
+			return DynamicVal
+		}
+
+		if key.Type() != Number {
+			panic("element key for tuple must be number")
+		}
+		if !key.IsKnown() {
+			return DynamicVal
+		}
+
+		index, accuracy := key.v.(*big.Float).Int64()
+		if accuracy != big.Exact || index < 0 {
+			panic("element key for tuple must be non-negative integer")
+		}
+
+		eltys := val.Type().TupleElementTypes()
+
+		if !val.IsKnown() {
+			return UnknownVal(eltys[index])
+		}
+
+		return Value{
+			ty: eltys[index],
+			v:  val.v.([]interface{})[index],
+		}
+	default:
+		panic("not a list, map, or tuple type")
+	}
+}
+
+// HasIndex returns True if the receiver (which must be supported for Index)
+// has an element with the given index key, or False if it does not.
+//
+// The result will be UnknownVal(Bool) if either the collection or the
+// key value are unknown.
+//
+// This method will panic if the receiver is not indexable, but does not
+// impose any panic-causing type constraints on the key.
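+//
+// A non-normative sketch of the behavior:
+//
+//	l := cty.ListVal([]cty.Value{cty.StringVal("a")})
+//	l.HasIndex(cty.NumberIntVal(0)) // cty.True
+//	l.HasIndex(cty.NumberIntVal(1)) // cty.False
+//	l.HasIndex(cty.StringVal("x"))  // cty.False (wrong key type, but no panic)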
+func (val Value) HasIndex(key Value) Value { + if val.ty == DynamicPseudoType { + return UnknownVal(Bool) + } + + switch { + case val.Type().IsListType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0) + case val.Type().IsMapType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != String { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + keyStr := key.v.(string) + _, exists := val.v.(map[string]interface{})[keyStr] + + return BoolVal(exists) + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + length := val.Type().Length() + return BoolVal(int(index) < length && index >= 0) + default: + panic("not a list, map, or tuple type") + } +} + +// HasElement returns True if the receiver (which must be of a set type) +// has the given value as an element, or False if it does not. +// +// The result will be UnknownVal(Bool) if either the set or the +// given value are unknown. +// +// This method will panic if the receiver is not a set, or if it is a null set. +func (val Value) HasElement(elem Value) Value { + ty := val.Type() + + if !ty.IsSetType() { + panic("not a set type") + } + if !val.IsKnown() || !elem.IsKnown() { + return UnknownVal(Bool) + } + if val.IsNull() { + panic("can't call HasElement on a nil value") + } + if !ty.ElementType().Equals(elem.Type()) { + return False + } + + s := val.v.(set.Set) + return BoolVal(s.Has(elem.v)) +} + +// Length returns the length of the receiver, which must be a collection type +// or tuple type, as a number value. If the receiver is not a compatible type +// then this method will panic. +// +// If the receiver is unknown then the result is also unknown. +// +// If the receiver is null then this function will panic. +// +// Note that Length is not supported for strings. To determine the length +// of a string, call AsString and take the length of the native Go string +// that is returned. +func (val Value) Length() Value { + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. + return NumberIntVal(int64(val.Type().Length())) + } + + if !val.IsKnown() { + return UnknownVal(Number) + } + + return NumberIntVal(int64(val.LengthInt())) +} + +// LengthInt is like Length except it returns an int. It has the same behavior +// as Length except that it will panic if the receiver is unknown. +// +// This is an integration method provided for the convenience of code bridging +// into Go's type system. +func (val Value) LengthInt() int { + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. + return val.Type().Length() + } + if val.Type().IsObjectType() { + // For objects, the length is the number of attributes associated with the type. 
+		return len(val.Type().AttributeTypes())
+	}
+	if !val.IsKnown() {
+		panic("value is not known")
+	}
+	if val.IsNull() {
+		panic("value is null")
+	}
+
+	switch {
+
+	case val.ty.IsListType():
+		return len(val.v.([]interface{}))
+
+	case val.ty.IsSetType():
+		return val.v.(set.Set).Length()
+
+	case val.ty.IsMapType():
+		return len(val.v.(map[string]interface{}))
+
+	default:
+		panic("value is not a collection")
+	}
+}
+
+// ElementIterator returns an ElementIterator for iterating the elements
+// of the receiver, which must be a collection type, a tuple type, or an object
+// type. If called on a value of any other type, this method will panic.
+//
+// The value must be Known and non-Null, or this method will panic.
+//
+// If the receiver is of a list type, the returned keys will be of type Number
+// and the values will be of the list's element type.
+//
+// If the receiver is of a map type, the returned keys will be of type String
+// and the value will be of the map's element type. Elements are passed in
+// ascending lexicographical order by key.
+//
+// If the receiver is of a set type, each element is returned as both the
+// key and the value, since set members are their own identity.
+//
+// If the receiver is of a tuple type, the returned keys will be of type Number
+// and the value will be of the corresponding element's type.
+//
+// If the receiver is of an object type, the returned keys will be of type
+// String and the value will be of the corresponding attribute's type.
+//
+// ElementIterator is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ElementIterator() ElementIterator {
+	if !val.IsKnown() {
+		panic("can't use ElementIterator on unknown value")
+	}
+	if val.IsNull() {
+		panic("can't use ElementIterator on null value")
+	}
+	return elementIterator(val)
+}
+
+// CanIterateElements returns true if the receiver can support the
+// ElementIterator method (and by extension, ForEachElement) without panic.
+func (val Value) CanIterateElements() bool {
+	return canElementIterator(val)
+}
+
+// ForEachElement executes a given callback function for each element of
+// the receiver, which must be a collection type or tuple type, or this method
+// will panic.
+//
+// ForEachElement uses ElementIterator internally, and so the values passed
+// to the callback are as described for ElementIterator.
+//
+// Returns true if the iteration exited early due to the callback function
+// returning true, or false if the loop ran to completion.
+//
+// ForEachElement is an integration method, so it cannot handle Unknown
+// values. This method will panic if the receiver is Unknown.
+func (val Value) ForEachElement(cb ElementCallback) bool {
+	it := val.ElementIterator()
+	for it.Next() {
+		key, val := it.Element()
+		stop := cb(key, val)
+		if stop {
+			return true
+		}
+	}
+	return false
+}
+
+// Not returns the logical inverse of the receiver, which must be of type
+// Bool or this method will panic.
+func (val Value) Not() Value {
+	if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil {
+		shortCircuit = forceShortCircuitType(shortCircuit, Bool)
+		return *shortCircuit
+	}
+
+	return BoolVal(!val.v.(bool))
+}
+
+// And returns the result of logical AND with the receiver and the other given
+// value, which must both be of type Bool or this method will panic.
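+//
+// A non-normative sketch of the behavior, including the unknown
+// short-circuit:
+//
+//	cty.True.And(cty.False)                // cty.False
+//	cty.True.And(cty.UnknownVal(cty.Bool)) // cty.UnknownVal(cty.Bool)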
+func (val Value) And(other Value) Value { + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) && other.v.(bool)) +} + +// Or returns the result of logical OR with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) Or(other Value) Value { + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) || other.v.(bool)) +} + +// LessThan returns True if the receiver is less than the other given value, +// which must both be numbers or this method will panic. +func (val Value) LessThan(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) +} + +// GreaterThan returns True if the receiver is greater than the other given +// value, which must both be numbers or this method will panic. +func (val Value) GreaterThan(other Value) Value { + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) +} + +// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or. +func (val Value) LessThanOrEqualTo(other Value) Value { + return val.LessThan(other).Or(val.Equals(other)) +} + +// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or. +func (val Value) GreaterThanOrEqualTo(other Value) Value { + return val.GreaterThan(other).Or(val.Equals(other)) +} + +// AsString returns the native string from a non-null, non-unknown cty.String +// value, or panics if called on any other value. +func (val Value) AsString() string { + if val.ty != String { + panic("not a string") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + return val.v.(string) +} + +// AsBigFloat returns a big.Float representation of a non-null, non-unknown +// cty.Number value, or panics if called on any other value. +// +// For more convenient conversions to other native numeric types, use the +// "gocty" package. +func (val Value) AsBigFloat() *big.Float { + if val.ty != Number { + panic("not a number") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + // Copy the float so that callers can't mutate our internal state + ret := *(val.v.(*big.Float)) + + return &ret +} + +// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown +// value of any type that CanIterateElements, or panics if called on +// any other value. +// +// For more convenient conversions to slices of more specific types, use +// the "gocty" package. +func (val Value) AsValueSlice() []Value { + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret = append(ret, v) + } + return ret +} + +// AsValueMap returns a map[string]cty.Value representation of a non-null, +// non-unknown value of any type that CanIterateElements, or panics if called +// on any other value. 
+//
+// For more convenient conversions to maps of more specific types, use
+// the "gocty" package.
+func (val Value) AsValueMap() map[string]Value {
+	l := val.LengthInt()
+	if l == 0 {
+		return nil
+	}
+
+	ret := make(map[string]Value, l)
+	for it := val.ElementIterator(); it.Next(); {
+		k, v := it.Element()
+		ret[k.AsString()] = v
+	}
+	return ret
+}
+
+// AsValueSet returns a ValueSet representation of a non-null,
+// non-unknown value of any collection type, or panics if called
+// on any other value.
+//
+// Unlike AsValueSlice and AsValueMap, this method requires specifically a
+// collection type (list, set or map) and does not allow structural types
+// (tuple or object), because the ValueSet type requires homogeneous
+// element types.
+//
+// The returned ValueSet can store only values of the receiver's element type.
+func (val Value) AsValueSet() ValueSet {
+	if !val.Type().IsCollectionType() {
+		panic("not a collection type")
+	}
+
+	// We don't give the caller our own set.Set (assuming we're a cty.Set value)
+	// because then the caller could mutate our internals, which is forbidden.
+	// Instead, we will construct a new set and append our elements into it.
+	ret := NewValueSet(val.Type().ElementType())
+	for it := val.ElementIterator(); it.Next(); {
+		_, v := it.Element()
+		ret.Add(v)
+	}
+	return ret
+}
+
+// EncapsulatedValue returns the native value encapsulated in a non-null,
+// non-unknown capsule-typed value, or panics if called on any other value.
+//
+// The result is the same pointer that was passed to CapsuleVal to create
+// the value. Since cty considers values to be immutable, it is strongly
+// recommended to treat the encapsulated value itself as immutable too.
+func (val Value) EncapsulatedValue() interface{} {
+	if !val.Type().IsCapsuleType() {
+		panic("not a capsule-typed value")
+	}
+
+	return val.v
+}
diff --git a/vendor/github.com/zclconf/go-cty/cty/walk.go b/vendor/github.com/zclconf/go-cty/cty/walk.go
new file mode 100644
index 00000000..a6943bab
--- /dev/null
+++ b/vendor/github.com/zclconf/go-cty/cty/walk.go
@@ -0,0 +1,182 @@
+package cty
+
+// Walk visits all of the values in a possibly-complex structure, calling
+// a given function for each value.
+//
+// For example, given a list of strings the callback would first be called
+// with the whole list and then called once for each element of the list.
+//
+// The callback function may prevent recursive visits to child values by
+// returning false. The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
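+//
+// A non-normative usage sketch that prints every visited path and value:
+//
+//	err := cty.Walk(val, func(p cty.Path, v cty.Value) (bool, error) {
+//		fmt.Printf("%#v: %#v\n", p, v)
+//		return true, nil // true means: descend into children
+//	})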
+func Walk(val Value, cb func(Path, Value) (bool, error)) error {
+	var path Path
+	return walk(path, val, cb)
+}
+
+func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error {
+	deeper, err := cb(path, val)
+	if err != nil {
+		return err
+	}
+	if !deeper {
+		return nil
+	}
+
+	if val.IsNull() || !val.IsKnown() {
+		// Can't recurse into null or unknown values, regardless of type
+		return nil
+	}
+
+	ty := val.Type()
+	switch {
+	case ty.IsObjectType():
+		for it := val.ElementIterator(); it.Next(); {
+			nameVal, av := it.Element()
+			path := append(path, GetAttrStep{
+				Name: nameVal.AsString(),
+			})
+			err := walk(path, av, cb)
+			if err != nil {
+				return err
+			}
+		}
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			kv, ev := it.Element()
+			path := append(path, IndexStep{
+				Key: kv,
+			})
+			err := walk(path, ev, cb)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Transform visits all of the values in a possibly-complex structure,
+// calling a given function for each value which has an opportunity to
+// replace that value.
+//
+// Unlike Walk, Transform visits child nodes first, so for a list of strings
+// it would first visit the strings and then the _new_ list constructed
+// from the transformed values of the list items.
+//
+// This is useful for creating the effect of being able to make deep mutations
+// to a value even though values are immutable. However, it's the responsibility
+// of the given function to preserve expected invariants, such as homogeneity of
+// element types in collections; this function can panic if such invariants
+// are violated, just as if new values were constructed directly using the
+// value constructor functions. An easy way to preserve invariants is to
+// ensure that the transform function never changes the value type.
+//
+// The callback function may halt the walk altogether by
+// returning a non-nil error. If the returned error is about the element
+// currently being visited, it is recommended to use the provided path
+// value to produce a PathError describing that context.
+//
+// The path passed to the given function may not be used after that function
+// returns, since its backing array is re-used for other calls.
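+//
+// A non-normative usage sketch that upper-cases every known, non-null
+// string while preserving the overall structure:
+//
+//	result, err := cty.Transform(val, func(p cty.Path, v cty.Value) (cty.Value, error) {
+//		if v.Type() == cty.String && v.IsKnown() && !v.IsNull() {
+//			return cty.StringVal(strings.ToUpper(v.AsString())), nil
+//		}
+//		return v, nil
+//	})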
+func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) { + var path Path + return transform(path, val, cb) +} + +func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) { + ty := val.Type() + var newVal Value + + switch { + + case val.IsNull() || !val.IsKnown(): + // Can't recurse into null or unknown values, regardless of type + newVal = val + + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty sequence + newVal = val + default: + elems := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems = append(elems, newEv) + } + switch { + case ty.IsListType(): + newVal = ListVal(elems) + case ty.IsSetType(): + newVal = SetVal(elems) + case ty.IsTupleType(): + newVal = TupleVal(elems) + default: + panic("unknown sequence type") // should never happen because of the case we are in + } + } + + case ty.IsMapType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty map + newVal = val + default: + elems := make(map[string]Value) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems[kv.AsString()] = newEv + } + newVal = MapVal(elems) + } + + case ty.IsObjectType(): + switch { + case ty.Equals(EmptyObject): + // No deep transform for an empty object + newVal = val + default: + atys := ty.AttributeTypes() + newAVs := make(map[string]Value) + for name := range atys { + av := val.GetAttr(name) + path := append(path, GetAttrStep{ + Name: name, + }) + newAV, err := transform(path, av, cb) + if err != nil { + return DynamicVal, err + } + newAVs[name] = newAV + } + newVal = ObjectVal(newAVs) + } + + default: + newVal = val + } + + return cb(path, newVal) +} diff --git a/vendor/vendor.json b/vendor/vendor.json deleted file mode 100644 index b0054eb0..00000000 --- a/vendor/vendor.json +++ /dev/null @@ -1,406 +0,0 @@ -{ - "comment": "", - "ignore": "test", - "package": [ - { - "checksumSHA1": "kn+zdUr5TNsoAX8BgjOaWYtMT5U=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/apparentlymart/go-cidr/cidr", - "path": "github.com/apparentlymart/go-cidr/cidr", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "Ex4zN+CcY/wtoDoK/penrVAxAok=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws", - "path": "github.com/aws/aws-sdk-go/aws", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/awserr", - "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "+q4vdl3l1Wom8K1wfIpJ4jlFsbY=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/awsutil", - "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "H/tMKHZU+Qka6RtYiGB50s2uA0s=", - 
"origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/client", - "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/client/metadata", - "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "gNWirlrTfSLbOe421hISBAhTqa4=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/corehandlers", - "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "dNZNaOPfBPnzE2CBnfhXXZ9g9jU=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/credentials", - "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "4Ipx+5xN0gso+cENC2MHMWmQlR4=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "nCMd1XKjgV21bEl7J8VZFqTV8PE=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/defaults", - "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "U0SthWum+t9ACanK7SDJOg3dO6M=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata", - "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "NyUg1P8ZS/LHAAQAk/4C5O4X3og=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/request", - "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "44uohX3kLsfZHHOqunr+qJnSCdw=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/session", - "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "7lla+sckQeF18wORAGuU2fFMlp4=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/aws/signer/v4", - "path": 
"github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "Bm6UrYb2QCzpYseLwwgw6aetgRc=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/private/endpoints", - "path": "github.com/aws/aws-sdk-go/private/endpoints", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/private/protocol", - "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "isoix7lTx4qIq2zI2xFADtti5SI=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/private/protocol/query", - "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/private/protocol/rest", - "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "Y6Db2GGfGD9LPpcJIPj8vXE8BbQ=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml", - "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "eUEkjyMPAuekKBE4ou+nM9tXEas=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/private/waiter", - "path": "github.com/aws/aws-sdk-go/private/waiter", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "imxJucuPrgaPRMPtAgsu+Y7soB4=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/service/s3", - "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "nH/itbdeFHpl4ysegdtgww9bFSA=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/aws/aws-sdk-go/service/sts", - "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "nqw2Qn5xUklssHTubS5HDvEL9L4=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/bgentry/go-netrc/netrc", - "path": "github.com/bgentry/go-netrc/netrc", - "revision": 
"84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "PkPgpx/FYoIJCbihlRZEp7oHG9Q=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/go-ini/ini", - "path": "github.com/go-ini/ini", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "cdOCt0Yb+hdErz8NAQqayxPmRsY=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/errwrap", - "path": "github.com/hashicorp/errwrap", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "aOWXSbYAdK3PBSMNFiK2ze4lPEc=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/go-getter", - "path": "github.com/hashicorp/go-getter", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "9J+kDr29yDrwsdu2ULzewmqGjpA=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/go-getter/helper/url", - "path": "github.com/hashicorp/go-getter/helper/url", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "lrSl49G23l6NhfilxPM0XFs5rZo=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/go-multierror", - "path": "github.com/hashicorp/go-multierror", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "85XUnluYJL7F55ptcwdmN8eSOsk=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/go-uuid", - "path": "github.com/hashicorp/go-uuid", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "9nNGCY0dWyRDEm7xb57BzV0AKeA=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/go-version", - "path": "github.com/hashicorp/go-version", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "8OPDk+bKyRGJoKcS4QNw9F7dpE8=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hcl", - "path": "github.com/hashicorp/hcl", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "67DfevLBglV52Y2eAuhFc/xQni0=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hcl/hcl/ast", - "path": "github.com/hashicorp/hcl/hcl/ast", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "l2oQxBsZRwn6eZjf+whXr8c9+8c=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hcl/hcl/parser", - "path": "github.com/hashicorp/hcl/hcl/parser", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "lgR7PSAZ0RtvAc9OCtCnNsF/x8g=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hcl/hcl/scanner", - "path": "github.com/hashicorp/hcl/hcl/scanner", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "JlZmnzqdmFFyb1+2afLyR3BOE/8=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hcl/hcl/strconv", - "path": "github.com/hashicorp/hcl/hcl/strconv", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, 
- { - "checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hcl/hcl/token", - "path": "github.com/hashicorp/hcl/hcl/token", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "jQ45CCc1ed/nlV7bbSnx6z72q1M=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hcl/json/parser", - "path": "github.com/hashicorp/hcl/json/parser", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "YdvFsNOMSWMLnY6fcliWQa0O5Fw=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hcl/json/scanner", - "path": "github.com/hashicorp/hcl/json/scanner", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hcl/json/token", - "path": "github.com/hashicorp/hcl/json/token", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "OrnLOmhc0FcHYs02wtbu1siIsnM=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hil", - "path": "github.com/hashicorp/hil", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "UICubs001+Q4MsUf9zl2vcMzWQQ=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/hashicorp/hil/ast", - "path": "github.com/hashicorp/hil/ast", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "2XvSzKRpTzuHl/tFvdj5VOaAxL8=", - "path": "github.com/hashicorp/terraform/config", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "Fz+xdIj90Kg/9GcGphXS3Iq4+8w=", - "path": "github.com/hashicorp/terraform/config/module", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "LsV1O5IMK565VOluOjx1gr+aDoQ=", - "path": "github.com/hashicorp/terraform/dag", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "TxqBBTRqbLW9f21FvjWKJlfzQFw=", - "path": "github.com/hashicorp/terraform/dot", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "0Q17QX0jzIL2Bg+XThUuOetCwU0=", - "path": "github.com/hashicorp/terraform/flatmap", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "B267stWNQd0/pBTXHfI/tJsxzfc=", - "path": "github.com/hashicorp/terraform/helper/hilmapstructure", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "MqCTcVza5jPf21vV7u1rZrhE9hc=", - "path": "github.com/hashicorp/terraform/terraform", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "3/Bhy+ua/DCv2ElMD5GzOYSGN6g=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/jmespath/go-jmespath", - "path": "github.com/jmespath/go-jmespath", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": 
"Vfkp+PcZ1wZ4+D6AsHTpKkdsQG0=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/mitchellh/copystructure", - "path": "github.com/mitchellh/copystructure", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "z235fRXw4+SW4xWgLTYc8SwkM2M=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/mitchellh/go-homedir", - "path": "github.com/mitchellh/go-homedir", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "4Js6Jlu93Wa0o6Kjt393L9Z7diE=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/mitchellh/mapstructure", - "path": "github.com/mitchellh/mapstructure", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "mrqMlK6gqe//WsJSrJ1HgkPM0lM=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/mitchellh/reflectwalk", - "path": "github.com/mitchellh/reflectwalk", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - }, - { - "checksumSHA1": "TvF3ym5sZVNqGlUOS9HgOIl/sZM=", - "origin": "github.com/hashicorp/terraform/vendor/github.com/satori/go.uuid", - "path": "github.com/satori/go.uuid", - "revision": "84592f5967490d118aae0b61a25d589d269fd0b6", - "revisionTime": "2016-09-19T19:13:49Z" - } - ], - "rootPath": "github.com/palantir/tfjson" -}