diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 62ecb0179d..8389b077f2 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -13,7 +13,7 @@ on: - 'v**' env: - GO_VERSION: '1.22' + GO_VERSION: '1.23' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c778cde165..02fd6f7275 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ on: - 'v**' env: - GO_VERSION: '1.22' + GO_VERSION: '1.23' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d4babec2d7..1adf469893 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -13,7 +13,7 @@ on: - 'v**' env: - GO_VERSION: '1.22' + GO_VERSION: '1.23' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} diff --git a/.github/workflows/compatibility-check-template.yml b/.github/workflows/compatibility-check-template.yml index 2f69e88045..e0d02c2f1c 100644 --- a/.github/workflows/compatibility-check-template.yml +++ b/.github/workflows/compatibility-check-template.yml @@ -17,11 +17,11 @@ on: required: true type: string secrets: - FLOWDIVER_API_KEY: + FIND_API_AUTH: required: true env: - GO_VERSION: '1.22' + GO_VERSION: '1.23' concurrency: group: ${{ github.workflow }}-${{ inputs.base-branch || github.run_id }}-${{ inputs.chain }} @@ -62,10 +62,10 @@ jobs: - name: Download contracts if: ${{ steps.restore-cached-contracts.outputs.cache-hit != 'true' }} env: - FLOWDIVER_API_KEY: ${{ secrets.FLOWDIVER_API_KEY }} + FIND_API_AUTH: ${{ secrets.FIND_API_AUTH }} working-directory: ./tools/get-contracts run: | - go run . 
--chain=${{ inputs.chain }} --apiKey="$FLOWDIVER_API_KEY" > ../../tmp/contracts.csv + go run . -chain=${{ inputs.chain }} -auth="$FIND_API_AUTH" > ../../tmp/contracts.csv - name: Cache Contracts id: cache-contracts diff --git a/.github/workflows/compatibility-check.yml b/.github/workflows/compatibility-check.yml index 2be88025c5..65b79957c3 100644 --- a/.github/workflows/compatibility-check.yml +++ b/.github/workflows/compatibility-check.yml @@ -23,7 +23,7 @@ on: - 'v**' env: - GO_VERSION: '1.22' + GO_VERSION: '1.23' concurrency: group: ${{ github.workflow }}-${{ inputs.base || github.run_id }} @@ -66,7 +66,7 @@ jobs: current-branch: ${{ needs.setup.outputs.branch }} chain: mainnet secrets: - FLOWDIVER_API_KEY: ${{ secrets.FLOWDIVER_API_KEY }} + FIND_API_AUTH: ${{ secrets.FIND_API_AUTH }} testnet: needs: setup @@ -77,4 +77,4 @@ jobs: current-branch: ${{ needs.setup.outputs.branch }} chain: testnet secrets: - FLOWDIVER_API_KEY: ${{ secrets.FLOWDIVER_API_KEY }} + FIND_API_AUTH: ${{ secrets.FIND_API_AUTH }} diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml index d7fe718e91..2b48989b40 100644 --- a/.github/workflows/downstream.yml +++ b/.github/workflows/downstream.yml @@ -13,7 +13,7 @@ on: - 'v**' env: - GO_VERSION: '1.22' + GO_VERSION: '1.23' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} diff --git a/.github/workflows/get-contracts.yml b/.github/workflows/get-contracts.yml index 93a1f4ffbc..a4197e9068 100644 --- a/.github/workflows/get-contracts.yml +++ b/.github/workflows/get-contracts.yml @@ -7,11 +7,11 @@ on: required: true type: string secrets: - FLOWDIVER_API_KEY: + FIND_API_AUTH: required: true env: - GO_VERSION: '1.22' + GO_VERSION: '1.23' concurrency: group: ${{ github.workflow }}-${{ github.run_id }}-${{ inputs.chain }} @@ -37,10 +37,10 @@ jobs: - name: Download contracts env: - FLOWDIVER_API_KEY: ${{ secrets.FLOWDIVER_API_KEY }} + FIND_API_AUTH: ${{ secrets.FIND_API_AUTH }} 
working-directory: ./tools/get-contracts run: | - go run . --chain=${{ inputs.chain }} --apiKey="$FLOWDIVER_API_KEY" > ../../tmp/contracts.csv + go run . -chain=${{ inputs.chain }} -auth="$FIND_API_AUTH" > ../../tmp/contracts.csv # Upload diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e7182cc4aa..c582bee135 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,7 +20,7 @@ on: default: 'master' env: - GO_VERSION: '1.22' + GO_VERSION: '1.23' concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.run_id }} diff --git a/bbq/vm/storage.go b/bbq/vm/storage.go index 9ceb3200d0..36e61106c7 100644 --- a/bbq/vm/storage.go +++ b/bbq/vm/storage.go @@ -43,33 +43,44 @@ func MustConvertStoredValue(gauge common.MemoryGauge, storage interpreter.Storag } func ReadStored( - gauge common.MemoryGauge, - storage interpreter.Storage, + config *Config, address common.Address, domain string, identifier string, ) Value { - accountStorage := storage.GetStorageMap(address, domain, false) + storage := config.Storage + + storageDomain, _ := common.StorageDomainFromIdentifier(domain) + + accountStorage := storage.GetDomainStorageMap( + config.interpreter(), + address, + storageDomain, + false, + ) if accountStorage == nil { return nil } - referenced := accountStorage.ReadValue(gauge, interpreter.StringStorageMapKey(identifier)) + referenced := accountStorage.ReadValue(config.MemoryGauge, interpreter.StringStorageMapKey(identifier)) return InterpreterValueToVMValue(storage, referenced) } func WriteStored( config *Config, storageAddress common.Address, - domain string, + domain common.StorageDomain, key interpreter.StorageMapKey, value Value, ) (existed bool) { - accountStorage := config.Storage.GetStorageMap(storageAddress, domain, true) + + inter := config.interpreter() + + accountStorage := config.Storage.GetDomainStorageMap(inter, storageAddress, domain, true) interValue := 
VMValueToInterpreterValue(config, value) return accountStorage.WriteValue( - config.interpreter(), + inter, key, interValue, ) @@ -90,12 +101,17 @@ func RemoveReferencedSlab(storage interpreter.Storage, storable atree.Storable) } func StoredValueExists( - storage interpreter.Storage, + config *Config, storageAddress common.Address, - domain string, + domain common.StorageDomain, identifier interpreter.StorageMapKey, ) bool { - accountStorage := storage.GetStorageMap(storageAddress, domain, false) + accountStorage := config.Storage.GetDomainStorageMap( + config.interpreter(), + storageAddress, + domain, + false, + ) if accountStorage == nil { return false } diff --git a/bbq/vm/value_account.go b/bbq/vm/value_account.go index 781892bb9b..4772d91571 100644 --- a/bbq/vm/value_account.go +++ b/bbq/vm/value_account.go @@ -23,7 +23,6 @@ import ( "github.com/onflow/cadence/errors" "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/sema" - "github.com/onflow/cadence/stdlib" ) type AccountIDGenerator interface { @@ -143,8 +142,7 @@ func getCapability( // Read stored capability, if any readValue := ReadStored( - config.MemoryGauge, - config.Storage, + config, address, domain, identifier, @@ -339,7 +337,7 @@ func storeCapabilityController( existed := WriteStored( config, address, - stdlib.CapabilityControllerStorageDomain, + common.StorageDomainCapabilityController, storageMapKey, controller, ) @@ -376,7 +374,12 @@ func recordStorageCapabilityController( storageMapKey := interpreter.StringStorageMapKey(identifier) - accountStorage := config.Storage.GetStorageMap(address, stdlib.PathCapabilityStorageDomain, true) + accountStorage := config.Storage.GetDomainStorageMap( + config.interpreter(), + address, + common.StorageDomainPathCapability, + true, + ) referenced := accountStorage.ReadValue(config.MemoryGauge, interpreter.StringStorageMapKey(identifier)) readValue := InterpreterValueToVMValue(config.Storage, referenced) diff --git 
a/bbq/vm/value_account_capabilities.go b/bbq/vm/value_account_capabilities.go index 7cac4796ea..d2bf5920ad 100644 --- a/bbq/vm/value_account_capabilities.go +++ b/bbq/vm/value_account_capabilities.go @@ -142,15 +142,17 @@ func init() { } domain := path.Domain.Identifier() + storageDomain, _ := common.StorageDomainFromIdentifier(domain) + identifier := path.Identifier // Prevent an overwrite storageMapKey := interpreter.StringStorageMapKey(identifier) if StoredValueExists( - config.Storage, + config, accountAddress, - domain, + storageDomain, storageMapKey, ) { panic(interpreter.OverwriteError{ @@ -174,7 +176,7 @@ func init() { WriteStored( config, accountAddress, - domain, + storageDomain, storageMapKey, capabilityValue, ) diff --git a/bbq/vm/value_account_storage.go b/bbq/vm/value_account_storage.go index 18f29180d2..cf055a95b9 100644 --- a/bbq/vm/value_account_storage.go +++ b/bbq/vm/value_account_storage.go @@ -85,10 +85,11 @@ func init() { // Write new value + storageDomain, _ := common.StorageDomainFromIdentifier(domain) WriteStored( config, address, - domain, + storageDomain, interpreter.StringStorageMapKey(identifier), value, ) diff --git a/bbq/vm/value_capability.go b/bbq/vm/value_capability.go index 5d2af9153e..5a22f71708 100644 --- a/bbq/vm/value_capability.go +++ b/bbq/vm/value_capability.go @@ -26,7 +26,6 @@ import ( "github.com/onflow/cadence/format" "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/sema" - "github.com/onflow/cadence/stdlib" ) // members @@ -210,7 +209,12 @@ func getCapabilityController( storageMapKey := interpreter.Uint64StorageMapKey(capabilityID) - accountStorage := config.Storage.GetStorageMap(address, stdlib.CapabilityControllerStorageDomain, false) + accountStorage := config.Storage.GetDomainStorageMap( + config.interpreter(), + address, + common.StorageDomainCapabilityController, + false, + ) if accountStorage == nil { return nil } diff --git a/bbq/vm/value_storage_reference.go 
b/bbq/vm/value_storage_reference.go index 057badca31..f5c3bdfe43 100644 --- a/bbq/vm/value_storage_reference.go +++ b/bbq/vm/value_storage_reference.go @@ -86,12 +86,11 @@ func (v *StorageReferenceValue) StaticType(config *Config) StaticType { } func (v *StorageReferenceValue) dereference(config *Config) (*Value, error) { - memoryGauge := config.MemoryGauge address := v.TargetStorageAddress domain := v.TargetPath.Domain.Identifier() identifier := v.TargetPath.Identifier - vmReferencedValue := ReadStored(memoryGauge, config.Storage, address, domain, identifier) + vmReferencedValue := ReadStored(config, address, domain, identifier) if vmReferencedValue == nil { return nil, nil } diff --git a/benchmarks/binarytrees.cdc b/benchmarks/binarytrees.cdc new file mode 100644 index 0000000000..bdfa6a028e --- /dev/null +++ b/benchmarks/binarytrees.cdc @@ -0,0 +1,82 @@ + +access(all) +struct Tree { + access(all) + var left: Tree? + + access(all) + var right: Tree? + + init(left: Tree?, right: Tree?) { + self.left = left + self.right = right + } + + access(all) + fun nodeCount(): Int { + return 1 + + (self.left?.nodeCount() ?? 0) + + (self.right?.nodeCount() ?? 0) + } + + access(all) + fun clear() { + if (self.left != nil) { + self.left?.clear() + self.left = nil + self.right?.clear() + self.right = nil + } + } +} + +access(all) +fun newTree(depth: Int): Tree { + if depth == 0 { + return Tree(left: nil, right: nil) + } + return Tree( + left: newTree(depth: depth - 1), + right: newTree(depth: depth - 1) + ) +} + +access(all) +fun stretch(_ depth: Int) { + log("stretch tree of depth \(depth), check: \(count(depth))") +} + +access(all) +fun count(_ depth: Int): Int { + let t = newTree(depth: depth) + let c = t.nodeCount() + t.clear() + return c +} + +access(all) +fun run(_ n: Int) { + let minDepth = 4 + let maxDepth = minDepth + 2 > n ? 
minDepth + 2 : n + let stretchDepth = maxDepth + 1 + + stretch(stretchDepth) + let longLivedTree = newTree(depth: maxDepth) + + for depth in InclusiveRange(minDepth, maxDepth, step: 2) { + let iterations = 1 << (maxDepth - depth + minDepth) + var sum = 0 + for _ in InclusiveRange(1, iterations, step: 1) { + sum = sum + count(depth) + } + log("\(iterations), trees of depth \(depth), check: \(sum)") + } + let count = longLivedTree.nodeCount() + longLivedTree.clear() + log("long lived tree of depth \(maxDepth), check: \(count)") +} + +access(all) +fun main() { + run(10) +} diff --git a/benchmarks/fannkuch.cdc b/benchmarks/fannkuch.cdc new file mode 100644 index 0000000000..0e4d1e2ac3 --- /dev/null +++ b/benchmarks/fannkuch.cdc @@ -0,0 +1,95 @@ +access(all) +fun newArray(repeating value: Int, count: Int): [Int] { + let array: [Int] = [] + for _ in InclusiveRange(0, count-1) { + array.append(value) + } + return array +} + +access(all) +fun fannkuch(_ n: Int): Int { + let perm = newArray(repeating: 0, count: n) + let count = newArray(repeating: 0, count: n) + let perm1 = newArray(repeating: 0, count: n) + + for j in InclusiveRange(0, n-1) { + perm1[j] = j + } + + var f = 0 + var i = 0 + var k = 0 + var r = 0 + var flips = 0 + var nperm = 0 + var checksum = 0 + + r = n + while r > 0 { + i = 0 + while r != 1 { + count[r-1] = r + r = r - 1 + } + while i < n { + perm[i] = perm1[i] + i = i + 1 + } + + // Count flips and update max and checksum + f = 0 + k = perm[0] + while k != 0 { + i = 0 + while 2*i < k { + let t = perm[i] + perm[i] = perm[k-i] + perm[k-i] = t + i = i + 1 + } + k = perm[0] + f = f + 1 + } + if f > flips { + flips = f + } + + if (nperm & 0x1) == 0 { + checksum = checksum + f + } else { + checksum = checksum - f + } + + // Use incremental change to generate another permutation + var more = true + while more { + if r == n { + log(checksum) + return flips + } + let p0 = perm1[0] + i = 0 + while i < r { + let j = i+1 + perm1[i] = perm1[j] + i = j + } + perm1[r] 
= p0 + + count[r] = count[r] - 1 + if count[r] > 0 { + more = false + } else { + r = r + 1 + } + } + nperm = nperm + 1 + } + return flips +} + +access(all) +fun main() { + assert(fannkuch(7) == 16) +} diff --git a/benchmarks/fib_dynamic.cdc b/benchmarks/fib_dynamic.cdc new file mode 100644 index 0000000000..55248afa14 --- /dev/null +++ b/benchmarks/fib_dynamic.cdc @@ -0,0 +1,21 @@ +access(all) +fun fib(_ n: Int): Int { + if n == 0 { + return 0 + } + + let f = [0, 1] + + var i = 2 + while i <= n { + f.append(f[i - 1] + f[i - 2]) + i = i + 1 + } + + return f[n] +} + +access(all) +fun main() { + assert(fib(23) == 28657) +} diff --git a/benchmarks/fib_iterative.cdc b/benchmarks/fib_iterative.cdc new file mode 100644 index 0000000000..a7a1622708 --- /dev/null +++ b/benchmarks/fib_iterative.cdc @@ -0,0 +1,19 @@ +access(all) +fun fib(_ n: Int): Int { + var fib1 = 1 + var fib2 = 1 + var fibonacci = fib1 + var i = 2 + while i < n { + fibonacci = fib1 + fib2 + fib1 = fib2 + fib2 = fibonacci + i = i + 1 + } + return fibonacci +} + +access(all) +fun main() { + assert(fib(23) == 28657) +} diff --git a/benchmarks/fib_recursive.cdc b/benchmarks/fib_recursive.cdc new file mode 100644 index 0000000000..5b06a75a45 --- /dev/null +++ b/benchmarks/fib_recursive.cdc @@ -0,0 +1,12 @@ +access(all) +fun fib(_ n: Int): Int { + if n < 2 { + return n + } + return fib(n - 1) + fib(n - 2) +} + +access(all) +fun main() { + assert(fib(23) == 28657) +} diff --git a/cmd/compile/main.go b/cmd/compile/main.go deleted file mode 100644 index 59c5de9eba..0000000000 --- a/cmd/compile/main.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "os" - - "github.com/onflow/cadence/ast" - "github.com/onflow/cadence/cmd" - "github.com/onflow/cadence/common" - "github.com/onflow/cadence/compiler" - "github.com/onflow/cadence/compiler/ir" - "github.com/onflow/cadence/compiler/wasm" - "github.com/onflow/cadence/stdlib" -) - -func main() { - args := os.Args - - if len(args) < 2 { - cmd.ExitWithError("no input file") - } - - path := args[1] - - location := common.NewStringLocation(nil, path) - - codes := map[common.Location][]byte{} - - program, must := cmd.PrepareProgramFromFile(location, codes) - - // standard library handler is only needed for execution, but we're only checking - standardLibraryValues := stdlib.DefaultScriptStandardLibraryValues(nil) - - checker, must := cmd.PrepareChecker( - program, - location, - codes, - nil, - standardLibraryValues, - must, - ) - - must(checker.Check()) - - // Compile all functions - - comp := compiler.NewCompiler(checker) - - functionDeclarations := checker.Program.FunctionDeclarations() - - funcs := make([]*ir.Func, len(functionDeclarations)) - - for i, functionDeclaration := range functionDeclarations { - funcs[i] = ast.AcceptDeclaration[ir.Stmt](functionDeclaration, comp).(*ir.Func) - } - - // Generate a WebAssembly module for the functions - - module := compiler.GenerateWasm(funcs) - - // Export all public functions - - for i, functionDeclaration := range functionDeclarations { - if functionDeclaration.Access != ast.AccessAll { - continue - } - - module.Exports = append(module.Exports, - &wasm.Export{ - Name: 
functionDeclaration.Identifier.Identifier, - Descriptor: wasm.FunctionExport{ - FunctionIndex: uint32(i), - }, - }, - ) - } - - // Generate WASM binary - - var buf wasm.Buffer - w := wasm.NewWASMWriter(&buf) - err := w.WriteModule(module) - if err != nil { - panic(nil) - } - - // Write WASM binary to stdout - - _, err = os.Stdout.Write(buf.Bytes()) - if err != nil { - panic(nil) - } -} diff --git a/cmd/decode-state-values/main.go b/cmd/decode-state-values/main.go index 70ecd7e7b4..91f3a6d840 100644 --- a/cmd/decode-state-values/main.go +++ b/cmd/decode-state-values/main.go @@ -234,8 +234,13 @@ type interpreterStorage struct { var _ interpreter.Storage = &interpreterStorage{} -func (i interpreterStorage) GetStorageMap(_ common.Address, _ string, _ bool) *interpreter.StorageMap { - panic("unexpected GetStorageMap call") +func (i interpreterStorage) GetDomainStorageMap( + _ *interpreter.Interpreter, + _ common.Address, + _ common.StorageDomain, + _ bool, +) *interpreter.DomainStorageMap { + panic("unexpected GetDomainStorageMap call") } func (i interpreterStorage) CheckHealth() error { diff --git a/common/address.go b/common/address.go index 5c1a354f59..04c2bade70 100644 --- a/common/address.go +++ b/common/address.go @@ -19,6 +19,7 @@ package common import ( + "bytes" "encoding/hex" goErrors "errors" "fmt" @@ -112,6 +113,10 @@ func (a Address) HexWithPrefix() string { return fmt.Sprintf("0x%x", [AddressLength]byte(a)) } +func (a Address) Compare(other Address) int { + return bytes.Compare(a[:], other[:]) +} + // HexToAddress converts a hex string to an Address after // ensuring that the hex string starts with the prefix 0x. 
func HexToAddressAssertPrefix(h string) (Address, error) { diff --git a/common/pathdomain.go b/common/pathdomain.go index 943301dc4c..ea9e5609c8 100644 --- a/common/pathdomain.go +++ b/common/pathdomain.go @@ -70,3 +70,18 @@ func (i PathDomain) Identifier() string { panic(errors.NewUnreachableError()) } + +func (i PathDomain) StorageDomain() StorageDomain { + switch i { + case PathDomainStorage: + return StorageDomainPathStorage + + case PathDomainPrivate: + return StorageDomainPathPrivate + + case PathDomainPublic: + return StorageDomainPathPublic + } + + panic(errors.NewUnreachableError()) +} diff --git a/common/storagedomain.go b/common/storagedomain.go new file mode 100644 index 0000000000..ac7b297daa --- /dev/null +++ b/common/storagedomain.go @@ -0,0 +1,147 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package common + +import ( + "fmt" + + "github.com/onflow/cadence/errors" +) + +// StorageDomain is used to store domain values on chain. +// +// !!! *WARNING* !!! +// +// Only add new StorageDomain by: +// - appending to the end. +// +// Only remove StorageDomain by: +// - replacing existing StorageDomain with a placeholder `_`. +// +// DO *NOT* REPLACE EXISTING STORAGEDOMAIN! +// DO *NOT* REMOVE EXISTING STORAGEDOMAIN! +// DO *NOT* INSERT NEW STORAGEDOMAIN IN BETWEEN! 
+type StorageDomain uint8 + +const ( + StorageDomainUnknown StorageDomain = iota + + StorageDomainPathStorage + + StorageDomainPathPrivate + + StorageDomainPathPublic + + StorageDomainContract + + StorageDomainInbox + + // StorageDomainCapabilityController is the storage domain which stores + // capability controllers by capability ID + StorageDomainCapabilityController + + // StorageDomainCapabilityControllerTag is the storage domain which stores + // capability controller tags by capability ID + StorageDomainCapabilityControllerTag + + // StorageDomainPathCapability is the storage domain which stores + // capability ID dictionaries (sets) by storage path identifier + StorageDomainPathCapability + + // StorageDomainAccountCapability is the storage domain which + // records active account capability controller IDs + StorageDomainAccountCapability + + // Append new StorageDomain here (if needed). +) + +var AllStorageDomains = []StorageDomain{ + StorageDomainPathStorage, + StorageDomainPathPrivate, + StorageDomainPathPublic, + StorageDomainContract, + StorageDomainInbox, + StorageDomainCapabilityController, + StorageDomainCapabilityControllerTag, + StorageDomainPathCapability, + StorageDomainAccountCapability, +} + +var AllStorageDomainsByIdentifier = map[string]StorageDomain{} + +var allStorageDomainsSet = map[StorageDomain]struct{}{} + +func init() { + for _, domain := range AllStorageDomains { + identifier := domain.Identifier() + AllStorageDomainsByIdentifier[identifier] = domain + + allStorageDomainsSet[domain] = struct{}{} + } +} + +func StorageDomainFromIdentifier(domain string) (StorageDomain, bool) { + result, ok := AllStorageDomainsByIdentifier[domain] + if !ok { + return StorageDomainUnknown, false + } + return result, true +} + +func StorageDomainFromUint64(i uint64) (StorageDomain, error) { + d := StorageDomain(i) + _, exists := allStorageDomainsSet[d] + if !exists { + return StorageDomainUnknown, fmt.Errorf("failed to convert %d to StorageDomain", i) + 
} + return d, nil +} + +func (d StorageDomain) Identifier() string { + switch d { + case StorageDomainPathStorage: + return PathDomainStorage.Identifier() + + case StorageDomainPathPrivate: + return PathDomainPrivate.Identifier() + + case StorageDomainPathPublic: + return PathDomainPublic.Identifier() + + case StorageDomainContract: + return "contract" + + case StorageDomainInbox: + return "inbox" + + case StorageDomainCapabilityController: + return "cap_con" + + case StorageDomainCapabilityControllerTag: + return "cap_tag" + + case StorageDomainPathCapability: + return "path_cap" + + case StorageDomainAccountCapability: + return "acc_cap" + } + + panic(errors.NewUnreachableError()) +} diff --git a/compat/suite/green-goo-dao-flow-utils.yaml b/compat/suite/green-goo-dao-flow-utils.yaml index 82f2925875..ec7fee3649 100644 --- a/compat/suite/green-goo-dao-flow-utils.yaml +++ b/compat/suite/green-goo-dao-flow-utils.yaml @@ -1,8 +1,8 @@ description: Green Goo Dao flow-utils maintainers: - bastian.mueller@flowfoundation.org -url: https://github.com/turbolent/flow-utils.git -branch: improvements +url: https://github.com/green-goo-dao/flow-utils.git +branch: main cadence_tests: - path: . command: npm i && ./run-tests.sh diff --git a/compiler/codegen.go b/compiler/codegen.go deleted file mode 100644 index 9e67e0f9f0..0000000000 --- a/compiler/codegen.go +++ /dev/null @@ -1,277 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package compiler - -import ( - "fmt" - - "github.com/onflow/cadence/compiler/ir" - "github.com/onflow/cadence/compiler/wasm" - "github.com/onflow/cadence/errors" -) - -const RuntimeModuleName = "crt" - -type wasmCodeGen struct { - mod *wasm.ModuleBuilder - code *wasm.Code - runtimeFunctionIndexInt uint32 - runtimeFunctionIndexString uint32 - runtimeFunctionIndexAdd uint32 -} - -func (codeGen *wasmCodeGen) VisitInt(i ir.Int) ir.Repr { - codeGen.emitConstantCall( - codeGen.runtimeFunctionIndexInt, - i.Value, - ) - return nil -} - -func (codeGen *wasmCodeGen) VisitString(s ir.String) ir.Repr { - codeGen.emitConstantCall( - codeGen.runtimeFunctionIndexString, - []byte(s.Value), - ) - return nil -} - -func (codeGen *wasmCodeGen) VisitSequence(sequence *ir.Sequence) ir.Repr { - for _, stmt := range sequence.Stmts { - stmt.Accept(codeGen) - } - return nil -} - -func (codeGen *wasmCodeGen) VisitBlock(_ *ir.Block) ir.Repr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (codeGen *wasmCodeGen) VisitLoop(_ *ir.Loop) ir.Repr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (codeGen *wasmCodeGen) VisitIf(_ *ir.If) ir.Repr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (codeGen *wasmCodeGen) VisitBranch(_ *ir.Branch) ir.Repr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (codeGen *wasmCodeGen) VisitBranchIf(_ *ir.BranchIf) ir.Repr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (codeGen *wasmCodeGen) VisitStoreLocal(storeLocal *ir.StoreLocal) ir.Repr { - storeLocal.Exp.Accept(codeGen) - codeGen.emit(wasm.InstructionLocalSet{ - LocalIndex: storeLocal.LocalIndex, - }) - return nil -} - -func (codeGen *wasmCodeGen) VisitDrop(_ *ir.Drop) ir.Repr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (codeGen *wasmCodeGen) VisitReturn(r *ir.Return) ir.Repr { - r.Exp.Accept(codeGen) - 
codeGen.emit(wasm.InstructionReturn{}) - return nil -} - -func (codeGen *wasmCodeGen) VisitConst(c *ir.Const) ir.Repr { - c.Constant.Accept(codeGen) - return nil -} - -func (codeGen *wasmCodeGen) VisitCopyLocal(c *ir.CopyLocal) ir.Repr { - // TODO: copy - codeGen.emit(wasm.InstructionLocalGet{ - LocalIndex: c.LocalIndex, - }) - return nil -} - -func (codeGen *wasmCodeGen) VisitMoveLocal(_ *ir.MoveLocal) ir.Repr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (codeGen *wasmCodeGen) VisitUnOpExpr(_ *ir.UnOpExpr) ir.Repr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (codeGen *wasmCodeGen) VisitBinOpExpr(expr *ir.BinOpExpr) ir.Repr { - expr.Left.Accept(codeGen) - expr.Right.Accept(codeGen) - // TODO: add remaining operations, take types into account - switch expr.Op { - case ir.BinOpPlus: - codeGen.emit(wasm.InstructionCall{ - FuncIndex: codeGen.runtimeFunctionIndexAdd, - }) - return nil - } - panic(errors.NewUnreachableError()) -} - -func (codeGen *wasmCodeGen) VisitCall(_ *ir.Call) ir.Repr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (codeGen *wasmCodeGen) VisitFunc(f *ir.Func) ir.Repr { - codeGen.code = &wasm.Code{} - codeGen.code.Locals = generateWasmLocalTypes(f.Locals) - f.Statement.Accept(codeGen) - functionType := generateWasmFunctionType(f.Type) - funcIndex := codeGen.mod.AddFunction(f.Name, functionType, codeGen.code) - // TODO: make export dependent on visibility modifier - codeGen.mod.AddExport(&wasm.Export{ - Name: f.Name, - Descriptor: wasm.FunctionExport{ - FunctionIndex: funcIndex, - }, - }) - return nil -} - -func (codeGen *wasmCodeGen) emit(inst wasm.Instruction) { - codeGen.code.Instructions = append(codeGen.code.Instructions, inst) -} - -func (codeGen *wasmCodeGen) addConstant(value []byte) uint32 { - offset := codeGen.mod.RequireMemory(uint32(len(value))) - // TODO: optimize: - // let module builder generate one data entry of all constants, - // instead of one data entry for each constant - 
codeGen.mod.AddData(offset, value) - return offset -} - -func (codeGen *wasmCodeGen) emitConstantCall(funcIndex uint32, value []byte) { - memoryOffset := codeGen.addConstant(value) - codeGen.emit(wasm.InstructionI32Const{Value: int32(memoryOffset)}) - - length := int32(len(value)) - codeGen.emit(wasm.InstructionI32Const{Value: length}) - - codeGen.emit(wasm.InstructionCall{FuncIndex: funcIndex}) -} - -var constantFunctionType = &wasm.FunctionType{ - Params: []wasm.ValueType{ - // memory offset - wasm.ValueTypeI32, - // length - wasm.ValueTypeI32, - }, - Results: []wasm.ValueType{ - wasm.ValueTypeExternRef, - }, -} - -var addFunctionType = &wasm.FunctionType{ - Params: []wasm.ValueType{ - wasm.ValueTypeExternRef, - wasm.ValueTypeExternRef, - }, - Results: []wasm.ValueType{ - wasm.ValueTypeExternRef, - }, -} - -func (codeGen *wasmCodeGen) addRuntimeImports() { - // NOTE: ensure to update the imports in the vm - codeGen.runtimeFunctionIndexInt = codeGen.addRuntimeImport("Int", constantFunctionType) - codeGen.runtimeFunctionIndexString = codeGen.addRuntimeImport("String", constantFunctionType) - codeGen.runtimeFunctionIndexAdd = codeGen.addRuntimeImport("add", addFunctionType) -} - -func (codeGen *wasmCodeGen) addRuntimeImport(name string, funcType *wasm.FunctionType) uint32 { - funcIndex, err := codeGen.mod.AddFunctionImport(RuntimeModuleName, name, funcType) - if err != nil { - panic(fmt.Errorf("failed to add runtime import of function %s: %w", name, err)) - } - return funcIndex -} - -func GenerateWasm(funcs []*ir.Func) *wasm.Module { - g := &wasmCodeGen{ - mod: &wasm.ModuleBuilder{}, - } - - g.addRuntimeImports() - - for _, f := range funcs { - f.Accept(g) - } - - g.mod.ExportMemory("mem") - - return g.mod.Build() -} - -func generateWasmLocalTypes(locals []ir.Local) []wasm.ValueType { - result := make([]wasm.ValueType, len(locals)) - for i, local := range locals { - result[i] = generateWasmValType(local.Type) - } - return result -} - -func 
generateWasmValType(valType ir.ValType) wasm.ValueType { - // TODO: add remaining types - switch valType { - case ir.ValTypeInt, - ir.ValTypeString: - - return wasm.ValueTypeExternRef - } - - panic(errors.NewUnreachableError()) -} - -func generateWasmFunctionType(funcType ir.FuncType) *wasm.FunctionType { - // generate parameter types - params := make([]wasm.ValueType, len(funcType.Params)) - for i, param := range funcType.Params { - params[i] = generateWasmValType(param) - } - - // generate result types - results := make([]wasm.ValueType, len(funcType.Results)) - for i, result := range funcType.Results { - results[i] = generateWasmValType(result) - } - - return &wasm.FunctionType{ - Params: params, - Results: results, - } -} diff --git a/compiler/codegen_test.go b/compiler/codegen_test.go deleted file mode 100644 index ad1bd9fea8..0000000000 --- a/compiler/codegen_test.go +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package compiler - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/onflow/cadence/compiler/ir" - "github.com/onflow/cadence/compiler/wasm" -) - -func TestWasmCodeGenSimple(t *testing.T) { - - t.Skip("WIP") - - mod := GenerateWasm([]*ir.Func{ - { - Name: "inc", - Type: ir.FuncType{ - Params: []ir.ValType{ - ir.ValTypeInt, - }, - Results: []ir.ValType{ - ir.ValTypeInt, - }, - }, - Locals: []ir.Local{ - {Type: ir.ValTypeInt}, - {Type: ir.ValTypeInt}, - }, - Statement: &ir.Sequence{ - Stmts: []ir.Stmt{ - &ir.StoreLocal{ - LocalIndex: 1, - Exp: &ir.Const{ - Constant: ir.Int{Value: []byte{1, 1}}, - }, - }, - &ir.Return{ - Exp: &ir.BinOpExpr{ - Op: ir.BinOpPlus, - Left: &ir.CopyLocal{ - LocalIndex: 0, - }, - Right: &ir.CopyLocal{ - LocalIndex: 1, - }, - }, - }, - }, - }, - }, - }) - - require.Equal(t, - &wasm.Module{ - Types: []*wasm.FunctionType{ - // function type of crt.Int - { - Params: []wasm.ValueType{ - wasm.ValueTypeI32, - wasm.ValueTypeI32, - }, - Results: []wasm.ValueType{ - wasm.ValueTypeExternRef, - }, - }, - // function type of crt.String - { - Params: []wasm.ValueType{ - wasm.ValueTypeI32, - wasm.ValueTypeI32, - }, - Results: []wasm.ValueType{ - wasm.ValueTypeExternRef, - }, - }, - // function type of add - { - Params: []wasm.ValueType{ - wasm.ValueTypeExternRef, - wasm.ValueTypeExternRef, - }, - Results: []wasm.ValueType{ - wasm.ValueTypeExternRef, - }, - }, - // function type of inc - { - Params: []wasm.ValueType{ - wasm.ValueTypeExternRef, - }, - Results: []wasm.ValueType{ - wasm.ValueTypeExternRef, - }, - }, - }, - Imports: []*wasm.Import{ - { - Module: RuntimeModuleName, - Name: "Int", - TypeIndex: 0, - }, - { - Module: RuntimeModuleName, - Name: "String", - TypeIndex: 1, - }, - { - Module: RuntimeModuleName, - Name: "add", - TypeIndex: 2, - }, - }, - Functions: []*wasm.Function{ - { - Name: "inc", - TypeIndex: 3, - Code: &wasm.Code{ - Locals: []wasm.ValueType{ - wasm.ValueTypeExternRef, - wasm.ValueTypeExternRef, 
- }, - Instructions: []wasm.Instruction{ - wasm.InstructionI32Const{Value: 0}, - wasm.InstructionI32Const{Value: 2}, - wasm.InstructionCall{FuncIndex: 0}, - wasm.InstructionLocalSet{LocalIndex: 1}, - wasm.InstructionLocalGet{LocalIndex: 0}, - wasm.InstructionLocalGet{LocalIndex: 1}, - wasm.InstructionCall{FuncIndex: 2}, - wasm.InstructionReturn{}, - }, - }, - }, - }, - Memories: []*wasm.Memory{ - { - Min: 1, - Max: nil, - }, - }, - Data: []*wasm.Data{ - // load [0x1, 0x1] at offset 0 - { - MemoryIndex: 0, - Offset: []wasm.Instruction{ - wasm.InstructionI32Const{Value: 0}, - }, - Init: []byte{ - // positive flag - 0x1, - // integer 1 - 0x1, - }, - }, - }, - Exports: []*wasm.Export{ - { - Name: "inc", - Descriptor: wasm.FunctionExport{ - FunctionIndex: 3, - }, - }, - { - Name: "mem", - Descriptor: wasm.MemoryExport{ - MemoryIndex: 0, - }, - }, - }, - }, - mod, - ) - - var buf wasm.Buffer - w := wasm.NewWASMWriter(&buf) - err := w.WriteModule(mod) - require.NoError(t, err) - - _ = wasm.WASM2WAT(buf.Bytes()) -} diff --git a/compiler/compiler.go b/compiler/compiler.go deleted file mode 100644 index d7fefc2a14..0000000000 --- a/compiler/compiler.go +++ /dev/null @@ -1,464 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package compiler - -import ( - "github.com/onflow/cadence/activations" - "github.com/onflow/cadence/ast" - "github.com/onflow/cadence/compiler/ir" - "github.com/onflow/cadence/errors" - "github.com/onflow/cadence/sema" -) - -type Compiler struct { - Checker *sema.Checker - activations *activations.Activations[*Local] - locals []*Local -} - -var _ ast.DeclarationVisitor[ir.Stmt] = &Compiler{} -var _ ast.StatementVisitor[ir.Stmt] = &Compiler{} -var _ ast.ExpressionVisitor[ir.Expr] = &Compiler{} - -func NewCompiler(checker *sema.Checker) *Compiler { - return &Compiler{ - Checker: checker, - activations: activations.NewActivations[*Local](nil), - } -} - -// declareLocal declares a local -func (compiler *Compiler) declareLocal(identifier string, valType ir.ValType) *Local { - // NOTE: semantic analysis already checked possible invalid redeclaration - index := uint32(len(compiler.locals)) - local := NewLocal(index, valType) - compiler.locals = append(compiler.locals, local) - compiler.setLocal(identifier, local) - return local -} - -func (compiler *Compiler) findLocal(name string) *Local { - return compiler.activations.Find(name) -} - -func (compiler *Compiler) setLocal(name string, variable *Local) { - compiler.activations.Set(name, variable) -} - -func (compiler *Compiler) VisitReturnStatement(statement *ast.ReturnStatement) ir.Stmt { - exp := ast.AcceptExpression[ir.Expr](statement.Expression, compiler) - return &ir.Return{ - Exp: exp, - } -} - -func (compiler *Compiler) VisitBreakStatement(_ *ast.BreakStatement) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitContinueStatement(_ *ast.ContinueStatement) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitIfStatement(_ *ast.IfStatement) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitWhileStatement(_ *ast.WhileStatement) ir.Stmt { - // TODO - 
panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitForStatement(_ *ast.ForStatement) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitEmitStatement(_ *ast.EmitStatement) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitRemoveStatement(_ *ast.RemoveStatement) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitSwitchStatement(_ *ast.SwitchStatement) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitVariableDeclaration(declaration *ast.VariableDeclaration) ir.Stmt { - - // TODO: potential storage removal - // TODO: copy and convert - // TODO: second value - - identifier := declaration.Identifier.Identifier - targetType := compiler.Checker.Elaboration.VariableDeclarationTypes(declaration).TargetType - valType := compileValueType(targetType) - local := compiler.declareLocal(identifier, valType) - exp := ast.AcceptExpression[ir.Expr](declaration.Value, compiler) - - return &ir.StoreLocal{ - LocalIndex: local.Index, - Exp: exp, - } -} - -func (compiler *Compiler) VisitAssignmentStatement(_ *ast.AssignmentStatement) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitSwapStatement(_ *ast.SwapStatement) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitExpressionStatement(_ *ast.ExpressionStatement) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitVoidExpression(_ *ast.VoidExpression) ir.Expr { - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitBoolExpression(_ *ast.BoolExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitNilExpression(_ *ast.NilExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) 
VisitIntegerExpression(expression *ast.IntegerExpression) ir.Expr { - var value []byte - - if expression.Value.Sign() < 0 { - value = append(value, 0) - } else { - value = append(value, 1) - } - - value = append(value, - expression.Value.Bytes()..., - ) - - return &ir.Const{ - Constant: ir.Int{ - Value: value, - }, - } -} - -func (compiler *Compiler) VisitFixedPointExpression(_ *ast.FixedPointExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitArrayExpression(_ *ast.ArrayExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitDictionaryExpression(_ *ast.DictionaryExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitIdentifierExpression(expression *ast.IdentifierExpression) ir.Expr { - // TODO - local := compiler.findLocal(expression.Identifier.Identifier) - // TODO: moves - return &ir.CopyLocal{ - LocalIndex: local.Index, - } -} - -func (compiler *Compiler) VisitInvocationExpression(_ *ast.InvocationExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitMemberExpression(_ *ast.MemberExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitIndexExpression(_ *ast.IndexExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitConditionalExpression(_ *ast.ConditionalExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitAttachExpression(_ *ast.AttachExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitUnaryExpression(_ *ast.UnaryExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitBinaryExpression(expression *ast.BinaryExpression) ir.Expr { - op := compileBinaryOperation(expression.Operation) - left := 
ast.AcceptExpression[ir.Expr](expression.Left, compiler) - right := ast.AcceptExpression[ir.Expr](expression.Right, compiler) - - return &ir.BinOpExpr{ - Op: op, - Left: left, - Right: right, - } -} - -func (compiler *Compiler) VisitFunctionExpression(_ *ast.FunctionExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitStringTemplateExpression(e *ast.StringTemplateExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitStringExpression(e *ast.StringExpression) ir.Expr { - return &ir.Const{ - Constant: ir.String{ - Value: e.Value, - }, - } -} - -func (compiler *Compiler) VisitCastingExpression(_ *ast.CastingExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitCreateExpression(_ *ast.CreateExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitDestroyExpression(_ *ast.DestroyExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitReferenceExpression(_ *ast.ReferenceExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitForceExpression(_ *ast.ForceExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitPathExpression(_ *ast.PathExpression) ir.Expr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitProgram(_ *ast.Program) ir.Repr { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitSpecialFunctionDeclaration(declaration *ast.SpecialFunctionDeclaration) ir.Stmt { - return compiler.VisitFunctionDeclaration(declaration.FunctionDeclaration) -} - -func (compiler *Compiler) VisitFunctionDeclaration(declaration *ast.FunctionDeclaration) ir.Stmt { - - // TODO: declare function in current scope, use current scope in function - // TODO: conditions - - 
compiler.locals = nil - - block := declaration.FunctionBlock.Block - - // Declare a local for each parameter - - functionType := compiler.Checker.Elaboration.FunctionDeclarationFunctionType(declaration) - - parameters := declaration.ParameterList.Parameters - - for i, parameter := range parameters { - parameterType := functionType.Parameters[i].TypeAnnotation.Type - valType := compileValueType(parameterType) - name := parameter.Identifier.Identifier - compiler.declareLocal(name, valType) - } - - // Compile the function block - - stmt := compiler.visitBlock(block) - - // Important: compile locals after compiling function block, - // and don't include parameters in locals - locals := compileLocals(compiler.locals[len(parameters):]) - - compiledFunctionType := compileFunctionType(functionType) - - return &ir.Func{ - // TODO: fully qualify - Name: declaration.Identifier.Identifier, - Type: compiledFunctionType, - Locals: locals, - Statement: stmt, - } -} - -func (compiler *Compiler) visitBlock(block *ast.Block) ir.Stmt { - - // Block scope: each block gets an activation record - - compiler.activations.PushNewWithCurrent() - defer compiler.activations.Pop() - - // Compile each statement in the block - - stmts := make([]ir.Stmt, len(block.Statements)) - for i, statement := range block.Statements { - stmts[i] = ast.AcceptStatement[ir.Stmt](statement, compiler) - } - - // NOTE: just return an IR statement sequence, - // there is no need for an IR block - return &ir.Sequence{ - Stmts: stmts, - } -} - -func (compiler *Compiler) VisitCompositeDeclaration(_ *ast.CompositeDeclaration) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitAttachmentDeclaration(_ *ast.AttachmentDeclaration) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitInterfaceDeclaration(_ *ast.InterfaceDeclaration) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) 
VisitFieldDeclaration(_ *ast.FieldDeclaration) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitPragmaDeclaration(_ *ast.PragmaDeclaration) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitImportDeclaration(_ *ast.ImportDeclaration) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitTransactionDeclaration(_ *ast.TransactionDeclaration) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitEntitlementDeclaration(_ *ast.EntitlementDeclaration) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitEntitlementMappingDeclaration(_ *ast.EntitlementMappingDeclaration) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func (compiler *Compiler) VisitEnumCaseDeclaration(_ *ast.EnumCaseDeclaration) ir.Stmt { - // TODO - panic(errors.NewUnreachableError()) -} - -func compileBinaryOperation(operation ast.Operation) ir.BinOp { - // TODO: add remaining operations - switch operation { - case ast.OperationPlus: - return ir.BinOpPlus - } - - panic(errors.NewUnreachableError()) -} - -func compileValueType(ty sema.Type) ir.ValType { - // TODO: add remaining types - - switch ty { - case sema.StringType: - return ir.ValTypeString - case sema.IntType: - return ir.ValTypeInt - } - - panic(errors.NewUnreachableError()) -} - -func compileFunctionType(functionType *sema.FunctionType) ir.FuncType { - // compile parameter types - paramTypes := make([]ir.ValType, len(functionType.Parameters)) - for i, parameter := range functionType.Parameters { - paramTypes[i] = compileValueType(parameter.TypeAnnotation.Type) - } - - // compile return / result type - var resultTypes []ir.ValType - if functionType.ReturnTypeAnnotation.Type != sema.VoidType { - resultTypes = []ir.ValType{ - compileValueType(functionType.ReturnTypeAnnotation.Type), - } - } - return ir.FuncType{ - 
Params: paramTypes, - Results: resultTypes, - } -} - -func compileLocals(locals []*Local) []ir.Local { - result := make([]ir.Local, len(locals)) - for i, local := range locals { - result[i] = ir.Local{ - Type: local.Type, - } - } - return result -} diff --git a/compiler/compiler_test.go b/compiler/compiler_test.go deleted file mode 100644 index b9a0e2d866..0000000000 --- a/compiler/compiler_test.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package compiler - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/onflow/cadence/compiler/ir" - . 
"github.com/onflow/cadence/test_utils/sema_utils" -) - -func TestCompilerSimple(t *testing.T) { - - checker, err := ParseAndCheck(t, ` - fun inc(a: Int): Int { - let mod = 1 - return a + mod - } - `) - - require.NoError(t, err) - - compiler := NewCompiler(checker) - - res := compiler.VisitFunctionDeclaration(checker.Program.FunctionDeclarations()[0]) - - require.Equal(t, - &ir.Func{ - Name: "inc", - Type: ir.FuncType{ - Params: []ir.ValType{ - ir.ValTypeInt, - }, - Results: []ir.ValType{ - ir.ValTypeInt, - }, - }, - Locals: []ir.Local{ - {Type: ir.ValTypeInt}, - }, - Statement: &ir.Sequence{ - Stmts: []ir.Stmt{ - &ir.StoreLocal{ - LocalIndex: 1, - Exp: &ir.Const{ - Constant: ir.Int{Value: []byte{1, 1}}, - }, - }, - &ir.Return{ - Exp: &ir.BinOpExpr{ - Op: ir.BinOpPlus, - Left: &ir.CopyLocal{ - LocalIndex: 0, - }, - Right: &ir.CopyLocal{ - LocalIndex: 1, - }, - }, - }, - }, - }, - }, - res, - ) -} diff --git a/compiler/ir/binop.go b/compiler/ir/binop.go deleted file mode 100644 index 361101b31a..0000000000 --- a/compiler/ir/binop.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package ir - -//go:generate go run golang.org/x/tools/cmd/stringer -type=BinOp - -type BinOp uint - -const ( - BinOpUnknown BinOp = iota - BinOpPlus -) diff --git a/compiler/ir/binop_string.go b/compiler/ir/binop_string.go deleted file mode 100644 index 6960586a1d..0000000000 --- a/compiler/ir/binop_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=BinOp"; DO NOT EDIT. - -package ir - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[BinOpUnknown-0] - _ = x[BinOpPlus-1] -} - -const _BinOp_name = "BinOpUnknownBinOpPlus" - -var _BinOp_index = [...]uint8{0, 12, 21} - -func (i BinOp) String() string { - if i >= BinOp(len(_BinOp_index)-1) { - return "BinOp(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _BinOp_name[_BinOp_index[i]:_BinOp_index[i+1]] -} diff --git a/compiler/ir/constant.go b/compiler/ir/constant.go deleted file mode 100644 index f4ebe49dc0..0000000000 --- a/compiler/ir/constant.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package ir - -type Constant interface { - isConstant() - Accept(Visitor) Repr -} - -type Int struct { - Value []byte -} - -func (Int) isConstant() {} - -func (c Int) Accept(v Visitor) Repr { - return v.VisitInt(c) -} - -type String struct { - Value string -} - -func (String) isConstant() {} - -func (c String) Accept(v Visitor) Repr { - return v.VisitString(c) -} diff --git a/compiler/ir/expr.go b/compiler/ir/expr.go deleted file mode 100644 index 094003957b..0000000000 --- a/compiler/ir/expr.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package ir - -type Expr interface { - isExpr() - Accept(Visitor) Repr -} - -type Const struct { - Constant Constant -} - -func (*Const) isExpr() {} - -func (e *Const) Accept(v Visitor) Repr { - return v.VisitConst(e) -} - -type CopyLocal struct { - LocalIndex uint32 -} - -func (*CopyLocal) isExpr() {} - -func (e *CopyLocal) Accept(v Visitor) Repr { - return v.VisitCopyLocal(e) -} - -type MoveLocal struct { - LocalIndex uint32 -} - -func (*MoveLocal) isExpr() {} - -func (e *MoveLocal) Accept(v Visitor) Repr { - return v.VisitMoveLocal(e) -} - -type UnOpExpr struct { - Expr Expr - Op UnOp -} - -func (*UnOpExpr) isExpr() {} - -func (e *UnOpExpr) Accept(v Visitor) Repr { - return v.VisitUnOpExpr(e) -} - -type BinOpExpr struct { - Left Expr - Right Expr - Op BinOp -} - -func (*BinOpExpr) isExpr() {} - -func (e *BinOpExpr) Accept(v Visitor) Repr { - return v.VisitBinOpExpr(e) -} - -type Call struct { - Arguments []Expr - FunctionIndex uint32 -} - -func (*Call) isExpr() {} - -func (e *Call) Accept(v Visitor) Repr { - return v.VisitCall(e) -} diff --git a/compiler/ir/func.go b/compiler/ir/func.go deleted file mode 100644 index 532190e4c5..0000000000 --- a/compiler/ir/func.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package ir - -type Func struct { - Statement Stmt - Name string - Type FuncType - Locals []Local -} - -func (*Func) isStmt() {} - -func (f *Func) Accept(v Visitor) Repr { - return v.VisitFunc(f) -} - -type FuncType struct { - Params []ValType - Results []ValType -} diff --git a/compiler/ir/local.go b/compiler/ir/local.go deleted file mode 100644 index 1c95b03df0..0000000000 --- a/compiler/ir/local.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ir - -type Local struct { - Type ValType -} diff --git a/compiler/ir/stmt.go b/compiler/ir/stmt.go deleted file mode 100644 index f2ac1d453b..0000000000 --- a/compiler/ir/stmt.go +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ir - -type Stmt interface { - isStmt() - Accept(Visitor) Repr -} - -type Sequence struct { - Stmts []Stmt -} - -func (*Sequence) isStmt() {} - -func (s *Sequence) Accept(v Visitor) Repr { - return v.VisitSequence(s) -} - -type Block struct { - Stmts []Stmt -} - -func (*Block) isStmt() {} - -func (s *Block) Accept(v Visitor) Repr { - return v.VisitBlock(s) -} - -type Loop struct { - Stmts []Stmt -} - -func (*Loop) isStmt() {} - -func (s *Loop) Accept(v Visitor) Repr { - return v.VisitLoop(s) -} - -type If struct { - Test Expr - Then Stmt - Else Stmt -} - -func (*If) isStmt() {} - -func (s *If) Accept(v Visitor) Repr { - return v.VisitIf(s) -} - -type Branch struct { - Index uint32 -} - -func (*Branch) isStmt() {} - -func (s *Branch) Accept(v Visitor) Repr { - return v.VisitBranch(s) -} - -type BranchIf struct { - Exp Expr - Index uint32 -} - -func (*BranchIf) isStmt() {} - -func (s *BranchIf) Accept(v Visitor) Repr { - return v.VisitBranchIf(s) -} - -type StoreLocal struct { - Exp Expr - LocalIndex uint32 -} - -func (*StoreLocal) isStmt() {} - -func (s *StoreLocal) Accept(v Visitor) Repr { - return v.VisitStoreLocal(s) -} - -type Drop struct { - Exp Expr -} - -func (*Drop) isStmt() {} - -func (s *Drop) Accept(v Visitor) Repr { - return v.VisitDrop(s) -} - -type Return struct { - Exp Expr -} - -func (*Return) isStmt() {} - -func (s *Return) Accept(v Visitor) Repr { - return v.VisitReturn(s) -} diff --git a/compiler/ir/unop.go b/compiler/ir/unop.go deleted file mode 100644 index 405a1af023..0000000000 --- a/compiler/ir/unop.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ir - -//go:generate go run golang.org/x/tools/cmd/stringer -type=UnOp - -type UnOp uint - -const ( - UnOpUnknown UnOp = iota -) diff --git a/compiler/ir/unop_string.go b/compiler/ir/unop_string.go deleted file mode 100644 index 2c929f83a2..0000000000 --- a/compiler/ir/unop_string.go +++ /dev/null @@ -1,23 +0,0 @@ -// Code generated by "stringer -type=UnOp"; DO NOT EDIT. - -package ir - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[UnOpUnknown-0] -} - -const _UnOp_name = "UnOpUnknown" - -var _UnOp_index = [...]uint8{0, 11} - -func (i UnOp) String() string { - if i >= UnOp(len(_UnOp_index)-1) { - return "UnOp(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _UnOp_name[_UnOp_index[i]:_UnOp_index[i+1]] -} diff --git a/compiler/ir/valtype.go b/compiler/ir/valtype.go deleted file mode 100644 index 35b0916a2d..0000000000 --- a/compiler/ir/valtype.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ir - -//go:generate go run golang.org/x/tools/cmd/stringer -type=ValType - -type ValType uint - -const ( - ValTypeUnknown ValType = iota - ValTypeInt - ValTypeString -) diff --git a/compiler/ir/valtype_string.go b/compiler/ir/valtype_string.go deleted file mode 100644 index 0ca71428d0..0000000000 --- a/compiler/ir/valtype_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type=ValType"; DO NOT EDIT. - -package ir - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[ValTypeUnknown-0] - _ = x[ValTypeInt-1] - _ = x[ValTypeString-2] -} - -const _ValType_name = "ValTypeUnknownValTypeIntValTypeString" - -var _ValType_index = [...]uint8{0, 14, 24, 37} - -func (i ValType) String() string { - if i >= ValType(len(_ValType_index)-1) { - return "ValType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ValType_name[_ValType_index[i]:_ValType_index[i+1]] -} diff --git a/compiler/ir/visitor.go b/compiler/ir/visitor.go deleted file mode 100644 index 61b24de0a3..0000000000 --- a/compiler/ir/visitor.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ir - -type Repr any - -type ConstVisitor interface { - VisitInt(Int) Repr - VisitString(String) Repr -} - -type StmtVisitor interface { - VisitSequence(*Sequence) Repr - VisitBlock(*Block) Repr - VisitLoop(*Loop) Repr - VisitIf(*If) Repr - VisitBranch(*Branch) Repr - VisitBranchIf(*BranchIf) Repr - VisitStoreLocal(*StoreLocal) Repr - VisitDrop(*Drop) Repr - VisitReturn(*Return) Repr -} - -type ExprVisitor interface { - VisitConst(*Const) Repr - VisitCopyLocal(*CopyLocal) Repr - VisitMoveLocal(*MoveLocal) Repr - VisitUnOpExpr(*UnOpExpr) Repr - VisitBinOpExpr(*BinOpExpr) Repr - VisitCall(*Call) Repr -} - -type Visitor interface { - ConstVisitor - StmtVisitor - ExprVisitor - VisitFunc(f *Func) Repr -} diff --git a/compiler/local.go b/compiler/local.go deleted file mode 100644 index 8dc5ad765f..0000000000 --- a/compiler/local.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package compiler - -import ( - "github.com/onflow/cadence/compiler/ir" -) - -type Local struct { - Index uint32 - Type ir.ValType -} - -func NewLocal(index uint32, valType ir.ValType) *Local { - return &Local{ - Index: index, - Type: valType, - } -} diff --git a/compiler/wasm/block.go b/compiler/wasm/block.go deleted file mode 100644 index 127be91a29..0000000000 --- a/compiler/wasm/block.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -type Block struct { - BlockType BlockType - Instructions1 []Instruction - Instructions2 []Instruction -} diff --git a/compiler/wasm/blocktype.go b/compiler/wasm/blocktype.go deleted file mode 100644 index d62a36d4ad..0000000000 --- a/compiler/wasm/blocktype.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -const emptyBlockType byte = 0x40 - -type BlockType interface { - isBlockType() - write(writer *WASMWriter) error -} - -type TypeIndexBlockType struct { - TypeIndex uint32 -} - -func (t TypeIndexBlockType) write(w *WASMWriter) error { - // "the type index in a block type is encoded as a positive signed integer, - // so that its signed LEB128 bit pattern cannot collide with the encoding of value types or the special code 0x40, - // which correspond to the LEB128 encoding of negative integers. - // To avoid any loss in the range of allowed indices, it is treated as a 33 bit signed integer." - return w.buf.writeInt64LEB128(int64(t.TypeIndex)) -} - -func (TypeIndexBlockType) isBlockType() {} diff --git a/compiler/wasm/buf.go b/compiler/wasm/buf.go deleted file mode 100644 index d0483d165b..0000000000 --- a/compiler/wasm/buf.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -import ( - "io" -) - -type offset int - -// Buffer is a byte buffer, which allows reading and writing. 
-type Buffer struct { - data []byte - offset offset -} - -func (buf *Buffer) WriteByte(b byte) error { - if buf.offset < offset(len(buf.data)) { - buf.data[buf.offset] = b - } else { - buf.data = append(buf.data, b) - } - buf.offset++ - return nil -} - -func (buf *Buffer) WriteBytes(data []byte) error { - for _, b := range data { - err := buf.WriteByte(b) - if err != nil { - return err - } - } - return nil -} - -func (buf *Buffer) Read(data []byte) (int, error) { - n := copy(data, buf.data[buf.offset:]) - if n == 0 && len(data) != 0 { - return 0, io.EOF - } - buf.offset += offset(n) - return n, nil -} - -func (buf *Buffer) ReadByte() (byte, error) { - if buf.offset >= offset(len(buf.data)) { - return 0, io.EOF - } - b := buf.data[buf.offset] - buf.offset++ - return b, nil -} - -func (buf *Buffer) PeekByte() (byte, error) { - if buf.offset >= offset(len(buf.data)) { - return 0, io.EOF - } - b := buf.data[buf.offset] - return b, nil -} - -func (buf *Buffer) ReadBytesEqual(expected []byte) (bool, error) { - off := buf.offset - for _, b := range expected { - if off >= offset(len(buf.data)) { - return false, io.EOF - } - if buf.data[off] != b { - return false, nil - } - off++ - } - buf.offset = off - return true, nil -} - -func (buf *Buffer) Bytes() []byte { - return buf.data -} diff --git a/compiler/wasm/data.go b/compiler/wasm/data.go deleted file mode 100644 index f5a051d452..0000000000 --- a/compiler/wasm/data.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -// Data represents a data segment, which initializes a range of memory, -// at a given offset, with a static vector of bytes. -type Data struct { - // must be constant, as defined in the spec - // (https://webassembly.github.io/spec/core/valid/instructions.html#constant-expressions) - Offset []Instruction - Init []byte - MemoryIndex uint32 -} diff --git a/compiler/wasm/errors.go b/compiler/wasm/errors.go deleted file mode 100644 index 9317b89d7b..0000000000 --- a/compiler/wasm/errors.go +++ /dev/null @@ -1,928 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package wasm - -import ( - "fmt" -) - -// InvalidMagicError is returned when the WASM binary -// does not start with the magic byte sequence -type InvalidMagicError struct { - ReadError error - Offset int -} - -func (e InvalidMagicError) Error() string { - return fmt.Sprintf( - "invalid magic at offset %d", - e.Offset, - ) -} - -func (e InvalidMagicError) Unwrap() error { - return e.ReadError -} - -// InvalidMagicError is returned when the WASM binary -// does not have the expected version -type InvalidVersionError struct { - ReadError error - Offset int -} - -func (e InvalidVersionError) Error() string { - return fmt.Sprintf( - "invalid version at offset %d", - e.Offset, - ) -} - -func (e InvalidVersionError) Unwrap() error { - return e.ReadError -} - -// InvalidSectionIDError is returned when the WASM binary specifies -// an invalid section ID -type InvalidSectionIDError struct { - ReadError error - Offset int - SectionID sectionID -} - -func (e InvalidSectionIDError) Error() string { - return fmt.Sprintf( - "invalid section ID %d at offset %d", - e.SectionID, - e.Offset, - ) -} - -func (e InvalidSectionIDError) Unwrap() error { - return e.ReadError -} - -// InvalidDuplicateSectionError is returned when the WASM binary specifies -// a duplicate section -type InvalidDuplicateSectionError struct { - Offset int - SectionID sectionID -} - -func (e InvalidDuplicateSectionError) Error() string { - return fmt.Sprintf( - "invalid duplicate section with ID %d at offset %d", - e.SectionID, - e.Offset, - ) -} - -// InvalidSectionOrderError is returned when the WASM binary specifies -// a non-custom section out-of-order -type InvalidSectionOrderError struct { - Offset int - SectionID sectionID -} - -func (e InvalidSectionOrderError) Error() string { - return fmt.Sprintf( - "out-of-order section with ID %d at offset %d", - e.SectionID, - e.Offset, - ) -} - -// InvalidSectionSizeError is returned when the WASM binary specifies -// an invalid section size -type 
InvalidSectionSizeError struct { - ReadError error - Offset int -} - -func (e InvalidSectionSizeError) Error() string { - return fmt.Sprintf( - "invalid section size at offset %d: %s", - e.Offset, - e.ReadError, - ) -} - -func (e InvalidSectionSizeError) Unwrap() error { - return e.ReadError -} - -// InvalidValTypeError is returned when the WASM binary specifies -// an invalid value type -type InvalidValTypeError struct { - ReadError error - Offset int - ValType ValueType -} - -func (e InvalidValTypeError) Error() string { - return fmt.Sprintf( - "invalid value type %d at offset %d", - e.ValType, - e.Offset, - ) -} - -func (e InvalidValTypeError) Unwrap() error { - return e.ReadError -} - -// InvalidFuncTypeIndicatorError is returned when the WASM binary specifies -// an invalid function type indicator -type InvalidFuncTypeIndicatorError struct { - ReadError error - Offset int - FuncTypeIndicator byte -} - -func (e InvalidFuncTypeIndicatorError) Error() string { - return fmt.Sprintf( - "invalid function type indicator at offset %d: got %x, expected %x", - e.Offset, - e.FuncTypeIndicator, - functionTypeIndicator, - ) -} - -func (e InvalidFuncTypeIndicatorError) Unwrap() error { - return e.ReadError -} - -// InvalidFuncTypeParameterCountError is returned when the WASM binary specifies -// an invalid func type parameter count -type InvalidFuncTypeParameterCountError struct { - ReadError error - Offset int -} - -func (e InvalidFuncTypeParameterCountError) Error() string { - return fmt.Sprintf( - "invalid function type parameter count at offset %d", - e.Offset, - ) -} - -func (e InvalidFuncTypeParameterCountError) Unwrap() error { - return e.ReadError -} - -// InvalidFuncTypeParameterTypeError is returned when the WASM binary specifies -// an invalid function type parameter type -type InvalidFuncTypeParameterTypeError struct { - ReadError error - Index int -} - -func (e InvalidFuncTypeParameterTypeError) Error() string { - return fmt.Sprintf( - "invalid function type 
parameter type at index %d", - e.Index, - ) -} - -func (e InvalidFuncTypeParameterTypeError) Unwrap() error { - return e.ReadError -} - -// InvalidFuncTypeResultCountError is returned when the WASM binary specifies -// an invalid func type result count -type InvalidFuncTypeResultCountError struct { - ReadError error - Offset int -} - -func (e InvalidFuncTypeResultCountError) Error() string { - return fmt.Sprintf( - "invalid function type result count at offset %d", - e.Offset, - ) -} - -func (e InvalidFuncTypeResultCountError) Unwrap() error { - return e.ReadError -} - -// InvalidFuncTypeResultTypeError is returned when the WASM binary specifies -// an invalid function type result type -type InvalidFuncTypeResultTypeError struct { - ReadError error - Index int -} - -func (e InvalidFuncTypeResultTypeError) Error() string { - return fmt.Sprintf( - "invalid function type result type at index %d", - e.Index, - ) -} - -func (e InvalidFuncTypeResultTypeError) Unwrap() error { - return e.ReadError -} - -// InvalidTypeSectionTypeCountError is returned when the WASM binary specifies -// an invalid count in the type section -type InvalidTypeSectionTypeCountError struct { - ReadError error - Offset int -} - -func (e InvalidTypeSectionTypeCountError) Error() string { - return fmt.Sprintf( - "invalid type count in type section at offset %d", - e.Offset, - ) -} - -func (e InvalidTypeSectionTypeCountError) Unwrap() error { - return e.ReadError -} - -// InvalidImportSectionImportCountError is returned when the WASM binary specifies -// an invalid count in the import section -type InvalidImportSectionImportCountError struct { - ReadError error - Offset int -} - -func (e InvalidImportSectionImportCountError) Error() string { - return fmt.Sprintf( - "invalid import count in import section at offset %d", - e.Offset, - ) -} - -func (e InvalidImportSectionImportCountError) Unwrap() error { - return e.ReadError -} - -// InvalidImportError is returned when the WASM binary specifies -// 
invalid import in the import section -type InvalidImportError struct { - ReadError error - Index int -} - -func (e InvalidImportError) Error() string { - return fmt.Sprintf( - "invalid import at index %d", - e.Index, - ) -} - -func (e InvalidImportError) Unwrap() error { - return e.ReadError -} - -// InvalidImportIndicatorError is returned when the WASM binary specifies -// an invalid type indicator in the import section -type InvalidImportIndicatorError struct { - ReadError error - Offset int - ImportIndicator importIndicator -} - -func (e InvalidImportIndicatorError) Error() string { - return fmt.Sprintf( - "invalid import indicator %d at offset %d", - e.ImportIndicator, - e.Offset, - ) -} - -func (e InvalidImportIndicatorError) Unwrap() error { - return e.ReadError -} - -// InvalidImportSectionTypeIndexError is returned when the WASM binary specifies -// an invalid type index in the import section -type InvalidImportSectionTypeIndexError struct { - ReadError error - Offset int -} - -func (e InvalidImportSectionTypeIndexError) Error() string { - return fmt.Sprintf( - "invalid type index in import section at offset %d", - e.Offset, - ) -} - -func (e InvalidImportSectionTypeIndexError) Unwrap() error { - return e.ReadError -} - -// InvalidFunctionSectionFunctionCountError is returned when the WASM binary specifies -// an invalid count in the function section -type InvalidFunctionSectionFunctionCountError struct { - ReadError error - Offset int -} - -func (e InvalidFunctionSectionFunctionCountError) Error() string { - return fmt.Sprintf( - "invalid function count in function section at offset %d", - e.Offset, - ) -} - -func (e InvalidFunctionSectionFunctionCountError) Unwrap() error { - return e.ReadError -} - -// InvalidFunctionSectionTypeIndexError is returned when the WASM binary specifies -// an invalid type index in the function section -type InvalidFunctionSectionTypeIndexError struct { - ReadError error - Offset int - Index int -} - -func (e 
InvalidFunctionSectionTypeIndexError) Error() string { - return fmt.Sprintf( - "invalid type index in function section at index %d at offset %d", - e.Index, - e.Offset, - ) -} - -func (e InvalidFunctionSectionTypeIndexError) Unwrap() error { - return e.ReadError -} - -// FunctionCountMismatchError is returned when the WASM binary specifies -// information for a different number of functions than previously specified -type FunctionCountMismatchError struct { - Offset int -} - -func (e FunctionCountMismatchError) Error() string { - return fmt.Sprintf( - "function count mismatch at offset %d", - e.Offset, - ) -} - -// InvalidExportSectionExportCountError is returned when the WASM binary specifies -// an invalid count in the export section -type InvalidExportSectionExportCountError struct { - ReadError error - Offset int -} - -func (e InvalidExportSectionExportCountError) Error() string { - return fmt.Sprintf( - "invalid export count in export section at offset %d", - e.Offset, - ) -} - -func (e InvalidExportSectionExportCountError) Unwrap() error { - return e.ReadError -} - -// InvalidExportError is returned when the WASM binary specifies -// invalid export in the export section -type InvalidExportError struct { - ReadError error - Index int -} - -func (e InvalidExportError) Error() string { - return fmt.Sprintf( - "invalid export at index %d", - e.Index, - ) -} - -func (e InvalidExportError) Unwrap() error { - return e.ReadError -} - -// InvalidExportIndicatorError is returned when the WASM binary specifies -// an invalid type indicator in the export section -type InvalidExportIndicatorError struct { - ReadError error - Offset int - ExportIndicator exportIndicator -} - -func (e InvalidExportIndicatorError) Error() string { - return fmt.Sprintf( - "invalid export indicator %d at offset %d", - e.ExportIndicator, - e.Offset, - ) -} - -func (e InvalidExportIndicatorError) Unwrap() error { - return e.ReadError -} - -// InvalidExportSectionIndexError is returned when the 
WASM binary specifies -// an invalid index in the export section -type InvalidExportSectionIndexError struct { - ReadError error - Offset int -} - -func (e InvalidExportSectionIndexError) Error() string { - return fmt.Sprintf( - "invalid index in export section at offset %d", - e.Offset, - ) -} - -func (e InvalidExportSectionIndexError) Unwrap() error { - return e.ReadError -} - -// InvalidCodeSectionFunctionCountError is returned when the WASM binary specifies -// an invalid function count in the code section -type InvalidCodeSectionFunctionCountError struct { - ReadError error - Offset int -} - -func (e InvalidCodeSectionFunctionCountError) Error() string { - return fmt.Sprintf( - "invalid function count in code section at offset %d", - e.Offset, - ) -} - -func (e InvalidCodeSectionFunctionCountError) Unwrap() error { - return e.ReadError -} - -// InvalidFunctionCodeError is returned when the WASM binary specifies -// invalid code for a function in the code section -type InvalidFunctionCodeError struct { - ReadError error - Index int -} - -func (e InvalidFunctionCodeError) Error() string { - return fmt.Sprintf( - "invalid code for function at index %d", - e.Index, - ) -} - -func (e InvalidFunctionCodeError) Unwrap() error { - return e.ReadError -} - -// InvalidCodeSizeError is returned when the WASM binary specifies -// an invalid code size in the code section -type InvalidCodeSizeError struct { - ReadError error - Offset int -} - -func (e InvalidCodeSizeError) Error() string { - return fmt.Sprintf( - "invalid code size in code section at offset %d", - e.Offset, - ) -} - -// InvalidCodeSectionLocalsCountError is returned when the WASM binary specifies -// an invalid locals count in the code section -type InvalidCodeSectionLocalsCountError struct { - ReadError error - Offset int -} - -func (e InvalidCodeSectionLocalsCountError) Error() string { - return fmt.Sprintf( - "invalid locals count in code section at offset %d", - e.Offset, - ) -} - -func (e 
InvalidCodeSectionLocalsCountError) Unwrap() error { - return e.ReadError -} - -// InvalidCodeSectionCompressedLocalsCountError is returned when the WASM binary specifies -// an invalid local type in the code section -type InvalidCodeSectionCompressedLocalsCountError struct { - ReadError error - Offset int -} - -func (e InvalidCodeSectionCompressedLocalsCountError) Error() string { - return fmt.Sprintf( - "invalid compressed local type count in code section at offset %d", - e.Offset, - ) -} - -func (e InvalidCodeSectionCompressedLocalsCountError) Unwrap() error { - return e.ReadError -} - -// InvalidCodeSectionLocalTypeError is returned when the WASM binary specifies -// an invalid local type in the code section -type InvalidCodeSectionLocalTypeError struct { - ReadError error - Offset int -} - -func (e InvalidCodeSectionLocalTypeError) Error() string { - return fmt.Sprintf( - "invalid local type in code section at offset %d", - e.Offset, - ) -} - -func (e InvalidCodeSectionLocalTypeError) Unwrap() error { - return e.ReadError -} - -// CodeSectionLocalsCountMismatchError is returned when -// the sum of the compressed locals locals count in the code section does not match -// the number of locals in the code section of the WASM binary -type CodeSectionLocalsCountMismatchError struct { - Offset int - Expected uint32 - Actual uint32 -} - -func (e CodeSectionLocalsCountMismatchError) Error() string { - return fmt.Sprintf( - "local count mismatch in code section at offset %d: expected %d, got %d", - e.Offset, - e.Expected, - e.Actual, - ) -} - -// InvalidOpcodeError is returned when the WASM binary specifies -// an invalid opcode in the code section -type InvalidOpcodeError struct { - ReadError error - Offset int - Opcode opcode -} - -func (e InvalidOpcodeError) Error() string { - return fmt.Sprintf( - "invalid opcode in code section at offset %d: %x", - e.Offset, - e.Opcode, - ) -} - -func (e InvalidOpcodeError) Unwrap() error { - return e.ReadError -} - -// 
InvalidInstructionArgumentError is returned when the WASM binary specifies -// an invalid argument for an instruction in the code section -type InvalidInstructionArgumentError struct { - ReadError error - Offset int -} - -func (e InvalidInstructionArgumentError) Error() string { - return fmt.Sprintf( - "invalid argument in code section at offset %d", - e.Offset, - ) -} - -func (e InvalidInstructionArgumentError) Unwrap() error { - return e.ReadError -} - -// MissingEndInstructionError is returned when the WASM binary -// misses an end instruction for a function in the code section -type MissingEndInstructionError struct { - Offset int -} - -func (e MissingEndInstructionError) Error() string { - return fmt.Sprintf( - "missing end instruction in code section at offset %d", - e.Offset, - ) -} - -// InvalidNonUTF8NameError is returned when the WASM binary specifies -// or the writer is given a name which is not properly UTF-8 encoded -type InvalidNonUTF8NameError struct { - Name string - Offset int -} - -func (e InvalidNonUTF8NameError) Error() string { - return fmt.Sprintf( - "invalid non UTF-8 string at offset %d: %s", - e.Offset, - e.Name, - ) -} - -// InvalidNameLengthError is returned the WASM binary specifies -// an invalid name length -type InvalidNameLengthError struct { - ReadError error - Offset int -} - -func (e InvalidNameLengthError) Error() string { - return fmt.Sprintf( - "invalid name length at offset %d", - e.Offset, - ) -} - -func (e InvalidNameLengthError) Unwrap() error { - return e.ReadError -} - -// InvalidNameError is returned the WASM binary specifies -// an invalid name -type InvalidNameError struct { - ReadError error - Offset int -} - -func (e InvalidNameError) Error() string { - return fmt.Sprintf( - "invalid name at offset %d", - e.Offset, - ) -} - -func (e InvalidNameError) Unwrap() error { - return e.ReadError -} - -// IncompleteNameError is returned the WASM binary specifies -// an incomplete name -type IncompleteNameError struct { - 
Offset int - Expected uint32 - Actual uint32 -} - -func (e IncompleteNameError) Error() string { - return fmt.Sprintf( - "incomplete name at offset %d. expected %d bytes, got %d", - e.Offset, - e.Expected, - e.Actual, - ) -} - -// InvalidBlockSecondInstructionsError is returned when the WASM binary specifies -// or the writer is given a second set of instructions in a block that -// is not allowed to have it (only the 'if' instruction may have it) -type InvalidBlockSecondInstructionsError struct { - Offset int -} - -func (e InvalidBlockSecondInstructionsError) Error() string { - return fmt.Sprintf( - "invalid second set of instructions at offset %d", - e.Offset, - ) -} - -// InvalidInstructionVectorArgumentCountError is returned when the WASM binary specifies -// an invalid count for a vector argument of an instruction -type InvalidInstructionVectorArgumentCountError struct { - ReadError error - Offset int -} - -func (e InvalidInstructionVectorArgumentCountError) Error() string { - return fmt.Sprintf( - "invalid vector count for argument of instruction at offset %d", - e.Offset, - ) -} - -func (e InvalidInstructionVectorArgumentCountError) Unwrap() error { - return e.ReadError -} - -// InvalidBlockTypeTypeIndexError is returned when the WASM binary specifies -// an invalid type index as a block type -type InvalidBlockTypeTypeIndexError struct { - TypeIndex int64 - Offset int -} - -func (e InvalidBlockTypeTypeIndexError) Error() string { - return fmt.Sprintf( - "invalid type index in block type at offset %d: %d", - e.Offset, - e.TypeIndex, - ) -} - -// InvalidDataSectionSegmentCountError is returned when the WASM binary specifies -// an invalid count in the data section -type InvalidDataSectionSegmentCountError struct { - ReadError error - Offset int -} - -func (e InvalidDataSectionSegmentCountError) Error() string { - return fmt.Sprintf( - "invalid segment count in data section at offset %d", - e.Offset, - ) -} - -func (e InvalidDataSectionSegmentCountError) 
Unwrap() error { - return e.ReadError -} - -// InvalidDataSegmentError is returned when the WASM binary specifies -// invalid segment in the data section -type InvalidDataSegmentError struct { - ReadError error - Index int -} - -func (e InvalidDataSegmentError) Error() string { - return fmt.Sprintf( - "invalid data segment at index %d", - e.Index, - ) -} - -func (e InvalidDataSegmentError) Unwrap() error { - return e.ReadError -} - -// InvalidDataSectionMemoryIndexError is returned when the WASM binary specifies -// an invalid memory index in the data section -type InvalidDataSectionMemoryIndexError struct { - ReadError error - Offset int -} - -func (e InvalidDataSectionMemoryIndexError) Error() string { - return fmt.Sprintf( - "invalid memory index in data section at offset %d", - e.Offset, - ) -} - -func (e InvalidDataSectionMemoryIndexError) Unwrap() error { - return e.ReadError -} - -// InvalidDataSectionInitByteCountError is returned when the WASM binary specifies -// an invalid init byte count in the data section -type InvalidDataSectionInitByteCountError struct { - ReadError error - Offset int -} - -func (e InvalidDataSectionInitByteCountError) Error() string { - return fmt.Sprintf( - "invalid init byte count in data section at offset %d", - e.Offset, - ) -} - -func (e InvalidDataSectionInitByteCountError) Unwrap() error { - return e.ReadError -} - -// InvalidMemorySectionMemoryCountError is returned when the WASM binary specifies -// an invalid count in the memory section -type InvalidMemorySectionMemoryCountError struct { - ReadError error - Offset int -} - -func (e InvalidMemorySectionMemoryCountError) Error() string { - return fmt.Sprintf( - "invalid memories count in memory section at offset %d", - e.Offset, - ) -} - -func (e InvalidMemorySectionMemoryCountError) Unwrap() error { - return e.ReadError -} - -// InvalidMemoryError is returned when the WASM binary specifies -// invalid memory in the memory section -type InvalidMemoryError struct { - 
ReadError error - Index int -} - -func (e InvalidMemoryError) Error() string { - return fmt.Sprintf( - "invalid memory at index %d", - e.Index, - ) -} - -func (e InvalidMemoryError) Unwrap() error { - return e.ReadError -} - -// InvalidLimitIndicatorError is returned when the WASM binary specifies -// an invalid limit indicator -type InvalidLimitIndicatorError struct { - ReadError error - Offset int - LimitIndicator byte -} - -func (e InvalidLimitIndicatorError) Error() string { - return fmt.Sprintf( - "invalid limit indicator at offset %d: %x", - e.Offset, - e.LimitIndicator, - ) -} - -func (e InvalidLimitIndicatorError) Unwrap() error { - return e.ReadError -} - -// InvalidLimitMinError is returned when the WASM binary specifies -// an invalid limit minimum -type InvalidLimitMinError struct { - ReadError error - Offset int -} - -func (e InvalidLimitMinError) Error() string { - return fmt.Sprintf( - "invalid limit minimum at offset %d", - e.Offset, - ) -} - -func (e InvalidLimitMinError) Unwrap() error { - return e.ReadError -} - -// InvalidLimitMaxError is returned when the WASM binary specifies -// an invalid limit maximum -type InvalidLimitMaxError struct { - ReadError error - Offset int -} - -func (e InvalidLimitMaxError) Error() string { - return fmt.Sprintf( - "invalid limit maximum at offset %d", - e.Offset, - ) -} - -func (e InvalidLimitMaxError) Unwrap() error { - return e.ReadError -} - -// InvalidStartSectionFunctionIndexError is returned when the WASM binary specifies -// an invalid function index in the start section -type InvalidStartSectionFunctionIndexError struct { - ReadError error - Offset int -} - -func (e InvalidStartSectionFunctionIndexError) Error() string { - return fmt.Sprintf( - "invalid function index in start section at offset %d", - e.Offset, - ) -} - -func (e InvalidStartSectionFunctionIndexError) Unwrap() error { - return e.ReadError -} diff --git a/compiler/wasm/export.go b/compiler/wasm/export.go deleted file mode 100644 index 
1f2e3e04ad..0000000000 --- a/compiler/wasm/export.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -// Exports represents an export -type Export struct { - Descriptor ExportDescriptor - Name string -} - -// exportIndicator is the byte used to indicate the kind of export in the WASM binary -type exportIndicator byte - -const ( - // exportIndicatorFunction is the byte used to indicate the export of a function in the WASM binary - exportIndicatorFunction exportIndicator = 0x0 - // exportIndicatorMemory is the byte used to indicate the export of a memory in the WASM binary - exportIndicatorMemory exportIndicator = 0x2 -) - -// ExportDescriptor represents an export (e.g. a function, memory, etc.) 
-type ExportDescriptor interface { - isExportDescriptor() -} - -// FunctionExport represents the export of a function -type FunctionExport struct { - FunctionIndex uint32 -} - -func (FunctionExport) isExportDescriptor() {} - -// MemoryExport represents the export of a memory -type MemoryExport struct { - MemoryIndex uint32 -} - -func (MemoryExport) isExportDescriptor() {} diff --git a/compiler/wasm/function.go b/compiler/wasm/function.go deleted file mode 100644 index 0f22ff75d8..0000000000 --- a/compiler/wasm/function.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -// Function represents a function -type Function struct { - Code *Code - Name string - TypeIndex uint32 -} - -// Code represents the code of a function -type Code struct { - Locals []ValueType - Instructions []Instruction -} diff --git a/compiler/wasm/functiontype.go b/compiler/wasm/functiontype.go deleted file mode 100644 index 86287073bd..0000000000 --- a/compiler/wasm/functiontype.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -// functionTypeIndicator is the byte used to indicate a function type in the WASM binary -const functionTypeIndicator = 0x60 - -// FunctionType is the type of a function. -// It may have multiple parameters and return values -type FunctionType struct { - Params []ValueType - Results []ValueType -} diff --git a/compiler/wasm/gen/main.go b/compiler/wasm/gen/main.go deleted file mode 100644 index f052c85f8e..0000000000 --- a/compiler/wasm/gen/main.go +++ /dev/null @@ -1,959 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "fmt" - "os" - "regexp" - "strings" - "text/template" -) - -const fileTemplate = `// Code generated by utils/version. DO NOT EDIT. 
-/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -{{goGenerateComment}} - -package wasm - -import ( - "io" -) - -{{range .Instructions -}} -// Instruction{{.Identifier}} is the '{{.Name}}' instruction -// -type Instruction{{.Identifier}} struct{{if .Arguments}} { -{{- range .Arguments}}{{- if .Identifier}} - {{.Identifier}} {{.Type.FieldType}}{{end}}{{end}} -} -{{- else}}{}{{- end}} - -func (Instruction{{.Identifier}}) isInstruction() {} - -func (i Instruction{{.Identifier}}) write(w *WASMWriter) error { - err := w.writeOpcode({{.OpcodeList}}) - if err != nil { - return err - } -{{range .Arguments}} - {{.Variable}} := i.{{.Identifier}} - {{.Type.Write .Variable}} -{{end}} - return nil -} - -{{end -}} - -const ( -{{- range .Instructions }} - // {{.OpcodeIdentifier}} is the opcode for the '{{.Name}}' instruction - {{.OpcodeIdentifier}} opcode = {{.Opcode | printf "0x%x"}} -{{- end}} -) - -// readInstruction reads an instruction in the WASM binary -// -func (r *WASMReader) readInstruction() (Instruction, error) { - opcodeOffset := r.buf.offset - b, err := r.buf.ReadByte() - - c := opcode(b) - - if err != nil { - if err == io.EOF { - return nil, MissingEndInstructionError{ - Offset: int(opcodeOffset), - } - } else { - return nil, InvalidOpcodeError{ - Offset: int(opcodeOffset), - Opcode: c, - ReadError: err, - } - } - } -{{switch .}} -} -` - 
-const switchTemplate = ` -switch c { -{{- range $key, $group := . }} -case {{ $key }}: -{{- if (eq (len $group.Instructions) 1)}} -{{- with (index $group.Instructions 0) }} -{{- range .Arguments}} - {{.Type.Read .Variable}} -{{end}} - return Instruction{{.Identifier}}{{if .Arguments}}{ -{{- range .Arguments}} - {{.Identifier}}: {{.Variable}},{{end}} - } -{{- else}}{}{{- end}}, nil -{{end}} -{{- else}} -{{switch $group}} -{{- end}}{{end}} -default: - return nil, InvalidOpcodeError{ - Offset: int(opcodeOffset), - Opcode: c, - ReadError: err, - } -} -` - -type opcodes []byte - -type argumentType interface { - isArgumentType() - FieldType() string - Read(variable string) string - Write(variable string) string -} - -type ArgumentTypeUint32 struct{} - -func (t ArgumentTypeUint32) isArgumentType() {} - -func (t ArgumentTypeUint32) FieldType() string { - return "uint32" -} - -func (t ArgumentTypeUint32) Read(variable string) string { - return fmt.Sprintf( - `%s, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - }`, - variable, - ) -} - -func (t ArgumentTypeUint32) Write(variable string) string { - return fmt.Sprintf( - `err = w.buf.writeUint32LEB128(%s) - if err != nil { - return err - }`, - variable, - ) -} - -type ArgumentTypeInt32 struct{} - -func (t ArgumentTypeInt32) isArgumentType() {} - -func (t ArgumentTypeInt32) FieldType() string { - return "int32" -} - -func (t ArgumentTypeInt32) Read(variable string) string { - return fmt.Sprintf( - `%s, err := r.readInt32LEB128InstructionArgument() - if err != nil { - return nil, err - }`, - variable, - ) -} - -func (t ArgumentTypeInt32) Write(variable string) string { - return fmt.Sprintf( - `err = w.buf.writeInt32LEB128(%s) - if err != nil { - return err - }`, - variable, - ) -} - -type ArgumentTypeInt64 struct{} - -func (t ArgumentTypeInt64) isArgumentType() {} - -func (t ArgumentTypeInt64) FieldType() string { - return "int64" -} - -func (t ArgumentTypeInt64) Read(variable string) string 
{ - return fmt.Sprintf( - `%s, err := r.readInt64LEB128InstructionArgument() - if err != nil { - return nil, err - }`, - variable, - ) -} - -func (t ArgumentTypeInt64) Write(variable string) string { - return fmt.Sprintf( - `err = w.buf.writeInt64LEB128(%s) - if err != nil { - return err - }`, - variable, - ) -} - -type ArgumentTypeBlock struct { - AllowElse bool -} - -func (t ArgumentTypeBlock) isArgumentType() {} - -func (t ArgumentTypeBlock) FieldType() string { - return "Block" -} - -func (t ArgumentTypeBlock) Read(variable string) string { - return fmt.Sprintf( - `%s, err := r.readBlockInstructionArgument(%v) - if err != nil { - return nil, err - }`, - variable, - t.AllowElse, - ) -} - -func (t ArgumentTypeBlock) Write(variable string) string { - return fmt.Sprintf( - `err = w.writeBlockInstructionArgument(%s, %v) - if err != nil { - return err - }`, - variable, - t.AllowElse, - ) -} - -type ArgumentTypeVector struct { - ArgumentType argumentType -} - -func (t ArgumentTypeVector) isArgumentType() {} - -func (t ArgumentTypeVector) FieldType() string { - return fmt.Sprintf("[]%s", t.ArgumentType.FieldType()) -} - -func (t ArgumentTypeVector) Read(variable string) string { - // TODO: improve error - - elementVariable := variable + "Element" - - return fmt.Sprintf( - `%[1]sCountOffset := r.buf.offset - %[1]sCount, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidInstructionVectorArgumentCountError{ - Offset: int(%[1]sCountOffset), - ReadError: err, - } - } - - %[1]s := make(%[2]s, %[1]sCount) - - for i := uint32(0); i < %[1]sCount; i++ { - %[3]s - %[1]s[i] = %[4]s - }`, - variable, - t.FieldType(), - t.ArgumentType.Read(elementVariable), - elementVariable, - ) -} - -func (t ArgumentTypeVector) Write(variable string) string { - // TODO: improve error - - elementVariable := variable + "Element" - - return fmt.Sprintf( - `%[1]sCount := len(%[1]s) - err = w.buf.writeUint32LEB128(uint32(%[1]sCount)) - if err != nil { - return err - } - - for i := 
0; i < %[1]sCount; i++ { - %[3]s := %[1]s[i] - %[2]s - }`, - variable, - t.ArgumentType.Write(elementVariable), - elementVariable, - ) -} - -type argument struct { - Type argumentType - Identifier string -} - -func (a argument) Variable() string { - first := strings.ToLower(string(a.Identifier[0])) - rest := a.Identifier[1:] - return first + rest -} - -type arguments []argument - -type instruction struct { - Name string - Opcodes opcodes - Arguments arguments -} - -var identifierPartRegexp = regexp.MustCompile("(^|[._])[A-Za-z0-9]") - -func (ins instruction) Identifier() string { - return string(identifierPartRegexp.ReplaceAllFunc([]byte(ins.Name), func(bytes []byte) []byte { - return []byte(strings.ToUpper(string(bytes[len(bytes)-1]))) - })) -} -func (ins instruction) OpcodeList() string { - var b strings.Builder - - count := len(ins.Opcodes) - - // prefix - for i := 0; i < count-1; i++ { - if i > 0 { - b.WriteString(", ") - } - opcode := ins.Opcodes[i] - _, err := fmt.Fprintf(&b, "0x%x", opcode) - if err != nil { - panic(err) - } - } - - // final opcode - if count > 1 { - b.WriteString(", ") - } - _, err := b.WriteString(ins.OpcodeIdentifier()) - if err != nil { - panic(err) - } - - return b.String() -} - -func (ins instruction) Opcode() byte { - return ins.Opcodes[len(ins.Opcodes)-1] -} - -func (ins instruction) OpcodeIdentifier() string { - return fmt.Sprintf("opcode%s", ins.Identifier()) -} - -type instructionGroup struct { - Instructions []instruction - Depth int -} - -func (group instructionGroup) GroupByOpcode() map[string]instructionGroup { - result := map[string]instructionGroup{} - - for _, ins := range group.Instructions { - innerDepth := group.Depth + 1 - atEnd := len(ins.Opcodes) <= innerDepth - opcode := ins.Opcodes[group.Depth] - var key string - if atEnd { - key = ins.OpcodeIdentifier() - } else { - key = fmt.Sprintf("0x%x", opcode) - } - innerGroup := result[key] - innerGroup.Depth = innerDepth - innerGroup.Instructions = 
append(innerGroup.Instructions, ins) - result[key] = innerGroup - } - - return result -} - -var trailingWhitespaceRegexp = regexp.MustCompile("(?m:[ \t]+$)") - -const target = "instructions.go" - -var indexArgumentType = ArgumentTypeUint32{} - -func main() { - - f, err := os.Create(target) - if err != nil { - panic(fmt.Errorf("could not create %s: %w\n", target, err)) - } - defer func() { - _ = f.Close() - }() - - var generateSwitch func(group instructionGroup) (string, error) - - templateFuncs := map[string]any{ - "goGenerateComment": func() string { - // NOTE: must be templated/injected, as otherwise - // it will be detected itself as a go generate invocation itself - return "//go:generate go run ./gen/main.go\n//go:generate go fmt $GOFILE" - }, - "switch": func(group instructionGroup) (string, error) { - res, err := generateSwitch(group) - if err != nil { - return "", err - } - pad := strings.Repeat("\t", group.Depth+1) - padded := pad + strings.ReplaceAll(res, "\n", "\n"+pad) - trimmed := trailingWhitespaceRegexp.ReplaceAll([]byte(padded), nil) - return string(trimmed), nil - }, - } - - parsedSwitchTemplate := template.Must( - template.New("switch"). - Funcs(templateFuncs). - Parse(switchTemplate), - ) - - parsedFileTemplate := template.Must( - template.New("instructions"). - Funcs(templateFuncs). 
- Parse(fileTemplate), - ) - - generateSwitch = func(instructions instructionGroup) (string, error) { - var b strings.Builder - err := parsedSwitchTemplate.Execute(&b, instructions.GroupByOpcode()) - if err != nil { - return "", err - } - return b.String(), nil - } - - declare := func(instructions []instruction) { - err = parsedFileTemplate.Execute(f, - instructionGroup{ - Depth: 0, - Instructions: instructions, - }, - ) - if err != nil { - panic(err) - } - } - - declare([]instruction{ - // Control Instructions - { - Name: "unreachable", - Opcodes: opcodes{0x0}, - Arguments: arguments{}, - }, - { - Name: "nop", - Opcodes: opcodes{0x01}, - Arguments: arguments{}, - }, - { - Name: "block", - Opcodes: opcodes{0x02}, - Arguments: arguments{ - {Identifier: "Block", Type: ArgumentTypeBlock{AllowElse: false}}, - }, - }, - { - Name: "loop", - Opcodes: opcodes{0x03}, - Arguments: arguments{ - {Identifier: "Block", Type: ArgumentTypeBlock{AllowElse: false}}, - }, - }, - { - Name: "if", - Opcodes: opcodes{0x04}, - Arguments: arguments{ - {Identifier: "Block", Type: ArgumentTypeBlock{AllowElse: true}}, - }, - }, - { - Name: "end", - Opcodes: opcodes{0x0B}, - Arguments: arguments{}, - }, - { - Name: "br", - Opcodes: opcodes{0x0C}, - Arguments: arguments{ - {Identifier: "LabelIndex", Type: indexArgumentType}, - }, - }, - { - Name: "br_if", - Opcodes: opcodes{0x0D}, - Arguments: arguments{ - {Identifier: "LabelIndex", Type: indexArgumentType}, - }, - }, - { - Name: "br_table", - Opcodes: opcodes{0x0E}, - Arguments: arguments{ - {Identifier: "LabelIndices", Type: ArgumentTypeVector{ArgumentType: indexArgumentType}}, - {Identifier: "DefaultLabelIndex", Type: indexArgumentType}, - }, - }, - { - Name: "return", - Opcodes: opcodes{0x0F}, - Arguments: arguments{}, - }, - { - Name: "call", - Opcodes: opcodes{0x10}, - Arguments: arguments{ - {Identifier: "FuncIndex", Type: indexArgumentType}, - }, - }, - { - Name: "call_indirect", - Opcodes: opcodes{0x11}, - Arguments: arguments{ - 
{Identifier: "TypeIndex", Type: indexArgumentType}, - {Identifier: "TableIndex", Type: indexArgumentType}, - }, - }, - // Reference Instructions - { - Name: "ref.null", - Opcodes: opcodes{0xD0}, - Arguments: arguments{ - {Identifier: "TypeIndex", Type: indexArgumentType}, - }, - }, - { - Name: "ref.is_null", - Opcodes: opcodes{0xD1}, - Arguments: arguments{}, - }, - { - Name: "ref.func", - Opcodes: opcodes{0xD2}, - Arguments: arguments{ - {Identifier: "FuncIndex", Type: indexArgumentType}, - }, - }, - // Parametric Instructions - { - Name: "drop", - Opcodes: opcodes{0x1A}, - Arguments: arguments{}, - }, - { - Name: "select", - Opcodes: opcodes{0x1B}, - Arguments: arguments{}, - }, - // Variable Instructions - { - Name: "local.get", - Opcodes: opcodes{0x20}, - Arguments: arguments{ - {Identifier: "LocalIndex", Type: indexArgumentType}, - }, - }, - { - Name: "local.set", - Opcodes: opcodes{0x21}, - Arguments: arguments{ - {Identifier: "LocalIndex", Type: indexArgumentType}, - }, - }, - { - Name: "local.tee", - Opcodes: opcodes{0x22}, - Arguments: arguments{ - {Identifier: "LocalIndex", Type: indexArgumentType}, - }, - }, - { - Name: "global.get", - Opcodes: opcodes{0x23}, - Arguments: arguments{ - {Identifier: "GlobalIndex", Type: indexArgumentType}, - }, - }, - { - Name: "global.set", - Opcodes: opcodes{0x24}, - Arguments: arguments{ - {Identifier: "GlobalIndex", Type: indexArgumentType}, - }, - }, - // Numeric Instructions - // const instructions are followed by the respective literal - { - Name: "i32.const", - Opcodes: opcodes{0x41}, - Arguments: arguments{ - // i32, "Uninterpreted integers are encoded as signed integers." - {Identifier: "Value", Type: ArgumentTypeInt32{}}, - }, - }, - { - Name: "i64.const", - Opcodes: opcodes{0x42}, - Arguments: arguments{ - // i64, "Uninterpreted integers are encoded as signed integers." - {Identifier: "Value", Type: ArgumentTypeInt64{}}, - }, - }, - // All other numeric instructions are plain opcodes without any immediates. 
- { - Name: "i32.eqz", - Opcodes: opcodes{0x45}, - Arguments: arguments{}, - }, - { - Name: "i32.eq", - Opcodes: opcodes{0x46}, - Arguments: arguments{}, - }, - { - Name: "i32.ne", - Opcodes: opcodes{0x47}, - Arguments: arguments{}, - }, - { - Name: "i32.lt_s", - Opcodes: opcodes{0x48}, - Arguments: arguments{}, - }, - { - Name: "i32.lt_u", - Opcodes: opcodes{0x49}, - Arguments: arguments{}, - }, - { - Name: "i32.gt_s", - Opcodes: opcodes{0x4a}, - Arguments: arguments{}, - }, - { - Name: "i32.gt_u", - Opcodes: opcodes{0x4b}, - Arguments: arguments{}, - }, - { - Name: "i32.le_s", - Opcodes: opcodes{0x4c}, - Arguments: arguments{}, - }, - { - Name: "i32.le_u", - Opcodes: opcodes{0x4d}, - Arguments: arguments{}, - }, - { - Name: "i32.ge_s", - Opcodes: opcodes{0x4e}, - Arguments: arguments{}, - }, - { - Name: "i32.ge_u", - Opcodes: opcodes{0x4f}, - Arguments: arguments{}, - }, - { - Name: "i64.eqz", - Opcodes: opcodes{0x50}, - Arguments: arguments{}, - }, - { - Name: "i64.eq", - Opcodes: opcodes{0x51}, - Arguments: arguments{}, - }, - { - Name: "i64.ne", - Opcodes: opcodes{0x52}, - Arguments: arguments{}, - }, - { - Name: "i64.lt_s", - Opcodes: opcodes{0x53}, - Arguments: arguments{}, - }, - { - Name: "i64.lt_u", - Opcodes: opcodes{0x54}, - Arguments: arguments{}, - }, - { - Name: "i64.gt_s", - Opcodes: opcodes{0x55}, - Arguments: arguments{}, - }, - { - Name: "i64.gt_u", - Opcodes: opcodes{0x56}, - Arguments: arguments{}, - }, - { - Name: "i64.le_s", - Opcodes: opcodes{0x57}, - Arguments: arguments{}, - }, - { - Name: "i64.le_u", - Opcodes: opcodes{0x58}, - Arguments: arguments{}, - }, - { - Name: "i64.ge_s", - Opcodes: opcodes{0x59}, - Arguments: arguments{}, - }, - { - Name: "i64.ge_u", - Opcodes: opcodes{0x5a}, - Arguments: arguments{}, - }, - - { - Name: "i32.clz", - Opcodes: opcodes{0x67}, - Arguments: arguments{}, - }, - { - Name: "i32.ctz", - Opcodes: opcodes{0x68}, - Arguments: arguments{}, - }, - { - Name: "i32.popcnt", - Opcodes: opcodes{0x69}, - Arguments: 
arguments{}, - }, - { - Name: "i32.add", - Opcodes: opcodes{0x6a}, - Arguments: arguments{}, - }, - { - Name: "i32.sub", - Opcodes: opcodes{0x6b}, - Arguments: arguments{}, - }, - { - Name: "i32.mul", - Opcodes: opcodes{0x6c}, - Arguments: arguments{}, - }, - { - Name: "i32.div_s", - Opcodes: opcodes{0x6d}, - Arguments: arguments{}, - }, - { - Name: "i32.div_u", - Opcodes: opcodes{0x6e}, - Arguments: arguments{}, - }, - { - Name: "i32.rem_s", - Opcodes: opcodes{0x6f}, - Arguments: arguments{}, - }, - { - Name: "i32.rem_u", - Opcodes: opcodes{0x70}, - Arguments: arguments{}, - }, - { - Name: "i32.and", - Opcodes: opcodes{0x71}, - Arguments: arguments{}, - }, - { - Name: "i32.or", - Opcodes: opcodes{0x72}, - Arguments: arguments{}, - }, - { - Name: "i32.xor", - Opcodes: opcodes{0x73}, - Arguments: arguments{}, - }, - { - Name: "i32.shl", - Opcodes: opcodes{0x74}, - Arguments: arguments{}, - }, - { - Name: "i32.shr_s", - Opcodes: opcodes{0x75}, - Arguments: arguments{}, - }, - { - Name: "i32.shr_u", - Opcodes: opcodes{0x76}, - Arguments: arguments{}, - }, - { - Name: "i32.rotl", - Opcodes: opcodes{0x77}, - Arguments: arguments{}, - }, - { - Name: "i32.rotr", - Opcodes: opcodes{0x78}, - Arguments: arguments{}, - }, - { - Name: "i64.clz", - Opcodes: opcodes{0x79}, - Arguments: arguments{}, - }, - { - Name: "i64.ctz", - Opcodes: opcodes{0x7a}, - Arguments: arguments{}, - }, - { - Name: "i64.popcnt", - Opcodes: opcodes{0x7b}, - Arguments: arguments{}, - }, - { - Name: "i64.add", - Opcodes: opcodes{0x7c}, - Arguments: arguments{}, - }, - { - Name: "i64.sub", - Opcodes: opcodes{0x7d}, - Arguments: arguments{}, - }, - { - Name: "i64.mul", - Opcodes: opcodes{0x7e}, - Arguments: arguments{}, - }, - { - Name: "i64.div_s", - Opcodes: opcodes{0x7f}, - Arguments: arguments{}, - }, - { - Name: "i64.div_u", - Opcodes: opcodes{0x80}, - Arguments: arguments{}, - }, - { - Name: "i64.rem_s", - Opcodes: opcodes{0x81}, - Arguments: arguments{}, - }, - { - Name: "i64.rem_u", - Opcodes: 
opcodes{0x82}, - Arguments: arguments{}, - }, - { - Name: "i64.and", - Opcodes: opcodes{0x83}, - Arguments: arguments{}, - }, - { - Name: "i64.or", - Opcodes: opcodes{0x84}, - Arguments: arguments{}, - }, - { - Name: "i64.xor", - Opcodes: opcodes{0x85}, - Arguments: arguments{}, - }, - { - Name: "i64.shl", - Opcodes: opcodes{0x86}, - Arguments: arguments{}, - }, - { - Name: "i64.shr_s", - Opcodes: opcodes{0x87}, - Arguments: arguments{}, - }, - { - Name: "i64.shr_u", - Opcodes: opcodes{0x88}, - Arguments: arguments{}, - }, - { - Name: "i64.rotl", - Opcodes: opcodes{0x89}, - Arguments: arguments{}, - }, - { - Name: "i64.rotr", - Opcodes: opcodes{0x8a}, - Arguments: arguments{}, - }, - - { - Name: "i32.wrap_i64", - Opcodes: opcodes{0xa7}, - Arguments: arguments{}, - }, - - { - Name: "i64.extend_i32_s", - Opcodes: opcodes{0xac}, - Arguments: arguments{}, - }, - { - Name: "i64.extend_i32_u", - Opcodes: opcodes{0xad}, - Arguments: arguments{}, - }, - }) -} diff --git a/compiler/wasm/import.go b/compiler/wasm/import.go deleted file mode 100644 index 095f7dc0d6..0000000000 --- a/compiler/wasm/import.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package wasm - -import "fmt" - -// Import represents an import -type Import struct { - Module string - Name string - // TODO: add support for tables, memories, and globals. adjust name section! - TypeIndex uint32 -} - -func (imp Import) FullName() string { - return fmt.Sprintf("%s.%s", imp.Module, imp.Name) -} - -// importIndicator is the byte used to indicate the kind of import in the WASM binary -type importIndicator byte - -const ( - // importIndicatorFunction is the byte used to indicate the import of a function in the WASM binary - importIndicatorFunction importIndicator = 0x0 -) diff --git a/compiler/wasm/instruction.go b/compiler/wasm/instruction.go deleted file mode 100644 index 57d44c7132..0000000000 --- a/compiler/wasm/instruction.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -// Instruction represents an instruction in the code of a WASM binary -type Instruction interface { - isInstruction() - write(*WASMWriter) error -} diff --git a/compiler/wasm/instructions.go b/compiler/wasm/instructions.go deleted file mode 100644 index e4cad104dc..0000000000 --- a/compiler/wasm/instructions.go +++ /dev/null @@ -1,1981 +0,0 @@ -// Code generated by utils/version. DO NOT EDIT. 
-/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//go:generate go run ./gen/main.go -//go:generate go fmt $GOFILE - -package wasm - -import ( - "io" -) - -// InstructionUnreachable is the 'unreachable' instruction -type InstructionUnreachable struct{} - -func (InstructionUnreachable) isInstruction() {} - -func (i InstructionUnreachable) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeUnreachable) - if err != nil { - return err - } - - return nil -} - -// InstructionNop is the 'nop' instruction -type InstructionNop struct{} - -func (InstructionNop) isInstruction() {} - -func (i InstructionNop) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeNop) - if err != nil { - return err - } - - return nil -} - -// InstructionBlock is the 'block' instruction -type InstructionBlock struct { - Block Block -} - -func (InstructionBlock) isInstruction() {} - -func (i InstructionBlock) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeBlock) - if err != nil { - return err - } - - block := i.Block - err = w.writeBlockInstructionArgument(block, false) - if err != nil { - return err - } - - return nil -} - -// InstructionLoop is the 'loop' instruction -type InstructionLoop struct { - Block Block -} - -func (InstructionLoop) isInstruction() {} - -func (i InstructionLoop) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeLoop) - 
if err != nil { - return err - } - - block := i.Block - err = w.writeBlockInstructionArgument(block, false) - if err != nil { - return err - } - - return nil -} - -// InstructionIf is the 'if' instruction -type InstructionIf struct { - Block Block -} - -func (InstructionIf) isInstruction() {} - -func (i InstructionIf) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeIf) - if err != nil { - return err - } - - block := i.Block - err = w.writeBlockInstructionArgument(block, true) - if err != nil { - return err - } - - return nil -} - -// InstructionEnd is the 'end' instruction -type InstructionEnd struct{} - -func (InstructionEnd) isInstruction() {} - -func (i InstructionEnd) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeEnd) - if err != nil { - return err - } - - return nil -} - -// InstructionBr is the 'br' instruction -type InstructionBr struct { - LabelIndex uint32 -} - -func (InstructionBr) isInstruction() {} - -func (i InstructionBr) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeBr) - if err != nil { - return err - } - - labelIndex := i.LabelIndex - err = w.buf.writeUint32LEB128(labelIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionBrIf is the 'br_if' instruction -type InstructionBrIf struct { - LabelIndex uint32 -} - -func (InstructionBrIf) isInstruction() {} - -func (i InstructionBrIf) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeBrIf) - if err != nil { - return err - } - - labelIndex := i.LabelIndex - err = w.buf.writeUint32LEB128(labelIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionBrTable is the 'br_table' instruction -type InstructionBrTable struct { - LabelIndices []uint32 - DefaultLabelIndex uint32 -} - -func (InstructionBrTable) isInstruction() {} - -func (i InstructionBrTable) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeBrTable) - if err != nil { - return err - } - - labelIndices := i.LabelIndices - labelIndicesCount := len(labelIndices) 
- err = w.buf.writeUint32LEB128(uint32(labelIndicesCount)) - if err != nil { - return err - } - - for i := 0; i < labelIndicesCount; i++ { - labelIndicesElement := labelIndices[i] - err = w.buf.writeUint32LEB128(labelIndicesElement) - if err != nil { - return err - } - } - - defaultLabelIndex := i.DefaultLabelIndex - err = w.buf.writeUint32LEB128(defaultLabelIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionReturn is the 'return' instruction -type InstructionReturn struct{} - -func (InstructionReturn) isInstruction() {} - -func (i InstructionReturn) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeReturn) - if err != nil { - return err - } - - return nil -} - -// InstructionCall is the 'call' instruction -type InstructionCall struct { - FuncIndex uint32 -} - -func (InstructionCall) isInstruction() {} - -func (i InstructionCall) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeCall) - if err != nil { - return err - } - - funcIndex := i.FuncIndex - err = w.buf.writeUint32LEB128(funcIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionCallIndirect is the 'call_indirect' instruction -type InstructionCallIndirect struct { - TypeIndex uint32 - TableIndex uint32 -} - -func (InstructionCallIndirect) isInstruction() {} - -func (i InstructionCallIndirect) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeCallIndirect) - if err != nil { - return err - } - - typeIndex := i.TypeIndex - err = w.buf.writeUint32LEB128(typeIndex) - if err != nil { - return err - } - - tableIndex := i.TableIndex - err = w.buf.writeUint32LEB128(tableIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionRefNull is the 'ref.null' instruction -type InstructionRefNull struct { - TypeIndex uint32 -} - -func (InstructionRefNull) isInstruction() {} - -func (i InstructionRefNull) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeRefNull) - if err != nil { - return err - } - - typeIndex := i.TypeIndex - 
err = w.buf.writeUint32LEB128(typeIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionRefIsNull is the 'ref.is_null' instruction -type InstructionRefIsNull struct{} - -func (InstructionRefIsNull) isInstruction() {} - -func (i InstructionRefIsNull) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeRefIsNull) - if err != nil { - return err - } - - return nil -} - -// InstructionRefFunc is the 'ref.func' instruction -type InstructionRefFunc struct { - FuncIndex uint32 -} - -func (InstructionRefFunc) isInstruction() {} - -func (i InstructionRefFunc) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeRefFunc) - if err != nil { - return err - } - - funcIndex := i.FuncIndex - err = w.buf.writeUint32LEB128(funcIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionDrop is the 'drop' instruction -type InstructionDrop struct{} - -func (InstructionDrop) isInstruction() {} - -func (i InstructionDrop) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeDrop) - if err != nil { - return err - } - - return nil -} - -// InstructionSelect is the 'select' instruction -type InstructionSelect struct{} - -func (InstructionSelect) isInstruction() {} - -func (i InstructionSelect) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeSelect) - if err != nil { - return err - } - - return nil -} - -// InstructionLocalGet is the 'local.get' instruction -type InstructionLocalGet struct { - LocalIndex uint32 -} - -func (InstructionLocalGet) isInstruction() {} - -func (i InstructionLocalGet) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeLocalGet) - if err != nil { - return err - } - - localIndex := i.LocalIndex - err = w.buf.writeUint32LEB128(localIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionLocalSet is the 'local.set' instruction -type InstructionLocalSet struct { - LocalIndex uint32 -} - -func (InstructionLocalSet) isInstruction() {} - -func (i InstructionLocalSet) write(w 
*WASMWriter) error { - err := w.writeOpcode(opcodeLocalSet) - if err != nil { - return err - } - - localIndex := i.LocalIndex - err = w.buf.writeUint32LEB128(localIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionLocalTee is the 'local.tee' instruction -type InstructionLocalTee struct { - LocalIndex uint32 -} - -func (InstructionLocalTee) isInstruction() {} - -func (i InstructionLocalTee) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeLocalTee) - if err != nil { - return err - } - - localIndex := i.LocalIndex - err = w.buf.writeUint32LEB128(localIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionGlobalGet is the 'global.get' instruction -type InstructionGlobalGet struct { - GlobalIndex uint32 -} - -func (InstructionGlobalGet) isInstruction() {} - -func (i InstructionGlobalGet) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeGlobalGet) - if err != nil { - return err - } - - globalIndex := i.GlobalIndex - err = w.buf.writeUint32LEB128(globalIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionGlobalSet is the 'global.set' instruction -type InstructionGlobalSet struct { - GlobalIndex uint32 -} - -func (InstructionGlobalSet) isInstruction() {} - -func (i InstructionGlobalSet) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeGlobalSet) - if err != nil { - return err - } - - globalIndex := i.GlobalIndex - err = w.buf.writeUint32LEB128(globalIndex) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Const is the 'i32.const' instruction -type InstructionI32Const struct { - Value int32 -} - -func (InstructionI32Const) isInstruction() {} - -func (i InstructionI32Const) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Const) - if err != nil { - return err - } - - value := i.Value - err = w.buf.writeInt32LEB128(value) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Const is the 'i64.const' instruction -type 
InstructionI64Const struct { - Value int64 -} - -func (InstructionI64Const) isInstruction() {} - -func (i InstructionI64Const) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Const) - if err != nil { - return err - } - - value := i.Value - err = w.buf.writeInt64LEB128(value) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Eqz is the 'i32.eqz' instruction -type InstructionI32Eqz struct{} - -func (InstructionI32Eqz) isInstruction() {} - -func (i InstructionI32Eqz) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Eqz) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Eq is the 'i32.eq' instruction -type InstructionI32Eq struct{} - -func (InstructionI32Eq) isInstruction() {} - -func (i InstructionI32Eq) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Eq) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Ne is the 'i32.ne' instruction -type InstructionI32Ne struct{} - -func (InstructionI32Ne) isInstruction() {} - -func (i InstructionI32Ne) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Ne) - if err != nil { - return err - } - - return nil -} - -// InstructionI32LtS is the 'i32.lt_s' instruction -type InstructionI32LtS struct{} - -func (InstructionI32LtS) isInstruction() {} - -func (i InstructionI32LtS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32LtS) - if err != nil { - return err - } - - return nil -} - -// InstructionI32LtU is the 'i32.lt_u' instruction -type InstructionI32LtU struct{} - -func (InstructionI32LtU) isInstruction() {} - -func (i InstructionI32LtU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32LtU) - if err != nil { - return err - } - - return nil -} - -// InstructionI32GtS is the 'i32.gt_s' instruction -type InstructionI32GtS struct{} - -func (InstructionI32GtS) isInstruction() {} - -func (i InstructionI32GtS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32GtS) - if err != nil { - return 
err - } - - return nil -} - -// InstructionI32GtU is the 'i32.gt_u' instruction -type InstructionI32GtU struct{} - -func (InstructionI32GtU) isInstruction() {} - -func (i InstructionI32GtU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32GtU) - if err != nil { - return err - } - - return nil -} - -// InstructionI32LeS is the 'i32.le_s' instruction -type InstructionI32LeS struct{} - -func (InstructionI32LeS) isInstruction() {} - -func (i InstructionI32LeS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32LeS) - if err != nil { - return err - } - - return nil -} - -// InstructionI32LeU is the 'i32.le_u' instruction -type InstructionI32LeU struct{} - -func (InstructionI32LeU) isInstruction() {} - -func (i InstructionI32LeU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32LeU) - if err != nil { - return err - } - - return nil -} - -// InstructionI32GeS is the 'i32.ge_s' instruction -type InstructionI32GeS struct{} - -func (InstructionI32GeS) isInstruction() {} - -func (i InstructionI32GeS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32GeS) - if err != nil { - return err - } - - return nil -} - -// InstructionI32GeU is the 'i32.ge_u' instruction -type InstructionI32GeU struct{} - -func (InstructionI32GeU) isInstruction() {} - -func (i InstructionI32GeU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32GeU) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Eqz is the 'i64.eqz' instruction -type InstructionI64Eqz struct{} - -func (InstructionI64Eqz) isInstruction() {} - -func (i InstructionI64Eqz) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Eqz) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Eq is the 'i64.eq' instruction -type InstructionI64Eq struct{} - -func (InstructionI64Eq) isInstruction() {} - -func (i InstructionI64Eq) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Eq) - if err != nil { - return err - } - - return nil -} 
- -// InstructionI64Ne is the 'i64.ne' instruction -type InstructionI64Ne struct{} - -func (InstructionI64Ne) isInstruction() {} - -func (i InstructionI64Ne) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Ne) - if err != nil { - return err - } - - return nil -} - -// InstructionI64LtS is the 'i64.lt_s' instruction -type InstructionI64LtS struct{} - -func (InstructionI64LtS) isInstruction() {} - -func (i InstructionI64LtS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64LtS) - if err != nil { - return err - } - - return nil -} - -// InstructionI64LtU is the 'i64.lt_u' instruction -type InstructionI64LtU struct{} - -func (InstructionI64LtU) isInstruction() {} - -func (i InstructionI64LtU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64LtU) - if err != nil { - return err - } - - return nil -} - -// InstructionI64GtS is the 'i64.gt_s' instruction -type InstructionI64GtS struct{} - -func (InstructionI64GtS) isInstruction() {} - -func (i InstructionI64GtS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64GtS) - if err != nil { - return err - } - - return nil -} - -// InstructionI64GtU is the 'i64.gt_u' instruction -type InstructionI64GtU struct{} - -func (InstructionI64GtU) isInstruction() {} - -func (i InstructionI64GtU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64GtU) - if err != nil { - return err - } - - return nil -} - -// InstructionI64LeS is the 'i64.le_s' instruction -type InstructionI64LeS struct{} - -func (InstructionI64LeS) isInstruction() {} - -func (i InstructionI64LeS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64LeS) - if err != nil { - return err - } - - return nil -} - -// InstructionI64LeU is the 'i64.le_u' instruction -type InstructionI64LeU struct{} - -func (InstructionI64LeU) isInstruction() {} - -func (i InstructionI64LeU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64LeU) - if err != nil { - return err - } - - return nil -} - -// InstructionI64GeS 
is the 'i64.ge_s' instruction -type InstructionI64GeS struct{} - -func (InstructionI64GeS) isInstruction() {} - -func (i InstructionI64GeS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64GeS) - if err != nil { - return err - } - - return nil -} - -// InstructionI64GeU is the 'i64.ge_u' instruction -type InstructionI64GeU struct{} - -func (InstructionI64GeU) isInstruction() {} - -func (i InstructionI64GeU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64GeU) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Clz is the 'i32.clz' instruction -type InstructionI32Clz struct{} - -func (InstructionI32Clz) isInstruction() {} - -func (i InstructionI32Clz) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Clz) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Ctz is the 'i32.ctz' instruction -type InstructionI32Ctz struct{} - -func (InstructionI32Ctz) isInstruction() {} - -func (i InstructionI32Ctz) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Ctz) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Popcnt is the 'i32.popcnt' instruction -type InstructionI32Popcnt struct{} - -func (InstructionI32Popcnt) isInstruction() {} - -func (i InstructionI32Popcnt) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Popcnt) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Add is the 'i32.add' instruction -type InstructionI32Add struct{} - -func (InstructionI32Add) isInstruction() {} - -func (i InstructionI32Add) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Add) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Sub is the 'i32.sub' instruction -type InstructionI32Sub struct{} - -func (InstructionI32Sub) isInstruction() {} - -func (i InstructionI32Sub) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Sub) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Mul is 
the 'i32.mul' instruction -type InstructionI32Mul struct{} - -func (InstructionI32Mul) isInstruction() {} - -func (i InstructionI32Mul) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Mul) - if err != nil { - return err - } - - return nil -} - -// InstructionI32DivS is the 'i32.div_s' instruction -type InstructionI32DivS struct{} - -func (InstructionI32DivS) isInstruction() {} - -func (i InstructionI32DivS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32DivS) - if err != nil { - return err - } - - return nil -} - -// InstructionI32DivU is the 'i32.div_u' instruction -type InstructionI32DivU struct{} - -func (InstructionI32DivU) isInstruction() {} - -func (i InstructionI32DivU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32DivU) - if err != nil { - return err - } - - return nil -} - -// InstructionI32RemS is the 'i32.rem_s' instruction -type InstructionI32RemS struct{} - -func (InstructionI32RemS) isInstruction() {} - -func (i InstructionI32RemS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32RemS) - if err != nil { - return err - } - - return nil -} - -// InstructionI32RemU is the 'i32.rem_u' instruction -type InstructionI32RemU struct{} - -func (InstructionI32RemU) isInstruction() {} - -func (i InstructionI32RemU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32RemU) - if err != nil { - return err - } - - return nil -} - -// InstructionI32And is the 'i32.and' instruction -type InstructionI32And struct{} - -func (InstructionI32And) isInstruction() {} - -func (i InstructionI32And) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32And) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Or is the 'i32.or' instruction -type InstructionI32Or struct{} - -func (InstructionI32Or) isInstruction() {} - -func (i InstructionI32Or) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Or) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Xor is the 
'i32.xor' instruction -type InstructionI32Xor struct{} - -func (InstructionI32Xor) isInstruction() {} - -func (i InstructionI32Xor) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Xor) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Shl is the 'i32.shl' instruction -type InstructionI32Shl struct{} - -func (InstructionI32Shl) isInstruction() {} - -func (i InstructionI32Shl) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Shl) - if err != nil { - return err - } - - return nil -} - -// InstructionI32ShrS is the 'i32.shr_s' instruction -type InstructionI32ShrS struct{} - -func (InstructionI32ShrS) isInstruction() {} - -func (i InstructionI32ShrS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32ShrS) - if err != nil { - return err - } - - return nil -} - -// InstructionI32ShrU is the 'i32.shr_u' instruction -type InstructionI32ShrU struct{} - -func (InstructionI32ShrU) isInstruction() {} - -func (i InstructionI32ShrU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32ShrU) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Rotl is the 'i32.rotl' instruction -type InstructionI32Rotl struct{} - -func (InstructionI32Rotl) isInstruction() {} - -func (i InstructionI32Rotl) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Rotl) - if err != nil { - return err - } - - return nil -} - -// InstructionI32Rotr is the 'i32.rotr' instruction -type InstructionI32Rotr struct{} - -func (InstructionI32Rotr) isInstruction() {} - -func (i InstructionI32Rotr) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32Rotr) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Clz is the 'i64.clz' instruction -type InstructionI64Clz struct{} - -func (InstructionI64Clz) isInstruction() {} - -func (i InstructionI64Clz) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Clz) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Ctz is the 
'i64.ctz' instruction -type InstructionI64Ctz struct{} - -func (InstructionI64Ctz) isInstruction() {} - -func (i InstructionI64Ctz) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Ctz) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Popcnt is the 'i64.popcnt' instruction -type InstructionI64Popcnt struct{} - -func (InstructionI64Popcnt) isInstruction() {} - -func (i InstructionI64Popcnt) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Popcnt) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Add is the 'i64.add' instruction -type InstructionI64Add struct{} - -func (InstructionI64Add) isInstruction() {} - -func (i InstructionI64Add) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Add) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Sub is the 'i64.sub' instruction -type InstructionI64Sub struct{} - -func (InstructionI64Sub) isInstruction() {} - -func (i InstructionI64Sub) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Sub) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Mul is the 'i64.mul' instruction -type InstructionI64Mul struct{} - -func (InstructionI64Mul) isInstruction() {} - -func (i InstructionI64Mul) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Mul) - if err != nil { - return err - } - - return nil -} - -// InstructionI64DivS is the 'i64.div_s' instruction -type InstructionI64DivS struct{} - -func (InstructionI64DivS) isInstruction() {} - -func (i InstructionI64DivS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64DivS) - if err != nil { - return err - } - - return nil -} - -// InstructionI64DivU is the 'i64.div_u' instruction -type InstructionI64DivU struct{} - -func (InstructionI64DivU) isInstruction() {} - -func (i InstructionI64DivU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64DivU) - if err != nil { - return err - } - - return nil -} - -// InstructionI64RemS 
is the 'i64.rem_s' instruction -type InstructionI64RemS struct{} - -func (InstructionI64RemS) isInstruction() {} - -func (i InstructionI64RemS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64RemS) - if err != nil { - return err - } - - return nil -} - -// InstructionI64RemU is the 'i64.rem_u' instruction -type InstructionI64RemU struct{} - -func (InstructionI64RemU) isInstruction() {} - -func (i InstructionI64RemU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64RemU) - if err != nil { - return err - } - - return nil -} - -// InstructionI64And is the 'i64.and' instruction -type InstructionI64And struct{} - -func (InstructionI64And) isInstruction() {} - -func (i InstructionI64And) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64And) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Or is the 'i64.or' instruction -type InstructionI64Or struct{} - -func (InstructionI64Or) isInstruction() {} - -func (i InstructionI64Or) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Or) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Xor is the 'i64.xor' instruction -type InstructionI64Xor struct{} - -func (InstructionI64Xor) isInstruction() {} - -func (i InstructionI64Xor) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Xor) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Shl is the 'i64.shl' instruction -type InstructionI64Shl struct{} - -func (InstructionI64Shl) isInstruction() {} - -func (i InstructionI64Shl) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Shl) - if err != nil { - return err - } - - return nil -} - -// InstructionI64ShrS is the 'i64.shr_s' instruction -type InstructionI64ShrS struct{} - -func (InstructionI64ShrS) isInstruction() {} - -func (i InstructionI64ShrS) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64ShrS) - if err != nil { - return err - } - - return nil -} - -// InstructionI64ShrU is the 
'i64.shr_u' instruction -type InstructionI64ShrU struct{} - -func (InstructionI64ShrU) isInstruction() {} - -func (i InstructionI64ShrU) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64ShrU) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Rotl is the 'i64.rotl' instruction -type InstructionI64Rotl struct{} - -func (InstructionI64Rotl) isInstruction() {} - -func (i InstructionI64Rotl) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Rotl) - if err != nil { - return err - } - - return nil -} - -// InstructionI64Rotr is the 'i64.rotr' instruction -type InstructionI64Rotr struct{} - -func (InstructionI64Rotr) isInstruction() {} - -func (i InstructionI64Rotr) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64Rotr) - if err != nil { - return err - } - - return nil -} - -// InstructionI32WrapI64 is the 'i32.wrap_i64' instruction -type InstructionI32WrapI64 struct{} - -func (InstructionI32WrapI64) isInstruction() {} - -func (i InstructionI32WrapI64) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI32WrapI64) - if err != nil { - return err - } - - return nil -} - -// InstructionI64ExtendI32S is the 'i64.extend_i32_s' instruction -type InstructionI64ExtendI32S struct{} - -func (InstructionI64ExtendI32S) isInstruction() {} - -func (i InstructionI64ExtendI32S) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64ExtendI32S) - if err != nil { - return err - } - - return nil -} - -// InstructionI64ExtendI32U is the 'i64.extend_i32_u' instruction -type InstructionI64ExtendI32U struct{} - -func (InstructionI64ExtendI32U) isInstruction() {} - -func (i InstructionI64ExtendI32U) write(w *WASMWriter) error { - err := w.writeOpcode(opcodeI64ExtendI32U) - if err != nil { - return err - } - - return nil -} - -const ( - // opcodeUnreachable is the opcode for the 'unreachable' instruction - opcodeUnreachable opcode = 0x0 - // opcodeNop is the opcode for the 'nop' instruction - opcodeNop opcode = 0x1 - // 
opcodeBlock is the opcode for the 'block' instruction - opcodeBlock opcode = 0x2 - // opcodeLoop is the opcode for the 'loop' instruction - opcodeLoop opcode = 0x3 - // opcodeIf is the opcode for the 'if' instruction - opcodeIf opcode = 0x4 - // opcodeEnd is the opcode for the 'end' instruction - opcodeEnd opcode = 0xb - // opcodeBr is the opcode for the 'br' instruction - opcodeBr opcode = 0xc - // opcodeBrIf is the opcode for the 'br_if' instruction - opcodeBrIf opcode = 0xd - // opcodeBrTable is the opcode for the 'br_table' instruction - opcodeBrTable opcode = 0xe - // opcodeReturn is the opcode for the 'return' instruction - opcodeReturn opcode = 0xf - // opcodeCall is the opcode for the 'call' instruction - opcodeCall opcode = 0x10 - // opcodeCallIndirect is the opcode for the 'call_indirect' instruction - opcodeCallIndirect opcode = 0x11 - // opcodeRefNull is the opcode for the 'ref.null' instruction - opcodeRefNull opcode = 0xd0 - // opcodeRefIsNull is the opcode for the 'ref.is_null' instruction - opcodeRefIsNull opcode = 0xd1 - // opcodeRefFunc is the opcode for the 'ref.func' instruction - opcodeRefFunc opcode = 0xd2 - // opcodeDrop is the opcode for the 'drop' instruction - opcodeDrop opcode = 0x1a - // opcodeSelect is the opcode for the 'select' instruction - opcodeSelect opcode = 0x1b - // opcodeLocalGet is the opcode for the 'local.get' instruction - opcodeLocalGet opcode = 0x20 - // opcodeLocalSet is the opcode for the 'local.set' instruction - opcodeLocalSet opcode = 0x21 - // opcodeLocalTee is the opcode for the 'local.tee' instruction - opcodeLocalTee opcode = 0x22 - // opcodeGlobalGet is the opcode for the 'global.get' instruction - opcodeGlobalGet opcode = 0x23 - // opcodeGlobalSet is the opcode for the 'global.set' instruction - opcodeGlobalSet opcode = 0x24 - // opcodeI32Const is the opcode for the 'i32.const' instruction - opcodeI32Const opcode = 0x41 - // opcodeI64Const is the opcode for the 'i64.const' instruction - opcodeI64Const opcode = 
0x42 - // opcodeI32Eqz is the opcode for the 'i32.eqz' instruction - opcodeI32Eqz opcode = 0x45 - // opcodeI32Eq is the opcode for the 'i32.eq' instruction - opcodeI32Eq opcode = 0x46 - // opcodeI32Ne is the opcode for the 'i32.ne' instruction - opcodeI32Ne opcode = 0x47 - // opcodeI32LtS is the opcode for the 'i32.lt_s' instruction - opcodeI32LtS opcode = 0x48 - // opcodeI32LtU is the opcode for the 'i32.lt_u' instruction - opcodeI32LtU opcode = 0x49 - // opcodeI32GtS is the opcode for the 'i32.gt_s' instruction - opcodeI32GtS opcode = 0x4a - // opcodeI32GtU is the opcode for the 'i32.gt_u' instruction - opcodeI32GtU opcode = 0x4b - // opcodeI32LeS is the opcode for the 'i32.le_s' instruction - opcodeI32LeS opcode = 0x4c - // opcodeI32LeU is the opcode for the 'i32.le_u' instruction - opcodeI32LeU opcode = 0x4d - // opcodeI32GeS is the opcode for the 'i32.ge_s' instruction - opcodeI32GeS opcode = 0x4e - // opcodeI32GeU is the opcode for the 'i32.ge_u' instruction - opcodeI32GeU opcode = 0x4f - // opcodeI64Eqz is the opcode for the 'i64.eqz' instruction - opcodeI64Eqz opcode = 0x50 - // opcodeI64Eq is the opcode for the 'i64.eq' instruction - opcodeI64Eq opcode = 0x51 - // opcodeI64Ne is the opcode for the 'i64.ne' instruction - opcodeI64Ne opcode = 0x52 - // opcodeI64LtS is the opcode for the 'i64.lt_s' instruction - opcodeI64LtS opcode = 0x53 - // opcodeI64LtU is the opcode for the 'i64.lt_u' instruction - opcodeI64LtU opcode = 0x54 - // opcodeI64GtS is the opcode for the 'i64.gt_s' instruction - opcodeI64GtS opcode = 0x55 - // opcodeI64GtU is the opcode for the 'i64.gt_u' instruction - opcodeI64GtU opcode = 0x56 - // opcodeI64LeS is the opcode for the 'i64.le_s' instruction - opcodeI64LeS opcode = 0x57 - // opcodeI64LeU is the opcode for the 'i64.le_u' instruction - opcodeI64LeU opcode = 0x58 - // opcodeI64GeS is the opcode for the 'i64.ge_s' instruction - opcodeI64GeS opcode = 0x59 - // opcodeI64GeU is the opcode for the 'i64.ge_u' instruction - opcodeI64GeU 
opcode = 0x5a - // opcodeI32Clz is the opcode for the 'i32.clz' instruction - opcodeI32Clz opcode = 0x67 - // opcodeI32Ctz is the opcode for the 'i32.ctz' instruction - opcodeI32Ctz opcode = 0x68 - // opcodeI32Popcnt is the opcode for the 'i32.popcnt' instruction - opcodeI32Popcnt opcode = 0x69 - // opcodeI32Add is the opcode for the 'i32.add' instruction - opcodeI32Add opcode = 0x6a - // opcodeI32Sub is the opcode for the 'i32.sub' instruction - opcodeI32Sub opcode = 0x6b - // opcodeI32Mul is the opcode for the 'i32.mul' instruction - opcodeI32Mul opcode = 0x6c - // opcodeI32DivS is the opcode for the 'i32.div_s' instruction - opcodeI32DivS opcode = 0x6d - // opcodeI32DivU is the opcode for the 'i32.div_u' instruction - opcodeI32DivU opcode = 0x6e - // opcodeI32RemS is the opcode for the 'i32.rem_s' instruction - opcodeI32RemS opcode = 0x6f - // opcodeI32RemU is the opcode for the 'i32.rem_u' instruction - opcodeI32RemU opcode = 0x70 - // opcodeI32And is the opcode for the 'i32.and' instruction - opcodeI32And opcode = 0x71 - // opcodeI32Or is the opcode for the 'i32.or' instruction - opcodeI32Or opcode = 0x72 - // opcodeI32Xor is the opcode for the 'i32.xor' instruction - opcodeI32Xor opcode = 0x73 - // opcodeI32Shl is the opcode for the 'i32.shl' instruction - opcodeI32Shl opcode = 0x74 - // opcodeI32ShrS is the opcode for the 'i32.shr_s' instruction - opcodeI32ShrS opcode = 0x75 - // opcodeI32ShrU is the opcode for the 'i32.shr_u' instruction - opcodeI32ShrU opcode = 0x76 - // opcodeI32Rotl is the opcode for the 'i32.rotl' instruction - opcodeI32Rotl opcode = 0x77 - // opcodeI32Rotr is the opcode for the 'i32.rotr' instruction - opcodeI32Rotr opcode = 0x78 - // opcodeI64Clz is the opcode for the 'i64.clz' instruction - opcodeI64Clz opcode = 0x79 - // opcodeI64Ctz is the opcode for the 'i64.ctz' instruction - opcodeI64Ctz opcode = 0x7a - // opcodeI64Popcnt is the opcode for the 'i64.popcnt' instruction - opcodeI64Popcnt opcode = 0x7b - // opcodeI64Add is the 
opcode for the 'i64.add' instruction - opcodeI64Add opcode = 0x7c - // opcodeI64Sub is the opcode for the 'i64.sub' instruction - opcodeI64Sub opcode = 0x7d - // opcodeI64Mul is the opcode for the 'i64.mul' instruction - opcodeI64Mul opcode = 0x7e - // opcodeI64DivS is the opcode for the 'i64.div_s' instruction - opcodeI64DivS opcode = 0x7f - // opcodeI64DivU is the opcode for the 'i64.div_u' instruction - opcodeI64DivU opcode = 0x80 - // opcodeI64RemS is the opcode for the 'i64.rem_s' instruction - opcodeI64RemS opcode = 0x81 - // opcodeI64RemU is the opcode for the 'i64.rem_u' instruction - opcodeI64RemU opcode = 0x82 - // opcodeI64And is the opcode for the 'i64.and' instruction - opcodeI64And opcode = 0x83 - // opcodeI64Or is the opcode for the 'i64.or' instruction - opcodeI64Or opcode = 0x84 - // opcodeI64Xor is the opcode for the 'i64.xor' instruction - opcodeI64Xor opcode = 0x85 - // opcodeI64Shl is the opcode for the 'i64.shl' instruction - opcodeI64Shl opcode = 0x86 - // opcodeI64ShrS is the opcode for the 'i64.shr_s' instruction - opcodeI64ShrS opcode = 0x87 - // opcodeI64ShrU is the opcode for the 'i64.shr_u' instruction - opcodeI64ShrU opcode = 0x88 - // opcodeI64Rotl is the opcode for the 'i64.rotl' instruction - opcodeI64Rotl opcode = 0x89 - // opcodeI64Rotr is the opcode for the 'i64.rotr' instruction - opcodeI64Rotr opcode = 0x8a - // opcodeI32WrapI64 is the opcode for the 'i32.wrap_i64' instruction - opcodeI32WrapI64 opcode = 0xa7 - // opcodeI64ExtendI32S is the opcode for the 'i64.extend_i32_s' instruction - opcodeI64ExtendI32S opcode = 0xac - // opcodeI64ExtendI32U is the opcode for the 'i64.extend_i32_u' instruction - opcodeI64ExtendI32U opcode = 0xad -) - -// readInstruction reads an instruction in the WASM binary -func (r *WASMReader) readInstruction() (Instruction, error) { - opcodeOffset := r.buf.offset - b, err := r.buf.ReadByte() - - c := opcode(b) - - if err != nil { - if err == io.EOF { - return nil, MissingEndInstructionError{ - Offset: 
int(opcodeOffset), - } - } else { - return nil, InvalidOpcodeError{ - Offset: int(opcodeOffset), - Opcode: c, - ReadError: err, - } - } - } - - switch c { - case opcodeBlock: - block, err := r.readBlockInstructionArgument(false) - if err != nil { - return nil, err - } - - return InstructionBlock{ - Block: block, - }, nil - - case opcodeBr: - labelIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionBr{ - LabelIndex: labelIndex, - }, nil - - case opcodeBrIf: - labelIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionBrIf{ - LabelIndex: labelIndex, - }, nil - - case opcodeBrTable: - labelIndicesCountOffset := r.buf.offset - labelIndicesCount, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidInstructionVectorArgumentCountError{ - Offset: int(labelIndicesCountOffset), - ReadError: err, - } - } - - labelIndices := make([]uint32, labelIndicesCount) - - for i := uint32(0); i < labelIndicesCount; i++ { - labelIndicesElement, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - labelIndices[i] = labelIndicesElement - } - - defaultLabelIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionBrTable{ - LabelIndices: labelIndices, - DefaultLabelIndex: defaultLabelIndex, - }, nil - - case opcodeCall: - funcIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionCall{ - FuncIndex: funcIndex, - }, nil - - case opcodeCallIndirect: - typeIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - tableIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionCallIndirect{ - TypeIndex: typeIndex, - TableIndex: tableIndex, - }, nil - - case opcodeDrop: - return InstructionDrop{}, nil - - 
case opcodeEnd: - return InstructionEnd{}, nil - - case opcodeGlobalGet: - globalIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionGlobalGet{ - GlobalIndex: globalIndex, - }, nil - - case opcodeGlobalSet: - globalIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionGlobalSet{ - GlobalIndex: globalIndex, - }, nil - - case opcodeI32Add: - return InstructionI32Add{}, nil - - case opcodeI32And: - return InstructionI32And{}, nil - - case opcodeI32Clz: - return InstructionI32Clz{}, nil - - case opcodeI32Const: - value, err := r.readInt32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionI32Const{ - Value: value, - }, nil - - case opcodeI32Ctz: - return InstructionI32Ctz{}, nil - - case opcodeI32DivS: - return InstructionI32DivS{}, nil - - case opcodeI32DivU: - return InstructionI32DivU{}, nil - - case opcodeI32Eq: - return InstructionI32Eq{}, nil - - case opcodeI32Eqz: - return InstructionI32Eqz{}, nil - - case opcodeI32GeS: - return InstructionI32GeS{}, nil - - case opcodeI32GeU: - return InstructionI32GeU{}, nil - - case opcodeI32GtS: - return InstructionI32GtS{}, nil - - case opcodeI32GtU: - return InstructionI32GtU{}, nil - - case opcodeI32LeS: - return InstructionI32LeS{}, nil - - case opcodeI32LeU: - return InstructionI32LeU{}, nil - - case opcodeI32LtS: - return InstructionI32LtS{}, nil - - case opcodeI32LtU: - return InstructionI32LtU{}, nil - - case opcodeI32Mul: - return InstructionI32Mul{}, nil - - case opcodeI32Ne: - return InstructionI32Ne{}, nil - - case opcodeI32Or: - return InstructionI32Or{}, nil - - case opcodeI32Popcnt: - return InstructionI32Popcnt{}, nil - - case opcodeI32RemS: - return InstructionI32RemS{}, nil - - case opcodeI32RemU: - return InstructionI32RemU{}, nil - - case opcodeI32Rotl: - return InstructionI32Rotl{}, nil - - case opcodeI32Rotr: - return InstructionI32Rotr{}, nil 
- - case opcodeI32Shl: - return InstructionI32Shl{}, nil - - case opcodeI32ShrS: - return InstructionI32ShrS{}, nil - - case opcodeI32ShrU: - return InstructionI32ShrU{}, nil - - case opcodeI32Sub: - return InstructionI32Sub{}, nil - - case opcodeI32WrapI64: - return InstructionI32WrapI64{}, nil - - case opcodeI32Xor: - return InstructionI32Xor{}, nil - - case opcodeI64Add: - return InstructionI64Add{}, nil - - case opcodeI64And: - return InstructionI64And{}, nil - - case opcodeI64Clz: - return InstructionI64Clz{}, nil - - case opcodeI64Const: - value, err := r.readInt64LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionI64Const{ - Value: value, - }, nil - - case opcodeI64Ctz: - return InstructionI64Ctz{}, nil - - case opcodeI64DivS: - return InstructionI64DivS{}, nil - - case opcodeI64DivU: - return InstructionI64DivU{}, nil - - case opcodeI64Eq: - return InstructionI64Eq{}, nil - - case opcodeI64Eqz: - return InstructionI64Eqz{}, nil - - case opcodeI64ExtendI32S: - return InstructionI64ExtendI32S{}, nil - - case opcodeI64ExtendI32U: - return InstructionI64ExtendI32U{}, nil - - case opcodeI64GeS: - return InstructionI64GeS{}, nil - - case opcodeI64GeU: - return InstructionI64GeU{}, nil - - case opcodeI64GtS: - return InstructionI64GtS{}, nil - - case opcodeI64GtU: - return InstructionI64GtU{}, nil - - case opcodeI64LeS: - return InstructionI64LeS{}, nil - - case opcodeI64LeU: - return InstructionI64LeU{}, nil - - case opcodeI64LtS: - return InstructionI64LtS{}, nil - - case opcodeI64LtU: - return InstructionI64LtU{}, nil - - case opcodeI64Mul: - return InstructionI64Mul{}, nil - - case opcodeI64Ne: - return InstructionI64Ne{}, nil - - case opcodeI64Or: - return InstructionI64Or{}, nil - - case opcodeI64Popcnt: - return InstructionI64Popcnt{}, nil - - case opcodeI64RemS: - return InstructionI64RemS{}, nil - - case opcodeI64RemU: - return InstructionI64RemU{}, nil - - case opcodeI64Rotl: - return InstructionI64Rotl{}, nil - - 
case opcodeI64Rotr: - return InstructionI64Rotr{}, nil - - case opcodeI64Shl: - return InstructionI64Shl{}, nil - - case opcodeI64ShrS: - return InstructionI64ShrS{}, nil - - case opcodeI64ShrU: - return InstructionI64ShrU{}, nil - - case opcodeI64Sub: - return InstructionI64Sub{}, nil - - case opcodeI64Xor: - return InstructionI64Xor{}, nil - - case opcodeIf: - block, err := r.readBlockInstructionArgument(true) - if err != nil { - return nil, err - } - - return InstructionIf{ - Block: block, - }, nil - - case opcodeLocalGet: - localIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionLocalGet{ - LocalIndex: localIndex, - }, nil - - case opcodeLocalSet: - localIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionLocalSet{ - LocalIndex: localIndex, - }, nil - - case opcodeLocalTee: - localIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionLocalTee{ - LocalIndex: localIndex, - }, nil - - case opcodeLoop: - block, err := r.readBlockInstructionArgument(false) - if err != nil { - return nil, err - } - - return InstructionLoop{ - Block: block, - }, nil - - case opcodeNop: - return InstructionNop{}, nil - - case opcodeRefFunc: - funcIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionRefFunc{ - FuncIndex: funcIndex, - }, nil - - case opcodeRefIsNull: - return InstructionRefIsNull{}, nil - - case opcodeRefNull: - typeIndex, err := r.readUint32LEB128InstructionArgument() - if err != nil { - return nil, err - } - - return InstructionRefNull{ - TypeIndex: typeIndex, - }, nil - - case opcodeReturn: - return InstructionReturn{}, nil - - case opcodeSelect: - return InstructionSelect{}, nil - - case opcodeUnreachable: - return InstructionUnreachable{}, nil - - default: - return nil, InvalidOpcodeError{ - Offset: 
int(opcodeOffset), - Opcode: c, - ReadError: err, - } - } - -} diff --git a/compiler/wasm/leb128.go b/compiler/wasm/leb128.go deleted file mode 100644 index cbba8d7f74..0000000000 --- a/compiler/wasm/leb128.go +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -import ( - "fmt" -) - -// max32bitLEB128ByteCount is the maximum number of bytes a 32-bit integer -// (signed or unsigned) may be encoded as. From -// https://webassembly.github.io/spec/core/binary/values.html#binary-int: -// -// "the total number of bytes encoding a value of type uN must not exceed ceil(N/7) bytes" -// "the total number of bytes encoding a value of type sN must not exceed ceil(N/7) bytes" -const max32bitLEB128ByteCount = 5 - -// max64bitLEB128ByteCount is the maximum number of bytes a 64-bit integer -// (signed or unsigned) may be encoded as. 
From -// https://webassembly.github.io/spec/core/binary/values.html#binary-int: -// -// "the total number of bytes encoding a value of type uN must not exceed ceil(N/7) bytes" -// "the total number of bytes encoding a value of type sN must not exceed ceil(N/7) bytes" -const max64bitLEB128ByteCount = 10 - -// writeUint32LEB128 encodes and writes the given unsigned 32-bit integer -// in canonical (with the fewest bytes possible) unsigned little endian base 128 format -func (buf *Buffer) writeUint32LEB128(v uint32) error { - if v < 128 { - err := buf.WriteByte(uint8(v)) - if err != nil { - return err - } - return nil - } - more := true - for more { - // low order 7 bits of value - c := uint8(v & 0x7f) - v >>= 7 - // more bits to come? - more = v != 0 - if more { - // set high order bit of byte - c |= 0x80 - } - // emit byte - err := buf.WriteByte(c) - if err != nil { - return err - } - } - return nil -} - -// writeUint64LEB128 encodes and writes the given unsigned 64-bit integer -// in canonical (with the fewest bytes possible) unsigned little endian base 128 format -func (buf *Buffer) writeUint64LEB128(v uint64) error { - if v < 128 { - err := buf.WriteByte(uint8(v)) - if err != nil { - return err - } - return nil - } - more := true - for more { - // low order 7 bits of value - c := uint8(v & 0x7f) - v >>= 7 - // more bits to come? 
- more = v != 0 - if more { - // set high order bit of byte - c |= 0x80 - } - // emit byte - err := buf.WriteByte(c) - if err != nil { - return err - } - } - return nil -} - -// writeUint32LEB128FixedLength encodes and writes the given unsigned 32-bit integer -// in non-canonical (fixed-size, instead of with the fewest bytes possible) -// unsigned little endian base 128 format -func (buf *Buffer) writeUint32LEB128FixedLength(v uint32, length int) error { - for i := 0; i < length; i++ { - c := uint8(v & 0x7f) - v >>= 7 - if i < length-1 { - c |= 0x80 - } - err := buf.WriteByte(c) - if err != nil { - return err - } - } - if v != 0 { - return fmt.Errorf("writeUint32LEB128FixedLength: length too small: %d", length) - } - return nil -} - -// readUint32LEB128 reads and decodes an unsigned 32-bit integer -func (buf *Buffer) readUint32LEB128() (uint32, error) { - var result uint32 - var shift, i uint - // only read up to maximum number of bytes - for i < max32bitLEB128ByteCount { - b, err := buf.ReadByte() - if err != nil { - return 0, err - } - result |= (uint32(b & 0x7F)) << shift - // check high order bit of byte - if b&0x80 == 0 { - break - } - shift += 7 - i++ - } - return result, nil -} - -// readUint64LEB128 reads and decodes an unsigned 64-bit integer -func (buf *Buffer) readUint64LEB128() (uint64, error) { - var result uint64 - var shift, i uint - // only read up to maximum number of bytes - for i < max64bitLEB128ByteCount { - b, err := buf.ReadByte() - if err != nil { - return 0, err - } - result |= (uint64(b & 0x7F)) << shift - // check high order bit of byte - if b&0x80 == 0 { - break - } - shift += 7 - i++ - } - return result, nil -} - -// writeInt32LEB128 encodes and writes the given signed 32-bit integer -// in canonical (with the fewest bytes possible) signed little endian base 128 format -func (buf *Buffer) writeInt32LEB128(v int32) error { - more := true - for more { - // low order 7 bits of value - c := uint8(v & 0x7f) - sign := uint8(v & 0x40) - v >>= 7 
- more = !((v == 0 && sign == 0) || (v == -1 && sign != 0)) - if more { - c |= 0x80 - } - err := buf.WriteByte(c) - if err != nil { - return err - } - } - return nil -} - -// writeInt64LEB128 encodes and writes the given signed 64-bit integer -// in canonical (with the fewest bytes possible) signed little endian base 128 format -func (buf *Buffer) writeInt64LEB128(v int64) error { - more := true - for more { - // low order 7 bits of value - c := uint8(v & 0x7f) - sign := uint8(v & 0x40) - v >>= 7 - more = !((v == 0 && sign == 0) || (v == -1 && sign != 0)) - if more { - c |= 0x80 - } - err := buf.WriteByte(c) - if err != nil { - return err - } - } - return nil -} - -// readInt32LEB128 reads and decodes a signed 32-bit integer -func (buf *Buffer) readInt32LEB128() (int32, error) { - var result int32 - var i uint - var b byte = 0x80 - var signBits int32 = -1 - var err error - for (b&0x80 == 0x80) && i < max32bitLEB128ByteCount { - b, err = buf.ReadByte() - if err != nil { - return 0, err - } - result += int32(b&0x7f) << (i * 7) - signBits <<= 7 - i++ - } - if ((signBits >> 1) & result) != 0 { - result += signBits - } - return result, nil -} - -// readInt64LEB128 reads and decodes a signed 64-bit integer -func (buf *Buffer) readInt64LEB128() (int64, error) { - var result int64 - var i uint - var b byte = 0x80 - var signBits int64 = -1 - var err error - for (b&0x80 == 0x80) && i < max64bitLEB128ByteCount { - b, err = buf.ReadByte() - if err != nil { - return 0, err - } - result += int64(b&0x7f) << (i * 7) - signBits <<= 7 - i++ - } - if ((signBits >> 1) & result) != 0 { - result += signBits - } - return result, nil -} - -// writeFixedUint32LEB128Space writes a non-canonical 5-byte fixed-size space -// (instead of the minimal size if canonical encoding would be used) -func (buf *Buffer) writeFixedUint32LEB128Space() (offset, error) { - off := buf.offset - for i := 0; i < max32bitLEB128ByteCount; i++ { - err := buf.WriteByte(0) - if err != nil { - return 0, err - } - } - 
return off, nil -} - -// writeUint32LEB128SizeAt writes the size, the number of bytes -// between the given offset and the current offset, -// as an uint32 in non-canonical 5-byte fixed-size format -// (instead of the minimal size if canonical encoding would be used) -// at the given offset -func (buf *Buffer) writeUint32LEB128SizeAt(off offset) error { - currentOff := buf.offset - if currentOff < max32bitLEB128ByteCount || currentOff-max32bitLEB128ByteCount < off { - return fmt.Errorf("writeUint32LEB128SizeAt: invalid offset: %d", off) - } - size := uint32(currentOff - off - max32bitLEB128ByteCount) - buf.offset = off - defer func() { - buf.offset = currentOff - }() - return buf.writeUint32LEB128FixedLength(size, max32bitLEB128ByteCount) -} diff --git a/compiler/wasm/leb128_test.go b/compiler/wasm/leb128_test.go deleted file mode 100644 index 6b21ae86a1..0000000000 --- a/compiler/wasm/leb128_test.go +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package wasm - -import ( - "math" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestBuf_Uint32LEB128(t *testing.T) { - - t.Parallel() - - t.Run("DWARF spec + more", func(t *testing.T) { - - t.Parallel() - - // DWARF Debugging Information Format, Version 3, page 140 - - for v, expected := range map[uint32][]byte{ - 0: {0x00}, - 1: {0x01}, - 2: {2}, - 63: {0x3f}, - 64: {0x40}, - 127: {127}, - 128: {0 + 0x80, 1}, - 129: {1 + 0x80, 1}, - 130: {2 + 0x80, 1}, - 0x90: {0x90, 0x01}, - 0x100: {0x80, 0x02}, - 0x101: {0x81, 0x02}, - 0xff: {0xff, 0x01}, - 12857: {57 + 0x80, 100}, - } { - var b Buffer - err := b.writeUint32LEB128(v) - require.NoError(t, err) - require.Equal(t, expected, b.data) - - b.offset = 0 - - actual, err := b.readUint32LEB128() - require.NoError(t, err) - require.Equal(t, v, actual) - } - }) - - t.Run("write: max byte count", func(t *testing.T) { - - t.Parallel() - - // This test ensures that only up to the maximum number of bytes are written - // when writing a LEB128-encoded 32-bit number (see max32bitLEB128ByteCount), - // i.e. test that only up to 5 bytes are written. - - var b Buffer - err := b.writeUint32LEB128(math.MaxUint32) - require.NoError(t, err) - require.GreaterOrEqual(t, max32bitLEB128ByteCount, len(b.data)) - }) - - t.Run("read: max byte count", func(t *testing.T) { - - t.Parallel() - - // This test ensures that only up to the maximum number of bytes are read - // when reading a LEB128-encoded 32-bit number (see max32bitLEB128ByteCount), - // i.e. test that only 5 of the 8 given bytes are read, - // to ensure the LEB128 parser doesn't keep reading infinitely. 
- - b := Buffer{data: []byte{0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88}} - _, err := b.readUint32LEB128() - require.NoError(t, err) - require.Equal(t, offset(max32bitLEB128ByteCount), b.offset) - }) -} - -func TestBuf_Uint64LEB128(t *testing.T) { - - t.Parallel() - - t.Run("DWARF spec + more", func(t *testing.T) { - - t.Parallel() - - // DWARF Debugging Information Format, Version 3, page 140 - - for v, expected := range map[uint64][]byte{ - 0: {0x00}, - 1: {0x01}, - 2: {2}, - 63: {0x3f}, - 64: {0x40}, - 127: {127}, - 128: {0 + 0x80, 1}, - 129: {1 + 0x80, 1}, - 130: {2 + 0x80, 1}, - 0x90: {0x90, 0x01}, - 0x100: {0x80, 0x02}, - 0x101: {0x81, 0x02}, - 0xff: {0xff, 0x01}, - 12857: {57 + 0x80, 100}, - } { - var b Buffer - err := b.writeUint64LEB128(v) - require.NoError(t, err) - require.Equal(t, expected, b.data) - - b.offset = 0 - - actual, err := b.readUint64LEB128() - require.NoError(t, err) - require.Equal(t, v, actual) - } - }) - - t.Run("write: max byte count", func(t *testing.T) { - - t.Parallel() - - var b Buffer - err := b.writeUint64LEB128(math.MaxUint64) - require.NoError(t, err) - require.GreaterOrEqual(t, max64bitLEB128ByteCount, len(b.data)) - }) - - t.Run("read: max byte count", func(t *testing.T) { - - t.Parallel() - - b := Buffer{data: []byte{ - 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, - 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, - }} - _, err := b.readUint64LEB128() - require.NoError(t, err) - require.Equal(t, offset(max64bitLEB128ByteCount), b.offset) - }) -} - -func TestBuf_Int32LEB128(t *testing.T) { - - t.Parallel() - - t.Run("DWARF spec + more", func(t *testing.T) { - - t.Parallel() - - // DWARF Debugging Information Format, Version 3, page 141 - - for v, expected := range map[int32][]byte{ - 0: {0x00}, - 1: {0x01}, - -1: {0x7f}, - 2: {2}, - -2: {0x7e}, - 63: {0x3f}, - -63: {0x41}, - 64: {0xc0, 0x00}, - -64: {0x40}, - -65: {0xbf, 0x7f}, - 127: {127 + 0x80, 0}, - -127: {1 + 0x80, 0x7f}, - 128: {0 + 0x80, 1}, - -128: {0 + 0x80, 
0x7f}, - 129: {1 + 0x80, 1}, - -129: {0x7f + 0x80, 0x7e}, - -12345: {0xc7, 0x9f, 0x7f}, - } { - var b Buffer - err := b.writeInt32LEB128(v) - require.NoError(t, err) - require.Equal(t, expected, b.data) - - b.offset = 0 - - actual, err := b.readInt32LEB128() - require.NoError(t, err) - require.Equal(t, v, actual) - } - }) - - t.Run("write: max byte count", func(t *testing.T) { - - t.Parallel() - - // This test ensures that only up to the maximum number of bytes are written - // when writing a LEB128-encoded 32-bit number (see max32bitLEB128ByteCount), - // i.e. test that only up to 5 bytes are written. - - var b Buffer - err := b.writeInt32LEB128(math.MaxInt32) - require.NoError(t, err) - require.GreaterOrEqual(t, max32bitLEB128ByteCount, len(b.data)) - - var b2 Buffer - err = b2.writeInt32LEB128(math.MinInt32) - require.NoError(t, err) - require.GreaterOrEqual(t, max32bitLEB128ByteCount, len(b.data)) - }) - - t.Run("read: max byte count", func(t *testing.T) { - - t.Parallel() - - // This test ensures that only up to the maximum number of bytes are read - // when reading a LEB128-encoded 32-bit number (see max32bitLEB128ByteCount), - // i.e. test that only 5 of the 8 given bytes are read, - // to ensure the LEB128 parser doesn't keep reading infinitely. 
- - b := Buffer{data: []byte{0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88}} - _, err := b.readInt32LEB128() - require.NoError(t, err) - require.Equal(t, offset(max32bitLEB128ByteCount), b.offset) - }) -} - -func TestBuf_Int64LEB128(t *testing.T) { - - t.Parallel() - - t.Run("DWARF spec + more", func(t *testing.T) { - - t.Parallel() - - // DWARF Debugging Information Format, Version 3, page 141 - - for v, expected := range map[int64][]byte{ - 0: {0x00}, - 1: {0x01}, - -1: {0x7f}, - 2: {2}, - -2: {0x7e}, - 63: {0x3f}, - -63: {0x41}, - 64: {0xc0, 0x00}, - -64: {0x40}, - -65: {0xbf, 0x7f}, - 127: {127 + 0x80, 0}, - -127: {1 + 0x80, 0x7f}, - 128: {0 + 0x80, 1}, - -128: {0 + 0x80, 0x7f}, - 129: {1 + 0x80, 1}, - -129: {0x7f + 0x80, 0x7e}, - -12345: {0xc7, 0x9f, 0x7f}, - } { - var b Buffer - err := b.writeInt64LEB128(v) - require.NoError(t, err) - require.Equal(t, expected, b.data) - - b.offset = 0 - - actual, err := b.readInt64LEB128() - require.NoError(t, err) - require.Equal(t, v, actual) - } - }) - - t.Run("write: max byte count", func(t *testing.T) { - - t.Parallel() - - var b Buffer - err := b.writeInt64LEB128(math.MaxInt64) - require.NoError(t, err) - require.GreaterOrEqual(t, max64bitLEB128ByteCount, len(b.data)) - - var b2 Buffer - err = b2.writeInt64LEB128(math.MinInt64) - require.NoError(t, err) - require.GreaterOrEqual(t, max64bitLEB128ByteCount, len(b.data)) - }) - - t.Run("read: max byte count", func(t *testing.T) { - - t.Parallel() - - b := Buffer{data: []byte{ - 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, - 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, - }} - _, err := b.readInt64LEB128() - require.NoError(t, err) - require.Equal(t, offset(max64bitLEB128ByteCount), b.offset) - }) -} - -func TestBuf_WriteSpaceAndSize(t *testing.T) { - - t.Parallel() - - var b Buffer - - err := b.WriteByte(101) - require.NoError(t, err) - err = b.WriteByte(102) - require.NoError(t, err) - - off, err := b.writeFixedUint32LEB128Space() - require.NoError(t, err) - 
require.Equal(t, offset(2), off) - require.Equal(t, - []byte{ - 101, 102, - 0, 0, 0, 0, 0, - }, - b.data, - ) - - err = b.WriteByte(104) - require.NoError(t, err) - err = b.WriteByte(105) - require.NoError(t, err) - err = b.WriteByte(106) - require.NoError(t, err) - - err = b.writeUint32LEB128SizeAt(off) - require.NoError(t, err) - require.Equal(t, - []byte{ - 101, 102, - 0x83, 0x80, 0x80, 0x80, 0, - 104, 105, 106, - }, - b.data, - ) -} diff --git a/compiler/wasm/magic.go b/compiler/wasm/magic.go deleted file mode 100644 index 2fa2b82aba..0000000000 --- a/compiler/wasm/magic.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -// wasmMagic is the magic byte sequence that appears at the start of the WASM binary. -// -// See https://webassembly.github.io/spec/core/binary/modules.html#binary-module: -// -// The encoding of a module starts with a preamble containing a 4-byte magic number (the string '\0asm') -// -// magic ::= 0x00 0x61 0x73 0x6d -var wasmMagic = []byte{0x00, 0x61, 0x73, 0x6d} - -// wasmVersion is the byte sequence that appears after wasmMagic -// and indicated the version of the WASM binary. -// -// See https://webassembly.github.io/spec/core/binary/modules.html#binary-module: -// -// The encoding of a module starts with [...] a version field. 
-// The current version of the WebAssembly binary format is 1. -// -// version ::= 0x01 0x00 0x00 0x00 -var wasmVersion = []byte{0x01, 0x00, 0x00, 0x00} diff --git a/compiler/wasm/memory.go b/compiler/wasm/memory.go deleted file mode 100644 index c8fa322286..0000000000 --- a/compiler/wasm/memory.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -// MemoryPageSize is the size of a memory page: 64KiB -const MemoryPageSize = 64 * 1024 - -// Memory represents a memory -type Memory struct { - // maximum number of pages (each one is 64KiB in size). 
optional, unlimited if nil - Max *uint32 - // minimum number of pages (each one is 64KiB in size) - Min uint32 -} - -// limitIndicator is the byte used to indicate the kind of limit in the WASM binary -type limitIndicator byte - -const ( - // limitIndicatorNoMax is the byte used to indicate a limit with no maximum in the WASM binary - limitIndicatorNoMax limitIndicator = 0x0 - // limitIndicatorMax is the byte used to indicate a limit with no maximum in the WASM binary - limitIndicatorMax limitIndicator = 0x1 -) diff --git a/compiler/wasm/module.go b/compiler/wasm/module.go deleted file mode 100644 index fb818ad9c3..0000000000 --- a/compiler/wasm/module.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package wasm - -// Module represents a module -type Module struct { - Name string - Types []*FunctionType - Imports []*Import - Functions []*Function - Memories []*Memory - Exports []*Export - StartFunctionIndex *uint32 - Data []*Data -} diff --git a/compiler/wasm/modulebuilder.go b/compiler/wasm/modulebuilder.go deleted file mode 100644 index d0b4d0f11f..0000000000 --- a/compiler/wasm/modulebuilder.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package wasm - -import ( - "errors" - "math" -) - -// ModuleBuilder allows building modules -type ModuleBuilder struct { - functionImports []*Import - types []*FunctionType - functions []*Function - data []*Data - exports []*Export - requiredMemorySize uint32 -} - -func (b *ModuleBuilder) AddFunction(name string, functionType *FunctionType, code *Code) uint32 { - typeIndex := uint32(len(b.types)) - b.types = append(b.types, functionType) - // function indices include function imports - funcIndex := uint32(len(b.functionImports) + len(b.functions)) - b.functions = append( - b.functions, - &Function{ - Name: name, - TypeIndex: typeIndex, - Code: code, - }, - ) - return funcIndex -} - -func (b *ModuleBuilder) AddFunctionImport(module string, name string, functionType *FunctionType) (uint32, error) { - if len(b.functions) > 0 { - return 0, errors.New("cannot add function imports after adding functions") - } - - typeIndex := uint32(len(b.types)) - b.types = append(b.types, functionType) - funcIndex := uint32(len(b.functionImports)) - b.functionImports = append( - b.functionImports, - &Import{ - Module: module, - Name: name, - TypeIndex: typeIndex, - }, - ) - - return funcIndex, nil -} - -func (b *ModuleBuilder) RequireMemory(size uint32) uint32 { - offset := b.requiredMemorySize - b.requiredMemorySize += size - return offset -} - -func (b *ModuleBuilder) AddData(offset uint32, value []byte) { - b.data = append(b.data, &Data{ - // NOTE: currently only one memory is supported - MemoryIndex: 0, - Offset: []Instruction{ - InstructionI32Const{Value: int32(offset)}, - }, - Init: value, - }) -} - -func (b *ModuleBuilder) Build() *Module { - // NOTE: currently only one memory is supported - memories := []*Memory{ - { - Min: uint32(math.Ceil(float64(b.requiredMemorySize) / float64(MemoryPageSize))), - Max: nil, - }, - } - - return &Module{ - Types: b.types, - Imports: b.functionImports, - Functions: b.functions, - Memories: memories, - Data: b.data, - Exports: b.exports, 
- } -} - -func (b *ModuleBuilder) ExportMemory(name string) { - b.AddExport(&Export{ - Name: name, - Descriptor: MemoryExport{ - MemoryIndex: 0, - }, - }) -} - -func (b *ModuleBuilder) AddExport(export *Export) { - b.exports = append(b.exports, export) -} diff --git a/compiler/wasm/opcode.go b/compiler/wasm/opcode.go deleted file mode 100644 index 9a0766597d..0000000000 --- a/compiler/wasm/opcode.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -// opcode is the byte used to indicate a certain instruction in the WASM binary -type opcode byte - -const opcodeElse opcode = 0x05 diff --git a/compiler/wasm/reader.go b/compiler/wasm/reader.go deleted file mode 100644 index a9163d4bb8..0000000000 --- a/compiler/wasm/reader.go +++ /dev/null @@ -1,1236 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -import ( - "io" - "math" - "unicode/utf8" -) - -// WASMReader allows reading WASM binaries -type WASMReader struct { - buf *Buffer - Module Module - lastSectionID sectionID - didReadFunctions bool - didReadCode bool -} - -func NewWASMReader(buf *Buffer) *WASMReader { - return &WASMReader{ - buf: buf, - } -} - -// readMagicAndVersion reads the magic byte sequence and version at the beginning of the WASM binary -// -// See https://webassembly.github.io/spec/core/binary/modules.html#binary-module: -// -// The encoding of a module starts with a preamble containing a 4-byte magic number [...] and a version field. 
-func (r *WASMReader) readMagicAndVersion() error { - - // Read the magic - equal, err := r.buf.ReadBytesEqual(wasmMagic) - if err != nil || !equal { - return InvalidMagicError{ - Offset: int(r.buf.offset), - ReadError: err, - } - } - - // Read the version - equal, err = r.buf.ReadBytesEqual(wasmVersion) - if err != nil || !equal { - return InvalidVersionError{ - Offset: int(r.buf.offset), - ReadError: err, - } - } - - return nil -} - -// readSection reads a section in the WASM binary -func (r *WASMReader) readSection() error { - // read the section ID - sectionIDOffset := r.buf.offset - b, err := r.buf.ReadByte() - - sectionID := sectionID(b) - - if err != nil { - return InvalidSectionIDError{ - SectionID: sectionID, - Offset: int(sectionIDOffset), - ReadError: err, - } - } - - invalidDuplicateSectionError := func() error { - return InvalidDuplicateSectionError{ - SectionID: sectionID, - Offset: int(sectionIDOffset), - } - } - - // "Custom sections may be inserted at any place in this sequence, - // while other sections must occur at most once and in the prescribed order." 
- - if sectionID > 0 && sectionID <= r.lastSectionID { - return InvalidSectionOrderError{ - SectionID: sectionID, - Offset: int(sectionIDOffset), - } - } - - switch sectionID { - case sectionIDCustom: - err = r.readCustomSection() - if err != nil { - return err - } - - case sectionIDType: - if r.Module.Types != nil { - return invalidDuplicateSectionError() - } - - err = r.readTypeSection() - if err != nil { - return err - } - - case sectionIDImport: - if r.Module.Imports != nil { - return invalidDuplicateSectionError() - } - - err = r.readImportSection() - if err != nil { - return err - } - - case sectionIDFunction: - if r.didReadFunctions { - return invalidDuplicateSectionError() - } - - err = r.readFunctionSection() - if err != nil { - return err - } - - r.didReadFunctions = true - - case sectionIDMemory: - if r.Module.Memories != nil { - return invalidDuplicateSectionError() - } - - err = r.readMemorySection() - if err != nil { - return err - } - - case sectionIDExport: - if r.Module.Exports != nil { - return invalidDuplicateSectionError() - } - - err = r.readExportSection() - if err != nil { - return err - } - - case sectionIDStart: - if r.Module.StartFunctionIndex != nil { - return invalidDuplicateSectionError() - } - - err = r.readStartSection() - if err != nil { - return err - } - - case sectionIDCode: - if r.didReadCode { - return invalidDuplicateSectionError() - } - - err = r.readCodeSection() - if err != nil { - return err - } - - r.didReadCode = true - - case sectionIDData: - if r.Module.Data != nil { - return invalidDuplicateSectionError() - } - - err = r.readDataSection() - if err != nil { - return err - } - - default: - return InvalidSectionIDError{ - SectionID: sectionID, - Offset: int(sectionIDOffset), - } - } - - // Keep track of the last read non-custom section ID: - // non-custom sections must appear in order - - if sectionID > 0 { - r.lastSectionID = sectionID - } - - return nil -} - -// readSectionSize reads the content size of a section -func 
(r *WASMReader) readSectionSize() (uint32, error) { - // read the size - sizeOffset := r.buf.offset - size, err := r.buf.readUint32LEB128() - if err != nil { - return 0, InvalidSectionSizeError{ - Offset: int(sizeOffset), - ReadError: err, - } - } - - return size, nil -} - -// readTypeSection reads the section that declares all function types -// so they can be referenced by index -func (r *WASMReader) readTypeSection() error { - - _, err := r.readSectionSize() - if err != nil { - return err - } - - // read the number of types - countOffset := r.buf.offset - count, err := r.buf.readUint32LEB128() - if err != nil { - return InvalidTypeSectionTypeCountError{ - Offset: int(countOffset), - ReadError: err, - } - } - - funcTypes := make([]*FunctionType, count) - - // read each type - for i := uint32(0); i < count; i++ { - funcType, err := r.readFuncType() - if err != nil { - return err - } - funcTypes[i] = funcType - } - - r.Module.Types = funcTypes - - return nil -} - -// readFuncType reads a function type -func (r *WASMReader) readFuncType() (*FunctionType, error) { - // read the function type indicator - funcTypeIndicatorOffset := r.buf.offset - funcTypeIndicator, err := r.buf.ReadByte() - if err != nil || funcTypeIndicator != functionTypeIndicator { - return nil, InvalidFuncTypeIndicatorError{ - Offset: int(funcTypeIndicatorOffset), - FuncTypeIndicator: funcTypeIndicator, - ReadError: err, - } - } - - // read the number of parameters - parameterCountOffset := r.buf.offset - parameterCount, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidFuncTypeParameterCountError{ - Offset: int(parameterCountOffset), - ReadError: err, - } - } - - // read the type of each parameter - - var parameterTypes []ValueType - if parameterCount > 0 { - parameterTypes = make([]ValueType, parameterCount) - - for i := uint32(0); i < parameterCount; i++ { - parameterType, err := r.readValType() - if err != nil { - return nil, InvalidFuncTypeParameterTypeError{ - Index: 
int(i), - ReadError: err, - } - } - parameterTypes[i] = parameterType - } - } - - // read the number of results - resultCountOffset := r.buf.offset - resultCount, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidFuncTypeResultCountError{ - Offset: int(resultCountOffset), - ReadError: err, - } - } - - // read the type of each result - - var resultTypes []ValueType - if resultCount > 0 { - resultTypes = make([]ValueType, resultCount) - for i := uint32(0); i < resultCount; i++ { - resultType, err := r.readValType() - if err != nil { - return nil, InvalidFuncTypeResultTypeError{ - Index: int(i), - ReadError: err, - } - } - resultTypes[i] = resultType - } - } - - return &FunctionType{ - Params: parameterTypes, - Results: resultTypes, - }, nil -} - -// readValType reads a value type -func (r *WASMReader) readValType() (ValueType, error) { - valTypeOffset := r.buf.offset - b, err := r.buf.ReadByte() - - valType := ValueType(b) - - if err != nil { - return 0, InvalidValTypeError{ - Offset: int(valTypeOffset), - ValType: valType, - ReadError: err, - } - } - - switch valType { - case ValueTypeI32, ValueTypeI64: - return valType, nil - } - - return 0, InvalidValTypeError{ - Offset: int(valTypeOffset), - ValType: valType, - } -} - -// readImportSection reads the section that declares the imports -func (r *WASMReader) readImportSection() error { - - _, err := r.readSectionSize() - if err != nil { - return err - } - - // read the number of imports - countOffset := r.buf.offset - count, err := r.buf.readUint32LEB128() - if err != nil { - return InvalidImportSectionImportCountError{ - Offset: int(countOffset), - ReadError: err, - } - } - - imports := make([]*Import, count) - - // read each import - for i := uint32(0); i < count; i++ { - im, err := r.readImport() - if err != nil { - return InvalidImportError{ - Index: int(i), - ReadError: err, - } - } - imports[i] = im - } - - r.Module.Imports = imports - - return nil -} - -// readImport reads an import in the 
import section -func (r *WASMReader) readImport() (*Import, error) { - - // read the module - module, err := r.readName() - if err != nil { - return nil, err - } - - // read the name - name, err := r.readName() - if err != nil { - return nil, err - } - - // read the type indicator - indicatorOffset := r.buf.offset - b, err := r.buf.ReadByte() - - indicator := importIndicator(b) - - // TODO: add support for tables, memories, and globals. adjust name section! - if err != nil || indicator != importIndicatorFunction { - return nil, InvalidImportIndicatorError{ - ImportIndicator: indicator, - Offset: int(indicatorOffset), - ReadError: err, - } - } - - // read the type index - typeIndexOffset := r.buf.offset - typeIndex, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidImportSectionTypeIndexError{ - Offset: int(typeIndexOffset), - ReadError: err, - } - } - - return &Import{ - Module: module, - Name: name, - TypeIndex: typeIndex, - }, nil -} - -// readFunctionSection reads the section that declares the types of functions. 
-// The bodies of these functions will later be provided in the code section -func (r *WASMReader) readFunctionSection() error { - - _, err := r.readSectionSize() - if err != nil { - return err - } - - // read the number of functions - countOffset := r.buf.offset - count, err := r.buf.readUint32LEB128() - if err != nil { - return InvalidFunctionSectionFunctionCountError{ - Offset: int(countOffset), - ReadError: err, - } - } - - functionTypeIndices := make([]uint32, count) - - // read the type index for each function - for i := uint32(0); i < count; i++ { - typeIndexOffset := r.buf.offset - typeIndex, err := r.buf.readUint32LEB128() - if err != nil { - return InvalidFunctionSectionTypeIndexError{ - Index: int(i), - Offset: int(typeIndexOffset), - ReadError: err, - } - } - functionTypeIndices[i] = typeIndex - } - - if !r.ensureModuleFunctions(len(functionTypeIndices)) { - return FunctionCountMismatchError{ - Offset: int(r.buf.offset), - } - } - - for i, functionTypeIndex := range functionTypeIndices { - r.Module.Functions[i].TypeIndex = functionTypeIndex - } - - return nil -} - -func (r *WASMReader) ensureModuleFunctions(count int) bool { - if r.Module.Functions != nil { - return len(r.Module.Functions) == count - } - - r.Module.Functions = make([]*Function, count) - for i := 0; i < count; i++ { - r.Module.Functions[i] = &Function{} - } - - return true -} - -// readMemorySection reads the section that declares the memories -func (r *WASMReader) readMemorySection() error { - - _, err := r.readSectionSize() - if err != nil { - return err - } - - // read the number of memories - countOffset := r.buf.offset - count, err := r.buf.readUint32LEB128() - if err != nil { - return InvalidMemorySectionMemoryCountError{ - Offset: int(countOffset), - ReadError: err, - } - } - - memories := make([]*Memory, count) - - // read each memory - for i := uint32(0); i < count; i++ { - im, err := r.readMemory() - if err != nil { - return InvalidMemoryError{ - Index: int(i), - ReadError: 
err, - } - } - memories[i] = im - } - - r.Module.Memories = memories - - return nil -} - -// readMemory reads a memory in the memory section -func (r *WASMReader) readMemory() (*Memory, error) { - min, max, err := r.readLimit() - if err != nil { - return nil, err - } - - return &Memory{ - Min: min, - Max: max, - }, nil -} - -// readLimit reads a limit -func (r *WASMReader) readLimit() (min uint32, max *uint32, err error) { - // read the limit indicator - indicatorOffset := r.buf.offset - b, err := r.buf.ReadByte() - if err != nil { - return 0, nil, InvalidLimitIndicatorError{ - Offset: int(indicatorOffset), - LimitIndicator: b, - ReadError: err, - } - } - - indicator := limitIndicator(b) - - var readMax bool - - switch indicator { - case limitIndicatorNoMax: - readMax = false - case limitIndicatorMax: - readMax = true - default: - return 0, nil, InvalidLimitIndicatorError{ - Offset: int(indicatorOffset), - LimitIndicator: byte(indicator), - } - } - - // read the minimum - minOffset := r.buf.offset - min, err = r.buf.readUint32LEB128() - if err != nil { - return 0, nil, InvalidLimitMinError{ - Offset: int(minOffset), - ReadError: err, - } - } - - // read the maximum, if given - if readMax { - maxOffset := r.buf.offset - maximum, err := r.buf.readUint32LEB128() - if err != nil { - return 0, nil, InvalidLimitMaxError{ - Offset: int(maxOffset), - ReadError: err, - } - } - max = &maximum - } - - return min, max, nil -} - -// readExportSection reads the section that declares the exports -func (r *WASMReader) readExportSection() error { - - _, err := r.readSectionSize() - if err != nil { - return err - } - - // read the number of exports - countOffset := r.buf.offset - count, err := r.buf.readUint32LEB128() - if err != nil { - return InvalidExportSectionExportCountError{ - Offset: int(countOffset), - ReadError: err, - } - } - - exports := make([]*Export, count) - - // read each export - for i := uint32(0); i < count; i++ { - im, err := r.readExport() - if err != nil { - 
return InvalidExportError{ - Index: int(i), - ReadError: err, - } - } - exports[i] = im - } - - r.Module.Exports = exports - - return nil -} - -// readExport reads an export in the export section -func (r *WASMReader) readExport() (*Export, error) { - - // read the name - name, err := r.readName() - if err != nil { - return nil, err - } - - // read the type indicator - indicatorOffset := r.buf.offset - b, err := r.buf.ReadByte() - - indicator := exportIndicator(b) - - // TODO: add support for tables, and globals. adjust name section! - if err != nil { - return nil, InvalidExportIndicatorError{ - ExportIndicator: indicator, - Offset: int(indicatorOffset), - ReadError: err, - } - } - - // read the index - indexOffset := r.buf.offset - index, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidExportSectionIndexError{ - Offset: int(indexOffset), - ReadError: err, - } - } - - var descriptor ExportDescriptor - - switch indicator { - case exportIndicatorFunction: - descriptor = FunctionExport{ - FunctionIndex: index, - } - - case exportIndicatorMemory: - descriptor = MemoryExport{ - MemoryIndex: index, - } - - default: - return nil, InvalidExportIndicatorError{ - ExportIndicator: indicator, - Offset: int(indicatorOffset), - } - } - - return &Export{ - Name: name, - Descriptor: descriptor, - }, nil -} - -// readStartSection reads the section that declares the types of functions. 
-// The bodies of these functions will later be provided in the code section -func (r *WASMReader) readStartSection() error { - - _, err := r.readSectionSize() - if err != nil { - return err - } - - // read the function index - functionIndexOffset := r.buf.offset - functionIndex, err := r.buf.readUint32LEB128() - if err != nil { - return InvalidStartSectionFunctionIndexError{ - Offset: int(functionIndexOffset), - ReadError: err, - } - } - - r.Module.StartFunctionIndex = &functionIndex - - return nil -} - -// readCodeSection reads the section that provides the function bodies for the functions -// declared by the function section (which only provides the function types) -func (r *WASMReader) readCodeSection() error { - - _, err := r.readSectionSize() - if err != nil { - return err - } - - // read the number of functions - countOffset := r.buf.offset - count, err := r.buf.readUint32LEB128() - if err != nil { - return InvalidCodeSectionFunctionCountError{ - Offset: int(countOffset), - ReadError: err, - } - } - - // read the code of each function - - functionBodies := make([]*Code, count) - - for i := uint32(0); i < count; i++ { - functionBody, err := r.readFunctionBody() - if err != nil { - return InvalidFunctionCodeError{ - Index: int(i), - ReadError: err, - } - } - functionBodies[i] = functionBody - } - - if !r.ensureModuleFunctions(len(functionBodies)) { - return FunctionCountMismatchError{ - Offset: int(r.buf.offset), - } - } - - for i, functionBody := range functionBodies { - r.Module.Functions[i].Code = functionBody - } - - return nil -} - -// readFunctionBody reads the body (locals and instruction) of one function in the code section -func (r *WASMReader) readFunctionBody() (*Code, error) { - - // read the size - sizeOffset := r.buf.offset - // TODO: use size - _, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidCodeSizeError{ - Offset: int(sizeOffset), - ReadError: err, - } - } - - // read the locals - locals, err := r.readLocals() - if 
err != nil { - return nil, err - } - - // read the instructions - instructions, err := r.readInstructions() - if err != nil { - return nil, err - } - - return &Code{ - Locals: locals, - Instructions: instructions, - }, nil -} - -// readLocals reads the locals for one function in the code sections -func (r *WASMReader) readLocals() ([]ValueType, error) { - // read the number of locals - localsCountOffset := r.buf.offset - localsCount, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidCodeSectionLocalsCountError{ - Offset: int(localsCountOffset), - ReadError: err, - } - } - - if localsCount == 0 { - return nil, nil - } - - locals := make([]ValueType, localsCount) - - // read each local - for i := uint32(0); i < localsCount; { - compressedLocalsCountOffset := r.buf.offset - compressedLocalsCount, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidCodeSectionCompressedLocalsCountError{ - Offset: int(compressedLocalsCountOffset), - ReadError: err, - } - } - - localTypeOffset := r.buf.offset - localType, err := r.readValType() - if err != nil { - return nil, InvalidCodeSectionLocalTypeError{ - Offset: int(localTypeOffset), - ReadError: err, - } - } - - locals[i] = localType - - i += compressedLocalsCount - - if i > localsCount { - return nil, CodeSectionLocalsCountMismatchError{ - Actual: i, - Expected: localsCount, - } - } - } - - return locals, nil -} - -// readInstructions reads the instructions for one function in the code sections -func (r *WASMReader) readInstructions() (instructions []Instruction, err error) { - for { - instruction, err := r.readInstruction() - if err != nil { - return nil, err - } - - if _, ok := instruction.(InstructionEnd); ok { - return instructions, nil - } - - instructions = append(instructions, instruction) - } -} - -// readName reads a name -func (r *WASMReader) readName() (string, error) { - - // read the length - lengthOffset := r.buf.offset - length, err := r.buf.readUint32LEB128() - if err != 
nil { - return "", InvalidNameLengthError{ - Offset: int(lengthOffset), - ReadError: err, - } - } - - // read the name - nameOffset := r.buf.offset - name := make([]byte, length) - n, err := r.buf.Read(name) - if err != nil { - return "", InvalidNameError{ - Offset: int(nameOffset), - ReadError: err, - } - } - - readCount := uint32(n) - - // ensure the full name was read - if readCount != length { - return "", IncompleteNameError{ - Offset: int(nameOffset), - Expected: length, - Actual: readCount, - } - } - - // ensure the name is valid UTF-8 - if !utf8.Valid(name) { - return "", InvalidNonUTF8NameError{ - Offset: int(nameOffset), - Name: string(name), - } - } - - return string(name), nil -} - -// readUint32LEB128InstructionArgument reads a uint32 instruction argument -// (in LEB128 format) -func (r *WASMReader) readUint32LEB128InstructionArgument() (uint32, error) { - offset := r.buf.offset - v, err := r.buf.readUint32LEB128() - if err != nil { - return 0, InvalidInstructionArgumentError{ - Offset: int(offset), - ReadError: err, - } - } - return v, nil -} - -// readInt32LEB128InstructionArgument reads an int32 instruction argument -// (in LEB128 format) -func (r *WASMReader) readInt32LEB128InstructionArgument() (int32, error) { - offset := r.buf.offset - v, err := r.buf.readInt32LEB128() - if err != nil { - return 0, InvalidInstructionArgumentError{ - Offset: int(offset), - ReadError: err, - } - } - return v, nil -} - -// readInt64LEB128InstructionArgument reads an int64 instruction argument -// (in LEB128 format) -func (r *WASMReader) readInt64LEB128InstructionArgument() (int64, error) { - offset := r.buf.offset - v, err := r.buf.readInt64LEB128() - if err != nil { - return 0, InvalidInstructionArgumentError{ - Offset: int(offset), - ReadError: err, - } - } - return v, nil -} - -// readBlockInstructionArgument reads a block instruction argument -func (r *WASMReader) readBlockInstructionArgument(allowElse bool) (Block, error) { - // read the block type. 
- blockType, err := r.readBlockType() - if err != nil { - // TODO: improve error - return Block{}, err - } - - // read the first sequence of instructions. - // `readInstructions` cannot be used, as it does not handle the `else` instruction / opcode. - - var instructions1 []Instruction - - sawElse := false - - for { - b, err := r.buf.PeekByte() - if err != nil { - // TODO: improve error - return Block{}, err - } - if b == byte(opcodeElse) { - if !allowElse { - return Block{}, InvalidBlockSecondInstructionsError{ - Offset: int(r.buf.offset), - } - } - r.buf.offset++ - sawElse = true - break - } - - instruction, err := r.readInstruction() - if err != nil { - // TODO: improve error - return Block{}, err - } - - if _, ok := instruction.(InstructionEnd); ok { - break - } - - instructions1 = append(instructions1, instruction) - } - - var instructions2 []Instruction - if sawElse { - // the first set of instructions contained an `else` instruction / opcode, - // so read the second set of instructions. the validity was already checked above. - - instructions2, err = r.readInstructions() - if err != nil { - // TODO: improve error - return Block{}, err - } - } - - return Block{ - BlockType: blockType, - Instructions1: instructions1, - Instructions2: instructions2, - }, nil -} - -func (r *WASMReader) readBlockType() (BlockType, error) { - // it may be the empty block type - b, err := r.buf.PeekByte() - if err != nil { - // TODO: improve error - return nil, err - } - if b == emptyBlockType { - r.buf.offset++ - return nil, nil - } - - // it may be a value type - blockType, err := r.readBlockTypeValueType() - if err != nil { - // TODO: improve error - return nil, err - } - - if blockType != nil { - return blockType, nil - } - - // the block type is not a value type, - // it must be a type index. 
- - typeIndexOffset := r.buf.offset - typeIndex, err := r.buf.readInt64LEB128() - if err != nil { - // TODO: improve error - return nil, err - } - - // the type index was read as a int64, - // but must fit a uint32 - if typeIndex < 0 || typeIndex > math.MaxUint32 { - return nil, InvalidBlockTypeTypeIndexError{ - Offset: int(typeIndexOffset), - TypeIndex: typeIndex, - } - } - - return TypeIndexBlockType{ - TypeIndex: uint32(typeIndex), - }, nil -} - -// readBlockTypeValueType reads a value type block type -// and returns nil if the next byte is not a valid value type -func (r *WASMReader) readBlockTypeValueType() (BlockType, error) { - b, err := r.buf.PeekByte() - if err != nil { - return nil, err - } - valueType := AsValueType(b) - if valueType == 0 { - return nil, nil - } - r.buf.offset++ - return valueType, nil -} - -// readCustomSection reads a custom section -func (r *WASMReader) readCustomSection() error { - - size, err := r.readSectionSize() - if err != nil { - return err - } - - // read the name of the custom section - - nameStartOffset := r.buf.offset - name, err := r.readName() - if err != nil { - return err - } - - size -= uint32(r.buf.offset - nameStartOffset) - - switch name { - case customSectionNameName: - return r.readNameSection(size) - } - - // skip unknown custom sections - r.buf.offset += offset(size) - - return nil -} - -// readDataSection reads the section that declares the data segments -func (r *WASMReader) readDataSection() error { - - _, err := r.readSectionSize() - if err != nil { - return err - } - - // read the number of data segments - countOffset := r.buf.offset - count, err := r.buf.readUint32LEB128() - if err != nil { - return InvalidDataSectionSegmentCountError{ - Offset: int(countOffset), - ReadError: err, - } - } - - segments := make([]*Data, count) - - // read each data segment - for i := uint32(0); i < count; i++ { - segment, err := r.readDataSegment() - if err != nil { - return InvalidDataSegmentError{ - Index: int(i), - 
ReadError: err, - } - } - segments[i] = segment - } - - r.Module.Data = segments - - return nil -} - -// readDataSegment reads a segment in the data section -func (r *WASMReader) readDataSegment() (*Data, error) { - - // read the memory index - memoryIndexOffset := r.buf.offset - memoryIndex, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidDataSectionMemoryIndexError{ - Offset: int(memoryIndexOffset), - ReadError: err, - } - } - - // read the offset instructions - instructions, err := r.readInstructions() - if err != nil { - return nil, err - } - - // read the number of init bytes - countOffset := r.buf.offset - count, err := r.buf.readUint32LEB128() - if err != nil { - return nil, InvalidDataSectionInitByteCountError{ - Offset: int(countOffset), - ReadError: err, - } - } - - init := make([]byte, count) - - // read each init byte - for i := uint32(0); i < count; i++ { - b, err := r.buf.ReadByte() - if err != nil { - return nil, err - } - init[i] = b - } - - return &Data{ - MemoryIndex: memoryIndex, - Offset: instructions, - Init: init, - }, nil -} - -// readNameSection reads the section that provides names -func (r *WASMReader) readNameSection(size uint32) error { - - // TODO: read the names and store them. 
for now, just skip the content - r.buf.offset += offset(size) - - return nil -} - -func (r *WASMReader) ReadModule() error { - if err := r.readMagicAndVersion(); err != nil { - return err - } - - for { - _, err := r.buf.PeekByte() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - - if err = r.readSection(); err != nil { - return err - } - } -} diff --git a/compiler/wasm/reader_test.go b/compiler/wasm/reader_test.go deleted file mode 100644 index 65485b59ee..0000000000 --- a/compiler/wasm/reader_test.go +++ /dev/null @@ -1,2243 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package wasm - -import ( - "io" - "math" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestWASMReader_readMagicAndVersion(t *testing.T) { - - t.Parallel() - - read := func(data []byte) error { - b := Buffer{data: data} - r := NewWASMReader(&b) - err := r.readMagicAndVersion() - if err != nil { - return err - } - require.Equal(t, offset(len(b.data)), b.offset) - return nil - } - - t.Run("invalid magic, too short", func(t *testing.T) { - - t.Parallel() - - err := read([]byte{0x0, 0x61}) - require.Error(t, err) - assert.Equal(t, - InvalidMagicError{ - Offset: 0, - ReadError: io.EOF, - }, - err, - ) - }) - - t.Run("invalid magic, incorrect", func(t *testing.T) { - - t.Parallel() - - err := read([]byte{0x0, 0x61, 0x73, 0xFF}) - require.Error(t, err) - assert.Equal(t, - InvalidMagicError{ - Offset: 0, - ReadError: nil, - }, - err, - ) - }) - - t.Run("invalid version, too short", func(t *testing.T) { - - t.Parallel() - - err := read([]byte{0x0, 0x61, 0x73, 0x6d, 0x1, 0x0}) - require.Error(t, err) - assert.Equal(t, - InvalidVersionError{ - Offset: 4, - ReadError: io.EOF, - }, - err, - ) - }) - - t.Run("invalid version, incorrect", func(t *testing.T) { - - t.Parallel() - - err := read([]byte{0x0, 0x61, 0x73, 0x6d, 0x2, 0x0, 0x0, 0x0}) - require.Error(t, err) - assert.Equal(t, - InvalidVersionError{ - Offset: 4, - ReadError: nil, - }, - err, - ) - }) - - t.Run("valid magic and version", func(t *testing.T) { - - t.Parallel() - - err := read([]byte{0x0, 0x61, 0x73, 0x6d, 0x1, 0x0, 0x0, 0x0}) - require.NoError(t, err) - }) -} - -func TestWASMReader_readValType(t *testing.T) { - - t.Parallel() - - read := func(data []byte) (ValueType, error) { - b := Buffer{data: data} - r := NewWASMReader(&b) - valueType, err := r.readValType() - if err != nil { - return 0, err - } - require.Equal(t, offset(len(b.data)), b.offset) - return valueType, nil - } - - t.Run("too short", func(t *testing.T) { - - t.Parallel() - - 
valType, err := read([]byte{}) - require.Error(t, err) - assert.Equal(t, - InvalidValTypeError{ - Offset: 0, - ValType: valType, - ReadError: io.EOF, - }, - err, - ) - assert.Equal(t, ValueType(0), valType) - }) - - t.Run("invalid", func(t *testing.T) { - - t.Parallel() - - valType, err := read([]byte{0xFF}) - require.Error(t, err) - assert.Equal(t, - InvalidValTypeError{ - Offset: 0, - ValType: 0xFF, - ReadError: nil, - }, - err, - ) - assert.Equal(t, ValueType(0), valType) - }) - - t.Run("i32", func(t *testing.T) { - - t.Parallel() - - valType, err := read([]byte{byte(ValueTypeI32)}) - require.NoError(t, err) - assert.Equal(t, ValueTypeI32, valType) - }) - - t.Run("i64", func(t *testing.T) { - - t.Parallel() - - valType, err := read([]byte{byte(ValueTypeI64)}) - require.NoError(t, err) - assert.Equal(t, ValueTypeI64, valType) - }) -} - -func TestWASMReader_readTypeSection(t *testing.T) { - - t.Parallel() - - read := func(data []byte) ([]*FunctionType, error) { - b := Buffer{data: data} - r := NewWASMReader(&b) - err := r.readTypeSection() - if err != nil { - return nil, err - } - require.Equal(t, offset(len(b.data)), b.offset) - return r.Module.Types, nil - } - - t.Run("valid", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 7 (LEB128) - 0x87, 0x80, 0x80, 0x80, 0x0, - // type count - 0x1, - // function type - 0x60, - // parameter count: 2 - 0x2, - // type of parameter 1: i32 - 0x7f, - // type of parameter 2: i32 - 0x7f, - // return value count: 1 - 0x1, - // type of return value 1: i32 - 0x7f, - }) - require.NoError(t, err) - assert.Equal(t, - []*FunctionType{ - { - Params: []ValueType{ValueTypeI32, ValueTypeI32}, - Results: []ValueType{ValueTypeI32}, - }, - }, - funcTypes, - ) - }) - - t.Run("invalid size", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - 0x87, 0x80, 0x80, - }) - require.Error(t, err) - assert.Equal(t, - InvalidSectionSizeError{ - Offset: 0, - ReadError: io.EOF, - }, - err, - 
) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid count", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 0 (LEB128) - 0x80, 0x80, 0x80, 0x80, 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidTypeSectionTypeCountError{ - Offset: 5, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid indicator", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 2 (LEB128) - 0x82, 0x80, 0x80, 0x80, 0x0, - // type count - 0x1, - // function type - 0xFF, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFuncTypeIndicatorError{ - Offset: 6, - FuncTypeIndicator: 0xff, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid parameter count", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 2 (LEB128) - 0x82, 0x80, 0x80, 0x80, 0x0, - // type count - 0x1, - // function type - 0x60, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFuncTypeParameterCountError{ - Offset: 7, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid parameter type", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 4 (LEB128) - 0x84, 0x80, 0x80, 0x80, 0x0, - // type count - 0x1, - // function type - 0x60, - // parameter count: 1 - 0x1, - // type of parameter 1 - 0xff, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFuncTypeParameterTypeError{ - Index: 0, - ReadError: InvalidValTypeError{ - Offset: 8, - ValType: 0xFF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid, parameter type missing", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 4 (LEB128) - 0x84, 0x80, 0x80, 0x80, 0x0, - // type count - 0x1, - // function type - 0x60, - // parameter count: 2 - 0x2, - // type of parameter 1: i32 - 0x7f, - }) - require.Error(t, err) - assert.Equal(t, - 
InvalidFuncTypeParameterTypeError{ - Index: 1, - ReadError: InvalidValTypeError{ - Offset: 9, - ValType: 0, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid result count", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 3 (LEB128) - 0x83, 0x80, 0x80, 0x80, 0x0, - // type count - 0x1, - // function type - 0x60, - // parameter count - 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFuncTypeResultCountError{ - Offset: 8, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid result type", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 5 (LEB128) - 0x85, 0x80, 0x80, 0x80, 0x0, - // type count - 0x1, - // function type - 0x60, - // parameter count: 0 - 0x0, - // result count: 1 - 0x1, - // type of result 1 - 0xff, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFuncTypeResultTypeError{ - Index: 0, - ReadError: InvalidValTypeError{ - Offset: 9, - ValType: 0xFF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid, result type missing", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 5 (LEB128) - 0x85, 0x80, 0x80, 0x80, 0x0, - // type count - 0x1, - // function type - 0x60, - // parameter count: 0 - 0x0, - // result count: 2 - 0x2, - // type of result 1: i32 - 0x7f, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFuncTypeResultTypeError{ - Index: 1, - ReadError: InvalidValTypeError{ - Offset: 10, - ValType: 0, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) -} - -func TestWASMReader_readImportSection(t *testing.T) { - - t.Parallel() - - read := func(data []byte) ([]*Import, error) { - b := Buffer{data: data} - r := NewWASMReader(&b) - err := r.readImportSection() - if err != nil { - return nil, err - } - require.Equal(t, offset(len(b.data)), b.offset) - return r.Module.Imports, nil - } - - 
t.Run("valid", func(t *testing.T) { - - t.Parallel() - - typeIndices, err := read([]byte{ - // section size: 11 (LEB128) - 0x8b, 0x80, 0x80, 0x80, 0x0, - // import count: 1 - 0x1, - // module length - 0x3, - // module = "foo" - 0x66, 0x6f, 0x6f, - // name length - 0x3, - // name = "bar" - 0x62, 0x61, 0x72, - // indicator: function = 0 - 0x0, - // type index of function: 0 - 0x1, - }) - require.NoError(t, err) - assert.Equal(t, - []*Import{ - { - Module: "foo", - Name: "bar", - TypeIndex: 1, - }, - }, - typeIndices, - ) - }) - - t.Run("invalid size", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - 0x87, 0x80, 0x80, - }) - require.Error(t, err) - assert.Equal(t, - InvalidSectionSizeError{ - Offset: 0, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid count", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 0 (LEB128) - 0x80, 0x80, 0x80, 0x80, 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidImportSectionImportCountError{ - Offset: 5, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid module", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 1 (LEB128) - 0x81, 0x80, 0x80, 0x80, 0x0, - // import count - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidImportError{ - Index: 0, - ReadError: InvalidNameLengthError{ - Offset: 6, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid name", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 5 (LEB128) - 0x85, 0x80, 0x80, 0x80, 0x0, - // import count - 0x1, - // module length - 0x3, - // module = "foo" - 0x66, 0x6f, 0x6f, - }) - require.Error(t, err) - assert.Equal(t, - InvalidImportError{ - Index: 0, - ReadError: InvalidNameLengthError{ - Offset: 10, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - 
t.Run("missing indicator", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // import count - 0x1, - // module length - 0x3, - // module = "foo" - 0x66, 0x6f, 0x6f, - // name length - 0x3, - // name = "bar" - 0x62, 0x61, 0x72, - }) - require.Error(t, err) - assert.Equal(t, - InvalidImportError{ - Index: 0, - ReadError: InvalidImportIndicatorError{ - Offset: 14, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid indicator", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 10 (LEB128) - 0x8a, 0x80, 0x80, 0x80, 0x0, - // import count - 0x1, - // module length - 0x3, - // module = "foo" - 0x66, 0x6f, 0x6f, - // name length - 0x3, - // name = "bar" - 0x62, 0x61, 0x72, - // indicator - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidImportError{ - Index: 0, - ReadError: InvalidImportIndicatorError{ - Offset: 14, - ImportIndicator: 0x1, - ReadError: nil, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid type index", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 10 (LEB128) - 0x8a, 0x80, 0x80, 0x80, 0x0, - // import count - 0x1, - // module length - 0x3, - // module = "foo" - 0x66, 0x6f, 0x6f, - // name length - 0x3, - // name = "bar" - 0x62, 0x61, 0x72, - // indicator: function = 0 - 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidImportError{ - Index: 0, - ReadError: InvalidImportSectionTypeIndexError{ - Offset: 15, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) -} - -func TestWASMReader_readFunctionSection(t *testing.T) { - - t.Parallel() - - read := func(data []byte) ([]*Function, error) { - b := Buffer{data: data} - r := NewWASMReader(&b) - err := r.readFunctionSection() - if err != nil { - return nil, err - } - require.Equal(t, offset(len(b.data)), b.offset) - return 
r.Module.Functions, nil - } - - t.Run("valid", func(t *testing.T) { - - t.Parallel() - - typeIDs, err := read([]byte{ - // section size: 2 (LEB128) - 0x82, 0x80, 0x80, 0x80, 0x0, - // function count: 1 - 0x1, - // type index of function: 0x23 - 0x23, - }) - require.NoError(t, err) - assert.Equal(t, - []*Function{ - { - TypeIndex: 0x23, - }, - }, - typeIDs, - ) - }) - - t.Run("invalid size", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - 0x87, 0x80, 0x80, - }) - require.Error(t, err) - assert.Equal(t, - InvalidSectionSizeError{ - Offset: 0, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid count", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 0 (LEB128) - 0x80, 0x80, 0x80, 0x80, 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFunctionSectionFunctionCountError{ - Offset: 5, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid function type ID", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 1 (LEB128) - 0x81, 0x80, 0x80, 0x80, 0x0, - // function count - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFunctionSectionTypeIndexError{ - Offset: 6, - Index: 0, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) -} - -func TestWASMReader_readMemorySection(t *testing.T) { - - t.Parallel() - - read := func(data []byte) ([]*Memory, error) { - b := Buffer{data: data} - r := NewWASMReader(&b) - err := r.readMemorySection() - if err != nil { - return nil, err - } - require.Equal(t, offset(len(b.data)), b.offset) - return r.Module.Memories, nil - } - - t.Run("valid", func(t *testing.T) { - - t.Parallel() - - codes, err := read([]byte{ - // section size: 6 (LEB128) - 0x86, 0x80, 0x80, 0x80, 0x0, - // memory count: 2 - 0x2, - // memory type / limit: no max - 0x0, - // limit 1 min - 0x0, - // memory type / limit: max - 0x1, - // limit 2 min - 0x1, - // 
limit 2 max - 0x2, - }) - require.NoError(t, err) - assert.Equal(t, - []*Memory{ - { - Min: 0, - Max: nil, - }, - { - Min: 1, - Max: func() *uint32 { - var max uint32 = 2 - return &max - }(), - }, - }, - codes, - ) - }) - - t.Run("invalid size", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - 0x87, 0x80, 0x80, - }) - require.Error(t, err) - assert.Equal(t, - InvalidSectionSizeError{ - Offset: 0, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("invalid count", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 7 (LEB128) - 0x80, 0x80, 0x80, 0x80, 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidMemorySectionMemoryCountError{ - Offset: 5, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("missing limit indicator", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 10 (LEB128) - 0x8a, 0x80, 0x80, 0x80, 0x0, - // segment count - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidMemoryError{ - Index: 0, - ReadError: InvalidLimitIndicatorError{ - Offset: 6, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("invalid limit indicator", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 10 (LEB128) - 0x8a, 0x80, 0x80, 0x80, 0x0, - // segment count - 0x1, - // limit indicator - 0xFF, - }) - require.Error(t, err) - assert.Equal(t, - InvalidMemoryError{ - Index: 0, - ReadError: InvalidLimitIndicatorError{ - Offset: 6, - LimitIndicator: 0xFF, - }, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("invalid min", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 10 (LEB128) - 0x8a, 0x80, 0x80, 0x80, 0x0, - // segment count - 0x1, - // limit indicator: no max = 0x0 - 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidMemoryError{ - Index: 0, - ReadError: 
InvalidLimitMinError{ - Offset: 7, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("invalid max", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 10 (LEB128) - 0x8a, 0x80, 0x80, 0x80, 0x0, - // segment count - 0x1, - // limit indicator: max = 0x1 - 0x1, - // min - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidMemoryError{ - Index: 0, - ReadError: InvalidLimitMaxError{ - Offset: 8, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, segments) - }) -} - -func TestWASMReader_readExportSection(t *testing.T) { - - t.Parallel() - - read := func(data []byte) ([]*Export, error) { - b := Buffer{data: data} - r := NewWASMReader(&b) - err := r.readExportSection() - if err != nil { - return nil, err - } - require.Equal(t, offset(len(b.data)), b.offset) - return r.Module.Exports, nil - } - - t.Run("valid", func(t *testing.T) { - - t.Parallel() - - typeIndices, err := read([]byte{ - // section size: 7 (LEB128) - 0x87, 0x80, 0x80, 0x80, 0x0, - // export count: 1 - 0x1, - // name length - 0x3, - // name = "foo" - 0x66, 0x6f, 0x6f, - // indicator: function = 0 - 0x0, - // index of function: 0 - 0x1, - }) - require.NoError(t, err) - assert.Equal(t, - []*Export{ - { - Name: "foo", - Descriptor: FunctionExport{ - FunctionIndex: 1, - }, - }, - }, - typeIndices, - ) - }) - - t.Run("invalid size", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - 0x80, 0x80, 0x80, - }) - require.Error(t, err) - assert.Equal(t, - InvalidSectionSizeError{ - Offset: 0, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid count", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 0 (LEB128) - 0x80, 0x80, 0x80, 0x80, 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidExportSectionExportCountError{ - Offset: 5, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid name", 
func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 1 (LEB128) - 0x81, 0x80, 0x80, 0x80, 0x0, - // export count - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidExportError{ - Index: 0, - ReadError: InvalidNameLengthError{ - Offset: 6, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("missing indicator", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 5 (LEB128) - 0x85, 0x80, 0x80, 0x80, 0x0, - // export count - 0x1, - // name length - 0x3, - // name = "foo" - 0x66, 0x6f, 0x6f, - }) - require.Error(t, err) - assert.Equal(t, - InvalidExportError{ - Index: 0, - ReadError: InvalidExportIndicatorError{ - Offset: 10, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid indicator", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 6 (LEB128) - 0x86, 0x80, 0x80, 0x80, 0x0, - // import count - 0x1, - // name length - 0x3, - // name = "foo" - 0x66, 0x6f, 0x6f, - // indicator: invalid - 0xFF, - // index - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidExportError{ - Index: 0, - ReadError: InvalidExportIndicatorError{ - Offset: 10, - ExportIndicator: 0xFF, - ReadError: nil, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid function index", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 6 (LEB128) - 0x86, 0x80, 0x80, 0x80, 0x0, - // import count - 0x1, - // name length - 0x3, - // name = "bar" - 0x62, 0x61, 0x72, - // indicator: function = 0 - 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidExportError{ - Index: 0, - ReadError: InvalidExportSectionIndexError{ - Offset: 11, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) -} - -func TestWASMReader_readStartSection(t *testing.T) { - - t.Parallel() - - read := func(data []byte) (*uint32, error) { - b 
:= Buffer{data: data} - r := NewWASMReader(&b) - err := r.readStartSection() - if err != nil { - return nil, err - } - require.Equal(t, offset(len(b.data)), b.offset) - return r.Module.StartFunctionIndex, nil - } - - t.Run("valid", func(t *testing.T) { - - t.Parallel() - - typeIDs, err := read([]byte{ - // section size: 1 (LEB128) - 0x81, 0x80, 0x80, 0x80, 0x0, - // function index: 1 - 0x1, - }) - require.NoError(t, err) - assert.Equal(t, - func() *uint32 { - var funcIndex uint32 = 1 - return &funcIndex - }(), - typeIDs, - ) - }) - - t.Run("invalid size", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - 0x87, 0x80, 0x80, - }) - require.Error(t, err) - assert.Equal(t, - InvalidSectionSizeError{ - Offset: 0, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid function type ID", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 7 (LEB128) - 0x81, 0x80, 0x80, 0x80, 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidStartSectionFunctionIndexError{ - Offset: 5, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) -} - -func TestWASMReader_readCodeSection(t *testing.T) { - - t.Parallel() - - read := func(data []byte) ([]*Function, error) { - b := Buffer{data: data} - r := NewWASMReader(&b) - err := r.readCodeSection() - if err != nil { - return nil, err - } - require.Equal(t, offset(len(b.data)), b.offset) - return r.Module.Functions, nil - } - - t.Run("valid", func(t *testing.T) { - - t.Parallel() - - codes, err := read([]byte{ - // section size: 15 (LEB128) - 0x8f, 0x80, 0x80, 0x80, 0x0, - // function count: 1 - 0x1, - // code size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // number of locals: 1 - 0x1, - // number of locals with this type: 1 - 0x1, - // local type: i32 - 0x7f, - // opcode: local.get, 0 - 0x20, 0x0, - // opcode: local.get 1 - 0x20, 0x1, - // opcode: i32.add - 0x6a, - // opcode: end - 0xb, - }) - require.NoError(t, err) - 
assert.Equal(t, - []*Function{ - { - Code: &Code{ - Locals: []ValueType{ - ValueTypeI32, - }, - Instructions: []Instruction{ - InstructionLocalGet{LocalIndex: 0}, - InstructionLocalGet{LocalIndex: 1}, - InstructionI32Add{}, - }, - }, - }, - }, - codes, - ) - }) - - t.Run("invalid size", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - 0x87, 0x80, 0x80, - }) - require.Error(t, err) - assert.Equal(t, - InvalidSectionSizeError{ - Offset: 0, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid count", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 0 (LEB128) - 0x80, 0x80, 0x80, 0x80, 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidCodeSectionFunctionCountError{ - Offset: 5, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid code size", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 1 (LEB128) - 0x81, 0x80, 0x80, 0x80, 0x0, - // function count: 1 - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFunctionCodeError{ - Index: 0, - ReadError: InvalidCodeSizeError{ - Offset: 6, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid locals count", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 6 (LEB128) - 0x86, 0x80, 0x80, 0x80, 0x0, - // function count: 1 - 0x1, - // code size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFunctionCodeError{ - Index: 0, - ReadError: InvalidCodeSectionLocalsCountError{ - Offset: 11, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid compressed locals count", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 7 (LEB128) - 0x87, 0x80, 0x80, 0x80, 0x0, - // function count: 1 - 0x1, - // code size: 9 (LEB128) - 0x89, 
0x80, 0x80, 0x80, 0x0, - // number of locals: 1 - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFunctionCodeError{ - Index: 0, - ReadError: InvalidCodeSectionCompressedLocalsCountError{ - Offset: 12, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid local type", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 8 (LEB128) - 0x88, 0x80, 0x80, 0x80, 0x0, - // function count: 1 - 0x1, - // code size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // number of locals: 1 - 0x1, - // number of locals with this type: 1 - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFunctionCodeError{ - Index: 0, - ReadError: InvalidCodeSectionLocalTypeError{ - Offset: 13, - ReadError: InvalidValTypeError{ - Offset: 13, - ValType: 0, - ReadError: io.EOF, - }, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("invalid instruction", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 10 (LEB128) - 0x8a, 0x80, 0x80, 0x80, 0x0, - // function count: 1 - 0x1, - // code size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // number of locals: 1 - 0x1, - // number of locals with this type: 1 - 0x1, - // local type: i32 - 0x7f, - // invalid opcode - 0xff, - }) - require.Error(t, err) - assert.Equal(t, - InvalidFunctionCodeError{ - Index: 0, - ReadError: InvalidOpcodeError{ - Offset: 14, - Opcode: 0xff, - ReadError: nil, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) - - t.Run("missing end", func(t *testing.T) { - - t.Parallel() - - funcTypes, err := read([]byte{ - // section size: 14 (LEB128) - 0x8e, 0x80, 0x80, 0x80, 0x0, - // function count: 1 - 0x1, - // code size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // number of locals: 1 - 0x1, - // number of locals with this type: 1 - 0x1, - // local type: i32 - 0x7f, - // opcode: local.get, 0 - 0x20, 0x0, - // opcode: local.get 1 - 0x20, 0x1, - // opcode: i32.add - 0x6a, - }) - 
require.Error(t, err) - assert.Equal(t, - InvalidFunctionCodeError{ - Index: 0, - ReadError: MissingEndInstructionError{ - Offset: 19, - }, - }, - err, - ) - assert.Nil(t, funcTypes) - }) -} - -func TestWASMReader_readDataSection(t *testing.T) { - - t.Parallel() - - read := func(data []byte) ([]*Data, error) { - b := Buffer{data: data} - r := NewWASMReader(&b) - err := r.readDataSection() - if err != nil { - return nil, err - } - require.Equal(t, offset(len(b.data)), b.offset) - return r.Module.Data, nil - } - - t.Run("valid", func(t *testing.T) { - - t.Parallel() - - codes, err := read([]byte{ - // section size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // segment count: 1 - 0x1, - // memory index - 0x1, - // i32.const 2 - 0x41, 0x2, - // end - 0xb, - // byte count - 0x3, - // init (bytes 0x3, 0x4, 0x5) - 0x3, 0x4, 0x5, - }) - require.NoError(t, err) - assert.Equal(t, - []*Data{ - { - MemoryIndex: 1, - Offset: []Instruction{ - InstructionI32Const{Value: 2}, - }, - Init: []byte{3, 4, 5}, - }, - }, - codes, - ) - }) - - t.Run("invalid size", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - 0x87, 0x80, 0x80, - }) - require.Error(t, err) - assert.Equal(t, - InvalidSectionSizeError{ - Offset: 0, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("invalid count", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 7 (LEB128) - 0x80, 0x80, 0x80, 0x80, 0x0, - }) - require.Error(t, err) - assert.Equal(t, - InvalidDataSectionSegmentCountError{ - Offset: 5, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("invalid memory index", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 10 (LEB128) - 0x8a, 0x80, 0x80, 0x80, 0x0, - // segment count - 0x1, - }) - require.Error(t, err) - assert.Equal(t, - InvalidDataSegmentError{ - Index: 0, - ReadError: InvalidDataSectionMemoryIndexError{ - Offset: 6, - ReadError: io.EOF, - 
}, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("invalid instruction", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 10 (LEB128) - 0x8a, 0x80, 0x80, 0x80, 0x0, - // segment count - 0x1, - // memory index - 0x0, - // invalid opcode - 0xff, - }) - require.Error(t, err) - assert.Equal(t, - InvalidDataSegmentError{ - Index: 0, - ReadError: InvalidOpcodeError{ - Offset: 7, - Opcode: 0xff, - ReadError: nil, - }, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("missing end", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // segment count: 1 - 0x1, - // memory index - 0x1, - // i32.const 2 - 0x41, 0x2, - }) - require.Error(t, err) - assert.Equal(t, - InvalidDataSegmentError{ - Index: 0, - ReadError: MissingEndInstructionError{ - Offset: 9, - }, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("invalid init byte count", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // segment count: 1 - 0x1, - // memory index - 0x1, - // i32.const 2 - 0x41, 0x2, - // end - 0xb, - }) - require.Error(t, err) - assert.Equal(t, - InvalidDataSegmentError{ - Index: 0, - ReadError: InvalidDataSectionInitByteCountError{ - Offset: 10, - ReadError: io.EOF, - }, - }, - err, - ) - assert.Nil(t, segments) - }) - - t.Run("invalid init bytes", func(t *testing.T) { - - t.Parallel() - - segments, err := read([]byte{ - // section size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // segment count: 1 - 0x1, - // memory index - 0x1, - // i32.const 2 - 0x41, 0x2, - // end - 0xb, - // byte count - 0x2, - }) - require.Error(t, err) - assert.Equal(t, - InvalidDataSegmentError{ - Index: 0, - ReadError: io.EOF, - }, - err, - ) - assert.Nil(t, segments) - }) -} - -func TestWASMReader_readName(t *testing.T) { - - t.Parallel() - - read := func(data []byte) (string, error) { 
- b := Buffer{data: data} - r := NewWASMReader(&b) - name, err := r.readName() - if err != nil { - return "", err - } - require.Equal(t, offset(len(b.data)), b.offset) - return name, nil - } - - t.Run("valid", func(t *testing.T) { - - t.Parallel() - - name, err := read([]byte{ - // length - 0x5, - // "hello" - 0x68, 0x65, 0x6c, 0x6c, 0x6f, - }) - require.NoError(t, err) - - require.Equal(t, "hello", name) - }) - - t.Run("invalid length", func(t *testing.T) { - - t.Parallel() - - name, err := read(nil) - require.Error(t, err) - - assert.Equal(t, - InvalidNameLengthError{ - Offset: 0, - ReadError: io.EOF, - }, - err, - ) - assert.Empty(t, name) - }) - - t.Run("invalid name", func(t *testing.T) { - - t.Parallel() - - name, err := read([]byte{ - // length - 0x5, - // "he" - 0x68, 0x65, - }) - require.Error(t, err) - - assert.Equal(t, - IncompleteNameError{ - Offset: 1, - Expected: 5, - Actual: 2, - }, - err, - ) - assert.Empty(t, name) - }) - - t.Run("invalid non UTF-8", func(t *testing.T) { - - t.Parallel() - - name, err := read([]byte{ - // length - 0x3, - // name - 0xff, 0xfe, 0xfd, - }) - require.Error(t, err) - - assert.Equal(t, - InvalidNonUTF8NameError{ - Name: "\xff\xfe\xfd", - Offset: 1, - }, - err, - ) - assert.Empty(t, name) - }) -} - -func TestWASMReader_readInstruction(t *testing.T) { - - t.Parallel() - - t.Run("block, i32 result", func(t *testing.T) { - - t.Parallel() - - b := Buffer{ - data: []byte{ - // block - 0x02, - // i32 - 0x7f, - // i32.const - 0x41, - 0x01, - // end - 0x0b, - }, - offset: 0, - } - r := NewWASMReader(&b) - - expected := InstructionBlock{ - Block: Block{ - BlockType: ValueTypeI32, - Instructions1: []Instruction{ - InstructionI32Const{Value: 1}, - }, - Instructions2: nil, - }, - } - actual, err := r.readInstruction() - require.NoError(t, err) - - require.Equal(t, expected, actual) - require.Equal(t, offset(len(b.data)), b.offset) - }) - - t.Run("block, type index result", func(t *testing.T) { - - t.Parallel() - - b := Buffer{ - 
data: []byte{ - // block - 0x02, - // type index: 2 - 0x2, - // unreachable - 0x0, - // end - 0x0b, - }, - offset: 0, - } - r := NewWASMReader(&b) - - expected := InstructionBlock{ - Block: Block{ - BlockType: TypeIndexBlockType{TypeIndex: 2}, - Instructions1: []Instruction{ - InstructionUnreachable{}, - }, - Instructions2: nil, - }, - } - actual, err := r.readInstruction() - require.NoError(t, err) - - require.Equal(t, expected, actual) - require.Equal(t, offset(len(b.data)), b.offset) - }) - - t.Run("block, type index result, type index too large", func(t *testing.T) { - - t.Parallel() - - b := Buffer{ - data: []byte{ - // block - 0x02, - // type index: math.MaxUint32 + 1 - 0x80, 0x80, 0x80, 0x80, 0x10, - // unreachable - 0x0, - // end - 0x0b, - }, - offset: 0, - } - r := NewWASMReader(&b) - - _, err := r.readInstruction() - require.Equal(t, - InvalidBlockTypeTypeIndexError{ - TypeIndex: math.MaxUint32 + 1, - Offset: 1, - }, - err, - ) - }) - - t.Run("block, i32 result, second instructions", func(t *testing.T) { - - t.Parallel() - - b := Buffer{ - data: []byte{ - // block - 0x02, - // i32 - 0x7f, - // i32.const - 0x41, - 0x01, - // else - 0x05, - // i32.const - 0x41, - 0x02, - // end - 0x0b, - }, - offset: 0, - } - r := NewWASMReader(&b) - - _, err := r.readInstruction() - require.Equal(t, InvalidBlockSecondInstructionsError{ - Offset: 4, - }, err) - }) - - t.Run("loop, i32 result", func(t *testing.T) { - - t.Parallel() - - b := Buffer{ - data: []byte{ - // loop - 0x03, - // i32 - 0x7f, - // i32.const - 0x41, - 0x01, - // end - 0x0b, - }, - offset: 0, - } - r := NewWASMReader(&b) - - expected := InstructionLoop{ - Block: Block{ - BlockType: ValueTypeI32, - Instructions1: []Instruction{ - InstructionI32Const{Value: 1}, - }, - Instructions2: nil, - }, - } - actual, err := r.readInstruction() - require.NoError(t, err) - - require.Equal(t, expected, actual) - require.Equal(t, offset(len(b.data)), b.offset) - }) - - t.Run("loop, i32 result, second instructions", 
func(t *testing.T) { - - t.Parallel() - - b := Buffer{ - data: []byte{ - // loop - 0x03, - // i32 - 0x7f, - // i32.const - 0x41, - 0x01, - // else - 0x05, - // i32.const - 0x41, - 0x02, - // end - 0x0b, - }, - offset: 0, - } - r := NewWASMReader(&b) - - _, err := r.readInstruction() - require.Equal(t, InvalidBlockSecondInstructionsError{ - Offset: 4, - }, err) - }) - - t.Run("if, i32 result", func(t *testing.T) { - - t.Parallel() - - b := Buffer{ - data: []byte{ - // if - 0x04, - // i32 - 0x7f, - // i32.const - 0x41, - 0x01, - // end - 0x0b, - }, - offset: 0, - } - r := NewWASMReader(&b) - - expected := InstructionIf{ - Block: Block{ - BlockType: ValueTypeI32, - Instructions1: []Instruction{ - InstructionI32Const{Value: 1}, - }, - Instructions2: nil, - }, - } - actual, err := r.readInstruction() - require.NoError(t, err) - - require.Equal(t, expected, actual) - require.Equal(t, offset(len(b.data)), b.offset) - }) - - t.Run("if-else, i32 result", func(t *testing.T) { - - t.Parallel() - - b := Buffer{ - data: []byte{ - // loop - 0x04, - // i32 - 0x7f, - // i32.const - 0x41, - 0x01, - // else - 0x05, - // i32.const - 0x41, - 0x02, - // end - 0x0b, - }, - offset: 0, - } - r := NewWASMReader(&b) - - expected := InstructionIf{ - Block: Block{ - BlockType: ValueTypeI32, - Instructions1: []Instruction{ - InstructionI32Const{Value: 1}, - }, - Instructions2: []Instruction{ - InstructionI32Const{Value: 2}, - }, - }, - } - actual, err := r.readInstruction() - require.NoError(t, err) - - require.Equal(t, expected, actual) - require.Equal(t, offset(len(b.data)), b.offset) - }) - - t.Run("br_table", func(t *testing.T) { - - t.Parallel() - - b := Buffer{ - data: []byte{ - // br_table - 0x0e, - // number of branch depths - 0x04, - // 1. branch depth - 0x03, - // 2. branch depth - 0x02, - // 3. branch depth - 0x01, - // 4. 
branch depth - 0x00, - // default branch depth - 0x04, - }, - offset: 0, - } - r := NewWASMReader(&b) - - expected := InstructionBrTable{ - LabelIndices: []uint32{3, 2, 1, 0}, - DefaultLabelIndex: 4, - } - actual, err := r.readInstruction() - require.NoError(t, err) - - require.Equal(t, expected, actual) - require.Equal(t, offset(len(b.data)), b.offset) - }) -} - -func TestWASMReader_readNameSection(t *testing.T) { - - t.Parallel() - - b := Buffer{ - data: []byte{ - // section size: 37 (LEB128) - 0xa5, 0x80, 0x80, 0x80, 0x0, - // name length - 0x4, - // name = "name" - 0x6e, 0x61, 0x6d, 0x65, - // sub-section ID: module name = 0 - 0x0, - // sub-section size: 5 (LEB128) - 0x85, 0x80, 0x80, 0x80, 0x0, - // name length - 0x4, - // name = "test" - 0x74, 0x65, 0x73, 0x74, - // sub-section ID: function names = 1 - 0x1, - // sub-section size: 15 (LEB128) - 0x8f, 0x80, 0x80, 0x80, 0x0, - // name count - 0x2, - // function index = 0 - 0x0, - // name length - 0x7, - // name = "foo.bar" - 0x66, 0x6f, 0x6f, 0x2e, 0x62, 0x61, 0x72, - // function index = 1 - 0x1, - // name length - 0x3, - // name = "add" - 0x61, 0x64, 0x64, - }, - offset: 0, - } - - r := NewWASMReader(&b) - - err := r.readCustomSection() - require.NoError(t, err) - - require.Equal(t, offset(len(b.data)), b.offset) -} diff --git a/compiler/wasm/section.go b/compiler/wasm/section.go deleted file mode 100644 index e6609d33a1..0000000000 --- a/compiler/wasm/section.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -// sectionID is the ID of a section in the WASM binary. -// -// See https://webassembly.github.io/spec/core/binary/modules.html#sections: -// -// The following section ids are used: -// -// 0 = custom section -// 1 = type section -// 2 = import section -// 3 = function section -// 4 = table section -// 5 = memory section -// 6 = global section -// 7 = export section -// 8 = start section -// 9 = element section -// 10 = code section -// 11 = data section -type sectionID byte - -const ( - sectionIDCustom sectionID = 0 - sectionIDType sectionID = 1 - sectionIDImport sectionID = 2 - sectionIDFunction sectionID = 3 - sectionIDMemory sectionID = 5 - sectionIDExport sectionID = 7 - sectionIDStart sectionID = 8 - sectionIDCode sectionID = 10 - sectionIDData sectionID = 11 -) diff --git a/compiler/wasm/valuetype.go b/compiler/wasm/valuetype.go deleted file mode 100644 index f80b21c788..0000000000 --- a/compiler/wasm/valuetype.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -// ValueType is the type of a value -type ValueType byte - -const ( - // ValueTypeI32 is the `i32` type, - // the type of 32 bit integers. - // The value is the byte used in the WASM binary - ValueTypeI32 ValueType = 0x7F - - // ValueTypeI64 is the `i64` type, - // the type of 64 bit integers. - // The value is the byte used in the WASM binary - ValueTypeI64 ValueType = 0x7E - - // ValueTypeFuncRef is the `funcref` type, - // the type of first-class references to functions. - // The value is the byte used in the WASM binary - ValueTypeFuncRef ValueType = 0x70 - - // ValueTypeExternRef is the `funcref` type, - // the type of first-class references to objects owned by the embedder. - // The value is the byte used in the WASM binary - ValueTypeExternRef ValueType = 0x6F -) - -// AsValueType returns the value type for the given byte, -// or 0 if the byte is not a valid value type -func AsValueType(b byte) ValueType { - switch ValueType(b) { - case ValueTypeI32: - return ValueTypeI32 - - case ValueTypeI64: - return ValueTypeI64 - - case ValueTypeFuncRef: - return ValueTypeFuncRef - - case ValueTypeExternRef: - return ValueTypeExternRef - } - - return 0 -} - -func (ValueType) isBlockType() {} - -func (t ValueType) write(w *WASMWriter) error { - return w.buf.WriteByte(byte(t)) -} diff --git a/compiler/wasm/wasm.go b/compiler/wasm/wasm.go deleted file mode 100644 index 295f38da5b..0000000000 --- a/compiler/wasm/wasm.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// WebAssembly (https://webassembly.org/) is an open standard for portable executable programs. -// It is designed to be a portable compilation target for programming languages. -// -// The standard defines two formats for encoding WebAssembly programs ("modules"): -// -// - A machine-optimized binary format (WASM, https://webassembly.github.io/spec/core/binary/index.html), -// which is not designed to be used by humans -// -// - A human-readable text format (WAT, https://webassembly.github.io/spec/core/text/index.html) -// -// WebAssembly modules in either format can be converted into the other format. -// -// There exists also another textual format, WAST, which is a superset of WAT, -// but is not part of the official standard. -// -// Package wasm implements a representation of WebAssembly modules (Module) and related types, -// e.g. instructions (Instruction). -// -// Package wasm also implements a reader and writer for the binary format: -// -// - The reader (WASMReader) allows parsing a WebAssembly module in binary form ([]byte) -// into an representation of the module (Module). -// -// - The writer (WASMWriter) allows encoding the representation of the module (Module) -// to a WebAssembly program in binary form ([]byte). -// -// Package wasm does not currently provide a reader and writer for the textual format (WAT). -// -// Package wasm is not a compiler for Cadence programs, but rather a building block that allows -// reading and writing WebAssembly modules. 
-package wasm diff --git a/compiler/wasm/writer.go b/compiler/wasm/writer.go deleted file mode 100644 index 2ae17dd6ec..0000000000 --- a/compiler/wasm/writer.go +++ /dev/null @@ -1,714 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wasm - -import ( - "fmt" - "unicode/utf8" -) - -// WASMWriter allows writing WASM binaries -type WASMWriter struct { - buf *Buffer - WriteNames bool -} - -func NewWASMWriter(buf *Buffer) *WASMWriter { - return &WASMWriter{ - buf: buf, - } -} - -// writeMagicAndVersion writes the magic byte sequence and version at the beginning of the WASM binary -func (w *WASMWriter) writeMagicAndVersion() error { - err := w.buf.WriteBytes(wasmMagic) - if err != nil { - return err - } - return w.buf.WriteBytes(wasmVersion) -} - -// writeSection writes a section in the WASM binary, with the given section ID and the given content. -// The content is a function that writes the contents of the section. 
-func (w *WASMWriter) writeSection(sectionID sectionID, content func() error) error { - // write the section ID - err := w.buf.WriteByte(byte(sectionID)) - if err != nil { - return err - } - - // write the size and the content - return w.writeContentWithSize(content) -} - -// writeContentWithSize writes the size of the content, -// and the content itself -func (w *WASMWriter) writeContentWithSize(content func() error) error { - - // write the temporary placeholder for the size - sizeOffset, err := w.buf.writeFixedUint32LEB128Space() - if err != nil { - return err - } - - // write the content - err = content() - if err != nil { - return err - } - - // write the actual size into the size placeholder - return w.buf.writeUint32LEB128SizeAt(sizeOffset) -} - -// writeCustomSection writes a custom section with the given name and content. -// The content is a function that writes the contents of the section. -func (w *WASMWriter) writeCustomSection(name string, content func() error) error { - return w.writeSection(sectionIDCustom, func() error { - err := w.writeName(name) - if err != nil { - return err - } - - return content() - }) -} - -// writeTypeSection writes the section that declares all function types -// so they can be referenced by index -func (w *WASMWriter) writeTypeSection(funcTypes []*FunctionType) error { - return w.writeSection(sectionIDType, func() error { - - // write the number of types - err := w.buf.writeUint32LEB128(uint32(len(funcTypes))) - if err != nil { - return err - } - - // write each type - for _, funcType := range funcTypes { - err = w.writeFuncType(funcType) - if err != nil { - return err - } - } - - return nil - }) -} - -// writeFuncType writes the function type -func (w *WASMWriter) writeFuncType(funcType *FunctionType) error { - // write the type - err := w.buf.WriteByte(functionTypeIndicator) - if err != nil { - return err - } - - // write the number of parameters - err = w.buf.writeUint32LEB128(uint32(len(funcType.Params))) - if err != 
nil { - return err - } - - // write the type of each parameter - for _, paramType := range funcType.Params { - err = w.buf.WriteByte(byte(paramType)) - if err != nil { - return err - } - } - - // write the number of results - err = w.buf.writeUint32LEB128(uint32(len(funcType.Results))) - if err != nil { - return err - } - - // write the type of each result - for _, resultType := range funcType.Results { - err = w.buf.WriteByte(byte(resultType)) - if err != nil { - return err - } - } - - return nil -} - -// writeImportSection writes the section that declares all imports -func (w *WASMWriter) writeImportSection(imports []*Import) error { - return w.writeSection(sectionIDImport, func() error { - - // write the number of imports - err := w.buf.writeUint32LEB128(uint32(len(imports))) - if err != nil { - return err - } - - // write each import - for _, im := range imports { - err = w.writeImport(im) - if err != nil { - return err - } - } - - return nil - }) -} - -// writeImport writes the import -func (w *WASMWriter) writeImport(im *Import) error { - // write the module - err := w.writeName(im.Module) - if err != nil { - return err - } - - // write the name - err = w.writeName(im.Name) - if err != nil { - return err - } - - // TODO: add support for tables, memories, and globals. adjust name section! - // write the type indicator - err = w.buf.WriteByte(byte(importIndicatorFunction)) - if err != nil { - return err - } - - // write the type index - return w.buf.writeUint32LEB128(im.TypeIndex) -} - -// writeFunctionSection writes the section that declares the types of functions. 
-// The bodies of these functions will later be provided in the code section -func (w *WASMWriter) writeFunctionSection(functions []*Function) error { - return w.writeSection(sectionIDFunction, func() error { - // write the number of functions - err := w.buf.writeUint32LEB128(uint32(len(functions))) - if err != nil { - return err - } - - // write the type index for each function - for _, function := range functions { - err = w.buf.writeUint32LEB128(function.TypeIndex) - if err != nil { - return err - } - } - - return nil - }) -} - -// writeMemorySection writes the section that declares all memories -func (w *WASMWriter) writeMemorySection(memories []*Memory) error { - return w.writeSection(sectionIDMemory, func() error { - - // write the number of memories - err := w.buf.writeUint32LEB128(uint32(len(memories))) - if err != nil { - return err - } - - // write each memory - for _, memory := range memories { - err = w.writeMemory(memory) - if err != nil { - return err - } - } - - return nil - }) -} - -// writeMemory writes the memory -func (w *WASMWriter) writeMemory(memory *Memory) error { - return w.writeLimit(memory.Max, memory.Min) -} - -func (w *WASMWriter) writeLimit(max *uint32, min uint32) error { - - // write the indicator - var indicator = limitIndicatorNoMax - if max != nil { - indicator = limitIndicatorMax - } - - err := w.buf.WriteByte(byte(indicator)) - if err != nil { - return err - } - - // write the minimum - err = w.buf.writeUint32LEB128(min) - if err != nil { - return err - } - - // write the maximum - if max != nil { - err := w.buf.writeUint32LEB128(*max) - if err != nil { - return err - } - } - - return nil -} - -// writeExportSection writes the section that declares all exports -func (w *WASMWriter) writeExportSection(exports []*Export) error { - return w.writeSection(sectionIDExport, func() error { - - // write the number of exports - err := w.buf.writeUint32LEB128(uint32(len(exports))) - if err != nil { - return err - } - - // write each export 
- for _, export := range exports { - err = w.writeExport(export) - if err != nil { - return err - } - } - - return nil - }) -} - -// writeExport writes the export -func (w *WASMWriter) writeExport(export *Export) error { - - // write the name - err := w.writeName(export.Name) - if err != nil { - return err - } - - // TODO: add support for tables and globals. adjust name section! - - var indicator exportIndicator - var index uint32 - - switch descriptor := export.Descriptor.(type) { - case FunctionExport: - indicator = exportIndicatorFunction - index = descriptor.FunctionIndex - case MemoryExport: - indicator = exportIndicatorMemory - index = descriptor.MemoryIndex - default: - return fmt.Errorf("unsupported export descripor: %#+v", descriptor) - } - - // write the indicator - err = w.buf.WriteByte(byte(indicator)) - if err != nil { - return err - } - - // write the index - return w.buf.writeUint32LEB128(index) -} - -// writeStartSection writes the section that declares the start function -func (w *WASMWriter) writeStartSection(funcIndex uint32) error { - return w.writeSection(sectionIDStart, func() error { - // write the index of the start function - return w.buf.writeUint32LEB128(funcIndex) - }) -} - -// writeCodeSection writes the section that provides the function bodies for the functions -// declared by the function section (which only provides the function types) -func (w *WASMWriter) writeCodeSection(functions []*Function) error { - return w.writeSection(sectionIDCode, func() error { - // write the number of code entries (one for each function) - err := w.buf.writeUint32LEB128(uint32(len(functions))) - if err != nil { - return err - } - - // write the code for each function - for _, function := range functions { - - err := w.writeFunctionBody(function.Code) - if err != nil { - return err - } - } - - return nil - }) -} - -// writeFunctionBody writes the body of the function -func (w *WASMWriter) writeFunctionBody(code *Code) error { - return 
w.writeContentWithSize(func() error { - - // write the number of locals - err := w.buf.writeUint32LEB128(uint32(len(code.Locals))) - if err != nil { - return err - } - - // TODO: run-length encode - // write each local - for _, localValType := range code.Locals { - err = w.buf.writeUint32LEB128(1) - if err != nil { - return err - } - - err = w.buf.WriteByte(byte(localValType)) - if err != nil { - return err - } - } - - err = w.writeInstructions(code.Instructions) - if err != nil { - return err - } - - return w.writeOpcode(opcodeEnd) - }) -} - -// writeInstructions writes an instruction sequence -func (w *WASMWriter) writeInstructions(instructions []Instruction) error { - for _, instruction := range instructions { - err := instruction.write(w) - if err != nil { - return err - } - } - return nil -} - -// writeOpcode writes the opcode of an instruction -func (w *WASMWriter) writeOpcode(opcodes ...opcode) error { - for _, b := range opcodes { - err := w.buf.WriteByte(byte(b)) - if err != nil { - return err - } - } - return nil -} - -// writeName writes a name, a UTF-8 byte sequence -func (w *WASMWriter) writeName(name string) error { - - // ensure the name is valid UTF-8 - if !utf8.ValidString(name) { - return InvalidNonUTF8NameError{ - Name: name, - Offset: int(w.buf.offset), - } - } - - // write the length - err := w.buf.writeUint32LEB128(uint32(len(name))) - if err != nil { - return err - } - - // write the name - return w.buf.WriteBytes([]byte(name)) -} - -// writeBlockInstructionArgument writes a block instruction argument -func (w *WASMWriter) writeBlockInstructionArgument(block Block, allowElse bool) error { - - // write the block type - if block.BlockType != nil { - err := block.BlockType.write(w) - if err != nil { - return err - } - } else { - err := w.buf.WriteByte(emptyBlockType) - if err != nil { - return err - } - } - - // write the first sequence of instructions - err := w.writeInstructions(block.Instructions1) - if err != nil { - return err - } - - // 
write the second sequence of instructions. - // in an if-instruction, this is the else branch. - // in other instructions, it is not allowed. - - if len(block.Instructions2) > 0 { - if !allowElse { - return InvalidBlockSecondInstructionsError{ - Offset: int(w.buf.offset), - } - } - - err := w.writeOpcode(opcodeElse) - if err != nil { - return err - } - - err = w.writeInstructions(block.Instructions2) - if err != nil { - return err - } - } - - // write the implicit end instruction / opcode - - return InstructionEnd{}.write(w) -} - -const customSectionNameName = "name" - -// writeNameSection writes the section which declares -// the names of the module, functions, and locals -func (w *WASMWriter) writeNameSection(moduleName string, imports []*Import, functions []*Function) error { - return w.writeCustomSection(customSectionNameName, func() error { - - // write the module name sub-section - err := w.writeNameSectionModuleNameSubSection(moduleName) - if err != nil { - return err - } - - // write the function names sub-section - return w.writeNameSectionFunctionNamesSubSection(imports, functions) - }) -} - -// nameSubSectionID is the ID of a sub-section in the name section of the WASM binary -type nameSubSectionID byte - -const ( - nameSubSectionIDModuleName nameSubSectionID = 0 - nameSubSectionIDFunctionNames nameSubSectionID = 1 - // TODO: - //nameSubSectionIDLocalNames nameSubSectionID = 2 -) - -// writeNameSubSection writes a sub-section in the name section of the WASM binary, -// with the given sub-section ID and the given content. -// The content is a function that writes the contents of the section. 
-func (w *WASMWriter) writeNameSubSection(nameSubSectionID nameSubSectionID, content func() error) error { - // write the name sub-section ID - err := w.buf.WriteByte(byte(nameSubSectionID)) - if err != nil { - return err - } - - // write the size and the content - return w.writeContentWithSize(content) -} - -// writeNameSectionModuleName writes the module name sub-section in the name section of the WASM binary -func (w *WASMWriter) writeNameSectionModuleNameSubSection(moduleName string) error { - return w.writeNameSubSection(nameSubSectionIDModuleName, func() error { - return w.writeName(moduleName) - }) -} - -// writeNameSectionFunctionNames writes the module name sub-section in the name section of the WASM binary -func (w *WASMWriter) writeNameSectionFunctionNamesSubSection(imports []*Import, functions []*Function) error { - return w.writeNameSubSection(nameSubSectionIDFunctionNames, func() error { - - // write the number of function names - count := len(imports) + len(functions) - - err := w.buf.writeUint32LEB128(uint32(count)) - if err != nil { - return err - } - - // write the name map entries for the imports - - var index uint32 - - for _, imp := range imports { - - // write the index - err := w.buf.writeUint32LEB128(index) - if err != nil { - return err - } - - index++ - - // write the name - - err = w.writeName(imp.FullName()) - if err != nil { - return err - } - } - - // write the name map entries for the functions - - for _, function := range functions { - - // write the index - err := w.buf.writeUint32LEB128(index) - if err != nil { - return err - } - - index++ - - // write the name - - err = w.writeName(function.Name) - if err != nil { - return err - } - } - - return nil - }) -} - -// writeDataSection writes the section that declares the data segments -func (w *WASMWriter) writeDataSection(segments []*Data) error { - return w.writeSection(sectionIDData, func() error { - // write the number of data segments - err := 
w.buf.writeUint32LEB128(uint32(len(segments))) - if err != nil { - return err - } - - // write each data segment - for _, segment := range segments { - err = w.writeDataSegment(segment) - if err != nil { - return err - } - } - - return nil - }) -} - -// writeDataSegment writes the data segment -func (w *WASMWriter) writeDataSegment(segment *Data) error { - - // write the memory index - err := w.buf.writeUint32LEB128(segment.MemoryIndex) - if err != nil { - return err - } - - // write the offset instructions - err = w.writeInstructions(segment.Offset) - if err != nil { - return err - } - - err = w.writeOpcode(opcodeEnd) - if err != nil { - return err - } - - // write the number of bytes - err = w.buf.writeUint32LEB128(uint32(len(segment.Init))) - if err != nil { - return err - } - - // write each byte - for _, b := range segment.Init { - err = w.buf.WriteByte(b) - if err != nil { - return err - } - } - - return nil -} - -func (w *WASMWriter) WriteModule(module *Module) error { - if err := w.writeMagicAndVersion(); err != nil { - return err - } - if len(module.Types) > 0 { - if err := w.writeTypeSection(module.Types); err != nil { - return err - } - } - if len(module.Imports) > 0 { - if err := w.writeImportSection(module.Imports); err != nil { - return err - } - } - if len(module.Functions) > 0 { - if err := w.writeFunctionSection(module.Functions); err != nil { - return err - } - } - if len(module.Memories) > 0 { - if err := w.writeMemorySection(module.Memories); err != nil { - return err - } - } - if len(module.Exports) > 0 { - if err := w.writeExportSection(module.Exports); err != nil { - return err - } - } - if module.StartFunctionIndex != nil { - if err := w.writeStartSection(*module.StartFunctionIndex); err != nil { - return err - } - } - if len(module.Functions) > 0 { - if err := w.writeCodeSection(module.Functions); err != nil { - return err - } - } - if len(module.Data) > 0 { - if err := w.writeDataSection(module.Data); err != nil { - return err - } - } - if 
w.WriteNames { - if err := w.writeNameSection( - module.Name, - module.Imports, - module.Functions, - ); err != nil { - return err - } - } - - return nil -} diff --git a/compiler/wasm/writer_test.go b/compiler/wasm/writer_test.go deleted file mode 100644 index 2b79997d25..0000000000 --- a/compiler/wasm/writer_test.go +++ /dev/null @@ -1,977 +0,0 @@ -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package wasm - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestWASMWriter_writeMagicAndVersion(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - err := w.writeMagicAndVersion() - require.NoError(t, err) - - require.Equal(t, - []byte{ - // magic - 0x0, 0x61, 0x73, 0x6d, - // version - 0x1, 0x0, 0x0, 0x0, - }, - b.data, - ) -} - -func TestWASMWriter_writeTypeSection(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - err := w.writeTypeSection([]*FunctionType{ - { - Params: []ValueType{ValueTypeI32, ValueTypeI32}, - Results: []ValueType{ValueTypeI32}, - }, - }) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // section ID: Type = 1 - 0x1, - // section size: 7 (LEB128) - 0x87, 0x80, 0x80, 0x80, 0x0, - // type count - 0x1, - // function type indicator - 0x60, - // parameter count: 2 - 0x2, - // type of parameter 1: i32 - 0x7f, - // type of parameter 2: i32 - 0x7f, - // return value count: 1 - 0x1, - // type of return value 1: i32 - 0x7f, - }, - b.data, - ) -} - -func TestWASMWriter_writeImportSection(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - imports := []*Import{ - { - Module: "foo", - Name: "bar", - TypeIndex: 1, - }, - } - - err := w.writeImportSection(imports) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // section ID: Import = 2 - 0x2, - // section size: 11 (LEB128) - 0x8b, 0x80, 0x80, 0x80, 0x0, - // import count: 1 - 0x1, - // module length - 0x3, - // module = "foo" - 0x66, 0x6f, 0x6f, - // name length - 0x3, - // name = "bar" - 0x62, 0x61, 0x72, - // type indicator: function = 0 - 0x0, - // type index of function: 0 - 0x1, - }, - b.data, - ) -} - -func TestWASMWriter_writeFunctionSection(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - functions := []*Function{ - { - // not used, just for testing - Name: "add", - TypeIndex: 0, - // not 
used, just for testing - Code: &Code{ - Locals: []ValueType{ - ValueTypeI32, - }, - Instructions: []Instruction{ - InstructionLocalGet{LocalIndex: 0}, - InstructionLocalGet{LocalIndex: 1}, - InstructionI32Add{}, - }, - }, - }, - } - - err := w.writeFunctionSection(functions) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // section ID: Function = 3 - 0x3, - // section size: 2 (LEB128) - 0x82, 0x80, 0x80, 0x80, 0x0, - // function count: 1 - 0x1, - // type index of function: 0 - 0x0, - }, - b.data, - ) -} - -func TestWASMWriter_writeMemorySection(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - memories := []*Memory{ - { - Min: 1024, - Max: nil, - }, - { - Min: 2048, - Max: func() *uint32 { - var max uint32 = 2 - return &max - }(), - }, - } - - err := w.writeMemorySection(memories) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // section ID: Import = 5 - 0x5, - // section size: 8 (LEB128) - 0x88, 0x80, 0x80, 0x80, 0x0, - // memory count: 2 - 0x2, - // memory type / limit: no max - 0x0, - // limit 1 min: 1024 (LEB128) - 0x80, 0x8, - // memory type / limit: max - 0x1, - // limit 2 min: 2048 (LEB128) - 0x80, 0x10, - // limit 2 max - 0x2, - }, - b.data, - ) -} - -func TestWASMWriter_writeExportSection(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - exports := []*Export{ - { - Name: "foo", - Descriptor: FunctionExport{ - FunctionIndex: 1, - }, - }, - } - - err := w.writeExportSection(exports) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // section ID: Export = 7 - 0x7, - // section size: 7 (LEB128) - 0x87, 0x80, 0x80, 0x80, 0x0, - // import count: 1 - 0x1, - // name length - 0x3, - // name = "foo" - 0x66, 0x6f, 0x6f, - // type indicator: function = 0 - 0x0, - // index of function: 1 - 0x1, - }, - b.data, - ) -} - -func TestWASMWriter_writeStartSection(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - err := w.writeStartSection(1) - 
require.NoError(t, err) - - require.Equal(t, - []byte{ - // Section ID: Code = 8 - 0x8, - // section size: 15 (LEB128) - 0x81, 0x80, 0x80, 0x80, 0x0, - // function index: 1 - 0x1, - }, - b.data, - ) -} - -func TestWASMWriter_writeCodeSection(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - functions := []*Function{ - { - // not used, just for testing - Name: "add", - // not used, just for testing - TypeIndex: 0, - Code: &Code{ - Locals: []ValueType{ - ValueTypeI32, - }, - Instructions: []Instruction{ - InstructionLocalGet{LocalIndex: 0}, - InstructionLocalGet{LocalIndex: 1}, - InstructionI32Add{}, - }, - }, - }, - } - - err := w.writeCodeSection(functions) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // Section ID: Code = 10 - 0xa, - // section size: 15 (LEB128) - 0x8f, 0x80, 0x80, 0x80, 0x0, - // function count: 1 - 0x1, - // code size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // number of locals: 1 - 0x1, - // number of locals with this type: 1 - 0x1, - // local type: i32 - 0x7f, - // opcode: local.get, 0 - 0x20, 0x0, - // opcode: local.get 1 - 0x20, 0x1, - // opcode: i32.add - 0x6a, - // opcode: end - 0xb, - }, - b.data, - ) -} - -func TestWASMWriter_writeDataSection(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - dataSegments := []*Data{ - { - MemoryIndex: 1, - Offset: []Instruction{ - InstructionI32Const{Value: 2}, - }, - Init: []byte{3, 4, 5}, - }, - } - - err := w.writeDataSection(dataSegments) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // section ID: Import = 11 - 0xB, - // section size: 9 (LEB128) - 0x89, 0x80, 0x80, 0x80, 0x0, - // segment count: 1 - 0x1, - // memory index - 0x1, - // i32.const 2 - 0x41, 0x2, - // end - 0xb, - // byte count - 0x3, - // init (bytes 0x3, 0x4, 0x5) - 0x3, 0x4, 0x5, - }, - b.data, - ) -} - -func TestWASMWriter_writeName(t *testing.T) { - - t.Parallel() - - t.Run("valid", func(t *testing.T) { - - t.Parallel() - - var b Buffer - w := 
NewWASMWriter(&b) - - err := w.writeName("hello") - require.NoError(t, err) - - require.Equal(t, - []byte{ - // length - 0x5, - // "hello" - 0x68, 0x65, 0x6c, 0x6c, 0x6f, - }, - b.data, - ) - }) - - t.Run("invalid", func(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - name := string([]byte{0xff, 0xfe, 0xfd}) - err := w.writeName(name) - require.Error(t, err) - - assert.Equal(t, - InvalidNonUTF8NameError{ - Name: name, - Offset: 0, - }, - err, - ) - - assert.Empty(t, b.data) - }) -} - -func TestWASMWriter_writeNameSection(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - imports := []*Import{ - { - Module: "foo", - Name: "bar", - }, - } - - functions := []*Function{ - { - Name: "add", - }, - } - - err := w.writeNameSection("test", imports, functions) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // Section ID: Custom = 0 - 0x0, - // section size: 37 (LEB128) - 0xa5, 0x80, 0x80, 0x80, 0x0, - // name length - 0x4, - // name = "name" - 0x6e, 0x61, 0x6d, 0x65, - // sub-section ID: module name = 0 - 0x0, - // sub-section size: 5 (LEB128) - 0x85, 0x80, 0x80, 0x80, 0x0, - // name length - 0x4, - // name = "test" - 0x74, 0x65, 0x73, 0x74, - // sub-section ID: function names = 1 - 0x1, - // sub-section size: 15 (LEB128) - 0x8f, 0x80, 0x80, 0x80, 0x0, - // name count - 0x2, - // function index = 0 - 0x0, - // name length - 0x7, - // name = "foo.bar" - 0x66, 0x6f, 0x6f, 0x2e, 0x62, 0x61, 0x72, - // function index = 1 - 0x1, - // name length - 0x3, - // name = "add" - 0x61, 0x64, 0x64, - }, - b.data, - ) -} - -func TestWASMWriterReader(t *testing.T) { - - t.Skip("WIP") - - t.Parallel() - - var b Buffer - - w := NewWASMWriter(&b) - w.WriteNames = true - - module := &Module{ - Name: "test", - Types: []*FunctionType{ - { - Params: nil, - Results: nil, - }, - { - Params: []ValueType{ValueTypeI32, ValueTypeI32}, - Results: []ValueType{ValueTypeI32}, - }, - }, - Imports: []*Import{ - { - Module: "env", - Name: 
"add", - TypeIndex: 1, - }, - }, - Functions: []*Function{ - { - Name: "start", - TypeIndex: 0, - Code: &Code{ - Instructions: []Instruction{ - InstructionReturn{}, - }, - }, - }, - { - Name: "add", - TypeIndex: 1, - Code: &Code{ - // not used, just for testing - Locals: []ValueType{ - ValueTypeI32, - }, - Instructions: []Instruction{ - InstructionLocalGet{LocalIndex: 0}, - InstructionLocalGet{LocalIndex: 1}, - InstructionI32Add{}, - }, - }, - }, - }, - Memories: []*Memory{ - { - Min: 1024, - Max: func() *uint32 { - var max uint32 = 2048 - return &max - }(), - }, - }, - Exports: []*Export{ - { - Name: "add", - Descriptor: FunctionExport{ - FunctionIndex: 0, - }, - }, - { - Name: "mem", - Descriptor: MemoryExport{ - MemoryIndex: 0, - }, - }, - }, - StartFunctionIndex: func() *uint32 { - var funcIndex uint32 = 1 - return &funcIndex - }(), - Data: []*Data{ - { - MemoryIndex: 0, - Offset: []Instruction{ - InstructionI32Const{Value: 0}, - }, - Init: []byte{0x0, 0x1, 0x2, 0x3}, - }, - }, - } - - err := w.WriteModule(module) - require.NoError(t, err) - - expected := []byte{ - // magic - 0x0, 0x61, 0x73, 0x6d, - // version - 0x1, 0x0, 0x0, 0x0, - // type section - 0x1, - 0x8a, 0x80, 0x80, 0x80, 0x0, - 0x2, - 0x60, 0x0, 0x0, - 0x60, 0x2, 0x7f, 0x7f, 0x1, 0x7f, - // import section - 0x2, - 0x8b, 0x80, 0x80, 0x80, 0x0, - 0x1, - 0x3, 0x65, 0x6e, 0x76, 0x3, 0x61, 0x64, 0x64, 0x0, 0x1, - // function section - 0x3, - 0x83, 0x80, 0x80, 0x80, 0x0, - 0x2, - 0x0, - 0x1, - // memory section - 0x5, - 0x86, 0x80, 0x80, 0x80, 0x0, - 0x1, - 0x1, 0x80, 0x8, 0x80, 0x10, - // export section - 0x07, - 0x8d, 0x80, 0x80, 0x80, 0x00, - 0x02, - 0x03, 0x61, 0x64, 0x64, - 0x00, 0x00, - 0x03, 0x6d, 0x65, 0x6d, - 0x02, 0x00, - // start section - 0x8, - 0x81, 0x80, 0x80, 0x80, 0x0, - 0x1, - // code section - 0xa, - 0x97, 0x80, 0x80, 0x80, 0x0, - 0x2, - 0x83, 0x80, 0x80, 0x80, 0x0, 0x0, 0xf, 0xb, - 0x89, 0x80, 0x80, 0x80, 0x0, 0x1, 0x1, 0x7f, 0x20, 0x0, 0x20, 0x1, 0x6a, 0xb, - // data section - 0xb, - 
0x8a, 0x80, 0x80, 0x80, 0x0, - 0x1, - 0x0, - 0x41, 0x0, 0xb, - 0x4, - 0x0, 0x1, 0x2, 0x3, - // name section - 0x0, - 0xac, 0x80, 0x80, 0x80, 0x0, - 0x4, 0x6e, 0x61, 0x6d, 0x65, 0x0, 0x85, 0x80, - 0x80, 0x80, 0x0, 0x4, 0x74, 0x65, 0x73, 0x74, - 0x1, 0x96, 0x80, 0x80, 0x80, 0x0, 0x3, 0x0, - 0x7, 0x65, 0x6e, 0x76, 0x2e, 0x61, 0x64, 0x64, - 0x1, 0x5, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x2, 0x3, 0x61, 0x64, 0x64, - } - require.Equal(t, - expected, - b.data, - ) - - require.Equal(t, - `(module $test - (type (;0;) (func)) - (type (;1;) (func (param i32 i32) (result i32))) - (import "env" "add" (func $env.add (type 1))) - (func $start (type 0) - return) - (func $add (type 1) (param i32 i32) (result i32) - (local i32) - local.get 0 - local.get 1 - i32.add) - (memory (;0;) 1024 2048) - (export "add" (func $env.add)) - (export "mem" (memory 0)) - (start $start) - (data (;0;) (i32.const 0) "\00\01\02\03")) -`, - WASM2WAT(b.data), - ) - - b.offset = 0 - - r := NewWASMReader(&b) - err = r.ReadModule() - require.NoError(t, err) - - // prepare the expected module: - // remove all names, as the name section is not read yet - - module.Name = "" - for _, function := range module.Functions { - function.Name = "" - } - - require.Equal(t, - module, - &r.Module, - ) - - require.Equal(t, - offset(len(expected)), - b.offset, - ) -} - -func TestWASMWriter_writeInstruction(t *testing.T) { - - t.Parallel() - - t.Run("block, i32 result", func(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - instruction := InstructionBlock{ - Block: Block{ - BlockType: ValueTypeI32, - Instructions1: []Instruction{ - InstructionI32Const{Value: 1}, - }, - Instructions2: nil, - }, - } - err := instruction.write(w) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // block - 0x02, - // i32 - 0x7f, - // i32.const - 0x41, - 0x01, - // end - 0x0b, - }, - b.data, - ) - }) - - t.Run("block, type index result", func(t *testing.T) { - - t.Parallel() - - var b Buffer - w := 
NewWASMWriter(&b) - - instruction := InstructionBlock{ - Block: Block{ - BlockType: TypeIndexBlockType{TypeIndex: 2}, - Instructions1: []Instruction{ - InstructionUnreachable{}, - }, - Instructions2: nil, - }, - } - err := instruction.write(w) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // block - 0x02, - // type index: 2 - 0x2, - // unreachable - 0x0, - // end - 0x0b, - }, - b.data, - ) - }) - - t.Run("block, i32 result, second instructions", func(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - instruction := InstructionBlock{ - Block: Block{ - BlockType: ValueTypeI32, - Instructions1: []Instruction{ - InstructionI32Const{Value: 1}, - }, - Instructions2: []Instruction{ - InstructionI32Const{Value: 2}, - }, - }, - } - err := instruction.write(w) - require.Equal(t, InvalidBlockSecondInstructionsError{ - Offset: 4, - }, err) - }) - - t.Run("loop, i32 result", func(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - instruction := InstructionLoop{ - Block: Block{ - BlockType: ValueTypeI32, - Instructions1: []Instruction{ - InstructionI32Const{Value: 1}, - }, - Instructions2: nil, - }, - } - err := instruction.write(w) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // loop - 0x03, - // i32 - 0x7f, - // i32.const - 0x41, - 0x01, - // end - 0x0b, - }, - b.data, - ) - }) - - t.Run("loop, i32 result, second instructions", func(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - instruction := InstructionLoop{ - Block: Block{ - BlockType: ValueTypeI32, - Instructions1: []Instruction{ - InstructionI32Const{Value: 1}, - }, - Instructions2: []Instruction{ - InstructionI32Const{Value: 2}, - }, - }, - } - err := instruction.write(w) - require.Equal(t, InvalidBlockSecondInstructionsError{ - Offset: 4, - }, err) - }) - - t.Run("if, i32 result", func(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - instruction := InstructionIf{ - Block: 
Block{ - BlockType: ValueTypeI32, - Instructions1: []Instruction{ - InstructionI32Const{Value: 1}, - }, - Instructions2: nil, - }, - } - err := instruction.write(w) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // if - 0x04, - // i32 - 0x7f, - // i32.const - 0x41, - 0x01, - // end - 0x0b, - }, - b.data, - ) - }) - - t.Run("if-else, i32 result", func(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - instruction := InstructionIf{ - Block: Block{ - BlockType: ValueTypeI32, - Instructions1: []Instruction{ - InstructionI32Const{Value: 1}, - }, - Instructions2: []Instruction{ - InstructionI32Const{Value: 2}, - }, - }, - } - err := instruction.write(w) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // ii - 0x04, - // i32 - 0x7f, - // i32.const - 0x41, - 0x01, - // else - 0x05, - // i32.const - 0x41, - 0x02, - // end - 0x0b, - }, - b.data, - ) - }) - - t.Run("br_table", func(t *testing.T) { - - t.Parallel() - - var b Buffer - w := NewWASMWriter(&b) - - instruction := InstructionBrTable{ - LabelIndices: []uint32{3, 2, 1, 0}, - DefaultLabelIndex: 4, - } - err := instruction.write(w) - require.NoError(t, err) - - require.Equal(t, - []byte{ - // br_table - 0x0e, - // number of branch depths - 0x04, - // 1. branch depth - 0x03, - // 2. branch depth - 0x02, - // 3. branch depth - 0x01, - // 4. branch depth - 0x00, - // default branch depth - 0x04, - }, - b.data, - ) - }) -} diff --git a/errors/errors.go b/errors/errors.go index d465852747..32dda055d0 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -30,6 +30,8 @@ func NewUnreachableError() InternalError { return NewUnexpectedError("unreachable") } +const InternalErrorMessagePrefix = "internal error:" + // InternalError is an implementation error, e.g: an unreachable code path (UnreachableError). // A program should never throw an InternalError in an ideal world. 
// @@ -174,9 +176,18 @@ func (e UnexpectedError) Unwrap() error { func (e UnexpectedError) Error() string { message := e.Err.Error() if len(e.Stack) == 0 { - return fmt.Sprintf("unexpected error: %s", message) + return fmt.Sprintf( + "%s unexpected: %s", + InternalErrorMessagePrefix, + message, + ) } else { - return fmt.Sprintf("unexpected error: %s\n%s", message, e.Stack) + return fmt.Sprintf( + "%s unexpected: %s\n%s", + InternalErrorMessagePrefix, + message, + e.Stack, + ) } } diff --git a/go.mod b/go.mod index 7db9c01457..11ef930d1e 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,9 @@ module github.com/onflow/cadence -go 1.22 +go 1.23 require ( github.com/bits-and-blooms/bitset v1.5.0 - github.com/bytecodealliance/wasmtime-go/v7 v7.0.0 github.com/c-bata/go-prompt v0.2.6 github.com/dave/dst v0.27.2 github.com/fxamacker/cbor/v2 v2.4.1-0.20230228173756-c0c9f774e40c @@ -13,10 +12,10 @@ require ( github.com/kr/pretty v0.3.1 github.com/leanovate/gopter v0.2.9 github.com/logrusorgru/aurora/v4 v4.0.0 - github.com/onflow/atree v0.8.0 + github.com/onflow/atree v0.9.0 github.com/rivo/uniseg v0.4.4 github.com/schollz/progressbar/v3 v3.13.1 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c github.com/tidwall/pretty v1.2.1 github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d @@ -50,16 +49,18 @@ require ( github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mattn/go-tty v0.0.3 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect github.com/pkg/term v1.2.0-beta.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/assert v1.3.0 // indirect - github.com/zeebo/blake3 v0.2.3 // indirect + github.com/zeebo/blake3 v0.2.4 
// indirect golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/term v0.25.0 // indirect gonum.org/v1/gonum v0.6.1 // indirect + gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index b7f332a032..46e69fa99f 100644 --- a/go.sum +++ b/go.sum @@ -5,8 +5,6 @@ github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3 github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/bits-and-blooms/bitset v1.5.0 h1:NpE8frKRLGHIcEzkR+gZhiioW1+WbYV6fKwD6ZIpQT8= github.com/bits-and-blooms/bitset v1.5.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bytecodealliance/wasmtime-go/v7 v7.0.0 h1:/rBNjgFju2HCZnkPb1eL+W4GBwP8DMbaQu7i+GR9DH4= -github.com/bytecodealliance/wasmtime-go/v7 v7.0.0/go.mod h1:bu6fic7trDt20w+LMooX7j3fsOwv4/ln6j8gAdP6vmA= github.com/c-bata/go-prompt v0.2.6 h1:POP+nrHE+DfLYx370bedwNhsqmpCUynWPxuHi0C5vZI= github.com/c-bata/go-prompt v0.2.6/go.mod h1:/LMAke8wD2FsNu9EXNdHxNLbd9MedkPnCdfpU9wwHfY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -39,7 +37,6 @@ github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/k0kubun/pp/v3 v3.2.0 h1:h33hNTZ9nVFNP3u2Fsgz8JXiF5JINoZfFq4SvKJwNcs= github.com/k0kubun/pp/v3 v3.2.0/go.mod h1:ODtJQbQcIRfAD3N+theGCV1m/CBxweERz2dapdz1EwA= -github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.0 h1:4ZexSFt8agMNzNisrsilL6RClWDC5YJnLHNIfTy4iuc= github.com/klauspost/cpuid/v2 v2.2.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/kodova/html-to-markdown v1.0.1 h1:MJxQAnqxtss3DaPnm72DRV65HZiMQZF3DUAfEaTg+14= @@ -77,8 +74,8 
@@ github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2Em github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onflow/atree v0.8.0 h1:qg5c6J1gVDNObughpEeWm8oxqhPGdEyGrda121GM4u0= -github.com/onflow/atree v0.8.0/go.mod h1:yccR+LR7xc1Jdic0mrjocbHvUD7lnVvg8/Ct1AA5zBo= +github.com/onflow/atree v0.9.0 h1:M+Z/UPwzv0/Yy7ChI5T1ZIHD3YN1cs/hxGEs/HWhzaY= +github.com/onflow/atree v0.9.0/go.mod h1:FT6udJF9Q7VQTu3wknDhFX+VV4D44ZGdqtTAE5iztck= github.com/onflow/crypto v0.25.0 h1:BeWbLsh3ZD13Ej+Uky6kg1PL1ZIVBDVX+2MVBNwqddg= github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -100,8 +97,8 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg= github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c/go.mod 
h1:JlzghshsemAMDGZLytTFY8C1JQxQPhnatWqNwUXjggo= github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= @@ -110,11 +107,10 @@ github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d h1:5JInRQbk5UBX github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d/go.mod h1:Nlx5Y115XQvNcIdIy7dZXaNSUpzwBSge4/Ivk93/Yog= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= -github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg= diff --git a/interpreter/account_storagemap.go b/interpreter/account_storagemap.go new file mode 100644 index 0000000000..25e1d881a1 --- /dev/null +++ b/interpreter/account_storagemap.go @@ -0,0 +1,346 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interpreter + +import ( + goerrors "errors" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" +) + +// AccountStorageMap stores domain storage maps in an account. +type AccountStorageMap struct { + orderedMap *atree.OrderedMap +} + +// NewAccountStorageMap creates account storage map. +func NewAccountStorageMap( + memoryGauge common.MemoryGauge, + storage atree.SlabStorage, + address atree.Address, +) *AccountStorageMap { + common.UseMemory(memoryGauge, common.StorageMapMemoryUsage) + + orderedMap, err := atree.NewMap( + storage, + address, + atree.NewDefaultDigesterBuilder(), + emptyTypeInfo, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return &AccountStorageMap{ + orderedMap: orderedMap, + } +} + +// NewAccountStorageMapWithRootID loads existing account storage map with given atree SlabID. +func NewAccountStorageMapWithRootID( + storage atree.SlabStorage, + slabID atree.SlabID, +) *AccountStorageMap { + orderedMap, err := atree.NewMapWithRootID( + storage, + slabID, + atree.NewDefaultDigesterBuilder(), + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return &AccountStorageMap{ + orderedMap: orderedMap, + } +} + +// DomainExists returns true if the given domain exists in the account storage map. 
+func (s *AccountStorageMap) DomainExists(domain common.StorageDomain) bool { + key := Uint64StorageMapKey(domain) + + exists, err := s.orderedMap.Has( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return exists +} + +// GetDomain returns domain storage map for the given domain. +// If createIfNotExists is true and domain doesn't exist, new domain storage map +// is created and inserted into account storage map with given domain as key. +func (s *AccountStorageMap) GetDomain( + gauge common.MemoryGauge, + interpreter *Interpreter, + domain common.StorageDomain, + createIfNotExists bool, +) *DomainStorageMap { + key := Uint64StorageMapKey(domain) + + storedValue, err := s.orderedMap.Get( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + ) + if err != nil { + var keyNotFoundError *atree.KeyNotFoundError + if goerrors.As(err, &keyNotFoundError) { + // Create domain storage map if needed. + + if createIfNotExists { + return s.NewDomain(gauge, interpreter, domain) + } + + return nil + } + + panic(errors.NewExternalError(err)) + } + + // Create domain storage map from raw atree value. + return NewDomainStorageMapWithAtreeValue(storedValue) +} + +// NewDomain creates new domain storage map and inserts it to AccountStorageMap with given domain as key. 
+func (s *AccountStorageMap) NewDomain( + gauge common.MemoryGauge, + interpreter *Interpreter, + domain common.StorageDomain, +) *DomainStorageMap { + interpreter.recordStorageMutation() + + domainStorageMap := NewDomainStorageMap(gauge, s.orderedMap.Storage, s.orderedMap.Address()) + + key := Uint64StorageMapKey(domain) + + existingStorable, err := s.orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + domainStorageMap.orderedMap, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + if existingStorable != nil { + panic(errors.NewUnexpectedError( + "account %x domain %s should not exist", + s.orderedMap.Address(), + domain.Identifier(), + )) + } + + return domainStorageMap +} + +// WriteDomain sets or removes domain storage map in account storage map. +// If the given storage map is nil, domain is removed. +// If the given storage map is non-nil, domain is added/updated. +// Returns true if domain storage map previously existed at the given domain. +func (s *AccountStorageMap) WriteDomain( + interpreter *Interpreter, + domain common.StorageDomain, + domainStorageMap *DomainStorageMap, +) (existed bool) { + if domainStorageMap == nil { + return s.removeDomain(interpreter, domain) + } + return s.setDomain(interpreter, domain, domainStorageMap) +} + +// setDomain sets domain storage map in the account storage map and returns true if domain previously existed. +// If the given domain already stores a domain storage map, it is overwritten. 
+func (s *AccountStorageMap) setDomain( + interpreter *Interpreter, + domain common.StorageDomain, + newDomainStorageMap *DomainStorageMap, +) (existed bool) { + interpreter.recordStorageMutation() + + key := Uint64StorageMapKey(domain) + + existingValueStorable, err := s.orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + newDomainStorageMap.orderedMap, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + existed = existingValueStorable != nil + if existed { + // Create domain storage map from overwritten storable + existingDomainStorageMap := newDomainStorageMapWithAtreeStorable(s.orderedMap.Storage, existingValueStorable) + + // Deep remove elements in domain storage map + existingDomainStorageMap.DeepRemove(interpreter, true) + + // Remove domain storage map slab + interpreter.RemoveReferencedSlab(existingValueStorable) + } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + + // NOTE: Don't call maybeValidateAtreeStorage() here because it is possible + // that domain storage map is in the process of being migrated to account + // storage map and state isn't consistent during migration. + + return +} + +// removeDomain removes domain storage map with given domain in account storage map, if it exists. +func (s *AccountStorageMap) removeDomain(interpreter *Interpreter, domain common.StorageDomain) (existed bool) { + interpreter.recordStorageMutation() + + key := Uint64StorageMapKey(domain) + + existingKeyStorable, existingValueStorable, err := s.orderedMap.Remove( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + ) + if err != nil { + var keyNotFoundError *atree.KeyNotFoundError + if goerrors.As(err, &keyNotFoundError) { + // No-op to remove non-existent domain. 
+ return + } + panic(errors.NewExternalError(err)) + } + + // Key + + // NOTE: Key is just an atree.Value (Uint64AtreeValue), not an interpreter.Value, + // so do not need (can) convert and not need to deep remove + interpreter.RemoveReferencedSlab(existingKeyStorable) + + // Value + + existed = existingValueStorable != nil + if existed { + // Create domain storage map from removed storable + domainStorageMap := newDomainStorageMapWithAtreeStorable(s.orderedMap.Storage, existingValueStorable) + + // Deep remove elements in domain storage map + domainStorageMap.DeepRemove(interpreter, true) + + // Remove domain storage map slab + interpreter.RemoveReferencedSlab(existingValueStorable) + } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + interpreter.maybeValidateAtreeStorage() + + return +} + +func (s *AccountStorageMap) SlabID() atree.SlabID { + return s.orderedMap.SlabID() +} + +func (s *AccountStorageMap) Count() uint64 { + return s.orderedMap.Count() +} + +// Domains returns a set of domains in account storage map +func (s *AccountStorageMap) Domains() map[common.StorageDomain]struct{} { + domains := make(map[common.StorageDomain]struct{}) + + iterator := s.Iterator() + + for { + k, err := iterator.mapIterator.NextKey() + if err != nil { + panic(errors.NewExternalError(err)) + } + + if k == nil { + break + } + + domain := convertAccountStorageMapKeyToStorageDomain(k) + domains[domain] = struct{}{} + } + + return domains +} + +// Iterator returns a mutable iterator (AccountStorageMapIterator), +// which allows iterating over the domain and domain storage map. 
+func (s *AccountStorageMap) Iterator() *AccountStorageMapIterator { + mapIterator, err := s.orderedMap.Iterator( + StorageMapKeyAtreeValueComparator, + StorageMapKeyAtreeValueHashInput, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return &AccountStorageMapIterator{ + mapIterator: mapIterator, + storage: s.orderedMap.Storage, + } +} + +// AccountStorageMapIterator is an iterator over AccountStorageMap. +type AccountStorageMapIterator struct { + mapIterator atree.MapIterator + storage atree.SlabStorage +} + +// Next returns the next domain and domain storage map. +// If there is no more domain, (common.StorageDomainUnknown, nil) is returned. +func (i *AccountStorageMapIterator) Next() (common.StorageDomain, *DomainStorageMap) { + k, v, err := i.mapIterator.Next() + if err != nil { + panic(errors.NewExternalError(err)) + } + + if k == nil || v == nil { + return common.StorageDomainUnknown, nil + } + + key := convertAccountStorageMapKeyToStorageDomain(k) + + value := NewDomainStorageMapWithAtreeValue(v) + + return key, value +} + +func convertAccountStorageMapKeyToStorageDomain(v atree.Value) common.StorageDomain { + key, ok := v.(Uint64AtreeValue) + if !ok { + panic(errors.NewUnexpectedError("domain key type %T isn't expected", key)) + } + domain, err := common.StorageDomainFromUint64(uint64(key)) + if err != nil { + panic(errors.NewUnexpectedError("domain key %d isn't expected: %w", key, err)) + } + return domain +} diff --git a/interpreter/account_storagemap_test.go b/interpreter/account_storagemap_test.go new file mode 100644 index 0000000000..a2939e14ac --- /dev/null +++ b/interpreter/account_storagemap_test.go @@ -0,0 +1,942 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interpreter_test + +import ( + "math/rand" + goruntime "runtime" + "slices" + "strconv" + "strings" + "testing" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + . "github.com/onflow/cadence/test_utils/interpreter_utils" + . "github.com/onflow/cadence/test_utils/runtime_utils" + + "github.com/stretchr/testify/require" +) + +func TestAccountStorageMapDomainExists(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + for _, domain := range common.AllStorageDomains { + exist := accountStorageMap.DomainExists(domain) + require.False(t, exist) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. 
+ // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()} + + const count = 10 + accountStorageMap, _ := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + // Check if domain exists + for _, domain := range common.AllStorageDomains { + exist := accountStorageMap.DomainExists(domain) + require.Equal(t, slices.Contains(existingDomains, domain), exist) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapGetDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + for _, domain := range common.AllStorageDomains { + const createIfNotExists = false + domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.Nil(t, domainStorageMap) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + for _, domain := range common.AllStorageDomains { + const createIfNotExists = false + domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.Equal(t, slices.Contains(existingDomains, domain), domainStorageMap != nil) + + if domainStorageMap != nil { + checkDomainStorageMapData(t, inter, domainStorageMap, accountValues[domain]) + } + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapCreateDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + accountStorageMapRootSlabID := accountStorageMap.SlabID() + + for _, domain := range common.AllStorageDomains { + const createIfNotExists = true + domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + accountValues[domain] = make(domainStorageMapValues) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + accountStorageMapRootSlabID := accountStorageMap.SlabID() + + for _, domain := range common.AllStorageDomains { + const createIfNotExists = true + domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(len(accountValues[domain])), domainStorageMap.Count()) + + if !slices.Contains(existingDomains, domain) { + accountValues[domain] = make(domainStorageMapValues) + } + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) + }) +} + +func TestAccountStorageMapSetAndUpdateDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + accountStorageMapRootSlabID := accountStorageMap.SlabID() + + const count = 10 + for _, domain := range common.AllStorageDomains { + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + existed := accountStorageMap.WriteDomain(inter, domain, domainStorageMap) + require.False(t, existed) + + accountValues[domain] = domainValues + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + accountStorageMapRootSlabID := accountStorageMap.SlabID() + + for _, domain := range common.AllStorageDomains { + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + existed := accountStorageMap.WriteDomain(inter, domain, domainStorageMap) + require.Equal(t, slices.Contains(existingDomains, domain), existed) + + accountValues[domain] = domainValues + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) + }) +} + +func TestAccountStorageMapRemoveDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + accountStorageMapRootSlabID := accountStorageMap.SlabID() + + for _, domain := range common.AllStorageDomains { + existed := accountStorageMap.WriteDomain(inter, domain, nil) + require.False(t, existed) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + accountStorageMapRootSlabID := accountStorageMap.SlabID() + + for _, domain := range common.AllStorageDomains { + + existed := accountStorageMap.WriteDomain(inter, domain, nil) + require.Equal(t, slices.Contains(existingDomains, domain), existed) + + delete(accountValues, domain) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) + }) +} + +func TestAccountStorageMapIterator(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + iterator := accountStorageMap.Iterator() + + // Test calling Next() twice on empty account storage map. + for range 2 { + domain, domainStorageMap := iterator.Next() + require.Empty(t, domain) + require.Nil(t, domainStorageMap) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []common.StorageDomain{ + common.PathDomainStorage.StorageDomain(), + common.PathDomainPublic.StorageDomain(), + } + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + iterator := accountStorageMap.Iterator() + + domainCount := 0 + for { + domain, domainStorageMap := iterator.Next() + if domain == common.StorageDomainUnknown { + break + } + + domainCount++ + + require.True(t, slices.Contains(existingDomains, domain)) + require.NotNil(t, domainStorageMap) + + checkDomainStorageMapData(t, inter, domainStorageMap, accountValues[domain]) + } + + // Test calling Next() after iterator reaches the end. + domain, domainStorageMap := iterator.Next() + require.Equal(t, common.StorageDomainUnknown, domain) + require.Nil(t, domainStorageMap) + + require.Equal(t, len(existingDomains), domainCount) + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapDomains(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + domains := accountStorageMap.Domains() + require.Equal(t, 0, len(domains)) + + CheckAtreeStorageHealth(t, storage, 
[]atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. + // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled) + + existingDomains := []common.StorageDomain{ + common.PathDomainStorage.StorageDomain(), + common.PathDomainPublic.StorageDomain(), + common.PathDomainPrivate.StorageDomain(), + } + + const count = 10 + accountStorageMap, _ := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + domains := accountStorageMap.Domains() + require.Equal(t, len(existingDomains), len(domains)) + + for _, domain := range existingDomains { + _, exist := domains[domain] + require.True(t, exist) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + init := func() (atree.SlabID, accountStorageMapValues, map[string][]byte, map[string]uint64) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, 
accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + return accountStorageMap.SlabID(), make(accountStorageMapValues), ledger.StoredValues, ledger.StorageIndices + } + + accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init() + + checkAccountStorageMapDataWithRawData(t, storedValues, storageIndices, accountStorageMapRootSlabID, accountValues) + }) + + t.Run("non-empty", func(t *testing.T) { + existingDomains := []common.StorageDomain{ + common.PathDomainStorage.StorageDomain(), + common.PathDomainPublic.StorageDomain(), + common.PathDomainPrivate.StorageDomain(), + } + + init := func() (atree.SlabID, accountStorageMapValues, map[string][]byte, map[string]uint64) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. + // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled) + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + return accountStorageMap.SlabID(), accountValues, ledger.StoredValues, ledger.StorageIndices + } + + accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init() + + checkAccountStorageMapDataWithRawData(t, storedValues, storageIndices, accountStorageMapRootSlabID, accountValues) + }) +} + +type ( + domainStorageMapValues map[interpreter.StorageMapKey]interpreter.Value + accountStorageMapValues map[common.StorageDomain]domainStorageMapValues +) + +func createAccountStorageMap( + storage atree.SlabStorage, + inter *interpreter.Interpreter, + address common.Address, + domains []common.StorageDomain, + count int, + random *rand.Rand, +) (*interpreter.AccountStorageMap, accountStorageMapValues) { + + // Create account storage map + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + + accountValues := make(accountStorageMapValues) + + for _, domain := range domains { + // Create domain storage map + domainStorageMap := accountStorageMap.NewDomain(nil, inter, domain) + + // Write to new domain storage map + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + accountValues[domain] = domainValues + } + + return accountStorageMap, accountValues +} + +func writeRandomValuesToDomainStorageMap( + inter *interpreter.Interpreter, + domainStorageMap *interpreter.DomainStorageMap, + count int, + random *rand.Rand, +) domainStorageMapValues { + + domainValues := make(domainStorageMapValues) + + for len(domainValues) < count { + n := random.Int() + 
+ key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + + var value interpreter.Value + + if len(domainValues) == 0 { + // First element is a large value that is stored in its own slabs. + value = interpreter.NewUnmeteredStringValue(strings.Repeat("a", 1_000)) + } else { + value = interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + } + + domainStorageMap.WriteValue(inter, key, value) + + domainValues[key] = value + } + + return domainValues +} + +// checkAccountStorageMapDataWithRawData checks loaded account storage map against expected account values. +func checkAccountStorageMapDataWithRawData( + tb testing.TB, + storedValues map[string][]byte, + storageIndices map[string]uint64, + rootSlabID atree.SlabID, + expectedAccountValues accountStorageMapValues, +) { + // Create new storage from raw data + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(tb, storage) + + loadedAccountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, rootSlabID) + require.Equal(tb, uint64(len(expectedAccountValues)), loadedAccountStorageMap.Count()) + require.Equal(tb, rootSlabID, loadedAccountStorageMap.SlabID()) + + checkAccountStorageMapData(tb, inter, loadedAccountStorageMap, expectedAccountValues) + + CheckAtreeStorageHealth(tb, storage, []atree.SlabID{rootSlabID}) +} + +// checkAccountStorageMapData iterates account storage map and compares values with given expectedAccountValues. 
+func checkAccountStorageMapData( + tb testing.TB, + inter *interpreter.Interpreter, + accountStorageMap *interpreter.AccountStorageMap, + expectedAccountValues accountStorageMapValues, +) { + require.Equal(tb, uint64(len(expectedAccountValues)), accountStorageMap.Count()) + + domainCount := 0 + iter := accountStorageMap.Iterator() + for { + domain, domainStorageMap := iter.Next() + if domain == common.StorageDomainUnknown { + break + } + + domainCount++ + + expectedDomainValues, exist := expectedAccountValues[domain] + require.True(tb, exist) + + checkDomainStorageMapData(tb, inter, domainStorageMap, expectedDomainValues) + } + + require.Equal(tb, len(expectedAccountValues), domainCount) +} + +// checkDomainStorageMapData iterates domain storage map and compares values with given expectedDomainValues. +func checkDomainStorageMapData( + tb testing.TB, + inter *interpreter.Interpreter, + domainStorageMap *interpreter.DomainStorageMap, + expectedDomainValues domainStorageMapValues, +) { + require.Equal(tb, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + count := 0 + iter := domainStorageMap.Iterator(nil) + for { + k, v := iter.Next() + if k == nil { + break + } + + count++ + + kv := k.(interpreter.StringAtreeValue) + + expectedValue := expectedDomainValues[interpreter.StringStorageMapKey(kv)] + + checkCadenceValue(tb, inter, v, expectedValue) + } + + require.Equal(tb, len(expectedDomainValues), count) +} + +func checkCadenceValue( + tb testing.TB, + inter *interpreter.Interpreter, + value, + expectedValue interpreter.Value, +) { + ev, ok := value.(interpreter.EquatableValue) + require.True(tb, ok) + require.True(tb, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) +} diff --git a/interpreter/account_test.go b/interpreter/account_test.go index a003477a56..8f1f9c4214 100644 --- a/interpreter/account_test.go +++ b/interpreter/account_test.go @@ -483,7 +483,7 @@ func testAccountWithErrorHandler( getAccountValues := func() 
map[storageKey]interpreter.Value { accountValues := make(map[storageKey]interpreter.Value) - for storageMapKey, accountStorage := range inter.Storage().(interpreter.InMemoryStorage).StorageMaps { + for storageMapKey, accountStorage := range inter.Storage().(interpreter.InMemoryStorage).DomainStorageMaps { iterator := accountStorage.Iterator(inter) for { key, value := iterator.Next() @@ -492,7 +492,7 @@ func testAccountWithErrorHandler( } storageKey := storageKey{ address: storageMapKey.Address, - domain: storageMapKey.Key, + domain: storageMapKey.Domain.Identifier(), key: key, } accountValues[storageKey] = value diff --git a/interpreter/bitwise_test.go b/interpreter/bitwise_test.go index a288e7e667..9f0d12d74c 100644 --- a/interpreter/bitwise_test.go +++ b/interpreter/bitwise_test.go @@ -20,10 +20,14 @@ package interpreter_test import ( "fmt" + "math/big" "testing" + "github.com/stretchr/testify/require" + "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/sema" + . "github.com/onflow/cadence/test_utils/common_utils" . 
"github.com/onflow/cadence/test_utils/interpreter_utils" ) @@ -251,3 +255,914 @@ func TestInterpretBitwiseRightShift(t *testing.T) { }) } } + +func TestInterpretBitwiseNegativeShift(t *testing.T) { + t.Run("Int8 << -3", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + fun test() { + let a: Int8 = 0x7f + let b: Int8 = -3 + let c = a << b + } + `) + _, err := inter.Invoke("test") + RequireError(t, err) + + var shiftErr interpreter.NegativeShiftError + require.ErrorAs(t, err, &shiftErr) + }) + + t.Run("Int16 << -3", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + fun test() { + let a: Int16 = 0x7f + let b: Int16 = -3 + let c = a << b + } + `) + _, err := inter.Invoke("test") + RequireError(t, err) + + var shiftErr interpreter.NegativeShiftError + require.ErrorAs(t, err, &shiftErr) + }) + + t.Run("Int32 << -3", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + fun test() { + let a: Int32 = 0x7f + let b: Int32 = -3 + let c = a << b + } + `) + _, err := inter.Invoke("test") + RequireError(t, err) + + var shiftErr interpreter.NegativeShiftError + require.ErrorAs(t, err, &shiftErr) + }) + + t.Run("Int64 << -3", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + fun test() { + let a: Int64 = 0x7f + let b: Int64 = -3 + let c = a << b + } + `) + _, err := inter.Invoke("test") + RequireError(t, err) + + var shiftErr interpreter.NegativeShiftError + require.ErrorAs(t, err, &shiftErr) + }) + + t.Run("Int128 << -3", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + fun test() { + let a: Int128 = 0x7f + let b: Int128 = -3 + let c = a << b + } + `) + _, err := inter.Invoke("test") + RequireError(t, err) + + var shiftErr interpreter.NegativeShiftError + require.ErrorAs(t, err, &shiftErr) + }) + + t.Run("Int256 << -3", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + fun test() { + let a: Int256 = 0x7f + let b: Int256 = -3 + let c = a << b + } + `) + _, err := inter.Invoke("test") + 
RequireError(t, err) + + var shiftErr interpreter.NegativeShiftError + require.ErrorAs(t, err, &shiftErr) + }) +} + +func TestInterpretBitwiseLeftShift8(t *testing.T) { + + t.Parallel() + + t.Run("Int8 << 9 (zero result)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int8 = 0x7f + let b: Int8 = 9 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt8Value(0), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int8 << 1 (positive to positive)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int8 = 5 + let b: Int8 = 1 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt8Value(10), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int8 << 1 (negative to negative)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int8 = -5 // 0b1111_1011 + let b: Int8 = 1 + let c = a << b // 0b1111_0110 --> -10 + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt8Value(-10), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int8 << 1 (positive to negative)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int8 = 5 // 0b0000_0101 + let b: Int8 = 7 + let c = a << b // 0b1000_0000 --> -128 + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt8Value(-128), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int8 << 1 (negative to positive)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int8 = -5 // 0b1111_1011 + let b: Int8 = 5 + let c = a << b // 0b0110_0000 --> 96 + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt8Value(0x60), // or 96 + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("UInt8 << 9", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: UInt8 = 0x7f + let b: UInt8 = 9 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + 
inter, + interpreter.NewUnmeteredUInt8Value(0), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("UInt8 << 1", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: UInt8 = 0xff + let b: UInt8 = 1 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredUInt8Value(0xfe), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Word8 << 9", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Word8 = 0xff + let b: Word8 = 9 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredWord8Value(0), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Word8 << 1", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Word8 = 0xff + let b: Word8 = 1 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredWord8Value(0xfe), + inter.Globals.Get("c").GetValue(inter), + ) + }) +} + +func TestInterpretBitwiseLeftShift128(t *testing.T) { + + t.Parallel() + + t.Run("Int128 << 130 (zero result)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int128 = 0x7fff_ffff_ffff_ffff_ffff_ffff_ffff_ffff + let b: Int128 = 130 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt128ValueFromInt64(int64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int128 << 1 (positive to positive)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int128 = 5 + let b: Int128 = 1 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt128ValueFromInt64(10), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int128 << 1 (negative to negative)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int128 = -5 + let b: Int128 = 1 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt128ValueFromInt64(-10), + 
inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int128 << 127 (positive to negative)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int128 = 5 // 0b0000_0101 + let b: Int128 = 127 + let c = a << b // 0b1000_0000_..._0000 --> -2^127 + `, + ) + + bigInt, _ := big.NewInt(0).SetString("-0x80000000_00000000_00000000_00000000", 0) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt128ValueFromBigInt(bigInt), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int128 << 125 (negative to positive)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int128 = -5 // 0b1111_1111_..._1111_1011 + let b: Int128 = 125 + let c = a << b // 0b0110_0000_..._0000 + `, + ) + + bigInt, _ := big.NewInt(0).SetString("0x60000000_00000000_00000000_00000000", 0) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt128ValueFromBigInt(bigInt), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int128 << -3", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + fun test() { + let a: Int128 = 0x7fff_ffff + let b: Int128 = -3 + let c = a << b + } + `) + _, err := inter.Invoke("test") + RequireError(t, err) + + var shiftErr interpreter.NegativeShiftError + require.ErrorAs(t, err, &shiftErr) + }) + + t.Run("UInt128 << 130", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: UInt128 = 0x7fff_ffff + let b: UInt128 = 130 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredUInt128ValueFromUint64(uint64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("UInt128 << 32", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: UInt128 = 0xffff_ffff_ffff_ffff_ffff_ffff_ffff_ffff + let b: UInt128 = 32 + let c = a << b + `, + ) + + bigInt, _ := big.NewInt(0).SetString("0xffff_ffff_ffff_ffff_ffff_ffff_0000_0000", 0) + + AssertValuesEqual( + t, + inter, + 
interpreter.NewUnmeteredUInt128ValueFromBigInt(bigInt), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Word128 << 130", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Word128 = 0xffff_ffff_ffff_ffff + let b: Word128 = 130 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredWord128ValueFromUint64(uint64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Word128 << 32", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Word128 = 0xffff_ffff_ffff_ffff_ffff_ffff_ffff_ffff + let b: Word128 = 32 + let c = a << b + `, + ) + + bigInt, _ := big.NewInt(0).SetString("0xffff_ffff_ffff_ffff_ffff_ffff_0000_0000", 0) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredWord128ValueFromBigInt(bigInt), + inter.Globals.Get("c").GetValue(inter), + ) + }) +} + +func TestInterpretBitwiseLeftShift256(t *testing.T) { + + t.Parallel() + + t.Run("Int256 << 260 (zero result)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int256 = 0x7fff_ffff + let b: Int256 = 260 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt256ValueFromInt64(int64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int256 << 1 (positive to positive)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int256 = 5 + let b: Int256 = 1 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt256ValueFromInt64(10), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int256 << 1 (negative to negative)", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int256 = -5 + let b: Int256 = 1 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt256ValueFromInt64(-10), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int256 << 255 (positive to negative)", func(t *testing.T) { + + inter 
:= parseCheckAndInterpret(t,
+			`
+            let a: Int256 = 5  // 0b0000_0101
+            let b: Int256 = 255
+            let c = a << b  // 0b1000_0000_..._0000  --> -2^255
+			`,
+		)
+
+		bigInt, _ := big.NewInt(0).SetString("-0x80000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000", 0)
+
+		AssertValuesEqual(
+			t,
+			inter,
+			interpreter.NewUnmeteredInt256ValueFromBigInt(bigInt),
+			inter.Globals.Get("c").GetValue(inter),
+		)
+	})
+
+	t.Run("Int256 << 253 (negative to positive)", func(t *testing.T) {
+
+		inter := parseCheckAndInterpret(t,
+			`
+            let a: Int256 = -5  // 0b1111_1111_..._1111_1011
+            let b: Int256 = 253
+            let c = a << b  // 0b0110_0000_..._0000
+			`,
+		)
+
+		bigInt, _ := big.NewInt(0).SetString("0x60000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000", 0)
+
+		AssertValuesEqual(
+			t,
+			inter,
+			interpreter.NewUnmeteredInt256ValueFromBigInt(bigInt),
+			inter.Globals.Get("c").GetValue(inter),
+		)
+	})
+
+	t.Run("Int256 << -3", func(t *testing.T) {
+
+		inter := parseCheckAndInterpret(t,
+			`
+            fun test() {
+                let a: Int256 = 0x7fff_ffff
+                let b: Int256 = -3
+                let c = a << b
+            }
+			`)
+		_, err := inter.Invoke("test")
+		RequireError(t, err)
+
+		var shiftErr interpreter.NegativeShiftError
+		require.ErrorAs(t, err, &shiftErr)
+	})
+
+	t.Run("UInt256 << 260", func(t *testing.T) {
+
+		inter := parseCheckAndInterpret(t,
+			`
+            let a: UInt256 = 0x7fff_ffff
+            let b: UInt256 = 260
+            let c = a << b
+			`,
+		)
+
+		AssertValuesEqual(
+			t,
+			inter,
+			interpreter.NewUnmeteredUInt256ValueFromUint64(uint64(0)),
+			inter.Globals.Get("c").GetValue(inter),
+		)
+	})
+
+	t.Run("UInt256 << 32", func(t *testing.T) {
+
+		inter := parseCheckAndInterpret(t,
+			`
+            let a: UInt256 = 0x7fff_ffff
+            let b: UInt256 = 32
+            let c = a << b
+			`,
+		)
+
+		AssertValuesEqual(
+			t,
+			inter,
+			interpreter.NewUnmeteredUInt256ValueFromUint64(uint64(0x7fff_ffff_0000_0000)),
+			inter.Globals.Get("c").GetValue(inter),
+		)
+	})
+
+	t.Run("Word256 << 260", func(t *testing.T) {
+
+		inter := parseCheckAndInterpret(t,
+			`
+ let a: Word256 = 0x7fff_ffff + let b: Word256 = 260 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredWord256ValueFromUint64(uint64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Word256 << 32", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Word256 = 0x7fff_ffff + let b: Word256 = 32 + let c = a << b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredWord256ValueFromUint64(uint64(0x7fff_ffff_0000_0000)), + inter.Globals.Get("c").GetValue(inter), + ) + }) +} + +func TestInterpretBitwiseRightShift128(t *testing.T) { + + t.Parallel() + + t.Run("Int128 >> 130", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int128 = 0x7fff_ffff_ffff_ffff_ffff_ffff_ffff_ffff + let b: Int128 = 130 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt128ValueFromInt64(int64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int128 >> 32", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int128 = 0x7fff_ffff_0000_0000 + let b: Int128 = 32 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt128ValueFromInt64(int64(0x7fff_ffff)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int128 >> -3", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + fun test() { + let a: Int128 = 0x7fff_ffff + let b: Int128 = -3 + let c = a >> b + } + `) + _, err := inter.Invoke("test") + RequireError(t, err) + + var shiftErr interpreter.NegativeShiftError + require.ErrorAs(t, err, &shiftErr) + }) + + t.Run("UInt128 >> 130", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: UInt128 = 0x7fff_ffff + let b: UInt128 = 130 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredUInt128ValueFromUint64(uint64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("UInt128 >> 32", 
func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: UInt128 = 0xffff_ffff_0000_0000 + let b: UInt128 = 32 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredUInt128ValueFromUint64(uint64(0xffff_ffff)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Word128 >> 130", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Word128 = 0xffff_ffff_ffff_ffff + let b: Word128 = 130 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredWord128ValueFromUint64(uint64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Word128 >> 32", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Word128 = 0xffff_ffff_0000_0000 + let b: Word128 = 32 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredWord128ValueFromUint64(uint64(0xffff_ffff)), + inter.Globals.Get("c").GetValue(inter), + ) + }) +} + +func TestInterpretBitwiseRightShift256(t *testing.T) { + + t.Parallel() + + t.Run("Int256 >> 260", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int256 = 0x7fff_ffff + let b: Int256 = 260 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt256ValueFromInt64(int64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int256 >> 32", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Int256 = 0x7fff_ffff_0000_0000 + let b: Int256 = 32 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredInt256ValueFromInt64(int64(0x7fff_ffff)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Int256 >> -3", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + fun test() { + let a: Int256 = 0x7fff_ffff + let b: Int256 = -3 + let c = a >> b + } + `) + _, err := inter.Invoke("test") + RequireError(t, err) + + var shiftErr interpreter.NegativeShiftError + 
require.ErrorAs(t, err, &shiftErr) + }) + + t.Run("UInt256 >> 260", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: UInt256 = 0x7fff_ffff + let b: UInt256 = 260 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredUInt256ValueFromUint64(uint64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("UInt256 >> 32", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: UInt256 = 0x7fff_ffff_0000_0000 + let b: UInt256 = 32 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredUInt256ValueFromUint64(uint64(0x7fff_ffff)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Word256 >> 260", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Word256 = 0x7fff_ffff + let b: Word256 = 260 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredWord256ValueFromUint64(uint64(0)), + inter.Globals.Get("c").GetValue(inter), + ) + }) + + t.Run("Word256 >> 32", func(t *testing.T) { + + inter := parseCheckAndInterpret(t, + ` + let a: Word256 = 0x7fff_ffff_0000_0000 + let b: Word256 = 32 + let c = a >> b + `, + ) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredWord256ValueFromUint64(uint64(0x7fff_ffff)), + inter.Globals.Get("c").GetValue(inter), + ) + }) +} diff --git a/interpreter/decode.go b/interpreter/decode.go index 8fd29b9c90..ca614ae4a6 100644 --- a/interpreter/decode.go +++ b/interpreter/decode.go @@ -54,7 +54,8 @@ func (UnsupportedTagDecodingError) IsInternalError() {} func (e UnsupportedTagDecodingError) Error() string { return fmt.Sprintf( - "internal error: unsupported decoded tag: %d", + "%s unsupported decoded tag: %d", + errors.InternalErrorMessagePrefix, e.Tag, ) } @@ -69,7 +70,8 @@ func (InvalidStringLengthError) IsInternalError() {} func (e InvalidStringLengthError) Error() string { return fmt.Sprintf( - "internal error: invalid string length: got %d, expected max 
%d", + "%s invalid string length: got %d, expected max %d", + errors.InternalErrorMessagePrefix, e.Length, goMaxInt, ) diff --git a/interpreter/storagemap.go b/interpreter/domain_storagemap.go similarity index 53% rename from interpreter/storagemap.go rename to interpreter/domain_storagemap.go index 54c9e2acbb..e8f79bca35 100644 --- a/interpreter/storagemap.go +++ b/interpreter/domain_storagemap.go @@ -20,6 +20,7 @@ package interpreter import ( goerrors "errors" + "time" "github.com/onflow/atree" @@ -27,12 +28,13 @@ import ( "github.com/onflow/cadence/errors" ) -// StorageMap is an ordered map which stores values in an account. -type StorageMap struct { +// DomainStorageMap is an ordered map which stores values in an account domain. +type DomainStorageMap struct { orderedMap *atree.OrderedMap } -func NewStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, address atree.Address) *StorageMap { +// NewDomainStorageMap creates new domain storage map for given address. +func NewDomainStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, address atree.Address) *DomainStorageMap { common.UseMemory(memoryGauge, common.StorageMapMemoryUsage) orderedMap, err := atree.NewMap( @@ -45,12 +47,16 @@ func NewStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, ad panic(errors.NewExternalError(err)) } - return &StorageMap{ + return &DomainStorageMap{ orderedMap: orderedMap, } } -func NewStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *StorageMap { +// NewDomainStorageMapWithRootID loads domain storage map with given slabID. +// This function is only used with legacy domain registers for unmigrated accounts. +// For migrated accounts, NewDomainStorageMapWithAtreeValue() is used to load +// domain storage map as an element of AccountStorageMap. 
+func NewDomainStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *DomainStorageMap { orderedMap, err := atree.NewMapWithRootID( storage, slabID, @@ -60,13 +66,51 @@ func NewStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *St panic(errors.NewExternalError(err)) } - return &StorageMap{ + return &DomainStorageMap{ orderedMap: orderedMap, } } +// newDomainStorageMapWithAtreeStorable loads domain storage map with given atree.Storable. +func newDomainStorageMapWithAtreeStorable(storage atree.SlabStorage, storable atree.Storable) *DomainStorageMap { + + // NOTE: Don't use interpreter.StoredValue() to convert given storable + // to DomainStorageMap because DomainStorageMap isn't interpreter.Value. + + value, err := storable.StoredValue(storage) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return NewDomainStorageMapWithAtreeValue(value) +} + +// NewDomainStorageMapWithAtreeValue loads domain storage map with given atree.Value. +// This function is used by migrated account to load domain as an element of AccountStorageMap. +func NewDomainStorageMapWithAtreeValue(value atree.Value) *DomainStorageMap { + // Check if type of given value is *atree.OrderedMap + dm, isAtreeOrderedMap := value.(*atree.OrderedMap) + if !isAtreeOrderedMap { + panic(errors.NewUnexpectedError( + "domain storage map has unexpected type %T, expect *atree.OrderedMap", + value, + )) + } + + // Check if TypeInfo of atree.OrderedMap is EmptyTypeInfo + dt, isEmptyTypeInfo := dm.Type().(EmptyTypeInfo) + if !isEmptyTypeInfo { + panic(errors.NewUnexpectedError( + "domain storage map has unexpected encoded type %T, expect EmptyTypeInfo", + dt, + )) + } + + return &DomainStorageMap{orderedMap: dm} +} + // ValueExists returns true if the given key exists in the storage map. 
-func (s StorageMap) ValueExists(key StorageMapKey) bool { +func (s *DomainStorageMap) ValueExists(key StorageMapKey) bool { exists, err := s.orderedMap.Has( key.AtreeValueCompare, key.AtreeValueHashInput, @@ -81,7 +125,7 @@ func (s StorageMap) ValueExists(key StorageMapKey) bool { // ReadValue returns the value for the given key. // Returns nil if the key does not exist. -func (s StorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value { +func (s *DomainStorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value { storedValue, err := s.orderedMap.Get( key.AtreeValueCompare, key.AtreeValueHashInput, @@ -102,7 +146,7 @@ func (s StorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value // If the given value is nil, the key is removed. // If the given value is non-nil, the key is added/updated. // Returns true if a value previously existed at the given key. -func (s StorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { +func (s *DomainStorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { if value == nil { return s.RemoveValue(interpreter, key) } else { @@ -112,8 +156,8 @@ func (s StorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, valu // SetValue sets a value in the storage map. // If the given key already stores a value, it is overwritten. -// Returns true if -func (s StorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { +// Returns true if given key already exists and existing value is overwritten. 
+func (s *DomainStorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { interpreter.recordStorageMutation() existingStorable, err := s.orderedMap.Set( @@ -126,20 +170,21 @@ func (s StorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value panic(errors.NewExternalError(err)) } - interpreter.maybeValidateAtreeValue(s.orderedMap) - interpreter.maybeValidateAtreeStorage() - existed = existingStorable != nil if existed { existingValue := StoredValue(interpreter, existingStorable, interpreter.Storage()) existingValue.DeepRemove(interpreter, true) // existingValue is standalone because it was overwritten in parent container. interpreter.RemoveReferencedSlab(existingStorable) } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + interpreter.maybeValidateAtreeStorage() + return } // RemoveValue removes a value in the storage map, if it exists. -func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (existed bool) { +func (s *DomainStorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (existed bool) { interpreter.recordStorageMutation() existingKeyStorable, existingValueStorable, err := s.orderedMap.Remove( @@ -155,9 +200,6 @@ func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (ex panic(errors.NewExternalError(err)) } - interpreter.maybeValidateAtreeValue(s.orderedMap) - interpreter.maybeValidateAtreeStorage() - // Key // NOTE: Key is just an atree.Value, not an interpreter.Value, @@ -172,12 +214,82 @@ func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (ex existingValue.DeepRemove(interpreter, true) // existingValue is standalone because it was removed from parent container. interpreter.RemoveReferencedSlab(existingValueStorable) } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + interpreter.maybeValidateAtreeStorage() + return } +// DeepRemove removes all elements (and their slabs) of domain storage map. 
+func (s *DomainStorageMap) DeepRemove(interpreter *Interpreter, hasNoParentContainer bool) { + + config := interpreter.SharedState.Config + + if config.TracingEnabled { + startTime := time.Now() + + typeInfo := "DomainStorageMap" + count := s.Count() + + defer func() { + interpreter.reportDomainStorageMapDeepRemoveTrace( + typeInfo, + int(count), + time.Since(startTime), + ) + }() + } + + // Remove nested values and storables + + // Remove keys and values + + storage := s.orderedMap.Storage + + err := s.orderedMap.PopIterate(func(keyStorable atree.Storable, valueStorable atree.Storable) { + // Key + + // NOTE: Key is just an atree.Value, not an interpreter.Value, + // so do not need (can) convert and not need to deep remove + interpreter.RemoveReferencedSlab(keyStorable) + + // Value + + value := StoredValue(interpreter, valueStorable, storage) + value.DeepRemove(interpreter, false) // value is an element of v.dictionary because it is from PopIterate() callback. + interpreter.RemoveReferencedSlab(valueStorable) + }) + if err != nil { + panic(errors.NewExternalError(err)) + } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + if hasNoParentContainer { + interpreter.maybeValidateAtreeStorage() + } +} + +func (s *DomainStorageMap) SlabID() atree.SlabID { + return s.orderedMap.SlabID() +} + +func (s *DomainStorageMap) ValueID() atree.ValueID { + return s.orderedMap.ValueID() +} + +func (s *DomainStorageMap) Count() uint64 { + return s.orderedMap.Count() +} + +func (s *DomainStorageMap) Inlined() bool { + // This is only used for testing currently. 
+ return s.orderedMap.Inlined() +} + // Iterator returns an iterator (StorageMapIterator), // which allows iterating over the keys and values of the storage map -func (s StorageMap) Iterator(gauge common.MemoryGauge) StorageMapIterator { +func (s *DomainStorageMap) Iterator(gauge common.MemoryGauge) DomainStorageMapIterator { mapIterator, err := s.orderedMap.Iterator( StorageMapKeyAtreeValueComparator, StorageMapKeyAtreeValueHashInput, @@ -186,31 +298,23 @@ func (s StorageMap) Iterator(gauge common.MemoryGauge) StorageMapIterator { panic(errors.NewExternalError(err)) } - return StorageMapIterator{ + return DomainStorageMapIterator{ gauge: gauge, mapIterator: mapIterator, storage: s.orderedMap.Storage, } } -func (s StorageMap) SlabID() atree.SlabID { - return s.orderedMap.SlabID() -} - -func (s StorageMap) Count() uint64 { - return s.orderedMap.Count() -} - -// StorageMapIterator is an iterator over StorageMap -type StorageMapIterator struct { +// DomainStorageMapIterator is an iterator over DomainStorageMap +type DomainStorageMapIterator struct { gauge common.MemoryGauge mapIterator atree.MapIterator storage atree.SlabStorage } // Next returns the next key and value of the storage map iterator. -// If there is no further key-value pair, ("", nil) is returned. -func (i StorageMapIterator) Next() (atree.Value, Value) { +// If there is no further key-value pair, (nil, nil) is returned. +func (i DomainStorageMapIterator) Next() (atree.Value, Value) { k, v, err := i.mapIterator.Next() if err != nil { panic(errors.NewExternalError(err)) @@ -230,7 +334,7 @@ func (i StorageMapIterator) Next() (atree.Value, Value) { // NextKey returns the next key of the storage map iterator. // If there is no further key, "" is returned. 
-func (i StorageMapIterator) NextKey() atree.Value { +func (i DomainStorageMapIterator) NextKey() atree.Value { k, err := i.mapIterator.NextKey() if err != nil { panic(errors.NewExternalError(err)) @@ -240,8 +344,8 @@ func (i StorageMapIterator) NextKey() atree.Value { } // NextValue returns the next value in the storage map iterator. -// If there is nop further value, nil is returned. -func (i StorageMapIterator) NextValue() Value { +// If there is no further value, nil is returned. +func (i DomainStorageMapIterator) NextValue() Value { v, err := i.mapIterator.NextValue() if err != nil { panic(errors.NewExternalError(err)) diff --git a/interpreter/domain_storagemap_test.go b/interpreter/domain_storagemap_test.go new file mode 100644 index 0000000000..5eb2db5c66 --- /dev/null +++ b/interpreter/domain_storagemap_test.go @@ -0,0 +1,814 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interpreter_test + +import ( + "math/rand" + "strconv" + "testing" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + . "github.com/onflow/cadence/test_utils/interpreter_utils" + . 
"github.com/onflow/cadence/test_utils/runtime_utils" + + "github.com/stretchr/testify/require" +) + +func TestDomainStorageMapValueExists(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + key := interpreter.StringAtreeValue("key") + exist := domainStorageMap.ValueExists(interpreter.StringStorageMapKey(key)) + require.False(t, exist) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match DomainStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + // Check if value exists + for key := range domainValues { + exist := domainStorageMap.ValueExists(key) + require.True(t, exist) + } + + // Check if random value exists + for range 10 { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + _, keyExist := domainValues[key] + + exist := domainStorageMap.ValueExists(key) + require.Equal(t, keyExist, exist) + } + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapReadValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + key := interpreter.StringAtreeValue("key") + v := domainStorageMap.ReadValue(nil, interpreter.StringStorageMapKey(key)) + require.Nil(t, v) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. 
+ // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match DomainStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + for key, expectedValue := range domainValues { + value := domainStorageMap.ReadValue(nil, key) + require.NotNil(t, value) + + checkCadenceValue(t, inter, value, expectedValue) + } + + // Get non-existent value + for range 10 { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, keyExist := domainValues[key]; keyExist { + continue + } + + value := domainStorageMap.ReadValue(nil, key) + require.Nil(t, value) + } + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapSetAndUpdateValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + const count = 10 + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + for key := range domainValues { + // Overwrite existing values + n := random.Int() + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + domainStorageMap.WriteValue(inter, key, value) + + domainValues[key] = value + } + require.Equal(t, uint64(count), domainStorageMap.Count()) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapRemoveValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + key := interpreter.StringAtreeValue("key") + existed := domainStorageMap.WriteValue(inter, interpreter.StringStorageMapKey(key), nil) + require.False(t, existed) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + for key := range domainValues { + existed := domainStorageMap.WriteValue(inter, key, nil) + require.True(t, existed) + } + + // Remove non-existent value + for range 10 { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, keyExist := domainValues[key]; keyExist { + continue + } + + existed := domainStorageMap.WriteValue(inter, key, nil) + require.False(t, existed) + } + + clear(domainValues) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapIteratorNext(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainValues := make(domainStorageMapValues) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + iterator := domainStorageMap.Iterator(nil) + + // Test calling Next() twice on empty account storage map. + for range 2 { + k, v := iterator.Next() + require.Nil(t, k) + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + iterator := domainStorageMap.Iterator(nil) + + elementCount := 0 + for { + k, v := iterator.Next() + if k == nil { + break + } + + elementCount++ + + kv := k.(interpreter.StringAtreeValue) + + expectedValue, expectedValueExist := domainValues[interpreter.StringStorageMapKey(kv)] + require.True(t, expectedValueExist) + + checkCadenceValue(t, inter, v, expectedValue) + } + require.Equal(t, uint64(elementCount), domainStorageMap.Count()) + + // Test calling Next() after iterator reaches the end. + for range 2 { + k, v := iterator.Next() + require.Nil(t, k) + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapIteratorNextKey(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainValues := make(domainStorageMapValues) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + iterator := domainStorageMap.Iterator(nil) + + // Test calling NextKey() twice on empty account storage map. + for range 2 { + k := iterator.NextKey() + require.Nil(t, k) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + iterator := domainStorageMap.Iterator(nil) + + elementCount := 0 + for { + k := iterator.NextKey() + if k == nil { + break + } + + elementCount++ + + kv := k.(interpreter.StringAtreeValue) + + _, expectedValueExist := domainValues[interpreter.StringStorageMapKey(kv)] + require.True(t, expectedValueExist) + } + require.Equal(t, uint64(elementCount), domainStorageMap.Count()) + + // Test calling Next() after iterator reaches the end. + for range 2 { + k := iterator.NextKey() + require.Nil(t, k) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapIteratorNextValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainValues := make(domainStorageMapValues) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + iterator := domainStorageMap.Iterator(nil) + + // Test calling NextKey() twice on empty account storage map. + for range 2 { + v := iterator.NextValue() + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + iterator := domainStorageMap.Iterator(nil) + + elementCount := 0 + for { + v := iterator.NextValue() + if v == nil { + break + } + + elementCount++ + + ev, ok := v.(interpreter.EquatableValue) + require.True(t, ok) + + match := false + for _, expectedValue := range domainValues { + if ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue) { + match = true + break + } + } + require.True(t, match) + } + require.Equal(t, uint64(elementCount), domainStorageMap.Count()) + + // Test calling NextValue() after iterator reaches the end. + for range 2 { + v := iterator.NextValue() + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapLoadFromRootSlabID(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + t.Parallel() + + init := func() (atree.SlabID, domainStorageMapValues, map[string][]byte, map[string]uint64) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + valueID := domainStorageMap.ValueID() + return atreeValueIDToSlabID(valueID), 
make(domainStorageMapValues), ledger.StoredValues, ledger.StorageIndices + } + + domainStorageMapRootSlabID, domainValues, storedValues, storageIndices := init() + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + domainStorageMap := interpreter.NewDomainStorageMapWithRootID(storage, domainStorageMapRootSlabID) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + inter := NewTestInterpreterWithStorage(t, storage) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{domainStorageMapRootSlabID}) + }) + + t.Run("non-empty", func(t *testing.T) { + t.Parallel() + + init := func() (atree.SlabID, domainStorageMapValues, map[string][]byte, map[string]uint64) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. + // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + valueID := domainStorageMap.ValueID() + return atreeValueIDToSlabID(valueID), domainValues, ledger.StoredValues, ledger.StorageIndices + } + + domainStorageMapRootSlabID, domainValues, storedValues, storageIndices := init() + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) + + domainStorageMap := interpreter.NewDomainStorageMapWithRootID(storage, domainStorageMapRootSlabID) + + inter := NewTestInterpreterWithStorage(t, storage) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{domainStorageMapRootSlabID}) + }) +} + +func createDomainStorageMap( + storage atree.SlabStorage, + inter *interpreter.Interpreter, + address common.Address, + count int, + random *rand.Rand, +) (*interpreter.DomainStorageMap, domainStorageMapValues) { + + // Create domain storage map + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + + // Write to new domain storage map + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + return domainStorageMap, domainValues +} + +func atreeValueIDToSlabID(vid atree.ValueID) atree.SlabID { + return atree.NewSlabID( + atree.Address(vid[:8]), + atree.SlabIndex(vid[8:]), + ) +} diff --git a/interpreter/dynamic_casting_test.go b/interpreter/dynamic_casting_test.go index 255a7b719e..2b24793232 100644 --- a/interpreter/dynamic_casting_test.go +++ b/interpreter/dynamic_casting_test.go @@ 
-737,12 +737,11 @@ func returnResourceCasted(fromType, targetType string, operation ast.Operation) func testResourceCastValid(t *testing.T, types, fromType string, targetType string, operation ast.Operation) { inter := parseCheckAndInterpret(t, - types+ - returnResourceCasted( - fromType, - targetType, - operation, - ), + types+returnResourceCasted( + fromType, + targetType, + operation, + ), ) value, err := inter.Invoke("test") @@ -776,12 +775,10 @@ func testResourceCastValid(t *testing.T, types, fromType string, targetType stri func testResourceCastInvalid(t *testing.T, types, fromType, targetType string, operation ast.Operation) { inter := parseCheckAndInterpret(t, - fmt.Sprintf( - types+returnResourceCasted( - fromType, - targetType, - operation, - ), + types+returnResourceCasted( + fromType, + targetType, + operation, ), ) @@ -886,12 +883,11 @@ func returnStructCasted(fromType, targetType string, operation ast.Operation) st func testStructCastValid(t *testing.T, types, fromType string, targetType string, operation ast.Operation) { inter := parseCheckAndInterpret(t, - types+ - returnStructCasted( - fromType, - targetType, - operation, - ), + types+returnStructCasted( + fromType, + targetType, + operation, + ), ) value, err := inter.Invoke("test") @@ -925,13 +921,10 @@ func testStructCastValid(t *testing.T, types, fromType string, targetType string func testStructCastInvalid(t *testing.T, types, fromType, targetType string, operation ast.Operation) { inter := parseCheckAndInterpret(t, - fmt.Sprintf( - types+ - returnStructCasted( - fromType, - targetType, - operation, - ), + types+returnStructCasted( + fromType, + targetType, + operation, ), ) @@ -2256,8 +2249,12 @@ func returnReferenceCasted(fromType, targetType string, operation ast.Operation, func testReferenceCastValid(t *testing.T, types, fromType, targetType string, operation ast.Operation, isResource bool) { inter := parseCheckAndInterpret(t, - types+ - returnReferenceCasted(fromType, targetType, 
operation, isResource), + types+returnReferenceCasted( + fromType, + targetType, + operation, + isResource, + ), ) value, err := inter.Invoke("test") @@ -2309,9 +2306,11 @@ func testReferenceCastValid(t *testing.T, types, fromType, targetType string, op func testReferenceCastInvalid(t *testing.T, types, fromType, targetType string, operation ast.Operation, isResource bool) { inter := parseCheckAndInterpret(t, - fmt.Sprintf( - types+ - returnReferenceCasted(fromType, targetType, operation, isResource), + types+returnReferenceCasted( + fromType, + targetType, + operation, + isResource, ), ) diff --git a/interpreter/encode.go b/interpreter/encode.go index 1af6642f79..aad1bfa82c 100644 --- a/interpreter/encode.go +++ b/interpreter/encode.go @@ -1684,10 +1684,6 @@ func (c CompositeTypeInfo) IsComposite() bool { return true } -func (c CompositeTypeInfo) Identifier() string { - return string(c.Location.TypeID(nil, c.QualifiedIdentifier)) -} - func (c CompositeTypeInfo) Copy() atree.TypeInfo { // Return c as is because c is a value type. 
return c @@ -1741,10 +1737,6 @@ func (e EmptyTypeInfo) IsComposite() bool { return false } -func (e EmptyTypeInfo) Identifier() string { - return "" -} - func (e EmptyTypeInfo) Copy() atree.TypeInfo { return e } diff --git a/interpreter/errors.go b/interpreter/errors.go index d70dcf4848..fba381b4b2 100644 --- a/interpreter/errors.go +++ b/interpreter/errors.go @@ -44,7 +44,8 @@ func (*unsupportedOperation) IsInternalError() {} func (e *unsupportedOperation) Error() string { return fmt.Sprintf( - "internal error: cannot evaluate unsupported %s operation: %s", + "%s cannot evaluate unsupported %s operation: %s", + errors.InternalErrorMessagePrefix, e.kind.Name(), e.operation.Symbol(), ) @@ -299,7 +300,21 @@ func (e UnderflowError) Error() string { return "underflow" } -// UnderflowError +// NegativeShiftError + +type NegativeShiftError struct { + LocationRange +} + +var _ errors.UserError = NegativeShiftError{} + +func (NegativeShiftError) IsUserError() {} + +func (e NegativeShiftError) Error() string { + return "negative shift" +} + +// DivisionByZeroError type DivisionByZeroError struct { LocationRange @@ -323,7 +338,10 @@ var _ errors.InternalError = InvalidatedResourceError{} func (InvalidatedResourceError) IsInternalError() {} func (e InvalidatedResourceError) Error() string { - return "internal error: resource is invalidated and cannot be used anymore" + return fmt.Sprintf( + "%s resource is invalidated and cannot be used anymore", + errors.InternalErrorMessagePrefix, + ) } // DestroyedResourceError is the error which is reported @@ -633,7 +651,8 @@ func (MemberAccessTypeError) IsInternalError() {} func (e MemberAccessTypeError) Error() string { return fmt.Sprintf( - "invalid member access: expected `%s`, got `%s`", + "%s invalid member access: expected `%s`, got `%s`", + errors.InternalErrorMessagePrefix, e.ExpectedType.QualifiedString(), e.ActualType.QualifiedString(), ) @@ -657,7 +676,8 @@ func (e ValueTransferTypeError) Error() string { ) return 
fmt.Sprintf( - "invalid transfer of value: expected `%s`, got `%s`", + "%s invalid transfer of value: expected `%s`, got `%s`", + errors.InternalErrorMessagePrefix, expected, actual, ) @@ -675,7 +695,8 @@ func (UnexpectedMappedEntitlementError) IsInternalError() {} func (e UnexpectedMappedEntitlementError) Error() string { return fmt.Sprintf( - "invalid transfer of value: found an unexpected runtime mapped entitlement `%s`", + "%s invalid transfer of value: found an unexpected runtime mapped entitlement `%s`", + errors.InternalErrorMessagePrefix, e.Type.QualifiedString(), ) } @@ -692,7 +713,8 @@ func (ResourceConstructionError) IsInternalError() {} func (e ResourceConstructionError) Error() string { return fmt.Sprintf( - "cannot create resource `%s`: outside of declaring location %s", + "%s cannot create resource `%s`: outside of declaring location %s", + errors.InternalErrorMessagePrefix, e.CompositeType.QualifiedString(), e.CompositeType.Location.String(), ) @@ -952,7 +974,8 @@ func (InvalidAttachmentOperationTargetError) IsInternalError() {} func (e InvalidAttachmentOperationTargetError) Error() string { return fmt.Sprintf( - "cannot add or remove attachment with non-owned value (%T)", + "%s cannot add or remove attachment with non-owned value (%T)", + errors.InternalErrorMessagePrefix, e.Value, ) } @@ -1092,7 +1115,10 @@ var _ errors.InternalError = ResourceReferenceDereferenceError{} func (ResourceReferenceDereferenceError) IsInternalError() {} func (e ResourceReferenceDereferenceError) Error() string { - return "internal error: resource-references cannot be dereferenced" + return fmt.Sprintf( + "%s resource-references cannot be dereferenced", + errors.InternalErrorMessagePrefix, + ) } // ResourceLossError @@ -1117,7 +1143,10 @@ var _ errors.InternalError = InvalidCapabilityIDError{} func (InvalidCapabilityIDError) IsInternalError() {} func (e InvalidCapabilityIDError) Error() string { - return "capability created with invalid ID" + return fmt.Sprintf( + "%s 
capability created with invalid ID", + errors.InternalErrorMessagePrefix, + ) } // ReferencedValueChangedError diff --git a/interpreter/interpreter.go b/interpreter/interpreter.go index 80def48c42..746eeaa7bf 100644 --- a/interpreter/interpreter.go +++ b/interpreter/interpreter.go @@ -237,7 +237,12 @@ func (c TypeCodes) Merge(codes TypeCodes) { type Storage interface { atree.SlabStorage - GetStorageMap(address common.Address, domain string, createIfNotExists bool) *StorageMap + GetDomainStorageMap( + inter *Interpreter, + address common.Address, + domain common.StorageDomain, + createIfNotExists bool, + ) *DomainStorageMap CheckHealth() error } @@ -967,7 +972,7 @@ func (interpreter *Interpreter) declareSelfVariable(value Value, locationRange L } func (interpreter *Interpreter) visitAssignment( - transferOperation ast.TransferOperation, + _ ast.TransferOperation, targetGetterSetter getterSetter, targetType sema.Type, valueExpression ast.Expression, valueType sema.Type, position ast.HasPosition, @@ -1271,7 +1276,7 @@ func (declarationInterpreter *Interpreter) declareNonEnumCompositeValue( functions.Set(resourceDefaultDestroyEventName(compositeType), destroyEventConstructor) } - applyDefaultFunctions := func(ty *sema.InterfaceType, code WrapperCode) { + applyDefaultFunctions := func(_ *sema.InterfaceType, code WrapperCode) { // Apply default functions, if conforming type does not provide the function @@ -2678,10 +2683,10 @@ func (interpreter *Interpreter) NewSubInterpreter( func (interpreter *Interpreter) StoredValueExists( storageAddress common.Address, - domain string, + domain common.StorageDomain, identifier StorageMapKey, ) bool { - accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, false) + accountStorage := interpreter.Storage().GetDomainStorageMap(interpreter, storageAddress, domain, false) if accountStorage == nil { return false } @@ -2690,10 +2695,10 @@ func (interpreter *Interpreter) StoredValueExists( func (interpreter 
*Interpreter) ReadStored( storageAddress common.Address, - domain string, + domain common.StorageDomain, identifier StorageMapKey, ) Value { - accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, false) + accountStorage := interpreter.Storage().GetDomainStorageMap(interpreter, storageAddress, domain, false) if accountStorage == nil { return nil } @@ -2702,11 +2707,11 @@ func (interpreter *Interpreter) ReadStored( func (interpreter *Interpreter) WriteStored( storageAddress common.Address, - domain string, + domain common.StorageDomain, key StorageMapKey, value Value, ) (existed bool) { - accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, true) + accountStorage := interpreter.Storage().GetDomainStorageMap(interpreter, storageAddress, domain, true) return accountStorage.WriteValue(interpreter, key, value) } @@ -4069,7 +4074,7 @@ func (interpreter *Interpreter) IsSubTypeOfSemaType(staticSubType StaticType, su } func (interpreter *Interpreter) domainPaths(address common.Address, domain common.PathDomain) []Value { - storageMap := interpreter.Storage().GetStorageMap(address, domain.Identifier(), false) + storageMap := interpreter.Storage().GetDomainStorageMap(interpreter, address, domain.StorageDomain(), false) if storageMap == nil { return []Value{} } @@ -4164,7 +4169,7 @@ func (interpreter *Interpreter) newStorageIterationFunction( parameterTypes := fnType.ParameterTypes() returnType := fnType.ReturnTypeAnnotation.Type - storageMap := config.Storage.GetStorageMap(address, domain.Identifier(), false) + storageMap := config.Storage.GetDomainStorageMap(interpreter, address, domain.StorageDomain(), false) if storageMap == nil { // if nothing is stored, no iteration is required return Void @@ -4327,7 +4332,7 @@ func (interpreter *Interpreter) authAccountSaveFunction( panic(errors.NewUnreachableError()) } - domain := path.Domain.Identifier() + domain := path.Domain.StorageDomain() identifier := path.Identifier // Prevent an 
overwrite @@ -4390,7 +4395,7 @@ func (interpreter *Interpreter) authAccountTypeFunction( panic(errors.NewUnreachableError()) } - domain := path.Domain.Identifier() + domain := path.Domain.StorageDomain() identifier := path.Identifier storageMapKey := StringStorageMapKey(identifier) @@ -4448,7 +4453,7 @@ func (interpreter *Interpreter) authAccountReadFunction( panic(errors.NewUnreachableError()) } - domain := path.Domain.Identifier() + domain := path.Domain.StorageDomain() identifier := path.Identifier storageMapKey := StringStorageMapKey(identifier) @@ -4589,7 +4594,7 @@ func (interpreter *Interpreter) authAccountCheckFunction( panic(errors.NewUnreachableError()) } - domain := path.Domain.Identifier() + domain := path.Domain.StorageDomain() identifier := path.Identifier storageMapKey := StringStorageMapKey(identifier) diff --git a/interpreter/interpreter_tracing.go b/interpreter/interpreter_tracing.go index 365eebed42..10ee3dd418 100644 --- a/interpreter/interpreter_tracing.go +++ b/interpreter/interpreter_tracing.go @@ -30,9 +30,10 @@ const ( tracingImportPrefix = "import." // type prefixes - tracingArrayPrefix = "array." - tracingDictionaryPrefix = "dictionary." - tracingCompositePrefix = "composite." + tracingArrayPrefix = "array." + tracingDictionaryPrefix = "dictionary." + tracingCompositePrefix = "composite." + tracingDomainStorageMapPrefix = "domainstoragemap." 
// Value operation postfixes tracingConstructPostfix = "construct" @@ -162,6 +163,20 @@ func (interpreter *Interpreter) reportDictionaryValueDeepRemoveTrace( ) } +func (interpreter *Interpreter) reportDomainStorageMapDeepRemoveTrace( + typeInfo string, + count int, + duration time.Duration, +) { + config := interpreter.SharedState.Config + config.OnRecordTrace( + interpreter, + tracingDomainStorageMapPrefix+tracingDeepRemovePostfix, + duration, + prepareArrayAndMapValueTraceAttrs(typeInfo, count), + ) +} + func (interpreter *Interpreter) reportDictionaryValueDestroyTrace( typeInfo string, count int, diff --git a/interpreter/memory_metering_test.go b/interpreter/memory_metering_test.go index 725ea93037..62f9d6ef98 100644 --- a/interpreter/memory_metering_test.go +++ b/interpreter/memory_metering_test.go @@ -3289,9 +3289,9 @@ func TestInterpretUInt128Metering(t *testing.T) { _, err := inter.Invoke("main") require.NoError(t, err) - // creation: 8 + 8 + // creation: 8 + 8 + 16 // result: 16 - assert.Equal(t, uint64(32), meter.getMemory(common.MemoryKindBigInt)) + assert.Equal(t, uint64(48), meter.getMemory(common.MemoryKindBigInt)) }) t.Run("bitwise right-shift", func(t *testing.T) { @@ -3587,9 +3587,9 @@ func TestInterpretUInt256Metering(t *testing.T) { _, err := inter.Invoke("main") require.NoError(t, err) - // creation: 8 + 8 + // creation: 8 + 8 + 32 // result: 32 - assert.Equal(t, uint64(48), meter.getMemory(common.MemoryKindBigInt)) + assert.Equal(t, uint64(80), meter.getMemory(common.MemoryKindBigInt)) }) t.Run("bitwise right-shift", func(t *testing.T) { @@ -5308,9 +5308,9 @@ func TestInterpretInt128Metering(t *testing.T) { _, err := inter.Invoke("main") require.NoError(t, err) - // two literals: 8 + 8 + // two literals: 8 + 8 + 16 // result: 16 - assert.Equal(t, uint64(32), meter.getMemory(common.MemoryKindBigInt)) + assert.Equal(t, uint64(48), meter.getMemory(common.MemoryKindBigInt)) }) t.Run("bitwise right shift", func(t *testing.T) { @@ -5677,9 +5677,9 @@ 
func TestInterpretInt256Metering(t *testing.T) { _, err := inter.Invoke("main") require.NoError(t, err) - // two literals: 8 + 8 + // two literals: 8 + 8 + 32 // result: 32 - assert.Equal(t, uint64(48), meter.getMemory(common.MemoryKindBigInt)) + assert.Equal(t, uint64(80), meter.getMemory(common.MemoryKindBigInt)) }) t.Run("bitwise right shift", func(t *testing.T) { diff --git a/interpreter/misc_test.go b/interpreter/misc_test.go index 8ec9d08532..7ef440fac9 100644 --- a/interpreter/misc_test.go +++ b/interpreter/misc_test.go @@ -41,6 +41,7 @@ import ( "github.com/onflow/cadence/stdlib" . "github.com/onflow/cadence/test_utils/common_utils" . "github.com/onflow/cadence/test_utils/interpreter_utils" + . "github.com/onflow/cadence/test_utils/runtime_utils" . "github.com/onflow/cadence/test_utils/sema_utils" ) @@ -67,6 +68,23 @@ func parseCheckAndInterpretWithOptions( return parseCheckAndInterpretWithOptionsAndMemoryMetering(t, code, options, nil) } +func parseCheckAndInterpretWithAtreeValidationsDisabled( + t testing.TB, + code string, + options ParseCheckAndInterpretOptions, +) ( + inter *interpreter.Interpreter, + err error, +) { + return parseCheckAndInterpretWithOptionsAndMemoryMeteringAndAtreeValidations( + t, + code, + options, + nil, + false, + ) +} + func parseCheckAndInterpretWithLogs( tb testing.TB, code string, @@ -169,6 +187,30 @@ func parseCheckAndInterpretWithOptionsAndMemoryMetering( err error, ) { + // Atree validation should be disabled for memory metering tests. + // Otherwise, validation may also affect the memory consumption. 
+ enableAtreeValidations := memoryGauge == nil + + return parseCheckAndInterpretWithOptionsAndMemoryMeteringAndAtreeValidations( + t, + code, + options, + memoryGauge, + enableAtreeValidations, + ) +} + +func parseCheckAndInterpretWithOptionsAndMemoryMeteringAndAtreeValidations( + t testing.TB, + code string, + options ParseCheckAndInterpretOptions, + memoryGauge common.MemoryGauge, + enableAtreeValidations bool, +) ( + inter *interpreter.Interpreter, + err error, +) { + checker, err := ParseAndCheckWithOptionsAndMemoryMetering(t, code, ParseAndCheckOptions{ @@ -197,10 +239,15 @@ func parseCheckAndInterpretWithOptionsAndMemoryMetering( if options.Config != nil { config = *options.Config } - if memoryGauge == nil { + + if enableAtreeValidations { config.AtreeValueValidationEnabled = true config.AtreeStorageValidationEnabled = true + } else { + config.AtreeValueValidationEnabled = false + config.AtreeStorageValidationEnabled = false } + if config.UUIDHandler == nil { config.UUIDHandler = func() (uint64, error) { uuid++ @@ -5349,8 +5396,8 @@ func TestInterpretReferenceFailableDowncasting(t *testing.T) { true, // r is standalone. 
) - domain := storagePath.Domain.Identifier() - storageMap := storage.GetStorageMap(storageAddress, domain, true) + domain := storagePath.Domain.StorageDomain() + storageMap := storage.GetDomainStorageMap(inter, storageAddress, domain, true) storageMapKey := interpreter.StringStorageMapKey(storagePath.Identifier) storageMap.WriteValue(inter, storageMapKey, r) @@ -12509,4 +12556,725 @@ func TestInterpretStringTemplates(t *testing.T) { inter.Globals.Get("x").GetValue(inter), ) }) + + t.Run("func", func(t *testing.T) { + t.Parallel() + + inter := parseCheckAndInterpret(t, ` + let add = fun(): Int { + return 2+2 + } + let x: String = "\(add())" + `) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredStringValue("4"), + inter.Globals.Get("x").GetValue(inter), + ) + }) + + t.Run("ternary", func(t *testing.T) { + t.Parallel() + + inter := parseCheckAndInterpret(t, ` + let z = false + let x: String = "\(z ? "foo" : "bar" )" + `) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredStringValue("bar"), + inter.Globals.Get("x").GetValue(inter), + ) + }) + + t.Run("nested", func(t *testing.T) { + t.Parallel() + + inter := parseCheckAndInterpret(t, ` + let x: String = "\(2*(4-2) + 1 == 5)" + `) + + AssertValuesEqual( + t, + inter, + interpreter.NewUnmeteredStringValue("true"), + inter.Globals.Get("x").GetValue(inter), + ) + }) +} + +func TestInterpretSomeValueChildContainerMutation(t *testing.T) { + + t.Parallel() + + test := func(t *testing.T, code string) { + + t.Parallel() + + ledger := NewTestLedger(nil, nil) + + newInter := func() *interpreter.Interpreter { + + inter, err := parseCheckAndInterpretWithOptions(t, + code, + ParseCheckAndInterpretOptions{ + Config: &interpreter.Config{ + Storage: runtime.NewStorage(ledger, nil, runtime.StorageConfig{}), + }, + }, + ) + require.NoError(t, err) + + return inter + } + + // Setup + + inter := newInter() + + foo, err := inter.Invoke("setup") + require.NoError(t, err) + + address := 
common.MustBytesToAddress([]byte{0x1}) + path := interpreter.NewUnmeteredPathValue(common.PathDomainStorage, "foo") + + storage := inter.Storage().(*runtime.Storage) + storageMap := storage.GetDomainStorageMap( + inter, + address, + common.StorageDomain(path.Domain), + true, + ) + + foo = foo.Transfer( + inter, + interpreter.EmptyLocationRange, + atree.Address(address), + false, + nil, + nil, + true, + ) + + // Write the value to the storage map. + // However, the value is not referenced by the root of the storage yet + // (a storage map), so atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + withoutAtreeStorageValidationEnabled( + inter, + func() struct{} { + storageMap.WriteValue( + inter, + interpreter.StringStorageMapKey(path.Identifier), + foo, + ) + return struct{}{} + }, + ) + + err = storage.Commit(inter, false) + require.NoError(t, err) + + // Update + + inter = newInter() + + storage = inter.Storage().(*runtime.Storage) + storageMap = storage.GetDomainStorageMap( + inter, + address, + common.StorageDomain(path.Domain), + false, + ) + require.NotNil(t, storageMap) + + ref := interpreter.NewStorageReferenceValue( + nil, + interpreter.UnauthorizedAccess, + address, + path, + nil, + ) + + result, err := inter.Invoke("update", ref) + require.NoError(t, err) + assert.Equal(t, interpreter.TrueValue, result) + + err = storage.Commit(inter, false) + require.NoError(t, err) + + // Update again + + inter = newInter() + + storage = inter.Storage().(*runtime.Storage) + storageMap = storage.GetDomainStorageMap( + inter, + address, + common.StorageDomain(path.Domain), + false, + ) + require.NotNil(t, storageMap) + + ref = interpreter.NewStorageReferenceValue( + nil, + interpreter.UnauthorizedAccess, + address, + path, + nil, + ) + + result, err = inter.Invoke("updateAgain", ref) + require.NoError(t, err) + assert.Equal(t, interpreter.TrueValue, result) + } + + t.Run("dictionary, one level", func(t *testing.T) { + + 
test(t, ` + struct Foo { + let values: {String: Int}? + + init() { + self.values = {} + } + + fun set(key: String, value: Int) { + if let ref: auth(Mutate) &{String: Int} = &self.values { + ref[key] = value + } + } + + fun get(key: String): Int? { + if let ref: &{String: Int} = &self.values { + return ref[key] + } + return nil + } + } + + fun setup(): Foo { + let foo = Foo() + foo.set(key: "a", value: 1) + return foo + } + + fun update(foo: &Foo): Bool { + if foo.get(key: "a") != 1 { + return false + } + foo.set(key: "a", value: 2) + return true + } + + fun updateAgain(foo: &Foo): Bool { + if foo.get(key: "a") != 2 { + return false + } + foo.set(key: "a", value: 3) + return true + } + `) + }) + + t.Run("dictionary, two levels", func(t *testing.T) { + test(t, ` + struct Foo { + let values: {String: Int}?? + + init() { + self.values = {} + } + + fun set(key: String, value: Int) { + if let optRef: auth(Mutate) &{String: Int}? = &self.values { + if let ref: auth(Mutate) &{String: Int} = optRef { + ref[key] = value + } + } + } + + fun get(key: String): Int? { + if let optRef: &{String: Int}? = &self.values { + if let ref: &{String: Int} = optRef { + return ref[key] + } + } + return nil + } + } + + fun setup(): Foo { + let foo = Foo() + foo.set(key: "a", value: 1) + return foo + } + + fun update(foo: &Foo): Bool { + if foo.get(key: "a") != 1 { + return false + } + foo.set(key: "a", value: 2) + return true + } + + fun updateAgain(foo: &Foo): Bool { + if foo.get(key: "a") != 2 { + return false + } + foo.set(key: "a", value: 3) + return true + } + `) + }) + + t.Run("dictionary, nested", func(t *testing.T) { + + test(t, ` + struct Bar { + let values: {String: Int}? + + init() { + self.values = {} + } + + fun set(key: String, value: Int) { + if let ref: auth(Mutate) &{String: Int} = &self.values { + ref[key] = value + } + } + + fun get(key: String): Int? 
{ + if let ref: &{String: Int} = &self.values { + return ref[key] + } + return nil + } + } + + struct Foo { + let values: {String: Bar}? + + init() { + self.values = {} + } + + fun set(key: String, value: Int) { + if let ref: auth(Mutate) &{String: Bar} = &self.values { + if ref[key] == nil { + ref[key] = Bar() + } + ref[key]?.set(key: key, value: value) + } + } + + fun get(key: String): Int? { + if let ref: &{String: Bar} = &self.values { + return ref[key]?.get(key: key) ?? nil + } + return nil + } + } + + fun setup(): Foo { + let foo = Foo() + foo.set(key: "a", value: 1) + return foo + } + + fun update(foo: &Foo): Bool { + if foo.get(key: "a") != 1 { + return false + } + foo.set(key: "a", value: 2) + return true + } + + fun updateAgain(foo: &Foo): Bool { + if foo.get(key: "a") != 2 { + return false + } + foo.set(key: "a", value: 3) + return true + } + `) + }) + + t.Run("resource, one level", func(t *testing.T) { + + test(t, ` + + resource Bar { + var value: Int + + init() { + self.value = 0 + } + } + + resource Foo { + let bar: @Bar? + + init() { + self.bar <- create Bar() + } + + fun set(value: Int) { + if let ref: &Bar = &self.bar { + ref.value = value + } + } + + fun getValue(): Int? { + return self.bar?.value + } + } + + fun setup(): @Foo { + let foo <- create Foo() + foo.set(value: 1) + return <-foo + } + + fun update(foo: &Foo): Bool { + if foo.getValue() != 1 { + return false + } + foo.set(value: 2) + return true + } + + fun updateAgain(foo: &Foo): Bool { + if foo.getValue() != 2 { + return false + } + foo.set(value: 3) + return true + } + `) + + }) + + t.Run("resource, two levels", func(t *testing.T) { + + test(t, ` + + resource Bar { + var value: Int + + init() { + self.value = 0 + } + } + + resource Foo { + let bar: @Bar?? + + init() { + self.bar <- create Bar() + } + + fun set(value: Int) { + if let optRef: &Bar? = &self.bar { + if let ref = optRef { + ref.value = value + } + } + } + + fun getValue(): Int? { + if let optRef: &Bar? 
= &self.bar { + return optRef?.value + } + return nil + } + } + + fun setup(): @Foo { + let foo <- create Foo() + foo.set(value: 1) + return <-foo + } + + fun update(foo: &Foo): Bool { + if foo.getValue() != 1 { + return false + } + foo.set(value: 2) + return true + } + + fun updateAgain(foo: &Foo): Bool { + if foo.getValue() != 2 { + return false + } + foo.set(value: 3) + return true + } + `) + }) + + t.Run("resource, nested", func(t *testing.T) { + + test(t, ` + resource Baz { + var value: Int + + init() { + self.value = 0 + } + } + + resource Bar { + let baz: @Baz? + + init() { + self.baz <- create Baz() + } + + fun set(value: Int) { + if let ref: &Baz = &self.baz { + ref.value = value + } + } + + fun getValue(): Int? { + return self.baz?.value + } + } + + resource Foo { + let bar: @Bar? + + init() { + self.bar <- create Bar() + } + + fun set(value: Int) { + if let ref: &Bar = &self.bar { + ref.set(value: value) + } + } + + fun getValue(): Int? { + return self.bar?.getValue() ?? nil + } + } + + fun setup(): @Foo { + let foo <- create Foo() + foo.set(value: 1) + return <-foo + } + + fun update(foo: &Foo): Bool { + if foo.getValue() != 1 { + return false + } + foo.set(value: 2) + return true + } + + fun updateAgain(foo: &Foo): Bool { + if foo.getValue() != 2 { + return false + } + foo.set(value: 3) + return true + } + `) + }) + + t.Run("array, one level", func(t *testing.T) { + + test(t, ` + + struct Foo { + let values: [Int]? + + init() { + self.values = [] + } + + fun set(value: Int) { + if let ref: auth(Mutate) &[Int] = &self.values { + if ref.length == 0 { + ref.append(value) + } else { + ref[0] = value + } + } + } + + fun getValue(): Int? 
{ + if let ref: &[Int] = &self.values { + return ref[0] + } + return nil + } + } + + fun setup(): Foo { + let foo = Foo() + foo.set(value: 1) + return foo + } + + fun update(foo: &Foo): Bool { + if foo.getValue() != 1 { + return false + } + foo.set(value: 2) + return true + } + + fun updateAgain(foo: &Foo): Bool { + if foo.getValue() != 2 { + return false + } + foo.set(value: 3) + return true + } + `) + + }) + + t.Run("array, two levels", func(t *testing.T) { + + test(t, ` + + struct Foo { + let values: [Int]?? + + init() { + self.values = [] + } + + fun set(value: Int) { + if let optRef: auth(Mutate) &[Int]? = &self.values { + if let ref = optRef { + if ref.length == 0 { + ref.append(value) + } else { + ref[0] = value + } + } + } + } + + fun getValue(): Int? { + if let optRef: &[Int]? = &self.values { + if let ref = optRef { + return ref[0] + } + } + return nil + } + } + + fun setup(): Foo { + let foo = Foo() + foo.set(value: 1) + return foo + } + + fun update(foo: &Foo): Bool { + if foo.getValue() != 1 { + return false + } + foo.set(value: 2) + return true + } + + fun updateAgain(foo: &Foo): Bool { + if foo.getValue() != 2 { + return false + } + foo.set(value: 3) + return true + } + `) + }) + + t.Run("array, nested", func(t *testing.T) { + + test(t, ` + + struct Bar { + let values: [Int]? + + init() { + self.values = [] + } + + fun set(value: Int) { + if let ref: auth(Mutate) &[Int] = &self.values { + if ref.length == 0 { + ref.append(value) + } else { + ref[0] = value + } + } + } + + fun getValue(): Int? { + if let ref: &[Int] = &self.values { + return ref[0] + } + return nil + } + } + + struct Foo { + let values: [Bar]? + + init() { + self.values = [] + } + + fun set(value: Int) { + if let ref: auth(Mutate) &[Bar] = &self.values { + if ref.length == 0 { + ref.append(Bar()) + } + ref[0].set(value: value) + } + } + + fun getValue(): Int? 
{ + if let ref: &[Bar] = &self.values { + return ref[0].getValue() + } + return nil + } + } + + fun setup(): Foo { + let foo = Foo() + foo.set(value: 1) + return foo + } + + fun update(foo: &Foo): Bool { + if foo.getValue() != 1 { + return false + } + foo.set(value: 2) + return true + } + + fun updateAgain(foo: &Foo): Bool { + if foo.getValue() != 2 { + return false + } + foo.set(value: 3) + return true + } + `) + + }) } diff --git a/interpreter/resources_test.go b/interpreter/resources_test.go index 564c745258..8965aef848 100644 --- a/interpreter/resources_test.go +++ b/interpreter/resources_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/onflow/atree" + "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/sema" . "github.com/onflow/cadence/test_utils/common_utils" @@ -3542,8 +3544,12 @@ func TestInterpretInvalidNilCoalescingResourceDuplication(t *testing.T) { t.Parallel() - inter, err := parseCheckAndInterpretWithOptions(t, - ` + t.Run("remove", func(t *testing.T) { + + t.Parallel() + + inter, err := parseCheckAndInterpretWithAtreeValidationsDisabled(t, + ` access(all) resource R { access(all) let answer: Int init() { @@ -3564,18 +3570,59 @@ func TestInterpretInvalidNilCoalescingResourceDuplication(t *testing.T) { return answer1 + answer2 } `, - ParseCheckAndInterpretOptions{ - HandleCheckerError: func(err error) { - errs := RequireCheckerErrors(t, err, 1) - assert.IsType(t, &sema.InvalidNilCoalescingRightResourceOperandError{}, errs[0]) + ParseCheckAndInterpretOptions{ + HandleCheckerError: func(err error) { + errs := RequireCheckerErrors(t, err, 1) + assert.IsType(t, &sema.InvalidNilCoalescingRightResourceOperandError{}, errs[0]) + }, }, - }, - ) - require.NoError(t, err) + ) + require.NoError(t, err) - _, err = inter.Invoke("main") - require.Error(t, err) + _, err = inter.Invoke("main") + require.Error(t, err) + + var inliningError *atree.FatalError + require.ErrorAs(t, 
err, &inliningError) + require.Contains(t, inliningError.Error(), "failed to uninline") + }) + + t.Run("destroy", func(t *testing.T) { + + t.Parallel() + + inter, err := parseCheckAndInterpretWithAtreeValidationsDisabled(t, + ` + access(all) resource R { + access(all) let answer: Int + init() { + self.answer = 42 + } + } + + access(all) fun main(): Int { + let rs <- [<- create R(), nil] + rs[1] <-! (nil ?? rs[0]) + let answer1 = rs[0]?.answer! + let answer2 = rs[1]?.answer! + destroy rs + return answer1 + answer2 + } + `, + ParseCheckAndInterpretOptions{ + HandleCheckerError: func(err error) { + errs := RequireCheckerErrors(t, err, 1) + assert.IsType(t, &sema.InvalidNilCoalescingRightResourceOperandError{}, errs[0]) + }, + }, + ) + require.NoError(t, err) + + _, err = inter.Invoke("main") + require.Error(t, err) + + var destroyedResourceErr interpreter.DestroyedResourceError + require.ErrorAs(t, err, &destroyedResourceErr) + }) - var destroyedResourceErr interpreter.DestroyedResourceError - require.ErrorAs(t, err, &destroyedResourceErr) } diff --git a/interpreter/statictype.go b/interpreter/statictype.go index 1b80201088..c2de979eea 100644 --- a/interpreter/statictype.go +++ b/interpreter/statictype.go @@ -252,10 +252,6 @@ func (t *VariableSizedStaticType) Copy() atree.TypeInfo { return t } -func (t *VariableSizedStaticType) Identifier() string { - return string(t.ID()) -} - func (*VariableSizedStaticType) isStaticType() {} func (*VariableSizedStaticType) elementSize() uint { @@ -382,10 +378,6 @@ func (t *ConstantSizedStaticType) Copy() atree.TypeInfo { return t } -func (t *ConstantSizedStaticType) Identifier() string { - return string(t.ID()) -} - func (*ConstantSizedStaticType) isStaticType() {} func (*ConstantSizedStaticType) elementSize() uint { @@ -464,10 +456,6 @@ func (t *DictionaryStaticType) Copy() atree.TypeInfo { return t } -func (t *DictionaryStaticType) Identifier() string { - return string(t.ID()) -} - func (*DictionaryStaticType) isStaticType() {} 
func (*DictionaryStaticType) elementSize() uint { diff --git a/interpreter/storage.go b/interpreter/storage.go index f9c6eb6d59..638de0d099 100644 --- a/interpreter/storage.go +++ b/interpreter/storage.go @@ -20,6 +20,7 @@ package interpreter import ( "bytes" + "cmp" "io" "math" "strings" @@ -101,6 +102,36 @@ func ConvertStoredValue(gauge common.MemoryGauge, value atree.Value) (Value, err } } +type StorageDomainKey struct { + Domain common.StorageDomain + Address common.Address +} + +func (k StorageDomainKey) Compare(o StorageDomainKey) int { + switch bytes.Compare(k.Address[:], o.Address[:]) { + case -1: + return -1 + case 0: + return cmp.Compare(k.Domain, o.Domain) + case 1: + return 1 + default: + panic(errors.NewUnreachableError()) + } +} + +func NewStorageDomainKey( + memoryGauge common.MemoryGauge, + address common.Address, + domain common.StorageDomain, +) StorageDomainKey { + common.UseMemory(memoryGauge, common.StorageKeyMemoryUsage) + return StorageDomainKey{ + Address: address, + Domain: domain, + } +} + type StorageKey struct { Key string Address common.Address @@ -130,8 +161,8 @@ func (k StorageKey) IsLess(o StorageKey) bool { // InMemoryStorage type InMemoryStorage struct { *atree.BasicSlabStorage - StorageMaps map[StorageKey]*StorageMap - memoryGauge common.MemoryGauge + DomainStorageMaps map[StorageDomainKey]*DomainStorageMap + memoryGauge common.MemoryGauge } var _ Storage = InMemoryStorage{} @@ -157,26 +188,27 @@ func NewInMemoryStorage(memoryGauge common.MemoryGauge) InMemoryStorage { ) return InMemoryStorage{ - BasicSlabStorage: slabStorage, - StorageMaps: make(map[StorageKey]*StorageMap), - memoryGauge: memoryGauge, + BasicSlabStorage: slabStorage, + DomainStorageMaps: make(map[StorageDomainKey]*DomainStorageMap), + memoryGauge: memoryGauge, } } -func (i InMemoryStorage) GetStorageMap( +func (i InMemoryStorage) GetDomainStorageMap( + _ *Interpreter, address common.Address, - domain string, + domain common.StorageDomain, createIfNotExists bool, 
) ( - storageMap *StorageMap, + domainStorageMap *DomainStorageMap, ) { - key := NewStorageKey(i.memoryGauge, address, domain) - storageMap = i.StorageMaps[key] - if storageMap == nil && createIfNotExists { - storageMap = NewStorageMap(i.memoryGauge, i, atree.Address(address)) - i.StorageMaps[key] = storageMap + key := NewStorageDomainKey(i.memoryGauge, address, domain) + domainStorageMap = i.DomainStorageMaps[key] + if domainStorageMap == nil && createIfNotExists { + domainStorageMap = NewDomainStorageMap(i.memoryGauge, i, atree.Address(address)) + i.DomainStorageMaps[key] = domainStorageMap } - return storageMap + return domainStorageMap } func (i InMemoryStorage) CheckHealth() error { diff --git a/interpreter/storage_test.go b/interpreter/storage_test.go index 0693df2d9b..fe822fd05b 100644 --- a/interpreter/storage_test.go +++ b/interpreter/storage_test.go @@ -524,7 +524,7 @@ func TestStorageOverwriteAndRemove(t *testing.T) { const storageMapKey = StringStorageMapKey("test") - storageMap := storage.GetStorageMap(address, "storage", true) + storageMap := storage.GetDomainStorageMap(inter, address, common.StorageDomainPathStorage, true) storageMap.WriteValue(inter, storageMapKey, array1) // Overwriting delete any existing child slabs diff --git a/interpreter/stringatreevalue_test.go b/interpreter/stringatreevalue_test.go index f2e622a8a9..ffaedf6d44 100644 --- a/interpreter/stringatreevalue_test.go +++ b/interpreter/stringatreevalue_test.go @@ -36,12 +36,6 @@ func TestLargeStringAtreeValueInSeparateSlab(t *testing.T) { storage := NewInMemoryStorage(nil) - storageMap := storage.GetStorageMap( - common.MustBytesToAddress([]byte{0x1}), - common.PathDomainStorage.Identifier(), - true, - ) - inter, err := NewInterpreter( nil, common.StringLocation("test"), @@ -51,6 +45,13 @@ func TestLargeStringAtreeValueInSeparateSlab(t *testing.T) { ) require.NoError(t, err) + storageMap := storage.GetDomainStorageMap( + inter, + common.MustBytesToAddress([]byte{0x1}), + 
common.PathDomainStorage.StorageDomain(), + true, + ) + // Generate a large key to force the string to get stored in a separate slab keyValue := NewStringAtreeValue(nil, strings.Repeat("x", 10_000)) diff --git a/interpreter/value.go b/interpreter/value.go index bf698515c5..cff5dad1ec 100644 --- a/interpreter/value.go +++ b/interpreter/value.go @@ -246,6 +246,13 @@ type ValueIterator interface { Next(interpreter *Interpreter, locationRange LocationRange) Value } +// atreeContainerBackedValue is an interface for values using atree containers +// (atree.Array or atree.OrderedMap) under the hood. +type atreeContainerBackedValue interface { + Value + isAtreeContainerBackedValue() +} + func safeAdd(a, b int, locationRange LocationRange) int { // INT32-C if (b > 0) && (a > (goMaxInt - b)) { diff --git a/interpreter/value_account.go b/interpreter/value_account.go index 998c8e98fd..23f012704f 100644 --- a/interpreter/value_account.go +++ b/interpreter/value_account.go @@ -55,44 +55,29 @@ func NewAccountValue( sema.AccountTypeAddressFieldName: address, } - var storage Value - var contracts Value - var keys Value - var inbox Value - var capabilities Value - - computeField := func(name string, inter *Interpreter, locationRange LocationRange) Value { + computeLazyStoredField := func(name string) Value { switch name { case sema.AccountTypeStorageFieldName: - if storage == nil { - storage = storageConstructor() - } - return storage + return storageConstructor() case sema.AccountTypeContractsFieldName: - if contracts == nil { - contracts = contractsConstructor() - } - return contracts + return contractsConstructor() case sema.AccountTypeKeysFieldName: - if keys == nil { - keys = keysConstructor() - } - return keys + return keysConstructor() case sema.AccountTypeInboxFieldName: - if inbox == nil { - inbox = inboxConstructor() - } - return inbox + return inboxConstructor() case sema.AccountTypeCapabilitiesFieldName: - if capabilities == nil { - capabilities = 
capabilitiesConstructor() - } - return capabilities + return capabilitiesConstructor() + } + + return nil + } + computeField := func(name string, _ *Interpreter, _ LocationRange) Value { + switch name { case sema.AccountTypeBalanceFieldName: return accountBalanceGet() @@ -100,7 +85,11 @@ func NewAccountValue( return accountAvailableBalanceGet() } - return nil + field := computeLazyStoredField(name) + if field != nil { + fields[name] = field + } + return field } var str string diff --git a/interpreter/value_account_accountcapabilities.go b/interpreter/value_account_accountcapabilities.go index 81922f9ab7..b271870b6d 100644 --- a/interpreter/value_account_accountcapabilities.go +++ b/interpreter/value_account_accountcapabilities.go @@ -41,6 +41,35 @@ func NewAccountAccountCapabilitiesValue( issueWithTypeFunction BoundFunctionGenerator, ) *SimpleCompositeValue { + var accountCapabilities *SimpleCompositeValue + + fields := map[string]Value{} + + computeLazyStoredField := func(name string) Value { + switch name { + case sema.Account_AccountCapabilitiesTypeGetControllerFunctionName: + return getControllerFunction(accountCapabilities) + case sema.Account_AccountCapabilitiesTypeGetControllersFunctionName: + return getControllersFunction(accountCapabilities) + case sema.Account_AccountCapabilitiesTypeForEachControllerFunctionName: + return forEachControllerFunction(accountCapabilities) + case sema.Account_AccountCapabilitiesTypeIssueFunctionName: + return issueFunction(accountCapabilities) + case sema.Account_AccountCapabilitiesTypeIssueWithTypeFunctionName: + return issueWithTypeFunction(accountCapabilities) + } + + return nil + } + + computeField := func(name string, _ *Interpreter, _ LocationRange) Value { + field := computeLazyStoredField(name) + if field != nil { + fields[name] = field + } + return field + } + var str string stringer := func(interpreter *Interpreter, seenReferences SeenReferences, locationRange LocationRange) string { if str == "" { @@ -51,24 +80,16 
@@ func NewAccountAccountCapabilitiesValue( return str } - accountCapabilities := NewSimpleCompositeValue( + accountCapabilities = NewSimpleCompositeValue( gauge, account_AccountCapabilitiesTypeID, account_AccountCapabilitiesStaticType, account_AccountCapabilitiesFieldNames, - nil, - nil, + fields, + computeField, nil, stringer, ) - accountCapabilities.Fields = map[string]Value{ - sema.Account_AccountCapabilitiesTypeGetControllerFunctionName: getControllerFunction(accountCapabilities), - sema.Account_AccountCapabilitiesTypeGetControllersFunctionName: getControllersFunction(accountCapabilities), - sema.Account_AccountCapabilitiesTypeForEachControllerFunctionName: forEachControllerFunction(accountCapabilities), - sema.Account_AccountCapabilitiesTypeIssueFunctionName: issueFunction(accountCapabilities), - sema.Account_AccountCapabilitiesTypeIssueWithTypeFunctionName: issueWithTypeFunction(accountCapabilities), - } - return accountCapabilities } diff --git a/interpreter/value_account_capabilities.go b/interpreter/value_account_capabilities.go index a6ab1f55fe..2c854ff948 100644 --- a/interpreter/value_account_capabilities.go +++ b/interpreter/value_account_capabilities.go @@ -29,6 +29,10 @@ import ( var account_CapabilitiesTypeID = sema.AccountCapabilitiesType.ID() var account_CapabilitiesStaticType StaticType = PrimitiveStaticTypeAccount_Capabilities +var account_CapabilitiesFieldNames = []string{ + sema.Account_CapabilitiesTypeStorageFieldName, + sema.Account_CapabilitiesTypeAccountFieldName, +} func NewAccountCapabilitiesValue( gauge common.MemoryGauge, @@ -42,27 +46,39 @@ func NewAccountCapabilitiesValue( accountCapabilitiesConstructor func() Value, ) Value { - var storageCapabilities Value - var accountCapabilities Value + var capabilities *SimpleCompositeValue + + fields := map[string]Value{} - computeField := func(name string, inter *Interpreter, locationRange LocationRange) Value { + computeLazyStoredField := func(name string) Value { switch name { case 
sema.Account_CapabilitiesTypeStorageFieldName: - if storageCapabilities == nil { - storageCapabilities = storageCapabilitiesConstructor() - } - return storageCapabilities - + return storageCapabilitiesConstructor() case sema.Account_CapabilitiesTypeAccountFieldName: - if accountCapabilities == nil { - accountCapabilities = accountCapabilitiesConstructor() - } - return accountCapabilities + return accountCapabilitiesConstructor() + case sema.Account_CapabilitiesTypeGetFunctionName: + return getFunction(capabilities) + case sema.Account_CapabilitiesTypeBorrowFunctionName: + return borrowFunction(capabilities) + case sema.Account_CapabilitiesTypeExistsFunctionName: + return existsFunction(capabilities) + case sema.Account_CapabilitiesTypePublishFunctionName: + return publishFunction(capabilities) + case sema.Account_CapabilitiesTypeUnpublishFunctionName: + return unpublishFunction(capabilities) } return nil } + computeField := func(name string, _ *Interpreter, _ LocationRange) Value { + field := computeLazyStoredField(name) + if field != nil { + fields[name] = field + } + return field + } + var str string stringer := func(interpreter *Interpreter, seenReferences SeenReferences, locationRange LocationRange) string { if str == "" { @@ -73,24 +89,16 @@ func NewAccountCapabilitiesValue( return str } - capabilities := NewSimpleCompositeValue( + capabilities = NewSimpleCompositeValue( gauge, account_CapabilitiesTypeID, account_CapabilitiesStaticType, - nil, - nil, + account_CapabilitiesFieldNames, + fields, computeField, nil, stringer, ) - capabilities.Fields = map[string]Value{ - sema.Account_CapabilitiesTypeGetFunctionName: getFunction(capabilities), - sema.Account_CapabilitiesTypeBorrowFunctionName: borrowFunction(capabilities), - sema.Account_CapabilitiesTypeExistsFunctionName: existsFunction(capabilities), - sema.Account_CapabilitiesTypePublishFunctionName: publishFunction(capabilities), - sema.Account_CapabilitiesTypeUnpublishFunctionName: 
unpublishFunction(capabilities), - } - return capabilities } diff --git a/interpreter/value_account_contracts.go b/interpreter/value_account_contracts.go index 83ae25552a..9b10a89e34 100644 --- a/interpreter/value_account_contracts.go +++ b/interpreter/value_account_contracts.go @@ -45,18 +45,42 @@ func NewAccountContractsValue( namesGetter ContractNamesGetter, ) Value { - computeField := func( - name string, - interpreter *Interpreter, - locationRange LocationRange, - ) Value { + var accountContracts *SimpleCompositeValue + + fields := map[string]Value{} + + computeLazyStoredField := func(name string) Value { switch name { - case sema.Account_ContractsTypeNamesFieldName: - return namesGetter(interpreter, locationRange) + case sema.Account_ContractsTypeAddFunctionName: + return addFunction(accountContracts) + case sema.Account_ContractsTypeGetFunctionName: + return getFunction(accountContracts) + case sema.Account_ContractsTypeBorrowFunctionName: + return borrowFunction(accountContracts) + case sema.Account_ContractsTypeRemoveFunctionName: + return removeFunction(accountContracts) + case sema.Account_ContractsTypeUpdateFunctionName: + return updateFunction(accountContracts) + case sema.Account_ContractsTypeTryUpdateFunctionName: + return tryUpdateFunction(accountContracts) } + return nil } + computeField := func(name string, inter *Interpreter, locationRange LocationRange) Value { + switch name { + case sema.Account_ContractsTypeNamesFieldName: + return namesGetter(inter, locationRange) + } + + field := computeLazyStoredField(name) + if field != nil { + fields[name] = field + } + return field + } + var str string stringer := func(interpreter *Interpreter, seenReferences SeenReferences, locationRange LocationRange) string { if str == "" { @@ -67,25 +91,16 @@ func NewAccountContractsValue( return str } - accountContracts := NewSimpleCompositeValue( + accountContracts = NewSimpleCompositeValue( gauge, account_ContractsTypeID, account_ContractsStaticType, 
account_ContractsFieldNames, - nil, + fields, computeField, nil, stringer, ) - accountContracts.Fields = map[string]Value{ - sema.Account_ContractsTypeAddFunctionName: addFunction(accountContracts), - sema.Account_ContractsTypeGetFunctionName: getFunction(accountContracts), - sema.Account_ContractsTypeBorrowFunctionName: borrowFunction(accountContracts), - sema.Account_ContractsTypeRemoveFunctionName: removeFunction(accountContracts), - sema.Account_ContractsTypeUpdateFunctionName: updateFunction(accountContracts), - sema.Account_ContractsTypeTryUpdateFunctionName: tryUpdateFunction(accountContracts), - } - return accountContracts } diff --git a/interpreter/value_account_inbox.go b/interpreter/value_account_inbox.go index 9681ddb331..1a926aa55c 100644 --- a/interpreter/value_account_inbox.go +++ b/interpreter/value_account_inbox.go @@ -29,6 +29,7 @@ import ( var account_InboxTypeID = sema.Account_InboxType.ID() var account_InboxStaticType StaticType = PrimitiveStaticTypeAccount_Inbox +var account_InboxFieldNames []string = nil // NewAccountInboxValue constructs an Account.Inbox value. 
func NewAccountInboxValue( @@ -39,6 +40,31 @@ func NewAccountInboxValue( claimFunction BoundFunctionGenerator, ) Value { + var accountInbox *SimpleCompositeValue + + fields := map[string]Value{} + + computeLazyStoredField := func(name string) Value { + switch name { + case sema.Account_InboxTypePublishFunctionName: + return publishFunction(accountInbox) + case sema.Account_InboxTypeUnpublishFunctionName: + return unpublishFunction(accountInbox) + case sema.Account_InboxTypeClaimFunctionName: + return claimFunction(accountInbox) + } + + return nil + } + + computeField := func(name string, _ *Interpreter, _ LocationRange) Value { + field := computeLazyStoredField(name) + if field != nil { + fields[name] = field + } + return field + } + var str string stringer := func(interpreter *Interpreter, seenReferences SeenReferences, locationRange LocationRange) string { if str == "" { @@ -49,22 +75,16 @@ func NewAccountInboxValue( return str } - accountInbox := NewSimpleCompositeValue( + accountInbox = NewSimpleCompositeValue( gauge, account_InboxTypeID, account_InboxStaticType, - nil, - nil, - nil, + account_InboxFieldNames, + fields, + computeField, nil, stringer, ) - accountInbox.Fields = map[string]Value{ - sema.Account_InboxTypePublishFunctionName: publishFunction(accountInbox), - sema.Account_InboxTypeUnpublishFunctionName: unpublishFunction(accountInbox), - sema.Account_InboxTypeClaimFunctionName: claimFunction(accountInbox), - } - return accountInbox } diff --git a/interpreter/value_account_storage.go b/interpreter/value_account_storage.go index 0531b5f67a..80fd60c908 100644 --- a/interpreter/value_account_storage.go +++ b/interpreter/value_account_storage.go @@ -29,6 +29,7 @@ import ( var account_StorageTypeID = sema.Account_StorageType.ID() var account_StorageStaticType StaticType = PrimitiveStaticTypeAccount_Storage +var account_StorageFieldNames []string = nil // NewAccountStorageValue constructs an Account.Storage value. 
func NewAccountStorageValue( @@ -38,113 +39,94 @@ func NewAccountStorageValue( storageCapacityGet func(interpreter *Interpreter) UInt64Value, ) Value { - var str string - stringer := func(interpreter *Interpreter, seenReferences SeenReferences, locationRange LocationRange) string { - if str == "" { - common.UseMemory(interpreter, common.AccountStorageStringMemoryUsage) - addressStr := address.MeteredString(interpreter, seenReferences, locationRange) - str = fmt.Sprintf("Account.Storage(%s)", addressStr) - } - return str - } + var storageValue *SimpleCompositeValue - storageValue := NewSimpleCompositeValue( - gauge, - account_StorageTypeID, - account_StorageStaticType, - nil, - nil, - nil, - nil, - stringer, - ) - - var forEachStoredFunction FunctionValue - var forEachPublicFunction FunctionValue - var typeFunction FunctionValue - var loadFunction FunctionValue - var copyFunction FunctionValue - var saveFunction FunctionValue - var borrowFunction FunctionValue - var checkFunction FunctionValue + fields := map[string]Value{} - storageValue.ComputeField = func(name string, inter *Interpreter, locationRange LocationRange) Value { + computeLazyStoredField := func(name string, inter *Interpreter) Value { switch name { - case sema.Account_StorageTypePublicPathsFieldName: - return inter.publicAccountPaths(address, locationRange) - - case sema.Account_StorageTypeStoragePathsFieldName: - return inter.storageAccountPaths(address, locationRange) - case sema.Account_StorageTypeForEachPublicFunctionName: - if forEachPublicFunction == nil { - forEachPublicFunction = inter.newStorageIterationFunction( - storageValue, - sema.Account_StorageTypeForEachPublicFunctionType, - address, - common.PathDomainPublic, - sema.PublicPathType, - ) - } - return forEachPublicFunction + return inter.newStorageIterationFunction( + storageValue, + sema.Account_StorageTypeForEachPublicFunctionType, + address, + common.PathDomainPublic, + sema.PublicPathType, + ) case 
sema.Account_StorageTypeForEachStoredFunctionName: - if forEachStoredFunction == nil { - forEachStoredFunction = inter.newStorageIterationFunction( - storageValue, - sema.Account_StorageTypeForEachStoredFunctionType, - address, - common.PathDomainStorage, - sema.StoragePathType, - ) - } - return forEachStoredFunction - - case sema.Account_StorageTypeUsedFieldName: - return storageUsedGet(inter) - - case sema.Account_StorageTypeCapacityFieldName: - return storageCapacityGet(inter) + return inter.newStorageIterationFunction( + storageValue, + sema.Account_StorageTypeForEachStoredFunctionType, + address, + common.PathDomainStorage, + sema.StoragePathType, + ) case sema.Account_StorageTypeTypeFunctionName: - if typeFunction == nil { - typeFunction = inter.authAccountTypeFunction(storageValue, address) - } - return typeFunction + return inter.authAccountTypeFunction(storageValue, address) case sema.Account_StorageTypeLoadFunctionName: - if loadFunction == nil { - loadFunction = inter.authAccountLoadFunction(storageValue, address) - } - return loadFunction + return inter.authAccountLoadFunction(storageValue, address) case sema.Account_StorageTypeCopyFunctionName: - if copyFunction == nil { - copyFunction = inter.authAccountCopyFunction(storageValue, address) - } - return copyFunction + return inter.authAccountCopyFunction(storageValue, address) case sema.Account_StorageTypeSaveFunctionName: - if saveFunction == nil { - saveFunction = inter.authAccountSaveFunction(storageValue, address) - } - return saveFunction + return inter.authAccountSaveFunction(storageValue, address) case sema.Account_StorageTypeBorrowFunctionName: - if borrowFunction == nil { - borrowFunction = inter.authAccountBorrowFunction(storageValue, address) - } - return borrowFunction + return inter.authAccountBorrowFunction(storageValue, address) case sema.Account_StorageTypeCheckFunctionName: - if checkFunction == nil { - checkFunction = inter.authAccountCheckFunction(storageValue, address) - } - return 
checkFunction + return inter.authAccountCheckFunction(storageValue, address) } return nil } + computeField := func(name string, inter *Interpreter, locationRange LocationRange) Value { + switch name { + case sema.Account_StorageTypePublicPathsFieldName: + return inter.publicAccountPaths(address, locationRange) + + case sema.Account_StorageTypeStoragePathsFieldName: + return inter.storageAccountPaths(address, locationRange) + + case sema.Account_StorageTypeUsedFieldName: + return storageUsedGet(inter) + + case sema.Account_StorageTypeCapacityFieldName: + return storageCapacityGet(inter) + } + + field := computeLazyStoredField(name, inter) + if field != nil { + fields[name] = field + } + return field + } + + var str string + stringer := func(interpreter *Interpreter, seenReferences SeenReferences, locationRange LocationRange) string { + if str == "" { + common.UseMemory(interpreter, common.AccountStorageStringMemoryUsage) + addressStr := address.MeteredString(interpreter, seenReferences, locationRange) + str = fmt.Sprintf("Account.Storage(%s)", addressStr) + } + return str + } + + storageValue = NewSimpleCompositeValue( + gauge, + account_StorageTypeID, + account_StorageStaticType, + account_StorageFieldNames, + fields, + computeField, + nil, + stringer, + ) + return storageValue } diff --git a/interpreter/value_account_storagecapabilities.go b/interpreter/value_account_storagecapabilities.go index a75b549903..4ad6cf1ce7 100644 --- a/interpreter/value_account_storagecapabilities.go +++ b/interpreter/value_account_storagecapabilities.go @@ -41,6 +41,35 @@ func NewAccountStorageCapabilitiesValue( issueWithTypeFunction BoundFunctionGenerator, ) Value { + var storageCapabilities *SimpleCompositeValue + + fields := map[string]Value{} + + computeLazyStoredField := func(name string) Value { + switch name { + case sema.Account_StorageCapabilitiesTypeGetControllerFunctionName: + return getControllerFunction(storageCapabilities) + case 
sema.Account_StorageCapabilitiesTypeGetControllersFunctionName: + return getControllersFunction(storageCapabilities) + case sema.Account_StorageCapabilitiesTypeForEachControllerFunctionName: + return forEachControllerFunction(storageCapabilities) + case sema.Account_StorageCapabilitiesTypeIssueFunctionName: + return issueFunction(storageCapabilities) + case sema.Account_StorageCapabilitiesTypeIssueWithTypeFunctionName: + return issueWithTypeFunction(storageCapabilities) + } + + return nil + } + + computeField := func(name string, _ *Interpreter, _ LocationRange) Value { + field := computeLazyStoredField(name) + if field != nil { + fields[name] = field + } + return field + } + var str string stringer := func(interpreter *Interpreter, seenReferences SeenReferences, locationRange LocationRange) string { if str == "" { @@ -51,24 +80,16 @@ func NewAccountStorageCapabilitiesValue( return str } - storageCapabilities := NewSimpleCompositeValue( + storageCapabilities = NewSimpleCompositeValue( gauge, account_StorageCapabilitiesTypeID, account_StorageCapabilitiesStaticType, account_StorageCapabilitiesFieldNames, - nil, - nil, + fields, + computeField, nil, stringer, ) - storageCapabilities.Fields = map[string]Value{ - sema.Account_StorageCapabilitiesTypeGetControllerFunctionName: getControllerFunction(storageCapabilities), - sema.Account_StorageCapabilitiesTypeGetControllersFunctionName: getControllersFunction(storageCapabilities), - sema.Account_StorageCapabilitiesTypeForEachControllerFunctionName: forEachControllerFunction(storageCapabilities), - sema.Account_StorageCapabilitiesTypeIssueFunctionName: issueFunction(storageCapabilities), - sema.Account_StorageCapabilitiesTypeIssueWithTypeFunctionName: issueWithTypeFunction(storageCapabilities), - } - return storageCapabilities } diff --git a/interpreter/value_array.go b/interpreter/value_array.go index b032b9625c..88a60cd329 100644 --- a/interpreter/value_array.go +++ b/interpreter/value_array.go @@ -213,14 +213,18 @@ func 
newArrayValueFromAtreeArray( var _ Value = &ArrayValue{} var _ atree.Value = &ArrayValue{} +var _ atree.WrapperValue = &ArrayValue{} var _ EquatableValue = &ArrayValue{} var _ ValueIndexableValue = &ArrayValue{} var _ MemberAccessibleValue = &ArrayValue{} var _ ReferenceTrackedResourceKindedValue = &ArrayValue{} var _ IterableValue = &ArrayValue{} +var _ atreeContainerBackedValue = &ArrayValue{} func (*ArrayValue) isValue() {} +func (*ArrayValue) isAtreeContainerBackedValue() {} + func (v *ArrayValue) Accept(interpreter *Interpreter, visitor Visitor, locationRange LocationRange) { descend := visitor.VisitArrayValue(interpreter, v) if !descend { @@ -1292,9 +1296,17 @@ func (v *ArrayValue) Storable( address atree.Address, maxInlineSize uint64, ) (atree.Storable, error) { + // NOTE: Need to change ArrayValue.UnwrapAtreeValue() + // if ArrayValue is stored with wrapping. return v.array.Storable(storage, address, maxInlineSize) } +func (v *ArrayValue) UnwrapAtreeValue() (atree.Value, uint64) { + // Wrapper size is 0 because ArrayValue is stored as + // atree.Array without any physical wrapping (see ArrayValue.Storable()). + return v.array, 0 +} + func (v *ArrayValue) IsReferenceTrackedResourceKindedValue() {} func (v *ArrayValue) Transfer( @@ -1994,3 +2006,7 @@ func (v *ArrayValue) SetType(staticType ArrayStaticType) { panic(errors.NewExternalError(err)) } } + +func (v *ArrayValue) Inlined() bool { + return v.array.Inlined() +} diff --git a/interpreter/value_authaccount_keys.go b/interpreter/value_authaccount_keys.go index ef7e47ab0b..39085cf685 100644 --- a/interpreter/value_authaccount_keys.go +++ b/interpreter/value_authaccount_keys.go @@ -29,6 +29,7 @@ import ( var account_KeysTypeID = sema.Account_KeysType.ID() var account_KeysStaticType StaticType = PrimitiveStaticTypeAccount_Keys +var account_KeysFieldNames []string = nil // NewAccountKeysValue constructs an Account.Keys value. 
func NewAccountKeysValue( @@ -41,12 +42,36 @@ func NewAccountKeysValue( getKeysCount AccountKeysCountGetter, ) Value { + var accountKeys *SimpleCompositeValue + + fields := map[string]Value{} + + computeLazyStoredField := func(name string) Value { + switch name { + case sema.Account_KeysTypeAddFunctionName: + return addFunction(accountKeys) + case sema.Account_KeysTypeGetFunctionName: + return getFunction(accountKeys) + case sema.Account_KeysTypeRevokeFunctionName: + return revokeFunction(accountKeys) + case sema.Account_KeysTypeForEachFunctionName: + return forEachFunction(accountKeys) + } + + return nil + } + computeField := func(name string, _ *Interpreter, _ LocationRange) Value { switch name { case sema.Account_KeysTypeCountFieldName: return getKeysCount() } - return nil + + field := computeLazyStoredField(name) + if field != nil { + fields[name] = field + } + return field } var str string @@ -59,24 +84,17 @@ func NewAccountKeysValue( return str } - accountKeys := NewSimpleCompositeValue( + accountKeys = NewSimpleCompositeValue( gauge, account_KeysTypeID, account_KeysStaticType, - nil, - nil, + account_KeysFieldNames, + fields, computeField, nil, stringer, ) - accountKeys.Fields = map[string]Value{ - sema.Account_KeysTypeAddFunctionName: addFunction(accountKeys), - sema.Account_KeysTypeGetFunctionName: getFunction(accountKeys), - sema.Account_KeysTypeRevokeFunctionName: revokeFunction(accountKeys), - sema.Account_KeysTypeForEachFunctionName: forEachFunction(accountKeys), - } - return accountKeys } diff --git a/interpreter/value_composite.go b/interpreter/value_composite.go index d19d4aef59..db1ce4c49f 100644 --- a/interpreter/value_composite.go +++ b/interpreter/value_composite.go @@ -236,9 +236,14 @@ var _ HashableValue = &CompositeValue{} var _ MemberAccessibleValue = &CompositeValue{} var _ ReferenceTrackedResourceKindedValue = &CompositeValue{} var _ ContractValue = &CompositeValue{} +var _ atree.Value = &CompositeValue{} +var _ atree.WrapperValue = 
&CompositeValue{} +var _ atreeContainerBackedValue = &CompositeValue{} func (*CompositeValue) isValue() {} +func (*CompositeValue) isAtreeContainerBackedValue() {} + func (v *CompositeValue) Accept(interpreter *Interpreter, visitor Visitor, locationRange LocationRange) { descend := visitor.VisitCompositeValue(interpreter, v) if !descend { @@ -1138,9 +1143,18 @@ func (v *CompositeValue) Storable( return NonStorable{Value: v}, nil } + // NOTE: Need to change CompositeValue.UnwrapAtreeValue() + // if CompositeValue is stored with wrapping. + return v.dictionary.Storable(storage, address, maxInlineSize) } +func (v *CompositeValue) UnwrapAtreeValue() (atree.Value, uint64) { + // Wrapper size is 0 because CompositeValue is stored as + // atree.OrderedMap without any physical wrapping (see CompositeValue.Storable()). + return v.dictionary, 0 +} + func (v *CompositeValue) NeedsStoreTo(address atree.Address) bool { return address != v.StorageAddress() } @@ -1658,7 +1672,7 @@ func (v *CompositeValue) getBaseValue( return NewEphemeralReferenceValue(interpreter, functionAuthorization, v.base, baseType, locationRange) } -func (v *CompositeValue) setBaseValue(interpreter *Interpreter, base *CompositeValue) { +func (v *CompositeValue) setBaseValue(_ *Interpreter, base *CompositeValue) { v.base = base } @@ -1942,3 +1956,7 @@ func (v *CompositeValue) ForEach( func (v *CompositeValue) AtreeMap() *atree.OrderedMap { return v.dictionary } + +func (v *CompositeValue) Inlined() bool { + return v.dictionary.Inlined() +} diff --git a/interpreter/value_dictionary.go b/interpreter/value_dictionary.go index a8d1f42197..8ee1be4856 100644 --- a/interpreter/value_dictionary.go +++ b/interpreter/value_dictionary.go @@ -243,13 +243,17 @@ func NewDictionaryValueFromAtreeMap( var _ Value = &DictionaryValue{} var _ atree.Value = &DictionaryValue{} +var _ atree.WrapperValue = &DictionaryValue{} var _ EquatableValue = &DictionaryValue{} var _ ValueIndexableValue = &DictionaryValue{} var _ 
MemberAccessibleValue = &DictionaryValue{} var _ ReferenceTrackedResourceKindedValue = &DictionaryValue{} +var _ atreeContainerBackedValue = &DictionaryValue{} func (*DictionaryValue) isValue() {} +func (*DictionaryValue) isAtreeContainerBackedValue() {} + func (v *DictionaryValue) Accept(interpreter *Interpreter, visitor Visitor, locationRange LocationRange) { descend := visitor.VisitDictionaryValue(interpreter, v) if !descend { @@ -1268,9 +1272,17 @@ func (v *DictionaryValue) Storable( address atree.Address, maxInlineSize uint64, ) (atree.Storable, error) { + // NOTE: Need to change DictionaryValue.UnwrapAtreeValue() + // if DictionaryValue is stored with wrapping. return v.dictionary.Storable(storage, address, maxInlineSize) } +func (v *DictionaryValue) UnwrapAtreeValue() (atree.Value, uint64) { + // Wrapper size is 0 because DictionaryValue is stored as + // atree.OrderedMap without any physical wrapping (see DictionaryValue.Storable()). + return v.dictionary, 0 +} + func (v *DictionaryValue) IsReferenceTrackedResourceKindedValue() {} func (v *DictionaryValue) Transfer( @@ -1594,3 +1606,7 @@ func (v *DictionaryValue) AtreeMap() *atree.OrderedMap { func (v *DictionaryValue) ElementSize() uint { return v.elementSize } + +func (v *DictionaryValue) Inlined() bool { + return v.dictionary.Inlined() +} diff --git a/interpreter/value_int.go b/interpreter/value_int.go index a9e6ef2c51..1c100e95c2 100644 --- a/interpreter/value_int.go +++ b/interpreter/value_int.go @@ -526,7 +526,7 @@ func (v IntValue) BitwiseLeftShift(interpreter *Interpreter, other IntegerValue, } if o.BigInt.Sign() < 0 { - panic(UnderflowError{ + panic(NegativeShiftError{ LocationRange: locationRange, }) } @@ -559,7 +559,7 @@ func (v IntValue) BitwiseRightShift(interpreter *Interpreter, other IntegerValue } if o.BigInt.Sign() < 0 { - panic(UnderflowError{ + panic(NegativeShiftError{ LocationRange: locationRange, }) } diff --git a/interpreter/value_int128.go b/interpreter/value_int128.go index 
b95c3b70f9..2df6c0e4db 100644 --- a/interpreter/value_int128.go +++ b/interpreter/value_int128.go @@ -20,6 +20,7 @@ package interpreter import ( "math/big" + "math/bits" "github.com/onflow/atree" @@ -30,6 +31,37 @@ import ( "github.com/onflow/cadence/sema" ) +// toTwosComplement sets `res` to the two's complement representation of a big.Int `x` in the given target bit size. +// `res` is returned and is awlways a positive big.Int. +func toTwosComplement(res, x *big.Int, targetBitSize uint) *big.Int { + bytes := SignedBigIntToSizedBigEndianBytes(x, targetBitSize/8) + return res.SetBytes(bytes) +} + +// toTwosComplement converts `res` to the big.Int representation from the two's complement format of a +// signed integer. +// `res` is returned and can be positive or negative. +func fromTwosComplement(res *big.Int) *big.Int { + bytes := res.Bytes() + return BigEndianBytesToSignedBigInt(bytes) +} + +// truncate trims a big.Int to maxWords by directly modifying its underlying representation. +func truncate(x *big.Int, maxWords int) *big.Int { + // Get the absolute value of x as a nat slice. + abs := x.Bits() + + // Limit the nat slice to maxWords. + if len(abs) > maxWords { + abs = abs[:maxWords] + } + + // Update the big.Int's internal representation. + x.SetBits(abs) + + return x +} + // Int128Value type Int128Value struct { @@ -652,20 +684,25 @@ func (v Int128Value) BitwiseLeftShift(interpreter *Interpreter, other IntegerVal } if o.BigInt.Sign() < 0 { - panic(UnderflowError{ + panic(NegativeShiftError{ LocationRange: locationRange, }) } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) + if !o.BigInt.IsUint64() || o.BigInt.Uint64() >= 128 { + return NewInt128ValueFromUint64(interpreter, 0) } + // The maximum shift value at this point is 127, which may lead to an + // additional allocation of up to 128 bits. Add usage for possible + // intermediate value. 
+ common.UseMemory(interpreter, Int128MemoryUsage) + valueGetter := func() *big.Int { res := new(big.Int) - res.Lsh(v.BigInt, uint(o.BigInt.Uint64())) - return res + res = toTwosComplement(res, v.BigInt, 128) + res = res.Lsh(res, uint(o.BigInt.Uint64())) + res = truncate(res, 128/bits.UintSize) + return fromTwosComplement(res) } return NewInt128ValueFromBigInt(interpreter, valueGetter) @@ -683,14 +720,12 @@ func (v Int128Value) BitwiseRightShift(interpreter *Interpreter, other IntegerVa } if o.BigInt.Sign() < 0 { - panic(UnderflowError{ + panic(NegativeShiftError{ LocationRange: locationRange, }) } if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) + return NewInt128ValueFromUint64(interpreter, 0) } valueGetter := func() *big.Int { diff --git a/interpreter/value_int16.go b/interpreter/value_int16.go index 393093b153..33eb51f116 100644 --- a/interpreter/value_int16.go +++ b/interpreter/value_int16.go @@ -575,6 +575,12 @@ func (v Int16Value) BitwiseLeftShift(interpreter *Interpreter, other IntegerValu }) } + if o < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + valueGetter := func() int16 { return int16(v << o) } @@ -593,6 +599,12 @@ func (v Int16Value) BitwiseRightShift(interpreter *Interpreter, other IntegerVal }) } + if o < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + valueGetter := func() int16 { return int16(v >> o) } diff --git a/interpreter/value_int256.go b/interpreter/value_int256.go index c766978af7..09f27f3a60 100644 --- a/interpreter/value_int256.go +++ b/interpreter/value_int256.go @@ -20,6 +20,7 @@ package interpreter import ( "math/big" + "math/bits" "github.com/onflow/atree" @@ -649,21 +650,26 @@ func (v Int256Value) BitwiseLeftShift(interpreter *Interpreter, other IntegerVal }) } + if o.BigInt.Sign() < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + if !o.BigInt.IsUint64() || o.BigInt.Uint64() >= 256 { + return 
NewInt256ValueFromUint64(interpreter, 0) + } + + // The maximum shift value at this point is 255, which may lead to an + // additional allocation of up to 256 bits. Add usage for possible + // intermediate value. + common.UseMemory(interpreter, Int256MemoryUsage) + valueGetter := func() *big.Int { res := new(big.Int) - if o.BigInt.Sign() < 0 { - panic(UnderflowError{ - LocationRange: locationRange, - }) - } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) - } - res.Lsh(v.BigInt, uint(o.BigInt.Uint64())) - - return res + res = toTwosComplement(res, v.BigInt, 256) + res = res.Lsh(res, uint(o.BigInt.Uint64())) + res = truncate(res, 256/bits.UintSize) + return fromTwosComplement(res) } return NewInt256ValueFromBigInt(interpreter, valueGetter) @@ -680,18 +686,17 @@ func (v Int256Value) BitwiseRightShift(interpreter *Interpreter, other IntegerVa }) } + if o.BigInt.Sign() < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + if !o.BigInt.IsUint64() { + return NewInt256ValueFromUint64(interpreter, 0) + } + valueGetter := func() *big.Int { res := new(big.Int) - if o.BigInt.Sign() < 0 { - panic(UnderflowError{ - LocationRange: locationRange, - }) - } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) - } res.Rsh(v.BigInt, uint(o.BigInt.Uint64())) return res } diff --git a/interpreter/value_int32.go b/interpreter/value_int32.go index e5847fc477..6bc739ffe9 100644 --- a/interpreter/value_int32.go +++ b/interpreter/value_int32.go @@ -575,6 +575,12 @@ func (v Int32Value) BitwiseLeftShift(interpreter *Interpreter, other IntegerValu }) } + if o < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + valueGetter := func() int32 { return int32(v << o) } @@ -593,6 +599,12 @@ func (v Int32Value) BitwiseRightShift(interpreter *Interpreter, other IntegerVal }) } + if o < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + valueGetter := func() 
int32 { return int32(v >> o) } diff --git a/interpreter/value_int64.go b/interpreter/value_int64.go index 5f331e95aa..e00d378bc3 100644 --- a/interpreter/value_int64.go +++ b/interpreter/value_int64.go @@ -566,6 +566,12 @@ func (v Int64Value) BitwiseLeftShift(interpreter *Interpreter, other IntegerValu }) } + if o < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + valueGetter := func() int64 { return int64(v << o) } @@ -584,6 +590,12 @@ func (v Int64Value) BitwiseRightShift(interpreter *Interpreter, other IntegerVal }) } + if o < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + valueGetter := func() int64 { return int64(v >> o) } diff --git a/interpreter/value_int8.go b/interpreter/value_int8.go index 12cc547687..325cfb2a97 100644 --- a/interpreter/value_int8.go +++ b/interpreter/value_int8.go @@ -574,6 +574,12 @@ func (v Int8Value) BitwiseLeftShift(interpreter *Interpreter, other IntegerValue }) } + if o < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + valueGetter := func() int8 { return int8(v << o) } @@ -592,6 +598,12 @@ func (v Int8Value) BitwiseRightShift(interpreter *Interpreter, other IntegerValu }) } + if o < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + valueGetter := func() int8 { return int8(v >> o) } diff --git a/interpreter/value_some.go b/interpreter/value_some.go index f33f524b81..ddc0343d16 100644 --- a/interpreter/value_some.go +++ b/interpreter/value_some.go @@ -51,6 +51,31 @@ var _ Value = &SomeValue{} var _ EquatableValue = &SomeValue{} var _ MemberAccessibleValue = &SomeValue{} var _ OptionalValue = &SomeValue{} +var _ atree.Value = &SomeValue{} +var _ atree.WrapperValue = &SomeValue{} + +// UnwrapAtreeValue returns non-SomeValue and wrapper size. 
+func (v *SomeValue) UnwrapAtreeValue() (atree.Value, uint64) { + // NOTE: + // - non-SomeValue is the same as non-SomeValue in SomeValue.Storable() + // - non-SomeValue wrapper size is the same as encoded wrapper size in SomeStorable.ByteSize(). + + // Unwrap SomeValue(s) + nonSomeValue, nestedLevels := v.nonSomeValue() + + // Get SomeValue(s) wrapper size + someStorableEncodedPrefixSize := getSomeStorableEncodedPrefixSize(nestedLevels) + + // Unwrap nonSomeValue if needed + switch nonSomeValue := nonSomeValue.(type) { + case atree.WrapperValue: + unwrappedValue, wrapperSize := nonSomeValue.UnwrapAtreeValue() + return unwrappedValue, wrapperSize + uint64(someStorableEncodedPrefixSize) + + default: + return nonSomeValue, uint64(someStorableEncodedPrefixSize) + } +} func (*SomeValue) isValue() {} @@ -225,9 +250,18 @@ func (v *SomeValue) Storable( // The above applies to both immutable non-SomeValue (such as StringValue), // and mutable non-SomeValue (such as ArrayValue). - if v.valueStorable == nil { + // NOTE: + // - If SomeValue's inner value is a value with atree.Array or atree.OrderedMap, + // we MUST NOT cache SomeStorable because we need to call nonSomeValue.Storable() + // to trigger container inlining or un-inlining. + // - Otherwise, we need to cache SomeStorable because nonSomeValue.Storable() can + // create registers in storage, such as large string. 
+ + nonSomeValue, nestedLevels := v.nonSomeValue() + + _, isContainerValue := nonSomeValue.(atreeContainerBackedValue) - nonSomeValue, nestedLevels := v.nonSomeValue() + if v.valueStorable == nil || isContainerValue { someStorableEncodedPrefixSize := getSomeStorableEncodedPrefixSize(nestedLevels) @@ -379,6 +413,31 @@ type SomeStorable struct { } var _ atree.ContainerStorable = SomeStorable{} +var _ atree.WrapperStorable = SomeStorable{} + +func (s SomeStorable) UnwrapAtreeStorable() atree.Storable { + storable := s.Storable + + switch storable := storable.(type) { + case atree.WrapperStorable: + return storable.UnwrapAtreeStorable() + + default: + return storable + } +} + +// WrapAtreeStorable() wraps storable as innermost wrapped value and +// returns new wrapped storable. +func (s SomeStorable) WrapAtreeStorable(storable atree.Storable) atree.Storable { + _, nestedLevels := s.nonSomeStorable() + + newStorable := SomeStorable{Storable: storable} + for i := 1; i < int(nestedLevels); i++ { + newStorable = SomeStorable{Storable: newStorable} + } + return newStorable +} func (s SomeStorable) HasPointer() bool { switch cs := s.Storable.(type) { diff --git a/interpreter/value_some_test.go b/interpreter/value_some_test.go new file mode 100644 index 0000000000..7adb51b0a7 --- /dev/null +++ b/interpreter/value_some_test.go @@ -0,0 +1,805 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interpreter_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/ast" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/sema" + . "github.com/onflow/cadence/test_utils/common_utils" +) + +func TestSomeValueUnwrapAtreeValue(t *testing.T) { + + const ( + cborTagSize = 2 + someStorableWithMultipleNestedLevelsArraySize = 1 + ) + + t.Parallel() + + t.Run("SomeValue(bool)", func(t *testing.T) { + bv := interpreter.BoolValue(true) + + v := interpreter.NewUnmeteredSomeValueNonCopying(bv) + + unwrappedValue, wrapperSize := v.UnwrapAtreeValue() + require.Equal(t, bv, unwrappedValue) + require.Equal(t, uint64(cborTagSize), wrapperSize) + }) + + t.Run("SomeValue(SomeValue(bool))", func(t *testing.T) { + bv := interpreter.BoolValue(true) + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + bv)) + + unwrappedValue, wrapperSize := v.UnwrapAtreeValue() + require.Equal(t, bv, unwrappedValue) + require.Equal(t, uint64(cborTagSize+someStorableWithMultipleNestedLevelsArraySize+1), wrapperSize) + }) + + t.Run("SomeValue(SomeValue(ArrayValue(...)))", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + + inter, err := interpreter.NewInterpreter( + &interpreter.Program{ + Program: ast.NewProgram(nil, []ast.Declaration{}), + Elaboration: sema.NewElaboration(nil), + }, + TestLocation, + &interpreter.Config{ + Storage: storage, + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + }, + ) + require.NoError(t, err) + + address := common.Address{'A'} + + values := []interpreter.Value{ + interpreter.NewUnmeteredUInt64Value(0), + 
interpreter.NewUnmeteredUInt64Value(1), + } + + array := interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + values..., + ) + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + array)) + + unwrappedValue, wrapperSize := v.UnwrapAtreeValue() + require.IsType(t, &atree.Array{}, unwrappedValue) + require.Equal(t, uint64(cborTagSize+someStorableWithMultipleNestedLevelsArraySize+1), wrapperSize) + + atreeArray := unwrappedValue.(*atree.Array) + require.Equal(t, atree.Address(address), atreeArray.Address()) + require.Equal(t, uint64(len(values)), atreeArray.Count()) + + for i, expectedValue := range values { + v, err := atreeArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, expectedValue, v) + } + }) + + t.Run("SomeValue(SomeValue(DictionaryValue(...)))", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + + inter, err := interpreter.NewInterpreter( + &interpreter.Program{ + Program: ast.NewProgram(nil, []ast.Declaration{}), + Elaboration: sema.NewElaboration(nil), + }, + TestLocation, + &interpreter.Config{ + Storage: storage, + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + }, + ) + require.NoError(t, err) + + address := common.Address{'A'} + + values := []interpreter.Value{ + interpreter.NewUnmeteredUInt64Value(0), + interpreter.NewUnmeteredStringValue("a"), + interpreter.NewUnmeteredUInt64Value(1), + interpreter.NewUnmeteredStringValue("b"), + } + + dict := interpreter.NewDictionaryValueWithAddress( + inter, + interpreter.EmptyLocationRange, + &interpreter.DictionaryStaticType{ + KeyType: interpreter.PrimitiveStaticTypeAnyStruct, + ValueType: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + values..., + ) + 
+ v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + dict)) + + unwrappedValue, wrapperSize := v.UnwrapAtreeValue() + require.IsType(t, &atree.OrderedMap{}, unwrappedValue) + require.Equal(t, uint64(cborTagSize+someStorableWithMultipleNestedLevelsArraySize+1), wrapperSize) + + // Verify unwrapped value + atreeMap := unwrappedValue.(*atree.OrderedMap) + require.Equal(t, atree.Address(address), atreeMap.Address()) + require.Equal(t, uint64(len(values)/2), atreeMap.Count()) + + valueComparator := func( + storage atree.SlabStorage, + atreeValue atree.Value, + otherStorable atree.Storable, + ) (bool, error) { + value := interpreter.MustConvertStoredValue(inter, atreeValue) + otherValue := interpreter.StoredValue(inter, otherStorable, storage) + return value.(interpreter.EquatableValue).Equal(inter, interpreter.EmptyLocationRange, otherValue), nil + } + + hashInputProvider := func( + value atree.Value, + scratch []byte, + ) ([]byte, error) { + hashInput := interpreter.MustConvertStoredValue(inter, value).(interpreter.HashableValue). 
+ HashInput(inter, interpreter.EmptyLocationRange, scratch) + return hashInput, nil + } + + for i := 0; i < len(values); i += 2 { + key := values[i] + expectedValue := values[i+1] + + v, err := atreeMap.Get( + valueComparator, + hashInputProvider, + key, + ) + require.NoError(t, err) + require.Equal(t, expectedValue, v) + } + }) + + t.Run("SomeValue(SomeValue(CompositeValue(...)))", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + + inter, err := interpreter.NewInterpreter( + &interpreter.Program{ + Program: ast.NewProgram(nil, []ast.Declaration{}), + Elaboration: sema.NewElaboration(nil), + }, + TestLocation, + &interpreter.Config{ + Storage: storage, + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + }, + ) + require.NoError(t, err) + + address := common.Address{'A'} + + identifier := "test" + + location := common.AddressLocation{ + Address: address, + Name: identifier, + } + + kind := common.CompositeKindStructure + + fields := []interpreter.CompositeField{ + interpreter.NewUnmeteredCompositeField( + "field1", + interpreter.NewUnmeteredStringValue("a"), + ), + interpreter.NewUnmeteredCompositeField( + "field2", + interpreter.NewUnmeteredStringValue("b"), + ), + } + + composite := interpreter.NewCompositeValue( + inter, + interpreter.EmptyLocationRange, + location, + identifier, + kind, + fields, + address, + ) + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + composite)) + + unwrappedValue, wrapperSize := v.UnwrapAtreeValue() + require.IsType(t, &atree.OrderedMap{}, unwrappedValue) + require.Equal(t, uint64(cborTagSize+someStorableWithMultipleNestedLevelsArraySize+1), wrapperSize) + + // Verify unwrapped value + atreeMap := unwrappedValue.(*atree.OrderedMap) + require.Equal(t, atree.Address(address), atreeMap.Address()) + require.Equal(t, 
uint64(len(fields)), atreeMap.Count()) + + for _, f := range fields { + v, err := atreeMap.Get( + interpreter.StringAtreeValueComparator, + interpreter.StringAtreeValueHashInput, + interpreter.StringAtreeValue(f.Name), + ) + require.NoError(t, err) + require.Equal(t, f.Value, v) + } + }) +} + +func TestSomeStorableUnwrapAtreeStorable(t *testing.T) { + + t.Parallel() + + address := common.Address{'A'} + + t.Run("SomeValue(bool)", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.BoolValue(true)) + + const maxInlineSize = 1024 / 4 + storable, err := v.Storable(storage, atree.Address(address), maxInlineSize) + require.NoError(t, err) + require.IsType(t, interpreter.SomeStorable{}, storable) + + unwrappedStorable := storable.(interpreter.SomeStorable).UnwrapAtreeStorable() + require.Equal(t, interpreter.BoolValue(true), unwrappedStorable) + }) + + t.Run("SomeValue(SomeValue(bool))", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.BoolValue(true))) + + const maxInlineSize = 1024 / 4 + storable, err := v.Storable(storage, atree.Address(address), maxInlineSize) + require.NoError(t, err) + require.IsType(t, interpreter.SomeStorable{}, storable) + + unwrappedStorable := storable.(interpreter.SomeStorable).UnwrapAtreeStorable() + require.Equal(t, interpreter.BoolValue(true), unwrappedStorable) + }) + + t.Run("SomeValue(SomeValue(ArrayValue(...))), small ArrayValue", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + + inter, err := interpreter.NewInterpreter( + &interpreter.Program{ + Program: ast.NewProgram(nil, []ast.Declaration{}), + Elaboration: sema.NewElaboration(nil), + }, + TestLocation, + &interpreter.Config{ + Storage: storage, + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + 
return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + }, + ) + require.NoError(t, err) + + values := []interpreter.Value{ + interpreter.NewUnmeteredUInt64Value(0), + interpreter.NewUnmeteredUInt64Value(1), + } + + array := interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + values..., + ) + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + array)) + + const maxInlineSize = 1024 / 4 + storable, err := v.Storable(storage, atree.Address(address), maxInlineSize) + require.NoError(t, err) + require.IsType(t, interpreter.SomeStorable{}, storable) + + unwrappedStorable := storable.(interpreter.SomeStorable).UnwrapAtreeStorable() + require.IsType(t, &atree.ArrayDataSlab{}, unwrappedStorable) + + unwrappedValue, err := unwrappedStorable.(*atree.ArrayDataSlab).StoredValue(storage) + require.NoError(t, err) + require.IsType(t, &atree.Array{}, unwrappedValue) + + atreeArray := unwrappedValue.(*atree.Array) + require.Equal(t, atree.Address(address), atreeArray.Address()) + require.Equal(t, uint64(len(values)), atreeArray.Count()) + + for i, expectedValue := range values { + v, err := atreeArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, expectedValue, v) + } + }) + + t.Run("SomeValue(SomeValue(ArrayValue(...))), large ArrayValue", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + + inter, err := interpreter.NewInterpreter( + &interpreter.Program{ + Program: ast.NewProgram(nil, []ast.Declaration{}), + Elaboration: sema.NewElaboration(nil), + }, + TestLocation, + &interpreter.Config{ + Storage: storage, + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + }, + ) + require.NoError(t, err) + + const 
valuesCount = 40 + values := make([]interpreter.Value, valuesCount) + for i := range valuesCount { + values[i] = interpreter.NewUnmeteredUInt64Value(uint64(i)) + } + + array := interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + values..., + ) + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + array)) + + const maxInlineSize = 1024 / 8 + storable, err := v.Storable(storage, atree.Address(address), maxInlineSize) + require.NoError(t, err) + require.IsType(t, interpreter.SomeStorable{}, storable) + + unwrappedStorable := storable.(interpreter.SomeStorable).UnwrapAtreeStorable() + require.IsType(t, atree.SlabIDStorable{}, unwrappedStorable) + + unwrappedValue, err := unwrappedStorable.(atree.SlabIDStorable).StoredValue(storage) + require.NoError(t, err) + require.IsType(t, &atree.Array{}, unwrappedValue) + + atreeArray := unwrappedValue.(*atree.Array) + require.Equal(t, atree.Address(address), atreeArray.Address()) + require.Equal(t, uint64(len(values)), atreeArray.Count()) + + for i, expectedValue := range values { + v, err := atreeArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, expectedValue, v) + } + }) + + t.Run("SomeValue(SomeValue(DictionaryValue(...))), small DictionaryValue", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + inter, err := interpreter.NewInterpreter( + &interpreter.Program{ + Program: ast.NewProgram(nil, []ast.Declaration{}), + Elaboration: sema.NewElaboration(nil), + }, + TestLocation, + &interpreter.Config{ + Storage: storage, + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + }, + ) + require.NoError(t, err) + + address := common.Address{'A'} + + values := []interpreter.Value{ + 
interpreter.NewUnmeteredUInt64Value(0), + interpreter.NewUnmeteredStringValue("a"), + interpreter.NewUnmeteredUInt64Value(1), + interpreter.NewUnmeteredStringValue("b"), + } + + dict := interpreter.NewDictionaryValueWithAddress( + inter, + interpreter.EmptyLocationRange, + &interpreter.DictionaryStaticType{ + KeyType: interpreter.PrimitiveStaticTypeAnyStruct, + ValueType: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + values..., + ) + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + dict)) + + const maxInlineSize = 1024 / 4 + storable, err := v.Storable(storage, atree.Address(address), maxInlineSize) + require.NoError(t, err) + require.IsType(t, interpreter.SomeStorable{}, storable) + + unwrappedStorable := storable.(interpreter.SomeStorable).UnwrapAtreeStorable() + require.IsType(t, &atree.MapDataSlab{}, unwrappedStorable) + + unwrappedValue, err := unwrappedStorable.(*atree.MapDataSlab).StoredValue(storage) + require.NoError(t, err) + require.IsType(t, &atree.OrderedMap{}, unwrappedValue) + + // Verify unwrapped value + atreeMap := unwrappedValue.(*atree.OrderedMap) + require.Equal(t, atree.Address(address), atreeMap.Address()) + require.Equal(t, uint64(len(values)/2), atreeMap.Count()) + + valueComparator := func( + storage atree.SlabStorage, + atreeValue atree.Value, + otherStorable atree.Storable, + ) (bool, error) { + value := interpreter.MustConvertStoredValue(inter, atreeValue) + otherValue := interpreter.StoredValue(inter, otherStorable, storage) + return value.(interpreter.EquatableValue).Equal(inter, interpreter.EmptyLocationRange, otherValue), nil + } + + hashInputProvider := func( + value atree.Value, + scratch []byte, + ) ([]byte, error) { + hashInput := interpreter.MustConvertStoredValue(inter, value).(interpreter.HashableValue). 
+ HashInput(inter, interpreter.EmptyLocationRange, scratch) + return hashInput, nil + } + + for i := 0; i < len(values); i += 2 { + key := values[i] + expectedValue := values[i+1] + + v, err := atreeMap.Get( + valueComparator, + hashInputProvider, + key, + ) + require.NoError(t, err) + require.Equal(t, expectedValue, v) + } + }) + + t.Run("SomeValue(SomeValue(DictionaryValue(...))), large DictionaryValue", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + + inter, err := interpreter.NewInterpreter( + &interpreter.Program{ + Program: ast.NewProgram(nil, []ast.Declaration{}), + Elaboration: sema.NewElaboration(nil), + }, + TestLocation, + &interpreter.Config{ + Storage: storage, + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + }, + ) + require.NoError(t, err) + + address := common.Address{'A'} + + const valuesCount = 20 + values := make([]interpreter.Value, valuesCount*2) + + char := 'a' + for i := 0; i < len(values); i += 2 { + values[i] = interpreter.NewUnmeteredUInt64Value(uint64(i)) + values[i+1] = interpreter.NewUnmeteredStringValue(string(char)) + char += 1 + } + + dict := interpreter.NewDictionaryValueWithAddress( + inter, + interpreter.EmptyLocationRange, + &interpreter.DictionaryStaticType{ + KeyType: interpreter.PrimitiveStaticTypeAnyStruct, + ValueType: interpreter.PrimitiveStaticTypeAnyStruct, + }, + address, + values..., + ) + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + dict)) + + const maxInlineSize = 1024 / 8 + storable, err := v.Storable(storage, atree.Address(address), maxInlineSize) + require.NoError(t, err) + require.IsType(t, interpreter.SomeStorable{}, storable) + + unwrappedStorable := storable.(interpreter.SomeStorable).UnwrapAtreeStorable() + require.IsType(t, atree.SlabIDStorable{}, unwrappedStorable) + + unwrappedValue, err 
:= unwrappedStorable.(atree.SlabIDStorable).StoredValue(storage) + require.NoError(t, err) + require.IsType(t, &atree.OrderedMap{}, unwrappedValue) + + // Verify unwrapped value + atreeMap := unwrappedValue.(*atree.OrderedMap) + require.Equal(t, atree.Address(address), atreeMap.Address()) + require.Equal(t, uint64(len(values)/2), atreeMap.Count()) + + valueComparator := func( + storage atree.SlabStorage, + atreeValue atree.Value, + otherStorable atree.Storable, + ) (bool, error) { + value := interpreter.MustConvertStoredValue(inter, atreeValue) + otherValue := interpreter.StoredValue(inter, otherStorable, storage) + return value.(interpreter.EquatableValue).Equal(inter, interpreter.EmptyLocationRange, otherValue), nil + } + + hashInputProvider := func( + value atree.Value, + scratch []byte, + ) ([]byte, error) { + hashInput := interpreter.MustConvertStoredValue(inter, value).(interpreter.HashableValue). + HashInput(inter, interpreter.EmptyLocationRange, scratch) + return hashInput, nil + } + + for i := 0; i < len(values); i += 2 { + key := values[i] + expectedValue := values[i+1] + + v, err := atreeMap.Get( + valueComparator, + hashInputProvider, + key, + ) + require.NoError(t, err) + require.Equal(t, expectedValue, v) + } + }) + + t.Run("SomeValue(SomeValue(CompositeValue(...))), small CompositeValue", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + + inter, err := interpreter.NewInterpreter( + &interpreter.Program{ + Program: ast.NewProgram(nil, []ast.Declaration{}), + Elaboration: sema.NewElaboration(nil), + }, + TestLocation, + &interpreter.Config{ + Storage: storage, + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + }, + ) + require.NoError(t, err) + + address := common.Address{'A'} + + identifier := "test" + + location := common.AddressLocation{ + Address: address, + Name: identifier, + } + + 
kind := common.CompositeKindStructure + + fields := []interpreter.CompositeField{ + interpreter.NewUnmeteredCompositeField( + "field1", + interpreter.NewUnmeteredStringValue("a"), + ), + interpreter.NewUnmeteredCompositeField( + "field2", + interpreter.NewUnmeteredStringValue("b"), + ), + } + + composite := interpreter.NewCompositeValue( + inter, + interpreter.EmptyLocationRange, + location, + identifier, + kind, + fields, + address, + ) + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + composite)) + + const maxInlineSize = 1024 / 4 + storable, err := v.Storable(storage, atree.Address(address), maxInlineSize) + require.NoError(t, err) + require.IsType(t, interpreter.SomeStorable{}, storable) + + unwrappedStorable := storable.(interpreter.SomeStorable).UnwrapAtreeStorable() + require.IsType(t, &atree.MapDataSlab{}, unwrappedStorable) + + unwrappedValue, err := unwrappedStorable.(*atree.MapDataSlab).StoredValue(storage) + require.NoError(t, err) + require.IsType(t, &atree.OrderedMap{}, unwrappedValue) + + // Verify unwrapped value + atreeMap := unwrappedValue.(*atree.OrderedMap) + require.Equal(t, atree.Address(address), atreeMap.Address()) + require.Equal(t, uint64(len(fields)), atreeMap.Count()) + + for _, f := range fields { + v, err := atreeMap.Get( + interpreter.StringAtreeValueComparator, + interpreter.StringAtreeValueHashInput, + interpreter.StringAtreeValue(f.Name), + ) + require.NoError(t, err) + require.Equal(t, f.Value, v) + } + }) + + t.Run("SomeValue(SomeValue(CompositeValue(...))), large CompositeValue", func(t *testing.T) { + storage := newUnmeteredInMemoryStorage() + + inter, err := interpreter.NewInterpreter( + &interpreter.Program{ + Program: ast.NewProgram(nil, []ast.Declaration{}), + Elaboration: sema.NewElaboration(nil), + }, + TestLocation, + &interpreter.Config{ + Storage: storage, + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + 
return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + }, + ) + require.NoError(t, err) + + address := common.Address{'A'} + + identifier := "test" + + location := common.AddressLocation{ + Address: address, + Name: identifier, + } + + kind := common.CompositeKindStructure + + const fieldsCount = 20 + fields := make([]interpreter.CompositeField, fieldsCount) + char := 'a' + for i := range len(fields) { + fields[i] = interpreter.NewUnmeteredCompositeField( + fmt.Sprintf("field%d", i), + interpreter.NewUnmeteredStringValue(string(char)), + ) + char += 1 + } + + composite := interpreter.NewCompositeValue( + inter, + interpreter.EmptyLocationRange, + location, + identifier, + kind, + fields, + address, + ) + + v := interpreter.NewUnmeteredSomeValueNonCopying( + interpreter.NewUnmeteredSomeValueNonCopying( + composite)) + + const maxInlineSize = 1024 / 8 + storable, err := v.Storable(storage, atree.Address(address), maxInlineSize) + require.NoError(t, err) + require.IsType(t, interpreter.SomeStorable{}, storable) + + unwrappedStorable := storable.(interpreter.SomeStorable).UnwrapAtreeStorable() + require.IsType(t, atree.SlabIDStorable{}, unwrappedStorable) + + unwrappedValue, err := unwrappedStorable.(atree.SlabIDStorable).StoredValue(storage) + require.NoError(t, err) + require.IsType(t, &atree.OrderedMap{}, unwrappedValue) + + // Verify unwrapped value + atreeMap := unwrappedValue.(*atree.OrderedMap) + require.Equal(t, atree.Address(address), atreeMap.Address()) + require.Equal(t, uint64(len(fields)), atreeMap.Count()) + + for _, f := range fields { + v, err := atreeMap.Get( + interpreter.StringAtreeValueComparator, + interpreter.StringAtreeValueHashInput, + interpreter.StringAtreeValue(f.Name), + ) + require.NoError(t, err) + require.Equal(t, f.Value, v) + } + }) +} diff --git a/interpreter/value_storage_reference.go b/interpreter/value_storage_reference.go index 741fd4ae18..906edc5819 100644 --- 
a/interpreter/value_storage_reference.go +++ b/interpreter/value_storage_reference.go @@ -123,7 +123,7 @@ func (*StorageReferenceValue) IsImportable(_ *Interpreter, _ LocationRange) bool func (v *StorageReferenceValue) dereference(interpreter *Interpreter, locationRange LocationRange) (*Value, error) { address := v.TargetStorageAddress - domain := v.TargetPath.Domain.Identifier() + domain := v.TargetPath.Domain.StorageDomain() identifier := v.TargetPath.Identifier storageMapKey := StringStorageMapKey(identifier) diff --git a/interpreter/value_test.go b/interpreter/value_test.go index 295f5a4346..3eaace67f8 100644 --- a/interpreter/value_test.go +++ b/interpreter/value_test.go @@ -3806,7 +3806,7 @@ func TestValue_ConformsToStaticType(t *testing.T) { ) require.NoError(t, err) - storageMap := storage.GetStorageMap(testAddress, "storage", true) + storageMap := storage.GetDomainStorageMap(inter, testAddress, common.StorageDomainPathStorage, true) storageMap.WriteValue(inter, StringStorageMapKey("test"), TrueValue) value := valueFactory(inter) diff --git a/interpreter/value_uint.go b/interpreter/value_uint.go index aec0c661e3..04a404a65a 100644 --- a/interpreter/value_uint.go +++ b/interpreter/value_uint.go @@ -534,7 +534,7 @@ func (v UIntValue) BitwiseLeftShift(interpreter *Interpreter, other IntegerValue } if o.BigInt.Sign() < 0 { - panic(UnderflowError{ + panic(NegativeShiftError{ LocationRange: locationRange, }) } @@ -568,7 +568,7 @@ func (v UIntValue) BitwiseRightShift(interpreter *Interpreter, other IntegerValu } if o.BigInt.Sign() < 0 { - panic(UnderflowError{ + panic(NegativeShiftError{ LocationRange: locationRange, }) } diff --git a/interpreter/value_uint128.go b/interpreter/value_uint128.go index e0d7cb0e92..dfde3cfa4d 100644 --- a/interpreter/value_uint128.go +++ b/interpreter/value_uint128.go @@ -20,6 +20,7 @@ package interpreter import ( "math/big" + "math/bits" "github.com/onflow/atree" @@ -580,21 +581,26 @@ func (v UInt128Value) 
BitwiseLeftShift(interpreter *Interpreter, other IntegerVa }) } + if o.BigInt.Sign() < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + if !o.BigInt.IsUint64() || o.BigInt.Uint64() >= 128 { + return NewUInt128ValueFromUint64(interpreter, 0) + } + + // The maximum shift value at this point is 127, which may lead to an + // additional allocation of up to 128 bits. Add usage for possible + // intermediate value. + common.UseMemory(interpreter, Uint128MemoryUsage) + return NewUInt128ValueFromBigInt( interpreter, func() *big.Int { res := new(big.Int) - if o.BigInt.Sign() < 0 { - panic(UnderflowError{ - LocationRange: locationRange, - }) - } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) - } - return res.Lsh(v.BigInt, uint(o.BigInt.Uint64())) + res = res.Lsh(v.BigInt, uint(o.BigInt.Uint64())) + return truncate(res, 128/bits.UintSize) }, ) } @@ -610,20 +616,19 @@ func (v UInt128Value) BitwiseRightShift(interpreter *Interpreter, other IntegerV }) } + if o.BigInt.Sign() < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + if !o.BigInt.IsUint64() { + return NewUInt128ValueFromUint64(interpreter, 0) + } + return NewUInt128ValueFromBigInt( interpreter, func() *big.Int { res := new(big.Int) - if o.BigInt.Sign() < 0 { - panic(UnderflowError{ - LocationRange: locationRange, - }) - } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) - } return res.Rsh(v.BigInt, uint(o.BigInt.Uint64())) }, ) diff --git a/interpreter/value_uint256.go b/interpreter/value_uint256.go index 975454387a..2050df91a8 100644 --- a/interpreter/value_uint256.go +++ b/interpreter/value_uint256.go @@ -20,6 +20,7 @@ package interpreter import ( "math/big" + "math/bits" "github.com/onflow/atree" @@ -580,21 +581,26 @@ func (v UInt256Value) BitwiseLeftShift(interpreter *Interpreter, other IntegerVa }) } + if o.BigInt.Sign() < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, 
+ }) + } + if !o.BigInt.IsUint64() || o.BigInt.Uint64() >= 256 { + return NewUInt256ValueFromUint64(interpreter, 0) + } + + // The maximum shift value at this point is 255, which may lead to an + // additional allocation of up to 256 bits. Add usage for possible + // intermediate value. + common.UseMemory(interpreter, Uint256MemoryUsage) + return NewUInt256ValueFromBigInt( interpreter, func() *big.Int { res := new(big.Int) - if o.BigInt.Sign() < 0 { - panic(UnderflowError{ - LocationRange: locationRange, - }) - } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) - } - return res.Lsh(v.BigInt, uint(o.BigInt.Uint64())) + res = res.Lsh(v.BigInt, uint(o.BigInt.Uint64())) + return truncate(res, 256/bits.UintSize) }, ) } @@ -610,20 +616,19 @@ func (v UInt256Value) BitwiseRightShift(interpreter *Interpreter, other IntegerV }) } + if o.BigInt.Sign() < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + if !o.BigInt.IsUint64() { + return NewUInt256ValueFromUint64(interpreter, 0) + } + return NewUInt256ValueFromBigInt( interpreter, func() *big.Int { res := new(big.Int) - if o.BigInt.Sign() < 0 { - panic(UnderflowError{ - LocationRange: locationRange, - }) - } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) - } return res.Rsh(v.BigInt, uint(o.BigInt.Uint64())) }, ) diff --git a/interpreter/value_word128.go b/interpreter/value_word128.go index d845055941..a9aacc4bf0 100644 --- a/interpreter/value_word128.go +++ b/interpreter/value_word128.go @@ -20,6 +20,7 @@ package interpreter import ( "math/big" + "math/bits" "github.com/onflow/atree" @@ -486,21 +487,26 @@ func (v Word128Value) BitwiseLeftShift(interpreter *Interpreter, other IntegerVa }) } + if o.BigInt.Sign() < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + if !o.BigInt.IsUint64() || o.BigInt.Uint64() >= 128 { + return NewWord128ValueFromUint64(interpreter, 0) + } + + // The maximum shift value 
at this point is 127, which may lead to an + // additional allocation of up to 128 bits. Add usage for possible + // intermediate value. + common.UseMemory(interpreter, Uint128MemoryUsage) + return NewWord128ValueFromBigInt( interpreter, func() *big.Int { res := new(big.Int) - if o.BigInt.Sign() < 0 { - panic(UnderflowError{ - LocationRange: locationRange, - }) - } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) - } - return res.Lsh(v.BigInt, uint(o.BigInt.Uint64())) + res = res.Lsh(v.BigInt, uint(o.BigInt.Uint64())) + return truncate(res, 128/bits.UintSize) }, ) } @@ -515,20 +521,19 @@ func (v Word128Value) BitwiseRightShift(interpreter *Interpreter, other IntegerV }) } + if o.BigInt.Sign() < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + if !o.BigInt.IsUint64() { + return NewWord128ValueFromUint64(interpreter, 0) + } + return NewWord128ValueFromBigInt( interpreter, func() *big.Int { res := new(big.Int) - if o.BigInt.Sign() < 0 { - panic(UnderflowError{ - LocationRange: locationRange, - }) - } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) - } return res.Rsh(v.BigInt, uint(o.BigInt.Uint64())) }, ) diff --git a/interpreter/value_word256.go b/interpreter/value_word256.go index 49924b6240..9446d1c81e 100644 --- a/interpreter/value_word256.go +++ b/interpreter/value_word256.go @@ -20,6 +20,7 @@ package interpreter import ( "math/big" + "math/bits" "github.com/onflow/atree" @@ -486,21 +487,26 @@ func (v Word256Value) BitwiseLeftShift(interpreter *Interpreter, other IntegerVa }) } + if o.BigInt.Sign() < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + if !o.BigInt.IsUint64() || o.BigInt.Uint64() >= 256 { + return NewWord256ValueFromUint64(interpreter, 0) + } + + // The maximum shift value at this point is 255, which may lead to an + // additional allocation of up to 256 bits. Add usage for possible + // intermediate value. 
+ common.UseMemory(interpreter, Uint256MemoryUsage) + return NewWord256ValueFromBigInt( interpreter, func() *big.Int { res := new(big.Int) - if o.BigInt.Sign() < 0 { - panic(UnderflowError{ - LocationRange: locationRange, - }) - } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) - } - return res.Lsh(v.BigInt, uint(o.BigInt.Uint64())) + res = res.Lsh(v.BigInt, uint(o.BigInt.Uint64())) + return truncate(res, 256/bits.UintSize) }, ) } @@ -516,20 +522,19 @@ func (v Word256Value) BitwiseRightShift(interpreter *Interpreter, other IntegerV }) } + if o.BigInt.Sign() < 0 { + panic(NegativeShiftError{ + LocationRange: locationRange, + }) + } + if !o.BigInt.IsUint64() { + return NewWord256ValueFromUint64(interpreter, 0) + } + return NewWord256ValueFromBigInt( interpreter, func() *big.Int { res := new(big.Int) - if o.BigInt.Sign() < 0 { - panic(UnderflowError{ - LocationRange: locationRange, - }) - } - if !o.BigInt.IsUint64() { - panic(OverflowError{ - LocationRange: locationRange, - }) - } return res.Rsh(v.BigInt, uint(o.BigInt.Uint64())) }, ) diff --git a/interpreter/values_test.go b/interpreter/values_test.go index b874aa4213..fc8260f87e 100644 --- a/interpreter/values_test.go +++ b/interpreter/values_test.go @@ -23,6 +23,7 @@ import ( "fmt" "math" "math/rand" + "strconv" "strings" "testing" "time" @@ -32,1685 +33,4886 @@ import ( "github.com/onflow/atree" + "github.com/onflow/cadence" "github.com/onflow/cadence/ast" "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" "github.com/onflow/cadence/sema" . "github.com/onflow/cadence/test_utils/common_utils" . "github.com/onflow/cadence/test_utils/interpreter_utils" + . "github.com/onflow/cadence/test_utils/runtime_utils" ) -// TODO: make these program args? 
-const containerMaxDepth = 3 -const containerMaxSize = 100 -const compositeMaxFields = 10 +var defaultRandomValueLimits = randomValueLimits{ + containerMaxDepth: 4, + containerMaxSize: 40, + compositeMaxFields: 10, +} var runSmokeTests = flag.Bool("runSmokeTests", false, "Run smoke tests on values") var validateAtree = flag.Bool("validateAtree", true, "Enable atree validation") var smokeTestSeed = flag.Int64("smokeTestSeed", -1, "Seed for prng (-1 specifies current Unix time)") -func TestInterpretRandomMapOperations(t *testing.T) { - if !*runSmokeTests { - t.Skip("smoke tests are disabled") - } - - t.Parallel() +func newRandomValueTestInterpreter(t *testing.T) (inter *interpreter.Interpreter, resetStorage func()) { - r := newRandomValueGenerator() - t.Logf("seed: %d", r.seed) + config := &interpreter.Config{ + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + AtreeStorageValidationEnabled: *validateAtree, + AtreeValueValidationEnabled: *validateAtree, + } - storage := newUnmeteredInMemoryStorage() inter, err := interpreter.NewInterpreter( &interpreter.Program{ - Program: ast.NewProgram(nil, []ast.Declaration{}), Elaboration: sema.NewElaboration(nil), }, TestLocation, - &interpreter.Config{ - Storage: storage, - ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { - return interpreter.VirtualImport{ - Elaboration: inter.Program.Elaboration, - } - }, - AtreeStorageValidationEnabled: *validateAtree, - AtreeValueValidationEnabled: *validateAtree, - }, + config, ) require.NoError(t, err) - numberOfValues := r.randomInt(containerMaxSize) + ledger := NewTestLedger(nil, nil) - var testMap, copyOfTestMap *interpreter.DictionaryValue - var storageSize, slabCounts int + resetStorage = func() { + if config.Storage != nil { + storage := config.Storage.(*runtime.Storage) + err := 
storage.Commit(inter, false) + require.NoError(t, err) + } + config.Storage = runtime.NewStorage(ledger, nil, runtime.StorageConfig{}) + } - entries := newValueMap(numberOfValues) - orgOwner := common.Address{'A'} + resetStorage() - t.Run("construction", func(t *testing.T) { - keyValues := make([]interpreter.Value, numberOfValues*2) - for i := 0; i < numberOfValues; i++ { - key := r.randomHashableValue(inter) - value := r.randomStorableValue(inter, 0) + return inter, resetStorage +} - entries.put(inter, key, value) +func importValue(t *testing.T, inter *interpreter.Interpreter, value cadence.Value) interpreter.Value { - keyValues[i*2] = key - keyValues[i*2+1] = value - } + switch value := value.(type) { + case cadence.Array: + // Work around for "cannot import array: elements do not belong to the same type", + // caused by import of array without expected type, which leads to inference of the element type: + // Create an empty array with an expected type, then append imported elements to it. 
- testMap = interpreter.NewDictionaryValueWithAddress( + arrayResult, err := runtime.ImportValue( inter, interpreter.EmptyLocationRange, - &interpreter.DictionaryStaticType{ - KeyType: interpreter.PrimitiveStaticTypeAnyStruct, - ValueType: interpreter.PrimitiveStaticTypeAnyStruct, - }, - orgOwner, - keyValues..., + nil, + nil, + cadence.Array{}, + sema.NewVariableSizedType(nil, sema.AnyStructType), ) + require.NoError(t, err) + require.IsType(t, &interpreter.ArrayValue{}, arrayResult) + array := arrayResult.(*interpreter.ArrayValue) - storageSize, slabCounts = getSlabStorageSize(t, storage) + for _, element := range value.Values { + array.Append( + inter, + interpreter.EmptyLocationRange, + importValue(t, inter, element), + ) + } - require.Equal(t, testMap.Count(), entries.size()) + return array - entries.foreach(func(orgKey, orgValue interpreter.Value) (exit bool) { - exists := testMap.ContainsKey(inter, interpreter.EmptyLocationRange, orgKey) - require.True(t, bool(exists)) + case cadence.Dictionary: + // Work around for "cannot import dictionary: keys does not belong to the same type", + // caused by import of dictionary without expected type, which leads to inference of the key type: + // Create an empty dictionary with an expected type, then append imported key-value pairs to it. 
- value, found := testMap.Get(inter, interpreter.EmptyLocationRange, orgKey) - require.True(t, found) - AssertValuesEqual(t, inter, orgValue, value) + dictionaryResult, err := runtime.ImportValue( + inter, + interpreter.EmptyLocationRange, + nil, + nil, + cadence.Dictionary{}, + sema.NewDictionaryType( + nil, + sema.HashableStructType, + sema.AnyStructType, + ), + ) + require.NoError(t, err) + require.IsType(t, &interpreter.DictionaryValue{}, dictionaryResult) + dictionary := dictionaryResult.(*interpreter.DictionaryValue) - return false - }) + for _, pair := range value.Pairs { + dictionary.Insert( + inter, + interpreter.EmptyLocationRange, + importValue(t, inter, pair.Key), + importValue(t, inter, pair.Value), + ) + } - owner := testMap.GetOwner() - assert.Equal(t, orgOwner, owner) - }) + return dictionary - t.Run("iterate", func(t *testing.T) { - require.Equal(t, testMap.Count(), entries.size()) + case cadence.Struct: - testMap.Iterate( + structResult, err := runtime.ImportValue( inter, interpreter.EmptyLocationRange, - func(key, value interpreter.Value) (resume bool) { - orgValue, ok := entries.get(inter, key) - require.True(t, ok, "cannot find key: %v", key) - - AssertValuesEqual(t, inter, orgValue, value) - return true + nil, + nil, + cadence.Struct{ + StructType: value.StructType, }, + nil, ) - }) + require.NoError(t, err) + require.IsType(t, &interpreter.CompositeValue{}, structResult) + composite := structResult.(*interpreter.CompositeValue) + + for fieldName, fieldValue := range value.FieldsMappedByName() { + composite.SetMember( + inter, + interpreter.EmptyLocationRange, + fieldName, + importValue(t, inter, fieldValue), + ) + } + + return composite + + case cadence.Optional: - t.Run("deep copy", func(t *testing.T) { - newOwner := atree.Address{'B'} - copyOfTestMap = testMap.Transfer( + if value.Value == nil { + return interpreter.NilValue{} + } + + return interpreter.NewUnmeteredSomeValueNonCopying( + importValue(t, inter, value.Value), + ) + + default: 
+ result, err := runtime.ImportValue( inter, interpreter.EmptyLocationRange, - newOwner, - false, nil, nil, - true, // testMap is standalone. - ).(*interpreter.DictionaryValue) + value, + nil, + ) + require.NoError(t, err) + return result + } +} - require.Equal(t, entries.size(), copyOfTestMap.Count()) +func withoutAtreeStorageValidationEnabled[T any](inter *interpreter.Interpreter, f func() T) T { + config := inter.SharedState.Config + original := config.AtreeStorageValidationEnabled + config.AtreeStorageValidationEnabled = false + result := f() + config.AtreeStorageValidationEnabled = original + return result +} - entries.foreach(func(orgKey, orgValue interpreter.Value) (exit bool) { - exists := copyOfTestMap.ContainsKey(inter, interpreter.EmptyLocationRange, orgKey) - require.True(t, bool(exists)) +func TestInterpretSmokeRandomDictionaryOperations(t *testing.T) { + if !*runSmokeTests { + t.Skip("smoke tests are disabled") + } - value, found := copyOfTestMap.Get(inter, interpreter.EmptyLocationRange, orgKey) - require.True(t, found) - AssertValuesEqual(t, inter, orgValue, value) + t.Parallel() - return false - }) + orgOwner := common.Address{'A'} - owner := copyOfTestMap.GetOwner() - assert.Equal(t, newOwner[:], owner[:]) - }) + const dictionaryStorageMapKey = interpreter.StringStorageMapKey("dictionary") - t.Run("deep remove", func(t *testing.T) { - copyOfTestMap.DeepRemove(inter, true) - err = storage.Remove(copyOfTestMap.SlabID()) - require.NoError(t, err) + writeDictionary := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + dictionary *interpreter.DictionaryValue, + ) { + inter.Storage(). + GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + true, + ). 
+ WriteValue( + inter, + storageMapKey, + dictionary, + ) + } - // deep removal should clean up everything - newStorageSize, newSlabCounts := getSlabStorageSize(t, storage) - assert.Equal(t, slabCounts, newSlabCounts) - assert.Equal(t, storageSize, newStorageSize) + readDictionary := func( + t *testing.T, + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) *interpreter.DictionaryValue { + storageMap := inter.Storage().GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + false, + ) + require.NotNil(t, storageMap) - require.Equal(t, entries.size(), testMap.Count()) + readValue := storageMap.ReadValue(inter, storageMapKey) + require.NotNil(t, readValue) - // go over original values again and check no missing data (no side effect should be found) - entries.foreach(func(orgKey, orgValue interpreter.Value) (exit bool) { - exists := testMap.ContainsKey(inter, interpreter.EmptyLocationRange, orgKey) - require.True(t, bool(exists)) + require.IsType(t, &interpreter.DictionaryValue{}, readValue) + return readValue.(*interpreter.DictionaryValue) + } - value, found := testMap.Get(inter, interpreter.EmptyLocationRange, orgKey) - require.True(t, found) - AssertValuesEqual(t, inter, orgValue, value) + removeDictionary := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) { + inter.Storage(). + GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + false, + ). 
+ RemoveValue( + inter, + storageMapKey, + ) + } - return false - }) + createDictionary := func( + t *testing.T, + r *randomValueGenerator, + inter *interpreter.Interpreter, + ) ( + *interpreter.DictionaryValue, + cadence.Dictionary, + ) { - owner := testMap.GetOwner() - assert.Equal(t, orgOwner, owner) - }) + expectedValue := r.randomDictionaryValue(inter, 0) - t.Run("insert", func(t *testing.T) { - newEntries := newValueMap(numberOfValues) + keyValues := make([]interpreter.Value, 2*len(expectedValue.Pairs)) + for i, pair := range expectedValue.Pairs { + + key := importValue(t, inter, pair.Key) + value := importValue(t, inter, pair.Value) + + keyValues[i*2] = key + keyValues[i*2+1] = value + } + + // Construct a dictionary directly in the owner's account. + // However, the dictionary is not referenced by the root of the storage yet + // (a storage map), so atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. - dictionary := interpreter.NewDictionaryValueWithAddress( + dictionary := withoutAtreeStorageValidationEnabled( inter, - interpreter.EmptyLocationRange, - &interpreter.DictionaryStaticType{ - KeyType: interpreter.PrimitiveStaticTypeAnyStruct, - ValueType: interpreter.PrimitiveStaticTypeAnyStruct, + func() *interpreter.DictionaryValue { + return interpreter.NewDictionaryValueWithAddress( + inter, + interpreter.EmptyLocationRange, + &interpreter.DictionaryStaticType{ + KeyType: interpreter.PrimitiveStaticTypeHashableStruct, + ValueType: interpreter.PrimitiveStaticTypeAnyStruct, + }, + orgOwner, + keyValues..., + ) }, - orgOwner, ) - // Insert - for i := 0; i < numberOfValues; i++ { - key := r.randomHashableValue(inter) - value := r.randomStorableValue(inter, 0) + // Store the dictionary in a storage map, so that the dictionary's slab + // is referenced by the root of the storage. 
+ + writeDictionary( + inter, + orgOwner, + dictionaryStorageMapKey, + dictionary, + ) - newEntries.put(inter, key, value) + return dictionary, expectedValue + } - _ = dictionary.Insert(inter, interpreter.EmptyLocationRange, key, value) - } + checkDictionary := func( + t *testing.T, + inter *interpreter.Interpreter, + dictionary *interpreter.DictionaryValue, + expectedValue cadence.Dictionary, + expectedOwner common.Address, + ) { + require.Equal(t, len(expectedValue.Pairs), dictionary.Count()) - require.Equal(t, newEntries.size(), dictionary.Count()) + for _, pair := range expectedValue.Pairs { + pairKey := importValue(t, inter, pair.Key) - // Go over original values again and check no missing data (no side effect should be found) - newEntries.foreach(func(orgKey, orgValue interpreter.Value) (exit bool) { - exists := dictionary.ContainsKey(inter, interpreter.EmptyLocationRange, orgKey) + exists := dictionary.ContainsKey(inter, interpreter.EmptyLocationRange, pairKey) require.True(t, bool(exists)) - value, found := dictionary.Get(inter, interpreter.EmptyLocationRange, orgKey) + value, found := dictionary.Get(inter, interpreter.EmptyLocationRange, pairKey) require.True(t, found) - AssertValuesEqual(t, inter, orgValue, value) - return false - }) - }) + pairValue := importValue(t, inter, pair.Value) + AssertValuesEqual(t, inter, pairValue, value) + } - t.Run("remove", func(t *testing.T) { - newEntries := newValueMap(numberOfValues) + owner := dictionary.GetOwner() + assert.Equal(t, expectedOwner, owner) + } - keyValues := make([][2]interpreter.Value, numberOfValues) - for i := 0; i < numberOfValues; i++ { - key := r.randomHashableValue(inter) - value := r.randomStorableValue(inter, 0) + checkIteration := func( + t *testing.T, + inter *interpreter.Interpreter, + dictionary *interpreter.DictionaryValue, + expectedValue cadence.Dictionary, + ) { + // Index the expected key-value pairs for lookup during iteration - newEntries.put(inter, key, value) + indexedExpected := 
map[any]interpreter.DictionaryEntryValues{} + for _, pair := range expectedValue.Pairs { + pairKey := importValue(t, inter, pair.Key) - keyValues[i][0] = key - keyValues[i][1] = value + mapKey := mapKey(inter, pairKey) + + require.NotContains(t, indexedExpected, mapKey) + indexedExpected[mapKey] = interpreter.DictionaryEntryValues{ + Key: pairKey, + Value: importValue(t, inter, pair.Value), + } } - dictionary := interpreter.NewDictionaryValueWithAddress( + require.Equal(t, len(expectedValue.Pairs), dictionary.Count()) + + var iterations int + + dictionary.Iterate( inter, interpreter.EmptyLocationRange, - &interpreter.DictionaryStaticType{ - KeyType: interpreter.PrimitiveStaticTypeAnyStruct, - ValueType: interpreter.PrimitiveStaticTypeAnyStruct, - }, - orgOwner, - ) - - require.Equal(t, 0, dictionary.Count()) + func(key, value interpreter.Value) (resume bool) { - // Get the initial storage size before inserting values - startingStorageSize, startingSlabCounts := getSlabStorageSize(t, storage) + mapKey := mapKey(inter, key) + require.Contains(t, indexedExpected, mapKey) - // Insert - for _, keyValue := range keyValues { - dictionary.Insert(inter, interpreter.EmptyLocationRange, keyValue[0], keyValue[1]) - } + pair := indexedExpected[mapKey] - require.Equal(t, newEntries.size(), dictionary.Count()) + AssertValuesEqual(t, inter, pair.Key, key) + AssertValuesEqual(t, inter, pair.Value, value) - // Remove - newEntries.foreach(func(orgKey, orgValue interpreter.Value) (exit bool) { - removedValue := dictionary.Remove(inter, interpreter.EmptyLocationRange, orgKey) + iterations += 1 - require.IsType(t, &interpreter.SomeValue{}, removedValue) - someValue := removedValue.(*interpreter.SomeValue) + return true + }, + ) - // Removed value must be same as the original value - innerValue := someValue.InnerValue(inter, interpreter.EmptyLocationRange) - AssertValuesEqual(t, inter, orgValue, innerValue) + assert.Equal(t, len(expectedValue.Pairs), iterations) + } - return false - }) + 
t.Run("construction", func(t *testing.T) { - // Dictionary must be empty - require.Equal(t, 0, dictionary.Count()) + t.Parallel() - storageSize, slabCounts := getSlabStorageSize(t, storage) + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) - // Storage size after removals should be same as the size before insertion. - assert.Equal(t, startingStorageSize, storageSize) - assert.Equal(t, startingSlabCounts, slabCounts) - }) + inter, resetStorage := newRandomValueTestInterpreter(t) - t.Run("remove enum key", func(t *testing.T) { + dictionary, expectedValue := createDictionary(t, &r, inter) - dictionary := interpreter.NewDictionaryValueWithAddress( + checkDictionary( + t, inter, - interpreter.EmptyLocationRange, - &interpreter.DictionaryStaticType{ - KeyType: interpreter.PrimitiveStaticTypeAnyStruct, - ValueType: interpreter.PrimitiveStaticTypeAnyStruct, - }, + dictionary, + expectedValue, orgOwner, ) - require.Equal(t, 0, dictionary.Count()) - - // Get the initial storage size after creating empty dictionary - startingStorageSize, startingSlabCounts := getSlabStorageSize(t, storage) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } - newEntries := newValueMap(numberOfValues) + resetStorage() - keyValues := make([][2]interpreter.Value, numberOfValues) - for i := 0; i < numberOfValues; i++ { - // Create a random enum as key - key := r.generateRandomHashableValue(inter, randomValueKindEnum) - value := interpreter.Void + dictionary = readDictionary( + t, + inter, + orgOwner, + dictionaryStorageMapKey, + ) - newEntries.put(inter, key, value) + checkDictionary( + t, + inter, + dictionary, + expectedValue, + orgOwner, + ) - keyValues[i][0] = key - keyValues[i][1] = value + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } + }) - // Insert - for _, keyValue := range keyValues { - dictionary.Insert(inter, interpreter.EmptyLocationRange, keyValue[0], 
keyValue[1]) - } + t.Run("iterate", func(t *testing.T) { - // Remove - newEntries.foreach(func(orgKey, orgValue interpreter.Value) (exit bool) { - removedValue := dictionary.Remove(inter, interpreter.EmptyLocationRange, orgKey) + t.Parallel() - require.IsType(t, &interpreter.SomeValue{}, removedValue) - someValue := removedValue.(*interpreter.SomeValue) + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) - // Removed value must be same as the original value - innerValue := someValue.InnerValue(inter, interpreter.EmptyLocationRange) - AssertValuesEqual(t, inter, orgValue, innerValue) + inter, resetStorage := newRandomValueTestInterpreter(t) - return false - }) + dictionary, expectedValue := createDictionary(t, &r, inter) - // Dictionary must be empty - require.Equal(t, 0, dictionary.Count()) + checkDictionary( + t, + inter, + dictionary, + expectedValue, + orgOwner, + ) - storageSize, slabCounts = getSlabStorageSize(t, storage) + checkIteration( + t, + inter, + dictionary, + expectedValue, + ) - // Storage size after removals should be same as the size before insertion. 
- assert.Equal(t, startingStorageSize, storageSize) - assert.Equal(t, startingSlabCounts, slabCounts) - }) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } - t.Run("update enum key", func(t *testing.T) { + resetStorage() - dictionary := interpreter.NewDictionaryValueWithAddress( + dictionary = readDictionary( + t, inter, - interpreter.EmptyLocationRange, - &interpreter.DictionaryStaticType{ - KeyType: interpreter.PrimitiveStaticTypeAnyStruct, - ValueType: interpreter.PrimitiveStaticTypeAnyStruct, - }, orgOwner, + dictionaryStorageMapKey, ) - require.Equal(t, 0, dictionary.Count()) - - value1 := interpreter.NewUnmeteredIntValueFromInt64(1) - value2 := interpreter.NewUnmeteredIntValueFromInt64(2) - - keys := make([]interpreter.Value, numberOfValues) - for i := 0; i < numberOfValues; i++ { - // Create a random enum as key - key := r.generateRandomHashableValue(inter, randomValueKindEnum) + checkDictionary( + t, + inter, + dictionary, + expectedValue, + orgOwner, + ) - keys[i] = key - } + checkIteration( + t, + inter, + dictionary, + expectedValue, + ) - // Insert - for _, key := range keys { - dictionary.Insert( - inter, - interpreter.EmptyLocationRange, - // Need to clone the key, as it is transferred, and we want to keep using it. - key.Clone(inter), - // Always insert value1 - value1, - ) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } + }) - // Update - for _, key := range keys { - oldValue := dictionary.Insert( - inter, - interpreter.EmptyLocationRange, - // Need to clone the key, as it is transferred, and we want to keep using it. 
- key.Clone(inter), - // Change all value1 to value2 - value2, - ) - - require.IsType(t, &interpreter.SomeValue{}, oldValue) - someValue := oldValue.(*interpreter.SomeValue) + t.Run("move (transfer and deep remove)", func(t *testing.T) { - // Removed value must be same as the original value - innerValue := someValue.InnerValue(inter, interpreter.EmptyLocationRange) - AssertValuesEqual(t, inter, value1, innerValue) - } + t.Parallel() - // Check the values - for _, key := range keys { - readValue := dictionary.GetKey( - inter, - interpreter.EmptyLocationRange, - key, - ) + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) - require.IsType(t, &interpreter.SomeValue{}, readValue) - someValue := readValue.(*interpreter.SomeValue) + inter, resetStorage := newRandomValueTestInterpreter(t) - // Read value must be updated value - innerValue := someValue.InnerValue(inter, interpreter.EmptyLocationRange) - AssertValuesEqual(t, inter, value2, innerValue) - } - }) + original, expectedValue := createDictionary(t, &r, inter) - t.Run("random insert & remove", func(t *testing.T) { - keyValues := make([][2]interpreter.Value, numberOfValues) - for i := 0; i < numberOfValues; i++ { - // Generate unique key - var key interpreter.Value - for { - key = r.randomHashableValue(inter) + checkDictionary( + t, + inter, + original, + expectedValue, + orgOwner, + ) - var foundConflict bool - for j := 0; j < i; j++ { - existingKey := keyValues[j][0] - if key.(interpreter.EquatableValue).Equal(inter, interpreter.EmptyLocationRange, existingKey) { - foundConflict = true - break - } - } - if !foundConflict { - break - } - } + resetStorage() - keyValues[i][0] = key - keyValues[i][1] = r.randomStorableValue(inter, 0) - } + original = readDictionary( + t, + inter, + orgOwner, + dictionaryStorageMapKey, + ) - dictionary := interpreter.NewDictionaryValueWithAddress( + checkDictionary( + t, inter, - interpreter.EmptyLocationRange, - 
&interpreter.DictionaryStaticType{ - KeyType: interpreter.PrimitiveStaticTypeAnyStruct, - ValueType: interpreter.PrimitiveStaticTypeAnyStruct, - }, + original, + expectedValue, orgOwner, ) - require.Equal(t, 0, dictionary.Count()) + // Transfer the dictionary to a new owner - // Get the initial storage size before inserting values - startingStorageSize, startingSlabCounts := getSlabStorageSize(t, storage) + newOwner := common.Address{'B'} - insertCount := 0 - deleteCount := 0 + transferred := original.Transfer( + inter, + interpreter.EmptyLocationRange, + atree.Address(newOwner), + false, + nil, + nil, + false, + ).(*interpreter.DictionaryValue) - isInsert := func() bool { - if dictionary.Count() == 0 { - return true - } + // Store the transferred dictionary in a storage map, so that the dictionary's slab + // is referenced by the root of the storage. - if insertCount >= numberOfValues { - return false - } + const transferredStorageMapKey = interpreter.StringStorageMapKey("transferred") - return r.randomInt(1) == 1 - } + writeDictionary( + inter, + newOwner, + transferredStorageMapKey, + transferred, + ) - for insertCount < numberOfValues || dictionary.Count() > 0 { - // Perform a random operation out of insert/remove - if isInsert() { - key := keyValues[insertCount][0] - if _, ok := key.(*interpreter.CompositeValue); ok { - key = key.Clone(inter) - } + withoutAtreeStorageValidationEnabled(inter, func() struct{} { - value := keyValues[insertCount][1].Clone(inter) + removeDictionary( + inter, + orgOwner, + dictionaryStorageMapKey, + ) - dictionary.Insert( - inter, - interpreter.EmptyLocationRange, - key, - value, - ) - insertCount++ - } else { - key := keyValues[deleteCount][0] - orgValue := keyValues[deleteCount][1] + return struct{}{} + }) - removedValue := dictionary.Remove(inter, interpreter.EmptyLocationRange, key) + checkDictionary( + t, + inter, + transferred, + expectedValue, + newOwner, + ) - require.IsType(t, &interpreter.SomeValue{}, removedValue) - 
someValue := removedValue.(*interpreter.SomeValue) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } - // Removed value must be same as the original value - innerValue := someValue.InnerValue(inter, interpreter.EmptyLocationRange) - AssertValuesEqual(t, inter, orgValue, innerValue) + resetStorage() - deleteCount++ - } - } + transferred = readDictionary( + t, + inter, + newOwner, + transferredStorageMapKey, + ) - // Dictionary must be empty - require.Equal(t, 0, dictionary.Count()) + checkDictionary( + t, + inter, + transferred, + expectedValue, + newOwner, + ) - storageSize, slabCounts := getSlabStorageSize(t, storage) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } - // Storage size after removals should be same as the size before insertion. - assert.Equal(t, startingStorageSize, storageSize) - assert.Equal(t, startingSlabCounts, slabCounts) + // TODO: check deep removal cleaned up everything in original account (storage size, slab count) }) - t.Run("move", func(t *testing.T) { - newOwner := atree.Address{'B'} - - entries := newValueMap(numberOfValues) + t.Run("insert", func(t *testing.T) { + t.Parallel() - keyValues := make([]interpreter.Value, numberOfValues*2) - for i := 0; i < numberOfValues; i++ { - key := r.randomHashableValue(inter) - value := r.randomStorableValue(inter, 0) + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) - entries.put(inter, key, value) + inter, resetStorage := newRandomValueTestInterpreter(t) - keyValues[i*2] = key - keyValues[i*2+1] = value - } + dictionary, expectedValue := createDictionary(t, &r, inter) - dictionary := interpreter.NewDictionaryValueWithAddress( + checkDictionary( + t, inter, - interpreter.EmptyLocationRange, - &interpreter.DictionaryStaticType{ - KeyType: interpreter.PrimitiveStaticTypeAnyStruct, - ValueType: interpreter.PrimitiveStaticTypeAnyStruct, - }, + dictionary, + 
expectedValue, orgOwner, - keyValues..., ) - require.Equal(t, entries.size(), dictionary.Count()) + resetStorage() - movedDictionary := dictionary.Transfer( + dictionary = readDictionary( + t, inter, - interpreter.EmptyLocationRange, - newOwner, - true, - nil, - nil, - true, // dictionary is standalone. - ).(*interpreter.DictionaryValue) - - require.Equal(t, entries.size(), movedDictionary.Count()) - - // Cleanup the slab of original dictionary. - err := storage.Remove(dictionary.SlabID()) - require.NoError(t, err) - - // Check the values - entries.foreach(func(orgKey, orgValue interpreter.Value) (exit bool) { - exists := movedDictionary.ContainsKey(inter, interpreter.EmptyLocationRange, orgKey) - require.True(t, bool(exists)) + orgOwner, + dictionaryStorageMapKey, + ) - value, found := movedDictionary.Get(inter, interpreter.EmptyLocationRange, orgKey) - require.True(t, found) - AssertValuesEqual(t, inter, orgValue, value) + checkDictionary( + t, + inter, + dictionary, + expectedValue, + orgOwner, + ) - return false - }) + // Insert new values into the dictionary. + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. 
- owner := movedDictionary.GetOwner() - assert.Equal(t, newOwner[:], owner[:]) - }) -} + numberOfValues := r.randomInt(r.containerMaxSize) -func TestInterpretRandomArrayOperations(t *testing.T) { - if !*runSmokeTests { - t.Skip("smoke tests are disabled") - } + for i := 0; i < numberOfValues; i++ { - r := newRandomValueGenerator() - t.Logf("seed: %d", r.seed) + // Generate a unique key + var key cadence.Value + var importedKey interpreter.Value + for { + key = r.randomHashableValue(inter) + importedKey = importValue(t, inter, key) - storage := newUnmeteredInMemoryStorage() - inter, err := interpreter.NewInterpreter( - &interpreter.Program{ - Program: ast.NewProgram(nil, []ast.Declaration{}), - Elaboration: sema.NewElaboration(nil), - }, - TestLocation, - &interpreter.Config{ - Storage: storage, - ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { - return interpreter.VirtualImport{ - Elaboration: inter.Program.Elaboration, + if !dictionary.ContainsKey( + inter, + interpreter.EmptyLocationRange, + importedKey, + ) { + break } - }, - }, - ) - require.NoError(t, err) + } - numberOfValues := r.randomInt(containerMaxSize) + value := r.randomStorableValue(inter, 0) + importedValue := importValue(t, inter, value) - var testArray, copyOfTestArray *interpreter.ArrayValue - var storageSize, slabCounts int + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. 
- elements := make([]interpreter.Value, numberOfValues) - orgOwner := common.Address{'A'} + _ = withoutAtreeStorageValidationEnabled(inter, func() struct{} { - t.Run("construction", func(t *testing.T) { - values := make([]interpreter.Value, numberOfValues) - for i := 0; i < numberOfValues; i++ { - value := r.randomStorableValue(inter, 0) - elements[i] = value - values[i] = value.Clone(inter) + existing := dictionary.Insert( + inter, + interpreter.EmptyLocationRange, + importedKey, + importedValue, + ) + require.Equal(t, + interpreter.NilOptionalValue, + existing, + ) + return struct{}{} + }) + + expectedValue.Pairs = append( + expectedValue.Pairs, + cadence.KeyValuePair{ + Key: key, + Value: value, + }, + ) } - testArray = interpreter.NewArrayValue( + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + checkDictionary( + t, inter, - interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, + dictionary, + expectedValue, orgOwner, - values..., ) - storageSize, slabCounts = getSlabStorageSize(t, storage) - - require.Equal(t, len(elements), testArray.Count()) - - for index, orgElement := range elements { - element := testArray.Get(inter, interpreter.EmptyLocationRange, index) - AssertValuesEqual(t, inter, orgElement, element) - } - - owner := testArray.GetOwner() - assert.Equal(t, orgOwner, owner) - }) - - t.Run("iterate", func(t *testing.T) { - require.Equal(t, testArray.Count(), len(elements)) + resetStorage() - index := 0 - testArray.Iterate( + dictionary = readDictionary( + t, inter, - func(element interpreter.Value) (resume bool) { - orgElement := elements[index] - AssertValuesEqual(t, inter, orgElement, element) - - elementByIndex := testArray.Get(inter, interpreter.EmptyLocationRange, index) - AssertValuesEqual(t, inter, element, elementByIndex) - - index++ - return true - }, - false, - interpreter.EmptyLocationRange, + orgOwner, + 
dictionaryStorageMapKey, ) - }) - t.Run("deep copy", func(t *testing.T) { - newOwner := atree.Address{'B'} - copyOfTestArray = testArray.Transfer( + checkDictionary( + t, inter, - interpreter.EmptyLocationRange, - newOwner, - false, - nil, - nil, - true, // testArray is standalone. - ).(*interpreter.ArrayValue) - - require.Equal(t, len(elements), copyOfTestArray.Count()) + dictionary, + expectedValue, + orgOwner, + ) - for index, orgElement := range elements { - element := copyOfTestArray.Get(inter, interpreter.EmptyLocationRange, index) - AssertValuesEqual(t, inter, orgElement, element) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - - owner := copyOfTestArray.GetOwner() - assert.Equal(t, newOwner[:], owner[:]) }) - t.Run("deep removal", func(t *testing.T) { - copyOfTestArray.DeepRemove(inter, true) - err = storage.Remove(copyOfTestArray.SlabID()) - require.NoError(t, err) - - // deep removal should clean up everything - newStorageSize, newSlabCounts := getSlabStorageSize(t, storage) - assert.Equal(t, slabCounts, newSlabCounts) - assert.Equal(t, storageSize, newStorageSize) - - assert.Equal(t, len(elements), testArray.Count()) + t.Run("remove", func(t *testing.T) { + t.Parallel() - // go over original elements again and check no missing data (no side effect should be found) - for index, orgElement := range elements { - element := testArray.Get(inter, interpreter.EmptyLocationRange, index) - AssertValuesEqual(t, inter, orgElement, element) - } + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) - owner := testArray.GetOwner() - assert.Equal(t, orgOwner, owner) - }) + inter, resetStorage := newRandomValueTestInterpreter(t) - t.Run("insert", func(t *testing.T) { - newElements := make([]interpreter.Value, numberOfValues) + dictionary, expectedValue := createDictionary(t, &r, inter) - testArray = interpreter.NewArrayValue( + checkDictionary( + t, inter, - 
interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, + dictionary, + expectedValue, orgOwner, ) - require.Equal(t, 0, testArray.Count()) + resetStorage() - for i := 0; i < numberOfValues; i++ { - element := r.randomStorableValue(inter, 0) - newElements[i] = element + dictionary = readDictionary( + t, + inter, + orgOwner, + dictionaryStorageMapKey, + ) - testArray.Insert( - inter, - interpreter.EmptyLocationRange, - i, - element.Clone(inter), - ) - } + checkDictionary( + t, + inter, + dictionary, + expectedValue, + orgOwner, + ) - require.Equal(t, len(newElements), testArray.Count()) + // Remove + for _, pair := range expectedValue.Pairs { - // Go over original values again and check no missing data (no side effect should be found) - for index, element := range newElements { - value := testArray.Get(inter, interpreter.EmptyLocationRange, index) - AssertValuesEqual(t, inter, element, value) - } - }) + key := importValue(t, inter, pair.Key) - t.Run("append", func(t *testing.T) { - newElements := make([]interpreter.Value, numberOfValues) + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. 
- testArray = interpreter.NewArrayValue( - inter, - interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, - orgOwner, - ) + removedValue := withoutAtreeStorageValidationEnabled(inter, func() interpreter.OptionalValue { + return dictionary.Remove(inter, interpreter.EmptyLocationRange, key) + }) - require.Equal(t, 0, testArray.Count()) + require.IsType(t, &interpreter.SomeValue{}, removedValue) + someValue := removedValue.(*interpreter.SomeValue) - for i := 0; i < numberOfValues; i++ { - element := r.randomStorableValue(inter, 0) - newElements[i] = element + value := importValue(t, inter, pair.Value) - testArray.Append( - inter, - interpreter.EmptyLocationRange, - element.Clone(inter), - ) + // Removed value must be same as the original value + innerValue := someValue.InnerValue(inter, interpreter.EmptyLocationRange) + AssertValuesEqual(t, inter, value, innerValue) } - require.Equal(t, len(newElements), testArray.Count()) - - // Go over original values again and check no missing data (no side effect should be found) - for index, element := range newElements { - value := testArray.Get(inter, interpreter.EmptyLocationRange, index) - AssertValuesEqual(t, inter, element, value) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - }) - t.Run("remove", func(t *testing.T) { - newElements := make([]interpreter.Value, numberOfValues) + expectedValue = cadence.Dictionary{}. 
+ WithType(expectedValue.Type().(*cadence.DictionaryType)) - for i := 0; i < numberOfValues; i++ { - newElements[i] = r.randomStorableValue(inter, 0) - } + // Dictionary must be empty + require.Equal(t, 0, dictionary.Count()) - testArray = interpreter.NewArrayValue( + checkDictionary( + t, inter, - interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, + dictionary, + expectedValue, orgOwner, ) - require.Equal(t, 0, testArray.Count()) - - // Get the initial storage size before inserting values - startingStorageSize, startingSlabCounts := getSlabStorageSize(t, storage) - - // Insert - for index, element := range newElements { - testArray.Insert( - inter, - interpreter.EmptyLocationRange, - index, - element.Clone(inter), - ) - } + resetStorage() - require.Equal(t, len(newElements), testArray.Count()) + dictionary = readDictionary( + t, + inter, + orgOwner, + dictionaryStorageMapKey, + ) - // Remove - for _, element := range newElements { - removedValue := testArray.Remove(inter, interpreter.EmptyLocationRange, 0) + checkDictionary( + t, + inter, + dictionary, + expectedValue, + orgOwner, + ) - // Removed value must be same as the original value - AssertValuesEqual(t, inter, element, removedValue) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - // Array must be empty - require.Equal(t, 0, testArray.Count()) + // TODO: check storage size, slab count + }) - storageSize, slabCounts := getSlabStorageSize(t, storage) + t.Run("update", func(t *testing.T) { + t.Parallel() - // Storage size after removals should be same as the size before insertion. 
- assert.Equal(t, startingStorageSize, storageSize) - assert.Equal(t, startingSlabCounts, slabCounts) - }) + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) - t.Run("random insert & remove", func(t *testing.T) { - elements := make([]interpreter.Value, numberOfValues) + inter, resetStorage := newRandomValueTestInterpreter(t) - for i := 0; i < numberOfValues; i++ { - elements[i] = r.randomStorableValue(inter, 0) - } + dictionary, expectedValue := createDictionary(t, &r, inter) - testArray = interpreter.NewArrayValue( + checkDictionary( + t, inter, - interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, + dictionary, + expectedValue, orgOwner, ) - require.Equal(t, 0, testArray.Count()) + resetStorage() - // Get the initial storage size before inserting values - startingStorageSize, startingSlabCounts := getSlabStorageSize(t, storage) + dictionary = readDictionary( + t, + inter, + orgOwner, + dictionaryStorageMapKey, + ) - insertCount := 0 - deleteCount := 0 + checkDictionary( + t, + inter, + dictionary, + expectedValue, + orgOwner, + ) - isInsert := func() bool { - if testArray.Count() == 0 { - return true - } + elementCount := dictionary.Count() - if insertCount >= numberOfValues { - return false - } + // Generate new values - return r.randomInt(1) == 1 + newValues := make([]cadence.Value, len(expectedValue.Pairs)) + for i := range expectedValue.Pairs { + newValues[i] = r.randomStorableValue(inter, 0) } - for insertCount < numberOfValues || testArray.Count() > 0 { - // Perform a random operation out of insert/remove - if isInsert() { - value := elements[insertCount].Clone(inter) + // Update + for i, pair := range expectedValue.Pairs { + + key := importValue(t, inter, pair.Key) + newValue := importValue(t, inter, newValues[i]) + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. 
- testArray.Append( + existingValue := withoutAtreeStorageValidationEnabled(inter, func() interpreter.OptionalValue { + return dictionary.Insert( inter, interpreter.EmptyLocationRange, - value, + key, + newValue, ) - insertCount++ - } else { - orgValue := elements[deleteCount] - removedValue := testArray.RemoveFirst(inter, interpreter.EmptyLocationRange) - - // Removed value must be same as the original value - AssertValuesEqual(t, inter, orgValue, removedValue) + }) - deleteCount++ - } - } - - // Dictionary must be empty - require.Equal(t, 0, testArray.Count()) + require.IsType(t, &interpreter.SomeValue{}, existingValue) + someValue := existingValue.(*interpreter.SomeValue) - storageSize, slabCounts := getSlabStorageSize(t, storage) + value := importValue(t, inter, pair.Value) - // Storage size after removals should be same as the size before insertion. - assert.Equal(t, startingStorageSize, storageSize) - assert.Equal(t, startingSlabCounts, slabCounts) - }) + // Removed value must be same as the original value + innerValue := someValue.InnerValue(inter, interpreter.EmptyLocationRange) + AssertValuesEqual(t, inter, value, innerValue) - t.Run("move", func(t *testing.T) { - values := make([]interpreter.Value, numberOfValues) - elements := make([]interpreter.Value, numberOfValues) + expectedValue.Pairs[i].Value = newValues[i] + } - for i := 0; i < numberOfValues; i++ { - value := r.randomStorableValue(inter, 0) - elements[i] = value - values[i] = value.Clone(inter) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - array := interpreter.NewArrayValue( + // Dictionary must have same number of key-value pairs + require.Equal(t, elementCount, dictionary.Count()) + + checkDictionary( + t, inter, - interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, + dictionary, + expectedValue, orgOwner, - values..., ) - require.Equal(t, len(elements), array.Count()) - - 
owner := array.GetOwner() - assert.Equal(t, orgOwner, owner) + resetStorage() - newOwner := atree.Address{'B'} - movedArray := array.Transfer( + dictionary = readDictionary( + t, inter, - interpreter.EmptyLocationRange, - newOwner, - true, - nil, - nil, - true, // array is standalone. - ).(*interpreter.ArrayValue) - - require.Equal(t, len(elements), movedArray.Count()) + orgOwner, + dictionaryStorageMapKey, + ) - // Cleanup the slab of original array. - err := storage.Remove(array.SlabID()) - require.NoError(t, err) + checkDictionary( + t, + inter, + dictionary, + expectedValue, + orgOwner, + ) - // Check the elements - for index, orgElement := range elements { - element := movedArray.Get(inter, interpreter.EmptyLocationRange, index) - AssertValuesEqual(t, inter, orgElement, element) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - owner = movedArray.GetOwner() - assert.Equal(t, newOwner[:], owner[:]) + // TODO: check storage size, slab count }) } -func TestInterpretRandomCompositeValueOperations(t *testing.T) { +func TestInterpretSmokeRandomCompositeOperations(t *testing.T) { if !*runSmokeTests { t.Skip("smoke tests are disabled") } - r := newRandomValueGenerator() - t.Logf("seed: %d", r.seed) + t.Parallel() - storage := newUnmeteredInMemoryStorage() - inter, err := interpreter.NewInterpreter( - &interpreter.Program{ - Program: ast.NewProgram(nil, []ast.Declaration{}), - Elaboration: sema.NewElaboration(nil), - }, - TestLocation, - &interpreter.Config{ - Storage: storage, - ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { - return interpreter.VirtualImport{ - Elaboration: inter.Program.Elaboration, - } - }, - }, - ) - require.NoError(t, err) + orgOwner := common.Address{'A'} - var testComposite, copyOfTestComposite *interpreter.CompositeValue - var storageSize, slabCounts int - var orgFields map[string]interpreter.Value + const compositeStorageMapKey = 
interpreter.StringStorageMapKey("composite") - fieldsCount := r.randomInt(compositeMaxFields) - orgOwner := common.Address{'A'} + writeComposite := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + composite *interpreter.CompositeValue, + ) { + inter.Storage(). + GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + true, + ). + WriteValue( + inter, + storageMapKey, + composite, + ) + } - t.Run("construction", func(t *testing.T) { - testComposite, orgFields = r.randomCompositeValue(orgOwner, fieldsCount, inter, 0) + removeComposite := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) { + inter.Storage(). + GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + true, + ). + RemoveValue( + inter, + storageMapKey, + ) + } + + readComposite := func( + t *testing.T, + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) *interpreter.CompositeValue { + storageMap := inter.Storage().GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + false, + ) + require.NotNil(t, storageMap) - storageSize, slabCounts = getSlabStorageSize(t, storage) + readValue := storageMap.ReadValue(inter, storageMapKey) + require.NotNil(t, readValue) + + require.IsType(t, &interpreter.CompositeValue{}, readValue) + return readValue.(*interpreter.CompositeValue) + } - for fieldName, orgFieldValue := range orgFields { - fieldValue := testComposite.GetField(inter, interpreter.EmptyLocationRange, fieldName) - AssertValuesEqual(t, inter, orgFieldValue, fieldValue) + createComposite := func( + t *testing.T, + r *randomValueGenerator, + inter *interpreter.Interpreter, + ) ( + *interpreter.CompositeValue, + cadence.Struct, + ) { + expectedValue := r.randomStructValue(inter, 0) + + fieldsMappedByName := expectedValue.FieldsMappedByName() + fields := 
make([]interpreter.CompositeField, 0, len(fieldsMappedByName)) + for name, field := range fieldsMappedByName { + + value := importValue(t, inter, field) + + fields = append(fields, interpreter.CompositeField{ + Name: name, + Value: value, + }) } - owner := testComposite.GetOwner() - assert.Equal(t, orgOwner, owner) - }) + // Construct a composite directly in the owner's account. + // However, the composite is not referenced by the root of the storage yet + // (a storage map), so atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. - t.Run("iterate", func(t *testing.T) { - fieldCount := 0 - testComposite.ForEachField(inter, func(name string, value interpreter.Value) (resume bool) { - orgValue, ok := orgFields[name] - require.True(t, ok) - AssertValuesEqual(t, inter, orgValue, value) - fieldCount++ - - // continue iteration - return true - }, interpreter.EmptyLocationRange) - - assert.Equal(t, len(orgFields), fieldCount) - }) + composite := withoutAtreeStorageValidationEnabled( + inter, + func() *interpreter.CompositeValue { + return interpreter.NewCompositeValue( + inter, + interpreter.EmptyLocationRange, + expectedValue.StructType.Location, + expectedValue.StructType.QualifiedIdentifier, + common.CompositeKindStructure, + fields, + orgOwner, + ) + }, + ) - t.Run("deep copy", func(t *testing.T) { - newOwner := atree.Address{'B'} + // Store the composite in a storage map, so that the composite's slab + // is referenced by the root of the storage. - copyOfTestComposite = testComposite.Transfer( + writeComposite( inter, - interpreter.EmptyLocationRange, - newOwner, - false, - nil, - nil, - true, // testComposite is standalone. 
- ).(*interpreter.CompositeValue) + orgOwner, + compositeStorageMapKey, + composite, + ) - for name, orgValue := range orgFields { - value := copyOfTestComposite.GetField(inter, interpreter.EmptyLocationRange, name) - AssertValuesEqual(t, inter, orgValue, value) - } + return composite, expectedValue + } - owner := copyOfTestComposite.GetOwner() - assert.Equal(t, newOwner[:], owner[:]) - }) + checkComposite := func( + t *testing.T, + inter *interpreter.Interpreter, + composite *interpreter.CompositeValue, + expectedValue cadence.Struct, + expectedOwner common.Address, + ) { + fieldsMappedByName := expectedValue.FieldsMappedByName() - t.Run("deep remove", func(t *testing.T) { - copyOfTestComposite.DeepRemove(inter, true) - err = storage.Remove(copyOfTestComposite.SlabID()) - require.NoError(t, err) + require.Equal(t, len(fieldsMappedByName), composite.FieldCount()) + + for name, field := range fieldsMappedByName { - // deep removal should clean up everything - newStorageSize, newSlabCounts := getSlabStorageSize(t, storage) - assert.Equal(t, slabCounts, newSlabCounts) - assert.Equal(t, storageSize, newStorageSize) + value := composite.GetMember(inter, interpreter.EmptyLocationRange, name) - // go over original values again and check no missing data (no side effect should be found) - for name, orgValue := range orgFields { - value := testComposite.GetField(inter, interpreter.EmptyLocationRange, name) - AssertValuesEqual(t, inter, orgValue, value) + fieldValue := importValue(t, inter, field) + AssertValuesEqual(t, inter, fieldValue, value) } - owner := testComposite.GetOwner() - assert.Equal(t, orgOwner, owner) - }) + owner := composite.GetOwner() + assert.Equal(t, expectedOwner, owner) + } - t.Run("remove field", func(t *testing.T) { - newOwner := atree.Address{'c'} + t.Run("construction", func(t *testing.T) { - composite := testComposite.Transfer( - inter, - interpreter.EmptyLocationRange, - newOwner, - false, - nil, - nil, - true, // testComposite is standalone. 
- ).(*interpreter.CompositeValue) + t.Parallel() - require.NoError(t, err) + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) - for name := range orgFields { - composite.RemoveField(inter, interpreter.EmptyLocationRange, name) - value := composite.GetField(inter, interpreter.EmptyLocationRange, name) - assert.Nil(t, value) - } - }) + inter, resetStorage := newRandomValueTestInterpreter(t) - t.Run("move", func(t *testing.T) { - composite, fields := r.randomCompositeValue(orgOwner, fieldsCount, inter, 0) + composite, expectedValue := createComposite(t, &r, inter) - owner := composite.GetOwner() - assert.Equal(t, orgOwner, owner) + checkComposite( + t, + inter, + composite, + expectedValue, + orgOwner, + ) - newOwner := atree.Address{'B'} - movedComposite := composite.Transfer( + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + resetStorage() + + composite = readComposite( + t, inter, - interpreter.EmptyLocationRange, - newOwner, - true, - nil, - nil, - true, // composite is standalone. - ).(*interpreter.CompositeValue) + orgOwner, + compositeStorageMapKey, + ) - // Cleanup the slab of original composite. 
- err := storage.Remove(composite.SlabID()) - require.NoError(t, err) + checkComposite( + t, + inter, + composite, + expectedValue, + orgOwner, + ) - // Check the elements - for fieldName, orgFieldValue := range fields { - fieldValue := movedComposite.GetField(inter, interpreter.EmptyLocationRange, fieldName) - AssertValuesEqual(t, inter, orgFieldValue, fieldValue) + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - owner = composite.GetOwner() - assert.Equal(t, orgOwner, owner) }) -} -func (r randomValueGenerator) randomCompositeValue( - orgOwner common.Address, - fieldsCount int, - inter *interpreter.Interpreter, - currentDepth int, -) (*interpreter.CompositeValue, map[string]interpreter.Value) { + t.Run("move (transfer and deep remove)", func(t *testing.T) { - orgFields := make(map[string]interpreter.Value, fieldsCount) + t.Parallel() - identifier := r.randomUTF8String() + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) - location := common.AddressLocation{ - Address: orgOwner, - Name: identifier, - } + inter, resetStorage := newRandomValueTestInterpreter(t) - fields := make([]interpreter.CompositeField, fieldsCount) + original, expectedValue := createComposite(t, &r, inter) - fieldNames := make(map[string]any, fieldsCount) + checkComposite( + t, + inter, + original, + expectedValue, + orgOwner, + ) - for i := 0; i < fieldsCount; { - fieldName := r.randomUTF8String() + resetStorage() - // avoid duplicate field names - if _, ok := fieldNames[fieldName]; ok { - continue - } - fieldNames[fieldName] = struct{}{} + original = readComposite( + t, + inter, + orgOwner, + compositeStorageMapKey, + ) - field := interpreter.NewUnmeteredCompositeField( - fieldName, - r.randomStorableValue(inter, currentDepth+1), + checkComposite( + t, + inter, + original, + expectedValue, + orgOwner, ) - fields[i] = field - orgFields[field.Name] = field.Value.Clone(inter) + // Transfer the composite 
to a new owner - i++ - } + newOwner := common.Address{'B'} - kind := common.CompositeKindStructure + transferred := original.Transfer( + inter, + interpreter.EmptyLocationRange, + atree.Address(newOwner), + false, + nil, + nil, + false, + ).(*interpreter.CompositeValue) - compositeType := &sema.CompositeType{ - Location: location, - Identifier: identifier, - Kind: kind, - } + // Store the transferred composite in a storage map, so that the composite's slab + // is referenced by the root of the storage. - compositeType.Members = &sema.StringMemberOrderedMap{} - for _, field := range fields { - compositeType.Members.Set( - field.Name, - sema.NewUnmeteredPublicConstantFieldMember( - compositeType, - field.Name, - sema.AnyStructType, - "", - ), + const transferredStorageMapKey = interpreter.StringStorageMapKey("transferred") + + writeComposite( + inter, + newOwner, + transferredStorageMapKey, + transferred, ) - } - // Add the type to the elaboration, to short-circuit the type-lookup - inter.Program.Elaboration.SetCompositeType( - compositeType.ID(), - compositeType, - ) + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + removeComposite( + inter, + orgOwner, + compositeStorageMapKey, + ) - testComposite := interpreter.NewCompositeValue( - inter, - interpreter.EmptyLocationRange, - location, - identifier, - kind, - fields, - orgOwner, - ) - return testComposite, orgFields -} + return struct{}{} + }) -func getSlabStorageSize(t *testing.T, storage interpreter.InMemoryStorage) (totalSize int, slabCounts int) { - slabs, err := storage.Encode() - require.NoError(t, err) + checkComposite( + t, + inter, + transferred, + expectedValue, + newOwner, + ) - for id, slab := range slabs { - if id.HasTempAddress() { - continue + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - totalSize += len(slab) - slabCounts++ - } - - return -} - -type randomValueGenerator struct { - seed int64 - rand *rand.Rand -} + resetStorage() -func 
newRandomValueGenerator() randomValueGenerator { - seed := *smokeTestSeed - if seed == -1 { - seed = time.Now().UnixNano() - } + transferred = readComposite( + t, + inter, + newOwner, + transferredStorageMapKey, + ) - return randomValueGenerator{ - seed: seed, - rand: rand.New(rand.NewSource(seed)), - } -} -func (r randomValueGenerator) randomStorableValue(inter *interpreter.Interpreter, currentDepth int) interpreter.Value { - n := 0 - if currentDepth < containerMaxDepth { - n = r.randomInt(randomValueKindComposite) - } else { - n = r.randomInt(randomValueKindCapability) - } + checkComposite( + t, + inter, + transferred, + expectedValue, + newOwner, + ) - switch n { + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } - // Non-hashable - case randomValueKindVoid: - return interpreter.Void - case randomValueKindNil: - return interpreter.Nil - case randomValueKindDictionaryVariant1, - randomValueKindDictionaryVariant2: - return r.randomDictionaryValue(inter, currentDepth) - case randomValueKindArrayVariant1, - randomValueKindArrayVariant2: - return r.randomArrayValue(inter, currentDepth) - case randomValueKindComposite: - fieldsCount := r.randomInt(compositeMaxFields) - v, _ := r.randomCompositeValue(common.ZeroAddress, fieldsCount, inter, currentDepth) - return v - case randomValueKindCapability: - return interpreter.NewUnmeteredCapabilityValue( - interpreter.UInt64Value(r.randomInt(math.MaxInt-1)), - r.randomAddressValue(), - &interpreter.ReferenceStaticType{ - Authorization: interpreter.UnauthorizedAccess, - ReferencedType: interpreter.PrimitiveStaticTypeAnyStruct, - }, - ) - case randomValueKindSome: - return interpreter.NewUnmeteredSomeValueNonCopying( - r.randomStorableValue(inter, currentDepth+1), - ) + // TODO: check deep removal cleaned up everything in original account (storage size, slab count) + }) - // Hashable - default: - return r.generateRandomHashableValue(inter, n) - } -} + t.Run("update", func(t *testing.T) { + 
t.Parallel() -func (r randomValueGenerator) randomHashableValue(interpreter *interpreter.Interpreter) interpreter.Value { - return r.generateRandomHashableValue(interpreter, r.randomInt(randomValueKindEnum)) -} + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) -func (r randomValueGenerator) generateRandomHashableValue(inter *interpreter.Interpreter, n int) interpreter.Value { - switch n { + inter, resetStorage := newRandomValueTestInterpreter(t) - // Int* - case randomValueKindInt: - return interpreter.NewUnmeteredIntValueFromInt64(int64(r.randomSign()) * r.rand.Int63()) - case randomValueKindInt8: - return interpreter.NewUnmeteredInt8Value(int8(r.randomInt(math.MaxUint8))) - case randomValueKindInt16: - return interpreter.NewUnmeteredInt16Value(int16(r.randomInt(math.MaxUint16))) - case randomValueKindInt32: - return interpreter.NewUnmeteredInt32Value(int32(r.randomSign()) * r.rand.Int31()) - case randomValueKindInt64: - return interpreter.NewUnmeteredInt64Value(int64(r.randomSign()) * r.rand.Int63()) - case randomValueKindInt128: - return interpreter.NewUnmeteredInt128ValueFromInt64(int64(r.randomSign()) * r.rand.Int63()) - case randomValueKindInt256: - return interpreter.NewUnmeteredInt256ValueFromInt64(int64(r.randomSign()) * r.rand.Int63()) + composite, expectedValue := createComposite(t, &r, inter) - // UInt* - case randomValueKindUInt: - return interpreter.NewUnmeteredUIntValueFromUint64(r.rand.Uint64()) - case randomValueKindUInt8: - return interpreter.NewUnmeteredUInt8Value(uint8(r.randomInt(math.MaxUint8))) - case randomValueKindUInt16: - return interpreter.NewUnmeteredUInt16Value(uint16(r.randomInt(math.MaxUint16))) - case randomValueKindUInt32: - return interpreter.NewUnmeteredUInt32Value(r.rand.Uint32()) - case randomValueKindUInt64Variant1, - randomValueKindUInt64Variant2, - randomValueKindUInt64Variant3, - randomValueKindUInt64Variant4: // should be more common - return 
interpreter.NewUnmeteredUInt64Value(r.rand.Uint64()) - case randomValueKindUInt128: - return interpreter.NewUnmeteredUInt128ValueFromUint64(r.rand.Uint64()) - case randomValueKindUInt256: - return interpreter.NewUnmeteredUInt256ValueFromUint64(r.rand.Uint64()) + checkComposite( + t, + inter, + composite, + expectedValue, + orgOwner, + ) - // Word* - case randomValueKindWord8: - return interpreter.NewUnmeteredWord8Value(uint8(r.randomInt(math.MaxUint8))) - case randomValueKindWord16: - return interpreter.NewUnmeteredWord16Value(uint16(r.randomInt(math.MaxUint16))) - case randomValueKindWord32: - return interpreter.NewUnmeteredWord32Value(r.rand.Uint32()) - case randomValueKindWord64: - return interpreter.NewUnmeteredWord64Value(r.rand.Uint64()) - case randomValueKindWord128: - return interpreter.NewUnmeteredWord128ValueFromUint64(r.rand.Uint64()) - case randomValueKindWord256: - return interpreter.NewUnmeteredWord256ValueFromUint64(r.rand.Uint64()) + resetStorage() - // (U)Fix* - case randomValueKindFix64: - return interpreter.NewUnmeteredFix64ValueWithInteger( - int64(r.randomSign())*r.rand.Int63n(sema.Fix64TypeMaxInt), - interpreter.EmptyLocationRange, + composite = readComposite( + t, + inter, + orgOwner, + compositeStorageMapKey, ) - case randomValueKindUFix64: - return interpreter.NewUnmeteredUFix64ValueWithInteger( - uint64(r.rand.Int63n( - int64(sema.UFix64TypeMaxInt), - )), - interpreter.EmptyLocationRange, + + checkComposite( + t, + inter, + composite, + expectedValue, + orgOwner, ) - // String - case randomValueKindStringVariant1, - randomValueKindStringVariant2, - randomValueKindStringVariant3, - randomValueKindStringVariant4: // small string - should be more common - size := r.randomInt(255) - return interpreter.NewUnmeteredStringValue(r.randomUTF8StringOfSize(size)) - case randomValueKindStringVariant5: // large string - size := r.randomInt(4048) + 255 - return interpreter.NewUnmeteredStringValue(r.randomUTF8StringOfSize(size)) + typeID := 
expectedValue.StructType.Location. + TypeID(nil, expectedValue.StructType.QualifiedIdentifier) + compositeType := inter.Program.Elaboration.CompositeType(typeID) - case randomValueKindBoolVariantTrue: - return interpreter.TrueValue - case randomValueKindBoolVariantFalse: - return interpreter.FalseValue + typeFieldCount := len(compositeType.Fields) + require.Equal(t, typeFieldCount, len(expectedValue.FieldsMappedByName())) + require.Equal(t, typeFieldCount, composite.FieldCount()) - case randomValueKindAddress: - return r.randomAddressValue() + // Generate new values - case randomValueKindPath: - return r.randomPathValue() + newValues := make([]cadence.Value, typeFieldCount) - case randomValueKindEnum: - // Get a random integer subtype to be used as the raw-type of enum - typ := r.randomInt(randomValueKindWord64) + for i := range compositeType.Fields { + newValues[i] = r.randomStorableValue(inter, 0) + } - rawValue := r.generateRandomHashableValue(inter, typ).(interpreter.NumberValue) + // Update + for i, name := range compositeType.Fields { - identifier := r.randomUTF8String() + newValue := importValue(t, inter, newValues[i]) - address := r.randomAddressValue() + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. - location := common.AddressLocation{ - Address: common.Address(address), - Name: identifier, + existed := withoutAtreeStorageValidationEnabled(inter, func() bool { + return composite.SetMember( + inter, + interpreter.EmptyLocationRange, + name, + newValue, + ) + }) + + require.True(t, existed) } - enumType := &sema.CompositeType{ - Identifier: identifier, - EnumRawType: r.intSubtype(typ), - Kind: common.CompositeKindEnum, - Location: location, + expectedValue = cadence.NewStruct(newValues). 
+ WithType(expectedValue.Type().(*cadence.StructType)) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - inter.Program.Elaboration.SetCompositeType( - enumType.ID(), - enumType, + // Composite must have same number of key-value pairs + require.Equal(t, typeFieldCount, composite.FieldCount()) + + checkComposite( + t, + inter, + composite, + expectedValue, + orgOwner, ) - enum := interpreter.NewCompositeValue( + resetStorage() + + composite = readComposite( + t, inter, - interpreter.EmptyLocationRange, - location, - enumType.QualifiedIdentifier(), - enumType.Kind, - []interpreter.CompositeField{ - { - Name: sema.EnumRawValueFieldName, - Value: rawValue, - }, - }, - common.ZeroAddress, + orgOwner, + compositeStorageMapKey, + ) + + checkComposite( + t, + inter, + composite, + expectedValue, + orgOwner, ) - if enum.GetField(inter, interpreter.EmptyLocationRange, sema.EnumRawValueFieldName) == nil { - panic("enum without raw value") + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - return enum + // TODO: check storage size, slab count + }) +} - default: - panic(fmt.Sprintf("unsupported: %d", n)) +func TestInterpretSmokeRandomArrayOperations(t *testing.T) { + if !*runSmokeTests { + t.Skip("smoke tests are disabled") } -} -func (r randomValueGenerator) randomSign() int { - if r.randomInt(1) == 1 { - return 1 + t.Parallel() + + orgOwner := common.Address{'A'} + + const arrayStorageMapKey = interpreter.StringStorageMapKey("array") + + writeArray := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + array *interpreter.ArrayValue, + ) { + inter.Storage(). + GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + true, + ). 
+ WriteValue( + inter, + storageMapKey, + array, + ) } - return -1 -} + removeArray := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) { + inter.Storage(). + GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + true, + ). + RemoveValue( + inter, + storageMapKey, + ) + } -func (r randomValueGenerator) randomAddressValue() interpreter.AddressValue { - data := make([]byte, 8) - r.rand.Read(data) - return interpreter.NewUnmeteredAddressValueFromBytes(data) -} + readArray := func( + t *testing.T, + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) *interpreter.ArrayValue { + storageMap := inter.Storage().GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + false, + ) + require.NotNil(t, storageMap) -func (r randomValueGenerator) randomPathValue() interpreter.PathValue { - randomDomain := r.rand.Intn(len(common.AllPathDomains)) - identifier := r.randomUTF8String() + readValue := storageMap.ReadValue(inter, storageMapKey) + require.NotNil(t, readValue) - return interpreter.PathValue{ - Domain: common.AllPathDomains[randomDomain], - Identifier: identifier, + require.IsType(t, &interpreter.ArrayValue{}, readValue) + return readValue.(*interpreter.ArrayValue) } -} -func (r randomValueGenerator) randomDictionaryValue( - inter *interpreter.Interpreter, - currentDepth int, -) interpreter.Value { + createArray := func( + t *testing.T, + r *randomValueGenerator, + inter *interpreter.Interpreter, + ) ( + *interpreter.ArrayValue, + cadence.Array, + ) { + expectedValue := r.randomArrayValue(inter, 0) + + elements := make([]interpreter.Value, len(expectedValue.Values)) + for i, value := range expectedValue.Values { + elements[i] = importValue(t, inter, value) + } - entryCount := r.randomInt(containerMaxSize) - keyValues := make([]interpreter.Value, entryCount*2) + // Construct an array directly in the owner's account. 
+ // However, the array is not referenced by the root of the storage yet + // (a storage map), so atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. - for i := 0; i < entryCount; i++ { - key := r.randomHashableValue(inter) - value := r.randomStorableValue(inter, currentDepth+1) - keyValues[i*2] = key - keyValues[i*2+1] = value + array := withoutAtreeStorageValidationEnabled( + inter, + func() *interpreter.ArrayValue { + return interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + orgOwner, + elements..., + ) + }, + ) + + // Store the array in a storage map, so that the array's slab + // is referenced by the root of the storage. + + writeArray( + inter, + orgOwner, + arrayStorageMapKey, + array, + ) + + return array, expectedValue } - return interpreter.NewDictionaryValueWithAddress( - inter, - interpreter.EmptyLocationRange, - &interpreter.DictionaryStaticType{ - KeyType: interpreter.PrimitiveStaticTypeAnyStruct, - ValueType: interpreter.PrimitiveStaticTypeAnyStruct, - }, - common.ZeroAddress, - keyValues..., - ) -} + checkArray := func( + t *testing.T, + inter *interpreter.Interpreter, + array *interpreter.ArrayValue, + expectedValue cadence.Array, + expectedOwner common.Address, + ) { + require.Equal(t, len(expectedValue.Values), array.Count()) -func (r randomValueGenerator) randomInt(upperBound int) int { - return r.rand.Intn(upperBound + 1) -} + for i, value := range expectedValue.Values { + value := importValue(t, inter, value) -func (r randomValueGenerator) randomArrayValue(inter *interpreter.Interpreter, currentDepth int) interpreter.Value { - elementsCount := r.randomInt(containerMaxSize) - elements := make([]interpreter.Value, elementsCount) + element := array.Get(inter, interpreter.EmptyLocationRange, i) - for i := 0; i < elementsCount; i++ { - value := r.randomStorableValue(inter, 
currentDepth+1) - elements[i] = value.Clone(inter) + AssertValuesEqual(t, inter, value, element) + } + + owner := array.GetOwner() + assert.Equal(t, expectedOwner, owner) } - return interpreter.NewArrayValue( - inter, - interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, - common.ZeroAddress, - elements..., - ) -} + checkIteration := func( + t *testing.T, + inter *interpreter.Interpreter, + array *interpreter.ArrayValue, + expectedValue cadence.Array, + ) { + require.Equal(t, len(expectedValue.Values), array.Count()) -func (r randomValueGenerator) intSubtype(n int) sema.Type { - switch n { - // Int - case randomValueKindInt: - return sema.IntType - case randomValueKindInt8: - return sema.Int8Type - case randomValueKindInt16: - return sema.Int16Type - case randomValueKindInt32: - return sema.Int32Type - case randomValueKindInt64: - return sema.Int64Type - case randomValueKindInt128: - return sema.Int128Type - case randomValueKindInt256: - return sema.Int256Type + var iterations int - // UInt - case randomValueKindUInt: - return sema.UIntType - case randomValueKindUInt8: - return sema.UInt8Type - case randomValueKindUInt16: - return sema.UInt16Type - case randomValueKindUInt32: - return sema.UInt32Type - case randomValueKindUInt64Variant1, - randomValueKindUInt64Variant2, - randomValueKindUInt64Variant3, - randomValueKindUInt64Variant4: - return sema.UInt64Type - case randomValueKindUInt128: - return sema.UInt128Type - case randomValueKindUInt256: - return sema.UInt256Type + array.Iterate( + inter, + func(element interpreter.Value) (resume bool) { + value := importValue(t, inter, expectedValue.Values[iterations]) - // Word - case randomValueKindWord8: - return sema.Word8Type - case randomValueKindWord16: - return sema.Word16Type - case randomValueKindWord32: - return sema.Word32Type - case randomValueKindWord64: - return sema.Word64Type - case randomValueKindWord128: - return sema.Word128Type 
- case randomValueKindWord256: - return sema.Word256Type + AssertValuesEqual(t, inter, value, element) - default: - panic(fmt.Sprintf("unsupported: %d", n)) + iterations += 1 + + return true + }, + false, + interpreter.EmptyLocationRange, + ) + + assert.Equal(t, len(expectedValue.Values), iterations) } -} -const ( - // Hashable values - // Int* - randomValueKindInt = iota - randomValueKindInt8 - randomValueKindInt16 - randomValueKindInt32 - randomValueKindInt64 - randomValueKindInt128 - randomValueKindInt256 + t.Run("construction", func(t *testing.T) { - // UInt* - randomValueKindUInt - randomValueKindUInt8 - randomValueKindUInt16 - randomValueKindUInt32 - randomValueKindUInt64Variant1 - randomValueKindUInt64Variant2 - randomValueKindUInt64Variant3 - randomValueKindUInt64Variant4 - randomValueKindUInt128 - randomValueKindUInt256 + t.Parallel() - // Word* - randomValueKindWord8 - randomValueKindWord16 - randomValueKindWord32 - randomValueKindWord64 - randomValueKindWord128 - randomValueKindWord256 + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) - // (U)Fix* - randomValueKindFix64 - randomValueKindUFix64 + inter, resetStorage := newRandomValueTestInterpreter(t) - // String - randomValueKindStringVariant1 - randomValueKindStringVariant2 - randomValueKindStringVariant3 - randomValueKindStringVariant4 - randomValueKindStringVariant5 + array, expectedValue := createArray(t, &r, inter) - randomValueKindBoolVariantTrue - randomValueKindBoolVariantFalse - randomValueKindPath - randomValueKindAddress - randomValueKindEnum + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) - // Non-hashable values - randomValueKindVoid - randomValueKindNil // `Never?` - randomValueKindCapability + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } - // Containers - randomValueKindSome - randomValueKindArrayVariant1 - randomValueKindArrayVariant2 - randomValueKindDictionaryVariant1 - 
randomValueKindDictionaryVariant2 - randomValueKindComposite -) + resetStorage() -func (r randomValueGenerator) randomUTF8String() string { - return r.randomUTF8StringOfSize(8) -} + array = readArray( + t, + inter, + orgOwner, + arrayStorageMapKey, + ) -func (r randomValueGenerator) randomUTF8StringOfSize(size int) string { - identifier := make([]byte, size) - r.rand.Read(identifier) - return strings.ToValidUTF8(string(identifier), "$") -} + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) -type valueMap struct { - values map[any]interpreter.Value - keys map[any]interpreter.Value -} + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + }) -func newValueMap(size int) *valueMap { - return &valueMap{ - values: make(map[any]interpreter.Value, size), - keys: make(map[any]interpreter.Value, size), - } -} + t.Run("iterate", func(t *testing.T) { -type enumKey struct { - location common.Location - qualifiedIdentifier string - kind common.CompositeKind - rawValue interpreter.Value -} + t.Parallel() -func (m *valueMap) put(inter *interpreter.Interpreter, key, value interpreter.Value) { - internalKey := m.internalKey(inter, key) + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) - // Deep copy enum keys. This should be fine since we use an internal key for enums. - // Deep copying other values would mess key-lookup. 
- if _, ok := key.(*interpreter.CompositeValue); ok { - key = key.Clone(inter) - } + inter, resetStorage := newRandomValueTestInterpreter(t) - m.keys[internalKey] = key - m.values[internalKey] = value.Clone(inter) -} + array, expectedValue := createArray(t, &r, inter) -func (m *valueMap) get(inter *interpreter.Interpreter, key interpreter.Value) (interpreter.Value, bool) { - internalKey := m.internalKey(inter, key) - value, ok := m.values[internalKey] - return value, ok -} + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) -func (m *valueMap) foreach(apply func(key, value interpreter.Value) (exit bool)) { - for internalKey, key := range m.keys { - value := m.values[internalKey] - exit := apply(key, value) + checkIteration( + t, + inter, + array, + expectedValue, + ) - if exit { - return + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - } -} -func (m *valueMap) internalKey(inter *interpreter.Interpreter, key interpreter.Value) any { - switch key := key.(type) { - case *interpreter.StringValue: - return *key - case *interpreter.CompositeValue: - return enumKey{ - location: key.Location, - qualifiedIdentifier: key.QualifiedIdentifier, - kind: key.Kind, - rawValue: key.GetField(inter, interpreter.EmptyLocationRange, sema.EnumRawValueFieldName), + resetStorage() + + array = readArray( + t, + inter, + orgOwner, + arrayStorageMapKey, + ) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + checkIteration( + t, + inter, + array, + expectedValue, + ) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) } - case interpreter.Value: - return key - default: - panic("unreachable") - } -} -func (m *valueMap) size() int { - return len(m.keys) -} + }) -// This test is a reproducer for "slab was not reachable from leaves" false alarm. 
-// https://github.com/onflow/cadence/pull/2882#issuecomment-1781298107 -// In this test, storage.CheckHealth() should be called after array.DeepRemove(), -// not in the middle of array.DeepRemove(). -// CheckHealth() is called in the middle of array.DeepRemove() when: -// - array.DeepRemove() calls childArray1 and childArray2 DeepRemove() -// - DeepRemove() calls maybeValidateAtreeValue() -// - maybeValidateAtreeValue() calls CheckHealth() -func TestCheckStorageHealthInMiddleOfDeepRemove(t *testing.T) { + t.Run("move (transfer and deep remove)", func(t *testing.T) { - storage := newUnmeteredInMemoryStorage() - inter, err := interpreter.NewInterpreter( + t.Parallel() + + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) + + inter, resetStorage := newRandomValueTestInterpreter(t) + + original, expectedValue := createArray(t, &r, inter) + + checkArray( + t, + inter, + original, + expectedValue, + orgOwner, + ) + + resetStorage() + + original = readArray( + t, + inter, + orgOwner, + arrayStorageMapKey, + ) + + checkArray( + t, + inter, + original, + expectedValue, + orgOwner, + ) + + // Transfer the array to a new owner + + newOwner := common.Address{'B'} + + transferred := original.Transfer( + inter, + interpreter.EmptyLocationRange, + atree.Address(newOwner), + false, + nil, + nil, + false, + ).(*interpreter.ArrayValue) + + // Store the transferred array in a storage map, so that the array's slab + // is referenced by the root of the storage. 
+ + const transferredStorageMapKey = interpreter.StringStorageMapKey("transferred") + + writeArray( + inter, + newOwner, + transferredStorageMapKey, + transferred, + ) + + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + + removeArray( + inter, + orgOwner, + arrayStorageMapKey, + ) + + return struct{}{} + }) + + checkArray( + t, + inter, + transferred, + expectedValue, + newOwner, + ) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + resetStorage() + + transferred = readArray( + t, + inter, + newOwner, + transferredStorageMapKey, + ) + + checkArray( + t, + inter, + transferred, + expectedValue, + newOwner, + ) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // TODO: check deep removal cleaned up everything in original account (storage size, slab count) + }) + + t.Run("insert", func(t *testing.T) { + t.Parallel() + + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) + + inter, resetStorage := newRandomValueTestInterpreter(t) + + array, expectedValue := createArray(t, &r, inter) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + resetStorage() + + array = readArray( + t, + inter, + orgOwner, + arrayStorageMapKey, + ) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + existingValueCount := len(expectedValue.Values) + + // Insert new values into the array. 
+ + newValueCount := r.randomInt(r.containerMaxSize) + + for i := 0; i < newValueCount; i++ { + + value := r.randomStorableValue(inter, 0) + importedValue := importValue(t, inter, value) + + // Generate a random index + index := 0 + if existingValueCount > 0 { + index = r.rand.Intn(existingValueCount) + } + + expectedValue.Values = append(expectedValue.Values, nil) + copy(expectedValue.Values[index+1:], expectedValue.Values[index:]) + expectedValue.Values[index] = value + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + _ = withoutAtreeStorageValidationEnabled(inter, func() struct{} { + + array.Insert( + inter, + interpreter.EmptyLocationRange, + index, + importedValue, + ) + + return struct{}{} + }) + } + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + resetStorage() + + array = readArray( + t, + inter, + orgOwner, + arrayStorageMapKey, + ) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + }) + + t.Run("remove", func(t *testing.T) { + t.Parallel() + + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) + + inter, resetStorage := newRandomValueTestInterpreter(t) + + array, expectedValue := createArray(t, &r, inter) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + resetStorage() + + array = readArray( + t, + inter, + orgOwner, + arrayStorageMapKey, + ) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + // Random remove + numberOfValues := len(expectedValue.Values) + for i := 0; i < numberOfValues; i++ { + + index := r.rand.Intn(len(expectedValue.Values)) + + value := importValue(t, inter, expectedValue.Values[index]) + + expectedValue.Values = append( + 
expectedValue.Values[:index], + expectedValue.Values[index+1:]..., + ) + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + removedValue := withoutAtreeStorageValidationEnabled(inter, func() interpreter.Value { + return array.Remove(inter, interpreter.EmptyLocationRange, index) + }) + + // Removed value must be same as the original value + AssertValuesEqual(t, inter, value, removedValue) + } + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // Array must be empty + require.Equal(t, 0, array.Count()) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + resetStorage() + + array = readArray( + t, + inter, + orgOwner, + arrayStorageMapKey, + ) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // TODO: check storage size, slab count + }) + + t.Run("update", func(t *testing.T) { + t.Parallel() + + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) + + inter, resetStorage := newRandomValueTestInterpreter(t) + + array, expectedValue := createArray(t, &r, inter) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + resetStorage() + + array = readArray( + t, + inter, + orgOwner, + arrayStorageMapKey, + ) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + elementCount := array.Count() + + // Random update + for i := 0; i < len(expectedValue.Values); i++ { + + index := r.rand.Intn(len(expectedValue.Values)) + + expectedValue.Values[index] = r.randomStorableValue(inter, 0) + newValue := importValue(t, inter, expectedValue.Values[index]) + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. 
+ + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + array.Set( + inter, + interpreter.EmptyLocationRange, + index, + newValue, + ) + return struct{}{} + }) + + } + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // Array must have same number of elements + require.Equal(t, elementCount, array.Count()) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + resetStorage() + + array = readArray( + t, + inter, + orgOwner, + arrayStorageMapKey, + ) + + checkArray( + t, + inter, + array, + expectedValue, + orgOwner, + ) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // TODO: check storage size, slab count + }) +} + +func TestInterpretSmokeRandomNestedArrayOperations(t *testing.T) { + if !*runSmokeTests { + t.Skip("smoke tests are disabled") + } + + owner := common.Address{'A'} + + limits := randomValueLimits{ + containerMaxDepth: 6, + containerMaxSize: 20, + compositeMaxFields: 10, + } + + const opCount = 5 + + const arrayStorageMapKey = interpreter.StringStorageMapKey("array") + + writeArray := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + array *interpreter.ArrayValue, + ) { + inter.Storage(). + GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + true, + ). 
+ WriteValue( + inter, + storageMapKey, + array, + ) + } + + readArray := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) *interpreter.ArrayValue { + storageMap := inter.Storage().GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + false, + ) + require.NotNil(t, storageMap) + + readValue := storageMap.ReadValue(inter, storageMapKey) + require.NotNil(t, readValue) + + require.IsType(t, &interpreter.ArrayValue{}, readValue) + return readValue.(*interpreter.ArrayValue) + } + + getNestedArray := func( + inter *interpreter.Interpreter, + rootValue interpreter.Value, + owner common.Address, + path []pathElement, + ) *interpreter.ArrayValue { + nestedValue := getNestedValue(t, inter, rootValue, path) + require.IsType(t, &interpreter.ArrayValue{}, nestedValue) + nestedArray := nestedValue.(*interpreter.ArrayValue) + require.Equal(t, owner, nestedArray.GetOwner()) + return nestedArray + } + + createValue := func( + t *testing.T, + r *randomValueGenerator, + inter *interpreter.Interpreter, + predicate func(cadence.Array) bool, + ) ( + actualRootValue interpreter.Value, + generatedValue cadence.Value, + path []pathElement, + ) { + + // It does not matter what the root value is, + // as long as it contains a nested array, + // which is nested inside an optional, + // and it satisfies the given predicate. 
+ + for { + generatedValue = r.randomArrayValue(inter, 0) + + path = findNestedCadenceValue( + generatedValue, + func(value cadence.Value, path []pathElement) bool { + array, ok := value.(cadence.Array) + if !ok { + return false + } + + if !predicate(array) { + return false + } + + var foundSome bool + for _, element := range path { + if _, ok := element.(somePathElement); ok { + foundSome = true + break + } + } + return foundSome + }, + ) + if path != nil { + break + } + } + + actualRootValue = importValue(t, inter, generatedValue).Transfer( + inter, + interpreter.EmptyLocationRange, + atree.Address(owner), + false, + nil, + nil, + // TODO: is has no parent container = true correct? + true, + ) + + // Store the array in a storage map, so that the array's slab + // is referenced by the root of the storage. + + writeArray( + inter, + owner, + arrayStorageMapKey, + actualRootValue.(*interpreter.ArrayValue), + ) + + return + } + + checkIteration := func( + t *testing.T, + inter *interpreter.Interpreter, + actualArray *interpreter.ArrayValue, + expectedArray *interpreter.ArrayValue, + ) { + expectedCount := expectedArray.Count() + require.Equal(t, expectedCount, actualArray.Count()) + + var iterations int + + actualArray.Iterate( + inter, + func(element interpreter.Value) (resume bool) { + + expectedElement := expectedArray.Get( + inter, + interpreter.EmptyLocationRange, + iterations, + ) + AssertValuesEqual(t, inter, expectedElement, element) + + iterations += 1 + + return true + }, + false, + interpreter.EmptyLocationRange, + ) + + assert.Equal(t, expectedCount, iterations) + } + + t.Run("insert", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + r := newRandomValueGenerator( + *smokeTestSeed, + limits, + ) + t.Logf("seed: %d", r.seed) + + actualRootValue, generatedValue, path := + createValue( + t, + &r, + inter, + // Accept any array, even empty ones, + // given we're only inserting + func(array cadence.Array) bool 
{ + return true + }, + ) + + actualNestedArray := getNestedArray( + inter, + actualRootValue, + owner, + path, + ) + + type insert struct { + index int + value cadence.Value + } + + performInsert := func(array *interpreter.ArrayValue, insert insert) { + + newValue := importValue(t, inter, insert.value) + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + array.Insert( + inter, + interpreter.EmptyLocationRange, + insert.index, + newValue, + ) + return struct{}{} + }) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + } + + // We use the generated value twice: once as the expected value, and once as the actual value. + // We first perform mutations on the actual value, and then compare it to the expected value. + // The actual value is stored in an account and reloaded. + // The expected value is temporary (zero address), and is not stored in storage. + // Given that the storage reset destroys the data for the expected value because it is temporary, + // we re-import it each time and perform all operations on it from scratch. 
+ + var inserts []insert + + elementCount := actualNestedArray.Count() + + for i := 0; i < opCount; i++ { + var index int + elementCountAfterInserts := elementCount + i + if elementCountAfterInserts > 0 { + index = r.rand.Intn(elementCountAfterInserts) + } + + inserts = append( + inserts, + insert{ + index: index, + value: r.randomStorableValue(inter, 0), + }, + ) + } + + for i, insert := range inserts { + + resetStorage() + + actualRootValue = readArray(inter, owner, arrayStorageMapKey) + actualNestedArray = getNestedArray( + inter, + actualRootValue, + owner, + path, + ) + + performInsert( + actualNestedArray, + insert, + ) + + // Re-create the expected value from scratch, + // by importing the generated value, and performing all updates on it + // that have been performed on the actual value so far. + + expectedRootValue := importValue(t, inter, generatedValue) + expectedNestedArray := getNestedArray( + inter, + expectedRootValue, + common.ZeroAddress, + path, + ) + + for _, insert := range inserts[:i+1] { + + performInsert( + expectedNestedArray, + insert, + ) + } + AssertValuesEqual(t, inter, expectedRootValue, actualRootValue) + + checkIteration( + t, + inter, + actualNestedArray, + expectedNestedArray, + ) + } + }) + + t.Run("update", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + r := newRandomValueGenerator( + *smokeTestSeed, + limits, + ) + t.Logf("seed: %d", r.seed) + + actualRootValue, generatedValue, path := + createValue( + t, + &r, + inter, + // Generate a non-empty array, + // so we have at least one element to update + func(array cadence.Array) bool { + return len(array.Values) > 0 + }, + ) + + actualNestedArray := getNestedArray( + inter, + actualRootValue, + owner, + path, + ) + + elementCount := actualNestedArray.Count() + require.Greater(t, elementCount, 0) + + type update struct { + index int + value cadence.Value + } + + performUpdate := func(array *interpreter.ArrayValue, update update) { 
+ + newValue := importValue(t, inter, update.value) + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + array.Set( + inter, + interpreter.EmptyLocationRange, + update.index, + newValue, + ) + return struct{}{} + }) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // Array must have same number of elements + require.Equal(t, elementCount, array.Count()) + } + + // We use the generated value twice: once as the expected value, and once as the actual value. + // We first perform mutations on the actual value, and then compare it to the expected value. + // The actual value is stored in an account and reloaded. + // The expected value is temporary (zero address), and is not stored in storage. + // Given that the storage reset destroys the data for the expected value because it is temporary, + // we re-import it each time and perform all operations on it from scratch. + + var updates []update + + for i := 0; i < opCount; i++ { + updates = append( + updates, + update{ + index: r.rand.Intn(elementCount), + value: r.randomStorableValue(inter, 0), + }, + ) + } + + for i, update := range updates { + + resetStorage() + + actualRootValue = readArray(inter, owner, arrayStorageMapKey) + actualNestedArray = getNestedArray( + inter, + actualRootValue, + owner, + path, + ) + + performUpdate( + actualNestedArray, + update, + ) + + // Re-create the expected value from scratch, + // by importing the generated value, and performing all updates on it + // that have been performed on the actual value so far. 
+ + expectedRootValue := importValue(t, inter, generatedValue) + expectedNestedArray := getNestedArray( + inter, + expectedRootValue, + common.ZeroAddress, + path, + ) + + for _, update := range updates[:i+1] { + + performUpdate( + expectedNestedArray, + update, + ) + } + AssertValuesEqual(t, inter, expectedRootValue, actualRootValue) + + checkIteration( + t, + inter, + actualNestedArray, + expectedNestedArray, + ) + } + }) + + t.Run("remove", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + r := newRandomValueGenerator( + *smokeTestSeed, + limits, + ) + t.Logf("seed: %d", r.seed) + + actualRootValue, generatedValue, path := + createValue( + t, + &r, + inter, + func(array cadence.Array) bool { + return len(array.Values) >= opCount + }, + ) + + actualNestedArray := getNestedArray( + inter, + actualRootValue, + owner, + path, + ) + + elementCount := actualNestedArray.Count() + require.GreaterOrEqual(t, elementCount, opCount) + + performRemove := func(array *interpreter.ArrayValue, index int) { + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + array.Remove( + inter, + interpreter.EmptyLocationRange, + index, + ) + return struct{}{} + }) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + } + + // We use the generated value twice: once as the expected value, and once as the actual value. + // We first perform mutations on the actual value, and then compare it to the expected value. + // The actual value is stored in an account and reloaded. + // The expected value is temporary (zero address), and is not stored in storage. + // Given that the storage reset destroys the data for the expected value because it is temporary, + // we re-import it each time and perform all operations on it from scratch. 
+ + var removes []int + + for i := 0; i < opCount; i++ { + index := r.rand.Intn(elementCount - i) + removes = append(removes, index) + } + + for i, index := range removes { + + resetStorage() + + actualRootValue = readArray(inter, owner, arrayStorageMapKey) + actualNestedArray = getNestedArray( + inter, + actualRootValue, + owner, + path, + ) + + performRemove( + actualNestedArray, + index, + ) + + // Re-create the expected value from scratch, + // by importing the generated value, and performing all updates on it + // that have been performed on the actual value so far. + + expectedRootValue := importValue(t, inter, generatedValue) + expectedNestedArray := getNestedArray( + inter, + expectedRootValue, + common.ZeroAddress, + path, + ) + + for _, index := range removes[:i+1] { + + performRemove( + expectedNestedArray, + index, + ) + } + AssertValuesEqual(t, inter, expectedRootValue, actualRootValue) + + checkIteration( + t, + inter, + actualNestedArray, + expectedNestedArray, + ) + } + }) +} + +func TestInterpretSmokeRandomNestedDictionaryOperations(t *testing.T) { + if !*runSmokeTests { + t.Skip("smoke tests are disabled") + } + + owner := common.Address{'A'} + + limits := randomValueLimits{ + containerMaxDepth: 6, + containerMaxSize: 20, + compositeMaxFields: 10, + } + + const opCount = 5 + + const dictionaryStorageMapKey = interpreter.StringStorageMapKey("dictionary") + + writeDictionary := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + dictionary *interpreter.DictionaryValue, + ) { + inter.Storage(). + GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + true, + ). 
+ WriteValue( + inter, + storageMapKey, + dictionary, + ) + } + + readDictionary := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) *interpreter.DictionaryValue { + storageMap := inter.Storage().GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + false, + ) + require.NotNil(t, storageMap) + + readValue := storageMap.ReadValue(inter, storageMapKey) + require.NotNil(t, readValue) + + require.IsType(t, &interpreter.DictionaryValue{}, readValue) + return readValue.(*interpreter.DictionaryValue) + } + + getNestedDictionary := func( + inter *interpreter.Interpreter, + rootValue interpreter.Value, + owner common.Address, + path []pathElement, + ) *interpreter.DictionaryValue { + nestedValue := getNestedValue(t, inter, rootValue, path) + require.IsType(t, &interpreter.DictionaryValue{}, nestedValue) + nestedDictionary := nestedValue.(*interpreter.DictionaryValue) + require.Equal(t, owner, nestedDictionary.GetOwner()) + return nestedDictionary + } + + createValue := func( + t *testing.T, + r *randomValueGenerator, + inter *interpreter.Interpreter, + predicate func(cadence.Dictionary) bool, + ) ( + actualRootValue interpreter.Value, + generatedValue cadence.Value, + path []pathElement, + ) { + + // It does not matter what the root value is, + // as long as it contains a nested dictionary, + // which is nested inside an optional, + // and it satisfies the given predicate. 
+ + for { + generatedValue = r.randomDictionaryValue(inter, 0) + + path = findNestedCadenceValue( + generatedValue, + func(value cadence.Value, path []pathElement) bool { + dictionary, ok := value.(cadence.Dictionary) + if !ok { + return false + } + + if !predicate(dictionary) { + return false + } + + var foundSome bool + for _, element := range path { + if _, ok := element.(somePathElement); ok { + foundSome = true + break + } + } + return foundSome + }, + ) + if path != nil { + break + } + } + + actualRootValue = importValue(t, inter, generatedValue).Transfer( + inter, + interpreter.EmptyLocationRange, + atree.Address(owner), + false, + nil, + nil, + // TODO: is has no parent container = true correct? + true, + ) + + // Store the dictionary in a storage map, so that the dictionary's slab + // is referenced by the root of the storage. + + writeDictionary( + inter, + owner, + dictionaryStorageMapKey, + actualRootValue.(*interpreter.DictionaryValue), + ) + + return + } + + checkIteration := func( + t *testing.T, + inter *interpreter.Interpreter, + actualDictionary *interpreter.DictionaryValue, + expectedDictionary *interpreter.DictionaryValue, + ) { + expectedCount := expectedDictionary.Count() + require.Equal(t, expectedCount, actualDictionary.Count()) + + var iterations int + + actualDictionary.Iterate( + inter, + interpreter.EmptyLocationRange, + func(key, element interpreter.Value) (resume bool) { + + expectedElement, exists := expectedDictionary.Get( + inter, + interpreter.EmptyLocationRange, + key, + ) + require.True(t, exists) + AssertValuesEqual(t, inter, expectedElement, element) + + iterations += 1 + + return true + }, + ) + + assert.Equal(t, expectedCount, iterations) + } + + t.Run("insert", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + r := newRandomValueGenerator( + *smokeTestSeed, + limits, + ) + t.Logf("seed: %d", r.seed) + + actualRootValue, generatedValue, path := + createValue( + t, + &r, + 
inter, + // Accept any dictionary, even empty ones, + // given we're only inserting + func(dictionary cadence.Dictionary) bool { + return true + }, + ) + + actualNestedDictionary := getNestedDictionary( + inter, + actualRootValue, + owner, + path, + ) + + type insert struct { + key cadence.Value + value cadence.Value + } + + performInsert := func(dictionary *interpreter.DictionaryValue, insert insert) { + + newKey := importValue(t, inter, insert.key) + newValue := importValue(t, inter, insert.value) + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + dictionary.Insert( + inter, + interpreter.EmptyLocationRange, + newKey, + newValue, + ) + return struct{}{} + }) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + } + + // We use the generated value twice: once as the expected value, and once as the actual value. + // We first perform mutations on the actual value, and then compare it to the expected value. + // The actual value is stored in an account and reloaded. + // The expected value is temporary (zero address), and is not stored in storage. + // Given that the storage reset destroys the data for the expected value because it is temporary, + // we re-import it each time and perform all operations on it from scratch. 
+ + var inserts []insert + insertSet := map[any]struct{}{} + + for i := 0; i < opCount; i++ { + // Generate a unique key + var key cadence.Value + for { + key = r.randomHashableValue(inter) + + importedKey := importValue(t, inter, key) + if actualNestedDictionary.ContainsKey( + inter, + interpreter.EmptyLocationRange, + importedKey, + ) { + continue + } + + mapKey := mapKey(inter, importedKey) + if _, ok := insertSet[mapKey]; ok { + continue + } + insertSet[mapKey] = struct{}{} + + break + } + + inserts = append( + inserts, + insert{ + key: key, + value: r.randomStorableValue(inter, 0), + }, + ) + } + + for i, insert := range inserts { + + resetStorage() + + actualRootValue = readDictionary(inter, owner, dictionaryStorageMapKey) + actualNestedDictionary = getNestedDictionary( + inter, + actualRootValue, + owner, + path, + ) + + performInsert( + actualNestedDictionary, + insert, + ) + + // Re-create the expected value from scratch, + // by importing the generated value, and performing all updates on it + // that have been performed on the actual value so far. 
+ + expectedRootValue := importValue(t, inter, generatedValue) + expectedNestedDictionary := getNestedDictionary( + inter, + expectedRootValue, + common.ZeroAddress, + path, + ) + + for _, insert := range inserts[:i+1] { + + performInsert( + expectedNestedDictionary, + insert, + ) + } + AssertValuesEqual(t, inter, expectedRootValue, actualRootValue) + + checkIteration( + t, + inter, + actualNestedDictionary, + expectedNestedDictionary, + ) + } + }) + + t.Run("update", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + r := newRandomValueGenerator( + *smokeTestSeed, + limits, + ) + t.Logf("seed: %d", r.seed) + + actualRootValue, generatedValue, path := + createValue( + t, + &r, + inter, + // Generate a non-empty dictionary, + // so we have at least one element to update + func(dictionary cadence.Dictionary) bool { + return len(dictionary.Pairs) > 0 + }, + ) + + actualNestedDictionary := getNestedDictionary( + inter, + actualRootValue, + owner, + path, + ) + + elementCount := actualNestedDictionary.Count() + require.Greater(t, elementCount, 0) + + type update struct { + key cadence.Value + value cadence.Value + } + + performUpdate := func(dictionary *interpreter.DictionaryValue, update update) { + + key := importValue(t, inter, update.key) + newValue := importValue(t, inter, update.value) + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + dictionary.SetKey( + inter, + interpreter.EmptyLocationRange, + key, + interpreter.NewUnmeteredSomeValueNonCopying(newValue), + ) + return struct{}{} + }) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // Dictionary must have same number of elements + require.Equal(t, elementCount, dictionary.Count()) + } + + // We use the generated value twice: once as the expected value, and once as the actual value. 
+ // We first perform mutations on the actual value, and then compare it to the expected value. + // The actual value is stored in an account and reloaded. + // The expected value is temporary (zero address), and is not stored in storage. + // Given that the storage reset destroys the data for the expected value because it is temporary, + // we re-import it each time and perform all operations on it from scratch. + + keys := make([]cadence.Value, 0, elementCount) + + actualNestedDictionary.IterateKeys( + inter, + interpreter.EmptyLocationRange, + func(key interpreter.Value) (resume bool) { + cadenceKey, err := runtime.ExportValue( + key, + inter, + interpreter.EmptyLocationRange, + ) + require.NoError(t, err) + + keys = append(keys, cadenceKey) + + return true + }, + ) + + var updates []update + + for i := 0; i < opCount; i++ { + index := r.rand.Intn(elementCount) + + updates = append( + updates, + update{ + key: keys[index], + value: r.randomStorableValue(inter, 0), + }, + ) + } + + for i, update := range updates { + + resetStorage() + + actualRootValue = readDictionary(inter, owner, dictionaryStorageMapKey) + actualNestedDictionary = getNestedDictionary( + inter, + actualRootValue, + owner, + path, + ) + + performUpdate( + actualNestedDictionary, + update, + ) + + // Re-create the expected value from scratch, + // by importing the generated value, and performing all updates on it + // that have been performed on the actual value so far. 
+ + expectedRootValue := importValue(t, inter, generatedValue) + expectedNestedDictionary := getNestedDictionary( + inter, + expectedRootValue, + common.ZeroAddress, + path, + ) + + for _, update := range updates[:i+1] { + + performUpdate( + expectedNestedDictionary, + update, + ) + } + AssertValuesEqual(t, inter, expectedRootValue, actualRootValue) + + checkIteration( + t, + inter, + actualNestedDictionary, + expectedNestedDictionary, + ) + } + }) + + t.Run("remove", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + r := newRandomValueGenerator( + *smokeTestSeed, + limits, + ) + t.Logf("seed: %d", r.seed) + + actualRootValue, generatedValue, path := + createValue( + t, + &r, + inter, + func(dictionary cadence.Dictionary) bool { + return len(dictionary.Pairs) >= opCount + }, + ) + + actualNestedDictionary := getNestedDictionary( + inter, + actualRootValue, + owner, + path, + ) + + elementCount := actualNestedDictionary.Count() + require.GreaterOrEqual(t, elementCount, opCount) + + performRemove := func(dictionary *interpreter.DictionaryValue, key cadence.Value) { + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + dictionary.Remove( + inter, + interpreter.EmptyLocationRange, + importValue(t, inter, key), + ) + return struct{}{} + }) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + } + + // We use the generated value twice: once as the expected value, and once as the actual value. + // We first perform mutations on the actual value, and then compare it to the expected value. + // The actual value is stored in an account and reloaded. + // The expected value is temporary (zero address), and is not stored in storage. 
+ // Given that the storage reset destroys the data for the expected value because it is temporary, + // we re-import it each time and perform all operations on it from scratch. + + keys := make([]interpreter.Value, 0, elementCount) + + actualNestedDictionary.IterateKeys( + inter, + interpreter.EmptyLocationRange, + func(key interpreter.Value) (resume bool) { + + keys = append(keys, key) + + return true + }, + ) + + var removes []cadence.Value + removeSet := map[any]struct{}{} + + for i := 0; i < opCount; i++ { + // Find a unique key + var key interpreter.Value + for { + key = keys[r.rand.Intn(elementCount)] + + mapKey := mapKey(inter, key) + if _, ok := removeSet[mapKey]; ok { + continue + } + removeSet[mapKey] = struct{}{} + + break + } + + cadenceKey, err := runtime.ExportValue( + key, + inter, + interpreter.EmptyLocationRange, + ) + require.NoError(t, err) + + removes = append(removes, cadenceKey) + } + + for i, index := range removes { + + resetStorage() + + actualRootValue = readDictionary(inter, owner, dictionaryStorageMapKey) + actualNestedDictionary = getNestedDictionary( + inter, + actualRootValue, + owner, + path, + ) + + performRemove( + actualNestedDictionary, + index, + ) + + // Re-create the expected value from scratch, + // by importing the generated value, and performing all updates on it + // that have been performed on the actual value so far. 
+ + expectedRootValue := importValue(t, inter, generatedValue) + expectedNestedDictionary := getNestedDictionary( + inter, + expectedRootValue, + common.ZeroAddress, + path, + ) + + for _, index := range removes[:i+1] { + + performRemove( + expectedNestedDictionary, + index, + ) + } + AssertValuesEqual(t, inter, expectedRootValue, actualRootValue) + + checkIteration( + t, + inter, + actualNestedDictionary, + expectedNestedDictionary, + ) + } + }) +} + +func TestInterpretSmokeRandomNestedCompositeOperations(t *testing.T) { + if !*runSmokeTests { + t.Skip("smoke tests are disabled") + } + + owner := common.Address{'A'} + + limits := randomValueLimits{ + containerMaxDepth: 6, + containerMaxSize: 20, + compositeMaxFields: 10, + } + + const opCount = 5 + + const compositeStorageMapKey = interpreter.StringStorageMapKey("composite") + + writeComposite := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + composite *interpreter.CompositeValue, + ) { + inter.Storage(). + GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + true, + ). 
+ WriteValue( + inter, + storageMapKey, + composite, + ) + } + + readComposite := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) *interpreter.CompositeValue { + storageMap := inter.Storage().GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + false, + ) + require.NotNil(t, storageMap) + + readValue := storageMap.ReadValue(inter, storageMapKey) + require.NotNil(t, readValue) + + require.IsType(t, &interpreter.CompositeValue{}, readValue) + return readValue.(*interpreter.CompositeValue) + } + + getNestedComposite := func( + inter *interpreter.Interpreter, + rootValue interpreter.Value, + owner common.Address, + path []pathElement, + ) *interpreter.CompositeValue { + nestedValue := getNestedValue(t, inter, rootValue, path) + require.IsType(t, &interpreter.CompositeValue{}, nestedValue) + nestedComposite := nestedValue.(*interpreter.CompositeValue) + require.Equal(t, owner, nestedComposite.GetOwner()) + return nestedComposite + } + + createValue := func( + t *testing.T, + r *randomValueGenerator, + inter *interpreter.Interpreter, + predicate func(cadence.Composite) bool, + ) ( + actualRootValue interpreter.Value, + generatedValue cadence.Value, + path []pathElement, + ) { + + // It does not matter what the root value is, + // as long as it contains a nested composite, + // which is nested inside an optional, + // and it satisfies the given predicate. 
+ + for { + generatedValue = r.randomStructValue(inter, 0) + + path = findNestedCadenceValue( + generatedValue, + func(value cadence.Value, path []pathElement) bool { + composite, ok := value.(cadence.Struct) + if !ok { + return false + } + + if !predicate(composite) { + return false + } + + var foundSome bool + for _, element := range path { + if _, ok := element.(somePathElement); ok { + foundSome = true + break + } + } + return foundSome + }, + ) + if path != nil { + break + } + } + + actualRootValue = importValue(t, inter, generatedValue).Transfer( + inter, + interpreter.EmptyLocationRange, + atree.Address(owner), + false, + nil, + nil, + // TODO: is has no parent container = true correct? + true, + ) + + // Store the composite in a storage map, so that the composite's slab + // is referenced by the root of the storage. + + writeComposite( + inter, + owner, + compositeStorageMapKey, + actualRootValue.(*interpreter.CompositeValue), + ) + + return + } + + checkIteration := func( + t *testing.T, + inter *interpreter.Interpreter, + actualComposite *interpreter.CompositeValue, + expectedComposite *interpreter.CompositeValue, + ) { + expectedCount := expectedComposite.FieldCount() + require.Equal(t, expectedCount, actualComposite.FieldCount()) + + var iterations int + + actualComposite.ForEachField( + inter, + func(name string, element interpreter.Value) (resume bool) { + + expectedElement := expectedComposite.GetMember( + inter, + interpreter.EmptyLocationRange, + name, + ) + AssertValuesEqual(t, inter, expectedElement, element) + + iterations += 1 + + return true + }, + interpreter.EmptyLocationRange, + ) + + assert.Equal(t, expectedCount, iterations) + } + + t.Run("insert", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + r := newRandomValueGenerator( + *smokeTestSeed, + limits, + ) + t.Logf("seed: %d", r.seed) + + actualRootValue, generatedValue, path := + createValue( + t, + &r, + inter, + // Accept any 
composite, even empty ones, + // given we're only inserting + func(composite cadence.Composite) bool { + return true + }, + ) + + actualNestedComposite := getNestedComposite( + inter, + actualRootValue, + owner, + path, + ) + + type insert struct { + name string + value cadence.Value + } + + performInsert := func(composite *interpreter.CompositeValue, insert insert) { + + newValue := importValue(t, inter, insert.value) + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + composite.SetMember( + inter, + interpreter.EmptyLocationRange, + insert.name, + newValue, + ) + return struct{}{} + }) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + } + + // We use the generated value twice: once as the expected value, and once as the actual value. + // We first perform mutations on the actual value, and then compare it to the expected value. + // The actual value is stored in an account and reloaded. + // The expected value is temporary (zero address), and is not stored in storage. + // Given that the storage reset destroys the data for the expected value because it is temporary, + // we re-import it each time and perform all operations on it from scratch. 
+ + var inserts []insert + insertSet := map[string]struct{}{} + + for i := 0; i < opCount; i++ { + // Generate a unique name + var name string + for { + name = r.randomUTF8String() + + if actualNestedComposite.GetMember( + inter, + interpreter.EmptyLocationRange, + name, + ) != nil { + continue + } + + if _, ok := insertSet[name]; ok { + continue + } + insertSet[name] = struct{}{} + + break + } + + inserts = append( + inserts, + insert{ + name: name, + value: r.randomStorableValue(inter, 0), + }, + ) + } + + for i, insert := range inserts { + + resetStorage() + + actualRootValue = readComposite(inter, owner, compositeStorageMapKey) + actualNestedComposite = getNestedComposite( + inter, + actualRootValue, + owner, + path, + ) + + performInsert( + actualNestedComposite, + insert, + ) + + // Re-create the expected value from scratch, + // by importing the generated value, and performing all updates on it + // that have been performed on the actual value so far. + + expectedRootValue := importValue(t, inter, generatedValue) + expectedNestedComposite := getNestedComposite( + inter, + expectedRootValue, + common.ZeroAddress, + path, + ) + + for _, insert := range inserts[:i+1] { + + performInsert( + expectedNestedComposite, + insert, + ) + } + AssertValuesEqual(t, inter, expectedRootValue, actualRootValue) + + checkIteration( + t, + inter, + actualNestedComposite, + expectedNestedComposite, + ) + } + }) + + t.Run("update", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + r := newRandomValueGenerator( + *smokeTestSeed, + limits, + ) + t.Logf("seed: %d", r.seed) + + actualRootValue, generatedValue, path := + createValue( + t, + &r, + inter, + // Generate a non-empty composite, + // so we have at least one element to update + func(composite cadence.Composite) bool { + return len(composite.FieldsMappedByName()) > 0 + }, + ) + + actualNestedComposite := getNestedComposite( + inter, + actualRootValue, + owner, + path, + ) + + 
fieldCount := actualNestedComposite.FieldCount() + require.Greater(t, fieldCount, 0) + + type update struct { + name string + value cadence.Value + } + + performUpdate := func(composite *interpreter.CompositeValue, update update) { + + newValue := importValue(t, inter, update.value) + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + composite.SetMember( + inter, + interpreter.EmptyLocationRange, + update.name, + interpreter.NewUnmeteredSomeValueNonCopying(newValue), + ) + return struct{}{} + }) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // Composite must have same number of elements + require.Equal(t, fieldCount, composite.FieldCount()) + } + + // We use the generated value twice: once as the expected value, and once as the actual value. + // We first perform mutations on the actual value, and then compare it to the expected value. + // The actual value is stored in an account and reloaded. + // The expected value is temporary (zero address), and is not stored in storage. + // Given that the storage reset destroys the data for the expected value because it is temporary, + // we re-import it each time and perform all operations on it from scratch. 
+ + var updates []update + + fieldNames := make([]string, 0, fieldCount) + + actualNestedComposite.ForEachFieldName( + func(name string) (resume bool) { + fieldNames = append(fieldNames, name) + return true + }, + ) + + for i := 0; i < opCount; i++ { + index := r.rand.Intn(fieldCount) + + updates = append( + updates, + update{ + name: fieldNames[index], + value: r.randomStorableValue(inter, 0), + }, + ) + } + + for i, update := range updates { + + resetStorage() + + actualRootValue = readComposite(inter, owner, compositeStorageMapKey) + actualNestedComposite = getNestedComposite( + inter, + actualRootValue, + owner, + path, + ) + + performUpdate( + actualNestedComposite, + update, + ) + + // Re-create the expected value from scratch, + // by importing the generated value, and performing all updates on it + // that have been performed on the actual value so far. + + expectedRootValue := importValue(t, inter, generatedValue) + expectedNestedComposite := getNestedComposite( + inter, + expectedRootValue, + common.ZeroAddress, + path, + ) + + for _, update := range updates[:i+1] { + + performUpdate( + expectedNestedComposite, + update, + ) + } + AssertValuesEqual(t, inter, expectedRootValue, actualRootValue) + + checkIteration( + t, + inter, + actualNestedComposite, + expectedNestedComposite, + ) + } + }) + + t.Run("remove", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + r := newRandomValueGenerator( + *smokeTestSeed, + limits, + ) + t.Logf("seed: %d", r.seed) + + actualRootValue, generatedValue, path := + createValue( + t, + &r, + inter, + func(composite cadence.Composite) bool { + return len(composite.FieldsMappedByName()) >= opCount + }, + ) + + actualNestedComposite := getNestedComposite( + inter, + actualRootValue, + owner, + path, + ) + + fieldCount := actualNestedComposite.FieldCount() + require.GreaterOrEqual(t, fieldCount, opCount) + + performRemove := func(composite *interpreter.CompositeValue, name string) 
{ + + // Atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + + withoutAtreeStorageValidationEnabled(inter, func() struct{} { + composite.RemoveMember( + inter, + interpreter.EmptyLocationRange, + name, + ) + return struct{}{} + }) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + } + + // We use the generated value twice: once as the expected value, and once as the actual value. + // We first perform mutations on the actual value, and then compare it to the expected value. + // The actual value is stored in an account and reloaded. + // The expected value is temporary (zero address), and is not stored in storage. + // Given that the storage reset destroys the data for the expected value because it is temporary, + // we re-import it each time and perform all operations on it from scratch. + + fieldNames := make([]string, 0, fieldCount) + + actualNestedComposite.ForEachFieldName( + func(name string) (resume bool) { + + fieldNames = append(fieldNames, name) + + return true + }, + ) + + var removes []string + removeSet := map[string]struct{}{} + + for i := 0; i < opCount; i++ { + // Find a unique name + var name string + for { + name = fieldNames[r.rand.Intn(fieldCount)] + + if _, ok := removeSet[name]; ok { + continue + } + removeSet[name] = struct{}{} + + break + } + + removes = append(removes, name) + } + + for i, index := range removes { + + resetStorage() + + actualRootValue = readComposite(inter, owner, compositeStorageMapKey) + actualNestedComposite = getNestedComposite( + inter, + actualRootValue, + owner, + path, + ) + + performRemove( + actualNestedComposite, + index, + ) + + // Re-create the expected value from scratch, + // by importing the generated value, and performing all updates on it + // that have been performed on the actual value so far. 
+ + expectedRootValue := importValue(t, inter, generatedValue) + expectedNestedComposite := getNestedComposite( + inter, + expectedRootValue, + common.ZeroAddress, + path, + ) + + for _, index := range removes[:i+1] { + + performRemove( + expectedNestedComposite, + index, + ) + } + AssertValuesEqual(t, inter, expectedRootValue, actualRootValue) + + checkIteration( + t, + inter, + actualNestedComposite, + expectedNestedComposite, + ) + } + }) +} + +func findNestedCadenceValue( + value cadence.Value, + predicate func(value cadence.Value, path []pathElement) bool, +) []pathElement { + return findNestedCadenceRecursive(value, nil, predicate) +} + +func findNestedCadenceRecursive( + value cadence.Value, + path []pathElement, + predicate func(value cadence.Value, path []pathElement) bool, +) []pathElement { + if predicate(value, path) { + return path + } + + switch value := value.(type) { + case cadence.Array: + for index, element := range value.Values { + + nestedPath := path + nestedPath = append(nestedPath, arrayPathElement{index}) + + result := findNestedCadenceRecursive(element, nestedPath, predicate) + if result != nil { + return result + } + } + + case cadence.Dictionary: + for _, pair := range value.Pairs { + + nestedPath := path + nestedPath = append(nestedPath, dictionaryPathElement{pair.Key}) + + result := findNestedCadenceRecursive(pair.Value, nestedPath, predicate) + if result != nil { + return result + } + } + + case cadence.Struct: + for name, field := range value.FieldsMappedByName() { + + nestedPath := path + nestedPath = append(nestedPath, structPathElement{name}) + + result := findNestedCadenceRecursive(field, nestedPath, predicate) + if result != nil { + return result + } + } + + case cadence.Optional: + nestedValue := value.Value + if nestedValue == nil { + break + } + + nestedPath := path + nestedPath = append(nestedPath, somePathElement{}) + + result := findNestedCadenceRecursive(nestedValue, nestedPath, predicate) + if result != nil { + return 
result + } + } + + return nil +} + +func getNestedValue( + t *testing.T, + inter *interpreter.Interpreter, + value interpreter.Value, + path []pathElement, +) interpreter.Value { + for i, element := range path { + switch element := element.(type) { + case arrayPathElement: + require.IsType( + t, + &interpreter.ArrayValue{}, + value, + "path: %v", + path[:i], + ) + array := value.(*interpreter.ArrayValue) + + value = array.Get( + inter, + interpreter.EmptyLocationRange, + element.index, + ) + + require.NotNil(t, + value, + "missing value for array element %d (path: %v)", + element.index, + path[:i], + ) + + case dictionaryPathElement: + require.IsType( + t, + &interpreter.DictionaryValue{}, + value, + "path: %v", + path[:i], + ) + dictionary := value.(*interpreter.DictionaryValue) + + key := importValue(t, inter, element.key) + + var found bool + value, found = dictionary.Get( + inter, + interpreter.EmptyLocationRange, + key, + ) + require.True(t, + found, + "missing value for dictionary key %s (path: %v)", + element.key, + path[:i], + ) + require.NotNil(t, + value, + "missing value for dictionary key %s (path: %v)", + element.key, + path[:i], + ) + + case structPathElement: + require.IsType( + t, + &interpreter.CompositeValue{}, + value, + "path: %v", + path[:i], + ) + composite := value.(*interpreter.CompositeValue) + + value = composite.GetMember( + inter, + interpreter.EmptyLocationRange, + element.name, + ) + + require.NotNil(t, + value, + "missing value for composite field %q (path: %v)", + element.name, + path[:i], + ) + + case somePathElement: + require.IsType( + t, + &interpreter.SomeValue{}, + value, + "path: %v", + path[:i], + ) + optional := value.(*interpreter.SomeValue) + + value = optional.InnerValue(inter, interpreter.EmptyLocationRange) + + require.NotNil(t, + value, + "missing value for optional (path: %v)", + path[:i], + ) + + default: + panic(errors.NewUnexpectedError("unsupported path element: %T", element)) + } + } + + return value +} + +type 
pathElement interface { + isPathElement() +} + +type arrayPathElement struct { + index int +} + +var _ pathElement = arrayPathElement{} + +func (arrayPathElement) isPathElement() {} + +type dictionaryPathElement struct { + key cadence.Value +} + +var _ pathElement = dictionaryPathElement{} + +func (dictionaryPathElement) isPathElement() {} + +type structPathElement struct { + name string +} + +var _ pathElement = structPathElement{} + +func (structPathElement) isPathElement() {} + +type somePathElement struct{} + +var _ pathElement = somePathElement{} + +func (somePathElement) isPathElement() {} + +type randomValueLimits struct { + containerMaxDepth int + containerMaxSize int + compositeMaxFields int +} + +type randomValueGenerator struct { + seed int64 + rand *rand.Rand + randomValueLimits +} + +func newRandomValueGenerator(seed int64, limits randomValueLimits) randomValueGenerator { + if seed == -1 { + seed = time.Now().UnixNano() + } + + return randomValueGenerator{ + seed: seed, + rand: rand.New(rand.NewSource(seed)), + randomValueLimits: limits, + } +} +func (r randomValueGenerator) randomStorableValue(inter *interpreter.Interpreter, currentDepth int) cadence.Value { + var kind randomValueKind + if currentDepth < r.containerMaxDepth { + kind = r.randomValueKind(randomValueKindStruct) + } else { + kind = r.randomValueKind(randomValueKindCapability) + } + + switch kind { + + // Non-hashable + case randomValueKindVoid: + return cadence.Void{} + + case randomValueKindNil: + return cadence.NewOptional(nil) + + case randomValueKindDictionaryVariant1, + randomValueKindDictionaryVariant2: + return r.randomDictionaryValue(inter, currentDepth) + + case randomValueKindArrayVariant1, + randomValueKindArrayVariant2: + return r.randomArrayValue(inter, currentDepth) + + case randomValueKindStruct: + return r.randomStructValue(inter, currentDepth) + + case randomValueKindCapability: + return r.randomCapabilityValue() + + case randomValueKindSome: + return cadence.NewOptional( 
+ r.randomStorableValue(inter, currentDepth+1), + ) + + // Hashable + default: + return r.generateHashableValueOfKind(inter, kind) + } +} + +func (r randomValueGenerator) randomHashableValue(inter *interpreter.Interpreter) cadence.Value { + return r.generateHashableValueOfKind(inter, r.randomValueKind(randomValueKindEnum)) +} + +func (r randomValueGenerator) generateHashableValueOfKind(inter *interpreter.Interpreter, kind randomValueKind) cadence.Value { + switch kind { + + // Int* + case randomValueKindInt: + // TODO: generate larger numbers + return cadence.NewInt(r.randomSign() * int(r.rand.Int63())) + case randomValueKindInt8: + return cadence.NewInt8(int8(r.randomInt(math.MaxUint8))) + case randomValueKindInt16: + return cadence.NewInt16(int16(r.randomInt(math.MaxUint16))) + case randomValueKindInt32: + return cadence.NewInt32(int32(r.randomSign()) * r.rand.Int31()) + case randomValueKindInt64: + return cadence.NewInt64(int64(r.randomSign()) * r.rand.Int63()) + case randomValueKindInt128: + // TODO: generate larger numbers + return cadence.NewInt128(r.randomSign() * int(r.rand.Int63())) + case randomValueKindInt256: + // TODO: generate larger numbers + return cadence.NewInt256(r.randomSign() * int(r.rand.Int63())) + + // UInt* + case randomValueKindUInt: + // TODO: generate larger numbers + return cadence.NewUInt(uint(r.rand.Uint64())) + case randomValueKindUInt8: + return cadence.NewUInt8(uint8(r.randomInt(math.MaxUint8))) + case randomValueKindUInt16: + return cadence.NewUInt16(uint16(r.randomInt(math.MaxUint16))) + case randomValueKindUInt32: + return cadence.NewUInt32(r.rand.Uint32()) + case randomValueKindUInt64Variant1, + randomValueKindUInt64Variant2, + randomValueKindUInt64Variant3, + randomValueKindUInt64Variant4: // should be more common + return cadence.NewUInt64(r.rand.Uint64()) + case randomValueKindUInt128: + // TODO: generate larger numbers + return cadence.NewUInt128(uint(r.rand.Uint64())) + case randomValueKindUInt256: + // TODO: generate 
larger numbers + return cadence.NewUInt256(uint(r.rand.Uint64())) + + // Word* + case randomValueKindWord8: + return cadence.NewWord8(uint8(r.randomInt(math.MaxUint8))) + case randomValueKindWord16: + return cadence.NewWord16(uint16(r.randomInt(math.MaxUint16))) + case randomValueKindWord32: + return cadence.NewWord32(r.rand.Uint32()) + case randomValueKindWord64: + return cadence.NewWord64(r.rand.Uint64()) + case randomValueKindWord128: + // TODO: generate larger numbers + return cadence.NewWord128(uint(r.rand.Uint64())) + case randomValueKindWord256: + // TODO: generate larger numbers + return cadence.NewWord256(uint(r.rand.Uint64())) + + // (U)Fix* + case randomValueKindFix64: + return cadence.Fix64( + int64(r.randomSign()) * r.rand.Int63n(sema.Fix64TypeMaxInt), + ) + case randomValueKindUFix64: + return cadence.UFix64( + uint64(r.rand.Int63n(int64(sema.UFix64TypeMaxInt))), + ) + + // String + case randomValueKindStringVariant1, + randomValueKindStringVariant2, + randomValueKindStringVariant3, + randomValueKindStringVariant4: // small string - should be more common + size := r.randomInt(255) + return cadence.String(r.randomUTF8StringOfSize(size)) + case randomValueKindStringVariant5: // large string + size := r.randomInt(4048) + 255 + return cadence.String(r.randomUTF8StringOfSize(size)) + + case randomValueKindBoolVariantTrue: + return cadence.NewBool(true) + case randomValueKindBoolVariantFalse: + return cadence.NewBool(false) + + case randomValueKindAddress: + return r.randomAddressValue() + + case randomValueKindPath: + return r.randomPathValue() + + case randomValueKindEnum: + return r.randomEnumValue(inter) + + default: + panic(fmt.Sprintf("unsupported: %d", kind)) + } +} + +func (r randomValueGenerator) randomSign() int { + if r.randomInt(1) == 1 { + return 1 + } + + return -1 +} + +func (r randomValueGenerator) randomAddressValue() (address cadence.Address) { + r.rand.Read(address[:]) + return address +} + +func (r randomValueGenerator) randomPathValue() 
cadence.Path { + randomDomain := r.rand.Intn(len(common.AllPathDomains)) + identifier := r.randomUTF8String() + + return cadence.Path{ + Domain: common.AllPathDomains[randomDomain], + Identifier: identifier, + } +} + +func (r randomValueGenerator) randomCapabilityValue() cadence.Capability { + return cadence.NewCapability( + cadence.UInt64(r.randomInt(math.MaxInt-1)), + r.randomAddressValue(), + cadence.NewReferenceType( + cadence.UnauthorizedAccess, + cadence.AnyStructType, + ), + ) +} + +func (r randomValueGenerator) randomDictionaryValue(inter *interpreter.Interpreter, currentDepth int) cadence.Dictionary { + + entryCount := r.randomInt(r.containerMaxSize) + keyValues := make([]cadence.KeyValuePair, entryCount) + + existingKeys := map[string]struct{}{} + + for i := 0; i < entryCount; i++ { + + // generate a unique key + var key cadence.Value + for { + key = r.randomHashableValue(inter) + keyStr := key.String() + + // avoid duplicate keys + _, exists := existingKeys[keyStr] + if !exists { + existingKeys[keyStr] = struct{}{} + break + } + } + + keyValues[i] = cadence.KeyValuePair{ + Key: key, + Value: r.randomStorableValue(inter, currentDepth+1), + } + } + + return cadence.NewDictionary(keyValues). + WithType( + cadence.NewDictionaryType( + cadence.HashableStructType, + cadence.AnyStructType, + ), + ) +} + +func (r randomValueGenerator) randomInt(upperBound int) int { + return r.rand.Intn(upperBound + 1) +} + +func (r randomValueGenerator) randomArrayValue(inter *interpreter.Interpreter, currentDepth int) cadence.Array { + elementsCount := r.randomInt(r.containerMaxSize) + elements := make([]cadence.Value, elementsCount) + + for i := 0; i < elementsCount; i++ { + elements[i] = r.randomStorableValue(inter, currentDepth+1) + } + + return cadence.NewArray(elements). 
+ WithType(cadence.NewVariableSizedArrayType(cadence.AnyStructType)) +} + +func (r randomValueGenerator) randomStructValue(inter *interpreter.Interpreter, currentDepth int) cadence.Struct { + fieldsCount := r.randomInt(r.compositeMaxFields) + + fields := make([]cadence.Field, fieldsCount) + fieldValues := make([]cadence.Value, fieldsCount) + + existingFieldNames := make(map[string]any, fieldsCount) + + for i := 0; i < fieldsCount; i++ { + // generate a unique field name + var fieldName string + for { + fieldName = r.randomUTF8String() + + // avoid duplicate field names + _, exists := existingFieldNames[fieldName] + if !exists { + existingFieldNames[fieldName] = struct{}{} + break + } + } + + fields[i] = cadence.NewField(fieldName, cadence.AnyStructType) + fieldValues[i] = r.randomStorableValue(inter, currentDepth+1) + } + + identifier := fmt.Sprintf("S%d", r.rand.Uint64()) + + address := r.randomAddressValue() + + location := common.AddressLocation{ + Address: common.Address(address), + Name: identifier, + } + + kind := common.CompositeKindStructure + + compositeType := &sema.CompositeType{ + Location: location, + Identifier: identifier, + Kind: kind, + Members: &sema.StringMemberOrderedMap{}, + } + + fieldNames := make([]string, fieldsCount) + + for i := 0; i < fieldsCount; i++ { + fieldName := fields[i].Identifier + compositeType.Members.Set( + fieldName, + sema.NewUnmeteredPublicConstantFieldMember( + compositeType, + fieldName, + sema.AnyStructType, + "", + ), + ) + fieldNames[i] = fieldName + } + compositeType.Fields = fieldNames + + // Add the type to the elaboration, to short-circuit the type-lookup. 
+ inter.Program.Elaboration.SetCompositeType( + compositeType.ID(), + compositeType, + ) + + return cadence.NewStruct(fieldValues).WithType( + cadence.NewStructType( + location, + identifier, + fields, + nil, + ), + ) +} + +func (r randomValueGenerator) cadenceIntegerType(kind randomValueKind) cadence.Type { + switch kind { + // Int + case randomValueKindInt: + return cadence.IntType + case randomValueKindInt8: + return cadence.Int8Type + case randomValueKindInt16: + return cadence.Int16Type + case randomValueKindInt32: + return cadence.Int32Type + case randomValueKindInt64: + return cadence.Int64Type + case randomValueKindInt128: + return cadence.Int128Type + case randomValueKindInt256: + return cadence.Int256Type + + // UInt + case randomValueKindUInt: + return cadence.UIntType + case randomValueKindUInt8: + return cadence.UInt8Type + case randomValueKindUInt16: + return cadence.UInt16Type + case randomValueKindUInt32: + return cadence.UInt32Type + case randomValueKindUInt64Variant1, + randomValueKindUInt64Variant2, + randomValueKindUInt64Variant3, + randomValueKindUInt64Variant4: + return cadence.UInt64Type + case randomValueKindUInt128: + return cadence.UInt128Type + case randomValueKindUInt256: + return cadence.UInt256Type + + // Word + case randomValueKindWord8: + return cadence.Word8Type + case randomValueKindWord16: + return cadence.Word16Type + case randomValueKindWord32: + return cadence.Word32Type + case randomValueKindWord64: + return cadence.Word64Type + case randomValueKindWord128: + return cadence.Word128Type + case randomValueKindWord256: + return cadence.Word256Type + + default: + panic(fmt.Sprintf("unsupported kind: %d", kind)) + } +} + +func (r randomValueGenerator) semaIntegerType(kind randomValueKind) sema.Type { + switch kind { + // Int + case randomValueKindInt: + return sema.IntType + case randomValueKindInt8: + return sema.Int8Type + case randomValueKindInt16: + return sema.Int16Type + case randomValueKindInt32: + return sema.Int32Type + 
case randomValueKindInt64: + return sema.Int64Type + case randomValueKindInt128: + return sema.Int128Type + case randomValueKindInt256: + return sema.Int256Type + + // UInt + case randomValueKindUInt: + return sema.UIntType + case randomValueKindUInt8: + return sema.UInt8Type + case randomValueKindUInt16: + return sema.UInt16Type + case randomValueKindUInt32: + return sema.UInt32Type + case randomValueKindUInt64Variant1, + randomValueKindUInt64Variant2, + randomValueKindUInt64Variant3, + randomValueKindUInt64Variant4: + return sema.UInt64Type + case randomValueKindUInt128: + return sema.UInt128Type + case randomValueKindUInt256: + return sema.UInt256Type + + // Word + case randomValueKindWord8: + return sema.Word8Type + case randomValueKindWord16: + return sema.Word16Type + case randomValueKindWord32: + return sema.Word32Type + case randomValueKindWord64: + return sema.Word64Type + case randomValueKindWord128: + return sema.Word128Type + case randomValueKindWord256: + return sema.Word256Type + + default: + panic(fmt.Sprintf("unsupported kind: %d", kind)) + } +} + +type randomValueKind uint8 + +const ( + // Hashable values + // Int* + randomValueKindInt randomValueKind = iota + randomValueKindInt8 + randomValueKindInt16 + randomValueKindInt32 + randomValueKindInt64 + randomValueKindInt128 + randomValueKindInt256 + + // UInt* + randomValueKindUInt + randomValueKindUInt8 + randomValueKindUInt16 + randomValueKindUInt32 + randomValueKindUInt64Variant1 + randomValueKindUInt64Variant2 + randomValueKindUInt64Variant3 + randomValueKindUInt64Variant4 + randomValueKindUInt128 + randomValueKindUInt256 + + // Word* + randomValueKindWord8 + randomValueKindWord16 + randomValueKindWord32 + randomValueKindWord64 + randomValueKindWord128 + randomValueKindWord256 + + // (U)Fix* + randomValueKindFix64 + randomValueKindUFix64 + + // String + randomValueKindStringVariant1 + randomValueKindStringVariant2 + randomValueKindStringVariant3 + randomValueKindStringVariant4 + 
randomValueKindStringVariant5 + + randomValueKindBoolVariantTrue + randomValueKindBoolVariantFalse + randomValueKindPath + randomValueKindAddress + randomValueKindEnum + + // Non-hashable values + randomValueKindVoid + randomValueKindNil // `Never?` + randomValueKindCapability + + // Containers + randomValueKindSome + randomValueKindArrayVariant1 + randomValueKindArrayVariant2 + randomValueKindDictionaryVariant1 + randomValueKindDictionaryVariant2 + randomValueKindStruct +) + +func (r randomValueGenerator) randomUTF8String() string { + return r.randomUTF8StringOfSize(8) +} + +func (r randomValueGenerator) randomUTF8StringOfSize(size int) string { + identifier := make([]byte, size) + r.rand.Read(identifier) + return strings.ToValidUTF8(string(identifier), "$") +} + +func (r randomValueGenerator) randomEnumValue(inter *interpreter.Interpreter) cadence.Enum { + // Get a random integer subtype to be used as the raw-type of enum + typ := r.randomValueKind(randomValueKindWord64) + + rawValue := r.generateHashableValueOfKind(inter, typ).(cadence.NumberValue) + + identifier := fmt.Sprintf("E%d", r.rand.Uint64()) + + address := r.randomAddressValue() + + location := common.AddressLocation{ + Address: common.Address(address), + Name: identifier, + } + + semaRawType := r.semaIntegerType(typ) + + semaEnumType := &sema.CompositeType{ + Identifier: identifier, + EnumRawType: semaRawType, + Kind: common.CompositeKindEnum, + Location: location, + Members: &sema.StringMemberOrderedMap{}, + Fields: []string{ + sema.EnumRawValueFieldName, + }, + } + + semaEnumType.Members.Set( + sema.EnumRawValueFieldName, + sema.NewUnmeteredPublicConstantFieldMember( + semaEnumType, + sema.EnumRawValueFieldName, + semaRawType, + "", + ), + ) + + // Add the type to the elaboration, to short-circuit the type-lookup. 
+ inter.Program.Elaboration.SetCompositeType( + semaEnumType.ID(), + semaEnumType, + ) + + rawType := r.cadenceIntegerType(typ) + + fields := []cadence.Value{ + rawValue, + } + + return cadence.NewEnum(fields).WithType( + cadence.NewEnumType( + location, + identifier, + rawType, + []cadence.Field{ + { + Identifier: sema.EnumRawValueFieldName, + Type: rawType, + }, + }, + nil, + ), + ) +} + +func (r randomValueGenerator) randomValueKind(kind randomValueKind) randomValueKind { + return randomValueKind(r.randomInt(int(kind))) +} + +func TestRandomValueGeneration(t *testing.T) { + + inter, _ := newRandomValueTestInterpreter(t) + + limits := defaultRandomValueLimits + + // Generate random values + for i := 0; i < 1000; i++ { + r1 := newRandomValueGenerator(int64(i), limits) + v1 := r1.randomStorableValue(inter, 0) + + r2 := newRandomValueGenerator(int64(i), limits) + v2 := r2.randomStorableValue(inter, 0) + + // Check if the generated values are equal + assert.Equal(t, v1, v2) + } +} + +func mapKey(inter *interpreter.Interpreter, key interpreter.Value) any { + + switch key := key.(type) { + case *interpreter.StringValue: + type stringValue string + return stringValue(key.Str) + + case interpreter.CharacterValue: + type characterValue string + return characterValue(key.Str) + + case interpreter.TypeValue: + type typeValue common.TypeID + return typeValue(key.Type.ID()) + + case *interpreter.CompositeValue: + type enumKey struct { + location common.Location + qualifiedIdentifier string + kind common.CompositeKind + rawValue string + } + return enumKey{ + location: key.Location, + qualifiedIdentifier: key.QualifiedIdentifier, + kind: key.Kind, + rawValue: key.GetField( + inter, + interpreter.EmptyLocationRange, + sema.EnumRawValueFieldName, + ).String(), + } + + case interpreter.IntValue: + type intValue string + return intValue(key.String()) + + case interpreter.UIntValue: + type uintValue string + return uintValue(key.String()) + + case interpreter.Int8Value: + type 
int8Value string + return int8Value(key.String()) + + case interpreter.UInt8Value: + type uint8Value string + return uint8Value(key.String()) + + case interpreter.Int16Value: + type int16Value string + return int16Value(key.String()) + + case interpreter.UInt16Value: + type uint16Value string + return uint16Value(key.String()) + + case interpreter.Int32Value: + type int32Value string + return int32Value(key.String()) + + case interpreter.UInt32Value: + type uint32Value string + return uint32Value(key.String()) + + case interpreter.Int64Value: + type int64Value string + return int64Value(key.String()) + + case interpreter.UInt64Value: + type uint64Value string + return uint64Value(key.String()) + + case interpreter.Int128Value: + type int128Value string + return int128Value(key.String()) + + case interpreter.UInt128Value: + type uint128Value string + return uint128Value(key.String()) + + case interpreter.Int256Value: + type int256Value string + return int256Value(key.String()) + + case interpreter.UInt256Value: + type uint256Value string + return uint256Value(key.String()) + + case interpreter.Word8Value: + type word8Value string + return word8Value(key.String()) + + case interpreter.Word16Value: + type word16Value string + return word16Value(key.String()) + + case interpreter.Word32Value: + type word32Value string + return word32Value(key.String()) + + case interpreter.Word64Value: + type word64Value string + return word64Value(key.String()) + + case interpreter.Word128Value: + type word128Value string + return word128Value(key.String()) + + case interpreter.Word256Value: + type word256Value string + return word256Value(key.String()) + + case interpreter.PathValue: + return key + + case interpreter.AddressValue: + return key + + case interpreter.BoolValue: + return key + + case interpreter.Fix64Value: + type fix64Value string + return fix64Value(key.String()) + + case interpreter.UFix64Value: + type ufix64Value string + return ufix64Value(key.String()) + + default: 
+ panic(errors.NewUnexpectedError("unsupported map key type: %T", key)) + } +} + +// This test is a reproducer for "slab was not reachable from leaves" false alarm. +// https://github.com/onflow/cadence/pull/2882#issuecomment-1781298107 +// In this test, storage.CheckHealth() should be called after array.DeepRemove(), +// not in the middle of array.DeepRemove(). +// CheckHealth() is called in the middle of array.DeepRemove() when: +// - array.DeepRemove() calls childArray1 and childArray2 DeepRemove() +// - DeepRemove() calls maybeValidateAtreeValue() +// - maybeValidateAtreeValue() calls CheckHealth() +func TestCheckStorageHealthInMiddleOfDeepRemove(t *testing.T) { + + t.Parallel() + + storage := newUnmeteredInMemoryStorage() + inter, err := interpreter.NewInterpreter( + &interpreter.Program{ + Program: ast.NewProgram(nil, []ast.Declaration{}), + Elaboration: sema.NewElaboration(nil), + }, + TestLocation, + &interpreter.Config{ + Storage: storage, + ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { + return interpreter.VirtualImport{ + Elaboration: inter.Program.Elaboration, + } + }, + AtreeStorageValidationEnabled: true, + AtreeValueValidationEnabled: true, + }, + ) + require.NoError(t, err) + + owner := common.Address{'A'} + + // Create a small child array which will be inlined in parent container. + childArray1 := interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + owner, + interpreter.NewUnmeteredStringValue("a"), + ) + + size := int(atree.MaxInlineArrayElementSize()) - 10 + + // Create a large child array which will NOT be inlined in parent container. 
+ childArray2 := interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + owner, + interpreter.NewUnmeteredStringValue(strings.Repeat("b", size)), + interpreter.NewUnmeteredStringValue(strings.Repeat("c", size)), + ) + + // Create an array with childArray1 and childArray2. + array := interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + owner, + childArray1, // inlined + childArray2, // not inlined + ) + + // DeepRemove removes all elements (childArray1 and childArray2) recursively in array. + array.DeepRemove(inter, true) + + // As noted earlier in comments at the top of this test: + // storage.CheckHealth() should be called after array.DeepRemove(), not in the middle of array.DeepRemove(). + // This happens when: + // - array.DeepRemove() calls childArray1 and childArray2 DeepRemove() + // - DeepRemove() calls maybeValidateAtreeValue() + // - maybeValidateAtreeValue() calls CheckHealth() +} + +// This test is a reproducer for "slab was not reachable from leaves" false alarm. +// https://github.com/onflow/cadence/pull/2882#issuecomment-1796381227 +// In this test, storage.CheckHealth() should be called after DictionaryValue.Transfer() +// with remove flag, not in the middle of DictionaryValue.Transfer(). 
+func TestInterpretCheckStorageHealthInMiddleOfTransferAndRemove(t *testing.T) { + + t.Parallel() + + r := newRandomValueGenerator(*smokeTestSeed, defaultRandomValueLimits) + t.Logf("seed: %d", r.seed) + + storage := newUnmeteredInMemoryStorage() + inter, err := interpreter.NewInterpreter( &interpreter.Program{ Program: ast.NewProgram(nil, []ast.Declaration{}), Elaboration: sema.NewElaboration(nil), @@ -1723,206 +4925,1588 @@ func TestCheckStorageHealthInMiddleOfDeepRemove(t *testing.T) { Elaboration: inter.Program.Elaboration, } }, - AtreeStorageValidationEnabled: true, - AtreeValueValidationEnabled: true, - }, - ) - require.NoError(t, err) + AtreeStorageValidationEnabled: true, + AtreeValueValidationEnabled: true, + }, + ) + require.NoError(t, err) + + // Create large array value with zero address which will not be inlined. + gchildArray := interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + &interpreter.VariableSizedStaticType{ + Type: interpreter.PrimitiveStaticTypeAnyStruct, + }, + common.ZeroAddress, + interpreter.NewUnmeteredStringValue(strings.Repeat("b", int(atree.MaxInlineArrayElementSize())-10)), + interpreter.NewUnmeteredStringValue(strings.Repeat("c", int(atree.MaxInlineArrayElementSize())-10)), + ) + + // Create small composite value with zero address which will be inlined. 
+ identifier := "test" + + location := common.AddressLocation{ + Address: common.ZeroAddress, + Name: identifier, + } + + compositeType := &sema.CompositeType{ + Location: location, + Identifier: identifier, + Kind: common.CompositeKindStructure, + } + + fields := []interpreter.CompositeField{ + interpreter.NewUnmeteredCompositeField("a", interpreter.NewUnmeteredUInt64Value(0)), + interpreter.NewUnmeteredCompositeField("b", interpreter.NewUnmeteredUInt64Value(1)), + interpreter.NewUnmeteredCompositeField("c", interpreter.NewUnmeteredUInt64Value(2)), + } + + compositeType.Members = &sema.StringMemberOrderedMap{} + for _, field := range fields { + compositeType.Members.Set( + field.Name, + sema.NewUnmeteredPublicConstantFieldMember( + compositeType, + field.Name, + sema.AnyStructType, + "", + ), + ) + } + + // Add the type to the elaboration, to short-circuit the type-lookup. + inter.Program.Elaboration.SetCompositeType( + compositeType.ID(), + compositeType, + ) + + gchildComposite := interpreter.NewCompositeValue( + inter, + interpreter.EmptyLocationRange, + location, + identifier, + common.CompositeKindStructure, + fields, + common.ZeroAddress, + ) + + // Create large dictionary with zero address with 2 data slabs containing: + // - SomeValue(SlabID) as first physical element in the first data slab + // - inlined CompositeValue as last physical element in the second data slab + + numberOfValues := 10 + firstElementIndex := 7 // index of first physical element in the first data slab + lastElementIndex := 8 // index of last physical element in the last data slab + keyValues := make([]interpreter.Value, numberOfValues*2) + for i := 0; i < numberOfValues; i++ { + key := interpreter.NewUnmeteredUInt64Value(uint64(i)) + + var value interpreter.Value + switch i { + case firstElementIndex: + value = interpreter.NewUnmeteredSomeValueNonCopying(gchildArray) + + case lastElementIndex: + value = gchildComposite + + default: + // Other values are inlined random strings. 
+ const size = 235 + value = interpreter.NewUnmeteredStringValue(r.randomUTF8StringOfSize(size)) + } + + keyValues[i*2] = key + keyValues[i*2+1] = value + } + + childMap := interpreter.NewDictionaryValueWithAddress( + inter, + interpreter.EmptyLocationRange, + &interpreter.DictionaryStaticType{ + KeyType: interpreter.PrimitiveStaticTypeAnyStruct, + ValueType: interpreter.PrimitiveStaticTypeAnyStruct, + }, + common.ZeroAddress, + keyValues..., + ) + + // Create dictionary with non-zero address containing child dictionary. + owner := common.Address{'A'} + m := interpreter.NewDictionaryValueWithAddress( + inter, + interpreter.EmptyLocationRange, + &interpreter.DictionaryStaticType{ + KeyType: interpreter.PrimitiveStaticTypeAnyStruct, + ValueType: interpreter.PrimitiveStaticTypeAnyStruct, + }, + owner, + interpreter.NewUnmeteredUInt64Value(0), + childMap, + ) + + inter.ValidateAtreeValue(m) + + require.NoError(t, storage.CheckHealth()) +} + +// TestInterpretIterateReadOnlyLoadedWithSomeValueChildren tests https://github.com/onflow/atree-internal/pull/7 +func TestInterpretIterateReadOnlyLoadedWithSomeValueChildren(t *testing.T) { + t.Parallel() + + owner := common.Address{'A'} + + const storageMapKey = interpreter.StringStorageMapKey("value") + + writeValue := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + value interpreter.Value, + ) { + value = value.Transfer( + inter, + interpreter.EmptyLocationRange, + atree.Address(owner), + false, + nil, + nil, + // TODO: is has no parent container = true correct? + true, + ) + + // Write the value to the storage map. + // However, the value is not referenced by the root of the storage yet + // (a storage map), so atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + withoutAtreeStorageValidationEnabled( + inter, + func() struct{} { + inter.Storage(). 
+ GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + true, + ). + WriteValue( + inter, + storageMapKey, + value, + ) + + return struct{}{} + }, + ) + } + + readValue := func( + t *testing.T, + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) interpreter.Value { + storageMap := inter.Storage().GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + false, + ) + require.NotNil(t, storageMap) + + readValue := storageMap.ReadValue(inter, storageMapKey) + require.NotNil(t, readValue) + + return readValue + } + + t.Run("dictionary", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + var cadenceRootPairs []cadence.KeyValuePair + + const expectedRootCount = 10 + const expectedInnerCount = 100 + + for i := 0; i < expectedRootCount; i++ { + var cadenceInnerPairs []cadence.KeyValuePair + + for j := 0; j < expectedInnerCount; j++ { + cadenceInnerPairs = append( + cadenceInnerPairs, + cadence.KeyValuePair{ + Key: cadence.NewInt(j), + Value: cadence.String(strings.Repeat("cadence", 1000)), + }, + ) + } + + cadenceRootPairs = append( + cadenceRootPairs, + cadence.KeyValuePair{ + Key: cadence.NewInt(i), + Value: cadence.NewOptional( + cadence.NewDictionary(cadenceInnerPairs), + ), + }, + ) + } + + cadenceRootDictionary := cadence.NewDictionary(cadenceRootPairs) + + rootDictionary := importValue(t, inter, cadenceRootDictionary).(*interpreter.DictionaryValue) + + // Check that the inner dictionaries are not inlined. + // If the test fails here, adjust the value generation code above + // to ensure that the inner dictionaries are not inlined. 
+ + rootDictionary.Iterate( + inter, + interpreter.EmptyLocationRange, + func(key, value interpreter.Value) (resume bool) { + + require.IsType(t, &interpreter.SomeValue{}, value) + someValue := value.(*interpreter.SomeValue) + + innerValue := someValue.InnerValue(inter, interpreter.EmptyLocationRange) + + require.IsType(t, &interpreter.DictionaryValue{}, innerValue) + innerDictionary := innerValue.(*interpreter.DictionaryValue) + require.False(t, innerDictionary.Inlined()) + + // continue iteration + return true + }, + ) + + writeValue( + inter, + owner, + storageMapKey, + rootDictionary, + ) + + resetStorage() + + rootDictionary = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.DictionaryValue) + + var iterations int + rootDictionary.IterateReadOnlyLoaded( + inter, + interpreter.EmptyLocationRange, + func(_, _ interpreter.Value) (resume bool) { + iterations += 1 + + // continue iteration + return true + }, + ) + + require.Equal(t, 0, iterations) + + iterations = 0 + rootDictionary.Iterate( + inter, + interpreter.EmptyLocationRange, + func(_, _ interpreter.Value) (resume bool) { + iterations += 1 + + // continue iteration + return true + }, + ) + + require.Equal(t, expectedRootCount, iterations) + }) + + t.Run("array", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + var cadenceRootElements []cadence.Value + + const expectedRootCount = 10 + const expectedInnerCount = 100 + + for i := 0; i < expectedRootCount; i++ { + var cadenceInnerElements []cadence.Value + + for j := 0; j < expectedInnerCount; j++ { + cadenceInnerElements = append( + cadenceInnerElements, + cadence.String(strings.Repeat("cadence", 1000)), + ) + } + + cadenceRootElements = append( + cadenceRootElements, + cadence.NewOptional( + cadence.NewArray(cadenceInnerElements), + ), + ) + } + + cadenceRootArray := cadence.NewArray(cadenceRootElements) + + rootArray := importValue(t, inter, cadenceRootArray).(*interpreter.ArrayValue) + + 
// Check that the inner arrays are not inlined. + // If the test fails here, adjust the value generation code above + // to ensure that the inner arrays are not inlined. + + rootArray.Iterate( + inter, + func(value interpreter.Value) (resume bool) { + + require.IsType(t, &interpreter.SomeValue{}, value) + someValue := value.(*interpreter.SomeValue) + + innerValue := someValue.InnerValue(inter, interpreter.EmptyLocationRange) + + require.IsType(t, &interpreter.ArrayValue{}, innerValue) + innerArray := innerValue.(*interpreter.ArrayValue) + require.False(t, innerArray.Inlined()) + + // continue iteration + return true + }, + false, + interpreter.EmptyLocationRange, + ) + + writeValue( + inter, + owner, + storageMapKey, + rootArray, + ) + + resetStorage() + + rootArray = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.ArrayValue) + + var iterations int + rootArray.IterateReadOnlyLoaded( + inter, + func(_ interpreter.Value) (resume bool) { + iterations += 1 + + // continue iteration + return true + }, + interpreter.EmptyLocationRange, + ) + + require.Equal(t, 0, iterations) + + iterations = 0 + + rootArray.Iterate( + inter, + func(_ interpreter.Value) (resume bool) { + iterations += 1 + + // continue iteration + return true + }, + false, + interpreter.EmptyLocationRange, + ) + + require.Equal(t, expectedRootCount, iterations) + }) + + t.Run("composite", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + newCadenceType := func(fieldCount int) *cadence.StructType { + typeIdentifier := fmt.Sprintf("S%d", fieldCount) + + typeLocation := common.AddressLocation{ + Address: owner, + Name: typeIdentifier, + } + + fieldNames := make([]string, 0, fieldCount) + for i := 0; i < fieldCount; i++ { + fieldName := fmt.Sprintf("field%d", i) + fieldNames = append(fieldNames, fieldName) + } + + cadenceFields := make([]cadence.Field, 0, fieldCount) + for _, fieldName := range fieldNames { + cadenceFields = append( + 
cadenceFields, + cadence.Field{ + Identifier: fieldName, + Type: cadence.AnyStructType, + }, + ) + } + + structType := cadence.NewStructType( + typeLocation, + typeIdentifier, + cadenceFields, + nil, + ) + + compositeType := &sema.CompositeType{ + Location: typeLocation, + Identifier: typeIdentifier, + Kind: common.CompositeKindStructure, + Members: &sema.StringMemberOrderedMap{}, + Fields: fieldNames, + } + + for _, fieldName := range fieldNames { + compositeType.Members.Set( + fieldName, + sema.NewUnmeteredPublicConstantFieldMember( + compositeType, + fieldName, + sema.AnyStructType, + "", + ), + ) + } + + // Add the type to the elaboration, to short-circuit the type-lookup. + inter.Program.Elaboration.SetCompositeType( + compositeType.ID(), + compositeType, + ) + + return structType + } + + var cadenceRootValues []cadence.Value + + const expectedRootCount = 10 + const expectedInnerCount = 100 + + rootStructType := newCadenceType(expectedRootCount) + innerStructType := newCadenceType(expectedInnerCount) + + for i := 0; i < expectedRootCount; i++ { + var cadenceInnerValues []cadence.Value + + for j := 0; j < expectedInnerCount; j++ { + cadenceInnerValues = append( + cadenceInnerValues, + cadence.String(strings.Repeat("cadence", 1000)), + ) + } + + cadenceRootValues = append( + cadenceRootValues, + cadence.NewOptional( + cadence.NewStruct(cadenceInnerValues). + WithType(innerStructType), + ), + ) + } + + cadenceRootStruct := cadence.NewStruct(cadenceRootValues). + WithType(rootStructType) + + rootStruct := importValue(t, inter, cadenceRootStruct).(*interpreter.CompositeValue) + + // Check that the inner structs are not inlined. + // If the test fails here, adjust the value generation code above + // to ensure that the inner structs are not inlined. 
+ + rootStruct.ForEachField( + inter, + func(fieldName string, value interpreter.Value) (resume bool) { + + require.IsType(t, &interpreter.SomeValue{}, value) + someValue := value.(*interpreter.SomeValue) + + innerValue := someValue.InnerValue(inter, interpreter.EmptyLocationRange) + + require.IsType(t, &interpreter.CompositeValue{}, innerValue) + innerStruct := innerValue.(*interpreter.CompositeValue) + require.False(t, innerStruct.Inlined()) + + // continue iteration + return true + }, + interpreter.EmptyLocationRange, + ) + + writeValue( + inter, + owner, + storageMapKey, + rootStruct, + ) + + resetStorage() + + rootStruct = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.CompositeValue) + + var iterations int + rootStruct.ForEachReadOnlyLoadedField( + inter, + func(_ string, _ interpreter.Value) (resume bool) { + iterations += 1 + + // continue iteration + return true + }, + interpreter.EmptyLocationRange, + ) + + require.Equal(t, 0, iterations) + + iterations = 0 + rootStruct.ForEachField( + inter, + func(_ string, _ interpreter.Value) (resume bool) { + iterations += 1 + + // continue iteration + return true + }, + interpreter.EmptyLocationRange, + ) + + require.Equal(t, expectedRootCount, iterations) + }) +} + +func TestInterpretNestedAtreeContainerInSomeValueStorableTracking(t *testing.T) { + t.Parallel() + + owner := common.Address{'A'} + + const storageMapKey = interpreter.StringStorageMapKey("value") + + writeValue := func( + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + value interpreter.Value, + ) { + value = value.Transfer( + inter, + interpreter.EmptyLocationRange, + atree.Address(owner), + false, + nil, + nil, + // TODO: is has no parent container = true correct? + true, + ) + + // Write the value to the storage map. 
+ // However, the value is not referenced by the root of the storage yet + // (a storage map), so atree storage validation must be temporarily disabled + // to not report any "unreferenced slab" errors. + withoutAtreeStorageValidationEnabled( + inter, + func() struct{} { + inter.Storage(). + GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + true, + ). + WriteValue( + inter, + storageMapKey, + value, + ) + + return struct{}{} + }, + ) + } + + readValue := func( + t *testing.T, + inter *interpreter.Interpreter, + owner common.Address, + storageMapKey interpreter.StorageMapKey, + ) interpreter.Value { + storageMap := inter.Storage().GetDomainStorageMap( + inter, + owner, + common.StorageDomainPathStorage, + false, + ) + require.NotNil(t, storageMap) + + readValue := storageMap.ReadValue(inter, storageMapKey) + require.NotNil(t, readValue) + + return readValue + } + + t.Run("dictionary (inlined -> uninlined -> inlined)", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + // Start with an empty dictionary + + cadenceChildDictionary := cadence.NewDictionary(nil) + + cadenceRootOptionalValue := cadence.NewOptional(cadenceChildDictionary) + + rootSomeValue := importValue(t, inter, cadenceRootOptionalValue).(*interpreter.SomeValue) + + writeValue( + inter, + owner, + storageMapKey, + rootSomeValue, + ) + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + // Fill the dictionary until it becomes uninlined + + childDictionary := rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.DictionaryValue) + + require.True(t, childDictionary.Inlined()) + + for i := 0; childDictionary.Inlined(); i++ { + childDictionary.Insert( + inter, + interpreter.EmptyLocationRange, + interpreter.NewUnmeteredStringValue(strconv.Itoa(i)), + interpreter.NewUnmeteredIntValueFromInt64(int64(i)), + ) + } + + require.False(t, 
childDictionary.Inlined()) + + uninlinedCount := childDictionary.Count() + + // Verify the contents of the dictionary + + childDictionary = rootSomeValue.InnerValue(inter, interpreter.EmptyLocationRange).(*interpreter.DictionaryValue) + + verify := func(count int) { + require.Equal(t, count, childDictionary.Count()) + + for i := 0; i < count; i++ { + key := interpreter.NewUnmeteredStringValue(strconv.Itoa(i)) + value, exists := childDictionary.Get( + inter, + interpreter.EmptyLocationRange, + key, + ) + require.True(t, exists) + expectedValue := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + AssertValuesEqual(t, inter, expectedValue, value) + } + } + + verify(uninlinedCount) + + // Remove the last element to make the dictionary inlined again + + inlinedCount := uninlinedCount - 1 + + existingValue := childDictionary.Remove( + inter, + interpreter.EmptyLocationRange, + interpreter.NewUnmeteredStringValue(strconv.Itoa(inlinedCount)), + ) + require.IsType(t, &interpreter.SomeValue{}, existingValue) + + require.True(t, childDictionary.Inlined()) + + // Verify the contents of the dictionary again + + verify(inlinedCount) + + // Add a new element to make the dictionary uninlined again + + childDictionary.Insert( + inter, + interpreter.EmptyLocationRange, + interpreter.NewUnmeteredStringValue(strconv.Itoa(inlinedCount)), + interpreter.NewUnmeteredIntValueFromInt64(int64(inlinedCount)), + ) + + require.False(t, childDictionary.Inlined()) + + // Verify the contents of the dictionary again + + verify(uninlinedCount) + + // Remove all elements + + for i := 0; i < uninlinedCount; i++ { + existingValue := childDictionary.Remove( + inter, + interpreter.EmptyLocationRange, + interpreter.NewUnmeteredStringValue(strconv.Itoa(i)), + ) + require.IsType(t, &interpreter.SomeValue{}, existingValue) + } + + require.Equal(t, 0, childDictionary.Count()) + require.True(t, childDictionary.Inlined()) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, 
err) + } + + // Validate after storage reset and reload of root value + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + childDictionary = rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.DictionaryValue) + + require.Equal(t, 0, childDictionary.Count()) + require.True(t, childDictionary.Inlined()) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + }) + + t.Run("dictionary (uninlined -> inlined -> uninlined)", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + // Start with a large dictionary which will get uninlined + + var cadenceChildPairs []cadence.KeyValuePair + + for i := 0; i < 1000; i++ { + cadenceChildPairs = append( + cadenceChildPairs, + cadence.KeyValuePair{ + Key: cadence.String(strconv.Itoa(i)), + Value: cadence.NewInt(i), + }, + ) + } + + cadenceChildDictionary := cadence.NewDictionary(cadenceChildPairs) + + cadenceRootOptionalValue := cadence.NewOptional(cadenceChildDictionary) + + rootSomeValue := importValue(t, inter, cadenceRootOptionalValue).(*interpreter.SomeValue) + + writeValue( + inter, + owner, + storageMapKey, + rootSomeValue, + ) + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + childDictionary := rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.DictionaryValue) + + // Check that the inner dictionary is not inlined. + // If the test fails here, adjust the value generation code above + // to ensure that the inner dictionary is not inlined. 
+ + require.False(t, childDictionary.Inlined()) + + // Verify the contents of the dictionary + + inlinedCount := childDictionary.Count() + + // Verify the contents of the dictionary + + verify := func(count int) { + require.Equal(t, count, childDictionary.Count()) + + for i := 0; i < count; i++ { + key := interpreter.NewUnmeteredStringValue(strconv.Itoa(i)) + value, exists := childDictionary.Get( + inter, + interpreter.EmptyLocationRange, + key, + ) + require.True(t, exists) + expectedValue := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + AssertValuesEqual(t, inter, expectedValue, value) + } + } + + verify(inlinedCount) + + // Remove elements until the dictionary is inlined + + for i := inlinedCount - 1; !childDictionary.Inlined(); i-- { + existingValue := childDictionary.Remove( + inter, + interpreter.EmptyLocationRange, + interpreter.NewUnmeteredStringValue(strconv.Itoa(i)), + ) + + require.IsType(t, &interpreter.SomeValue{}, existingValue) + existingSomeValue := existingValue.(*interpreter.SomeValue) + + existingInnerValue := existingSomeValue.InnerValue(inter, interpreter.EmptyLocationRange) + expectedValue := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + AssertValuesEqual(t, inter, expectedValue, existingInnerValue) + + } + + inlinedCount = childDictionary.Count() + + require.True(t, childDictionary.Inlined()) + + // Verify the contents of the dictionary again + + verify(inlinedCount) + + // Add element to make the dictionary uninlined again + + childDictionary.Insert( + inter, + interpreter.EmptyLocationRange, + interpreter.NewUnmeteredStringValue(strconv.Itoa(inlinedCount)), + interpreter.NewUnmeteredIntValueFromInt64(int64(inlinedCount)), + ) + + require.False(t, childDictionary.Inlined()) + + // Verify the contents of the dictionary again + + uninlinedCount := inlinedCount + 1 + + verify(uninlinedCount) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // Validate after storage reset and reload 
of root value + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + childDictionary = rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.DictionaryValue) + + verify(uninlinedCount) + + require.False(t, childDictionary.Inlined()) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + }) + + t.Run("array (inlined -> uninlined -> inlined)", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + // Start with an empty array + + cadenceChildArray := cadence.NewArray(nil) + + cadenceRootOptionalValue := cadence.NewOptional(cadenceChildArray) + + rootSomeValue := importValue(t, inter, cadenceRootOptionalValue).(*interpreter.SomeValue) + + writeValue( + inter, + owner, + storageMapKey, + rootSomeValue, + ) + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + // Fill the array until it becomes uninlined + + childArray := rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.ArrayValue) + + require.True(t, childArray.Inlined()) + + for i := 0; childArray.Inlined(); i++ { + childArray.Append( + inter, + interpreter.EmptyLocationRange, + interpreter.NewUnmeteredStringValue(strconv.Itoa(i)), + ) + } + + require.False(t, childArray.Inlined()) + + uninlinedCount := childArray.Count() + + // Verify the contents of the array + + childArray = rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.ArrayValue) + + verify := func(count int) { + require.Equal(t, count, childArray.Count()) + + for i := 0; i < count; i++ { + value := childArray.Get(inter, interpreter.EmptyLocationRange, i) + expectedValue := interpreter.NewUnmeteredStringValue(strconv.Itoa(i)) + AssertValuesEqual(t, inter, expectedValue, value) + } + } + + verify(uninlinedCount) + + // Remove the last 
element to make the array inlined again + + inlinedCount := uninlinedCount - 1 + + childArray.Remove( + inter, + interpreter.EmptyLocationRange, + inlinedCount, + ) + + require.True(t, childArray.Inlined()) + + // Verify the contents of the array again + + verify(inlinedCount) + + // Add a new element to make the array uninlined again + + childArray.Append( + inter, + interpreter.EmptyLocationRange, + interpreter.NewUnmeteredStringValue(strconv.Itoa(inlinedCount)), + ) + + require.False(t, childArray.Inlined()) + + // Verify the contents of the array again + + verify(uninlinedCount) + + // Remove all elements + + for i := uninlinedCount - 1; i >= 0; i-- { + childArray.Remove( + inter, + interpreter.EmptyLocationRange, + i, + ) + } + + require.Equal(t, 0, childArray.Count()) + require.True(t, childArray.Inlined()) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // Validate after storage reset and reload of root value + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + childArray = rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.ArrayValue) + + require.Equal(t, 0, childArray.Count()) + require.True(t, childArray.Inlined()) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + }) + + t.Run("array (uninlined -> inlined -> uninlined)", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + // Start with a large array which will get uninlined + + var cadenceChildElements []cadence.Value + + for i := 0; i < 1000; i++ { + cadenceChildElements = append( + cadenceChildElements, + cadence.String(strconv.Itoa(i)), + ) + } + + cadenceChildArray := cadence.NewArray(cadenceChildElements) + + cadenceRootOptionalValue := cadence.NewOptional(cadenceChildArray) + + rootSomeValue := importValue(t, inter, 
cadenceRootOptionalValue).(*interpreter.SomeValue) + + writeValue( + inter, + owner, + storageMapKey, + rootSomeValue, + ) + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + childArray := rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.ArrayValue) + + // Check that the inner array is not inlined. + // If the test fails here, adjust the value generation code above + // to ensure that the inner array is not inlined. + + require.False(t, childArray.Inlined()) + + // Verify the contents of the array + + inlinedCount := childArray.Count() + + // Verify the contents of the array + + verify := func(count int) { + require.Equal(t, count, childArray.Count()) + + for i := 0; i < count; i++ { + value := childArray.Get(inter, interpreter.EmptyLocationRange, i) + expectedValue := interpreter.NewUnmeteredStringValue(strconv.Itoa(i)) + AssertValuesEqual(t, inter, expectedValue, value) + } + } + + verify(inlinedCount) + + // Remove elements until the array is inlined + + for i := inlinedCount - 1; !childArray.Inlined(); i-- { + existingValue := childArray.Remove( + inter, + interpreter.EmptyLocationRange, + i, + ) + expectedValue := interpreter.NewUnmeteredStringValue(strconv.Itoa(i)) + AssertValuesEqual(t, inter, expectedValue, existingValue) + } + + inlinedCount = childArray.Count() + + require.True(t, childArray.Inlined()) + + // Verify the contents of the array again + + verify(inlinedCount) + + // Add element to make the array uninlined again + + childArray.Append( + inter, + interpreter.EmptyLocationRange, + interpreter.NewUnmeteredStringValue(strconv.Itoa(inlinedCount)), + ) + + require.False(t, childArray.Inlined()) + + // Verify the contents of the array again + + uninlinedCount := inlinedCount + 1 + + verify(uninlinedCount) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // Validate after storage reset and reload 
of root value + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + childArray = rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.ArrayValue) + + verify(uninlinedCount) + + require.False(t, childArray.Inlined()) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + }) + + t.Run("composite (inlined -> uninlined -> inlined)", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + // Start with an empty composite + + const qualifiedIdentifier = "Test" + location := common.AddressLocation{ + Address: owner, + Name: qualifiedIdentifier, + } + + cadenceStructType := cadence.NewStructType( + location, + qualifiedIdentifier, + nil, + nil, + ) + + semaStructType := &sema.CompositeType{ + Location: location, + Identifier: qualifiedIdentifier, + Kind: common.CompositeKindStructure, + Members: &sema.StringMemberOrderedMap{}, + } + + // Add the type to the elaboration, to short-circuit the type-lookup. 
+ inter.Program.Elaboration.SetCompositeType( + semaStructType.ID(), + semaStructType, + ) + + cadenceChildComposite := cadence.NewStruct(nil).WithType(cadenceStructType) + + cadenceRootOptionalValue := cadence.NewOptional(cadenceChildComposite) + + rootSomeValue := importValue(t, inter, cadenceRootOptionalValue).(*interpreter.SomeValue) + + writeValue( + inter, + owner, + storageMapKey, + rootSomeValue, + ) + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + // Fill the composite until it becomes uninlined + + childComposite := rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.CompositeValue) + + require.True(t, childComposite.Inlined()) + + for i := 0; childComposite.Inlined(); i++ { + childComposite.SetMember( + inter, + interpreter.EmptyLocationRange, + strconv.Itoa(i), + interpreter.NewUnmeteredIntValueFromInt64(int64(i)), + ) + } + + require.False(t, childComposite.Inlined()) + + uninlinedCount := childComposite.FieldCount() + + // Verify the contents of the composite + + childComposite = rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.CompositeValue) + + verify := func(count int) { + require.Equal(t, count, childComposite.FieldCount()) + + for i := 0; i < count; i++ { + value := childComposite.GetMember( + inter, + interpreter.EmptyLocationRange, + strconv.Itoa(i), + ) + expectedValue := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + AssertValuesEqual(t, inter, expectedValue, value) + } + } + + verify(uninlinedCount) + + // Remove the last element to make the composite inlined again + + inlinedCount := uninlinedCount - 1 + + childComposite.RemoveMember( + inter, + interpreter.EmptyLocationRange, + strconv.Itoa(inlinedCount), + ) + + require.True(t, childComposite.Inlined()) - owner := common.Address{'A'} + // Verify the contents of the composite again - // Create a small child array which will be inlined in 
parent container. - childArray1 := interpreter.NewArrayValue( - inter, - interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, - owner, - interpreter.NewUnmeteredStringValue("a"), - ) + verify(inlinedCount) - size := int(atree.MaxInlineArrayElementSize()) - 10 + // Add a new element to make the composite uninlined again - // Create a large child array which will NOT be inlined in parent container. - childArray2 := interpreter.NewArrayValue( - inter, - interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, - owner, - interpreter.NewUnmeteredStringValue(strings.Repeat("b", size)), - interpreter.NewUnmeteredStringValue(strings.Repeat("c", size)), - ) + childComposite.SetMember( + inter, + interpreter.EmptyLocationRange, + strconv.Itoa(inlinedCount), + interpreter.NewUnmeteredIntValueFromInt64(int64(inlinedCount)), + ) - // Create an array with childArray1 and childArray2. - array := interpreter.NewArrayValue( - inter, - interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, - owner, - childArray1, // inlined - childArray2, // not inlined - ) + require.False(t, childComposite.Inlined()) - // DeepRemove removes all elements (childArray1 and childArray2) recursively in array. - array.DeepRemove(inter, true) + // Verify the contents of the composite again - // As noted earlier in comments at the top of this test: - // storage.CheckHealth() should be called after array.DeepRemove(), not in the middle of array.DeepRemove(). - // This happens when: - // - array.DeepRemove() calls childArray1 and childArray2 DeepRemove() - // - DeepRemove() calls maybeValidateAtreeValue() - // - maybeValidateAtreeValue() calls CheckHealth() -} + verify(uninlinedCount) -// This test is a reproducer for "slab was not reachable from leaves" false alarm. 
-// https://github.com/onflow/cadence/pull/2882#issuecomment-1796381227 -// In this test, storage.CheckHealth() should be called after DictionaryValue.Transfer() -// with remove flag, not in the middle of DictionaryValue.Transfer(). -func TestCheckStorageHealthInMiddleOfTransferAndRemove(t *testing.T) { - r := newRandomValueGenerator() - t.Logf("seed: %d", r.seed) + // Remove all elements - storage := newUnmeteredInMemoryStorage() - inter, err := interpreter.NewInterpreter( - &interpreter.Program{ - Program: ast.NewProgram(nil, []ast.Declaration{}), - Elaboration: sema.NewElaboration(nil), - }, - TestLocation, - &interpreter.Config{ - Storage: storage, - ImportLocationHandler: func(inter *interpreter.Interpreter, location common.Location) interpreter.Import { - return interpreter.VirtualImport{ - Elaboration: inter.Program.Elaboration, - } - }, - AtreeStorageValidationEnabled: true, - AtreeValueValidationEnabled: true, - }, - ) - require.NoError(t, err) + for i := 0; i < uninlinedCount; i++ { + childComposite.RemoveMember( + inter, + interpreter.EmptyLocationRange, + strconv.Itoa(i), + ) + } - // Create large array value with zero address which will not be inlined. - gchildArray := interpreter.NewArrayValue( - inter, - interpreter.EmptyLocationRange, - &interpreter.VariableSizedStaticType{ - Type: interpreter.PrimitiveStaticTypeAnyStruct, - }, - common.ZeroAddress, - interpreter.NewUnmeteredStringValue(strings.Repeat("b", int(atree.MaxInlineArrayElementSize())-10)), - interpreter.NewUnmeteredStringValue(strings.Repeat("c", int(atree.MaxInlineArrayElementSize())-10)), - ) + require.Equal(t, 0, childComposite.FieldCount()) + require.True(t, childComposite.Inlined()) - // Create small composite value with zero address which will be inlined. 
- identifier := "test" + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } - location := common.AddressLocation{ - Address: common.ZeroAddress, - Name: identifier, - } + // Validate after storage reset and reload of root value - compositeType := &sema.CompositeType{ - Location: location, - Identifier: identifier, - Kind: common.CompositeKindStructure, - } + resetStorage() - fields := []interpreter.CompositeField{ - interpreter.NewUnmeteredCompositeField("a", interpreter.NewUnmeteredUInt64Value(0)), - interpreter.NewUnmeteredCompositeField("b", interpreter.NewUnmeteredUInt64Value(1)), - interpreter.NewUnmeteredCompositeField("c", interpreter.NewUnmeteredUInt64Value(2)), - } + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) - compositeType.Members = &sema.StringMemberOrderedMap{} - for _, field := range fields { - compositeType.Members.Set( - field.Name, - sema.NewUnmeteredPublicConstantFieldMember( - compositeType, - field.Name, - sema.AnyStructType, - "", - ), + childComposite = rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.CompositeValue) + + require.Equal(t, 0, childComposite.FieldCount()) + require.True(t, childComposite.Inlined()) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + }) + + t.Run("composite (uninlined -> inlined -> uninlined)", func(t *testing.T) { + t.Parallel() + + inter, resetStorage := newRandomValueTestInterpreter(t) + + // Start with a large composite which will get uninlined + + const qualifiedIdentifier = "Test" + location := common.AddressLocation{ + Address: owner, + Name: qualifiedIdentifier, + } + + const fieldCount = 1000 + + fields := make([]cadence.Field, fieldCount) + for i := 0; i < fieldCount; i++ { + fields[i] = cadence.Field{ + Identifier: strconv.Itoa(i), + Type: cadence.IntType, + } + } + + cadenceStructType := cadence.NewStructType( + location, + 
qualifiedIdentifier, + fields, + nil, ) - } - // Add the type to the elaboration, to short-circuit the type-lookup. - inter.Program.Elaboration.SetCompositeType( - compositeType.ID(), - compositeType, - ) + semaStructType := &sema.CompositeType{ + Location: location, + Identifier: qualifiedIdentifier, + Kind: common.CompositeKindStructure, + Members: &sema.StringMemberOrderedMap{}, + } - gchildComposite := interpreter.NewCompositeValue( - inter, - interpreter.EmptyLocationRange, - location, - identifier, - common.CompositeKindStructure, - fields, - common.ZeroAddress, - ) + // Add the type to the elaboration, to short-circuit the type-lookup. + inter.Program.Elaboration.SetCompositeType( + semaStructType.ID(), + semaStructType, + ) + fieldNames := make([]string, fieldCount) + + for i := 0; i < fieldCount; i++ { + fieldName := fields[i].Identifier + semaStructType.Members.Set( + fieldName, + sema.NewUnmeteredPublicConstantFieldMember( + semaStructType, + fieldName, + sema.IntType, + "", + ), + ) + fieldNames[i] = fieldName + } + semaStructType.Fields = fieldNames - // Create large dictionary with zero address with 2 data slabs containing: - // - SomeValue(SlabID) as first physical element in the first data slab - // - inlined CompositeValue as last physical element in the second data slab + var cadenceChildElements []cadence.Value - numberOfValues := 10 - firstElementIndex := 7 // index of first physical element in the first data slab - lastElementIndex := 8 // index of last physical element in the last data slab - keyValues := make([]interpreter.Value, numberOfValues*2) - for i := 0; i < numberOfValues; i++ { - key := interpreter.NewUnmeteredUInt64Value(uint64(i)) + for i := 0; i < fieldCount; i++ { + cadenceChildElements = append( + cadenceChildElements, + cadence.NewInt(i), + ) - var value interpreter.Value - switch i { - case firstElementIndex: - value = interpreter.NewUnmeteredSomeValueNonCopying(gchildArray) + } - case lastElementIndex: - value = 
gchildComposite + cadenceChildComposite := cadence.NewStruct(cadenceChildElements). + WithType(cadenceStructType) - default: - // Other values are inlined random strings. - const size = 235 - value = interpreter.NewUnmeteredStringValue(r.randomUTF8StringOfSize(size)) + cadenceRootOptionalValue := cadence.NewOptional(cadenceChildComposite) + + rootSomeValue := importValue(t, inter, cadenceRootOptionalValue).(*interpreter.SomeValue) + + writeValue( + inter, + owner, + storageMapKey, + rootSomeValue, + ) + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + childComposite := rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.CompositeValue) + + // Check that the inner composite is not inlined. + // If the test fails here, adjust the value generation code above + // to ensure that the inner composite is not inlined. + + require.False(t, childComposite.Inlined()) + + // Verify the contents of the composite + + inlinedCount := childComposite.FieldCount() + + // Verify the contents of the composite + + verify := func(count int) { + require.Equal(t, count, childComposite.FieldCount()) + + for i := 0; i < count; i++ { + value := childComposite.GetMember( + inter, + interpreter.EmptyLocationRange, + strconv.Itoa(i), + ) + expectedValue := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + AssertValuesEqual(t, inter, expectedValue, value) + } } - keyValues[i*2] = key - keyValues[i*2+1] = value - } + verify(inlinedCount) - childMap := interpreter.NewDictionaryValueWithAddress( - inter, - interpreter.EmptyLocationRange, - &interpreter.DictionaryStaticType{ - KeyType: interpreter.PrimitiveStaticTypeAnyStruct, - ValueType: interpreter.PrimitiveStaticTypeAnyStruct, - }, - common.ZeroAddress, - keyValues..., - ) + // Remove elements until the composite is inlined - // Create dictionary with non-zero address containing child dictionary. 
- owner := common.Address{'A'} - m := interpreter.NewDictionaryValueWithAddress( - inter, - interpreter.EmptyLocationRange, - &interpreter.DictionaryStaticType{ - KeyType: interpreter.PrimitiveStaticTypeAnyStruct, - ValueType: interpreter.PrimitiveStaticTypeAnyStruct, - }, - owner, - interpreter.NewUnmeteredUInt64Value(0), - childMap, - ) + for i := inlinedCount - 1; !childComposite.Inlined(); i-- { + existingValue := childComposite.RemoveMember( + inter, + interpreter.EmptyLocationRange, + strconv.Itoa(i), + ) - inter.ValidateAtreeValue(m) + expectedValue := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + AssertValuesEqual(t, inter, expectedValue, existingValue) - require.NoError(t, storage.CheckHealth()) + } + + inlinedCount = childComposite.FieldCount() + + require.True(t, childComposite.Inlined()) + + // Verify the contents of the composite again + + verify(inlinedCount) + + // Add element to make the composite uninlined again + + childComposite.SetMember( + inter, + interpreter.EmptyLocationRange, + strconv.Itoa(inlinedCount), + interpreter.NewUnmeteredIntValueFromInt64(int64(inlinedCount)), + ) + + require.False(t, childComposite.Inlined()) + + // Verify the contents of the composite again + + uninlinedCount := inlinedCount + 1 + + verify(uninlinedCount) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + + // Validate after storage reset and reload of root value + + resetStorage() + + rootSomeValue = readValue( + t, + inter, + owner, + storageMapKey, + ).(*interpreter.SomeValue) + + childComposite = rootSomeValue.InnerValue( + inter, + interpreter.EmptyLocationRange, + ).(*interpreter.CompositeValue) + + verify(uninlinedCount) + + require.False(t, childComposite.Inlined()) + + if *validateAtree { + err := inter.Storage().CheckHealth() + require.NoError(t, err) + } + }) } diff --git a/npm-packages/cadence-parser/package.json b/npm-packages/cadence-parser/package.json index b82eeda922..8ddc977ba4 100644 --- 
a/npm-packages/cadence-parser/package.json +++ b/npm-packages/cadence-parser/package.json @@ -1,6 +1,6 @@ { "name": "@onflow/cadence-parser", - "version": "1.2.2", + "version": "1.3.1", "description": "The Cadence parser", "homepage": "https://github.com/onflow/cadence", "repository": { diff --git a/old_parser/declaration.go b/old_parser/declaration.go index ac34e98799..aeb704375a 100644 --- a/old_parser/declaration.go +++ b/old_parser/declaration.go @@ -747,7 +747,10 @@ func parseHexadecimalLocation(p *parser) common.AddressLocation { address, err := common.BytesToAddress(rawAddress) if err != nil { // Any returned error is a syntax error. e.g: Address too large error. - p.reportSyntaxError(err.Error()) + p.report(&SyntaxError{ + Pos: p.current.StartPos, + Message: err.Error(), + }) } return common.NewAddressLocation(p.memoryGauge, address, "") diff --git a/old_parser/parser.go b/old_parser/parser.go index 34e19a400c..952d0418bc 100644 --- a/old_parser/parser.go +++ b/old_parser/parser.go @@ -243,10 +243,10 @@ func (p *parser) next() { } parseError, ok := err.(ParseError) if !ok { - parseError = NewSyntaxError( - token.StartPos, - err.Error(), - ) + parseError = &SyntaxError{ + Pos: token.StartPos, + Message: err.Error(), + } } p.report(parseError) continue diff --git a/parser/declaration.go b/parser/declaration.go index 511bb9ecd7..809490cd07 100644 --- a/parser/declaration.go +++ b/parser/declaration.go @@ -910,7 +910,10 @@ func parseHexadecimalLocation(p *parser) common.AddressLocation { address, err := common.BytesToAddress(rawAddress) if err != nil { // Any returned error is a syntax error. e.g: Address too large error. 
- p.reportSyntaxError(err.Error()) + p.report(&SyntaxError{ + Pos: p.current.StartPos, + Message: err.Error(), + }) } return common.NewAddressLocation(p.memoryGauge, address, "") diff --git a/parser/expression.go b/parser/expression.go index 8499f95e60..83b11d616b 100644 --- a/parser/expression.go +++ b/parser/expression.go @@ -1191,10 +1191,6 @@ func defineStringExpression() { if err != nil { return nil, err } - // limit string templates to identifiers only - if _, ok := value.(*ast.IdentifierExpression); !ok { - return nil, p.syntaxError("expected identifier got: %s", value.String()) - } _, err = p.mustOne(lexer.TokenParenClose) if err != nil { return nil, err diff --git a/parser/expression_test.go b/parser/expression_test.go index 172884de18..566c9e39a2 100644 --- a/parser/expression_test.go +++ b/parser/expression_test.go @@ -6054,14 +6054,7 @@ func TestParseStringTemplate(t *testing.T) { "\(test)" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.NoError(t, err) + require.Empty(t, errs) expected := &ast.StringTemplateExpression{ Values: []string{ @@ -6093,14 +6086,7 @@ func TestParseStringTemplate(t *testing.T) { "this is a test \(abc)\(def) test" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.NoError(t, err) + require.Empty(t, errs) expected := &ast.StringTemplateExpression{ Values: []string{ @@ -6139,14 +6125,7 @@ func TestParseStringTemplate(t *testing.T) { "this is a test \(FOO) `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.Error(t, err) + require.NotEmpty(t, errs) AssertEqualWithDiff(t, []error{ &SyntaxError{ @@ -6166,14 +6145,7 @@ func TestParseStringTemplate(t *testing.T) { "\(.)" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.Error(t, err) + require.NotEmpty(t, errs) AssertEqualWithDiff(t, []error{ &SyntaxError{ @@ -6185,7 +6157,7 @@ func TestParseStringTemplate(t *testing.T) { ) 
}) - t.Run("invalid, num", func(t *testing.T) { + t.Run("valid, num", func(t *testing.T) { t.Parallel() @@ -6193,23 +6165,7 @@ func TestParseStringTemplate(t *testing.T) { "\(2 + 2) is a" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.Error(t, err) - AssertEqualWithDiff(t, - []error{ - &SyntaxError{ - Message: "expected identifier got: 2 + 2", - Pos: ast.Position{Offset: 13, Line: 2, Column: 12}, - }, - }, - errs, - ) + require.Empty(t, errs) }) t.Run("valid, nested identifier", func(t *testing.T) { @@ -6220,14 +6176,7 @@ func TestParseStringTemplate(t *testing.T) { "\((a))" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.NoError(t, err) + require.Empty(t, errs) expected := &ast.StringTemplateExpression{ Values: []string{ @@ -6259,14 +6208,7 @@ func TestParseStringTemplate(t *testing.T) { "\()" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.Error(t, err) + require.NotEmpty(t, errs) AssertEqualWithDiff(t, []error{ &SyntaxError{ @@ -6278,7 +6220,7 @@ func TestParseStringTemplate(t *testing.T) { ) }) - t.Run("invalid, function identifier", func(t *testing.T) { + t.Run("valid, function identifier", func(t *testing.T) { t.Parallel() @@ -6286,46 +6228,43 @@ func TestParseStringTemplate(t *testing.T) { "\(add())" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } + require.Empty(t, errs) + }) - require.Error(t, err) + t.Run("invalid, missing paren", func(t *testing.T) { + + t.Parallel() + + _, errs := testParseExpression(` + "\(add" + `) + + require.NotEmpty(t, errs) AssertEqualWithDiff(t, []error{ &SyntaxError{ - Message: "expected identifier got: add()", - Pos: ast.Position{Offset: 12, Line: 2, Column: 11}, + Message: "expected token ')'", + Pos: ast.Position{Offset: 10, Line: 2, Column: 9}, }, }, errs, ) }) - t.Run("invalid, unbalanced paren", func(t *testing.T) { + t.Run("invalid, nested expression paren", 
func(t *testing.T) { t.Parallel() _, errs := testParseExpression(` - "\(add" + "\((2+2)/2()" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.Error(t, err) + require.NotEmpty(t, errs) AssertEqualWithDiff(t, []error{ &SyntaxError{ Message: "expected token ')'", - Pos: ast.Position{Offset: 10, Line: 2, Column: 9}, + Pos: ast.Position{Offset: 16, Line: 2, Column: 15}, }, }, errs, @@ -6340,14 +6279,7 @@ func TestParseStringTemplate(t *testing.T) { "outer string \( "\(inner template)" )" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.Error(t, err) + require.NotEmpty(t, errs) AssertEqualWithDiff(t, []error{ &SyntaxError{ @@ -6367,14 +6299,7 @@ func TestParseStringTemplate(t *testing.T) { "a\(b)c" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.NoError(t, err) + require.Empty(t, errs) expected := &ast.StringTemplateExpression{ Values: []string{ @@ -6406,14 +6331,7 @@ func TestParseStringTemplate(t *testing.T) { "\(a)b\(c)" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.NoError(t, err) + require.Empty(t, errs) expected := &ast.StringTemplateExpression{ Values: []string{ @@ -6452,14 +6370,7 @@ func TestParseStringTemplate(t *testing.T) { "\(a)\(b)\(c)" `) - var err error - if len(errs) > 0 { - err = Error{ - Errors: errs, - } - } - - require.NoError(t, err) + require.Empty(t, errs) expected := &ast.StringTemplateExpression{ Values: []string{ @@ -6496,6 +6407,38 @@ func TestParseStringTemplate(t *testing.T) { AssertEqualWithDiff(t, expected, actual) }) + + t.Run("valid, extra closing paren", func(t *testing.T) { + + t.Parallel() + + actual, errs := testParseExpression(` + "\(a))" + `) + + require.Empty(t, errs) + + expected := &ast.StringTemplateExpression{ + Values: []string{ + "", + ")", + }, + Expressions: []ast.Expression{ + &ast.IdentifierExpression{ + Identifier: ast.Identifier{ + Identifier: "a", + 
Pos: ast.Position{Offset: 7, Line: 2, Column: 6}, + }, + }, + }, + Range: ast.Range{ + StartPos: ast.Position{Offset: 4, Line: 2, Column: 3}, + EndPos: ast.Position{Offset: 10, Line: 2, Column: 9}, + }, + } + + AssertEqualWithDiff(t, expected, actual) + }) } func TestParseNilCoalescing(t *testing.T) { diff --git a/parser/parser.go b/parser/parser.go index 741e98c78b..8d8b886afb 100644 --- a/parser/parser.go +++ b/parser/parser.go @@ -260,10 +260,10 @@ func (p *parser) next() { } parseError, ok := err.(ParseError) if !ok { - parseError = NewSyntaxError( - token.StartPos, - err.Error(), - ) + parseError = &SyntaxError{ + Pos: token.StartPos, + Message: err.Error(), + } } p.report(parseError) continue diff --git a/runtime/account_storage_v1.go b/runtime/account_storage_v1.go new file mode 100644 index 0000000000..53eec2fef2 --- /dev/null +++ b/runtime/account_storage_v1.go @@ -0,0 +1,212 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime + +import ( + "sort" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" +) + +type AccountStorageV1 struct { + ledger atree.Ledger + slabStorage atree.SlabStorage + memoryGauge common.MemoryGauge + + // newDomainStorageMapSlabIndices contains root slab indices of new domain storage maps. 
+ // The indices are saved using Ledger.SetValue() during commit(). + // Key is StorageDomainKey{common.StorageDomain, Address} and value is 8-byte slab index. + newDomainStorageMapSlabIndices map[interpreter.StorageDomainKey]atree.SlabIndex +} + +func NewAccountStorageV1( + ledger atree.Ledger, + slabStorage atree.SlabStorage, + memoryGauge common.MemoryGauge, +) *AccountStorageV1 { + return &AccountStorageV1{ + ledger: ledger, + slabStorage: slabStorage, + memoryGauge: memoryGauge, + } +} + +func (s *AccountStorageV1) GetDomainStorageMap( + address common.Address, + domain common.StorageDomain, + createIfNotExists bool, +) ( + domainStorageMap *interpreter.DomainStorageMap, +) { + var err error + domainStorageMap, err = getDomainStorageMapFromV1DomainRegister( + s.ledger, + s.slabStorage, + address, + domain, + ) + if err != nil { + panic(err) + } + + if domainStorageMap == nil && createIfNotExists { + domainStorageMap = s.storeNewDomainStorageMap(address, domain) + } + + return domainStorageMap +} + +func (s *AccountStorageV1) storeNewDomainStorageMap( + address common.Address, + domain common.StorageDomain, +) *interpreter.DomainStorageMap { + + domainStorageMap := interpreter.NewDomainStorageMap( + s.memoryGauge, + s.slabStorage, + atree.Address(address), + ) + + slabIndex := domainStorageMap.SlabID().Index() + + storageKey := interpreter.NewStorageDomainKey(s.memoryGauge, address, domain) + + if s.newDomainStorageMapSlabIndices == nil { + s.newDomainStorageMapSlabIndices = map[interpreter.StorageDomainKey]atree.SlabIndex{} + } + s.newDomainStorageMapSlabIndices[storageKey] = slabIndex + + return domainStorageMap +} + +func (s *AccountStorageV1) commit() error { + + switch len(s.newDomainStorageMapSlabIndices) { + case 0: + // Nothing to commit. + return nil + + case 1: + // Optimize for the common case of a single domain storage map. 
+ + var updated int + for storageDomainKey, slabIndex := range s.newDomainStorageMapSlabIndices { //nolint:maprange + if updated > 0 { + panic(errors.NewUnreachableError()) + } + + err := s.writeStorageDomainSlabIndex( + storageDomainKey, + slabIndex, + ) + if err != nil { + return err + } + + updated++ + } + + default: + // Sort the indices to ensure deterministic order + + type domainStorageMapSlabIndex struct { + StorageDomainKey interpreter.StorageDomainKey + SlabIndex atree.SlabIndex + } + + slabIndices := make([]domainStorageMapSlabIndex, 0, len(s.newDomainStorageMapSlabIndices)) + for storageDomainKey, slabIndex := range s.newDomainStorageMapSlabIndices { //nolint:maprange + slabIndices = append( + slabIndices, + domainStorageMapSlabIndex{ + StorageDomainKey: storageDomainKey, + SlabIndex: slabIndex, + }, + ) + } + sort.Slice( + slabIndices, + func(i, j int) bool { + slabIndex1 := slabIndices[i] + slabIndex2 := slabIndices[j] + domainKey1 := slabIndex1.StorageDomainKey + domainKey2 := slabIndex2.StorageDomainKey + return domainKey1.Compare(domainKey2) < 0 + }, + ) + + for _, slabIndex := range slabIndices { + err := s.writeStorageDomainSlabIndex( + slabIndex.StorageDomainKey, + slabIndex.SlabIndex, + ) + if err != nil { + return err + } + } + } + + s.newDomainStorageMapSlabIndices = nil + + return nil +} + +func (s *AccountStorageV1) writeStorageDomainSlabIndex( + storageDomainKey interpreter.StorageDomainKey, + slabIndex atree.SlabIndex, +) error { + return writeSlabIndexToRegister( + s.ledger, + storageDomainKey.Address, + []byte(storageDomainKey.Domain.Identifier()), + slabIndex, + ) +} + +// getDomainStorageMapFromV1DomainRegister returns domain storage map from legacy domain register. 
+func getDomainStorageMapFromV1DomainRegister( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain common.StorageDomain, +) (*interpreter.DomainStorageMap, error) { + + domainStorageSlabIndex, domainRegisterExists, err := readSlabIndexFromRegister( + ledger, + address, + []byte(domain.Identifier()), + ) + if err != nil { + return nil, err + } + if !domainRegisterExists { + return nil, nil + } + + slabID := atree.NewSlabID( + atree.Address(address), + domainStorageSlabIndex, + ) + + return interpreter.NewDomainStorageMapWithRootID(storage, slabID), nil +} diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go new file mode 100644 index 0000000000..71e19fdaed --- /dev/null +++ b/runtime/account_storage_v2.go @@ -0,0 +1,319 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime + +import ( + "sort" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" +) + +type AccountStorageV2 struct { + ledger atree.Ledger + slabStorage atree.SlabStorage + memoryGauge common.MemoryGauge + + // cachedAccountStorageMaps is a cache of account storage maps. 
+ cachedAccountStorageMaps map[common.Address]*interpreter.AccountStorageMap + + // newAccountStorageMapSlabIndices contains root slab indices of new account storage maps. + // The indices are saved using Ledger.SetValue() during commit(). + newAccountStorageMapSlabIndices map[common.Address]atree.SlabIndex +} + +func NewAccountStorageV2( + ledger atree.Ledger, + slabStorage atree.SlabStorage, + memoryGauge common.MemoryGauge, +) *AccountStorageV2 { + return &AccountStorageV2{ + ledger: ledger, + slabStorage: slabStorage, + memoryGauge: memoryGauge, + } +} + +func (s *AccountStorageV2) GetDomainStorageMap( + inter *interpreter.Interpreter, + address common.Address, + domain common.StorageDomain, + createIfNotExists bool, +) ( + domainStorageMap *interpreter.DomainStorageMap, +) { + accountStorageMap := s.getAccountStorageMap(address) + + if accountStorageMap == nil && createIfNotExists { + accountStorageMap = s.storeNewAccountStorageMap(address) + } + + if accountStorageMap != nil { + domainStorageMap = accountStorageMap.GetDomain( + s.memoryGauge, + inter, + domain, + createIfNotExists, + ) + } + + return +} + +// getAccountStorageMap returns AccountStorageMap if exists, or nil otherwise. +func (s *AccountStorageV2) getAccountStorageMap( + address common.Address, +) ( + accountStorageMap *interpreter.AccountStorageMap, +) { + // Return cached account storage map if it exists. + + if s.cachedAccountStorageMaps != nil { + accountStorageMap = s.cachedAccountStorageMaps[address] + if accountStorageMap != nil { + return accountStorageMap + } + } + + defer func() { + if accountStorageMap != nil { + s.cacheAccountStorageMap( + address, + accountStorageMap, + ) + } + }() + + // Load account storage map if account storage register exists. 
+ + var err error + accountStorageMap, err = getAccountStorageMapFromRegister( + s.ledger, + s.slabStorage, + address, + ) + if err != nil { + panic(err) + } + + return +} + +func (s *AccountStorageV2) cacheAccountStorageMap( + address common.Address, + accountStorageMap *interpreter.AccountStorageMap, +) { + if s.cachedAccountStorageMaps == nil { + s.cachedAccountStorageMaps = map[common.Address]*interpreter.AccountStorageMap{} + } + s.cachedAccountStorageMaps[address] = accountStorageMap +} + +func (s *AccountStorageV2) storeNewAccountStorageMap( + address common.Address, +) *interpreter.AccountStorageMap { + + accountStorageMap := interpreter.NewAccountStorageMap( + s.memoryGauge, + s.slabStorage, + atree.Address(address), + ) + + slabIndex := accountStorageMap.SlabID().Index() + + s.SetNewAccountStorageMapSlabIndex( + address, + slabIndex, + ) + + s.cacheAccountStorageMap( + address, + accountStorageMap, + ) + + return accountStorageMap +} + +func (s *AccountStorageV2) SetNewAccountStorageMapSlabIndex( + address common.Address, + slabIndex atree.SlabIndex, +) { + if s.newAccountStorageMapSlabIndices == nil { + s.newAccountStorageMapSlabIndices = map[common.Address]atree.SlabIndex{} + } + s.newAccountStorageMapSlabIndices[address] = slabIndex +} + +func (s *AccountStorageV2) commit() error { + switch len(s.newAccountStorageMapSlabIndices) { + case 0: + // Nothing to commit. + return nil + + case 1: + // Optimize for the common case of a single account storage map. 
+ + var updated int + for address, slabIndex := range s.newAccountStorageMapSlabIndices { //nolint:maprange + if updated > 0 { + panic(errors.NewUnreachableError()) + } + + err := s.writeAccountStorageSlabIndex( + address, + slabIndex, + ) + if err != nil { + return err + } + + updated++ + } + + default: + // Sort the indices to ensure deterministic order + + type accountStorageMapSlabIndex struct { + Address common.Address + SlabIndex atree.SlabIndex + } + + slabIndices := make([]accountStorageMapSlabIndex, 0, len(s.newAccountStorageMapSlabIndices)) + for address, slabIndex := range s.newAccountStorageMapSlabIndices { //nolint:maprange + slabIndices = append( + slabIndices, + accountStorageMapSlabIndex{ + Address: address, + SlabIndex: slabIndex, + }, + ) + } + sort.Slice( + slabIndices, + func(i, j int) bool { + slabIndex1 := slabIndices[i] + slabIndex2 := slabIndices[j] + address1 := slabIndex1.Address + address2 := slabIndex2.Address + return address1.Compare(address2) < 0 + }, + ) + + for _, slabIndex := range slabIndices { + err := s.writeAccountStorageSlabIndex( + slabIndex.Address, + slabIndex.SlabIndex, + ) + if err != nil { + return err + } + } + } + + s.newAccountStorageMapSlabIndices = nil + + return nil +} + +func (s *AccountStorageV2) writeAccountStorageSlabIndex( + address common.Address, + slabIndex atree.SlabIndex, +) error { + return writeSlabIndexToRegister( + s.ledger, + address, + []byte(AccountStorageKey), + slabIndex, + ) +} + +func readAccountStorageSlabIndexFromRegister( + ledger atree.Ledger, + address common.Address, +) ( + atree.SlabIndex, + bool, + error, +) { + return readSlabIndexFromRegister( + ledger, + address, + []byte(AccountStorageKey), + ) +} + +func getAccountStorageMapFromRegister( + ledger atree.Ledger, + slabStorage atree.SlabStorage, + address common.Address, +) ( + *interpreter.AccountStorageMap, + error, +) { + slabIndex, registerExists, err := readAccountStorageSlabIndexFromRegister( + ledger, + address, + ) + if err != 
nil { + return nil, err + } + if !registerExists { + return nil, nil + } + + slabID := atree.NewSlabID( + atree.Address(address), + slabIndex, + ) + + return interpreter.NewAccountStorageMapWithRootID(slabStorage, slabID), nil +} + +func hasAccountStorageMap( + ledger atree.Ledger, + address common.Address, +) (bool, error) { + + _, registerExists, err := readAccountStorageSlabIndexFromRegister( + ledger, + address, + ) + if err != nil { + return false, err + } + return registerExists, nil +} + +func (s *AccountStorageV2) cachedRootSlabIDs() []atree.SlabID { + + var slabIDs []atree.SlabID + + // Get cached account storage map slab IDs. + for _, storageMap := range s.cachedAccountStorageMaps { //nolint:maprange + slabIDs = append( + slabIDs, + storageMap.SlabID(), + ) + } + + return slabIDs +} diff --git a/runtime/capabilitycontrollers_test.go b/runtime/capabilitycontrollers_test.go index fc6f692fe4..78ff497b96 100644 --- a/runtime/capabilitycontrollers_test.go +++ b/runtime/capabilitycontrollers_test.go @@ -3251,9 +3251,12 @@ func TestRuntimeCapabilityControllers(t *testing.T) { ) require.NoError(t, err) - storageMap := storage.GetStorageMap( + // Use *interpreter.Interpreter(nil) here because createIfNotExists is false. 
+ + storageMap := storage.GetDomainStorageMap( + nil, common.MustBytesToAddress([]byte{0x1}), - stdlib.PathCapabilityStorageDomain, + common.StorageDomainPathCapability, false, ) require.Zero(t, storageMap.Count()) @@ -3840,9 +3843,10 @@ func TestRuntimeCapabilitiesGetBackwardCompatibility(t *testing.T) { }) require.NoError(t, err) - publicStorageMap := storage.GetStorageMap( + publicStorageMap := storage.GetDomainStorageMap( + inter, testAddress, - common.PathDomainPublic.Identifier(), + common.PathDomainPublic.StorageDomain(), true, ) @@ -3947,9 +3951,10 @@ func TestRuntimeCapabilitiesPublishBackwardCompatibility(t *testing.T) { }) require.NoError(t, err) - publicStorageMap := storage.GetStorageMap( + publicStorageMap := storage.GetDomainStorageMap( + inter, testAddress, - common.PathDomainStorage.Identifier(), + common.PathDomainStorage.StorageDomain(), true, ) @@ -4037,9 +4042,10 @@ func TestRuntimeCapabilitiesUnpublishBackwardCompatibility(t *testing.T) { }) require.NoError(t, err) - publicStorageMap := storage.GetStorageMap( + publicStorageMap := storage.GetDomainStorageMap( + inter, testAddress, - common.PathDomainPublic.Identifier(), + common.PathDomainPublic.StorageDomain(), true, ) diff --git a/runtime/config.go b/runtime/config.go index d6882cb353..68926367d0 100644 --- a/runtime/config.go +++ b/runtime/config.go @@ -37,4 +37,6 @@ type Config struct { CoverageReport *CoverageReport // LegacyContractUpgradeEnabled enabled specifies whether to use the old parser when parsing an old contract LegacyContractUpgradeEnabled bool + // StorageFormatV2Enabled specifies whether storage format V2 is enabled + StorageFormatV2Enabled bool } diff --git a/runtime/contract_function_executor.go b/runtime/contract_function_executor.go index 8ba0f49bf8..19960185f2 100644 --- a/runtime/contract_function_executor.go +++ b/runtime/contract_function_executor.go @@ -105,7 +105,13 @@ func (executor *interpreterContractFunctionExecutor) preprocess() (err error) { runtimeInterface 
:= context.Interface - storage := NewStorage(runtimeInterface, runtimeInterface) + storage := NewStorage( + runtimeInterface, + runtimeInterface, + StorageConfig{ + StorageFormatV2Enabled: interpreterRuntime.defaultConfig.StorageFormatV2Enabled, + }, + ) executor.storage = storage environment := context.Environment diff --git a/runtime/contract_test.go b/runtime/contract_test.go index 2bb89a2cb5..d62436af55 100644 --- a/runtime/contract_test.go +++ b/runtime/contract_test.go @@ -44,18 +44,21 @@ func TestRuntimeContract(t *testing.T) { t.Parallel() type testCase struct { - name string // the name of the contract used in add/update calls - code string // the code we use to add the contract - code2 string // the code we use to update the contract - valid bool - isInterface bool + name string // the name of the contract used in add/update calls + code string // the code we use to add the contract + code2 string // the code we use to update the contract + valid bool + isInterface bool + storageFormatV2Enabled bool } - test := func(t *testing.T, tc testCase) { - + runTest := func(t *testing.T, tc testCase) { t.Parallel() - runtime := NewTestInterpreterRuntime() + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = tc.storageFormatV2Enabled + + runtime := NewTestInterpreterRuntimeWithConfig(config) var loggedMessages []string @@ -222,8 +225,18 @@ func TestRuntimeContract(t *testing.T) { // so getting the storage map here once upfront would result in outdated data getContractValueExists := func() bool { - storageMap := NewStorage(storage, nil). 
- GetStorageMap(signerAddress, StorageDomainContract, false) + storageMap := NewStorage( + storage, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ).GetDomainStorageMap( + inter, + signerAddress, + common.StorageDomainContract, + false, + ) if storageMap == nil { return false } @@ -514,6 +527,18 @@ func TestRuntimeContract(t *testing.T) { } + test := func(t *testing.T, tc testCase) { + t.Run("storage format V2 disabled", func(t *testing.T) { + tc.storageFormatV2Enabled = false + runTest(t, tc) + }) + + t.Run("storage format V2 enabled", func(t *testing.T) { + tc.storageFormatV2Enabled = true + runTest(t, tc) + }) + } + t.Run("valid contract, correct name", func(t *testing.T) { test(t, testCase{ name: "Test", diff --git a/runtime/environment.go b/runtime/environment.go index 7873d4e990..6caa05fc8b 100644 --- a/runtime/environment.go +++ b/runtime/environment.go @@ -1106,9 +1106,10 @@ func (e *interpreterEnvironment) loadContract( location := compositeType.Location if addressLocation, ok := location.(common.AddressLocation); ok { - storageMap := e.storage.GetStorageMap( + storageMap := e.storage.GetDomainStorageMap( + inter, addressLocation.Address, - StorageDomainContract, + common.StorageDomainContract, false, ) if storageMap != nil { diff --git a/runtime/ft_test.go b/runtime/ft_test.go index b9374d986e..144f062e1c 100644 --- a/runtime/ft_test.go +++ b/runtime/ft_test.go @@ -1083,9 +1083,10 @@ func TestRuntimeBrokenFungibleTokenRecovery(t *testing.T) { contractsAddress, ) - contractStorage := storage.GetStorageMap( + contractStorage := storage.GetDomainStorageMap( + inter, contractsAddress, - StorageDomainContract, + common.StorageDomainContract, true, ) contractStorage.SetValue( @@ -1118,9 +1119,10 @@ func TestRuntimeBrokenFungibleTokenRecovery(t *testing.T) { userAddress, ) - userStorage := storage.GetStorageMap( + userStorage := storage.GetDomainStorageMap( + inter, userAddress, - common.PathDomainStorage.Identifier(), + 
common.PathDomainStorage.StorageDomain(), true, ) const storagePathIdentifier = "exampleTokenVault" diff --git a/runtime/migrate_domain_registers.go b/runtime/migrate_domain_registers.go new file mode 100644 index 0000000000..08d6afa55d --- /dev/null +++ b/runtime/migrate_domain_registers.go @@ -0,0 +1,171 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime + +import ( + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" +) + +type GetDomainStorageMapFunc func( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain common.StorageDomain, +) ( + *interpreter.DomainStorageMap, + error, +) + +// DomainRegisterMigration migrates domain registers to account storage maps. 
+type DomainRegisterMigration struct { + ledger atree.Ledger + storage atree.SlabStorage + inter *interpreter.Interpreter + memoryGauge common.MemoryGauge + getDomainStorageMap GetDomainStorageMapFunc +} + +func NewDomainRegisterMigration( + ledger atree.Ledger, + storage atree.SlabStorage, + inter *interpreter.Interpreter, + memoryGauge common.MemoryGauge, + getDomainStorageMap GetDomainStorageMapFunc, +) *DomainRegisterMigration { + if getDomainStorageMap == nil { + getDomainStorageMap = getDomainStorageMapFromV1DomainRegister + } + return &DomainRegisterMigration{ + ledger: ledger, + storage: storage, + inter: inter, + memoryGauge: memoryGauge, + getDomainStorageMap: getDomainStorageMap, + } +} + +func (m *DomainRegisterMigration) MigrateAccount( + address common.Address, +) ( + *interpreter.AccountStorageMap, + error, +) { + exists, err := hasAccountStorageMap(m.ledger, address) + if err != nil { + return nil, err + } + if exists { + // Account storage map already exists + return nil, nil + } + + // Migrate existing domains + accountStorageMap, err := m.migrateDomainRegisters(address) + if err != nil { + return nil, err + } + + if accountStorageMap == nil { + // Nothing migrated + return nil, nil + } + + slabIndex := accountStorageMap.SlabID().Index() + + // Write account register + errors.WrapPanic(func() { + err = m.ledger.SetValue( + address[:], + []byte(AccountStorageKey), + slabIndex[:], + ) + }) + if err != nil { + return nil, interpreter.WrappedExternalError(err) + } + + return accountStorageMap, nil +} + +// migrateDomainRegisters migrates all existing domain storage maps to a new account storage map, +// and removes the domain registers. 
+func (m *DomainRegisterMigration) migrateDomainRegisters( + address common.Address, +) ( + *interpreter.AccountStorageMap, + error, +) { + + var accountStorageMap *interpreter.AccountStorageMap + + for _, domain := range common.AllStorageDomains { + + domainStorageMap, err := m.getDomainStorageMap( + m.ledger, + m.storage, + address, + domain, + ) + if err != nil { + return nil, err + } + + if domainStorageMap == nil { + // Skip non-existent domain + continue + } + + if accountStorageMap == nil { + accountStorageMap = interpreter.NewAccountStorageMap( + m.memoryGauge, + m.storage, + atree.Address(address), + ) + } + + // Migrate (insert) existing domain storage map to account storage map + existed := accountStorageMap.WriteDomain(m.inter, domain, domainStorageMap) + if existed { + // This shouldn't happen because we are inserting domain storage map into empty account storage map. + return nil, errors.NewUnexpectedError( + "failed to migrate domain %s for account %x: domain already exists in account storage map", + domain.Identifier(), + address, + ) + } + + // Remove migrated domain registers + errors.WrapPanic(func() { + // NOTE: removing non-existent domain registers is no-op. + err = m.ledger.SetValue( + address[:], + []byte(domain.Identifier()), + nil) + }) + if err != nil { + return nil, interpreter.WrappedExternalError(err) + } + } + + return accountStorageMap, nil +} diff --git a/runtime/migrate_domain_registers_test.go b/runtime/migrate_domain_registers_test.go new file mode 100644 index 0000000000..f1d6e4304c --- /dev/null +++ b/runtime/migrate_domain_registers_test.go @@ -0,0 +1,532 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime_test + +import ( + "math" + "math/rand" + goruntime "runtime" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + . "github.com/onflow/cadence/test_utils/interpreter_utils" + . "github.com/onflow/cadence/test_utils/runtime_utils" +) + +func TestMigrateDomainRegisters(t *testing.T) { + t.Parallel() + + isAtreeRegister := func(key string) bool { + return key[0] == '$' && len(key) == 9 + } + + getNonAtreeRegisters := func(values map[string][]byte) map[string][]byte { + nonAtreeRegisters := make(map[string][]byte) + for k, v := range values { + ks := strings.Split(k, "|") + if !isAtreeRegister(ks[1]) && len(v) > 0 { + nonAtreeRegisters[k] = v + } + } + return nonAtreeRegisters + } + + address1 := common.MustBytesToAddress([]byte{0x1}) + address2 := common.MustBytesToAddress([]byte{0x2}) + + addresses := []common.Address{address2, address1} + + t.Run("accounts without domain registers", func(t *testing.T) { + t.Parallel() + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration( + ledger, + storage, + inter, + nil, + nil, + ) + + for _, address := range addresses { + accountStorageMap, err := 
migrator.MigrateAccount(address) + require.Nil(t, accountStorageMap) + require.NoError(t, err) + } + + err := storage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + require.Equal(t, 0, len(ledger.StoredValues)) + }) + + t.Run("accounts with domain registers", func(t *testing.T) { + t.Parallel() + + accountsInfo := []accountInfo{ + { + address: address1, + domains: []domainInfo{ + {domain: common.PathDomainStorage.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3}, + {domain: common.PathDomainPrivate.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + { + address: address2, + domains: []domainInfo{ + {domain: common.PathDomainPublic.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + } + + ledger, accountsValues := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration( + ledger, + storage, + inter, + nil, + nil, + ) + + var accountStorageMaps []*interpreter.AccountStorageMap + for _, address := range addresses { + accountStorageMap, err := migrator.MigrateAccount(address) + require.NotNil(t, accountStorageMap) + require.NoError(t, err) + accountStorageMaps = append(accountStorageMaps, accountStorageMap) + } + + err := storage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + // Check non-atree registers + nonAtreeRegisters := getNonAtreeRegisters(ledger.StoredValues) + require.Equal(t, len(addresses), len(nonAtreeRegisters)) + require.Contains(t, nonAtreeRegisters, string(address1[:])+"|"+runtime.AccountStorageKey) + require.Contains(t, nonAtreeRegisters, string(address2[:])+"|"+runtime.AccountStorageKey) + + // Check atree storage + expectedRootSlabIDs := make([]atree.SlabID, 0, len(accountStorageMaps)) + for _, accountStorageMap := range accountStorageMaps { + 
expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID()) + } + + CheckAtreeStorageHealth(t, storage, expectedRootSlabIDs) + + // Check account storage map data + for address, accountValues := range accountsValues { + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) + + t.Run("migrated accounts", func(t *testing.T) { + t.Parallel() + + accountsInfo := []accountInfo{ + { + address: address1, + domains: []domainInfo{ + {domain: common.PathDomainStorage.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3}, + {domain: common.PathDomainPrivate.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + { + address: address2, + domains: []domainInfo{ + {domain: common.PathDomainPublic.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + } + + ledger, accountsValues := newTestLedgerWithMigratedAccounts(t, nil, nil, accountsInfo) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration( + ledger, + storage, + inter, + nil, + nil, + ) + + for _, address := range addresses { + accountStorageMap, err := migrator.MigrateAccount(address) + require.Nil(t, accountStorageMap) + require.NoError(t, err) + } + + // Check account storage map data + for address, accountValues := range accountsValues { + checkAccountStorageMapData( + t, + ledger.StoredValues, + ledger.StorageIndices, + address, + accountValues, + ) + } + }) +} + +type domainInfo struct { + domain common.StorageDomain + domainStorageMapCount int + maxDepth int +} + +type accountInfo struct { + address common.Address + domains []domainInfo +} + +func newTestLedgerWithUnmigratedAccounts( + tb testing.TB, + onRead LedgerOnRead, + onWrite LedgerOnWrite, + accounts []accountInfo, +) (TestLedger, map[common.Address]accountStorageMapValues) { + ledger := 
NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // domain register to match DomainStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + tb, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + random := rand.New(rand.NewSource(42)) + + accountsValues := make(map[common.Address]accountStorageMapValues) + + var expectedDomainRootSlabIDs []atree.SlabID + + for _, account := range accounts { + + address := account.address + + accountValues := make(accountStorageMapValues) + + accountsValues[address] = accountValues + + for _, domainInfo := range account.domains { + + domain := domainInfo.domain + domainStorageMapCount := domainInfo.domainStorageMapCount + maxDepth := domainInfo.maxDepth + + accountValues[domain] = make(domainStorageMapValues) + + // Create domain storage map + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + + // Write domain register + domainStorageMapValueID := domainStorageMap.ValueID() + err := ledger.SetValue(address[:], []byte(domain.Identifier()), domainStorageMapValueID[8:]) + require.NoError(tb, err) + + vid := domainStorageMap.ValueID() + expectedDomainRootSlabIDs = append( + expectedDomainRootSlabIDs, + atree.NewSlabID(atree.Address(address), atree.SlabIndex(vid[8:]))) + + // Write elements to domain storage map + for len(accountValues[domain]) < domainStorageMapCount { + + key := interpreter.StringStorageMapKey(strconv.Itoa(random.Int())) + + depth := random.Intn(maxDepth + 1) + value := randomCadenceValues(inter, address, depth, random) + + _ = 
domainStorageMap.WriteValue(inter, key, value) + + accountValues[domain][key] = value + } + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(tb, err) + + CheckAtreeStorageHealth(tb, storage, expectedDomainRootSlabIDs) + + // Create a new storage + newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices) + + return newLedger, accountsValues +} + +func newTestLedgerWithMigratedAccounts( + tb testing.TB, + onRead LedgerOnRead, + onWrite LedgerOnWrite, + accounts []accountInfo, +) (TestLedger, map[common.Address]accountStorageMapValues) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // domain register to match DomainStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + tb, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + random := rand.New(rand.NewSource(42)) + + expectedRootSlabIDs := make([]atree.SlabID, 0, len(accounts)) + + accountsValues := make(map[common.Address]accountStorageMapValues) + + for _, account := range accounts { + + address := account.address + + accountValues := make(accountStorageMapValues) + + accountsValues[address] = accountValues + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + + // Write account register + accountStorageMapSlabIndex := accountStorageMap.SlabID().Index() + err := ledger.SetValue(address[:], []byte(runtime.AccountStorageKey), accountStorageMapSlabIndex[:]) + require.NoError(tb, err) + + expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID()) + + for _, domainInfo := range account.domains { + + domain := domainInfo.domain + domainStorageMapCount := domainInfo.domainStorageMapCount + maxDepth := domainInfo.maxDepth + + accountValues[domain] = make(domainStorageMapValues) + + // Create domain storage map + domainStorageMap := accountStorageMap.NewDomain(nil, inter, domain) + + // Write elements to domain storage map + for len(accountValues[domain]) < domainStorageMapCount { + + key := interpreter.StringStorageMapKey(strconv.Itoa(random.Int())) + + depth := random.Intn(maxDepth + 1) + value := randomCadenceValues(inter, address, depth, random) + + _ = domainStorageMap.WriteValue(inter, key, value) + + accountValues[domain][key] = value + } + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(tb, err) + + CheckAtreeStorageHealth(tb, storage, expectedRootSlabIDs) + + newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, 
ledger.StorageIndices) + + return newLedger, accountsValues +} + +func randomCadenceValues( + inter *interpreter.Interpreter, + address common.Address, + depth int, + random *rand.Rand, +) interpreter.EquatableValue { + var typeIndex int + if depth == 0 { + typeIndex = random.Intn(typeLargeString + 1) + } else { + typeIndex = random.Intn(maxType) + } + + switch typeIndex { + case typeUint8: + num := random.Intn(math.MaxUint8 + 1) + return interpreter.NewUnmeteredUInt8Value(uint8(num)) + + case typeUint16: + num := random.Intn(math.MaxUint16 + 1) + return interpreter.NewUnmeteredUInt16Value(uint16(num)) + + case typeUint32: + num := random.Uint32() + return interpreter.NewUnmeteredUInt32Value(num) + + case typeUint64: + num := random.Uint64() + return interpreter.NewUnmeteredUInt64Value(num) + + case typeSmallString: + const maxSmallStringLength = 32 + + size := random.Intn(maxSmallStringLength + 1) + + b := make([]byte, size) + random.Read(b) + s := strings.ToValidUTF8(string(b), "$") + return interpreter.NewUnmeteredStringValue(s) + + case typeLargeString: + const minLargeStringLength = 256 + const maxLargeStringLength = 1024 + + size := random.Intn(maxLargeStringLength+1-minLargeStringLength) + minLargeStringLength + + b := make([]byte, size) + random.Read(b) + s := strings.ToValidUTF8(string(b), "$") + return interpreter.NewUnmeteredStringValue(s) + + case typeArray: + const minArrayLength = 1 + const maxArrayLength = 20 + + size := random.Intn(maxArrayLength+1-minArrayLength) + minArrayLength + + arrayType := interpreter.NewVariableSizedStaticType( + nil, + interpreter.PrimitiveStaticTypeAny, + ) + + depth-- + + values := make([]interpreter.Value, size) + for i := range size { + values[i] = randomCadenceValues(inter, common.ZeroAddress, depth, random) + } + + return interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + arrayType, + address, + values..., + ) + + case typeDictionary: + const minDictLength = 1 + const maxDictLength = 20 + + size 
:= random.Intn(maxDictLength+1-minDictLength) + minDictLength + + dictType := interpreter.NewDictionaryStaticType( + nil, + interpreter.PrimitiveStaticTypeAny, + interpreter.PrimitiveStaticTypeAny, + ) + + depth-- + + keyAndValues := make([]interpreter.Value, 0, size*2) + for i := range size * 2 { + if i%2 == 0 { + // Key (0 depth for element) + keyAndValues = append(keyAndValues, randomCadenceValues(inter, common.ZeroAddress, 0, random)) + } else { + // Value (decremented depth for element) + keyAndValues = append(keyAndValues, randomCadenceValues(inter, common.ZeroAddress, depth, random)) + } + } + + return interpreter.NewDictionaryValueWithAddress(inter, interpreter.EmptyLocationRange, dictType, address, keyAndValues...) + + default: + panic(errors.NewUnreachableError()) + } +} + +const ( + typeUint8 = iota + typeUint16 + typeUint32 + typeUint64 + typeSmallString + typeLargeString + typeArray + typeDictionary + maxType +) diff --git a/runtime/runtime.go b/runtime/runtime.go index 44fdd08211..a8b520ef3b 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -558,7 +558,15 @@ func (r *interpreterRuntime) Storage(context Context) (*Storage, *interpreter.In codesAndPrograms := NewCodesAndPrograms() - storage := NewStorage(context.Interface, context.Interface) + runtimeInterface := context.Interface + + storage := NewStorage( + runtimeInterface, + runtimeInterface, + StorageConfig{ + StorageFormatV2Enabled: r.defaultConfig.StorageFormatV2Enabled, + }, + ) environment := context.Environment if environment == nil { @@ -566,7 +574,7 @@ func (r *interpreterRuntime) Storage(context Context) (*Storage, *interpreter.In } environment.Configure( - context.Interface, + runtimeInterface, codesAndPrograms, storage, context.CoverageReport, @@ -612,7 +620,7 @@ func (r *interpreterRuntime) ReadStored( pathValue := valueImporter{inter: inter}.importPathValue(path) - domain := pathValue.Domain.Identifier() + domain := pathValue.Domain.StorageDomain() identifier := 
pathValue.Identifier storageMapKey := interpreter.StringStorageMapKey(identifier) diff --git a/runtime/runtime_memory_metering_test.go b/runtime/runtime_memory_metering_test.go index d6d389ff73..6e6687e4c8 100644 --- a/runtime/runtime_memory_metering_test.go +++ b/runtime/runtime_memory_metering_test.go @@ -815,7 +815,9 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { // Before the storageUsed function is invoked, the deltas must have been committed. // So the encoded slabs must have been metered at this point. assert.Equal(t, uint64(0), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + storageUsedInvoked = true + return 1, nil }, } @@ -840,85 +842,152 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { t.Run("account.storage.save", func(t *testing.T) { t.Parallel() - code := []byte(` - transaction { - prepare(signer: auth(Storage) &Account) { - signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test) - } - } - `) + test := func(storageFormatV2Enabled bool) { - meter := newTestMemoryGauge() + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - runtimeInterface := &TestRuntimeInterface{ - Storage: NewTestLedger(nil, nil), - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{{42}}, nil - }, - OnMeterMemory: meter.MeterMemory, - } + t.Run(name, func(t *testing.T) { + t.Parallel() - runtime := NewTestInterpreterRuntime() + code := []byte(` + transaction { + prepare(signer: auth(Storage) &Account) { + signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test) + } + } + `) - err := runtime.ExecuteTransaction( - Script{ - Source: code, - }, - Context{ - Interface: runtimeInterface, - Location: common.TransactionLocation{}, - }, - ) + meter := newTestMemoryGauge() - require.NoError(t, err) - assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]Address, 
error) { + return []Address{{42}}, nil + }, + OnMeterMemory: meter.MeterMemory, + } + + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) + + err := runtime.ExecuteTransaction( + Script{ + Source: code, + }, + Context{ + Interface: runtimeInterface, + Location: common.TransactionLocation{}, + }, + ) + + require.NoError(t, err) + + var expected uint64 + if storageFormatV2Enabled { + expected = 5 + } else { + expected = 4 + } + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindAtreeEncodedSlab), + ) + }) + } + + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) t.Run("storage used non empty", func(t *testing.T) { t.Parallel() - code := []byte(` - transaction { - prepare(signer: auth(Storage) &Account) { - signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test) - signer.storage.used - } - } - `) + test := func(storageFormatV2Enabled bool) { - meter := newTestMemoryGauge() - storageUsedInvoked := false + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - runtimeInterface := &TestRuntimeInterface{ - Storage: NewTestLedger(nil, nil), - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{{42}}, nil - }, - OnMeterMemory: meter.MeterMemory, - OnGetStorageUsed: func(_ Address) (uint64, error) { - // Before the storageUsed function is invoked, the deltas must have been committed. - // So the encoded slabs must have been metered at this point. 
- assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) - storageUsedInvoked = true - return 1, nil - }, - } + t.Run(name, func(t *testing.T) { + t.Parallel() - runtime := NewTestInterpreterRuntime() + code := []byte(` + transaction { + prepare(signer: auth(Storage) &Account) { + signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test) + signer.storage.used + } + } + `) - err := runtime.ExecuteTransaction( - Script{ - Source: code, - }, - Context{ - Interface: runtimeInterface, - Location: common.TransactionLocation{}, - }, - ) + meter := newTestMemoryGauge() + storageUsedInvoked := false - require.NoError(t, err) - assert.True(t, storageUsedInvoked) - assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{{42}}, nil + }, + OnMeterMemory: meter.MeterMemory, + OnGetStorageUsed: func(_ Address) (uint64, error) { + // Before the storageUsed function is invoked, the deltas must have been committed. + // So the encoded slabs must have been metered at this point. 
+ var expected uint64 + if storageFormatV2Enabled { + expected = 5 + } else { + expected = 4 + } + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindAtreeEncodedSlab), + ) + + storageUsedInvoked = true + + return 1, nil + }, + } + + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) + + err := runtime.ExecuteTransaction( + Script{ + Source: code, + }, + Context{ + Interface: runtimeInterface, + Location: common.TransactionLocation{}, + }, + ) + + require.NoError(t, err) + assert.True(t, storageUsedInvoked) + + var expected uint64 + if storageFormatV2Enabled { + expected = 5 + } else { + expected = 4 + } + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindAtreeEncodedSlab), + ) + }) + } + + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) } @@ -930,7 +999,7 @@ func TestRuntimeMemoryMeteringErrors(t *testing.T) { type memoryMeter map[common.MemoryKind]uint64 - runtimeInterface := func(meter memoryMeter) *TestRuntimeInterface { + runtimeInterface := func(memoryMeter) *TestRuntimeInterface { return &TestRuntimeInterface{ OnMeterMemory: func(usage common.MemoryUsage) error { if usage.Kind == common.MemoryKindStringValue || @@ -1036,143 +1105,226 @@ func TestRuntimeMeterEncoding(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.AtreeValidationEnabled = false - rt := NewTestInterpreterRuntimeWithConfig(config) + test := func(storageFormatV2Enabled bool) { - address := common.MustBytesToAddress([]byte{0x1}) - storage := NewTestLedger(nil, nil) - meter := newTestMemoryGauge() + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - runtimeInterface := &TestRuntimeInterface{ - Storage: storage, - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{address}, nil - }, - OnMeterMemory: meter.MeterMemory, - } + t.Run(name, 
func(t *testing.T) { + t.Parallel() - text := "A quick brown fox jumps over the lazy dog" + config := DefaultTestInterpreterConfig + config.AtreeValidationEnabled = false + config.StorageFormatV2Enabled = storageFormatV2Enabled + rt := NewTestInterpreterRuntimeWithConfig(config) - err := rt.ExecuteTransaction( - Script{ - Source: []byte(fmt.Sprintf(` - transaction() { - prepare(acc: auth(Storage) &Account) { - var s = "%s" - acc.storage.save(s, to:/storage/some_path) - } - }`, - text, - )), - }, - Context{ - Interface: runtimeInterface, - Location: common.TransactionLocation{}, - }, - ) + address := common.MustBytesToAddress([]byte{0x1}) + storage := NewTestLedger(nil, nil) + meter := newTestMemoryGauge() - require.NoError(t, err) - assert.Equal(t, 75, int(meter.getMemory(common.MemoryKindBytes))) + runtimeInterface := &TestRuntimeInterface{ + Storage: storage, + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{address}, nil + }, + OnMeterMemory: meter.MeterMemory, + } + + text := "A quick brown fox jumps over the lazy dog" + + err := rt.ExecuteTransaction( + Script{ + Source: []byte(fmt.Sprintf(` + transaction() { + prepare(acc: auth(Storage) &Account) { + var s = "%s" + acc.storage.save(s, to:/storage/some_path) + } + }`, + text, + )), + }, + Context{ + Interface: runtimeInterface, + Location: common.TransactionLocation{}, + }, + ) + + require.NoError(t, err) + + var expected uint64 + if storageFormatV2Enabled { + expected = 107 + } else { + expected = 75 + } + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindBytes), + ) + }) + } + + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) t.Run("string in loop", func(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.AtreeValidationEnabled = false - rt := NewTestInterpreterRuntimeWithConfig(config) + test := func(storageFormatV2Enabled bool) { - address := common.MustBytesToAddress([]byte{0x1}) - storage := 
NewTestLedger(nil, nil) - meter := newTestMemoryGauge() + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - runtimeInterface := &TestRuntimeInterface{ - Storage: storage, - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{address}, nil - }, - OnMeterMemory: meter.MeterMemory, - } + t.Run(name, func(t *testing.T) { + t.Parallel() - text := "A quick brown fox jumps over the lazy dog" + config := DefaultTestInterpreterConfig + config.AtreeValidationEnabled = false + config.StorageFormatV2Enabled = storageFormatV2Enabled + rt := NewTestInterpreterRuntimeWithConfig(config) - err := rt.ExecuteTransaction( - Script{ - Source: []byte(fmt.Sprintf(` - transaction() { - prepare(acc: auth(Storage) &Account) { - var i = 0 - var s = "%s" - while i<1000 { - let path = StoragePath(identifier: "i".concat(i.toString()))! - acc.storage.save(s, to: path) - i=i+1 - } - } - }`, - text, - )), - }, - Context{ - Interface: runtimeInterface, - Location: common.TransactionLocation{}, - }, - ) + address := common.MustBytesToAddress([]byte{0x1}) + storage := NewTestLedger(nil, nil) + meter := newTestMemoryGauge() - require.NoError(t, err) - assert.Equal(t, 61455, int(meter.getMemory(common.MemoryKindBytes))) + runtimeInterface := &TestRuntimeInterface{ + Storage: storage, + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{address}, nil + }, + OnMeterMemory: meter.MeterMemory, + } + + text := "A quick brown fox jumps over the lazy dog" + + err := rt.ExecuteTransaction( + Script{ + Source: []byte(fmt.Sprintf(` + transaction() { + prepare(acc: auth(Storage) &Account) { + var i = 0 + var s = "%s" + while i<1000 { + let path = StoragePath(identifier: "i".concat(i.toString()))! 
+ acc.storage.save(s, to: path) + i=i+1 + } + } + }`, + text, + )), + }, + Context{ + Interface: runtimeInterface, + Location: common.TransactionLocation{}, + }, + ) + + require.NoError(t, err) + + var expected uint64 + if storageFormatV2Enabled { + expected = 61494 + } else { + expected = 61455 + } + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindBytes), + ) + }) + } + + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) t.Run("composite", func(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.AtreeValidationEnabled = false - rt := NewTestInterpreterRuntimeWithConfig(config) + test := func(storageFormatV2Enabled bool) { - address := common.MustBytesToAddress([]byte{0x1}) - storage := NewTestLedger(nil, nil) - meter := newTestMemoryGauge() + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - runtimeInterface := &TestRuntimeInterface{ - Storage: storage, - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{address}, nil - }, - OnMeterMemory: meter.MeterMemory, - } + t.Run(name, func(t *testing.T) { + t.Parallel() - _, err := rt.ExecuteScript( - Script{ - Source: []byte(` - access(all) fun main() { - let acc = getAuthAccount(0x02) - var i = 0 - var f = Foo() - while i<1000 { - let path = StoragePath(identifier: "i".concat(i.toString()))! 
- acc.storage.save(f, to: path) - i=i+1 - } - } + config := DefaultTestInterpreterConfig + config.AtreeValidationEnabled = false + config.StorageFormatV2Enabled = storageFormatV2Enabled + rt := NewTestInterpreterRuntimeWithConfig(config) - access(all) struct Foo { - access(self) var id: Int - init() { - self.id = 123456789 - } - }`), - }, - Context{ - Interface: runtimeInterface, - Location: common.ScriptLocation{}, - }, - ) + address := common.MustBytesToAddress([]byte{0x1}) + storage := NewTestLedger(nil, nil) + meter := newTestMemoryGauge() - require.NoError(t, err) - assert.Equal(t, 58323, int(meter.getMemory(common.MemoryKindBytes))) + runtimeInterface := &TestRuntimeInterface{ + Storage: storage, + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{address}, nil + }, + OnMeterMemory: meter.MeterMemory, + } + + _, err := rt.ExecuteScript( + Script{ + Source: []byte(` + access(all) fun main() { + let acc = getAuthAccount(0x02) + var i = 0 + var f = Foo() + while i<1000 { + let path = StoragePath(identifier: "i".concat(i.toString()))! 
+ acc.storage.save(f, to: path) + i=i+1 + } + } + + access(all) struct Foo { + access(self) var id: Int + init() { + self.id = 123456789 + } + } + `), + }, + Context{ + Interface: runtimeInterface, + Location: common.ScriptLocation{}, + }, + ) + + require.NoError(t, err) + + var expected uint64 + if storageFormatV2Enabled { + expected = 58362 + } else { + expected = 58323 + } + + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindBytes), + ) + }) + } + + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) } diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go index 3c41142877..4394b73e93 100644 --- a/runtime/runtime_test.go +++ b/runtime/runtime_test.go @@ -5683,102 +5683,159 @@ func TestRuntimeContractWriteback(t *testing.T) { t.Parallel() - runtime := NewTestInterpreterRuntime() - addressValue := cadence.BytesToAddress([]byte{0xCA, 0xDE}) - contract := []byte(` - access(all) contract Test { + test := func( + storageFormatV2Enabled bool, + expectedDeployTxWrites []ownerKeyPair, + expectedWriteTxWrites []ownerKeyPair, + ) { - access(all) var test: Int + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - init() { - self.test = 1 - } + t.Run(name, func(t *testing.T) { + t.Parallel() - access(all) fun setTest(_ test: Int) { - self.test = test - } - } - `) + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) - deploy := DeploymentTransaction("Test", contract) + contract := []byte(` + access(all) contract Test { - readTx := []byte(` - import Test from 0xCADE + access(all) var test: Int - transaction { + init() { + self.test = 1 + } - prepare(signer: &Account) { - log(Test.test) - } - } - `) + access(all) fun setTest(_ test: Int) { + self.test = test + } + } + `) - writeTx := []byte(` - import Test from 0xCADE + deploy := DeploymentTransaction("Test", contract) - 
transaction { + readTx := []byte(` + import Test from 0xCADE - prepare(signer: &Account) { - Test.setTest(2) - } - } - `) + transaction { - var accountCode []byte - var events []cadence.Event - var loggedMessages []string - var writes []ownerKeyPair + prepare(signer: &Account) { + log(Test.test) + } + } + `) - onWrite := func(owner, key, value []byte) { - writes = append(writes, ownerKeyPair{ - owner, - key, - }) - } + writeTx := []byte(` + import Test from 0xCADE - runtimeInterface := &TestRuntimeInterface{ - OnGetCode: func(_ Location) (bytes []byte, err error) { - return accountCode, nil - }, - Storage: NewTestLedger(nil, onWrite), - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{Address(addressValue)}, nil - }, - OnResolveLocation: NewSingleIdentifierLocationResolver(t), - OnGetAccountContractCode: func(_ common.AddressLocation) (code []byte, err error) { - return accountCode, nil - }, - OnUpdateAccountContractCode: func(_ common.AddressLocation, code []byte) (err error) { - accountCode = code - return nil - }, - OnEmitEvent: func(event cadence.Event) error { - events = append(events, event) - return nil - }, - OnProgramLog: func(message string) { - loggedMessages = append(loggedMessages, message) - }, - } + transaction { - nextTransactionLocation := NewTransactionLocationGenerator() + prepare(signer: &Account) { + Test.setTest(2) + } + } + `) - err := runtime.ExecuteTransaction( - Script{ - Source: deploy, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) + var accountCode []byte + var events []cadence.Event + var loggedMessages []string + var writes []ownerKeyPair - assert.NotNil(t, accountCode) + onWrite := func(owner, key, value []byte) { + writes = append(writes, ownerKeyPair{ + owner, + key, + }) + } - assert.Equal(t, + runtimeInterface := &TestRuntimeInterface{ + OnGetCode: func(_ Location) (bytes []byte, err error) { + return accountCode, nil + }, + Storage: 
NewTestLedger(nil, onWrite), + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{Address(addressValue)}, nil + }, + OnResolveLocation: NewSingleIdentifierLocationResolver(t), + OnGetAccountContractCode: func(_ common.AddressLocation) (code []byte, err error) { + return accountCode, nil + }, + OnUpdateAccountContractCode: func(_ common.AddressLocation, code []byte) (err error) { + accountCode = code + return nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnProgramLog: func(message string) { + loggedMessages = append(loggedMessages, message) + }, + } + + nextTransactionLocation := NewTransactionLocationGenerator() + + err := runtime.ExecuteTransaction( + Script{ + Source: deploy, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.NotNil(t, accountCode) + + assert.Equal(t, + expectedDeployTxWrites, + writes, + ) + + writes = nil + + err = runtime.ExecuteTransaction( + Script{ + Source: readTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.Empty(t, writes) + + writes = nil + + err = runtime.ExecuteTransaction( + Script{ + Source: writeTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.Equal(t, + expectedWriteTxWrites, + writes, + ) + + }) + } + + test(false, []ownerKeyPair{ // storage index to contract domain storage map { @@ -5796,47 +5853,55 @@ func TestRuntimeContractWriteback(t *testing.T) { []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, - writes, - ) - - writes = nil - err = runtime.ExecuteTransaction( - Script{ - Source: readTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), + []ownerKeyPair{ + // Storage map is modified because contract value is inlined in contract storage map. 
+ // NOTE: contract value slab doesn't exist. + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, }, ) - require.NoError(t, err) - - assert.Empty(t, writes) - writes = nil + test( + true, - err = runtime.ExecuteTransaction( - Script{ - Source: writeTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), + []ownerKeyPair{ + // storage index to account storage map + { + addressValue[:], + []byte(AccountStorageKey), + }, + // contract value + // NOTE: contract value is empty because it is inlined in contract domain storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + }, + // contract domain storage map + // NOTE: contract domain storage map is empty because it is inlined in account storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, + // account storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, }, - ) - require.NoError(t, err) - assert.Equal(t, []ownerKeyPair{ - // Storage map is modified because contract value is inlined in contract storage map. - // NOTE: contract value slab doesn't exist. + // Account storage map is modified because: + // - contract value is inlined in contract storage map, and + // - contract storage map is inlined in account storage map. + // NOTE: both contract storage map slab and contract value slab don't exist. 
{ addressValue[:], []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, - writes, ) } @@ -5844,88 +5909,195 @@ func TestRuntimeStorageWriteback(t *testing.T) { t.Parallel() - runtime := NewTestInterpreterRuntime() - addressValue := cadence.BytesToAddress([]byte{0xCA, 0xDE}) - contract := []byte(` - access(all) contract Test { + test := func( + storageFormatV2Enabled bool, + expectedDeployTxWrites []ownerKeyPair, + expectedSaveToStorageTxWrites []ownerKeyPair, + expectedModifyStorageTxWrites []ownerKeyPair, + ) { - access(all) resource R { + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) + t.Run(name, func(t *testing.T) { + t.Parallel() + + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) + + contract := []byte(` + access(all) contract Test { - access(all) var test: Int + access(all) resource R { + + access(all) var test: Int + + init() { + self.test = 1 + } - init() { - self.test = 1 + access(all) fun setTest(_ test: Int) { + self.test = test + } + } + + + access(all) fun createR(): @R { + return <-create R() + } } + `) - access(all) fun setTest(_ test: Int) { - self.test = test - } - } + deploy := DeploymentTransaction("Test", contract) + var accountCode []byte + var events []cadence.Event + var loggedMessages []string + var writes []ownerKeyPair - access(all) fun createR(): @R { - return <-create R() - } - } - `) + onWrite := func(owner, key, _ []byte) { + writes = append(writes, ownerKeyPair{ + owner, + key, + }) + } - deploy := DeploymentTransaction("Test", contract) + runtimeInterface := &TestRuntimeInterface{ + OnGetCode: func(_ Location) (bytes []byte, err error) { + return accountCode, nil + }, + Storage: NewTestLedger(nil, onWrite), + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{Address(addressValue)}, nil + }, + OnResolveLocation: NewSingleIdentifierLocationResolver(t), + 
OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + return accountCode, nil + }, + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCode = code + return nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnProgramLog: func(message string) { + loggedMessages = append(loggedMessages, message) + }, + } - var accountCode []byte - var events []cadence.Event - var loggedMessages []string - var writes []ownerKeyPair + nextTransactionLocation := NewTransactionLocationGenerator() - onWrite := func(owner, key, _ []byte) { - writes = append(writes, ownerKeyPair{ - owner, - key, - }) - } + err := runtime.ExecuteTransaction( + Script{ + Source: deploy, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) - runtimeInterface := &TestRuntimeInterface{ - OnGetCode: func(_ Location) (bytes []byte, err error) { - return accountCode, nil - }, - Storage: NewTestLedger(nil, onWrite), - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{Address(addressValue)}, nil - }, - OnResolveLocation: NewSingleIdentifierLocationResolver(t), - OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { - return accountCode, nil - }, - OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { - accountCode = code - return nil - }, - OnEmitEvent: func(event cadence.Event) error { - events = append(events, event) - return nil - }, - OnProgramLog: func(message string) { - loggedMessages = append(loggedMessages, message) - }, - } + assert.NotNil(t, accountCode) - nextTransactionLocation := NewTransactionLocationGenerator() + assert.Equal(t, + expectedDeployTxWrites, + writes, + ) - err := runtime.ExecuteTransaction( - Script{ - Source: deploy, - }, - Context{ - Interface: runtimeInterface, - Location: 
nextTransactionLocation(), - }, - ) - require.NoError(t, err) + writes = nil - assert.NotNil(t, accountCode) + err = runtime.ExecuteTransaction( + Script{ + Source: []byte(` + import Test from 0xCADE - assert.Equal(t, + transaction { + + prepare(signer: auth(Storage) &Account) { + signer.storage.save(<-Test.createR(), to: /storage/r) + } + } + `), + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.Equal(t, + expectedSaveToStorageTxWrites, + writes, + ) + + readTx := []byte(` + import Test from 0xCADE + + transaction { + + prepare(signer: auth(Storage) &Account) { + log(signer.storage.borrow<&Test.R>(from: /storage/r)!.test) + } + } + `) + + writes = nil + + err = runtime.ExecuteTransaction( + Script{ + Source: readTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.Empty(t, writes) + + writeTx := []byte(` + import Test from 0xCADE + + transaction { + + prepare(signer: auth(Storage) &Account) { + let r = signer.storage.borrow<&Test.R>(from: /storage/r)! 
+ r.setTest(2) + } + } + `) + + writes = nil + + err = runtime.ExecuteTransaction( + Script{ + Source: writeTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.Equal(t, + expectedModifyStorageTxWrites, + writes, + ) + }) + } + + test( + false, []ownerKeyPair{ // storage index to contract domain storage map { @@ -5944,115 +6116,92 @@ func TestRuntimeStorageWriteback(t *testing.T) { []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, - writes, + []ownerKeyPair{ + // storage index to storage domain storage map + { + addressValue[:], + []byte("storage"), + }, + // resource value + // NOTE: resource value slab is empty because it is inlined in storage domain storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, + // storage domain storage map + // NOTE: resource value slab is inlined. + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + }, + }, + []ownerKeyPair{ + // Storage map is modified because resource value is inlined in storage map + // NOTE: resource value slab is empty. 
+ { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + }, + }, ) - writes = nil - - err = runtime.ExecuteTransaction( - Script{ - Source: []byte(` - import Test from 0xCADE - - transaction { - - prepare(signer: auth(Storage) &Account) { - signer.storage.save(<-Test.createR(), to: /storage/r) - } - } - `), - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), + test( + true, + []ownerKeyPair{ + // storage index to account storage map + { + addressValue[:], + []byte(AccountStorageKey), + }, + // contract value + // NOTE: contract value slab is empty because it is inlined in contract domain storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + }, + // contract domain storage map + // NOTE: contract domain storage map is empty because it is inlined in account storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, + // account storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, }, - ) - require.NoError(t, err) - assert.Equal(t, []ownerKeyPair{ - // storage index to storage domain storage map + // account storage map + // NOTE: account storage map is updated with new storage domain storage map (inlined). { addressValue[:], - []byte("storage"), + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, // resource value // NOTE: resource value slab is empty because it is inlined in storage domain storage map { addressValue[:], - []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, }, // storage domain storage map - // NOTE: resource value slab is inlined. + // NOTE: storage domain storage map is empty because it is inlined in account storage map. 
{ addressValue[:], - []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5}, }, }, - writes, - ) - - readTx := []byte(` - import Test from 0xCADE - - transaction { - - prepare(signer: auth(Storage) &Account) { - log(signer.storage.borrow<&Test.R>(from: /storage/r)!.test) - } - } - `) - - writes = nil - - err = runtime.ExecuteTransaction( - Script{ - Source: readTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) - - assert.Empty(t, writes) - - writeTx := []byte(` - import Test from 0xCADE - - transaction { - - prepare(signer: auth(Storage) &Account) { - let r = signer.storage.borrow<&Test.R>(from: /storage/r)! - r.setTest(2) - } - } - `) - - writes = nil - - err = runtime.ExecuteTransaction( - Script{ - Source: writeTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) - assert.Equal(t, []ownerKeyPair{ - // Storage map is modified because resource value is inlined in storage map + // Account storage map is modified because resource value is inlined in storage map, + // and storage map is inlined in account storage map. // NOTE: resource value slab is empty. 
{ addressValue[:], - []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, - writes, ) } @@ -7506,11 +7655,12 @@ func TestRuntimeComputationMetring(t *testing.T) { t.Parallel() type test struct { - name string - code string - ok bool - hits uint - intensity uint + name string + code string + ok bool + hits uint + v1Intensity uint + v2Intensity uint } compLimit := uint(6) @@ -7519,116 +7669,143 @@ func TestRuntimeComputationMetring(t *testing.T) { { name: "Infinite while loop", code: ` - while true {} - `, - ok: false, - hits: compLimit, - intensity: 6, + while true {} + `, + ok: false, + hits: compLimit, + v1Intensity: 6, + v2Intensity: 6, }, { name: "Limited while loop", code: ` - var i = 0 - while i < 5 { - i = i + 1 - } - `, - ok: false, - hits: compLimit, - intensity: 6, + var i = 0 + while i < 5 { + i = i + 1 + } + `, + ok: false, + hits: compLimit, + v1Intensity: 6, + v2Intensity: 6, }, { name: "statement + createArray + transferArray + too many for-in loop iterations", code: ` - for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] {} - `, - ok: false, - hits: compLimit, - intensity: 6, + for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] {} + `, + ok: false, + hits: compLimit, + v1Intensity: 6, + v2Intensity: 6, }, { name: "statement + createArray + transferArray + two for-in loop iterations", code: ` - for i in [1, 2] {} - `, - ok: true, - hits: 4, - intensity: 4, + for i in [1, 2] {} + `, + ok: true, + hits: 4, + v1Intensity: 4, + v2Intensity: 4, }, { name: "statement + functionInvocation + encoding", code: ` - acc.storage.save("A quick brown fox jumps over the lazy dog", to:/storage/some_path) - `, - ok: true, - hits: 3, - intensity: 76, + acc.storage.save("A quick brown fox jumps over the lazy dog", to:/storage/some_path) + `, + ok: true, + hits: 3, + v1Intensity: 76, + v2Intensity: 108, }, } - for _, test := range tests { + for _, testCase := range tests { - t.Run(test.name, func(t *testing.T) { + 
t.Run(testCase.name, func(t *testing.T) { - script := []byte( - fmt.Sprintf( - ` - transaction { - prepare(acc: auth(Storage) &Account) { - %s - } - } - `, - test.code, - ), - ) + test := func(storageFormatV2Enabled bool) { - runtime := NewTestInterpreterRuntime() + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) + t.Run(name, func(t *testing.T) { + t.Parallel() - compErr := errors.New("computation exceeded limit") - var hits, totalIntensity uint - meterComputationFunc := func(kind common.ComputationKind, intensity uint) error { - hits++ - totalIntensity += intensity - if hits >= compLimit { - return compErr - } - return nil - } + script := []byte( + fmt.Sprintf( + ` + transaction { + prepare(acc: auth(Storage) &Account) { + %s + } + } + `, + testCase.code, + ), + ) + + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) + + compErr := errors.New("computation exceeded limit") + var hits, totalIntensity uint + meterComputationFunc := func(kind common.ComputationKind, intensity uint) error { + hits++ + totalIntensity += intensity + if hits >= compLimit { + return compErr + } + return nil + } - address := common.MustBytesToAddress([]byte{0x1}) + address := common.MustBytesToAddress([]byte{0x1}) - runtimeInterface := &TestRuntimeInterface{ - Storage: NewTestLedger(nil, nil), - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{address}, nil - }, - OnMeterComputation: meterComputationFunc, - } + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{address}, nil + }, + OnMeterComputation: meterComputationFunc, + } - nextTransactionLocation := NewTransactionLocationGenerator() + nextTransactionLocation := NewTransactionLocationGenerator() - err := runtime.ExecuteTransaction( - Script{ - Source: script, - }, - Context{ - 
Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - if test.ok { - require.NoError(t, err) - } else { - RequireError(t, err) + err := runtime.ExecuteTransaction( + Script{ + Source: script, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + if testCase.ok { + require.NoError(t, err) + } else { + RequireError(t, err) + + var executionErr Error + require.ErrorAs(t, err, &executionErr) + require.ErrorAs(t, err.(Error).Unwrap(), &compErr) + } - var executionErr Error - require.ErrorAs(t, err, &executionErr) - require.ErrorAs(t, err.(Error).Unwrap(), &compErr) + assert.Equal(t, testCase.hits, hits) + + if storageFormatV2Enabled { + assert.Equal(t, testCase.v2Intensity, totalIntensity) + } else { + assert.Equal(t, testCase.v1Intensity, totalIntensity) + } + }) } - assert.Equal(t, test.hits, hits) - assert.Equal(t, test.intensity, totalIntensity) + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) } } @@ -11841,3 +12018,370 @@ func BenchmarkContractFunctionInvocation(b *testing.B) { require.NoError(b, err) } } + +func TestRuntimeInvocationReturnTypeInferenceFailure(t *testing.T) { + + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + newRuntimeInterface := func() Interface { + + return &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]common.Address, error) { + return []common.Address{address}, nil + }, + } + } + + runtime := NewTestInterpreterRuntime() + + nextTransactionLocation := NewTransactionLocationGenerator() + + tx := []byte(` + transaction{ + prepare(signer: auth(Storage) &Account){ + let functions = [signer.storage.save].reverse() + } + } + `) + + err := runtime.ExecuteTransaction( + Script{ + Source: tx, + }, + Context{ + Interface: newRuntimeInterface(), + Location: nextTransactionLocation(), + }, + ) + RequireError(t, err) + + var typeErr *sema.InvocationTypeInferenceError + 
require.ErrorAs(t, err, &typeErr) +} + +func TestRuntimeSomeValueChildContainerMutation(t *testing.T) { + + t.Parallel() + + buyTicketTx := []byte(` + import Foo from 0x1 + + transaction() { + prepare(acct: auth(Storage, Capabilities) &Account) { + Foo.logVaultBalance() + var pool = Foo.borrowLotteryPool()! + pool.buyTickets() + Foo.logVaultBalance() + } + execute {} + } + `) + + nextTransactionLocation := NewTransactionLocationGenerator() + + setupTest := func(t *testing.T) ( + runTransaction func(tx []byte) (logs []string), + ) { + + rt := NewTestInterpreterRuntime() + + accountCodes := map[Location][]byte{} + + address := common.MustBytesToAddress([]byte{0x1}) + + var logs []string + + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{address}, nil + }, + OnResolveLocation: NewSingleIdentifierLocationResolver(t), + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + return accountCodes[location], nil + }, + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnProgramLog: func(message string) { + logs = append(logs, message) + }, + OnDecodeArgument: func(b []byte, t cadence.Type) (cadence.Value, error) { + return json.Decode(nil, b) + }, + OnEmitEvent: func(event cadence.Event) error { + return nil + }, + } + + runTransaction = func(tx []byte) []string { + + logs = logs[:0] + + err := rt.ExecuteTransaction( + Script{ + Source: tx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + return logs + } + + return runTransaction + } + + t.Run("non optional vault", func(t *testing.T) { + + t.Parallel() + + contractFoo := ` + access(all) contract Foo { + access(all) resource Vault { + access(all) + var balance: UFix64 + init(balance: UFix64) { + self.balance = balance + } + 
access(all) fun withdraw(amount: UFix64): @Vault { + self.balance = self.balance - amount + return <-create Vault(balance: amount) + } + access(all) fun deposit(from: @Vault) { + self.balance = self.balance + from.balance + destroy from + } + } + access(all) fun createEmptyVault(): @Vault { + return <- create Vault(balance: 0.0) + } + access(all) resource LotteryPool { + access(contract) + let ftVault: @Vault + init() { + self.ftVault <- Foo.createEmptyVault() + } + access(all) + fun buyTickets() { + self.borrowVault().deposit(from: <- create Vault(balance: 5.0)) + } + access(all) fun buyNewTicket() { + self.borrowVault().deposit(from: <- create Vault(balance: 5.0)) + } + access(self) + view fun borrowVault(): &Vault { + return &self.ftVault as &Vault + } + } + init() { + self.account.storage.save(<- create LotteryPool(), to: /storage/lottery_pool) + } + access(all) fun borrowLotteryPool(): &LotteryPool? { + return self.account.storage.borrow<&LotteryPool>(from: /storage/lottery_pool) + } + access(all) fun logVaultBalance() { + var pool = self.borrowLotteryPool()! 
+ log(pool.ftVault.balance) + } + } + ` + + runTransaction := setupTest(t) + + runTransaction(DeploymentTransaction( + "Foo", + []byte(contractFoo), + )) + + logs := runTransaction(buyTicketTx) + assert.Equal(t, []string{"0.00000000", "5.00000000"}, logs) + + logs = runTransaction(buyTicketTx) + assert.Equal(t, []string{"5.00000000", "10.00000000"}, logs) + }) + + t.Run("optional vault", func(t *testing.T) { + + t.Parallel() + + contractFoo := ` + access(all) contract Foo { + access(all) resource Vault { + access(all) + var balance: UFix64 + init(balance: UFix64) { + self.balance = balance + } + access(all) fun withdraw(amount: UFix64): @Vault { + self.balance = self.balance - amount + return <-create Vault(balance: amount) + } + access(all) fun deposit(from: @Vault) { + self.balance = self.balance + from.balance + destroy from + } + } + access(all) fun createEmptyVault(): @Vault { + return <- create Vault(balance: 0.0) + } + access(all) resource LotteryPool { + access(contract) + let ftVault: @Vault? + init() { + self.ftVault <- Foo.createEmptyVault() + } + access(all) + fun buyTickets() { + self.borrowVault().deposit(from: <- create Vault(balance: 5.0)) + } + access(all) fun buyNewTicket() { + self.borrowVault().deposit(from: <- create Vault(balance: 5.0)) + } + access(self) + view fun borrowVault(): &Vault { + return &self.ftVault as &Vault? ?? panic("Cannot borrow vault") + } + } + init() { + self.account.storage.save(<- create LotteryPool(), to: /storage/lottery_pool) + } + access(all) fun borrowLotteryPool(): &LotteryPool? { + return self.account.storage.borrow<&LotteryPool>(from: /storage/lottery_pool) + } + access(all) fun logVaultBalance() { + var pool = self.borrowLotteryPool()! 
+ log(pool.ftVault!.balance) + } + } + ` + + runTransaction := setupTest(t) + + runTransaction(DeploymentTransaction( + "Foo", + []byte(contractFoo), + )) + + logs := runTransaction(buyTicketTx) + assert.Equal(t, []string{"0.00000000", "5.00000000"}, logs) + + logs = runTransaction(buyTicketTx) + assert.Equal(t, []string{"5.00000000", "10.00000000"}, logs) + }) + + t.Run("deeply nested optional vault", func(t *testing.T) { + contractFoo := ` + access(all) + contract Foo { + access(all) + resource Vault { + access(all) + var balance: UFix64 + init(balance: UFix64) { + self.balance = balance + } + access(all) + fun withdraw(amount: UFix64): @Vault { + self.balance = self.balance - amount + return <-create Vault(balance: amount) + } + access(all) + fun deposit(from: @Vault) { + self.balance = self.balance + from.balance + destroy from + } + } + access(all) + fun createEmptyVault(): @Vault { + return <- create Vault(balance: 0.0) + } + access(all) + resource LotteryPool { + access(contract) + let jackpotPool: @Change + access(contract) + let lotteries: @{UInt64: Lottery} + init() { + self.jackpotPool <- create Change() + self.lotteries <- {0: <- create Lottery()} + } + access(all) + fun buyTickets() { + var lotteryRef = self.borrowLotteryRef()! + lotteryRef.buyNewTicket() + } + access(self) + fun borrowLotteryRef(): &Lottery? { + return &self.lotteries[0] + } + } + access(all) + resource Lottery { + access(contract) + let current: @Change + init() { + self.current <- create Change() + } + access(all) + fun buyNewTicket() { + var change = self.borrowCurrentLotteryChange() + change.forceMerge() + } + access(contract) + view fun borrowCurrentLotteryChange(): &Change { + return &self.current + } + } + access(all) + resource Change { + access(contract) + var ftVault: @Vault? 
+ init() { + self.ftVault <- Foo.createEmptyVault() + } + access(all) + fun forceMerge() { + self.borrowVault().deposit(from: <- create Vault(balance: 5.0)) + } + access(self) + view fun borrowVault(): &Vault { + return &self.ftVault as &Vault? ?? panic("Cannot borrow vault") + } + } + init() { + self.account.storage.save(<- create LotteryPool(), to: /storage/lottery_pool) + } + access(all) + fun borrowLotteryPool(): &LotteryPool? { + return self.account.storage.borrow<&LotteryPool>(from: /storage/lottery_pool) + } + access(all) + fun logVaultBalance() { + var pool = self.borrowLotteryPool()! + log(pool.lotteries[0]!.current.ftVault!.balance) + } + } + ` + + runTransaction := setupTest(t) + + runTransaction(DeploymentTransaction( + "Foo", + []byte(contractFoo), + )) + + logs := runTransaction(buyTicketTx) + assert.Equal(t, []string{"0.00000000", "5.00000000"}, logs) + + logs = runTransaction(buyTicketTx) + assert.Equal(t, []string{"5.00000000", "10.00000000"}, logs) + }) +} diff --git a/runtime/script_executor.go b/runtime/script_executor.go index ca07c4cb00..8a51088a3d 100644 --- a/runtime/script_executor.go +++ b/runtime/script_executor.go @@ -107,7 +107,13 @@ func (executor *interpreterScriptExecutor) preprocess() (err error) { runtimeInterface := context.Interface - storage := NewStorage(runtimeInterface, runtimeInterface) + storage := NewStorage( + runtimeInterface, + runtimeInterface, + StorageConfig{ + StorageFormatV2Enabled: interpreterRuntime.defaultConfig.StorageFormatV2Enabled, + }, + ) executor.storage = storage environment := context.Environment diff --git a/runtime/sharedstate_test.go b/runtime/sharedstate_test.go index 3008c85fff..b2cd5cc495 100644 --- a/runtime/sharedstate_test.go +++ b/runtime/sharedstate_test.go @@ -19,6 +19,7 @@ package runtime_test import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -35,195 +36,291 @@ func TestRuntimeSharedState(t *testing.T) { t.Parallel() - runtime := NewTestInterpreterRuntime() - signerAddress 
:= common.MustBytesToAddress([]byte{0x1}) - deploy1 := DeploymentTransaction("C1", []byte(` - access(all) contract C1 { - access(all) fun hello() { - log("Hello from C1!") - } - } - `)) + test := func( + storageFormatV2Enabled bool, + expectedReads []ownerKeyPair, + ) { - deploy2 := DeploymentTransaction("C2", []byte(` - access(all) contract C2 { - access(all) fun hello() { - log("Hello from C2!") - } - } - `)) + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - accountCodes := map[common.Location][]byte{} + t.Run(name, func(t *testing.T) { + t.Parallel() - var events []cadence.Event - var loggedMessages []string + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + config.AtreeValidationEnabled = false + runtime := NewTestInterpreterRuntimeWithConfig(config) - var interpreterState *interpreter.SharedState + deploy1 := DeploymentTransaction("C1", []byte(` + access(all) contract C1 { + access(all) fun hello() { + log("Hello from C1!") + } + } + `)) - var ledgerReads []ownerKeyPair + deploy2 := DeploymentTransaction("C2", []byte(` + access(all) contract C2 { + access(all) fun hello() { + log("Hello from C2!") + } + } + `)) - ledger := NewTestLedger( - func(owner, key, value []byte) { - ledgerReads = append( - ledgerReads, - ownerKeyPair{ - owner: owner, - key: key, - }, - ) - }, - nil, - ) + accountCodes := map[common.Location][]byte{} - runtimeInterface := &TestRuntimeInterface{ - Storage: ledger, - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{signerAddress}, nil - }, - OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { - accountCodes[location] = code - return nil - }, - OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { - code = accountCodes[location] - return code, nil - }, - OnRemoveAccountContractCode: func(location common.AddressLocation) error { - delete(accountCodes, location) 
- return nil - }, - OnResolveLocation: MultipleIdentifierLocationResolver, - OnProgramLog: func(message string) { - loggedMessages = append(loggedMessages, message) - }, - OnEmitEvent: func(event cadence.Event) error { - events = append(events, event) - return nil - }, - OnSetInterpreterSharedState: func(state *interpreter.SharedState) { - interpreterState = state - }, - OnGetInterpreterSharedState: func() *interpreter.SharedState { - return interpreterState - }, - } + var events []cadence.Event + var loggedMessages []string - environment := NewBaseInterpreterEnvironment(Config{}) + var interpreterState *interpreter.SharedState - nextTransactionLocation := NewTransactionLocationGenerator() + var ledgerReads []ownerKeyPair - // Deploy contracts + ledger := NewTestLedger( + func(owner, key, value []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + }, + nil, + ) - for _, source := range [][]byte{ - deploy1, - deploy2, - } { - err := runtime.ExecuteTransaction( - Script{ - Source: source, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) - } + runtimeInterface := &TestRuntimeInterface{ + Storage: ledger, + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{signerAddress}, nil + }, + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnRemoveAccountContractCode: func(location common.AddressLocation) error { + delete(accountCodes, location) + return nil + }, + OnResolveLocation: MultipleIdentifierLocationResolver, + OnProgramLog: func(message string) { + loggedMessages = append(loggedMessages, message) + }, + OnEmitEvent: func(event cadence.Event) error { + events = 
append(events, event) + return nil + }, + OnSetInterpreterSharedState: func(state *interpreter.SharedState) { + interpreterState = state + }, + OnGetInterpreterSharedState: func() *interpreter.SharedState { + return interpreterState + }, + } + + environment := NewBaseInterpreterEnvironment(config) + + nextTransactionLocation := NewTransactionLocationGenerator() + + // Deploy contracts + + for _, source := range [][]byte{ + deploy1, + deploy2, + } { + err := runtime.ExecuteTransaction( + Script{ + Source: source, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) + } + + assert.NotEmpty(t, accountCodes) + + // Call C1.hello using transaction + + loggedMessages = nil + + err := runtime.ExecuteTransaction( + Script{ + Source: []byte(` + import C1 from 0x1 + + transaction { + prepare(signer: &Account) { + C1.hello() + } + } + `), + Arguments: nil, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) - assert.NotEmpty(t, accountCodes) + assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) - // Call C1.hello using transaction + // Call C1.hello manually - loggedMessages = nil + loggedMessages = nil - err := runtime.ExecuteTransaction( - Script{ - Source: []byte(` - import C1 from 0x1 + _, err = runtime.InvokeContractFunction( + common.AddressLocation{ + Address: signerAddress, + Name: "C1", + }, + "hello", + nil, + nil, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) - transaction { - prepare(signer: &Account) { - C1.hello() - } - } - `), - Arguments: nil, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) + assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) - 
assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) + // Call C2.hello manually - // Call C1.hello manually + loggedMessages = nil - loggedMessages = nil + _, err = runtime.InvokeContractFunction( + common.AddressLocation{ + Address: signerAddress, + Name: "C2", + }, + "hello", + nil, + nil, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) - _, err = runtime.InvokeContractFunction( - common.AddressLocation{ - Address: signerAddress, - Name: "C1", - }, - "hello", - nil, - nil, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) + assert.Equal(t, []string{`"Hello from C2!"`}, loggedMessages) - assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) + // Assert shared state was used, + // i.e. data was not re-read - // Call C2.hello manually + require.Equal(t, + expectedReads, + ledgerReads, + ) + }) + } - loggedMessages = nil + test( + false, - _, err = runtime.InvokeContractFunction( - common.AddressLocation{ - Address: signerAddress, - Name: "C2", - }, - "hello", - nil, - nil, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, + []ownerKeyPair{ + { + owner: signerAddress[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, }, ) - require.NoError(t, err) - - assert.Equal(t, []string{`"Hello from C2!"`}, loggedMessages) - // Assert shared state was used, - // i.e. data was not re-read - - require.Equal(t, + test( + true, []ownerKeyPair{ + // Read account register to check if it is a migrated account + // Read returns no value. 
+ { + owner: signerAddress[:], + key: []byte(AccountStorageKey), + }, + // Read contract domain register. + // Read returns no value. + { + owner: signerAddress[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, + // Read all available domain registers to check if it is a new account + // Read returns no value. + { + owner: signerAddress[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainInbox.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainCapabilityController.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainCapabilityControllerTag.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainPathCapability.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainAccountCapability.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(AccountStorageKey), + }, { owner: signerAddress[:], - key: []byte(StorageDomainContract), + key: []byte(AccountStorageKey), }, { owner: signerAddress[:], - key: []byte(StorageDomainContract), + key: []byte(AccountStorageKey), }, { owner: signerAddress[:], key: []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, - ledgerReads, ) } diff --git a/runtime/slabindex.go b/runtime/slabindex.go new file mode 100644 index 0000000000..00178608d0 --- /dev/null +++ b/runtime/slabindex.go @@ -0,0 +1,86 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime + +import ( + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" +) + +// readSlabIndexFromRegister returns register value as atree.SlabIndex. +// This function returns error if +// - underlying ledger panics, or +// - underlying ledger returns error when retrieving ledger value, or +// - retrieved ledger value is invalid (for atree.SlabIndex). +func readSlabIndexFromRegister( + ledger atree.Ledger, + address common.Address, + key []byte, +) (atree.SlabIndex, bool, error) { + var data []byte + var err error + errors.WrapPanic(func() { + data, err = ledger.GetValue(address[:], key) + }) + if err != nil { + return atree.SlabIndex{}, false, interpreter.WrappedExternalError(err) + } + + dataLength := len(data) + + if dataLength == 0 { + return atree.SlabIndex{}, false, nil + } + + isStorageIndex := dataLength == storageIndexLength + if !isStorageIndex { + // Invalid data in register + + // TODO: add dedicated error type? 
+ return atree.SlabIndex{}, false, errors.NewUnexpectedError( + "invalid storage index for storage map of account '%x': expected length %d, got %d", + address[:], storageIndexLength, dataLength, + ) + } + + return atree.SlabIndex(data), true, nil +} + +func writeSlabIndexToRegister( + ledger atree.Ledger, + address common.Address, + key []byte, + slabIndex atree.SlabIndex, +) error { + var err error + errors.WrapPanic(func() { + err = ledger.SetValue( + address[:], + key, + slabIndex[:], + ) + }) + if err != nil { + return interpreter.WrappedExternalError(err) + } + return nil +} diff --git a/runtime/storage.go b/runtime/storage.go index 7b9a567285..2345ddb8ed 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -32,21 +32,55 @@ import ( "github.com/onflow/cadence/interpreter" ) -const StorageDomainContract = "contract" +const ( + AccountStorageKey = "stored" +) + +type StorageConfig struct { + StorageFormatV2Enabled bool +} + +type StorageFormat uint8 + +const ( + StorageFormatUnknown StorageFormat = iota + StorageFormatV1 + StorageFormatV2 +) type Storage struct { *atree.PersistentSlabStorage - NewStorageMaps *orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex] - storageMaps map[interpreter.StorageKey]*interpreter.StorageMap + + // cachedDomainStorageMaps is a cache of domain storage maps. + // Key is StorageKey{address, domain} and value is domain storage map. + cachedDomainStorageMaps map[interpreter.StorageDomainKey]*interpreter.DomainStorageMap + + // cachedV1Accounts contains the cached result of determining + // if the account is in storage format v1 or not. + cachedV1Accounts map[common.Address]bool + + // contractUpdates is a cache of contract updates. + // Key is StorageKey{contract_address, contract_name} and value is contract composite value. 
contractUpdates *orderedmap.OrderedMap[interpreter.StorageKey, *interpreter.CompositeValue] - Ledger atree.Ledger - memoryGauge common.MemoryGauge + + Ledger atree.Ledger + + memoryGauge common.MemoryGauge + + Config StorageConfig + + AccountStorageV1 *AccountStorageV1 + AccountStorageV2 *AccountStorageV2 + scheduledV2Migrations []common.Address } var _ atree.SlabStorage = &Storage{} var _ interpreter.Storage = &Storage{} -func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage { +func NewPersistentSlabStorage( + ledger atree.Ledger, + memoryGauge common.MemoryGauge, +) *atree.PersistentSlabStorage { decodeStorable := func( decoder *cbor.StreamDecoder, slabID atree.SlabID, @@ -68,96 +102,293 @@ func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage { } ledgerStorage := atree.NewLedgerBaseStorage(ledger) - persistentSlabStorage := atree.NewPersistentSlabStorage( + + return atree.NewPersistentSlabStorage( ledgerStorage, interpreter.CBOREncMode, interpreter.CBORDecMode, decodeStorable, decodeTypeInfo, ) +} + +func NewStorage( + ledger atree.Ledger, + memoryGauge common.MemoryGauge, + config StorageConfig, +) *Storage { + persistentSlabStorage := NewPersistentSlabStorage(ledger, memoryGauge) + + accountStorageV1 := NewAccountStorageV1( + ledger, + persistentSlabStorage, + memoryGauge, + ) + + var accountStorageV2 *AccountStorageV2 + if config.StorageFormatV2Enabled { + accountStorageV2 = NewAccountStorageV2( + ledger, + persistentSlabStorage, + memoryGauge, + ) + } + return &Storage{ Ledger: ledger, PersistentSlabStorage: persistentSlabStorage, - storageMaps: map[interpreter.StorageKey]*interpreter.StorageMap{}, memoryGauge: memoryGauge, + Config: config, + AccountStorageV1: accountStorageV1, + AccountStorageV2: accountStorageV2, } } const storageIndexLength = 8 -func (s *Storage) GetStorageMap( +// GetDomainStorageMap returns existing or new domain storage map for the given account and domain. 
+func (s *Storage) GetDomainStorageMap( + inter *interpreter.Interpreter, address common.Address, - domain string, + domain common.StorageDomain, createIfNotExists bool, ) ( - storageMap *interpreter.StorageMap, + domainStorageMap *interpreter.DomainStorageMap, ) { - key := interpreter.NewStorageKey(s.memoryGauge, address, domain) - - storageMap = s.storageMaps[key] - if storageMap == nil { + // Get cached domain storage map if it exists. - // Load data through the runtime interface + domainStorageKey := interpreter.NewStorageDomainKey(s.memoryGauge, address, domain) - var data []byte - var err error - errors.WrapPanic(func() { - data, err = s.Ledger.GetValue(key.Address[:], []byte(key.Key)) - }) - if err != nil { - panic(interpreter.WrappedExternalError(err)) + if s.cachedDomainStorageMaps != nil { + domainStorageMap = s.cachedDomainStorageMaps[domainStorageKey] + if domainStorageMap != nil { + return domainStorageMap } + } - dataLength := len(data) - isStorageIndex := dataLength == storageIndexLength - if dataLength > 0 && !isStorageIndex { - // TODO: add dedicated error type? - panic(errors.NewUnexpectedError( - "invalid storage index for storage map with domain '%s': expected length %d, got %d", - domain, storageIndexLength, dataLength, - )) + defer func() { + // Cache domain storage map + if domainStorageMap != nil { + s.cacheDomainStorageMap( + domainStorageKey, + domainStorageMap, + ) } + }() - // Load existing storage or create and store new one + if !s.Config.StorageFormatV2Enabled { - atreeAddress := atree.Address(address) + // When StorageFormatV2 is disabled, handle all accounts as v1 accounts. - if isStorageIndex { - var slabIndex atree.SlabIndex - copy(slabIndex[:], data[:]) - storageMap = s.loadExistingStorageMap(atreeAddress, slabIndex) - } else if createIfNotExists { - storageMap = s.StoreNewStorageMap(atreeAddress, domain) - } + // Only read requested domain register. 
- if storageMap != nil { - s.storageMaps[key] = storageMap - } + domainStorageMap = s.getDomainStorageMapForV1Account( + address, + domain, + createIfNotExists, + ) + + return } - return storageMap + // StorageFormatV2 is enabled. + + // Check if cached account format is available. + + cachedFormat, known := s.getCachedAccountFormat(address) + if known { + return s.getDomainStorageMap( + cachedFormat, + inter, + address, + domain, + createIfNotExists, + ) + } + + // Check if account is v2 (by reading "stored" register). + + if s.isV2Account(address) { + return s.getDomainStorageMapForV2Account( + inter, + address, + domain, + createIfNotExists, + ) + } + + // Check if account is v1 (by reading requested domain register). + + if s.hasDomainRegister(address, domain) { + return s.getDomainStorageMapForV1Account( + address, + domain, + createIfNotExists, + ) + } + + // Domain register doesn't exist. + + // Return early if !createIfNotExists to avoid more register reading. + + if !createIfNotExists { + return nil + } + + // At this point, account is either new account or v1 account without requested domain register. + + // Check if account is v1 (by reading more domain registers) + + if s.isV1Account(address) { + return s.getDomainStorageMapForV1Account( + address, + domain, + createIfNotExists, + ) + } + + // New account is treated as v2 account when feature flag is enabled. 
+ + return s.getDomainStorageMapForV2Account( + inter, + address, + domain, + createIfNotExists, + ) +} + +func (s *Storage) getDomainStorageMapForV1Account( + address common.Address, + domain common.StorageDomain, + createIfNotExists bool, +) *interpreter.DomainStorageMap { + domainStorageMap := s.AccountStorageV1.GetDomainStorageMap( + address, + domain, + createIfNotExists, + ) + + s.cacheIsV1Account(address, true) + + return domainStorageMap } -func (s *Storage) loadExistingStorageMap(address atree.Address, slabIndex atree.SlabIndex) *interpreter.StorageMap { +func (s *Storage) getDomainStorageMapForV2Account( + inter *interpreter.Interpreter, + address common.Address, + domain common.StorageDomain, + createIfNotExists bool, +) *interpreter.DomainStorageMap { + domainStorageMap := s.AccountStorageV2.GetDomainStorageMap( + inter, + address, + domain, + createIfNotExists, + ) - slabID := atree.NewSlabID(address, slabIndex) + s.cacheIsV1Account(address, false) - return interpreter.NewStorageMapWithRootID(s, slabID) + return domainStorageMap } -func (s *Storage) StoreNewStorageMap(address atree.Address, domain string) *interpreter.StorageMap { - storageMap := interpreter.NewStorageMap(s.memoryGauge, s, address) +func (s *Storage) getDomainStorageMap( + format StorageFormat, + inter *interpreter.Interpreter, + address common.Address, + domain common.StorageDomain, + createIfNotExists bool, +) *interpreter.DomainStorageMap { + switch format { + + case StorageFormatV1: + return s.getDomainStorageMapForV1Account( + address, + domain, + createIfNotExists, + ) - slabIndex := storageMap.SlabID().Index() + case StorageFormatV2: + return s.getDomainStorageMapForV2Account( + inter, + address, + domain, + createIfNotExists, + ) - storageKey := interpreter.NewStorageKey(s.memoryGauge, common.Address(address), domain) + default: + panic(errors.NewUnreachableError()) + } +} + +func (s *Storage) getCachedAccountFormat(address common.Address) (format StorageFormat, known bool) { + 
isV1, cached := s.cachedV1Accounts[address] + if !cached { + return StorageFormatUnknown, false + } + if isV1 { + return StorageFormatV1, true + } else { + return StorageFormatV2, true + } +} - if s.NewStorageMaps == nil { - s.NewStorageMaps = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} +// isV2Account returns true if given account is in account storage format v2. +func (s *Storage) isV2Account(address common.Address) bool { + accountStorageMapExists, err := hasAccountStorageMap(s.Ledger, address) + if err != nil { + panic(err) } - s.NewStorageMaps.Set(storageKey, slabIndex) - return storageMap + return accountStorageMapExists +} + +// hasDomainRegister returns true if given account has given domain register. +// NOTE: account storage format v1 has domain registers. +func (s *Storage) hasDomainRegister(address common.Address, domain common.StorageDomain) (domainExists bool) { + _, domainExists, err := readSlabIndexFromRegister( + s.Ledger, + address, + []byte(domain.Identifier()), + ) + if err != nil { + panic(err) + } + + return domainExists +} + +// isV1Account returns true if given account is in account storage format v1 +// by checking if any of the domain registers exist. +func (s *Storage) isV1Account(address common.Address) (isV1 bool) { + + // Check if a storage map register exists for any of the domains. + // Check the most frequently used domains first, such as storage, public, private. 
+ for _, domain := range common.AllStorageDomains { + domainExists := s.hasDomainRegister(address, domain) + if domainExists { + return true + } + } + + return false +} + +func (s *Storage) cacheIsV1Account(address common.Address, isV1 bool) { + if s.cachedV1Accounts == nil { + s.cachedV1Accounts = map[common.Address]bool{} + } + s.cachedV1Accounts[address] = isV1 +} + +func (s *Storage) cacheDomainStorageMap( + storageDomainKey interpreter.StorageDomainKey, + domainStorageMap *interpreter.DomainStorageMap, +) { + if s.cachedDomainStorageMaps == nil { + s.cachedDomainStorageMaps = map[interpreter.StorageDomainKey]*interpreter.DomainStorageMap{} + } + + s.cachedDomainStorageMaps[storageDomainKey] = domainStorageMap } func (s *Storage) recordContractUpdate( @@ -216,7 +447,7 @@ func (s *Storage) writeContractUpdate( key interpreter.StorageKey, contractValue *interpreter.CompositeValue, ) { - storageMap := s.GetStorageMap(key.Address, StorageDomainContract, true) + storageMap := s.GetDomainStorageMap(inter, key.Address, common.StorageDomainContract, true) // NOTE: pass nil instead of allocating a Value-typed interface that points to nil storageMapKey := interpreter.StringStorageMapKey(key.Key) if contractValue == nil { @@ -231,7 +462,7 @@ func (s *Storage) Commit(inter *interpreter.Interpreter, commitContractUpdates b return s.commit(inter, commitContractUpdates, true) } -// NondeterministicCommit serializes and commits all values in the deltas storage +// Deprecated: NondeterministicCommit serializes and commits all values in the deltas storage // in nondeterministic order. This function is used when commit ordering isn't // required (e.g. migration programs). 
func (s *Storage) NondeterministicCommit(inter *interpreter.Interpreter, commitContractUpdates bool) error { @@ -244,54 +475,137 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b s.commitContractUpdates(inter) } - err := s.commitNewStorageMaps() + err := s.AccountStorageV1.commit() if err != nil { return err } + if s.Config.StorageFormatV2Enabled { + err = s.AccountStorageV2.commit() + if err != nil { + return err + } + + err = s.migrateV1AccountsToV2(inter) + if err != nil { + return err + } + } + // Commit the underlying slab storage's writes - size := s.PersistentSlabStorage.DeltasSizeWithoutTempAddresses() + slabStorage := s.PersistentSlabStorage + + size := slabStorage.DeltasSizeWithoutTempAddresses() if size > 0 { inter.ReportComputation(common.ComputationKindEncodeValue, uint(size)) usage := common.NewBytesMemoryUsage(int(size)) - common.UseMemory(s.memoryGauge, usage) + common.UseMemory(inter, usage) } - deltas := s.PersistentSlabStorage.DeltasWithoutTempAddresses() - common.UseMemory(s.memoryGauge, common.NewAtreeEncodedSlabMemoryUsage(deltas)) + deltas := slabStorage.DeltasWithoutTempAddresses() + common.UseMemory(inter, common.NewAtreeEncodedSlabMemoryUsage(deltas)) // TODO: report encoding metric for all encoded slabs if deterministic { - return s.PersistentSlabStorage.FastCommit(runtime.NumCPU()) + return slabStorage.FastCommit(runtime.NumCPU()) } else { - return s.PersistentSlabStorage.NondeterministicFastCommit(runtime.NumCPU()) + return slabStorage.NondeterministicFastCommit(runtime.NumCPU()) } } -func (s *Storage) commitNewStorageMaps() error { - if s.NewStorageMaps == nil { +func (s *Storage) ScheduleV2Migration(address common.Address) bool { + if !s.Config.StorageFormatV2Enabled { + return false + } + s.scheduledV2Migrations = append(s.scheduledV2Migrations, address) + return true +} + +func (s *Storage) ScheduleV2MigrationForModifiedAccounts() bool { + for address, isV1 := range s.cachedV1Accounts { 
//nolint:maprange + if isV1 && s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) { + if !s.ScheduleV2Migration(address) { + return false + } + } + } + + return true +} + +func (s *Storage) migrateV1AccountsToV2(inter *interpreter.Interpreter) error { + + if !s.Config.StorageFormatV2Enabled { + return errors.NewUnexpectedError("cannot migrate to storage format v2, as it is not enabled") + } + + if len(s.scheduledV2Migrations) == 0 { return nil } - for pair := s.NewStorageMaps.Oldest(); pair != nil; pair = pair.Next() { - var err error - errors.WrapPanic(func() { - err = s.Ledger.SetValue( - pair.Key.Address[:], - []byte(pair.Key.Key), - pair.Value[:], - ) - }) + // getDomainStorageMap function returns cached domain storage map if it is available + // before loading domain storage map from storage. + // This is necessary to migrate uncommitted (new) but cached domain storage map. + getDomainStorageMap := func( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain common.StorageDomain, + ) (*interpreter.DomainStorageMap, error) { + domainStorageKey := interpreter.NewStorageDomainKey(s.memoryGauge, address, domain) + + // Get cached domain storage map if available. 
+ domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey] + + if domainStorageMap != nil { + return domainStorageMap, nil + } + + return getDomainStorageMapFromV1DomainRegister(ledger, storage, address, domain) + } + + migrator := NewDomainRegisterMigration( + s.Ledger, + s.PersistentSlabStorage, + inter, + s.memoryGauge, + getDomainStorageMap, + ) + + // Ensure the scheduled accounts are migrated in a deterministic order + + sort.Slice( + s.scheduledV2Migrations, + func(i, j int) bool { + address1 := s.scheduledV2Migrations[i] + address2 := s.scheduledV2Migrations[j] + return address1.Compare(address2) < 0 + }, + ) + + for _, address := range s.scheduledV2Migrations { + + accountStorageMap, err := migrator.MigrateAccount(address) if err != nil { - return interpreter.WrappedExternalError(err) + return err } + + s.AccountStorageV2.cacheAccountStorageMap( + address, + accountStorageMap, + ) + + s.cacheIsV1Account(address, false) } + s.scheduledV2Migrations = nil + return nil } func (s *Storage) CheckHealth() error { + // Check slab storage health rootSlabIDs, err := atree.CheckStorageHealth(s, -1) if err != nil { @@ -311,28 +625,52 @@ func (s *Storage) CheckHealth() error { accountRootSlabIDs[rootSlabID] = struct{}{} } - // Check that each storage map refers to an existing slab. - - found := map[atree.SlabID]struct{}{} + // Check that account storage maps and unmigrated domain storage maps + // match returned root slabs from atree.CheckStorageHealth. var storageMapStorageIDs []atree.SlabID - for _, storageMap := range s.storageMaps { //nolint:maprange + if s.Config.StorageFormatV2Enabled { + // Get cached account storage map slab IDs. + storageMapStorageIDs = append( + storageMapStorageIDs, + s.AccountStorageV2.cachedRootSlabIDs()..., + ) + } + + // Get slab IDs of cached domain storage maps that are in account storage format v1. 
+ for storageKey, storageMap := range s.cachedDomainStorageMaps { //nolint:maprange + address := storageKey.Address + + // Only accounts in storage format v1 store domain storage maps + // directly at the root of the account + if !s.isV1Account(address) { + continue + } + storageMapStorageIDs = append( storageMapStorageIDs, storageMap.SlabID(), ) } - sort.Slice(storageMapStorageIDs, func(i, j int) bool { - a := storageMapStorageIDs[i] - b := storageMapStorageIDs[j] - return a.Compare(b) < 0 - }) + sort.Slice( + storageMapStorageIDs, + func(i, j int) bool { + a := storageMapStorageIDs[i] + b := storageMapStorageIDs[j] + return a.Compare(b) < 0 + }, + ) + + found := map[atree.SlabID]struct{}{} for _, storageMapStorageID := range storageMapStorageIDs { if _, ok := accountRootSlabIDs[storageMapStorageID]; !ok { - return errors.NewUnexpectedError("account storage map points to non-existing slab %s", storageMapStorageID) + return errors.NewUnexpectedError( + "account storage map (and unmigrated domain storage map) points to non-root slab %s", + storageMapStorageID, + ) } found[storageMapStorageID] = struct{}{} @@ -370,6 +708,37 @@ func (s *Storage) CheckHealth() error { return nil } +// AccountStorageFormat returns either StorageFormatV1 or StorageFormatV2 for existing accounts, +// and StorageFormatUnknown for non-existing accounts. 
+func (s *Storage) AccountStorageFormat(address common.Address) (format StorageFormat) { + cachedFormat, known := s.getCachedAccountFormat(address) + if known { + return cachedFormat + } + + defer func() { + // Cache account fomat + switch format { + case StorageFormatV1: + s.cacheIsV1Account(address, true) + case StorageFormatV2: + s.cacheIsV1Account(address, false) + } + }() + + if s.Config.StorageFormatV2Enabled { + if s.isV2Account(address) { + return StorageFormatV2 + } + } + + if s.isV1Account(address) { + return StorageFormatV1 + } + + return StorageFormatUnknown +} + type UnreferencedRootSlabsError struct { UnreferencedRootSlabIDs []atree.SlabID } @@ -379,5 +748,9 @@ var _ errors.InternalError = UnreferencedRootSlabsError{} func (UnreferencedRootSlabsError) IsInternalError() {} func (e UnreferencedRootSlabsError) Error() string { - return fmt.Sprintf("slabs not referenced: %s", e.UnreferencedRootSlabIDs) + return fmt.Sprintf( + "%s slabs not referenced: %s", + errors.InternalErrorMessagePrefix, + e.UnreferencedRootSlabIDs, + ) } diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 5a7bb60bc9..c9efbfdc5f 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -23,7 +23,11 @@ import ( "encoding/hex" "fmt" "math/rand" + "runtime" + "slices" "sort" + "strconv" + "strings" "testing" "github.com/onflow/atree" @@ -32,7 +36,6 @@ import ( "github.com/onflow/cadence" "github.com/onflow/cadence/common" - "github.com/onflow/cadence/common/orderedmap" "github.com/onflow/cadence/encoding/json" "github.com/onflow/cadence/interpreter" . 
"github.com/onflow/cadence/runtime" @@ -49,28 +52,27 @@ func withWritesToStorage( handler func(*Storage, *interpreter.Interpreter), ) { ledger := NewTestLedger(nil, onWrite) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreter(tb) - address := common.MustBytesToAddress([]byte{0x1}) - for i := 0; i < count; i++ { randomIndex := random.Uint32() - storageKey := interpreter.StorageKey{ - Address: address, - Key: fmt.Sprintf("%d", randomIndex), - } + var address common.Address + random.Read(address[:]) var slabIndex atree.SlabIndex binary.BigEndian.PutUint32(slabIndex[:], randomIndex) - if storage.NewStorageMaps == nil { - storage.NewStorageMaps = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} - } - storage.NewStorageMaps.Set(storageKey, slabIndex) + storage.AccountStorageV2.SetNewAccountStorageMapSlabIndex(address, slabIndex) } handler(storage, inter) @@ -154,7 +156,9 @@ func TestRuntimeStorageWrite(t *testing.T) { t.Parallel() - runtime := NewTestInterpreterRuntime() + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = true + runtime := NewTestInterpreterRuntimeWithConfig(config) address := common.MustBytesToAddress([]byte{0x1}) @@ -197,16 +201,22 @@ func TestRuntimeStorageWrite(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // storage index to storage domain storage map + // storage index to account storage map { []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, - []byte("storage"), + []byte(AccountStorageKey), }, // storage domain storage map + // NOTE: storage domain storage map is empty because it is inlined in account storage map { []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, }, + // account storage map + { + []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, }, writes, ) @@ -1609,161 +1619,219 @@ 
func TestRuntimeResourceOwnerChange(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.ResourceOwnerChangeHandlerEnabled = true - runtime := NewTestInterpreterRuntimeWithConfig(config) + test := func( + storageFormatV2Enabled bool, + expectedNonEmptyKeys []string, + ) { - address1 := common.MustBytesToAddress([]byte{0x1}) - address2 := common.MustBytesToAddress([]byte{0x2}) + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) + t.Run(name, func(t *testing.T) { + t.Parallel() - ledger := NewTestLedger(nil, nil) + config := DefaultTestInterpreterConfig + config.ResourceOwnerChangeHandlerEnabled = true + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) - var signers []Address + address1 := common.MustBytesToAddress([]byte{0x1}) + address2 := common.MustBytesToAddress([]byte{0x2}) - deployTx := DeploymentTransaction("Test", []byte(` - access(all) contract Test { + ledger := NewTestLedger(nil, nil) - access(all) resource R {} + var signers []Address - access(all) fun createR(): @R { - return <-create R() - } - } - `)) + deployTx := DeploymentTransaction("Test", []byte(` + access(all) contract Test { - type resourceOwnerChange struct { - uuid *interpreter.UInt64Value - typeID common.TypeID - oldAddress common.Address - newAddress common.Address - } + access(all) resource R {} - accountCodes := map[Location][]byte{} - var events []cadence.Event - var loggedMessages []string - var resourceOwnerChanges []resourceOwnerChange + access(all) fun createR(): @R { + return <-create R() + } + } + `)) - runtimeInterface := &TestRuntimeInterface{ - Storage: ledger, - OnGetSigningAccounts: func() ([]Address, error) { - return signers, nil - }, - OnResolveLocation: NewSingleIdentifierLocationResolver(t), - OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { - accountCodes[location] = code - return nil - }, - 
OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { - code = accountCodes[location] - return code, nil - }, - OnEmitEvent: func(event cadence.Event) error { - events = append(events, event) - return nil - }, - OnProgramLog: func(message string) { - loggedMessages = append(loggedMessages, message) - }, - OnResourceOwnerChanged: func( - inter *interpreter.Interpreter, - resource *interpreter.CompositeValue, - oldAddress common.Address, - newAddress common.Address, - ) { - resourceOwnerChanges = append( - resourceOwnerChanges, - resourceOwnerChange{ - typeID: resource.TypeID(), - // TODO: provide proper location range - uuid: resource.ResourceUUID(inter, interpreter.EmptyLocationRange), - oldAddress: oldAddress, - newAddress: newAddress, + type resourceOwnerChange struct { + uuid *interpreter.UInt64Value + typeID common.TypeID + oldAddress common.Address + newAddress common.Address + } + + accountCodes := map[Location][]byte{} + var events []cadence.Event + var loggedMessages []string + var resourceOwnerChanges []resourceOwnerChange + + runtimeInterface := &TestRuntimeInterface{ + Storage: ledger, + OnGetSigningAccounts: func() ([]Address, error) { + return signers, nil }, - ) - }, - } + OnResolveLocation: NewSingleIdentifierLocationResolver(t), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnProgramLog: func(message string) { + loggedMessages = append(loggedMessages, message) + }, + OnResourceOwnerChanged: func( + inter *interpreter.Interpreter, + resource *interpreter.CompositeValue, + oldAddress common.Address, + newAddress common.Address, + ) { + resourceOwnerChanges = append( + 
resourceOwnerChanges, + resourceOwnerChange{ + typeID: resource.TypeID(), + // TODO: provide proper location range + uuid: resource.ResourceUUID(inter, interpreter.EmptyLocationRange), + oldAddress: oldAddress, + newAddress: newAddress, + }, + ) + }, + } - nextTransactionLocation := NewTransactionLocationGenerator() + nextTransactionLocation := NewTransactionLocationGenerator() - // Deploy contract + // Deploy contract - signers = []Address{address1} + signers = []Address{address1} - err := runtime.ExecuteTransaction( - Script{ - Source: deployTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) + err := runtime.ExecuteTransaction( + Script{ + Source: deployTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) - // Store + // Store - signers = []Address{address1} + signers = []Address{address1} - storeTx := []byte(` - import Test from 0x1 + storeTx := []byte(` + import Test from 0x1 - transaction { - prepare(signer: auth(Storage) &Account) { - signer.storage.save(<-Test.createR(), to: /storage/test) - } - } - `) + transaction { + prepare(signer: auth(Storage) &Account) { + signer.storage.save(<-Test.createR(), to: /storage/test) + } + } + `) - err = runtime.ExecuteTransaction( - Script{ - Source: storeTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) + err = runtime.ExecuteTransaction( + Script{ + Source: storeTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) - // Transfer + // Transfer - signers = []Address{address1, address2} + signers = []Address{address1, address2} - transferTx := []byte(` - import Test from 0x1 + transferTx := []byte(` + import Test from 0x1 - transaction { - prepare( - signer1: auth(Storage) &Account, - signer2: auth(Storage) &Account - ) { - let value <- 
signer1.storage.load<@Test.R>(from: /storage/test)! - signer2.storage.save(<-value, to: /storage/test) - } - } - `) + transaction { + prepare( + signer1: auth(Storage) &Account, + signer2: auth(Storage) &Account + ) { + let value <- signer1.storage.load<@Test.R>(from: /storage/test)! + signer2.storage.save(<-value, to: /storage/test) + } + } + `) - err = runtime.ExecuteTransaction( - Script{ - Source: transferTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) + err = runtime.ExecuteTransaction( + Script{ + Source: transferTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) - var nonEmptyKeys []string - for key, data := range ledger.StoredValues { - if len(data) > 0 { - nonEmptyKeys = append(nonEmptyKeys, key) - } - } + var actualNonEmptyKeys []string + for key, data := range ledger.StoredValues { + if len(data) > 0 { + actualNonEmptyKeys = append(actualNonEmptyKeys, key) + } + } - sort.Strings(nonEmptyKeys) + sort.Strings(actualNonEmptyKeys) - assert.Equal(t, + assert.Equal(t, + expectedNonEmptyKeys, + actualNonEmptyKeys, + ) + + expectedUUID := interpreter.NewUnmeteredUInt64Value(1) + assert.Equal(t, + []resourceOwnerChange{ + { + typeID: "A.0000000000000001.Test.R", + uuid: &expectedUUID, + oldAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, + newAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, + }, + }, + { + typeID: "A.0000000000000001.Test.R", + uuid: &expectedUUID, + oldAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, + }, + newAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, + }, + { + typeID: "A.0000000000000001.Test.R", + uuid: &expectedUUID, + oldAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, + newAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, + }, + }, + }, + resourceOwnerChanges, + ) 
+ }) + } + + test( + false, []string{ // account 0x1: // NOTE: with atree inlining, contract is inlined in contract map @@ -1779,44 +1847,25 @@ func TestRuntimeResourceOwnerChange(t *testing.T) { "\x00\x00\x00\x00\x00\x00\x00\x02|$\x00\x00\x00\x00\x00\x00\x00\x02", "\x00\x00\x00\x00\x00\x00\x00\x02|storage", }, - nonEmptyKeys, ) - expectedUUID := interpreter.NewUnmeteredUInt64Value(1) - assert.Equal(t, - []resourceOwnerChange{ - { - typeID: "A.0000000000000001.Test.R", - uuid: &expectedUUID, - oldAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - }, - newAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, - }, - }, - { - typeID: "A.0000000000000001.Test.R", - uuid: &expectedUUID, - oldAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, - }, - newAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - }, - }, - { - typeID: "A.0000000000000001.Test.R", - uuid: &expectedUUID, - oldAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - }, - newAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, - }, - }, + test( + true, + []string{ + // account 0x1: + // NOTE: with account storage map and atree inlining, + // both storage domain storage map (with inlined storage data) + // and contract domain storage map (with inlined contract data) + // are inlined in account storage map. + "\x00\x00\x00\x00\x00\x00\x00\x01|$\x00\x00\x00\x00\x00\x00\x00\x02", + "\x00\x00\x00\x00\x00\x00\x00\x01|stored", + // account 0x2 + // NOTE: with account storage map and atree inlining, + // storage domain storage map (with inlined resource) + // is inlined in account storage map. 
+ "\x00\x00\x00\x00\x00\x00\x00\x02|$\x00\x00\x00\x00\x00\x00\x00\x02", + "\x00\x00\x00\x00\x00\x00\x00\x02|stored", }, - resourceOwnerChanges, ) } @@ -3100,7 +3149,7 @@ func TestRuntimeStorageInternalAccess(t *testing.T) { }) require.NoError(t, err) - storageMap := storage.GetStorageMap(address, common.PathDomainStorage.Identifier(), false) + storageMap := storage.GetDomainStorageMap(inter, address, common.PathDomainStorage.StorageDomain(), false) require.NotNil(t, storageMap) // Read first @@ -6229,3 +6278,3084 @@ func TestRuntimeStorageReferenceAccess(t *testing.T) { require.ErrorAs(t, err, &interpreter.DereferenceError{}) }) } + +type ( + domainStorageMapValues map[interpreter.StorageMapKey]interpreter.Value + accountStorageMapValues map[common.StorageDomain]domainStorageMapValues +) + +func TestRuntimeStorageForNewAccount(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + // This test reads non-existent domain storage map and commit changes. + // pre-condition: empty storage + // post-condition: empty storage + // migration: no migration + t.Run("read non-existent domain storage map", func(t *testing.T) { + + var writeCount int + + // Create empty storage + ledger := NewTestLedger(nil, LedgerOnWriteCounter(&writeCount)) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domain := common.PathDomainStorage.StorageDomain() + + // Get non-existent domain storage map + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.Nil(t, domainStorageMap) + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // Check number of writes to underlying storage + require.Equal(t, 0, 
writeCount) + }) + + // This test creates and writes to new domain storage map and commit changes. + // pre-condition: empty storage + // post-condition: storage containing + // - account register + // - account storage map + // - zero or more non-inlined domain storage map + // migration: no migraiton for new account. + createDomainTestCases := []struct { + name string + newDomains []common.StorageDomain + domainStorageMapCount int + inlined bool + }{ + {name: "empty domain storage map", newDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, domainStorageMapCount: 0, inlined: true}, + {name: "small domain storage map", newDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, domainStorageMapCount: 10, inlined: true}, + {name: "large domain storage map", newDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, domainStorageMapCount: 20, inlined: false}, + } + + for _, tc := range createDomainTestCases { + t.Run("create "+tc.name, func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + // Create empty storage + ledger := NewTestLedger(nil, LedgerOnWriteEntries(&writeEntries)) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + accountValues := make(accountStorageMapValues) + + // Create and write to domain storage map (createIfNotExists is true) + for _, domain := range tc.newDomains { + // Create new domain storage map + const createIfNotExists = true + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write to domain storage map + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.domainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := 
storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 2+len(tc.newDomains), len(writeEntries)) + + // writes[0]: account register + require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte(AccountStorageKey), writeEntries[0].Key) + require.Equal(t, []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Value) + + // writes[1]: account storage map + require.Equal(t, address[:], writeEntries[1].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[1].Key) + require.True(t, len(writeEntries[1].Value) > 0) + + for i := range len(tc.newDomains) { + // writes[2+i]: domain storage map + + writeEntryIndex := 2 + i + owner := writeEntries[writeEntryIndex].Owner + key := writeEntries[writeEntryIndex].Key + value := writeEntries[writeEntryIndex].Value + + var slabKey [9]byte + slabKey[0] = '$' + binary.BigEndian.PutUint64(slabKey[1:], uint64(2+i)) + + require.Equal(t, address[:], owner) + require.Equal(t, slabKey[:], key) + + // Domain storage map value is empty if it is inlined in account storage map + if tc.inlined { + require.True(t, len(value) == 0) + } else { + require.True(t, len(value) > 0) + } + } + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + } + + // This test tests storage map operations with intermittent Commit(): + // - create domain storage map and commit + // - write to domain storage map and commit + // - remove all elements from domain storage map and commit + // - read domain storage map and commit + t.Run("create, commit, write, commit, remove, commit", func(t *testing.T) { + // Create empty storage + ledger := NewTestLedger(nil, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: 
true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + accountValues := make(accountStorageMapValues) + + domains := []common.StorageDomain{ + common.PathDomainStorage.StorageDomain(), + common.PathDomainPublic.StorageDomain(), + } + + // Create empty domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = true + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + accountValues[domain] = make(domainStorageMapValues) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Write to existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write to domain storage map + const domainStorageMapCount = 2 + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, domainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Remove all elements from existing domain storage map and commit + { + for 
_, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + expectedDomainValues := accountValues[domain] + require.Equal(t, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + // Remove elements from domain storage map + for k := range expectedDomainValues { + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(expectedDomainValues, k) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Read domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) +} + +func TestRuntimeStorageForMigratedAccount(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + // newTestLedgerWithMigratedAccount creates a new TestLedger containing + // account storage map with given domains for given address. 
+ newTestLedgerWithMigratedAccount := func( + onRead LedgerOnRead, + onWrite LedgerOnWrite, + address common.Address, + domains []common.StorageDomain, + domainStorageMapCount int, + ) (TestLedger, accountStorageMapValues) { + ledger := NewTestLedger(nil, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + accountValues := createAndWriteAccountStorageMap(t, storage, inter, address, domains, domainStorageMapCount, random) + + newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices) + + return newLedger, accountValues + } + + // This test reads non-existent domain storage map and commit changes. + // pre-condition: storage contains account register and account storage map + // post-condition: no change + // migration: none + t.Run("read non-existent domain storage map", func(t *testing.T) { + existingDomains := []common.StorageDomain{ + common.PathDomainStorage.StorageDomain(), + } + + nonexistentDomain := common.PathDomainPublic.StorageDomain() + + var writeCount int + + // Create storage with account storage map + const domainStorageMapCount = 5 + ledger, _ := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteCounter(&writeCount), + address, + existingDomains, + domainStorageMapCount) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + // Get non-existent domain storage map + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, nonexistentDomain, createIfNotExists) + require.Nil(t, domainStorageMap) + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 0, writeCount) + }) + + 
// This test reads existing domain storage map and commit changes. + // pre-condition: storage contains account register and account storage map + // post-condition: no change + // migration: none + readExistingDomainTestCases := []struct { + name string + createIfNotExists bool + }{ + {name: "(createIfNotExists is true)", createIfNotExists: true}, + {name: "(createIfNotExists is false)", createIfNotExists: false}, + } + + for _, tc := range readExistingDomainTestCases { + t.Run("read existing domain storage map "+tc.name, func(t *testing.T) { + + existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()} + + var writeCount int + + // Create storage with account storage map + const domainStorageMapCount = 5 + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteCounter(&writeCount), + address, + existingDomains, + domainStorageMapCount, + ) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + // Read existing domain storage map + for domain, domainValues := range accountValues { + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, tc.createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + for k, expectedV := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev, ok := v.(interpreter.EquatableValue) + require.True(t, ok) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedV)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 0, writeCount) + }) + } + + // This test creates and writes to new domain storage map and commit 
changes. + // pre-condition: storage contains account register and account storage map + // post-condition: storage contains + // - account register + // - account storage map with new domain storage map. + createDomainTestCases := []struct { + name string + existingDomains []common.StorageDomain + newDomains []common.StorageDomain + existingDomainStorageMapCount int + newDomainStorageMapCount int + isNewDomainStorageMapInlined bool + }{ + { + name: "empty domain storage map", + existingDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, + existingDomainStorageMapCount: 5, + newDomains: []common.StorageDomain{common.PathDomainPublic.StorageDomain()}, + newDomainStorageMapCount: 0, + isNewDomainStorageMapInlined: true, + }, + { + name: "small domain storage map", + existingDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, + existingDomainStorageMapCount: 5, + newDomains: []common.StorageDomain{common.PathDomainPublic.StorageDomain()}, + newDomainStorageMapCount: 10, + isNewDomainStorageMapInlined: true, + }, + { + name: "large domain storage map", + existingDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, + existingDomainStorageMapCount: 5, + newDomains: []common.StorageDomain{common.PathDomainPublic.StorageDomain()}, + newDomainStorageMapCount: 20, + isNewDomainStorageMapInlined: false, + }, + } + + for _, tc := range createDomainTestCases { + t.Run("create and write "+tc.name, func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + // Create storage with existing account storage map + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + tc.existingDomains, + tc.existingDomainStorageMapCount, + ) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + lastIndex := ledger.StorageIndices[string(address[:])] + + random := 
rand.New(rand.NewSource(42)) + + // Create and write to domain storage map (createIfNotExists is true) + for _, domain := range tc.newDomains { + const createIfNotExists = true + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write elements to to domain storage map + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.newDomainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 1+len(tc.newDomains), len(writeEntries)) + + // writes[0]: account storage map + // account storage map is updated to include new domains. + require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) > 0) + + for i := range len(tc.newDomains) { + // writes[1+i]: domain storage map + // domain storage map value is empty if it is inlined in account storage map + + writeEntryIndex := 1 + i + owner := writeEntries[writeEntryIndex].Owner + key := writeEntries[writeEntryIndex].Key + value := writeEntries[writeEntryIndex].Value + + var slabKey [9]byte + slabKey[0] = '$' + binary.BigEndian.PutUint64(slabKey[1:], lastIndex+1+uint64(i)) + + require.Equal(t, address[:], owner) + require.Equal(t, slabKey[:], key) + + if tc.isNewDomainStorageMapInlined { + require.True(t, len(value) == 0) + } else { + require.True(t, len(value) > 0) + } + } + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + } + + // This test reads and writes to existing domain 
storage map and commit changes. + // pre-condition: storage contains account register and account storage map + // post-condition: storage contains + // - account register + // - account storage map with updated domain storage map. + t.Run("read and write to existing domain storage map", func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()} + const existingDomainStorageMapCount = 5 + + // Create storage with account storage map + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + existingDomains, + existingDomainStorageMapCount, + ) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // Write to existing domain storage map (createIfNotExists is false) + for _, domain := range existingDomains { + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues)) + for k := range domainValues { //nolint:maprange + domainKeys = append(domainKeys, k) + } + + // Update or remove existing elements + for i, k := range domainKeys { + if i%2 == 0 { + n := random.Int() + newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + // Update existing element + existed := domainStorageMap.WriteValue(inter, k, newValue) + require.True(t, existed) + + domainValues[k] = newValue + } else { + // Remove existing element + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(domainValues, k) + } + } + + // Write new elements + const newElementCount = 2 + 
newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, newElementCount, random) + + for k, v := range newDomainValues { + domainValues[k] = v + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 1, len(writeEntries)) + + // writes[0]: account storage map + // account storage map is updated because inlined domain storage map is updated. + require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) > 0) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + + // This test tests storage map operations with intermittent Commit(): + // - read domain storage map and commit + // - write to domain storage map and commit + // - remove all elements from domain storage map and commit + // - read domain storage map and commit + t.Run("read, commit, update, commit, remove, commit", func(t *testing.T) { + + domains := []common.StorageDomain{ + common.PathDomainStorage.StorageDomain(), + common.PathDomainPublic.StorageDomain(), + } + const domainStorageMapCount = 5 + + // Create storage with existing account storage map + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + nil, + address, + domains, + domainStorageMapCount, + ) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // Read domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := 
storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + for k, expectedValue := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev := v.(interpreter.EquatableValue) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Write to existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + // Write to domain storage map + const domainStorageMapCount = 2 + newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, domainStorageMapCount, random) + for k, v := range newDomainValues { + domainValues[k] = v + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Remove all elements from existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := 
storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + expectedDomainValues := accountValues[domain] + require.Equal(t, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + // Remove elements from domain storage map + for k := range expectedDomainValues { + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(expectedDomainValues, k) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Read domain storage map + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) +} + +func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { + + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + newTestLedgerWithUnmigratedAccount := func( + onRead LedgerOnRead, + onWrite LedgerOnWrite, + address common.Address, + domains []common.StorageDomain, + domainStorageMapCount int, + ) (TestLedger, accountStorageMapValues) { + ledger := NewTestLedger(nil, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: 
false,
+		},
+	)
+
+	inter := NewTestInterpreter(t)
+
+	accountValues := make(accountStorageMapValues)
+
+	random := rand.New(rand.NewSource(42))
+
+	for _, domain := range domains {
+		accountValues[domain] = make(domainStorageMapValues)
+
+		// Create domain storage map
+		domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+
+		// Write domain register
+		domainStorageMapValueID := domainStorageMap.ValueID()
+		err := ledger.SetValue(address[:], []byte(domain.Identifier()), domainStorageMapValueID[8:])
+		require.NoError(t, err)
+
+		// Write elements to domain storage map
+		for len(accountValues[domain]) < domainStorageMapCount {
+			n := random.Int()
+			key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+			value := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+			_ = domainStorageMap.WriteValue(inter, key, value)
+
+			accountValues[domain][key] = value
+		}
+	}
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(t, err)
+
+	// Create a new storage
+	newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices)
+
+	return newLedger, accountValues
+	}
+
+	// This test reads non-existent domain storage map and commit changes.
+	// pre-condition: storage contains domain register and domain storage map
+	// post-condition: no change
+	// migration: none because only read ops.
+ t.Run("read non-existent domain storage map", func(t *testing.T) { + existingDomains := []common.StorageDomain{ + common.PathDomainStorage.StorageDomain(), + } + + var writeCount int + + // Create storage with unmigrated accounts + const domainStorageMapCount = 5 + ledger, _ := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteCounter(&writeCount), + address, + existingDomains, + domainStorageMapCount) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + // Get non-existent domain storage map + const createIfNotExists = false + nonExistingDomain := common.PathDomainPublic.StorageDomain() + domainStorageMap := storage.GetDomainStorageMap(inter, address, nonExistingDomain, createIfNotExists) + require.Nil(t, domainStorageMap) + + storage.ScheduleV2MigrationForModifiedAccounts() + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check there are no writes to underlying storage + require.Equal(t, 0, writeCount) + }) + + // This test reads existing domain storage map and commit changes. 
+ // pre-condition: storage contains domain register and domain storage map + // post-condition: no change + // migration: none because only read ops + readExistingDomainTestCases := []struct { + name string + createIfNotExists bool + }{ + {name: "(createIfNotExists is true)", createIfNotExists: true}, + {name: "(createIfNotExists is false)", createIfNotExists: false}, + } + + for _, tc := range readExistingDomainTestCases { + t.Run("read existing domain storage map "+tc.name, func(t *testing.T) { + + var writeCount int + + existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()} + const existingDomainStorageMapCount = 5 + + // Create storage with existing domain storage map + ledger, accountValues := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteCounter(&writeCount), + address, + existingDomains, + existingDomainStorageMapCount, + ) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + // Read existing domain storage map + for domain, domainValues := range accountValues { + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, tc.createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + // Read elements to domain storage map + for k, expectedV := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev, ok := v.(interpreter.EquatableValue) + require.True(t, ok) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedV)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 0, writeCount) + }) + } + + // This test creates and writes to new domain storage 
map and commit changes. + // pre-condition: storage contains + // - domain register + // - domain storage map + // post-condition: storage contains + // - account register + // - account storage map with existing and new domain storage map. + // migration: yes + createDomainTestCases := []struct { + name string + existingDomains []common.StorageDomain + newDomains []common.StorageDomain + existingDomainStorageMapCount int + newDomainStorageMapCount int + isNewDomainStorageMapInlined bool + }{ + { + name: "empty domain storage map", + existingDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, + existingDomainStorageMapCount: 5, + newDomains: []common.StorageDomain{common.PathDomainPublic.StorageDomain()}, + newDomainStorageMapCount: 0, + isNewDomainStorageMapInlined: true, + }, + { + name: "small domain storage map", + existingDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, + existingDomainStorageMapCount: 5, + newDomains: []common.StorageDomain{common.PathDomainPublic.StorageDomain()}, + newDomainStorageMapCount: 10, + isNewDomainStorageMapInlined: true, + }, + { + name: "large domain storage map", + existingDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, + existingDomainStorageMapCount: 5, + newDomains: []common.StorageDomain{common.PathDomainPublic.StorageDomain()}, + newDomainStorageMapCount: 20, + isNewDomainStorageMapInlined: false, + }, + } + + for _, tc := range createDomainTestCases { + t.Run("create and write "+tc.name, func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + // Create storage with existing account storage map + ledger, accountValues := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + tc.existingDomains, + tc.existingDomainStorageMapCount, + ) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := 
rand.New(rand.NewSource(42))
+
+			// Create and write to new domain storage map
+			for _, domain := range tc.newDomains {
+				const createIfNotExists = true
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(0), domainStorageMap.Count())
+
+				// Write elements to domain storage map
+				accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.newDomainStorageMapCount, random)
+			}
+
+			// TODO:
+			storage.ScheduleV2MigrationForModifiedAccounts()
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			// writes include:
+			// - empty registers for all existing and new domains
+			// - 1 account register
+			// - 1 account storage map register
+			// - other non-inlined domain storage map
+			require.True(t, len(writeEntries) > 1+len(tc.existingDomains)+len(tc.newDomains))
+
+			i := 0
+
+			// Check new domain register committed in V1 format.
+			for _, domain := range common.AllStorageDomains {
+
+				if slices.Contains(tc.newDomains, domain) {
+
+					// New domains are committed in V1 format (with domain register).
+					require.Equal(t, address[:], writeEntries[i].Owner)
+					require.Equal(t, []byte(domain.Identifier()), writeEntries[i].Key)
+					require.True(t, len(writeEntries[i].Value) > 0)
+
+					i++
+				}
+			}
+
+			// Check modified registers in migration.
+			for _, domain := range common.AllStorageDomains {
+
+				if slices.Contains(tc.existingDomains, domain) ||
+					slices.Contains(tc.newDomains, domain) {
+
+					// Existing and new domain registers are removed (migrated).
+					// Removing new (non-existent) domain registers is no-op.
+ require.Equal(t, address[:], writeEntries[i].Owner) + require.Equal(t, []byte(domain.Identifier()), writeEntries[i].Key) + require.True(t, len(writeEntries[i].Value) == 0) + + i++ + } + } + + // Account register is created + require.Equal(t, address[:], writeEntries[i].Owner) + require.Equal(t, []byte(AccountStorageKey), writeEntries[i].Key) + require.True(t, len(writeEntries[i].Value) > 0) + + i++ + + // Remaining writes are atree slabs (either empty for migrated domain storage map or non-empty for account storage map) + for ; i < len(writeEntries); i++ { + require.Equal(t, address[:], writeEntries[i].Owner) + require.Equal(t, byte('$'), writeEntries[i].Key[0]) + } + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + } + + // This test reads and writes to existing domain storage map and commit changes. + // pre-condition: storage contains + // - domain register + // - domain storage map + // post-condition: storage contains + // - account register + // - account storage map with updated domain storage map. 
+ // migration: yes + t.Run("read and write to existing domain storage map", func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + domains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()} + const existingDomainStorageMapCount = 5 + + // Create storage with existing domain storage maps + ledger, accountValues := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + domains, + existingDomainStorageMapCount, + ) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // write to existing domain storage map (createIfNotExists is false) + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues)) + for k := range domainValues { //nolint:maprange + domainKeys = append(domainKeys, k) + } + + // Update or remove elements + for i, k := range domainKeys { + if i%2 == 0 { + n := random.Int() + newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + // Update existing element + existed := domainStorageMap.WriteValue(inter, k, newValue) + require.True(t, existed) + + domainValues[k] = newValue + } else { + // Remove existing element + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(domainValues, k) + } + } + + // Write new elements + const newElementCount = 2 + newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, newElementCount, random) + + for k, v := range newDomainValues { + domainValues[k] = v + } + } + + // TODO: + storage.ScheduleV2MigrationForModifiedAccounts() + + // 
Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 4, len(writeEntries)) + + // writes[0]: domain register + // storage domain register is removed + require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte(common.PathDomainStorage.Identifier()), writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) == 0) + + // writes[1]: account register + // account register is created + require.Equal(t, address[:], writeEntries[1].Owner) + require.Equal(t, []byte(AccountStorageKey), writeEntries[1].Key) + require.True(t, len(writeEntries[1].Value) > 0) + + // writes[2]: storage domain storage map + // storage domain storage map is removed because it is inlined in account storage map. + require.Equal(t, address[:], writeEntries[2].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[2].Key) + require.True(t, len(writeEntries[2].Value) == 0) + + // writes[3]: account storage map + // account storage map is created with inlined domain storage map. 
+ require.Equal(t, address[:], writeEntries[3].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[3].Key) + require.True(t, len(writeEntries[3].Value) > 0) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + + // This test storage map operations (including account migration) with intermittent Commit() + // - read domain storage map and commit + // - write to domain storage map and commit (including account migration) + // - remove all elements from domain storage map and commit + // - read domain storage map and commit + t.Run("read, commit, update, commit, remove, commit", func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + domains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()} + const domainStorageMapCount = 5 + + // Create storage with existing account storage map + ledger, accountValues := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + domains, + domainStorageMapCount, + ) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // Read domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + for k, expectedValue := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev := v.(interpreter.EquatableValue) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + 
require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+		}
+
+		// Update domain storage map, and commit changes (account is migrated during commit)
+		{
+			// update existing domain storage map (loaded from storage)
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues))
+				for k := range domainValues { //nolint:maprange
+					domainKeys = append(domainKeys, k)
+				}
+
+				// Update elements
+				for _, k := range domainKeys {
+					n := random.Int()
+					newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+					// Update existing element
+					existed := domainStorageMap.WriteValue(inter, k, newValue)
+					require.True(t, existed)
+
+					domainValues[k] = newValue
+				}
+			}
+
+			// TODO:
+			storage.ScheduleV2MigrationForModifiedAccounts()
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			require.Equal(t, 4, len(writeEntries))
+
+			// writes[0]: storage domain register
+			// Storage domain register is removed
+			require.Equal(t, address[:], writeEntries[0].Owner)
+			require.Equal(t, []byte(common.PathDomainStorage.Identifier()), writeEntries[0].Key)
+			require.True(t, len(writeEntries[0].Value) == 0)
+
+			// writes[1]: account register
+			// Account register is created
+			require.Equal(t, address[:], writeEntries[1].Owner)
+			require.Equal(t, []byte(AccountStorageKey), writeEntries[1].Key)
+			require.True(t, len(writeEntries[1].Value) > 0)
+
+			// writes[2]: storage domain storage 
map + // storage domain storage map is removed because it is inlined in account storage map. + require.Equal(t, address[:], writeEntries[2].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[2].Key) + require.True(t, len(writeEntries[2].Value) == 0) + + // writes[3]: account storage map + // account storage map is created with inlined domain storage map. + require.Equal(t, address[:], writeEntries[3].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[3].Key) + require.True(t, len(writeEntries[3].Value) > 0) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + + writeEntries = nil + } + + // Remove all elements from domain storage map, and commit changes + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues)) + for k := range domainValues { //nolint:maprange + domainKeys = append(domainKeys, k) + } + + // Remove elements + for _, k := range domainKeys { + + // Update existing element + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(domainValues, k) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 1, len(writeEntries)) + + // writes[0]: account storage map + // account storage map is modified because inlined domain storage map is modified. 
+ require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) > 0) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Read domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + for k, expectedValue := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev := v.(interpreter.EquatableValue) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) +} + +// TestRuntimeStorageDomainStorageMapInlinedState tests inlined state +// of domain storage map when large number of elements are inserted, +// updated, and removed from domain storage map. +// Initially domain storage map is inlined in account storage map, it +// becomes un-inlined when large number elements are inserted, and then +// inlined again when all elements are removed. 
+func TestRuntimeStorageDomainStorageMapInlinedState(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + address := common.MustBytesToAddress([]byte{0x1}) + + // Create empty storage + ledger := NewTestLedger(nil, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domains := []common.StorageDomain{ + common.PathDomainStorage.StorageDomain(), + common.PathDomainPublic.StorageDomain(), + common.PathDomainPrivate.StorageDomain(), + } + + const domainStorageMapCount = 500 + + accountValues := make(accountStorageMapValues) + + for _, domain := range domains { + + // Create domain storage map + const createIfNotExists = true + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.True(t, domainStorageMap.Inlined()) + + valueID := domainStorageMap.ValueID() + + accountValues[domain] = make(domainStorageMapValues) + + domainValues := accountValues[domain] + + // Insert new values to domain storage map + for domainStorageMap.Count() < domainStorageMapCount { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, exists := domainValues[key]; exists { + continue + } + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + existed := domainStorageMap.WriteValue(inter, key, value) + require.False(t, existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.False(t, domainStorageMap.Inlined()) + + // Check storage health + err := storage.CheckHealth() + require.NoError(t, err) + + // Overwrite values in domain storage map + for key := range domainValues { + n := random.Int() + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + existed := domainStorageMap.WriteValue(inter, key, value) + 
require.True(t, existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.False(t, domainStorageMap.Inlined()) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // Remove all values in domain storage map + for key := range domainValues { + existed := domainStorageMap.WriteValue(inter, key, nil) + require.True(t, existed) + + delete(domainValues, key) + } + + require.Equal(t, uint64(0), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // There should be 2 non-empty registers in ledger after commits: + // - account register (key is "stored") + // - account storage map (atree slab) + nonEmptyRegisters := make(map[string][]byte) + for k, v := range ledger.StoredValues { + if len(v) > 0 { + nonEmptyRegisters[k] = v + } + } + require.Equal(t, 2, len(nonEmptyRegisters)) + + accountRegisterValue, accountRegisterExists := nonEmptyRegisters[string(address[:])+"|"+AccountStorageKey] + require.True(t, accountRegisterExists) + require.Equal(t, 8, len(accountRegisterValue)) + + _, accountStorageMapRegisterExists := nonEmptyRegisters[string(address[:])+"|$"+string(accountRegisterValue)] + require.True(t, accountStorageMapRegisterExists) + + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) +} + +// TestRuntimeStorageLargeDomainValues tests large values +// in domain storage map. 
+func TestRuntimeStorageLargeDomainValues(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + address := common.MustBytesToAddress([]byte{0x1}) + + // Create empty storage + ledger := NewTestLedger(nil, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domains := []common.StorageDomain{ + common.PathDomainStorage.StorageDomain(), + common.PathDomainPublic.StorageDomain(), + common.PathDomainPrivate.StorageDomain(), + } + + const domainStorageMapCount = 5 + + accountValues := make(accountStorageMapValues) + + for _, domain := range domains { + + // Create domain storage map + const createIfNotExists = true + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.True(t, domainStorageMap.Inlined()) + + valueID := domainStorageMap.ValueID() + + accountValues[domain] = make(domainStorageMapValues) + + domainValues := accountValues[domain] + + // Insert new values to domain storage map + for domainStorageMap.Count() < domainStorageMapCount { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, exists := domainValues[key]; exists { + continue + } + value := interpreter.NewUnmeteredStringValue(strings.Repeat("a", 1_000)) + + existed := domainStorageMap.WriteValue(inter, key, value) + require.False(t, existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + + // Check storage health + err := storage.CheckHealth() + require.NoError(t, err) + + // Overwrite values in domain storage map + for key := range domainValues { + value := interpreter.NewUnmeteredStringValue(strings.Repeat("b", 1_000)) + + existed := domainStorageMap.WriteValue(inter, key, value) + require.True(t, 
existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // Remove all values in domain storage map + for key := range domainValues { + existed := domainStorageMap.WriteValue(inter, key, nil) + require.True(t, existed) + + delete(domainValues, key) + } + + require.Equal(t, uint64(0), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // There should be 2 non-empty registers in ledger after commits: + // - account register (key is "stored") + // - account storage map (atree slab) + nonEmptyRegisters := make(map[string][]byte) + for k, v := range ledger.StoredValues { + if len(v) > 0 { + nonEmptyRegisters[k] = v + } + } + require.Equal(t, 2, len(nonEmptyRegisters)) + + accountRegisterValue, accountRegisterExists := nonEmptyRegisters[string(address[:])+"|"+AccountStorageKey] + require.True(t, accountRegisterExists) + require.Equal(t, 8, len(accountRegisterValue)) + + _, accountStorageMapRegisterExists := nonEmptyRegisters[string(address[:])+"|$"+string(accountRegisterValue)] + require.True(t, accountStorageMapRegisterExists) + + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) +} + +func TestDomainRegisterMigrationForLargeAccount(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + var writeCount int + + accountsInfo := []accountInfo{ + { + address: address, + domains: []domainInfo{ + {domain: 
common.PathDomainStorage.StorageDomain(), domainStorageMapCount: 100, maxDepth: 3}, + {domain: common.PathDomainPublic.StorageDomain(), domainStorageMapCount: 100, maxDepth: 3}, + {domain: common.PathDomainPrivate.StorageDomain(), domainStorageMapCount: 100, maxDepth: 3}, + }, + }, + } + + ledger, accountsValues := newTestLedgerWithUnmigratedAccounts( + t, + nil, + LedgerOnWriteCounter(&writeCount), + accountsInfo, + ) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + accountValues := accountsValues[address] + + // Create new domain storage map + const createIfNotExists = true + domain := common.StorageDomainInbox + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + accountValues[domain] = make(domainStorageMapValues) + + // TODO: + storage.ScheduleV2MigrationForModifiedAccounts() + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check there are writes to underlying storage + require.True(t, writeCount > 0) + + // Check there isn't any domain registers + nonAtreeRegisters := make(map[string][]byte) + for k, v := range ledger.StoredValues { + if len(v) == 0 { + continue + } + ks := strings.Split(k, "|") + if ks[1][0] != '$' { + nonAtreeRegisters[k] = v + } + } + + require.Equal(t, 1, len(nonAtreeRegisters)) + for k := range nonAtreeRegisters { + ks := strings.Split(k, "|") + require.Equal(t, address[:], []byte(ks[0])) + require.Equal(t, AccountStorageKey, ks[1]) + } + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) +} + +func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { + 
t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + testCases := []struct { + name string + storageFormatV2Enabled bool + domain common.StorageDomain + createIfNotExists bool + expectedDomainStorageMapIsNil bool + expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair + expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + expectedReadsSet map[string]struct{} + }{ + // Test cases with storageFormatV2Enabled = false + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: false, + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: false, + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reads from the second GetDomainStorageMap() because + // domain storage map is created and cached in the first GetDomainStorageMap(). 
+ }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + }, + }, + // Test cases with storageFormatV2Enabled = true + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: true, + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Second GetDomainStorageMap() has the same register reading as the first GetDomainStorageMap() + // because account status can't be cached in previous call. + + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: true, + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Check all domain registers + { + owner: address[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + 
owner: address[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainInbox.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainCapabilityController.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainCapabilityControllerTag.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainPathCapability.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainAccountCapability.Identifier()), + }, + // Read account register to load account storage map + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reads from the second GetDomainStorageMap() because + // domain storage map is created and cached in the first GetDomainStorageMap(). 
+ }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPrivate): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPublic): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainContract): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainInbox): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainCapabilityController): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainCapabilityControllerTag): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathCapability): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainAccountCapability): {}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + var ledgerReads []ownerKeyPair + ledgerReadsSet := make(map[string]struct{}) + + // Create empty storage + ledger := NewTestLedger( + func(owner, key, _ []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{} + }, + nil) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads) + + ledgerReads = ledgerReads[:0] + + // Call GetDomainStorageMap() again to test account status is cached and no register reading is needed. 
+ + domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + + // Check underlying ledger reads + require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet)) + for k := range ledgerReadsSet { + require.Contains(t, tc.expectedReadsSet, k) + } + }) + } +} + +func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { + + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + type getStorageDataFunc func() (storedValues map[string][]byte, StorageIndices map[string]uint64) + + createV1AccountWithDomain := func( + address common.Address, + domain common.StorageDomain, + ) getStorageDataFunc { + return func() (storedValues map[string][]byte, StorageIndices map[string]uint64) { + ledger := NewTestLedger(nil, nil) + + persistentSlabStorage := NewPersistentSlabStorage(ledger, nil) + + orderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + slabIndex := orderedMap.SlabID().Index() + + for i := range 3 { + + key := interpreter.StringStorageMapKey(strconv.Itoa(i)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + + existingStorable, err := orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + value, + ) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Commit domain storage map + err = persistentSlabStorage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create domain register + err = ledger.SetValue(address[:], []byte(domain.Identifier()), slabIndex[:]) + require.NoError(t, err) + + return ledger.StoredValues, ledger.StorageIndices + } + } + + testCases := []struct { + name string + getStorageData getStorageDataFunc + 
storageFormatV2Enabled bool + domain common.StorageDomain + createIfNotExists bool + expectedDomainStorageMapIsNil bool + expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair + expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + expectedReadsSet map[string]struct{} + }{ + // Test cases with storageFormatV2Enabled = false + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading in second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(0). 
+ }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map exists, createIfNotExists = false", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading in second GetDomainStorageMap() because + // domain storage map is loaded and cached in the first + // GetDomainStorageMap(0). + }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map exists, createIfNotExists = true", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading in second GetDomainStorageMap() because + // domain storage map is loaded and cached in the 
first + // GetDomainStorageMap(0). + }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + // Test cases with storageFormatV2Enabled = true + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, 
+ // Check all domain registers until any existing domain is checked + { + owner: address[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). + }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPrivate): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPublic): {}, + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map exists, createIfNotExists = false", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // 
domain storage map is created and cached in the first + // GetDomainStorageMap(). + }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map exists, createIfNotExists = true", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check given domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read given domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). 
+ }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + storedValues, storedIndices := tc.getStorageData() + + var ledgerReads []ownerKeyPair + ledgerReadsSet := make(map[string]struct{}) + + ledger := NewTestLedgerWithData( + func(owner, key, _ []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{} + }, + nil, + storedValues, + storedIndices, + ) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads) + + ledgerReads = ledgerReads[:0] + + domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + + // Check underlying ledger reads + require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet)) + for k := range ledgerReadsSet { + require.Contains(t, tc.expectedReadsSet, k) + } + }) + } +} + +func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + type getStorageDataFunc func() (storedValues map[string][]byte, StorageIndices map[string]uint64) + + createV2AccountWithDomain := func( + address 
common.Address, + domain common.StorageDomain, + ) getStorageDataFunc { + return func() (storedValues map[string][]byte, StorageIndices map[string]uint64) { + ledger := NewTestLedger(nil, nil) + + persistentSlabStorage := NewPersistentSlabStorage(ledger, nil) + + accountOrderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + slabIndex := accountOrderedMap.SlabID().Index() + + domainOrderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + domainKey := interpreter.Uint64StorageMapKey(domain) + + existingDomain, err := accountOrderedMap.Set( + domainKey.AtreeValueCompare, + domainKey.AtreeValueHashInput, + domainKey.AtreeValue(), + domainOrderedMap, + ) + require.NoError(t, err) + require.Nil(t, existingDomain) + + for i := range 3 { + + key := interpreter.StringStorageMapKey(strconv.Itoa(i)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + + existingStorable, err := domainOrderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + value, + ) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Commit domain storage map + err = persistentSlabStorage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create account register + err = ledger.SetValue(address[:], []byte(AccountStorageKey), slabIndex[:]) + require.NoError(t, err) + + return ledger.StoredValues, ledger.StorageIndices + } + } + + testCases := []struct { + name string + getStorageData getStorageDataFunc + domain common.StorageDomain + createIfNotExists bool + expectedDomainStorageMapIsNil bool + expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair + expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + expectedReadsSet map[string]struct{} + }{ + { + name: "domain 
storage map does not exist, createIfNotExists = false", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap because + // account storage map is loaded and cached from first + // GetDomainStorageMap(). + }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + { + name: "domain storage map does not exist, createIfNotExists = true", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). 
+ }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + { + name: "domain storage map exists, createIfNotExists = false", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). 
+ }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + { + name: "domain storage map exists, createIfNotExists = true", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). 
+ }, + expectedReadsSet: map[string]struct{}{ + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + storedValues, storedIndices := tc.getStorageData() + + var ledgerReads []ownerKeyPair + ledgerReadsSet := make(map[string]struct{}) + + ledger := NewTestLedgerWithData( + func(owner, key, _ []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{} + }, + nil, + storedValues, + storedIndices, + ) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads) + + ledgerReads = ledgerReads[:0] + + domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + + // Check underlying ledger reads + require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet)) + for k := range ledgerReadsSet { + require.Contains(t, tc.expectedReadsSet, k) + } + }) + } +} + +func TestAccountStorageFormatForNonExistingAccount(t *testing.T) { + + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + testCases := []struct { + name string + storageFormatV2Enabled bool + format StorageFormat + }{ + { + name: "non-existing account, storageFormatV2Enabled = false", + storageFormatV2Enabled: false, + format: StorageFormatUnknown, + }, + { + name: 
"non-existing account, storageFormatV2Enabled = true", + storageFormatV2Enabled: true, + format: StorageFormatUnknown, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + for range 2 { + format := storage.AccountStorageFormat(address) + require.Equal(t, tc.format, format) + } + }) + } +} + +func TestAccountStorageFormatForV1Account(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + createV1AccountWithDomain := func( + address common.Address, + domain common.StorageDomain, + ) (storedValues map[string][]byte, StorageIndices map[string]uint64) { + ledger := NewTestLedger(nil, nil) + + persistentSlabStorage := NewPersistentSlabStorage(ledger, nil) + + orderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + slabIndex := orderedMap.SlabID().Index() + + for i := range 3 { + + key := interpreter.StringStorageMapKey(strconv.Itoa(i)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + + existingStorable, err := orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + value, + ) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Commit domain storage map + err = persistentSlabStorage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create domain register + err = ledger.SetValue(address[:], []byte(domain.Identifier()), slabIndex[:]) + require.NoError(t, err) + + return ledger.StoredValues, ledger.StorageIndices + } + + testCases := []struct { + name string + storageFormatV2Enabled bool + format StorageFormat + }{ + { + name: "v1 account, storageFormatV2Enabled = false", + storageFormatV2Enabled: false, + format: StorageFormatV1, + }, + { + 
name: "v1 account, storageFormatV2Enabled = true", + storageFormatV2Enabled: true, + format: StorageFormatV1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + storedValues, storedIndices := createV1AccountWithDomain( + address, + common.StorageDomainPathStorage, + ) + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storedIndices) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + for range 2 { + format := storage.AccountStorageFormat(address) + require.Equal(t, tc.format, format) + } + }) + } +} + +func TestAccountStorageFormatForV2Account(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + createV2AccountWithDomain := func( + address common.Address, + domain common.StorageDomain, + ) (storedValues map[string][]byte, StorageIndices map[string]uint64) { + ledger := NewTestLedger(nil, nil) + + persistentSlabStorage := NewPersistentSlabStorage(ledger, nil) + + accountOrderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + slabIndex := accountOrderedMap.SlabID().Index() + + domainOrderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + domainKey := interpreter.Uint64StorageMapKey(domain) + + existingDomain, err := accountOrderedMap.Set( + domainKey.AtreeValueCompare, + domainKey.AtreeValueHashInput, + domainKey.AtreeValue(), + domainOrderedMap, + ) + require.NoError(t, err) + require.Nil(t, existingDomain) + + for i := range 3 { + + key := interpreter.StringStorageMapKey(strconv.Itoa(i)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + + existingStorable, err := domainOrderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + 
key.AtreeValue(), + value, + ) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Commit domain storage map + err = persistentSlabStorage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create account register + err = ledger.SetValue(address[:], []byte(AccountStorageKey), slabIndex[:]) + require.NoError(t, err) + + return ledger.StoredValues, ledger.StorageIndices + } + + testCases := []struct { + name string + storageFormatV2Enabled bool + format StorageFormat + }{ + { + name: "v2 account, storageFormatV2Enabled = true", + storageFormatV2Enabled: true, + format: StorageFormatV2, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + storedValues, storedIndices := createV2AccountWithDomain( + address, + common.StorageDomainPathStorage, + ) + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storedIndices) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + for range 2 { + format := storage.AccountStorageFormat(address) + require.Equal(t, tc.format, format) + } + }) + } +} + +// createAndWriteAccountStorageMap creates account storage map with given domains and writes random values to domain storage map. 
+func createAndWriteAccountStorageMap( + t testing.TB, + storage *Storage, + inter *interpreter.Interpreter, + address common.Address, + domains []common.StorageDomain, + count int, + random *rand.Rand, +) accountStorageMapValues { + + accountValues := make(accountStorageMapValues) + + // Create domain storage map + for _, domain := range domains { + const createIfNotExists = true + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write to to domain storage map + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, count, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + return accountValues +} + +func writeToDomainStorageMap( + inter *interpreter.Interpreter, + domainStorageMap *interpreter.DomainStorageMap, + count int, + random *rand.Rand, +) domainStorageMapValues { + domainValues := make(domainStorageMapValues) + + for len(domainValues) < count { + n := random.Int() + + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + domainStorageMap.WriteValue(inter, key, value) + + domainValues[key] = value + } + + return domainValues +} + +// checkAccountStorageMapData creates new storage with given storedValues, +// and compares account storage map values with given expectedAccountValues. 
+func checkAccountStorageMapData( + tb testing.TB, + storedValues map[string][]byte, + storageIndices map[string]uint64, + address common.Address, + expectedAccountValues accountStorageMapValues, +) { + // Create storage with given storedValues and storageIndices + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(tb, storage) + + // Get account register + accountStorageMapSlabIndex, err := ledger.GetValue(address[:], []byte(AccountStorageKey)) + require.NoError(tb, err) + require.Equal(tb, 8, len(accountStorageMapSlabIndex)) + + // Load account storage map + accountSlabID := atree.NewSlabID( + atree.Address(address[:]), + atree.SlabIndex(accountStorageMapSlabIndex[:]), + ) + accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountSlabID) + require.NotNil(tb, accountStorageMap) + require.Equal(tb, uint64(len(expectedAccountValues)), accountStorageMap.Count()) + + domainCount := 0 + iter := accountStorageMap.Iterator() + for { + domain, domainStorageMap := iter.Next() + if domain == common.StorageDomainUnknown { + break + } + + domainCount++ + + expectedDomainValues, exist := expectedAccountValues[domain] + require.True(tb, exist) + require.Equal(tb, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + // Check values stored in domain storage map + for key, expectedValue := range expectedDomainValues { + value := domainStorageMap.ReadValue(nil, key) + + ev, ok := value.(interpreter.EquatableValue) + require.True(tb, ok) + require.True(tb, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) + } + } + + require.Equal(tb, len(expectedAccountValues), domainCount) + + // Check atree storage health + rootSlabIDs, err := atree.CheckStorageHealth(storage.PersistentSlabStorage, 1) + require.NoError(tb, err) + require.Equal(tb, 1, len(rootSlabIDs)) + 
require.Contains(tb, rootSlabIDs, accountSlabID) +} + +func concatRegisterAddressAndKey( + address common.Address, + key []byte, +) string { + return string(address[:]) + "|" + string(key) +} + +func concatRegisterAddressAndDomain( + address common.Address, + domain common.StorageDomain, +) string { + return string(address[:]) + "|" + domain.Identifier() +} diff --git a/runtime/transaction_executor.go b/runtime/transaction_executor.go index a8d3f30a90..f071aeb8e2 100644 --- a/runtime/transaction_executor.go +++ b/runtime/transaction_executor.go @@ -106,7 +106,13 @@ func (executor *interpreterTransactionExecutor) preprocess() (err error) { runtimeInterface := context.Interface - storage := NewStorage(runtimeInterface, runtimeInterface) + storage := NewStorage( + runtimeInterface, + runtimeInterface, + StorageConfig{ + StorageFormatV2Enabled: interpreterRuntime.defaultConfig.StorageFormatV2Enabled, + }, + ) executor.storage = storage environment := context.Environment diff --git a/sema/account_test.go b/sema/account_test.go index 6084a5042b..7c7bc60fd5 100644 --- a/sema/account_test.go +++ b/sema/account_test.go @@ -425,15 +425,17 @@ func TestCheckAccountStorageLoad(t *testing.T) { ) if domain == common.PathDomainStorage { - errs := RequireCheckerErrors(t, err, 1) + errs := RequireCheckerErrors(t, err, 2) - require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[0]) + require.IsType(t, &sema.InvocationTypeInferenceError{}, errs[0]) + require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) } else { - errs := RequireCheckerErrors(t, err, 2) + errs := RequireCheckerErrors(t, err, 3) require.IsType(t, &sema.TypeMismatchError{}, errs[0]) - require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) + require.IsType(t, &sema.InvocationTypeInferenceError{}, errs[1]) + require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[2]) } }) } @@ -553,15 +555,17 @@ func TestCheckAccountStorageCopy(t *testing.T) { ) if domain == 
common.PathDomainStorage { - errs := RequireCheckerErrors(t, err, 1) + errs := RequireCheckerErrors(t, err, 2) - require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[0]) + require.IsType(t, &sema.InvocationTypeInferenceError{}, errs[0]) + require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) } else { - errs := RequireCheckerErrors(t, err, 2) + errs := RequireCheckerErrors(t, err, 3) require.IsType(t, &sema.TypeMismatchError{}, errs[0]) - require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) + require.IsType(t, &sema.InvocationTypeInferenceError{}, errs[1]) + require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[2]) } }) } @@ -687,15 +691,17 @@ func TestCheckAccountStorageBorrow(t *testing.T) { ) if domain == common.PathDomainStorage { - errs := RequireCheckerErrors(t, err, 1) + errs := RequireCheckerErrors(t, err, 2) - require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[0]) + require.IsType(t, &sema.InvocationTypeInferenceError{}, errs[0]) + require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) } else { - errs := RequireCheckerErrors(t, err, 2) + errs := RequireCheckerErrors(t, err, 3) require.IsType(t, &sema.TypeMismatchError{}, errs[0]) - require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) + require.IsType(t, &sema.InvocationTypeInferenceError{}, errs[1]) + require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[2]) } }) @@ -715,15 +721,17 @@ func TestCheckAccountStorageBorrow(t *testing.T) { ) if domain == common.PathDomainStorage { - errs := RequireCheckerErrors(t, err, 1) + errs := RequireCheckerErrors(t, err, 2) - require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[0]) + require.IsType(t, &sema.InvocationTypeInferenceError{}, errs[0]) + require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) } else { - errs := RequireCheckerErrors(t, err, 2) + errs := RequireCheckerErrors(t, err, 3) require.IsType(t, &sema.TypeMismatchError{}, errs[0]) - 
require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) + require.IsType(t, &sema.InvocationTypeInferenceError{}, errs[1]) + require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[2]) } }) }) @@ -1027,9 +1035,10 @@ func TestCheckAccountContractsBorrow(t *testing.T) { } `) - errors := RequireCheckerErrors(t, err, 1) + errors := RequireCheckerErrors(t, err, 2) - assert.IsType(t, &sema.TypeParameterTypeInferenceError{}, errors[0]) + assert.IsType(t, &sema.InvocationTypeInferenceError{}, errors[0]) + assert.IsType(t, &sema.TypeParameterTypeInferenceError{}, errors[1]) }) } diff --git a/sema/arrays_dictionaries_test.go b/sema/arrays_dictionaries_test.go index 6e5a512d05..8ae10c8afe 100644 --- a/sema/arrays_dictionaries_test.go +++ b/sema/arrays_dictionaries_test.go @@ -1281,6 +1281,7 @@ func TestCheckArrayMapInvalidArgs(t *testing.T) { `, []sema.SemanticError{ &sema.TypeMismatchError{}, + &sema.InvocationTypeInferenceError{}, // since we're not passing a function. &sema.TypeParameterTypeInferenceError{}, // since we're not passing a function. 
}, ) @@ -2660,9 +2661,10 @@ func TestCheckArrayToConstantSizedMissingTypeArgument(t *testing.T) { } `) - errs := RequireCheckerErrors(t, err, 1) + errs := RequireCheckerErrors(t, err, 2) - assert.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[0]) + assert.IsType(t, &sema.InvocationTypeInferenceError{}, errs[0]) + assert.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) } func TestCheckArrayReferenceTypeInference(t *testing.T) { diff --git a/sema/builtinfunctions_test.go b/sema/builtinfunctions_test.go index 07ca190593..b82471b30b 100644 --- a/sema/builtinfunctions_test.go +++ b/sema/builtinfunctions_test.go @@ -431,6 +431,7 @@ func TestCheckRevertibleRandom(t *testing.T) { "missing type argument", `let rand = revertibleRandom()`, []error{ + &sema.InvocationTypeInferenceError{}, &sema.TypeParameterTypeInferenceError{}, }, ) diff --git a/sema/capability_test.go b/sema/capability_test.go index cb430f7b82..ba1dd9f7cf 100644 --- a/sema/capability_test.go +++ b/sema/capability_test.go @@ -86,9 +86,10 @@ func TestCheckCapability_borrow(t *testing.T) { let r = capability.borrow() `) - errs := RequireCheckerErrors(t, err, 1) + errs := RequireCheckerErrors(t, err, 2) - require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[0]) + require.IsType(t, &sema.InvocationTypeInferenceError{}, errs[0]) + require.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) }) for _, auth := range []sema.Access{sema.UnauthorizedAccess, diff --git a/sema/check_expression.go b/sema/check_expression.go index caff4f0ee6..2d05b41a96 100644 --- a/sema/check_expression.go +++ b/sema/check_expression.go @@ -417,20 +417,7 @@ func (checker *Checker) visitIndexExpression( return InvalidType } - elementType := checker.checkTypeIndexingExpression(typeIndexedType, indexExpression) - if elementType == InvalidType { - checker.report( - &InvalidTypeIndexingError{ - BaseType: typeIndexedType, - IndexingExpression: indexExpression.IndexingExpression, - Range: 
ast.NewRangeFromPositioned( - checker.memoryGauge, - indexExpression.IndexingExpression, - ), - }, - ) - } - return elementType + return checker.checkTypeIndexingExpression(typeIndexedType, indexExpression) } reportNonIndexable(targetType) @@ -444,19 +431,35 @@ func (checker *Checker) checkTypeIndexingExpression( targetExpression := indexExpression.TargetExpression + reportInvalid := func() { + checker.report( + &InvalidTypeIndexingError{ + BaseType: targetType, + IndexingExpression: indexExpression.IndexingExpression, + Range: ast.NewRangeFromPositioned( + checker.memoryGauge, + indexExpression.IndexingExpression, + ), + }, + ) + } + expressionType := ast.ExpressionAsType(indexExpression.IndexingExpression) if expressionType == nil { + reportInvalid() return InvalidType } nominalTypeExpression, isNominalType := expressionType.(*ast.NominalType) if !isNominalType { + reportInvalid() return InvalidType } nominalType := checker.convertNominalType(nominalTypeExpression) if !targetType.IsValidIndexingType(nominalType) { + reportInvalid() return InvalidType } diff --git a/sema/check_invocation_expression.go b/sema/check_invocation_expression.go index 49f714f24c..d1d5ea6e95 100644 --- a/sema/check_invocation_expression.go +++ b/sema/check_invocation_expression.go @@ -503,7 +503,13 @@ func (checker *Checker) checkInvocation( returnType = functionType.ReturnTypeAnnotation.Type.Resolve(typeArguments) if returnType == nil { - // TODO: report error? does `checkTypeParameterInference` below already do that? + checker.report(&InvocationTypeInferenceError{ + Range: ast.NewRangeFromPositioned( + checker.memoryGauge, + invocationExpression, + ), + }) + returnType = InvalidType } @@ -599,6 +605,12 @@ func (checker *Checker) checkInvocationRequiredArgument( parameterType = parameterType.Resolve(typeParameters) // If the type parameter could not be resolved, use the invalid type. 
if parameterType == nil { + checker.report(&InvocationTypeInferenceError{ + Range: ast.NewRangeFromPositioned( + checker.memoryGauge, + argument.Expression, + ), + }) parameterType = InvalidType } } @@ -674,6 +686,12 @@ func (checker *Checker) checkInvocationRequiredArgument( parameterType = parameterType.Resolve(typeParameters) // If the type parameter could not be resolved, use the invalid type. if parameterType == nil { + checker.report(&InvocationTypeInferenceError{ + Range: ast.NewRangeFromPositioned( + checker.memoryGauge, + argument.Expression, + ), + }) parameterType = InvalidType } } diff --git a/sema/checker.go b/sema/checker.go index d94b2b64de..a3c0408b94 100644 --- a/sema/checker.go +++ b/sema/checker.go @@ -886,12 +886,12 @@ func (checker *Checker) ConvertType(t ast.Type) Type { case *ast.InstantiationType: return checker.convertInstantiationType(t) - case nil: - // The AST might contain "holes" if parsing failed + default: + checker.report(&UnconvertableTypeError{ + Range: ast.NewRangeFromPositioned(checker.memoryGauge, t), + }) return InvalidType } - - panic(&astTypeConversionError{invalidASTType: t}) } func CheckIntersectionType( @@ -2611,6 +2611,8 @@ func (checker *Checker) visitExpressionWithForceType( actualType = ast.AcceptExpression[Type](expr, checker) + checker.checkErrorsForInvalidExpressionTypes(actualType, expectedType) + if checker.Config.ExtendedElaborationEnabled { checker.Elaboration.SetExpressionTypes( expr, @@ -2645,6 +2647,20 @@ func (checker *Checker) visitExpressionWithForceType( return actualType, actualType } +func (checker *Checker) checkErrorsForInvalidExpressionTypes(actualType Type, expectedType Type) { + // Defensive check: If an invalid type was produced, + // then also an error should have been reported for the invalid program. + // + // Check for errors first, which is cheap, + // before checking for an invalid type, which is more expensive. 
+ + if len(checker.errors) == 0 && + (actualType.IsInvalidType() || (expectedType != nil && expectedType.IsInvalidType())) { + + panic(errors.NewUnexpectedError("invalid type produced without error")) + } +} + func (checker *Checker) expressionRange(expression ast.Expression) ast.Range { if indexExpr, ok := expression.(*ast.IndexExpression); ok { return ast.NewRange( diff --git a/sema/conditions_test.go b/sema/conditions_test.go index afcc7762fc..e3a996046c 100644 --- a/sema/conditions_test.go +++ b/sema/conditions_test.go @@ -266,10 +266,11 @@ func TestCheckInvalidFunctionPostConditionWithBeforeAndNoArgument(t *testing.T) } `) - errs := RequireCheckerErrors(t, err, 2) + errs := RequireCheckerErrors(t, err, 3) assert.IsType(t, &sema.InsufficientArgumentsError{}, errs[0]) - assert.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) + assert.IsType(t, &sema.InvocationTypeInferenceError{}, errs[1]) + assert.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[2]) }) t.Run("emit condition", func(t *testing.T) { @@ -285,10 +286,11 @@ func TestCheckInvalidFunctionPostConditionWithBeforeAndNoArgument(t *testing.T) } `) - errs := RequireCheckerErrors(t, err, 2) + errs := RequireCheckerErrors(t, err, 3) assert.IsType(t, &sema.InsufficientArgumentsError{}, errs[0]) - assert.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) + assert.IsType(t, &sema.InvocationTypeInferenceError{}, errs[1]) + assert.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[2]) }) } diff --git a/sema/dictionary_test.go b/sema/dictionary_test.go index edd9dd83cb..b159e55c95 100644 --- a/sema/dictionary_test.go +++ b/sema/dictionary_test.go @@ -41,7 +41,9 @@ func TestCheckIncompleteDictionaryType(t *testing.T) { }, ) - require.NoError(t, err) + errs := RequireCheckerErrors(t, err, 1) + + assert.IsType(t, errs[0], &sema.UnconvertableTypeError{}) assert.Equal(t, &sema.DictionaryType{ diff --git a/sema/errors.go b/sema/errors.go index 1760e33eb9..80daa240c8 100644 --- 
a/sema/errors.go +++ b/sema/errors.go @@ -51,16 +51,6 @@ func ErrorMessageExpectedActualTypes( return } -// astTypeConversionError - -type astTypeConversionError struct { - invalidASTType ast.Type -} - -func (e *astTypeConversionError) Error() string { - return fmt.Sprintf("cannot convert unsupported AST type: %#+v", e.invalidASTType) -} - // unsupportedOperation type unsupportedOperation struct { @@ -4883,3 +4873,38 @@ var _ errors.ErrorNote = ResultVariablePostConditionsNote{} func (ResultVariablePostConditionsNote) Message() string { return "post-conditions declared here" } + +// InvocationTypeInferenceError + +type InvocationTypeInferenceError struct { + ast.Range +} + +var _ SemanticError = &InvocationTypeInferenceError{} +var _ errors.UserError = &InvocationTypeInferenceError{} + +func (e *InvocationTypeInferenceError) isSemanticError() {} + +func (*InvocationTypeInferenceError) IsUserError() {} + +func (e *InvocationTypeInferenceError) Error() string { + return "cannot infer type of invocation" +} + +// UnconvertableTypeError + +type UnconvertableTypeError struct { + Type ast.Type + ast.Range +} + +var _ SemanticError = &UnconvertableTypeError{} +var _ errors.UserError = &UnconvertableTypeError{} + +func (e *UnconvertableTypeError) isSemanticError() {} + +func (*UnconvertableTypeError) IsUserError() {} + +func (e *UnconvertableTypeError) Error() string { + return fmt.Sprintf("cannot convert type `%s`", e.Type) +} diff --git a/sema/genericfunction_test.go b/sema/genericfunction_test.go index f88e9ac00a..8a2dd72652 100644 --- a/sema/genericfunction_test.go +++ b/sema/genericfunction_test.go @@ -453,9 +453,10 @@ func TestCheckGenericFunctionInvocation(t *testing.T) { }, ) - errs := RequireCheckerErrors(t, err, 1) + errs := RequireCheckerErrors(t, err, 2) - assert.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[0]) + assert.IsType(t, &sema.InvocationTypeInferenceError{}, errs[0]) + assert.IsType(t, &sema.TypeParameterTypeInferenceError{}, errs[1]) }) 
t.Run("valid: one type parameter, one type argument, no parameters, no arguments, return type", func(t *testing.T) { diff --git a/sema/invalid_test.go b/sema/invalid_test.go index 51ea254984..26857454ce 100644 --- a/sema/invalid_test.go +++ b/sema/invalid_test.go @@ -22,8 +22,12 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" "github.com/onflow/cadence/sema" + "github.com/onflow/cadence/stdlib" . "github.com/onflow/cadence/test_utils/sema_utils" ) @@ -202,3 +206,118 @@ func TestCheckSpuriousCastWithInvalidValueTypeMismatch(t *testing.T) { assert.IsType(t, &sema.NotDeclaredError{}, errs[0]) } + +func TestCheckInvalidInvocationFunctionReturnType(t *testing.T) { + + t.Parallel() + + typeParameter := &sema.TypeParameter{ + Name: "T", + } + + fType := &sema.FunctionType{ + TypeParameters: []*sema.TypeParameter{ + typeParameter, + }, + ReturnTypeAnnotation: sema.NewTypeAnnotation( + &sema.GenericType{ + TypeParameter: typeParameter, + }, + ), + } + + baseValueActivation := sema.NewVariableActivation(sema.BaseValueActivation) + baseValueActivation.DeclareValue(stdlib.StandardLibraryValue{ + Type: fType, + Name: "f", + Kind: common.DeclarationKindFunction, + }) + + _, err := ParseAndCheckWithOptions(t, + ` + let res = [f].reverse() + `, + ParseAndCheckOptions{ + Config: &sema.Config{ + BaseValueActivationHandler: func(_ common.Location) *sema.VariableActivation { + return baseValueActivation + }, + }, + }, + ) + + errs := RequireCheckerErrors(t, err, 1) + + assert.IsType(t, &sema.InvocationTypeInferenceError{}, errs[0]) +} + +func TestCheckInvalidTypeDefensiveCheck(t *testing.T) { + + t.Parallel() + + baseValueActivation := sema.NewVariableActivation(sema.BaseValueActivation) + baseValueActivation.DeclareValue(stdlib.StandardLibraryValue{ + Type: sema.InvalidType, + Name: "invalid", + Kind: common.DeclarationKindConstant, + }) + + var r any + func() 
{ + defer func() { + r = recover() + }() + + _, _ = ParseAndCheckWithOptions(t, + ` + let res = invalid + `, + ParseAndCheckOptions{ + Config: &sema.Config{ + BaseValueActivationHandler: func(_ common.Location) *sema.VariableActivation { + return baseValueActivation + }, + }, + }, + ) + }() + + require.IsType(t, errors.UnexpectedError{}, r) + err := r.(errors.UnexpectedError) + require.ErrorContains(t, err, "invalid type produced without error") +} + +func TestCheckInvalidTypeIndexing(t *testing.T) { + + t.Parallel() + + _, err := ParseAndCheck(t, ` + struct S {} + let s = S() + let res = s[[]] + `) + + errs := RequireCheckerErrors(t, err, 1) + + assert.IsType(t, &sema.InvalidTypeIndexingError{}, errs[0]) +} + +func TestCheckInvalidRemove(t *testing.T) { + + t.Parallel() + + _, err := ParseAndCheck(t, ` + struct S {} + + attachment A for S {} + + fun test() { + let s = S() + remove B from s + } + `) + + errs := RequireCheckerErrors(t, err, 1) + + assert.IsType(t, &sema.NotDeclaredError{}, errs[0]) +} diff --git a/sema/invocation_test.go b/sema/invocation_test.go index 1d0543b2cb..38f4cd4881 100644 --- a/sema/invocation_test.go +++ b/sema/invocation_test.go @@ -593,3 +593,62 @@ func TestCheckArgumentLabels(t *testing.T) { }) } + +func TestCheckInvocationWithIncorrectTypeParameter(t *testing.T) { + + t.Parallel() + + // function type has incorrect type-arguments: + // `fun Foo(_ a: R)` + // + funcType := &sema.FunctionType{ + ReturnTypeAnnotation: sema.VoidTypeAnnotation, + TypeParameters: []*sema.TypeParameter{ + { + Name: "T", + TypeBound: sema.AnyStructType, + }, + }, + Parameters: []sema.Parameter{ + { + Label: sema.ArgumentLabelNotRequired, + Identifier: "a", + TypeAnnotation: sema.NewTypeAnnotation( + &sema.GenericType{ + TypeParameter: &sema.TypeParameter{ + Name: "R", // This is an incorrect/undefined type-parameter + TypeBound: sema.AnyStructType, + }, + }, + ), + }, + }, + } + + baseValueActivation := sema.NewVariableActivation(sema.BaseValueActivation) + 
baseValueActivation.DeclareValue(stdlib.NewStandardLibraryStaticFunction( + "foo", + funcType, + "", + nil, // no need, we only type-check + )) + + _, err := ParseAndCheckWithOptions(t, + ` + access(all) fun test() { + foo("hello") + } + `, + ParseAndCheckOptions{ + Config: &sema.Config{ + BaseValueActivationHandler: func(_ common.Location) *sema.VariableActivation { + return baseValueActivation + }, + }, + }, + ) + + errs := RequireCheckerErrors(t, err, 1) + + assert.IsType(t, &sema.InvocationTypeInferenceError{}, errs[0]) +} diff --git a/sema/string_test.go b/sema/string_test.go index cc46bc786d..578f81d194 100644 --- a/sema/string_test.go +++ b/sema/string_test.go @@ -743,6 +743,27 @@ func TestCheckStringTemplate(t *testing.T) { assert.IsType(t, &sema.TypeMismatchWithDescriptionError{}, errs[0]) }) + t.Run("invalid, struct with tostring", func(t *testing.T) { + + t.Parallel() + + _, err := ParseAndCheck(t, ` + access(all) + struct SomeStruct { + access(all) + view fun toString(): String { + return "SomeStruct" + } + } + let a = SomeStruct() + let x: String = "\(a)" + `) + + errs := RequireCheckerErrors(t, err, 1) + + assert.IsType(t, &sema.TypeMismatchWithDescriptionError{}, errs[0]) + }) + t.Run("invalid, array", func(t *testing.T) { t.Parallel() @@ -788,4 +809,18 @@ func TestCheckStringTemplate(t *testing.T) { assert.IsType(t, &sema.TypeMismatchWithDescriptionError{}, errs[0]) }) + + t.Run("invalid, expression type", func(t *testing.T) { + + t.Parallel() + + _, err := ParseAndCheck(t, ` + let y: Int = 0 + let x: String = "\(y > 0 ? 
"String" : true)" + `) + + errs := RequireCheckerErrors(t, err, 1) + + assert.IsType(t, &sema.TypeMismatchWithDescriptionError{}, errs[0]) + }) } diff --git a/sema/type.go b/sema/type.go index 9a13a2e762..577e69640e 100644 --- a/sema/type.go +++ b/sema/type.go @@ -1615,324 +1615,293 @@ func (*FixedPointNumericType) CheckInstantiated(_ ast.HasPosition, _ common.Memo // Numeric types -var ( - - // NumberType represents the super-type of all number types - NumberType = NewNumericType(NumberTypeName). - WithTag(NumberTypeTag). - AsSuperType() - - NumberTypeAnnotation = NewTypeAnnotation(NumberType) - - // SignedNumberType represents the super-type of all signed number types - SignedNumberType = NewNumericType(SignedNumberTypeName). - WithTag(SignedNumberTypeTag). - AsSuperType() - - SignedNumberTypeAnnotation = NewTypeAnnotation(SignedNumberType) - - // IntegerType represents the super-type of all integer types - IntegerType = NewNumericType(IntegerTypeName). - WithTag(IntegerTypeTag). - AsSuperType() - - IntegerTypeAnnotation = NewTypeAnnotation(IntegerType) - - // SignedIntegerType represents the super-type of all signed integer types - SignedIntegerType = NewNumericType(SignedIntegerTypeName). - WithTag(SignedIntegerTypeTag). - AsSuperType() - - SignedIntegerTypeAnnotation = NewTypeAnnotation(SignedIntegerType) - - // FixedSizeUnsignedIntegerType represents the super-type of all unsigned integer types which have a fixed size. - FixedSizeUnsignedIntegerType = NewNumericType(FixedSizeUnsignedIntegerTypeName). - WithTag(FixedSizeUnsignedIntegerTypeTag). - AsSuperType() - - // IntType represents the arbitrary-precision integer type `Int` - IntType = NewNumericType(IntTypeName). - WithTag(IntTypeTag) - - IntTypeAnnotation = NewTypeAnnotation(IntType) - - // Int8Type represents the 8-bit signed integer type `Int8` - Int8Type = NewNumericType(Int8TypeName). - WithTag(Int8TypeTag). - WithIntRange(Int8TypeMinInt, Int8TypeMaxInt). - WithByteSize(1). 
- WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - Divide: true, - }) - - Int8TypeAnnotation = NewTypeAnnotation(Int8Type) - - // Int16Type represents the 16-bit signed integer type `Int16` - Int16Type = NewNumericType(Int16TypeName). - WithTag(Int16TypeTag). - WithIntRange(Int16TypeMinInt, Int16TypeMaxInt). - WithByteSize(2). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - Divide: true, - }) - - Int16TypeAnnotation = NewTypeAnnotation(Int16Type) - - // Int32Type represents the 32-bit signed integer type `Int32` - Int32Type = NewNumericType(Int32TypeName). - WithTag(Int32TypeTag). - WithIntRange(Int32TypeMinInt, Int32TypeMaxInt). - WithByteSize(4). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - Divide: true, - }) - - Int32TypeAnnotation = NewTypeAnnotation(Int32Type) - - // Int64Type represents the 64-bit signed integer type `Int64` - Int64Type = NewNumericType(Int64TypeName). - WithTag(Int64TypeTag). - WithIntRange(Int64TypeMinInt, Int64TypeMaxInt). - WithByteSize(8). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - Divide: true, - }) - - Int64TypeAnnotation = NewTypeAnnotation(Int64Type) - - // Int128Type represents the 128-bit signed integer type `Int128` - Int128Type = NewNumericType(Int128TypeName). - WithTag(Int128TypeTag). - WithIntRange(Int128TypeMinIntBig, Int128TypeMaxIntBig). - WithByteSize(16). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - Divide: true, - }) - - Int128TypeAnnotation = NewTypeAnnotation(Int128Type) - - // Int256Type represents the 256-bit signed integer type `Int256` - Int256Type = NewNumericType(Int256TypeName). - WithTag(Int256TypeTag). - WithIntRange(Int256TypeMinIntBig, Int256TypeMaxIntBig). - WithByteSize(32). 
- WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - Divide: true, - }) - - Int256TypeAnnotation = NewTypeAnnotation(Int256Type) - - // UIntType represents the arbitrary-precision unsigned integer type `UInt` - UIntType = NewNumericType(UIntTypeName). - WithTag(UIntTypeTag). - WithIntRange(UIntTypeMin, nil). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Subtract: true, - }) - - UIntTypeAnnotation = NewTypeAnnotation(UIntType) - - // UInt8Type represents the 8-bit unsigned integer type `UInt8` - // which checks for overflow and underflow - UInt8Type = NewNumericType(UInt8TypeName). - WithTag(UInt8TypeTag). - WithIntRange(UInt8TypeMinInt, UInt8TypeMaxInt). - WithByteSize(1). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - }) - - UInt8TypeAnnotation = NewTypeAnnotation(UInt8Type) - - // UInt16Type represents the 16-bit unsigned integer type `UInt16` - // which checks for overflow and underflow - UInt16Type = NewNumericType(UInt16TypeName). - WithTag(UInt16TypeTag). - WithIntRange(UInt16TypeMinInt, UInt16TypeMaxInt). - WithByteSize(2). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - }) - - UInt16TypeAnnotation = NewTypeAnnotation(UInt16Type) - - // UInt32Type represents the 32-bit unsigned integer type `UInt32` - // which checks for overflow and underflow - UInt32Type = NewNumericType(UInt32TypeName). - WithTag(UInt32TypeTag). - WithIntRange(UInt32TypeMinInt, UInt32TypeMaxInt). - WithByteSize(4). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - }) - - UInt32TypeAnnotation = NewTypeAnnotation(UInt32Type) - - // UInt64Type represents the 64-bit unsigned integer type `UInt64` - // which checks for overflow and underflow - UInt64Type = NewNumericType(UInt64TypeName). - WithTag(UInt64TypeTag). 
- WithIntRange(UInt64TypeMinInt, UInt64TypeMaxInt). - WithByteSize(8). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - }) - - UInt64TypeAnnotation = NewTypeAnnotation(UInt64Type) - - // UInt128Type represents the 128-bit unsigned integer type `UInt128` - // which checks for overflow and underflow - UInt128Type = NewNumericType(UInt128TypeName). - WithTag(UInt128TypeTag). - WithIntRange(UInt128TypeMinIntBig, UInt128TypeMaxIntBig). - WithByteSize(16). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - }) - - UInt128TypeAnnotation = NewTypeAnnotation(UInt128Type) - - // UInt256Type represents the 256-bit unsigned integer type `UInt256` - // which checks for overflow and underflow - UInt256Type = NewNumericType(UInt256TypeName). - WithTag(UInt256TypeTag). - WithIntRange(UInt256TypeMinIntBig, UInt256TypeMaxIntBig). - WithByteSize(32). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - }) - - UInt256TypeAnnotation = NewTypeAnnotation(UInt256Type) - - // Word8Type represents the 8-bit unsigned integer type `Word8` - // which does NOT check for overflow and underflow - Word8Type = NewNumericType(Word8TypeName). - WithTag(Word8TypeTag). - WithByteSize(1). - WithIntRange(Word8TypeMinInt, Word8TypeMaxInt) - - Word8TypeAnnotation = NewTypeAnnotation(Word8Type) - - // Word16Type represents the 16-bit unsigned integer type `Word16` - // which does NOT check for overflow and underflow - Word16Type = NewNumericType(Word16TypeName). - WithTag(Word16TypeTag). - WithByteSize(2). - WithIntRange(Word16TypeMinInt, Word16TypeMaxInt) - - Word16TypeAnnotation = NewTypeAnnotation(Word16Type) - - // Word32Type represents the 32-bit unsigned integer type `Word32` - // which does NOT check for overflow and underflow - Word32Type = NewNumericType(Word32TypeName). - WithTag(Word32TypeTag). - WithByteSize(4). 
- WithIntRange(Word32TypeMinInt, Word32TypeMaxInt) - - Word32TypeAnnotation = NewTypeAnnotation(Word32Type) - - // Word64Type represents the 64-bit unsigned integer type `Word64` - // which does NOT check for overflow and underflow - Word64Type = NewNumericType(Word64TypeName). - WithTag(Word64TypeTag). - WithByteSize(8). - WithIntRange(Word64TypeMinInt, Word64TypeMaxInt) - - Word64TypeAnnotation = NewTypeAnnotation(Word64Type) - - // Word128Type represents the 128-bit unsigned integer type `Word128` - // which does NOT check for overflow and underflow - Word128Type = NewNumericType(Word128TypeName). - WithTag(Word128TypeTag). - WithByteSize(16). - WithIntRange(Word128TypeMinIntBig, Word128TypeMaxIntBig) - - Word128TypeAnnotation = NewTypeAnnotation(Word128Type) - - // Word256Type represents the 256-bit unsigned integer type `Word256` - // which does NOT check for overflow and underflow - Word256Type = NewNumericType(Word256TypeName). - WithTag(Word256TypeTag). - WithByteSize(32). - WithIntRange(Word256TypeMinIntBig, Word256TypeMaxIntBig) - - Word256TypeAnnotation = NewTypeAnnotation(Word256Type) - - // FixedPointType represents the super-type of all fixed-point types - FixedPointType = NewNumericType(FixedPointTypeName). - WithTag(FixedPointTypeTag). - AsSuperType() - - FixedPointTypeAnnotation = NewTypeAnnotation(FixedPointType) - - // SignedFixedPointType represents the super-type of all signed fixed-point types - SignedFixedPointType = NewNumericType(SignedFixedPointTypeName). - WithTag(SignedFixedPointTypeTag). - AsSuperType() - - SignedFixedPointTypeAnnotation = NewTypeAnnotation(SignedFixedPointType) - - // Fix64Type represents the 64-bit signed decimal fixed-point type `Fix64` - // which has a scale of Fix64Scale, and checks for overflow and underflow - Fix64Type = NewFixedPointNumericType(Fix64TypeName). - WithTag(Fix64TypeTag). - WithIntRange(Fix64TypeMinIntBig, Fix64TypeMaxIntBig). 
- WithFractionalRange(Fix64TypeMinFractionalBig, Fix64TypeMaxFractionalBig). - WithScale(Fix64Scale). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - Divide: true, - }) - - Fix64TypeAnnotation = NewTypeAnnotation(Fix64Type) - - // UFix64Type represents the 64-bit unsigned decimal fixed-point type `UFix64` - // which has a scale of 1E9, and checks for overflow and underflow - UFix64Type = NewFixedPointNumericType(UFix64TypeName). - WithTag(UFix64TypeTag). - WithIntRange(UFix64TypeMinIntBig, UFix64TypeMaxIntBig). - WithFractionalRange(UFix64TypeMinFractionalBig, UFix64TypeMaxFractionalBig). - WithScale(Fix64Scale). - WithSaturatingFunctions(SaturatingArithmeticSupport{ - Add: true, - Subtract: true, - Multiply: true, - }) - - UFix64TypeAnnotation = NewTypeAnnotation(UFix64Type) -) +// NumberType represents the super-type of all number types +var NumberType = NewNumericType(NumberTypeName). + WithTag(NumberTypeTag). + AsSuperType() +var NumberTypeAnnotation = NewTypeAnnotation(NumberType) + +// SignedNumberType represents the super-type of all signed number types +var SignedNumberType = NewNumericType(SignedNumberTypeName). + WithTag(SignedNumberTypeTag). + AsSuperType() +var SignedNumberTypeAnnotation = NewTypeAnnotation(SignedNumberType) + +// IntegerType represents the super-type of all integer types +var IntegerType = NewNumericType(IntegerTypeName). + WithTag(IntegerTypeTag). + AsSuperType() +var IntegerTypeAnnotation = NewTypeAnnotation(IntegerType) + +// SignedIntegerType represents the super-type of all signed integer types +var SignedIntegerType = NewNumericType(SignedIntegerTypeName). + WithTag(SignedIntegerTypeTag). + AsSuperType() +var SignedIntegerTypeAnnotation = NewTypeAnnotation(SignedIntegerType) + +// FixedSizeUnsignedIntegerType represents the super-type of all unsigned integer types which have a fixed size. +var FixedSizeUnsignedIntegerType = NewNumericType(FixedSizeUnsignedIntegerTypeName). 
+ WithTag(FixedSizeUnsignedIntegerTypeTag). + AsSuperType() + +// IntType represents the arbitrary-precision integer type `Int` +var IntType = NewNumericType(IntTypeName). + WithTag(IntTypeTag) +var IntTypeAnnotation = NewTypeAnnotation(IntType) + +// Int8Type represents the 8-bit signed integer type `Int8` +var Int8Type = NewNumericType(Int8TypeName). + WithTag(Int8TypeTag). + WithIntRange(Int8TypeMinInt, Int8TypeMaxInt). + WithByteSize(1). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + Divide: true, + }) +var Int8TypeAnnotation = NewTypeAnnotation(Int8Type) + +// Int16Type represents the 16-bit signed integer type `Int16` +var Int16Type = NewNumericType(Int16TypeName). + WithTag(Int16TypeTag). + WithIntRange(Int16TypeMinInt, Int16TypeMaxInt). + WithByteSize(2). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + Divide: true, + }) +var Int16TypeAnnotation = NewTypeAnnotation(Int16Type) + +// Int32Type represents the 32-bit signed integer type `Int32` +var Int32Type = NewNumericType(Int32TypeName). + WithTag(Int32TypeTag). + WithIntRange(Int32TypeMinInt, Int32TypeMaxInt). + WithByteSize(4). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + Divide: true, + }) +var Int32TypeAnnotation = NewTypeAnnotation(Int32Type) + +// Int64Type represents the 64-bit signed integer type `Int64` +var Int64Type = NewNumericType(Int64TypeName). + WithTag(Int64TypeTag). + WithIntRange(Int64TypeMinInt, Int64TypeMaxInt). + WithByteSize(8). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + Divide: true, + }) +var Int64TypeAnnotation = NewTypeAnnotation(Int64Type) + +// Int128Type represents the 128-bit signed integer type `Int128` +var Int128Type = NewNumericType(Int128TypeName). + WithTag(Int128TypeTag). + WithIntRange(Int128TypeMinIntBig, Int128TypeMaxIntBig). 
+ WithByteSize(16). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + Divide: true, + }) +var Int128TypeAnnotation = NewTypeAnnotation(Int128Type) + +// Int256Type represents the 256-bit signed integer type `Int256` +var Int256Type = NewNumericType(Int256TypeName). + WithTag(Int256TypeTag). + WithIntRange(Int256TypeMinIntBig, Int256TypeMaxIntBig). + WithByteSize(32). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + Divide: true, + }) +var Int256TypeAnnotation = NewTypeAnnotation(Int256Type) + +// UIntType represents the arbitrary-precision unsigned integer type `UInt` +var UIntType = NewNumericType(UIntTypeName). + WithTag(UIntTypeTag). + WithIntRange(UIntTypeMin, nil). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Subtract: true, + }) +var UIntTypeAnnotation = NewTypeAnnotation(UIntType) + +// UInt8Type represents the 8-bit unsigned integer type `UInt8` +// which checks for overflow and underflow +var UInt8Type = NewNumericType(UInt8TypeName). + WithTag(UInt8TypeTag). + WithIntRange(UInt8TypeMinInt, UInt8TypeMaxInt). + WithByteSize(1). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + }) +var UInt8TypeAnnotation = NewTypeAnnotation(UInt8Type) + +// UInt16Type represents the 16-bit unsigned integer type `UInt16` +// which checks for overflow and underflow +var UInt16Type = NewNumericType(UInt16TypeName). + WithTag(UInt16TypeTag). + WithIntRange(UInt16TypeMinInt, UInt16TypeMaxInt). + WithByteSize(2). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + }) +var UInt16TypeAnnotation = NewTypeAnnotation(UInt16Type) + +// UInt32Type represents the 32-bit unsigned integer type `UInt32` +// which checks for overflow and underflow +var UInt32Type = NewNumericType(UInt32TypeName). + WithTag(UInt32TypeTag). 
+ WithIntRange(UInt32TypeMinInt, UInt32TypeMaxInt). + WithByteSize(4). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + }) +var UInt32TypeAnnotation = NewTypeAnnotation(UInt32Type) + +// UInt64Type represents the 64-bit unsigned integer type `UInt64` +// which checks for overflow and underflow +var UInt64Type = NewNumericType(UInt64TypeName). + WithTag(UInt64TypeTag). + WithIntRange(UInt64TypeMinInt, UInt64TypeMaxInt). + WithByteSize(8). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + }) +var UInt64TypeAnnotation = NewTypeAnnotation(UInt64Type) + +// UInt128Type represents the 128-bit unsigned integer type `UInt128` +// which checks for overflow and underflow +var UInt128Type = NewNumericType(UInt128TypeName). + WithTag(UInt128TypeTag). + WithIntRange(UInt128TypeMinIntBig, UInt128TypeMaxIntBig). + WithByteSize(16). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + }) +var UInt128TypeAnnotation = NewTypeAnnotation(UInt128Type) + +// UInt256Type represents the 256-bit unsigned integer type `UInt256` +// which checks for overflow and underflow +var UInt256Type = NewNumericType(UInt256TypeName). + WithTag(UInt256TypeTag). + WithIntRange(UInt256TypeMinIntBig, UInt256TypeMaxIntBig). + WithByteSize(32). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + }) +var UInt256TypeAnnotation = NewTypeAnnotation(UInt256Type) + +// Word8Type represents the 8-bit unsigned integer type `Word8` +// which does NOT check for overflow and underflow +var Word8Type = NewNumericType(Word8TypeName). + WithTag(Word8TypeTag). + WithByteSize(1). 
+ WithIntRange(Word8TypeMinInt, Word8TypeMaxInt) +var Word8TypeAnnotation = NewTypeAnnotation(Word8Type) + +// Word16Type represents the 16-bit unsigned integer type `Word16` +// which does NOT check for overflow and underflow +var Word16Type = NewNumericType(Word16TypeName). + WithTag(Word16TypeTag). + WithByteSize(2). + WithIntRange(Word16TypeMinInt, Word16TypeMaxInt) +var Word16TypeAnnotation = NewTypeAnnotation(Word16Type) + +// Word32Type represents the 32-bit unsigned integer type `Word32` +// which does NOT check for overflow and underflow +var Word32Type = NewNumericType(Word32TypeName). + WithTag(Word32TypeTag). + WithByteSize(4). + WithIntRange(Word32TypeMinInt, Word32TypeMaxInt) +var Word32TypeAnnotation = NewTypeAnnotation(Word32Type) + +// Word64Type represents the 64-bit unsigned integer type `Word64` +// which does NOT check for overflow and underflow +var Word64Type = NewNumericType(Word64TypeName). + WithTag(Word64TypeTag). + WithByteSize(8). + WithIntRange(Word64TypeMinInt, Word64TypeMaxInt) +var Word64TypeAnnotation = NewTypeAnnotation(Word64Type) + +// Word128Type represents the 128-bit unsigned integer type `Word128` +// which does NOT check for overflow and underflow +var Word128Type = NewNumericType(Word128TypeName). + WithTag(Word128TypeTag). + WithByteSize(16). + WithIntRange(Word128TypeMinIntBig, Word128TypeMaxIntBig) +var Word128TypeAnnotation = NewTypeAnnotation(Word128Type) + +// Word256Type represents the 256-bit unsigned integer type `Word256` +// which does NOT check for overflow and underflow +var Word256Type = NewNumericType(Word256TypeName). + WithTag(Word256TypeTag). + WithByteSize(32). + WithIntRange(Word256TypeMinIntBig, Word256TypeMaxIntBig) +var Word256TypeAnnotation = NewTypeAnnotation(Word256Type) + +// FixedPointType represents the super-type of all fixed-point types +var FixedPointType = NewNumericType(FixedPointTypeName). + WithTag(FixedPointTypeTag). 
+ AsSuperType() +var FixedPointTypeAnnotation = NewTypeAnnotation(FixedPointType) + +// SignedFixedPointType represents the super-type of all signed fixed-point types +var SignedFixedPointType = NewNumericType(SignedFixedPointTypeName). + WithTag(SignedFixedPointTypeTag). + AsSuperType() +var SignedFixedPointTypeAnnotation = NewTypeAnnotation(SignedFixedPointType) + +// Fix64Type represents the 64-bit signed decimal fixed-point type `Fix64` +// which has a scale of Fix64Scale, and checks for overflow and underflow +var Fix64Type = NewFixedPointNumericType(Fix64TypeName). + WithTag(Fix64TypeTag). + WithIntRange(Fix64TypeMinIntBig, Fix64TypeMaxIntBig). + WithFractionalRange(Fix64TypeMinFractionalBig, Fix64TypeMaxFractionalBig). + WithScale(Fix64Scale). + WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + Divide: true, + }) +var Fix64TypeAnnotation = NewTypeAnnotation(Fix64Type) + +// UFix64Type represents the 64-bit unsigned decimal fixed-point type `UFix64` +// which has a scale of 1E9, and checks for overflow and underflow +var UFix64Type = NewFixedPointNumericType(UFix64TypeName). + WithTag(UFix64TypeTag). + WithIntRange(UFix64TypeMinIntBig, UFix64TypeMaxIntBig). + WithFractionalRange(UFix64TypeMinFractionalBig, UFix64TypeMaxFractionalBig). + WithScale(Fix64Scale). 
+ WithSaturatingFunctions(SaturatingArithmeticSupport{ + Add: true, + Subtract: true, + Multiply: true, + }) +var UFix64TypeAnnotation = NewTypeAnnotation(UFix64Type) // Numeric type ranges var ( @@ -8457,7 +8426,7 @@ func (t *IntersectionType) Map(gauge common.MemoryGauge, typeParamMap map[*TypeP if mappedType, isInterface := mapped.(*InterfaceType); isInterface { intersectionTypes = append(intersectionTypes, mappedType) } else { - panic(errors.NewUnexpectedError(fmt.Sprintf("intersection mapped to non-interface type %T", mapped))) + panic(errors.NewUnexpectedError("intersection mapped to non-interface type %T", mapped)) } } } diff --git a/stdlib/account.go b/stdlib/account.go index e9f323cc78..1919f67328 100644 --- a/stdlib/account.go +++ b/stdlib/account.go @@ -938,8 +938,6 @@ func newAccountKeysRevokeFunction( } } -const InboxStorageDomain = "inbox" - func newAccountInboxPublishFunction( inter *interpreter.Interpreter, handler EventEmitter, @@ -996,7 +994,7 @@ func newAccountInboxPublishFunction( inter.WriteStored( provider, - InboxStorageDomain, + common.StorageDomainInbox, storageMapKey, publishedValue, ) @@ -1029,7 +1027,7 @@ func newAccountInboxUnpublishFunction( storageMapKey := interpreter.StringStorageMapKey(nameValue.Str) - readValue := inter.ReadStored(provider, InboxStorageDomain, storageMapKey) + readValue := inter.ReadStored(provider, common.StorageDomainInbox, storageMapKey) if readValue == nil { return interpreter.Nil } @@ -1065,7 +1063,7 @@ func newAccountInboxUnpublishFunction( inter.WriteStored( provider, - InboxStorageDomain, + common.StorageDomainInbox, storageMapKey, nil, ) @@ -1114,7 +1112,7 @@ func newAccountInboxClaimFunction( storageMapKey := interpreter.StringStorageMapKey(nameValue.Str) - readValue := inter.ReadStored(providerAddress, InboxStorageDomain, storageMapKey) + readValue := inter.ReadStored(providerAddress, common.StorageDomainInbox, storageMapKey) if readValue == nil { return interpreter.Nil } @@ -1155,7 +1153,7 @@ func 
newAccountInboxClaimFunction( inter.WriteStored( providerAddress, - InboxStorageDomain, + common.StorageDomainInbox, storageMapKey, nil, ) @@ -2983,10 +2981,6 @@ func IssueAccountCapabilityController( return capabilityIDValue } -// CapabilityControllerStorageDomain is the storage domain which stores -// capability controllers by capability ID -const CapabilityControllerStorageDomain = "cap_con" - // storeCapabilityController stores a capability controller in the account's capability ID to controller storage map func storeCapabilityController( inter *interpreter.Interpreter, @@ -2998,7 +2992,7 @@ func storeCapabilityController( existed := inter.WriteStored( address, - CapabilityControllerStorageDomain, + common.StorageDomainCapabilityController, storageMapKey, controller, ) @@ -3017,7 +3011,7 @@ func removeCapabilityController( existed := inter.WriteStored( address, - CapabilityControllerStorageDomain, + common.StorageDomainCapabilityController, storageMapKey, nil, ) @@ -3045,7 +3039,7 @@ func getCapabilityController( readValue := inter.ReadStored( address, - CapabilityControllerStorageDomain, + common.StorageDomainCapabilityController, storageMapKey, ) if readValue == nil { @@ -3225,10 +3219,6 @@ var capabilityIDSetStaticType = &interpreter.DictionaryStaticType{ ValueType: interpreter.NilStaticType, } -// PathCapabilityStorageDomain is the storage domain which stores -// capability ID dictionaries (sets) by storage path identifier -const PathCapabilityStorageDomain = "path_cap" - func recordStorageCapabilityController( inter *interpreter.Interpreter, locationRange interpreter.LocationRange, @@ -3252,9 +3242,10 @@ func recordStorageCapabilityController( storageMapKey := interpreter.StringStorageMapKey(identifier) - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( + inter, address, - PathCapabilityStorageDomain, + common.StorageDomainPathCapability, true, ) @@ -3294,9 +3285,10 @@ func getPathCapabilityIDSet( 
storageMapKey := interpreter.StringStorageMapKey(identifier) - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( + inter, address, - PathCapabilityStorageDomain, + common.StorageDomainPathCapability, false, ) if storageMap == nil { @@ -3344,9 +3336,10 @@ func unrecordStorageCapabilityController( // Remove capability set if empty if capabilityIDSet.Count() == 0 { - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( + inter, address, - PathCapabilityStorageDomain, + common.StorageDomainPathCapability, true, ) if storageMap == nil { @@ -3397,10 +3390,6 @@ func getStorageCapabilityControllerIDsIterator( return } -// AccountCapabilityStorageDomain is the storage domain which -// records active account capability controller IDs -const AccountCapabilityStorageDomain = "acc_cap" - func recordAccountCapabilityController( inter *interpreter.Interpreter, locationRange interpreter.LocationRange, @@ -3416,9 +3405,10 @@ func recordAccountCapabilityController( storageMapKey := interpreter.Uint64StorageMapKey(capabilityIDValue) - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( + inter, address, - AccountCapabilityStorageDomain, + common.StorageDomainAccountCapability, true, ) @@ -3443,9 +3433,10 @@ func unrecordAccountCapabilityController( storageMapKey := interpreter.Uint64StorageMapKey(capabilityIDValue) - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( + inter, address, - AccountCapabilityStorageDomain, + common.StorageDomainAccountCapability, true, ) @@ -3462,9 +3453,10 @@ func getAccountCapabilityControllerIDsIterator( nextCapabilityID func() (uint64, bool), count uint64, ) { - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( + inter, address, - AccountCapabilityStorageDomain, + common.StorageDomainAccountCapability, false, ) if 
storageMap == nil { @@ -3531,7 +3523,7 @@ func newAccountCapabilitiesPublishFunction( panic(errors.NewUnreachableError()) } - domain := pathValue.Domain.Identifier() + domain := pathValue.Domain.StorageDomain() identifier := pathValue.Identifier capabilityType, ok := capabilityValue.StaticType(inter).(*interpreter.CapabilityStaticType) @@ -3650,7 +3642,7 @@ func newAccountCapabilitiesUnpublishFunction( panic(errors.NewUnreachableError()) } - domain := pathValue.Domain.Identifier() + domain := pathValue.Domain.StorageDomain() identifier := pathValue.Identifier // Read/remove capability @@ -3924,7 +3916,7 @@ func newAccountCapabilitiesGetFunction( panic(errors.NewUnreachableError()) } - domain := pathValue.Domain.Identifier() + domain := pathValue.Domain.StorageDomain() identifier := pathValue.Identifier // Get borrow type type argument @@ -4109,7 +4101,7 @@ func newAccountCapabilitiesExistsFunction( panic(errors.NewUnreachableError()) } - domain := pathValue.Domain.Identifier() + domain := pathValue.Domain.StorageDomain() identifier := pathValue.Identifier // Read stored capability, if any @@ -4427,10 +4419,6 @@ func newAccountCapabilityControllerDeleteFunction( } } -// CapabilityControllerTagStorageDomain is the storage domain which stores -// capability controller tags by capability ID -const CapabilityControllerTagStorageDomain = "cap_tag" - func getCapabilityControllerTag( inter *interpreter.Interpreter, address common.Address, @@ -4439,7 +4427,7 @@ func getCapabilityControllerTag( value := inter.ReadStored( address, - CapabilityControllerTagStorageDomain, + common.StorageDomainCapabilityControllerTag, interpreter.Uint64StorageMapKey(capabilityID), ) if value == nil { @@ -4501,7 +4489,7 @@ func SetCapabilityControllerTag( inter.WriteStored( address, - CapabilityControllerTagStorageDomain, + common.StorageDomainCapabilityControllerTag, interpreter.Uint64StorageMapKey(capabilityID), value, ) diff --git a/stdlib/account_test.go b/stdlib/account_test.go index 
2429663cc2..2218655c86 100644 --- a/stdlib/account_test.go +++ b/stdlib/account_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" + "github.com/onflow/cadence/common" "github.com/onflow/cadence/sema" . "github.com/onflow/cadence/test_utils/common_utils" ) @@ -33,11 +34,11 @@ func TestSemaCheckPathLiteralForInternalStorageDomains(t *testing.T) { t.Parallel() internalStorageDomains := []string{ - InboxStorageDomain, - AccountCapabilityStorageDomain, - CapabilityControllerStorageDomain, - PathCapabilityStorageDomain, - CapabilityControllerTagStorageDomain, + common.StorageDomainInbox.Identifier(), + common.StorageDomainAccountCapability.Identifier(), + common.StorageDomainCapabilityController.Identifier(), + common.StorageDomainPathCapability.Identifier(), + common.StorageDomainCapabilityControllerTag.Identifier(), } test := func(domain string) { diff --git a/stdlib/test_contract.go b/stdlib/test_contract.go index cc5bdb5492..c7d447b1ae 100644 --- a/stdlib/test_contract.go +++ b/stdlib/test_contract.go @@ -927,13 +927,10 @@ func newTestTypeExpectFailureFunction( if !failedAsExpected { panic(internalErr) } else if !strings.Contains(internalErr.Error(), errorMessage.Str) { - msg := fmt.Sprintf( - "Expected error message to include: %s.", + panic(errors.NewDefaultUserError( + "Expected error message to include: %s", errorMessage, - ) - panic( - errors.NewDefaultUserError(msg), - ) + )) } }) diff --git a/stdlib/test_test.go b/stdlib/test_test.go index 9e602af451..ee245b295d 100644 --- a/stdlib/test_test.go +++ b/stdlib/test_test.go @@ -2138,7 +2138,7 @@ func TestTestExpectFailure(t *testing.T) { assert.ErrorContains( t, err, - "Expected error message to include: \"what is wrong?\".", + "Expected error message to include: \"what is wrong?\"", ) }) diff --git a/test_utils/interpreter_utils/interpreter.go b/test_utils/interpreter_utils/interpreter.go index 46a8182023..e92a5264dc 100644 --- a/test_utils/interpreter_utils/interpreter.go +++ 
b/test_utils/interpreter_utils/interpreter.go @@ -29,14 +29,26 @@ import ( func NewTestInterpreter(tb testing.TB) *interpreter.Interpreter { storage := NewUnmeteredInMemoryStorage() + return NewTestInterpreterWithStorage(tb, storage) +} + +func NewTestInterpreterWithStorage(tb testing.TB, storage interpreter.Storage) *interpreter.Interpreter { + return NewTestInterpreterWithStorageAndAtreeValidationConfig(tb, storage, true, true) +} +func NewTestInterpreterWithStorageAndAtreeValidationConfig( + tb testing.TB, + storage interpreter.Storage, + atreeValueValidationEnabled bool, + atreeStorageValidationEnabled bool, +) *interpreter.Interpreter { inter, err := interpreter.NewInterpreter( nil, TestLocation, &interpreter.Config{ Storage: storage, - AtreeValueValidationEnabled: true, - AtreeStorageValidationEnabled: true, + AtreeValueValidationEnabled: atreeValueValidationEnabled, + AtreeStorageValidationEnabled: atreeStorageValidationEnabled, }, ) require.NoError(tb, err) diff --git a/compiler/wasm/wasm2wat.go b/test_utils/runtime_utils/storage.go similarity index 54% rename from compiler/wasm/wasm2wat.go rename to test_utils/runtime_utils/storage.go index 517cb217f4..0c971d8c5d 100644 --- a/compiler/wasm/wasm2wat.go +++ b/test_utils/runtime_utils/storage.go @@ -16,41 +16,27 @@ * limitations under the License. 
*/ -package wasm +package runtime_utils import ( - "fmt" - "os" - "os/exec" -) - -func WASM2WAT(binary []byte) string { - f, err := os.CreateTemp("", "wasm") - if err != nil { - panic(err) - } + "testing" - defer os.Remove(f.Name()) + "github.com/onflow/atree" + "github.com/stretchr/testify/require" +) - _, err = f.Write(binary) - if err != nil { - panic(err) - } +func CheckAtreeStorageHealth(tb testing.TB, storage atree.SlabStorage, expectedRootSlabIDs []atree.SlabID) { + rootSlabIDs, err := atree.CheckStorageHealth(storage, -1) + require.NoError(tb, err) - err = f.Close() - if err != nil { - panic(err) - } + nonTempRootSlabIDs := make([]atree.SlabID, 0, len(rootSlabIDs)) - cmd := exec.Command("wasm2wat", f.Name()) - out, err := cmd.Output() - if err != nil { - if ee, ok := err.(*exec.ExitError); ok { - panic(fmt.Errorf("wasm2wat failed: %w:\n%s", err, ee.Stderr)) - } else { - panic(fmt.Errorf("wasm2wat failed: %w", err)) + for rootSlabID := range rootSlabIDs { //nolint:maprange + if rootSlabID.HasTempAddress() { + continue } + nonTempRootSlabIDs = append(nonTempRootSlabIDs, rootSlabID) } - return string(out) + require.ElementsMatch(tb, nonTempRootSlabIDs, expectedRootSlabIDs) } diff --git a/test_utils/runtime_utils/testledger.go b/test_utils/runtime_utils/testledger.go index 4d4c846172..ef77d134f8 100644 --- a/test_utils/runtime_utils/testledger.go +++ b/test_utils/runtime_utils/testledger.go @@ -31,6 +31,7 @@ import ( type TestLedger struct { StoredValues map[string][]byte + StorageIndices map[string]uint64 OnValueExists func(owner, key []byte) (exists bool, err error) OnGetValue func(owner, key []byte) (value []byte, err error) OnSetValue func(owner, key, value []byte) (err error) @@ -92,9 +93,30 @@ func (s TestLedger) Dump() { } } +type LedgerOnRead func(owner, key, value []byte) +type LedgerOnWrite func(owner, key, value []byte) + +type OwnerKeyValue struct { + Owner, Key, Value []byte +} + +var LedgerOnWriteCounter = func(counter *int) LedgerOnWrite { + 
return func(_, _, _ []byte) { + (*counter)++ + } +} + +var LedgerOnWriteEntries = func(entries *[]OwnerKeyValue) LedgerOnWrite { + return func(owner, key, value []byte) { + *entries = append( + *entries, + OwnerKeyValue{Owner: owner, Key: key, Value: value}) + } +} + func NewTestLedger( - onRead func(owner, key, value []byte), - onWrite func(owner, key, value []byte), + onRead LedgerOnRead, + onWrite LedgerOnWrite, ) TestLedger { storedValues := map[string][]byte{} @@ -102,7 +124,8 @@ func NewTestLedger( storageIndices := map[string]uint64{} return TestLedger{ - StoredValues: storedValues, + StoredValues: storedValues, + StorageIndices: storageIndices, OnValueExists: func(owner, key []byte) (bool, error) { value := storedValues[TestStorageKey(string(owner), string(key))] return len(value) > 0, nil @@ -142,7 +165,8 @@ func NewTestLedgerWithData( } return TestLedger{ - StoredValues: storedValues, + StoredValues: storedValues, + StorageIndices: storageIndices, OnValueExists: func(owner, key []byte) (bool, error) { value := storedValues[storageKey(string(owner), string(key))] return len(value) > 0, nil diff --git a/tools/compatibility-check/go.mod b/tools/compatibility-check/go.mod index 628bfdc184..e4755a3d03 100644 --- a/tools/compatibility-check/go.mod +++ b/tools/compatibility-check/go.mod @@ -1,13 +1,13 @@ module github.com/onflow/cadence/tools/compatibility_check -go 1.22 +go 1.23 require ( github.com/onflow/cadence v1.1.1-0.20241018202510-7f1b6fbc57c2 github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0 github.com/onflow/flow-go v0.38.0-preview.0.0.20241018215103-774056466e36 github.com/rs/zerolog v1.29.0 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 ) require ( @@ -43,7 +43,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onflow/atree v0.8.0 // indirect + github.com/onflow/atree 
v0.9.0 // indirect github.com/onflow/crypto v0.25.2 // indirect github.com/onflow/flow-core-contracts/lib/go/templates v1.3.3-0.20241017220455-79fdc6c8ba53 // indirect github.com/onflow/flow-ft/lib/go/contracts v1.0.1 // indirect @@ -73,7 +73,7 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.11 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/zeebo/blake3 v0.2.3 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect go.opentelemetry.io/otel v1.24.0 // indirect golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect diff --git a/tools/compatibility-check/go.sum b/tools/compatibility-check/go.sum index 3c453fa707..79fce2ea6e 100644 --- a/tools/compatibility-check/go.sum +++ b/tools/compatibility-check/go.sum @@ -278,7 +278,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -343,8 +342,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= 
-github.com/onflow/atree v0.8.0 h1:qg5c6J1gVDNObughpEeWm8oxqhPGdEyGrda121GM4u0= -github.com/onflow/atree v0.8.0/go.mod h1:yccR+LR7xc1Jdic0mrjocbHvUD7lnVvg8/Ct1AA5zBo= +github.com/onflow/atree v0.9.0 h1:M+Z/UPwzv0/Yy7ChI5T1ZIHD3YN1cs/hxGEs/HWhzaY= +github.com/onflow/atree v0.9.0/go.mod h1:FT6udJF9Q7VQTu3wknDhFX+VV4D44ZGdqtTAE5iztck= github.com/onflow/crypto v0.25.2 h1:GjHunqVt+vPcdqhxxhAXiMIF3YiLX7gTuTR5O+VG2ns= github.com/onflow/crypto v0.25.2/go.mod h1:fY7eLqUdMKV8EGOw301unP8h7PvLVy8/6gVR++/g0BY= github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0 h1:R86HaOuk6vpuECZnriEUE7bw9inC2AtdSn8lL/iwQLQ= @@ -454,8 +453,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg= @@ -480,11 +479,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= -github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= diff --git a/tools/constructorcheck/go.mod b/tools/constructorcheck/go.mod index 57adcbf9a7..66b1728614 100644 --- a/tools/constructorcheck/go.mod +++ b/tools/constructorcheck/go.mod @@ -1,10 +1,10 @@ module github.com/onflow/cadence/tools/constructorcheck -go 1.22 +go 1.23 -require golang.org/x/tools v0.21.0 +require golang.org/x/tools v0.28.0 require ( - golang.org/x/mod v0.17.0 // indirect - golang.org/x/sync v0.7.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect ) diff --git a/tools/constructorcheck/go.sum b/tools/constructorcheck/go.sum index 05680b3128..e3144c2c5f 100644 --- a/tools/constructorcheck/go.sum +++ b/tools/constructorcheck/go.sum @@ -1,6 +1,8 @@ -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools 
v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= diff --git a/tools/get-contracts/go.mod b/tools/get-contracts/go.mod index 94a8db16a0..11faddc78e 100644 --- a/tools/get-contracts/go.mod +++ b/tools/get-contracts/go.mod @@ -1,6 +1,6 @@ module github.com/onflow/cadence/tools/get-contracts -go 1.22 +go 1.23 require github.com/hasura/go-graphql-client v0.10.2 diff --git a/tools/get-contracts/main.go b/tools/get-contracts/main.go index 3519d24f27..7ee944ed10 100644 --- a/tools/get-contracts/main.go +++ b/tools/get-contracts/main.go @@ -21,16 +21,13 @@ package main import ( - "context" + "encoding/base64" "encoding/csv" "flag" - "fmt" "log" "net/http" "os" "sort" - - "github.com/hasura/go-graphql-client" ) type chainID string @@ -41,17 +38,15 @@ const ( ) var chainFlag = flag.String("chain", "", "mainnet or testnet") -var apiKeyFlag = flag.String("apiKey", "", "Flowdiver API key") -var batchFlag = flag.Int("batch", 500, "batch size") -var csvHeader = []string{"location", "code"} +const authFlagUsage = "find.xyz API auth (username:password)" -func main() { - flag.Parse() +var authFlag = flag.String("auth", "", authFlagUsage) - // Get batch size from flags +var resultCSVHeader = []string{"location", "code"} - batchSize := *batchFlag +func main() { + flag.Parse() // Get chain ID from flags @@ -65,11 +60,11 @@ func main() { 
log.Fatalf("invalid chain: %s", chain) } - // Get API key from flags + // Get auth from flags - apiKey := *apiKeyFlag - if apiKey == "" { - log.Fatal("missing Flowdiver API key") + auth := *authFlag + if auth == "" { + log.Fatal("missing " + authFlagUsage) } // Get contracts from network @@ -77,71 +72,37 @@ func main() { var apiURL string switch chain { case mainnet: - apiURL = "https://api.findlabs.io/hasura/v1/graphql" + apiURL = "https://api.find.xyz" case testnet: - apiURL = "https://api.findlabs.io/hasura_testnet/v1/graphql" + apiURL = "https://api.test-find.xyz" } - client := graphql.NewClient(apiURL, nil). - WithRequestModifier(func(r *http.Request) { - r.Header.Set("Authorization", fmt.Sprintf("Bearer %s", apiKey)) - // NOTE: important, default is forbidden by API's bot prevention - // (https://github.com/Kong/kong/blob/master/kong/plugins/bot-detection/rules.lua) - r.Header.Set("User-Agent", "Flow Foundation Cadence Tool") - }) - - var total, offset int - var contracts [][]string - - for { - - log.Printf("fetching contracts %d-%d", offset, offset+batchSize) - - var req struct { - ContractsAggregate struct { - Aggregate struct { - Count int - } - } `graphql:"contracts_aggregate(where: {valid_to: {_is_null: true}})"` - Contracts []struct { - Identifier string - Body string - } `graphql:"contracts(where: {valid_to: {_is_null: true}}, limit: $limit, offset: $offset)"` - } - - if err := client.Query( - context.Background(), - &req, - map[string]any{ - "offset": offset, - "limit": batchSize, - }, - ); err != nil { - log.Fatalf("failed to query: %s", err) - } + apiURL += "/bulk/v1/contract?valid_only=true" - total = req.ContractsAggregate.Aggregate.Count + req, err := http.NewRequest("GET", apiURL, nil) + if err != nil { + log.Fatalf("failed to create HTTP request: %s", err) + } - if contracts == nil { - contracts = make([][]string, 0, total) - } + req.Header.Set("Accept", "text/csv") + req.Header.Add("Authorization", "Basic 
"+base64.StdEncoding.EncodeToString([]byte(auth))) - for _, contract := range req.Contracts { - contracts = append( - contracts, []string{ - contract.Identifier, - contract.Body, - }, - ) - } + res, err := http.DefaultClient.Do(req) + if err != nil { + log.Fatalf("failed to send HTTP request: %s", err) + } - offset += batchSize + reader := csv.NewReader(res.Body) + reader.FieldsPerRecord = -1 - if offset >= total { - break - } + contracts, err := reader.ReadAll() + if err != nil { + log.Fatalf("failed to read CSV: %s", err) } + // Skip header + contracts = contracts[1:] + // Sort sort.Slice( @@ -155,12 +116,17 @@ func main() { writer := csv.NewWriter(os.Stdout) - if err := writer.Write(csvHeader); err != nil { + if err := writer.Write(resultCSVHeader); err != nil { log.Fatalf("failed to write CSV header: %s", err) return } for _, contract := range contracts { + identifier := contract[0] + if identifier == "A." || identifier == "null" { + continue + } + err := writer.Write(contract) if err != nil { log.Fatalf("failed to write contract to CSV: %s", err) diff --git a/tools/golangci-lint/go.mod b/tools/golangci-lint/go.mod index ce7784304a..aaaf858ba5 100644 --- a/tools/golangci-lint/go.mod +++ b/tools/golangci-lint/go.mod @@ -1,55 +1,56 @@ module github.com/onflow/cadence/tools/golangci-lint -go 1.22 +go 1.23 -require github.com/golangci/golangci-lint v1.59.0 +require github.com/golangci/golangci-lint v1.63.4 require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect 4d63.com/gochecknoglobals v0.2.1 // indirect - github.com/4meepo/tagalign v1.3.4 // indirect - github.com/Abirdcfly/dupword v0.0.14 // indirect - github.com/Antonboom/errname v0.1.13 // indirect - github.com/Antonboom/nilnil v0.1.9 // indirect - github.com/Antonboom/testifylint v1.3.0 // indirect - github.com/BurntSushi/toml v1.4.0 // indirect - github.com/Crocmagnon/fatcontext v0.2.2 // indirect + github.com/4meepo/tagalign v1.4.1 // indirect + github.com/Abirdcfly/dupword v0.1.3 // indirect + 
github.com/Antonboom/errname v1.0.0 // indirect + github.com/Antonboom/nilnil v1.0.1 // indirect + github.com/Antonboom/testifylint v1.5.2 // indirect + github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect + github.com/Crocmagnon/fatcontext v0.5.3 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect - github.com/alecthomas/go-check-sumtype v0.1.4 // indirect - github.com/alexkohler/nakedret/v2 v2.0.4 // indirect + github.com/alecthomas/go-check-sumtype v0.3.1 // indirect + github.com/alexkohler/nakedret/v2 v2.0.5 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect + github.com/alingse/nilnesserr v0.1.1 // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect - github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/ashanbrown/makezero v1.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bkielbasa/cyclop v1.2.1 // indirect + github.com/bkielbasa/cyclop v1.2.3 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v4 v4.2.1 // indirect - github.com/breml/bidichk v0.2.7 // indirect - github.com/breml/errchkjson v0.3.6 // indirect - github.com/butuzov/ireturn v0.3.0 // indirect - github.com/butuzov/mirror v1.2.0 // indirect + github.com/bombsimon/wsl/v4 v4.5.0 // indirect + github.com/breml/bidichk v0.3.2 // indirect + github.com/breml/errchkjson v0.4.0 // indirect + github.com/butuzov/ireturn v0.3.1 // indirect + github.com/butuzov/mirror v1.3.0 // indirect github.com/catenacyber/perfsprint v0.7.1 // indirect github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/cespare/xxhash/v2 
v2.1.2 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect - github.com/ckaznocha/intrange v0.1.2 // indirect - github.com/curioswitch/go-reassign v0.2.0 // indirect - github.com/daixiang0/gci v0.13.4 // indirect + github.com/ckaznocha/intrange v0.3.0 // indirect + github.com/curioswitch/go-reassign v0.3.0 // indirect + github.com/daixiang0/gci v0.13.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect github.com/ettle/strcase v0.2.0 // indirect - github.com/fatih/color v1.17.0 // indirect + github.com/fatih/color v1.18.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.5 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/ghostiam/protogetter v0.3.6 // indirect - github.com/go-critic/go-critic v0.11.4 // indirect + github.com/ghostiam/protogetter v0.3.8 // indirect + github.com/go-critic/go-critic v0.11.5 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect github.com/go-toolsmith/astequal v1.2.0 // indirect @@ -57,15 +58,15 @@ require ( github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect github.com/go-toolsmith/typep v1.1.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect - github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect - github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect - github.com/golangci/misspell v0.5.1 // indirect - 
github.com/golangci/modinfo v0.3.4 // indirect + github.com/golangci/go-printf-func-name v0.1.0 // indirect + github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9 // indirect + github.com/golangci/misspell v0.6.0 // indirect github.com/golangci/plugin-module-register v0.1.1 // indirect github.com/golangci/revgrep v0.5.3 // indirect github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect @@ -75,26 +76,29 @@ require ( github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jgautheron/goconst v1.7.1 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect - github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect - github.com/jjti/go-spancheck v0.6.1 // indirect - github.com/julz/importas v0.1.0 // indirect + github.com/jjti/go-spancheck v0.6.4 // indirect + github.com/julz/importas v0.2.0 // indirect github.com/karamaru-alpha/copyloopvar v1.1.0 // indirect - github.com/kisielk/errcheck v1.7.0 // indirect + github.com/kisielk/errcheck v1.8.0 // indirect github.com/kkHAIKE/contextcheck v1.1.5 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.10 // indirect github.com/kyoh86/exportloopref v0.1.11 // indirect - github.com/lasiar/canonicalheader v1.1.1 // indirect - github.com/ldez/gomoddirectives v0.2.4 // indirect - github.com/ldez/tagliatelle v0.5.0 // indirect + github.com/lasiar/canonicalheader v1.1.2 // indirect + github.com/ldez/exptostd v0.3.1 // indirect + github.com/ldez/gomoddirectives v0.6.0 // indirect + 
github.com/ldez/grignotin v0.7.0 // indirect + github.com/ldez/tagliatelle v0.7.1 // indirect + github.com/ldez/usetesting v0.4.2 // indirect github.com/leonklingele/grouper v1.1.2 // indirect - github.com/lufeee/execinquery v1.2.1 // indirect github.com/macabu/inamedparam v0.1.3 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/maratori/testableexamples v1.0.0 // indirect @@ -102,89 +106,90 @@ require ( github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mgechev/revive v1.3.7 // indirect + github.com/mgechev/revive v1.5.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moricho/tparallel v0.3.1 // indirect + github.com/moricho/tparallel v0.3.2 // indirect github.com/nakabonne/nestif v0.3.1 // indirect - github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.16.2 // indirect + github.com/nunnatsa/ginkgolinter v0.18.4 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polyfloyd/go-errorlint v1.5.1 // indirect + github.com/polyfloyd/go-errorlint v1.7.0 // indirect github.com/prometheus/client_golang v1.12.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect - 
github.com/quasilyte/go-ruleguard v0.4.2 // indirect + github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect - github.com/ryancurrah/gomodguard v1.3.2 // indirect + github.com/raeperd/recvcheck v0.2.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect + github.com/ryancurrah/gomodguard v1.3.5 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect - github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect - github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect + github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect - github.com/sashamelentyev/usestdlibvars v1.25.0 // indirect - github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9 // indirect + github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect + github.com/securego/gosec/v2 v2.21.4 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect - github.com/sivchari/tenv v1.7.1 // indirect - github.com/sonatard/noctx v0.0.2 // indirect + github.com/sivchari/tenv v1.12.1 // indirect + github.com/sonatard/noctx v0.1.0 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.12.0 // indirect 
github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect - github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.9.0 // indirect + github.com/stretchr/testify v1.10.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect - github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect - github.com/tdakkota/asciicheck v0.2.0 // indirect - github.com/tetafro/godot v1.4.16 // indirect - github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect - github.com/timonwong/loggercheck v0.9.4 // indirect - github.com/tomarrell/wrapcheck/v2 v2.8.3 // indirect + github.com/tdakkota/asciicheck v0.3.0 // indirect + github.com/tetafro/godot v1.4.20 // indirect + github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 // indirect + github.com/timonwong/loggercheck v0.10.1 // indirect + github.com/tomarrell/wrapcheck/v2 v2.10.0 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect - github.com/ultraware/funlen v0.1.0 // indirect - github.com/ultraware/whitespace v0.1.1 // indirect - github.com/uudashr/gocognit v1.1.2 // indirect + github.com/ultraware/funlen v0.2.0 // indirect + github.com/ultraware/whitespace v0.2.0 // indirect + github.com/uudashr/gocognit v1.2.0 // indirect + github.com/uudashr/iface v1.3.0 // indirect github.com/xen0n/gosmopolitan v1.2.2 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.3.0 // indirect github.com/ykadowak/zerologlint v0.1.5 // indirect gitlab.com/bosi/decorder v0.4.2 // indirect - go-simpler.org/musttag v0.12.2 // indirect - go-simpler.org/sloglint v0.7.0 // indirect + go-simpler.org/musttag v0.13.0 // indirect + go-simpler.org/sloglint v0.7.2 // indirect go.uber.org/atomic v1.7.0 // indirect - go.uber.org/automaxprocs v1.5.3 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.6.0 
// indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc // indirect - golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect - golang.org/x/tools v0.21.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.20.0 // indirect + golang.org/x/tools v0.28.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.4.7 // indirect - mvdan.cc/gofumpt v0.6.0 // indirect - mvdan.cc/unparam v0.0.0-20240427195214-063aff900ca1 // indirect + honnef.co/go/tools v0.5.1 // indirect + mvdan.cc/gofumpt v0.7.0 // indirect + mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect ) diff --git a/tools/golangci-lint/go.sum b/tools/golangci-lint/go.sum index be04ca737e..a698f87148 100644 --- a/tools/golangci-lint/go.sum +++ b/tools/golangci-lint/go.sum @@ -35,71 +35,73 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8= -github.com/4meepo/tagalign v1.3.4/go.mod 
h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0= -github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= -github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= -github.com/Antonboom/errname v0.1.13 h1:JHICqsewj/fNckzrfVSe+T33svwQxmjC+1ntDsHOVvM= -github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns= -github.com/Antonboom/nilnil v0.1.9 h1:eKFMejSxPSA9eLSensFmjW2XTgTwJMjZ8hUHtV4s/SQ= -github.com/Antonboom/nilnil v0.1.9/go.mod h1:iGe2rYwCq5/Me1khrysB4nwI7swQvjclR8/YRPl5ihQ= -github.com/Antonboom/testifylint v1.3.0 h1:UiqrddKs1W3YK8R0TUuWwrVKlVAnS07DTUVWWs9c+y4= -github.com/Antonboom/testifylint v1.3.0/go.mod h1:NV0hTlteCkViPW9mSR4wEMfwp+Hs1T3dY60bkvSfhpM= +github.com/4meepo/tagalign v1.4.1 h1:GYTu2FaPGOGb/xJalcqHeD4il5BiCywyEYZOA55P6J4= +github.com/4meepo/tagalign v1.4.1/go.mod h1:2H9Yu6sZ67hmuraFgfZkNcg5Py9Ch/Om9l2K/2W1qS4= +github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE= +github.com/Abirdcfly/dupword v0.1.3/go.mod h1:8VbB2t7e10KRNdwTVoxdBaxla6avbhGzb8sCTygUMhw= +github.com/Antonboom/errname v1.0.0 h1:oJOOWR07vS1kRusl6YRSlat7HFnb3mSfMl6sDMRoTBA= +github.com/Antonboom/errname v1.0.0/go.mod h1:gMOBFzK/vrTiXN9Oh+HFs+e6Ndl0eTFbtsRTSRdXyGI= +github.com/Antonboom/nilnil v1.0.1 h1:C3Tkm0KUxgfO4Duk3PM+ztPncTFlOf0b2qadmS0s4xs= +github.com/Antonboom/nilnil v1.0.1/go.mod h1:CH7pW2JsRNFgEh8B2UaPZTEPhCMuFowP/e8Udp9Nnb0= +github.com/Antonboom/testifylint v1.5.2 h1:4s3Xhuv5AvdIgbd8wOOEeo0uZG7PbDKQyKY5lGoQazk= +github.com/Antonboom/testifylint v1.5.2/go.mod h1:vxy8VJ0bc6NavlYqjZfmp6EfqXMtBgQ4+mhCojwC1P8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= -github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c 
h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= +github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Crocmagnon/fatcontext v0.2.2 h1:OrFlsDdOj9hW/oBEJBNSuH7QWf+E9WPVHw+x52bXVbk= -github.com/Crocmagnon/fatcontext v0.2.2/go.mod h1:WSn/c/+MMNiD8Pri0ahRj0o9jVpeowzavOQplBJw6u0= +github.com/Crocmagnon/fatcontext v0.5.3 h1:zCh/wjc9oyeF+Gmp+V60wetm8ph2tlsxocgg/J0hOps= +github.com/Crocmagnon/fatcontext v0.5.3/go.mod h1:XoCQYY1J+XTfyv74qLXvNw4xFunr3L1wkopIIKG7wGM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0 h1:sATXp1x6/axKxz2Gjxv8MALP0bXaNRfQinEwyfMcx8c= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= -github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= -github.com/alecthomas/assert/v2 
v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= -github.com/alecthomas/go-check-sumtype v0.1.4 h1:WCvlB3l5Vq5dZQTFmodqL2g68uHiSwwlWcT5a2FGK0c= -github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= -github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= -github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= +github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= +github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= +github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= +github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= +github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexkohler/nakedret/v2 v2.0.4 h1:yZuKmjqGi0pSmjGpOC016LtPJysIL0WEUiaXW5SUnNg= -github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= +github.com/alexkohler/nakedret/v2 v2.0.5 h1:fP5qLgtwbx9EJE8dGEERT02YwS8En4r9nnZ71RK+EVU= +github.com/alexkohler/nakedret/v2 v2.0.5/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= github.com/alexkohler/prealloc v1.0.0 
h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/alingse/nilnesserr v0.1.1 h1:7cYuJewpy9jFNMEA72Q1+3Nm3zKHzg+Q28D5f2bBFUA= +github.com/alingse/nilnesserr v0.1.1/go.mod h1:1xJPrXonEtX7wyTq8Dytns5P2hNzoWymVUIaKm4HNFg= github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= -github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= -github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU= +github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= -github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= +github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= +github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= github.com/blizzy78/varnamelen v0.8.0 
h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= -github.com/bombsimon/wsl/v4 v4.2.1 h1:Cxg6u+XDWff75SIFFmNsqnIOgob+Q9hG6y/ioKbRFiM= -github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= -github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= -github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= -github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= -github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= -github.com/butuzov/ireturn v0.3.0 h1:hTjMqWw3y5JC3kpnC5vXmFJAWI/m31jaCYQqzkS6PL0= -github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= -github.com/butuzov/mirror v1.2.0 h1:9YVK1qIjNspaqWutSv8gsge2e/Xpq1eqEkslEUHy5cs= -github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ= +github.com/bombsimon/wsl/v4 v4.5.0 h1:iZRsEvDdyhd2La0FVi5k6tYehpOR/R7qIUjmKk7N74A= +github.com/bombsimon/wsl/v4 v4.5.0/go.mod h1:NOQ3aLF4nD7N5YPXMruR6ZXDOAqLoM0GEpLwTdvmOSc= +github.com/breml/bidichk v0.3.2 h1:xV4flJ9V5xWTqxL+/PMFF6dtJPvZLPsyixAoPe8BGJs= +github.com/breml/bidichk v0.3.2/go.mod h1:VzFLBxuYtT23z5+iVkamXO386OB+/sVwZOpIj6zXGos= +github.com/breml/errchkjson v0.4.0 h1:gftf6uWZMtIa/Is3XJgibewBm2ksAQSY/kABDNFTAdk= +github.com/breml/errchkjson v0.4.0/go.mod h1:AuBOSTHyLSaaAFlWsRSuRBIroCh3eh7ZHh5YeelDIk8= +github.com/butuzov/ireturn v0.3.1 h1:mFgbEI6m+9W8oP/oDdfA34dLisRFCj2G6o/yiI1yZrY= +github.com/butuzov/ireturn v0.3.1/go.mod h1:ZfRp+E7eJLC0NQmk1Nrm1LOrn/gQlOykv+cVPdiXH5M= +github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= +github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI= github.com/catenacyber/perfsprint v0.7.1 h1:PGW5G/Kxn+YrN04cRAZKC+ZuvlVwolYMrIyyTJ/rMmc= github.com/catenacyber/perfsprint 
v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg= @@ -115,42 +117,44 @@ github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+U github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/ckaznocha/intrange v0.1.2 h1:3Y4JAxcMntgb/wABQ6e8Q8leMd26JbX2790lIss9MTI= -github.com/ckaznocha/intrange v0.1.2/go.mod h1:RWffCw/vKBwHeOEwWdCikAtY0q4gGt8VhJZEEA5n+RE= +github.com/ckaznocha/intrange v0.3.0 h1:VqnxtK32pxgkhJgYQEeOArVidIPg+ahLP7WBOXZd5ZY= +github.com/ckaznocha/intrange v0.3.0/go.mod h1:+I/o2d2A1FBHgGELbGxzIcyd3/9l9DuwjM8FsbSS3Lo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= -github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/daixiang0/gci v0.13.4 h1:61UGkmpoAcxHM2hhNkZEf5SzwQtWJXTSws7jaPyqwlw= -github.com/daixiang0/gci v0.13.4/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs= +github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88= +github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c= +github.com/daixiang0/gci v0.13.5/go.mod 
h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42t4429eC9k8= github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q= github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= -github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= -github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA= github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw= 
-github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/ghostiam/protogetter v0.3.6 h1:R7qEWaSgFCsy20yYHNIJsU9ZOb8TziSRRxuAOTVKeOk= -github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= -github.com/go-critic/go-critic v0.11.4 h1:O7kGOCx0NDIni4czrkRIXTnit0mkyKOCePh3My6OyEU= -github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc= +github.com/ghostiam/protogetter v0.3.8 h1:LYcXbYvybUyTIxN2Mj9h6rHrDZBDwZloPoKctWrFyJY= +github.com/ghostiam/protogetter v0.3.8/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= +github.com/go-critic/go-critic v0.11.5 h1:TkDTOn5v7EEngMxu8KbuFqFR43USaaH8XRJLz1jhVYA= +github.com/go-critic/go-critic v0.11.5/go.mod h1:wu6U7ny9PiaHaZHcvMDmdysMqvDem162Rh3zWTrqk8M= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -160,8 +164,10 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -184,14 +190,14 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= -github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-xmlfmt/xmlfmt v1.1.3 h1:t8Ey3Uy7jDSEisW2K3somuMKIpzktkWptA0iFCnRUWY= 
+github.com/go-xmlfmt/xmlfmt v1.1.3/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -224,14 +230,14 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g= -github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= -github.com/golangci/golangci-lint v1.59.0 h1:st69YDnAH/v2QXDcgUaZ0seQajHScPALBVkyitYLXEk= -github.com/golangci/golangci-lint v1.59.0/go.mod h1:QNA32UWdUdHXnu+Ap5/ZU4WVwyp2tL94UxEXrSErjg0= -github.com/golangci/misspell v0.5.1 h1:/SjR1clj5uDjNLwYzCahHwIOPmQgoH04AyQIiWGbhCM= -github.com/golangci/misspell v0.5.1/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= -github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA= 
-github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM= +github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU= +github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s= +github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9 h1:t5wybL6RtO83VwoMOb7U/Peqe3gGKQlPIC66wXmnkvM= +github.com/golangci/gofmt v0.0.0-20241223200906-057b0627d9b9/go.mod h1:Ag3L7sh7E28qAp/5xnpMMTuGYqxLZoSaEHZDkZB1RgU= +github.com/golangci/golangci-lint v1.63.4 h1:bJQFQ3hSfUto597dkL7ipDzOxsGEpiWdLiZ359OWOBI= +github.com/golangci/golangci-lint v1.63.4/go.mod h1:Hx0B7Lg5/NXbaOHem8+KU+ZUIzMI6zNj/7tFwdnn10I= +github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs= +github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= github.com/golangci/plugin-module-register v0.1.1/go.mod h1:TTpqoB6KkwOJMV8u7+NyXMrkwwESJLOkfl9TxR1DGFc= github.com/golangci/revgrep v0.5.3 h1:3tL7c1XBMtWHHqVpS5ChmiAAoe4PF/d5+ULzV9sLAzs= @@ -264,8 +270,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= -github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof 
v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -281,13 +287,19 @@ github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3 github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= -github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= -github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= +github.com/gostaticanalysis/testutil v0.5.0 h1:Dq4wT1DdTwTGCQQv3rl3IvD5Ld0E6HiY+3Zh0sUGqw8= +github.com/gostaticanalysis/testutil v0.5.0/go.mod h1:OLQSbuM6zw2EvCcXTz1lVq5unyoNft372msDY0nY5Hs= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0 h1:CUW5RYIcysz+D3B+l1mDeXrQ7fUvGGCwJfdASSzbrfo= +github.com/hashicorp/go-immutable-radix/v2 v2.1.0/go.mod h1:hgdqLXA4f6NIjRVisM1TJ9aOJVNRqKZj+xDGF6m7PBw= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= @@ -299,10 +311,8 @@ github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5 github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= -github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jjti/go-spancheck v0.6.1 h1:ZK/wE5Kyi1VX3PJpUO2oEgeoI4FWOUm7Shb2Gbv5obI= -github.com/jjti/go-spancheck v0.6.1/go.mod h1:vF1QkOO159prdo6mHRxak2CpzDpHAfKiPUDP/NeRnX8= +github.com/jjti/go-spancheck v0.6.4 h1:Tl7gQpYf4/TMU7AT84MN83/6PutY21Nb9fuQjFTpRRc= +github.com/jjti/go-spancheck v0.6.4/go.mod h1:yAEYdKJ2lRkDA8g7X+oKUHXOWVAXSBJRv04OhF+QUjk= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -312,12 +322,12 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= -github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ= +github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY= github.com/karamaru-alpha/copyloopvar v1.1.0 h1:x7gNyKcC2vRBO1H2Mks5u1VxQtYvFiym7fCjIP8RPos= github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k= -github.com/kisielk/errcheck v1.7.0 h1:+SbscKmWJ5mOK/bO1zS60F5I9WwZDWOfRsC4RwfwRV0= -github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= +github.com/kisielk/errcheck v1.8.0 h1:ZX/URYa7ilESY19ik/vBmCn6zdGQLxACwjAcWbHlYlg= +github.com/kisielk/errcheck v1.8.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkHAIKE/contextcheck v1.1.5 h1:CdnJh63tcDe53vG+RebdpdXJTc9atMgGqdx8LXxiilg= github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA= @@ -337,16 +347,20 @@ github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCT github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= -github.com/lasiar/canonicalheader v1.1.1 h1:wC+dY9ZfiqiPwAexUApFush/csSPXeIi4QqyxXmng8I= -github.com/lasiar/canonicalheader v1.1.1/go.mod h1:cXkb3Dlk6XXy+8MVQnF23CYKWlyA7kfQhSw2CcZtZb0= -github.com/ldez/gomoddirectives v0.2.4 h1:j3YjBIjEBbqZ0NKtBNzr8rtMHTOrLPeiwTkfUJZ3alg= -github.com/ldez/gomoddirectives 
v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g= -github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= -github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4= +github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI= +github.com/ldez/exptostd v0.3.1 h1:90yWWoAKMFHeovTK8uzBms9Ppp8Du/xQ20DRO26Ymrw= +github.com/ldez/exptostd v0.3.1/go.mod h1:iZBRYaUmcW5jwCR3KROEZ1KivQQp6PHXbDPk9hqJKCQ= +github.com/ldez/gomoddirectives v0.6.0 h1:Jyf1ZdTeiIB4dd+2n4qw+g4aI9IJ6JyfOZ8BityWvnA= +github.com/ldez/gomoddirectives v0.6.0/go.mod h1:TuwOGYoPAoENDWQpe8DMqEm5nIfjrxZXmxX/CExWyZ4= +github.com/ldez/grignotin v0.7.0 h1:vh0dI32WhHaq6LLPZ38g7WxXuZ1+RzyrJ7iPG9JMa8c= +github.com/ldez/grignotin v0.7.0/go.mod h1:uaVTr0SoZ1KBii33c47O1M8Jp3OP3YDwhZCmzT9GHEk= +github.com/ldez/tagliatelle v0.7.1 h1:bTgKjjc2sQcsgPiT902+aadvMjCeMHrY7ly2XKFORIk= +github.com/ldez/tagliatelle v0.7.1/go.mod h1:3zjxUpsNB2aEZScWiZTHrAXOl1x25t3cRmzfK1mlo2I= +github.com/ldez/usetesting v0.4.2 h1:J2WwbrFGk3wx4cZwSMiCQQ00kjGR0+tuuyW0Lqm4lwA= +github.com/ldez/usetesting v0.4.2/go.mod h1:eEs46T3PpQ+9RgN9VjpY6qWdiw2/QmfiDeWmdZdrjIQ= github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY= github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= -github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= -github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk= github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= @@ -364,12 +378,13 @@ github.com/mattn/go-colorable v0.1.13/go.mod 
h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mgechev/revive v1.3.7 h1:502QY0vQGe9KtYJ9FpxMz9rL+Fc/P13CI5POL4uHCcE= -github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= +github.com/mgechev/revive v1.5.1 h1:hE+QPeq0/wIzJwOphdVyUJ82njdd8Khp4fUIHGZHW3M= +github.com/mgechev/revive v1.5.1/go.mod h1:lC9AhkJIBs5zwx8wkudyHrU+IJkrEKmpCmGMnIJPk4o= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -379,26 +394,24 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= 
-github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= +github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI= +github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg= github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= -github.com/nunnatsa/ginkgolinter v0.16.2 h1:8iLqHIZvN4fTLDC0Ke9tbSZVcyVHoBs0HIbnVSxfHJk= -github.com/nunnatsa/ginkgolinter v0.16.2/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ= +github.com/nunnatsa/ginkgolinter v0.18.4 h1:zmX4KUR+6fk/vhUFt8DOP6KwznekhkmVSzzVJve2vyM= +github.com/nunnatsa/ginkgolinter v0.18.4/go.mod h1:AMEane4QQ6JwFz5GgjI5xLUM9S/CylO+UyM97fN2iBI= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo/v2 v2.17.3 h1:oJcvKpIb7/8uLpDDtnQuf18xVnwKp8DTD7DQ6gTd/MU= -github.com/onsi/ginkgo/v2 v2.17.3/go.mod 
h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= -github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= -github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= @@ -408,16 +421,16 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v1.5.1 h1:5gHxDjLyyWij7fhfrjYNNlHsUNQeyx0LFQKUelO3RBo= -github.com/polyfloyd/go-errorlint v1.5.1/go.mod h1:sH1QC1pxxi0fFecsVIzBmxtrgd9IF/SkJpA6wqyKAJs= +github.com/polyfloyd/go-errorlint v1.7.0 h1:Zp6lzCK4hpBDj8y8a237YK4EPrMXQWvOe3nGoH4pFrU= +github.com/polyfloyd/go-errorlint v1.7.0/go.mod h1:dGWKu85mGHnegQ2SWpEybFityCg3j7ZbwsVUxAOk9gY= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -442,8 +455,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= -github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= @@ -452,24 +465,29 @@ github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= 
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/raeperd/recvcheck v0.2.0 h1:GnU+NsbiCqdC2XX5+vMZzP+jAJC5fht7rcVTAhX74UI= +github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtzBnWNocnYU= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.3.2 h1:CuG27ulzEB1Gu5Dk5gP8PFxSOZ3ptSdP5iI/3IXxM18= -github.com/ryancurrah/gomodguard v1.3.2/go.mod h1:LqdemiFomEjcxOqirbQCb3JFvSxH2JUYMerTFd3sF2o= +github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU= +github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE= github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= -github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= -github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= 
-github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= -github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0= +github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= -github.com/sashamelentyev/usestdlibvars v1.25.0 h1:IK8SI2QyFzy/2OD2PYnhy84dpfNo9qADrRt6LH8vSzU= -github.com/sashamelentyev/usestdlibvars v1.25.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= -github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9 h1:rnO6Zp1YMQwv8AyxzuwsVohljJgp4L0ZqiCgtACsPsc= -github.com/securego/gosec/v2 v2.20.1-0.20240525090044-5f0084eb01a9/go.mod h1:dg7lPlu/xK/Ut9SedURCoZbVCR4yC7fM65DtH9/CDHs= +github.com/sashamelentyev/usestdlibvars v1.28.0 h1:jZnudE2zKCtYlGzLVreNp5pmCdOxXUzwsMDBkR21cyQ= +github.com/sashamelentyev/usestdlibvars v1.28.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/securego/gosec/v2 v2.21.4 h1:Le8MSj0PDmOnHJgUATjD96PaXRvCpKC+DGJvwyy0Mlk= +github.com/securego/gosec/v2 v2.21.4/go.mod h1:Jtb/MwRQfRxCXyCm1rfM1BEiiiTfUOdyzzAhlr6lUTA= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= @@ -481,18 +499,18 @@ 
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= -github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= -github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= -github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= -github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sivchari/tenv v1.12.1 h1:+E0QzjktdnExv/wwsnnyk4oqZBUfuh89YMQT1cyuvSY= +github.com/sivchari/tenv v1.12.1/go.mod h1:1LjSOUCc25snIr5n3DtGGrENhX3LuWefcplwVGC24mw= +github.com/sonatard/noctx v0.1.0 h1:JjqOc2WN16ISWAjAk8M5ej0RfExEXtkEyExl2hLW+OM= +github.com/sonatard/noctx v0.1.0/go.mod h1:0RvBxqY8D4j9cTTTWE8ylt2vqj2EPI8fHmrxHdsaZ2c= github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman 
v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -501,8 +519,8 @@ github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= -github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= -github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4= +github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -517,34 +535,34 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv 
v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= -github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= -github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= -github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= +github.com/tdakkota/asciicheck v0.3.0 h1:LqDGgZdholxZMaJgpM6b0U9CFIjDCbFdUF00bDnBKOQ= +github.com/tdakkota/asciicheck v0.3.0/go.mod h1:KoJKXuX/Z/lt6XzLo8WMBfQGzak0SrAKZlvRr4tg8Ac= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0= -github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= -github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= -github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= -github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= -github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= -github.com/tomarrell/wrapcheck/v2 v2.8.3 h1:5ov+Cbhlgi7s/a42BprYoxsr73CbdMUTzE3bRDFASUs= -github.com/tomarrell/wrapcheck/v2 v2.8.3/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= +github.com/tetafro/godot v1.4.20 h1:z/p8Ek55UdNvzt4TFn2zx2KscpW4rWqcnUrdmvWJj7E= +github.com/tetafro/godot v1.4.20/go.mod 
h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 h1:y4mJRFlM6fUyPhoXuFg/Yu02fg/nIPFMOY8tOqppoFg= +github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460= +github.com/timonwong/loggercheck v0.10.1 h1:uVZYClxQFpw55eh+PIoqM7uAOHMrhVcDoWDery9R8Lg= +github.com/timonwong/loggercheck v0.10.1/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8= +github.com/tomarrell/wrapcheck/v2 v2.10.0 h1:SzRCryzy4IrAH7bVGG4cK40tNUhmVmMDuJujy4XwYDg= +github.com/tomarrell/wrapcheck/v2 v2.10.0/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= -github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= -github.com/ultraware/whitespace v0.1.1 h1:bTPOGejYFulW3PkcrqkeQwOd6NKOOXvmGD9bo/Gk8VQ= -github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= -github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= -github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= +github.com/ultraware/funlen v0.2.0 h1:gCHmCn+d2/1SemTdYMiKLAHFYxTYz7z9VIDRaTGyLkI= +github.com/ultraware/funlen v0.2.0/go.mod h1:ZE0q4TsJ8T1SQcjmkhN/w+MceuatI6pBFSxxyteHIJA= +github.com/ultraware/whitespace v0.2.0 h1:TYowo2m9Nfj1baEQBjuHzvMRbp19i+RCcRYrSWoFa+g= +github.com/ultraware/whitespace v0.2.0/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYRA= +github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= +github.com/uudashr/iface v1.3.0 h1:zwPch0fs9tdh9BmL5kcgSpvnObV+yHjO4JjVBl8IA10= 
+github.com/uudashr/iface v1.3.0/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg= github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= @@ -564,10 +582,10 @@ gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= go-simpler.org/assert v0.9.0 h1:PfpmcSvL7yAnWyChSjOz6Sp6m9j5lyK8Ok9pEL31YkQ= go-simpler.org/assert v0.9.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= -go-simpler.org/musttag v0.12.2 h1:J7lRc2ysXOq7eM8rwaTYnNrHd5JwjppzB6mScysB2Cs= -go-simpler.org/musttag v0.12.2/go.mod h1:uN1DVIasMTQKk6XSik7yrJoEysGtR2GRqvWnI9S7TYM= -go-simpler.org/sloglint v0.7.0 h1:rMZRxD9MbaGoRFobIOicMxZzum7AXNFDlez6xxJs5V4= -go-simpler.org/sloglint v0.7.0/go.mod h1:g9SXiSWY0JJh4LS39/Q0GxzP/QX2cVcbTOYhDpXrJEs= +go-simpler.org/musttag v0.13.0 h1:Q/YAW0AHvaoaIbsPj3bvEI5/QFP7w696IMUpnKXQfCE= +go-simpler.org/musttag v0.13.0/go.mod h1:FTzIGeK6OkKlUDVpj0iQUXZLUO1Js9+mvykDQy9C5yM= +go-simpler.org/sloglint v0.7.2 h1:Wc9Em/Zeuu7JYpl+oKoYOsQSy2X560aVueCW/m6IijY= +go-simpler.org/sloglint v0.7.2/go.mod h1:US+9C80ppl7VsThQclkM7BkCHQAzuz8kHLsW3ppuluo= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -575,8 +593,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/automaxprocs v1.5.3 
h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= @@ -590,7 +608,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -601,12 +620,12 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp 
v0.0.0-20240103183307-be819d1f06fc h1:ao2WRsKSzW6KuUY9IWPwWahcHCgR0s52IfwutMfEbdM= -golang.org/x/exp v0.0.0-20240103183307-be819d1f06fc/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= -golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f h1:WTyX8eCCyfdqiPYkRGm0MqElSfYFH3yR1+rl/mct9sA= +golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -629,14 +648,15 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod 
v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -671,12 +691,15 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.25.0 
h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI= +golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -696,8 +719,10 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -731,7 +756,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -743,23 +767,27 @@ golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
@@ -770,8 +798,11 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -792,7 +823,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -823,21 +853,20 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.14.0/go.mod 
h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -917,14 +946,14 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -945,12 +974,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.4.7 h1:9MDAWxMoSnB6QoSqiVr7P5mtkT9pOc1kSxchzPCnqJs= -honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= -mvdan.cc/gofumpt v0.6.0 h1:G3QvahNDmpD+Aek/bNOLrFR2XC6ZAdo62dZu65gmwGo= -mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= -mvdan.cc/unparam v0.0.0-20240427195214-063aff900ca1 h1:Nykk7fggxChwLK4rUPYESzeIwqsuxXXlFEAh5YhaMRo= -mvdan.cc/unparam v0.0.0-20240427195214-063aff900ca1/go.mod h1:ZzZjEpJDOmx8TdVU6umamY3Xy0UAQUI2DHbf05USVbI= +honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I= +honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= +mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= +mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo= +mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U= +mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git 
a/tools/maprange/go.mod b/tools/maprange/go.mod index d2ed5e21a6..161d5b46c3 100644 --- a/tools/maprange/go.mod +++ b/tools/maprange/go.mod @@ -1,10 +1,10 @@ module github.com/onflow/cadence/tools/maprange -go 1.22 +go 1.23 -require golang.org/x/tools v0.21.0 +require golang.org/x/tools v0.28.0 require ( - golang.org/x/mod v0.17.0 // indirect - golang.org/x/sync v0.7.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect ) diff --git a/tools/maprange/go.sum b/tools/maprange/go.sum index 05680b3128..e3144c2c5f 100644 --- a/tools/maprange/go.sum +++ b/tools/maprange/go.sum @@ -1,6 +1,8 @@ -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= diff --git a/tools/storage-explorer/go.mod b/tools/storage-explorer/go.mod index f706377729..03f9bd4300 100644 --- a/tools/storage-explorer/go.mod +++ b/tools/storage-explorer/go.mod @@ -1,12 +1,12 @@ module github.com/onflow/cadence/tools/storage-explorer 
-go 1.22 +go 1.23 require ( github.com/gorilla/mux v1.8.1 - github.com/onflow/atree v0.8.0 - github.com/onflow/cadence v1.0.0-preview.52 - github.com/onflow/flow-go v0.37.10 + github.com/onflow/atree v0.9.0 + github.com/onflow/cadence v1.0.0-preview-atree-register-inlining.29 + github.com/onflow/flow-go v0.35.7-crescendo-preview.23-atree-inlining github.com/rs/zerolog v1.32.0 ) @@ -164,7 +164,7 @@ require ( github.com/spf13/viper v1.15.0 // indirect github.com/status-im/keycard-go v0.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.9.0 // indirect + github.com/stretchr/testify v1.10.0 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect @@ -177,10 +177,7 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.11 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/zeebo/blake3 v0.2.3 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect diff --git a/tools/storage-explorer/go.sum b/tools/storage-explorer/go.sum index 728dc28555..e881014560 100644 --- a/tools/storage-explorer/go.sum +++ b/tools/storage-explorer/go.sum @@ -1903,11 +1903,11 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= 
github.com/onflow/atree v0.6.1-0.20230711151834-86040b30171f/go.mod h1:xvP61FoOs95K7IYdIYRnNcYQGf4nbF/uuJ0tHf4DRuM= -github.com/onflow/atree v0.8.0 h1:qg5c6J1gVDNObughpEeWm8oxqhPGdEyGrda121GM4u0= -github.com/onflow/atree v0.8.0/go.mod h1:yccR+LR7xc1Jdic0mrjocbHvUD7lnVvg8/Ct1AA5zBo= +github.com/onflow/atree v0.8.0-rc.6 h1:GWgaylK24b5ta2Hq+TvyOF7X5tZLiLzMMn7lEt59fsA= +github.com/onflow/atree v0.8.0-rc.6/go.mod h1:yccR+LR7xc1Jdic0mrjocbHvUD7lnVvg8/Ct1AA5zBo= +github.com/onflow/atree v0.9.0/go.mod h1:FT6udJF9Q7VQTu3wknDhFX+VV4D44ZGdqtTAE5iztck= github.com/onflow/cadence v1.0.0-M3/go.mod h1:odXGZZ/wGNA5mwT8bC9v8u8EXACHllB2ABSZK65TGL8= -github.com/onflow/cadence v1.0.0-preview.52 h1:hZ92e6lL2+PQa3C1i5jJh0zZYFdW89+X1MS0Bkd6Ayo= -github.com/onflow/cadence v1.0.0-preview.52/go.mod h1:7wvvecnAZtYOspLOS3Lh+FuAmMeSrXhAWiycC3kQ1UU= +github.com/onflow/cadence v1.0.0-preview-atree-register-inlining.29/go.mod h1:KclJlSGWG4USgPK4CsI3V/YtCHYOwPpjyzb6iEfWlbM= github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/onflow/crypto v0.25.2 h1:GjHunqVt+vPcdqhxxhAXiMIF3YiLX7gTuTR5O+VG2ns= github.com/onflow/crypto v0.25.2/go.mod h1:fY7eLqUdMKV8EGOw301unP8h7PvLVy8/6gVR++/g0BY= @@ -1919,6 +1919,7 @@ github.com/onflow/flow-ft/lib/go/contracts v1.0.0 h1:mToacZ5NWqtlWwk/7RgIl/jeKB/ github.com/onflow/flow-ft/lib/go/contracts v1.0.0/go.mod h1:PwsL8fC81cjnUnTfmyL/HOIyHnyaw/JA474Wfj2tl6A= github.com/onflow/flow-ft/lib/go/templates v1.0.0 h1:6cMS/lUJJ17HjKBfMO/eh0GGvnpElPgBXx7h5aoWJhs= github.com/onflow/flow-ft/lib/go/templates v1.0.0/go.mod h1:uQ8XFqmMK2jxyBSVrmyuwdWjTEb+6zGjRYotfDJ5pAE= +github.com/onflow/flow-go v0.35.7-crescendo-preview.23-atree-inlining/go.mod h1:rTPlD+FVYJDKp+TbVkoOlo9cEZ1co3w438/o/IUGgH8= github.com/onflow/flow-go v0.37.10 h1:Nz2Gp63+0ubb9FuQaEZgCsXNXM5WsXq/j0ukC74N5Vw= github.com/onflow/flow-go v0.37.10/go.mod h1:bfOCsCk0v1J93vXd+zrYkCmRIVOaL9oAXvNFWgVOujE= github.com/onflow/flow-go-sdk v1.0.0-M1/go.mod 
h1:TDW0MNuCs4SvqYRUzkbRnRmHQL1h4X8wURsCw9P9beo= @@ -2137,6 +2138,7 @@ github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/supranational/blst v0.3.8-0.20220526154634-513d2456b344/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= @@ -2204,6 +2206,7 @@ github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= diff --git a/tools/storage-explorer/main.go b/tools/storage-explorer/main.go index d9123f955c..6ec3e72fc5 100644 --- a/tools/storage-explorer/main.go +++ b/tools/storage-explorer/main.go @@ -184,7 +184,7 @@ func NewAccountStorageMapKeysHandler( } var keys []string - storageMap := storage.GetStorageMap(address, storageMapDomain, false) + storageMap := storage.GetDomainStorageMap(address, storageMapDomain, false) if storageMap == nil { keys = make([]string, 0) } else { @@ -225,7 +225,7 @@ func 
NewAccountStorageMapValueHandler( return } - storageMap := storage.GetStorageMap(address, storageMapDomain, false) + storageMap := storage.GetDomainStorageMap(address, storageMapDomain, false) if storageMap == nil { http.Error(w, "storage map does not exist", http.StatusNotFound) return diff --git a/tools/unkeyed/go.mod b/tools/unkeyed/go.mod index 0042561a2f..1bc0975d32 100644 --- a/tools/unkeyed/go.mod +++ b/tools/unkeyed/go.mod @@ -1,13 +1,13 @@ module github.com/onflow/cadence/tools/unkeyed -go 1.22 +go 1.23 require ( - golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f - golang.org/x/tools v0.21.0 + golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f + golang.org/x/tools v0.28.0 ) require ( - golang.org/x/mod v0.17.0 // indirect - golang.org/x/sync v0.7.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/sync v0.10.0 // indirect ) diff --git a/tools/unkeyed/go.sum b/tools/unkeyed/go.sum index 5327c8597f..3abc6186b6 100644 --- a/tools/unkeyed/go.sum +++ b/tools/unkeyed/go.sum @@ -1,8 +1,10 @@ -golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= -golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f 
h1:WTyX8eCCyfdqiPYkRGm0MqElSfYFH3yR1+rl/mct9sA= +golang.org/x/exp/typeparams v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= diff --git a/values.go b/values.go index d3799c0a88..fe235afd84 100644 --- a/values.go +++ b/values.go @@ -653,7 +653,7 @@ var _ Value = UInt{} func NewUInt(i uint) UInt { return UInt{ - Value: big.NewInt(int64(i)), + Value: (&big.Int{}).SetUint64(uint64(i)), } } @@ -860,7 +860,7 @@ var UInt128MemoryUsage = common.NewCadenceBigIntMemoryUsage(16) func NewUInt128(i uint) UInt128 { return UInt128{ - Value: big.NewInt(int64(i)), + Value: (&big.Int{}).SetUint64(uint64(i)), } } @@ -924,7 +924,7 @@ var UInt256MemoryUsage = common.NewCadenceBigIntMemoryUsage(32) func NewUInt256(i uint) UInt256 { return UInt256{ - Value: big.NewInt(int64(i)), + Value: (&big.Int{}).SetUint64(uint64(i)), } } @@ -1134,7 +1134,7 @@ var Word128MemoryUsage = common.NewCadenceBigIntMemoryUsage(16) func NewWord128(i uint) Word128 { return Word128{ - Value: big.NewInt(int64(i)), + Value: (&big.Int{}).SetUint64(uint64(i)), } } @@ -1198,7 +1198,7 @@ var Word256MemoryUsage = common.NewCadenceBigIntMemoryUsage(32) func NewWord256(i uint) Word256 { return Word256{ - Value: big.NewInt(int64(i)), + Value: (&big.Int{}).SetUint64(uint64(i)), } } diff --git a/values_test.go b/values_test.go index ac5ea3b067..fd960f180a 100644 --- a/values_test.go +++ b/values_test.go @@ -20,6 +20,7 @@ package cadence import ( "fmt" + "math" "math/big" "testing" "unicode/utf8" @@ -58,8 +59,8 @@ func 
newValueTestCases() map[string]valueTestCase { return map[string]valueTestCase{ "UInt": { - value: NewUInt(10), - string: "10", + value: NewUInt(math.MaxUint64), + string: "18446744073709551615", expectedType: UIntType, }, "UInt8": { @@ -83,13 +84,13 @@ func newValueTestCases() map[string]valueTestCase { expectedType: UInt64Type, }, "UInt128": { - value: NewUInt128(128), - string: "128", + value: NewUInt128(math.MaxUint64), + string: "18446744073709551615", expectedType: UInt128Type, }, "UInt256": { - value: NewUInt256(256), - string: "256", + value: NewUInt256(math.MaxUint64), + string: "18446744073709551615", expectedType: UInt256Type, }, "Int": { @@ -148,13 +149,13 @@ func newValueTestCases() map[string]valueTestCase { expectedType: Word64Type, }, "Word128": { - value: NewWord128(128), - string: "128", + value: NewWord128(math.MaxUint64), + string: "18446744073709551615", expectedType: Word128Type, }, "Word256": { - value: NewWord256(256), - string: "256", + value: NewWord256(math.MaxUint64), + string: "18446744073709551615", expectedType: Word256Type, }, "UFix64": { diff --git a/version.go b/version.go index 5052b1647e..315c0d8fc3 100644 --- a/version.go +++ b/version.go @@ -21,4 +21,4 @@ package cadence -const Version = "v1.2.2" +const Version = "v1.3.1" diff --git a/vm/vm.go b/vm/vm.go deleted file mode 100644 index 74f2cf0542..0000000000 --- a/vm/vm.go +++ /dev/null @@ -1,164 +0,0 @@ -//go:build wasmtime -// +build wasmtime - -/* - * Cadence - The resource-oriented smart contract programming language - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package vm - -import ( - "fmt" - "math/big" - - "C" - - "github.com/bytecodealliance/wasmtime-go/v7" - - "github.com/onflow/cadence/interpreter" -) - -type VM interface { - Invoke(name string, arguments ...interpreter.Value) (interpreter.Value, error) -} - -type vm struct { - instance *wasmtime.Instance - store *wasmtime.Store -} - -func (m *vm) Invoke(name string, arguments ...interpreter.Value) (interpreter.Value, error) { - f := m.instance.GetExport(m.store, name).Func() - - rawArguments := make([]any, len(arguments)) - for i, argument := range arguments { - rawArguments[i] = argument - } - - res, err := f.Call(m.store, rawArguments...) 
- if err != nil { - return nil, err - } - - if res == nil { - return nil, nil - } - - return res.(interpreter.Value), nil -} - -func NewVM(wasm []byte) (VM, error) { - - inter, err := interpreter.NewInterpreter(nil, nil, &interpreter.Config{}) - if err != nil { - return nil, err - } - - config := wasmtime.NewConfig() - config.SetWasmReferenceTypes(true) - - engine := wasmtime.NewEngineWithConfig(config) - - store := wasmtime.NewStore(engine) - - module, err := wasmtime.NewModule(store.Engine, wasm) - if err != nil { - return nil, err - } - - intFunc := wasmtime.WrapFunc( - store, - func(caller *wasmtime.Caller, offset int32, length int32) (any, *wasmtime.Trap) { - if offset < 0 { - return nil, wasmtime.NewTrap(fmt.Sprintf("Int: invalid offset: %d", offset)) - } - - if length < 2 { - return nil, wasmtime.NewTrap(fmt.Sprintf("Int: invalid length: %d", length)) - } - - mem := caller.GetExport("mem").Memory() - - bytes := C.GoBytes(mem.Data(store), C.int(length)) - - value := new(big.Int).SetBytes(bytes[1:]) - if bytes[0] == 0 { - value = value.Neg(value) - } - - return interpreter.NewUnmeteredIntValueFromBigInt(value), nil - }, - ) - - stringFunc := wasmtime.WrapFunc( - store, - func(caller *wasmtime.Caller, offset int32, length int32) (any, *wasmtime.Trap) { - if offset < 0 { - return nil, wasmtime.NewTrap(fmt.Sprintf("String: invalid offset: %d", offset)) - } - - if length < 0 { - return nil, wasmtime.NewTrap(fmt.Sprintf("String: invalid length: %d", length)) - } - - mem := caller.GetExport("mem").Memory() - - bytes := C.GoBytes(mem.Data(store), C.int(length)) - - return interpreter.NewUnmeteredStringValue(string(bytes)), nil - }, - ) - - addFunc := wasmtime.WrapFunc( - store, - func(left, right any) (any, *wasmtime.Trap) { - leftNumber, ok := left.(interpreter.NumberValue) - if !ok { - return nil, wasmtime.NewTrap(fmt.Sprintf("add: invalid left: %#+v", left)) - } - - rightNumber, ok := right.(interpreter.NumberValue) - if !ok { - return nil, 
wasmtime.NewTrap(fmt.Sprintf("add: invalid right: %#+v", right)) - } - - return leftNumber.Plus(inter, rightNumber, interpreter.EmptyLocationRange), nil - }, - ) - - // NOTE: wasmtime currently does not support specifying imports by name, - // unlike other WebAssembly APIs like wasmer, JavaScript, etc., - // i.e. imports are imported in the order they are given. - - instance, err := wasmtime.NewInstance( - store, - module, - []wasmtime.AsExtern{ - intFunc, - stringFunc, - addFunc, - }, - ) - if err != nil { - return nil, err - } - - return &vm{ - instance: instance, - store: store, - }, nil -}