diff --git a/.github/workflows/gosec.yaml b/.github/workflows/gosec.yaml index bfd86ffc1..ecd811852 100644 --- a/.github/workflows/gosec.yaml +++ b/.github/workflows/gosec.yaml @@ -44,5 +44,5 @@ jobs: { "commit-url": "${{ github.event.head_commit.url }}", "branch": "${{ github.ref }}", - "repository": "${{ github.repository }}", - } \ No newline at end of file + "repository": "${{ github.repository }}" + } diff --git a/.github/workflows/on-master-commit.yaml b/.github/workflows/on-master-commit.yaml index 95addefd4..9a73b8c0a 100644 --- a/.github/workflows/on-master-commit.yaml +++ b/.github/workflows/on-master-commit.yaml @@ -1,9 +1,11 @@ -name: Master branch CI +name: Master / Release CI on: push: branches: - 'master' + - 'release/*' + - 'releases/*' jobs: run-unit-tests: @@ -34,6 +36,7 @@ jobs: outputs: tag_date: ${{ steps.tag_date.outputs.tag_date }} short_sha: ${{ steps.short_sha.outputs.short_sha }} + clean_ref_name: ${{ steps.clean_ref_name.outputs.clean_ref_name }} steps: - name: Generate Tag Date id: tag_date @@ -41,6 +44,10 @@ jobs: - name: Generate Short SHA id: short_sha run: echo "short_sha=$(echo $GITHUB_SHA | cut -c1-7)" >> "$GITHUB_OUTPUT" + - name: Clean Ref Name + id: clean_ref_name + # replace '/' with '-' in the case of release branches + run: echo "clean_ref_name=$(echo $GITHUB_REF_NAME | sed 's/\//-/g')" >> "$GITHUB_OUTPUT" publish-docker-image: name: Publish Docker Image @@ -58,12 +65,10 @@ jobs: with: images: | ghcr.io/${{ github.repository }} - # eg: master-20240321-7d8e9f2 tags: | - type=raw,value=master-${{ needs.generate-tags.outputs.tag_date }}-${{ needs.generate-tags.outputs.short_sha }} - type=raw,value=master-latest + type=raw,value=${{ needs.generate-tags.outputs.clean_ref_name }}-${{ needs.generate-tags.outputs.tag_date }}-${{ needs.generate-tags.outputs.short_sha }} + type=raw,value=${{ needs.generate-tags.outputs.clean_ref_name }}-latest trigger_internal_ci: true - notify-slack: name: Notify Slack needs: diff --git a/.github/workflows/publish-docker-images.yaml b/.github/workflows/publish-docker-images.yaml index b75138035..3e7c8b53d 100644 --- a/.github/workflows/publish-docker-images.yaml +++ b/.github/workflows/publish-docker-images.yaml @@ -31,7 +31,7 @@ on: jobs: build-and-push-image: name: Build and Push Docker Image - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 permissions: contents: read diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index d6c2bd010..5abec3e50 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -51,8 +51,9 @@ jobs: run: make test-coverage - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v5 + if: ${{ github.repository == 'vechain/thor' }} with: fail_ci_if_error: true - file: ./coverage.out + files: ./coverage.out token: ${{ secrets.CODECOV_TOKEN }} diff --git a/Dockerfile b/Dockerfile index 3dbe82284..742506671 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,6 +3,10 @@ FROM golang:1.22-alpine3.20 AS builder RUN apk add --no-cache make gcc musl-dev linux-headers git WORKDIR /go/thor + +COPY go.mod go.sum ./ +RUN go mod download + COPY . 
/go/thor
 RUN make all
diff --git a/api/accounts/accounts.go b/api/accounts/accounts.go
index 4e78e8711..508035bbd 100644
--- a/api/accounts/accounts.go
+++ b/api/accounts/accounts.go
@@ -298,7 +298,9 @@ func (a *Accounts) handleBatchCallData(batchCallData *BatchCallData) (txCtx *xen
 		gas = batchCallData.Gas
 	}
-	txCtx = &xenv.TransactionContext{}
+	txCtx = &xenv.TransactionContext{
+		ClauseCount: big.NewInt(int64(len(batchCallData.Clauses))),
+	}
 
 	if batchCallData.GasPrice == nil {
 		txCtx.GasPrice = new(big.Int)
diff --git a/api/debug/debug.go b/api/debug/debug.go
index 497518982..667ff2302 100644
--- a/api/debug/debug.go
+++ b/api/debug/debug.go
@@ -376,14 +376,15 @@ func (d *Debug) parseTarget(target string) (block *block.Block, txID thor.Bytes3
 	if err != nil {
 		return nil, thor.Bytes32{}, 0, utils.BadRequest(errors.WithMessage(err, "target([0]"))
 	}
-	txMeta, err := d.repo.NewBestChain().GetTransactionMeta(txID)
+	bestChain := d.repo.NewBestChain()
+	txMeta, err := bestChain.GetTransactionMeta(txID)
 	if err != nil {
 		if d.repo.IsNotFound(err) {
 			return nil, thor.Bytes32{}, 0, utils.Forbidden(errors.New("transaction not found"))
 		}
 		return nil, thor.Bytes32{}, 0, err
 	}
-	block, err = d.repo.GetBlock(txMeta.BlockID)
+	block, err = bestChain.GetBlock(txMeta.BlockNum)
 	if err != nil {
 		return nil, thor.Bytes32{}, 0, err
 	}
diff --git a/api/debug/debug_test.go b/api/debug/debug_test.go
index d56718143..478fe4a42 100644
--- a/api/debug/debug_test.go
+++ b/api/debug/debug_test.go
@@ -28,6 +28,7 @@ import (
 	"github.com/vechain/thor/v2/thor"
 	"github.com/vechain/thor/v2/thorclient"
 	"github.com/vechain/thor/v2/tracers/logger"
+	"github.com/vechain/thor/v2/trie"
 	"github.com/vechain/thor/v2/tx"
 
 	// Force-load the tracer native engines to trigger registration
@@ -94,8 +95,7 @@ func TestDebug(t *testing.T) {
 }
 
 func TestStorageRangeFunc(t *testing.T) {
-	db := muxdb.NewMem()
-	state := state.New(db, thor.Bytes32{}, 0, 0, 0)
+	state := state.New(muxdb.NewMem(), trie.Root{})
 
 	// Create an account and set storage values
 	addr := thor.BytesToAddress([]byte("account1"))
@@ -124,8 +124,7 @@ func TestStorageRangeFunc(t *testing.T) {
 }
 
 func TestStorageRangeMaxResult(t *testing.T) {
-	db := muxdb.NewMem()
-	state := state.New(db, thor.Bytes32{}, 0, 0, 0)
+	state := state.New(muxdb.NewMem(), trie.Root{})
 
 	addr := thor.BytesToAddress([]byte("account1"))
 	for i := 0; i < 1001; i++ {
diff --git a/api/doc/thor.yaml b/api/doc/thor.yaml
index ace9a2adc..99416a039 100644
--- a/api/doc/thor.yaml
+++ b/api/doc/thor.yaml
@@ -12,7 +12,7 @@ info:
   license:
     name: LGPL 3.0
     url: https://www.gnu.org/licenses/lgpl-3.0.en.html
-  version: 2.1.6
+  version: 2.2.0
 servers:
   - url: /
     description: Current Node
@@ -1326,6 +1326,16 @@ components:
           description: The index of the clause in the transaction, from which the log was generated.
           example: 0
           nullable: false
+        txIndex:
+          description: The index of the transaction in the block, from which the log was generated.
+          type: integer
+          nullable: true
+          example: 1
+        logIndex:
+          description: The index of the log in the receipt's outputs. This is an overall index across all clauses.
+          type: integer
+          nullable: true
+          example: 1
 
     Block:
       title: Block
@@ -1856,6 +1866,11 @@
           The limit of records to be included in the output.
           Use this parameter for pagination.
           Defaults to all results.
+        includeIndexes:
+          type: boolean
+          example: true
+          nullable: true
+          description: Include both transaction and log index in the response.
       description: |
         Include these parameters to receive filtered results in a paged format.
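The `includeIndexes` option added above threads through the whole stack: when a client sets it, each returned log's `meta` gains the optional `txIndex` and `logIndex` fields documented earlier in this hunk; when it is absent, the fields are omitted and the response stays backward compatible. A minimal client-side sketch of the new option (the node URL `http://localhost:8669` and the standalone program are illustrative assumptions; the endpoint path and field names come from this diff):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Filter body mirroring the documented options object; no criteria
	// are set, so all events in range are matched.
	body, err := json.Marshal(map[string]interface{}{
		"options": map[string]interface{}{
			"offset":         0,
			"limit":          10,
			"includeIndexes": true, // ask for txIndex/logIndex in each log's meta
		},
	})
	if err != nil {
		panic(err)
	}

	resp, err := http.Post("http://localhost:8669/logs/event", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// With includeIndexes set, each log's meta carries the optional
	// txIndex/logIndex fields; without it they are omitted entirely.
	var logs []struct {
		Meta struct {
			TxID     string  `json:"txID"`
			TxIndex  *uint32 `json:"txIndex"`
			LogIndex *uint32 `json:"logIndex"`
		} `json:"meta"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&logs); err != nil {
		panic(err)
	}
	for _, l := range logs {
		if l.Meta.TxIndex != nil && l.Meta.LogIndex != nil {
			fmt.Printf("tx %s: txIndex=%d logIndex=%d\n", l.Meta.TxID, *l.Meta.TxIndex, *l.Meta.LogIndex)
		}
	}
}
```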
@@ -1866,7 +1881,8 @@ components:
        {
          "options": {
            "offset": 0,
-           "limit": 10
+           "limit": 10,
+           "includeIndexes": true
          }
        }
        ```
@@ -1917,6 +1933,26 @@ components:
        }
        ```
        This refers to the range from block 10 to block 1000.
+
+    EventOptionalData:
+      nullable: true
+      type: object
+      title: EventOptionalData
+      properties:
+        txIndex:
+          type: boolean
+          example: true
+          nullable: true
+          description: |
+            Specifies whether to include the event's transaction index in the response.
+        logIndex:
+          type: boolean
+          example: true
+          nullable: true
+          description: |
+            Specifies whether to include the event's log index in the response.
+      description: |
+        Specifies all the optional data that can be included in the response.
 
     EventCriteria:
       type: object
diff --git a/api/events/events.go b/api/events/events.go
index 669c47b03..d203212db 100644
--- a/api/events/events.go
+++ b/api/events/events.go
@@ -44,7 +44,7 @@ func (e *Events) filter(ctx context.Context, ef *EventFilter) ([]*FilteredEvent,
 	}
 	fes := make([]*FilteredEvent, len(events))
 	for i, e := range events {
-		fes[i] = convertEvent(e)
+		fes[i] = convertEvent(e, ef.Options.IncludeIndexes)
 	}
 	return fes, nil
 }
@@ -60,9 +60,10 @@ func (e *Events) handleFilter(w http.ResponseWriter, req *http.Request) error {
 	if filter.Options == nil {
 		// if filter.Options is nil, set to the default limit +1
 		// to detect whether there are more logs than the default limit
-		filter.Options = &logdb.Options{
-			Offset: 0,
-			Limit:  e.limit + 1,
+		filter.Options = &Options{
+			Offset:         0,
+			Limit:          e.limit + 1,
+			IncludeIndexes: false,
 		}
 	}
diff --git a/api/events/events_test.go b/api/events/events_test.go
index b1268d378..ffc247843 100644
--- a/api/events/events_test.go
+++ b/api/events/events_test.go
@@ -7,6 +7,7 @@ package events_test
 
 import (
 	"encoding/json"
+	"math/big"
 	"net/http"
 	"net/http/httptest"
 	"strings"
@@ -16,8 +17,10 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"github.com/vechain/thor/v2/api/events"
-	"github.com/vechain/thor/v2/block"
+	"github.com/vechain/thor/v2/builtin"
+	"github.com/vechain/thor/v2/genesis"
 	"github.com/vechain/thor/v2/logdb"
+	"github.com/vechain/thor/v2/test/datagen"
 	"github.com/vechain/thor/v2/test/testchain"
 	"github.com/vechain/thor/v2/thor"
 	"github.com/vechain/thor/v2/thorclient"
@@ -28,8 +31,6 @@ const defaultLogLimit uint64 = 1000
 
 var (
 	ts      *httptest.Server
-	addr    = thor.BytesToAddress([]byte("address"))
-	topic   = thor.BytesToBytes32([]byte("topic"))
 	tclient *thorclient.Client
 )
 
@@ -52,20 +53,70 @@ func TestEvents(t *testing.T) {
 	blocksToInsert := 5
 
 	tclient = thorclient.New(ts.URL)
-	insertBlocks(t, thorChain.LogDB(), blocksToInsert)
+	insertBlocks(t, thorChain, blocksToInsert)
 
 	testEventWithBlocks(t, blocksToInsert)
 }
 
+func TestOptionalIndexes(t *testing.T) {
+	thorChain := initEventServer(t, defaultLogLimit)
+	defer ts.Close()
+	insertBlocks(t, thorChain, 5)
+	tclient = thorclient.New(ts.URL)
+
+	testCases := []struct {
+		name           string
+		includeIndexes bool
+		expected       *uint32
+	}{
+		{
+			name:           "do not include indexes",
+			includeIndexes: false,
+			expected:       nil,
+		},
+		{
+			name:           "include indexes",
+			includeIndexes: true,
+			expected:       new(uint32),
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			filter := events.EventFilter{
+				CriteriaSet: make([]*events.EventCriteria, 0),
+				Range:       nil,
+				Options:     &events.Options{Limit: 6, IncludeIndexes: tc.includeIndexes},
+				Order:       logdb.DESC,
+			}
+
+			res, statusCode, err := tclient.RawHTTPClient().RawHTTPPost("/logs/event", filter)
+
assert.NoError(t, err) + assert.Equal(t, http.StatusOK, statusCode) + var tLogs []*events.FilteredEvent + if err := json.Unmarshal(res, &tLogs); err != nil { + t.Fatal(err) + } + assert.Equal(t, http.StatusOK, statusCode) + assert.Equal(t, 5, len(tLogs)) + + for _, tLog := range tLogs { + assert.Equal(t, tc.expected, tLog.Meta.TxIndex) + assert.Equal(t, tc.expected, tLog.Meta.LogIndex) + } + }) + } +} + func TestOption(t *testing.T) { thorChain := initEventServer(t, 5) defer ts.Close() - insertBlocks(t, thorChain.LogDB(), 5) + insertBlocks(t, thorChain, 5) tclient = thorclient.New(ts.URL) filter := events.EventFilter{ CriteriaSet: make([]*events.EventCriteria, 0), Range: nil, - Options: &logdb.Options{Limit: 6}, + Options: &events.Options{Limit: 6}, Order: logdb.DESC, } @@ -93,13 +144,47 @@ func TestOption(t *testing.T) { assert.Equal(t, 5, len(tLogs)) // when the filtered events exceed the limit, should return the forbidden - insertBlocks(t, thorChain.LogDB(), 6) + insertBlocks(t, thorChain, 6) res, statusCode, err = tclient.RawHTTPClient().RawHTTPPost("/logs/event", filter) require.NoError(t, err) assert.Equal(t, http.StatusForbidden, statusCode) assert.Equal(t, "the number of filtered logs exceeds the maximum allowed value of 5, please use pagination", strings.Trim(string(res), "\n")) } +func TestZeroFrom(t *testing.T) { + thorChain := initEventServer(t, 100) + defer ts.Close() + insertBlocks(t, thorChain, 5) + + tclient = thorclient.New(ts.URL) + transferTopic := thor.MustParseBytes32("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") + criteria := []*events.EventCriteria{ + { + TopicSet: events.TopicSet{ + Topic0: &transferTopic, + }, + }, + } + + from := uint64(0) + filter := events.EventFilter{ + CriteriaSet: criteria, + Range: &events.Range{From: &from}, + Options: nil, + Order: logdb.DESC, + } + + res, statusCode, err := tclient.RawHTTPClient().RawHTTPPost("/logs/event", filter) + require.NoError(t, err) + var tLogs []*events.FilteredEvent + if err := json.Unmarshal(res, &tLogs); err != nil { + t.Fatal(err) + } + + assert.Equal(t, http.StatusOK, statusCode) + assert.NotEmpty(t, tLogs) +} + // Test functions func testEventsBadRequest(t *testing.T) { badBody := []byte{0x00, 0x01, 0x02} @@ -149,16 +234,14 @@ func testEventWithBlocks(t *testing.T, expectedBlocks int) { assert.NotEmpty(t, tLog) } + transferEvent := thor.MustParseBytes32("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef") + // Test with matching filter matchingFilter := events.EventFilter{ CriteriaSet: []*events.EventCriteria{{ - Address: &addr, + Address: &builtin.Energy.Address, TopicSet: events.TopicSet{ - &topic, - &topic, - &topic, - &topic, - &topic, + Topic0: &transferEvent, }, }}, } @@ -189,41 +272,17 @@ func initEventServer(t *testing.T, limit uint64) *testchain.Chain { } // Utilities functions -func insertBlocks(t *testing.T, db *logdb.LogDB, n int) { - b := new(block.Builder).Build() - for i := 0; i < n; i++ { - b = new(block.Builder). - ParentID(b.Header().ID()). 
- Build() - receipts := tx.Receipts{newReceipt()} +func insertBlocks(t *testing.T, chain *testchain.Chain, n int) { + transferABI, ok := builtin.Energy.ABI.MethodByName("transfer") + require.True(t, ok) - w := db.NewWriter() - if err := w.Write(b, receipts); err != nil { - t.Fatal(err) - } + encoded, err := transferABI.EncodeInput(genesis.DevAccounts()[2].Address, new(big.Int).SetUint64(datagen.RandUint64())) + require.NoError(t, err) - if err := w.Commit(); err != nil { - t.Fatal(err) - } - } -} + transferClause := tx.NewClause(&builtin.Energy.Address).WithData(encoded) -func newReceipt() *tx.Receipt { - return &tx.Receipt{ - Outputs: []*tx.Output{ - { - Events: tx.Events{{ - Address: addr, - Topics: []thor.Bytes32{ - topic, - topic, - topic, - topic, - topic, - }, - Data: []byte("0x0"), - }}, - }, - }, + for i := 0; i < n; i++ { + err := chain.MintClauses(genesis.DevAccounts()[0], []*tx.Clause{transferClause}) + require.NoError(t, err) } } diff --git a/api/events/types.go b/api/events/types.go index 0dce06aa4..432099085 100644 --- a/api/events/types.go +++ b/api/events/types.go @@ -6,9 +6,6 @@ package events import ( - "fmt" - "math" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/vechain/thor/v2/block" "github.com/vechain/thor/v2/chain" @@ -23,6 +20,8 @@ type LogMeta struct { TxID thor.Bytes32 `json:"txID"` TxOrigin thor.Address `json:"txOrigin"` ClauseIndex uint32 `json:"clauseIndex"` + TxIndex *uint32 `json:"txIndex,omitempty"` + LogIndex *uint32 `json:"logIndex,omitempty"` } type TopicSet struct { @@ -42,8 +41,8 @@ type FilteredEvent struct { } // convert a logdb.Event into a json format Event -func convertEvent(event *logdb.Event) *FilteredEvent { - fe := FilteredEvent{ +func convertEvent(event *logdb.Event, addIndexes bool) *FilteredEvent { + fe := &FilteredEvent{ Address: event.Address, Data: hexutil.Encode(event.Data), Meta: LogMeta{ @@ -55,38 +54,19 @@ func convertEvent(event *logdb.Event) *FilteredEvent { ClauseIndex: event.ClauseIndex, }, } + + if addIndexes { + fe.Meta.TxIndex = &event.TxIndex + fe.Meta.LogIndex = &event.LogIndex + } + fe.Topics = make([]*thor.Bytes32, 0) for i := 0; i < 5; i++ { if event.Topics[i] != nil { fe.Topics = append(fe.Topics, event.Topics[i]) } } - return &fe -} - -func (e *FilteredEvent) String() string { - return fmt.Sprintf(` - Event( - address: %v, - topics: %v, - data: %v, - meta: (blockID %v, - blockNumber %v, - blockTimestamp %v), - txID %v, - txOrigin %v, - clauseIndex %v) - )`, - e.Address, - e.Topics, - e.Data, - e.Meta.BlockID, - e.Meta.BlockNumber, - e.Meta.BlockTimestamp, - e.Meta.TxID, - e.Meta.TxOrigin, - e.Meta.ClauseIndex, - ) + return fe } type EventCriteria struct { @@ -94,11 +74,17 @@ type EventCriteria struct { TopicSet } +type Options struct { + Offset uint64 + Limit uint64 + IncludeIndexes bool +} + type EventFilter struct { - CriteriaSet []*EventCriteria `json:"criteriaSet"` - Range *Range `json:"range"` - Options *logdb.Options `json:"options"` - Order logdb.Order `json:"order"` + CriteriaSet []*EventCriteria + Range *Range + Options *Options + Order logdb.Order // default asc } func convertEventFilter(chain *chain.Chain, filter *EventFilter) (*logdb.EventFilter, error) { @@ -107,9 +93,12 @@ func convertEventFilter(chain *chain.Chain, filter *EventFilter) (*logdb.EventFi return nil, err } f := &logdb.EventFilter{ - Range: rng, - Options: filter.Options, - Order: filter.Order, + Range: rng, + Options: &logdb.Options{ + Offset: filter.Options.Offset, + Limit: filter.Options.Limit, + }, + Order: filter.Order, } 
if len(filter.CriteriaSet) > 0 { f.CriteriaSet = make([]*logdb.EventCriteria, len(filter.CriteriaSet)) @@ -138,43 +127,50 @@ const ( type Range struct { Unit RangeType - From uint64 - To uint64 + From *uint64 `json:"from,omitempty"` + To *uint64 `json:"to,omitempty"` +} + +var emptyRange = logdb.Range{ + From: logdb.MaxBlockNumber, + To: logdb.MaxBlockNumber, } func ConvertRange(chain *chain.Chain, r *Range) (*logdb.Range, error) { if r == nil { return nil, nil } - if r.Unit == TimeRangeType { - emptyRange := logdb.Range{ - From: math.MaxUint32, - To: math.MaxUint32, - } + if r.Unit == TimeRangeType { genesis, err := chain.GetBlockHeader(0) if err != nil { return nil, err } - if r.To < genesis.Timestamp() { + if r.To != nil && *r.To < genesis.Timestamp() { return &emptyRange, nil } head, err := chain.GetBlockHeader(block.Number(chain.HeadID())) if err != nil { return nil, err } - if r.From > head.Timestamp() { + if r.From != nil && *r.From > head.Timestamp() { return &emptyRange, nil } - fromHeader, err := chain.FindBlockHeaderByTimestamp(r.From, 1) - if err != nil { - return nil, err + fromHeader := genesis + if r.From != nil { + fromHeader, err = chain.FindBlockHeaderByTimestamp(*r.From, 1) + if err != nil { + return nil, err + } } - toHeader, err := chain.FindBlockHeaderByTimestamp(r.To, -1) - if err != nil { - return nil, err + toHeader := head + if r.To != nil { + toHeader, err = chain.FindBlockHeaderByTimestamp(*r.To, -1) + if err != nil { + return nil, err + } } return &logdb.Range{ @@ -182,8 +178,24 @@ func ConvertRange(chain *chain.Chain, r *Range) (*logdb.Range, error) { To: toHeader.Number(), }, nil } + + // Units are block numbers - numbers will have a max ceiling at logdb.MaxBlockNumber + if r.From != nil && *r.From > logdb.MaxBlockNumber { + return &emptyRange, nil + } + + from := uint32(0) + if r.From != nil { + from = uint32(*r.From) + } + + to := uint32(logdb.MaxBlockNumber) + if r.To != nil && *r.To < logdb.MaxBlockNumber { + to = uint32(*r.To) + } + return &logdb.Range{ - From: uint32(r.From), - To: uint32(r.To), + From: from, + To: to, }, nil } diff --git a/api/events/types_test.go b/api/events/types_test.go index a02f441c5..78b1bfe5e 100644 --- a/api/events/types_test.go +++ b/api/events/types_test.go @@ -3,28 +3,39 @@ // Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying // file LICENSE or -package events_test +package events import ( - "math" "testing" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/stretchr/testify/assert" - "github.com/vechain/thor/v2/api/events" - "github.com/vechain/thor/v2/chain" + "github.com/stretchr/testify/require" "github.com/vechain/thor/v2/genesis" "github.com/vechain/thor/v2/logdb" - "github.com/vechain/thor/v2/muxdb" - "github.com/vechain/thor/v2/state" + "github.com/vechain/thor/v2/test/testchain" + "github.com/vechain/thor/v2/thor" ) +func newRange(unit RangeType, from uint64, to uint64) *Range { + return &Range{ + Unit: unit, + From: &from, + To: &to, + } +} + func TestEventsTypes(t *testing.T) { c := initChain(t) - for name, tt := range map[string]func(*testing.T, *chain.Chain){ - "testConvertRangeWithBlockRangeType": testConvertRangeWithBlockRangeType, - "testConvertRangeWithTimeRangeTypeLessThenGenesis": testConvertRangeWithTimeRangeTypeLessThenGenesis, - "testConvertRangeWithTimeRangeType": testConvertRangeWithTimeRangeType, - "testConvertRangeWithFromGreaterThanGenesis": testConvertRangeWithFromGreaterThanGenesis, + for name, tt := range map[string]func(*testing.T, 
*testchain.Chain){ + "testConvertRangeWithBlockRangeType": testConvertRangeWithBlockRangeType, + "testConvertRangeWithBlockRangeTypeMoreThanMaxBlockNumber": testConvertRangeWithBlockRangeTypeMoreThanMaxBlockNumber, + "testConvertRangeWithBlockRangeTypeWithSwitchedFromAndTo": testConvertRangeWithBlockRangeTypeWithSwitchedFromAndTo, + "testConvertRangeWithTimeRangeTypeLessThenGenesis": testConvertRangeWithTimeRangeTypeLessThenGenesis, + "testConvertRangeWithTimeRangeType": testConvertRangeWithTimeRangeType, + "testConvertRangeWithFromGreaterThanGenesis": testConvertRangeWithFromGreaterThanGenesis, + "testConvertRangeWithTimeRangeLessThanGenesisGreaterThanBest": testConvertRangeWithTimeRangeLessThanGenesisGreaterThanBest, + "testConvertRangeWithTimeRangeTypeWithSwitchedFromAndTo": testConvertRangeWithTimeRangeTypeWithSwitchedFromAndTo, } { t.Run(name, func(t *testing.T) { tt(t, c) @@ -32,94 +43,160 @@ func TestEventsTypes(t *testing.T) { } } -func testConvertRangeWithBlockRangeType(t *testing.T, chain *chain.Chain) { - rng := &events.Range{ - Unit: events.BlockRangeType, - From: 1, - To: 2, +func testConvertRangeWithTimeRangeLessThanGenesisGreaterThanBest(t *testing.T, chain *testchain.Chain) { + genesis := chain.GenesisBlock().Header() + bestBlock := chain.Repo().BestBlockSummary() + + rng := newRange(TimeRangeType, genesis.Timestamp()-1_000, bestBlock.Header.Timestamp()+1_000) + expectedRange := &logdb.Range{ + From: genesis.Number(), + To: bestBlock.Header.Number(), } - convertedRng, err := events.ConvertRange(chain, rng) + convRng, err := ConvertRange(chain.Repo().NewBestChain(), rng) assert.NoError(t, err) - assert.Equal(t, uint32(rng.From), convertedRng.From) - assert.Equal(t, uint32(rng.To), convertedRng.To) + assert.Equal(t, expectedRange, convRng) } -func testConvertRangeWithTimeRangeTypeLessThenGenesis(t *testing.T, chain *chain.Chain) { - rng := &events.Range{ - Unit: events.TimeRangeType, - From: 1, - To: 2, +func testConvertRangeWithTimeRangeTypeWithSwitchedFromAndTo(t *testing.T, chain *testchain.Chain) { + genesis := chain.GenesisBlock().Header() + bestBlock := chain.Repo().BestBlockSummary() + + rng := newRange(TimeRangeType, bestBlock.Header.Timestamp(), genesis.Timestamp()) + expectedRange := &logdb.Range{ + From: bestBlock.Header.Number(), + To: genesis.Number(), } + + convRng, err := ConvertRange(chain.Repo().NewBestChain(), rng) + + assert.NoError(t, err) + assert.Equal(t, expectedRange, convRng) +} + +func testConvertRangeWithBlockRangeType(t *testing.T, chain *testchain.Chain) { + rng := newRange(BlockRangeType, 1, 2) + + convertedRng, err := ConvertRange(chain.Repo().NewBestChain(), rng) + + assert.NoError(t, err) + assert.Equal(t, uint32(*rng.From), convertedRng.From) + assert.Equal(t, uint32(*rng.To), convertedRng.To) +} + +func testConvertRangeWithBlockRangeTypeMoreThanMaxBlockNumber(t *testing.T, chain *testchain.Chain) { + rng := newRange(BlockRangeType, logdb.MaxBlockNumber+1, logdb.MaxBlockNumber+2) + + convertedRng, err := ConvertRange(chain.Repo().NewBestChain(), rng) + + assert.NoError(t, err) + assert.Equal(t, &emptyRange, convertedRng) +} + +func testConvertRangeWithBlockRangeTypeWithSwitchedFromAndTo(t *testing.T, chain *testchain.Chain) { + rng := newRange(BlockRangeType, logdb.MaxBlockNumber, 0) + + convertedRng, err := ConvertRange(chain.Repo().NewBestChain(), rng) + + assert.NoError(t, err) + assert.Equal(t, emptyRange.From, convertedRng.From) + assert.Equal(t, uint32(*rng.To), convertedRng.To) +} + +func 
testConvertRangeWithTimeRangeTypeLessThenGenesis(t *testing.T, chain *testchain.Chain) { + rng := newRange(TimeRangeType, chain.GenesisBlock().Header().Timestamp()-1000, chain.GenesisBlock().Header().Timestamp()-100) expectedEmptyRange := &logdb.Range{ - From: math.MaxUint32, - To: math.MaxUint32, + From: logdb.MaxBlockNumber, + To: logdb.MaxBlockNumber, } - convRng, err := events.ConvertRange(chain, rng) + convRng, err := ConvertRange(chain.Repo().NewBestChain(), rng) assert.NoError(t, err) assert.Equal(t, expectedEmptyRange, convRng) } -func testConvertRangeWithTimeRangeType(t *testing.T, chain *chain.Chain) { - genesis, err := chain.GetBlockHeader(0) - if err != nil { - t.Fatal(err) - } - rng := &events.Range{ - Unit: events.TimeRangeType, - From: 1, - To: genesis.Timestamp(), - } +func testConvertRangeWithTimeRangeType(t *testing.T, chain *testchain.Chain) { + genesis := chain.GenesisBlock().Header() + + rng := newRange(TimeRangeType, 1, genesis.Timestamp()) expectedZeroRange := &logdb.Range{ From: 0, To: 0, } - convRng, err := events.ConvertRange(chain, rng) + convRng, err := ConvertRange(chain.Repo().NewBestChain(), rng) assert.NoError(t, err) assert.Equal(t, expectedZeroRange, convRng) } -func testConvertRangeWithFromGreaterThanGenesis(t *testing.T, chain *chain.Chain) { - genesis, err := chain.GetBlockHeader(0) - if err != nil { - t.Fatal(err) - } - rng := &events.Range{ - Unit: events.TimeRangeType, - From: genesis.Timestamp() + 1_000, - To: genesis.Timestamp() + 10_000, - } +func testConvertRangeWithFromGreaterThanGenesis(t *testing.T, chain *testchain.Chain) { + genesis := chain.GenesisBlock().Header() + + rng := newRange(TimeRangeType, genesis.Timestamp()+1_000, genesis.Timestamp()+10_000) expectedEmptyRange := &logdb.Range{ - From: math.MaxUint32, - To: math.MaxUint32, + From: logdb.MaxBlockNumber, + To: logdb.MaxBlockNumber, } - convRng, err := events.ConvertRange(chain, rng) + convRng, err := ConvertRange(chain.Repo().NewBestChain(), rng) assert.NoError(t, err) assert.Equal(t, expectedEmptyRange, convRng) } // Init functions -func initChain(t *testing.T) *chain.Chain { - muxDb := muxdb.NewMem() - stater := state.NewStater(muxDb) - gene := genesis.NewDevnet() - - b, _, _, err := gene.Build(stater) - if err != nil { - t.Fatal(err) +func initChain(t *testing.T) *testchain.Chain { + thorChain, err := testchain.NewIntegrationTestChain() + require.NoError(t, err) + + for i := 0; i < 10; i++ { + require.NoError(t, thorChain.MintBlock(genesis.DevAccounts()[0])) } - repo, err := chain.NewRepository(muxDb, b) - if err != nil { - t.Fatal(err) + return thorChain +} + +func TestConvertEvent(t *testing.T) { + event := &logdb.Event{ + Address: thor.Address{0x01}, + Data: []byte{0x02, 0x03}, + BlockID: thor.Bytes32{0x04}, + BlockNumber: 5, + BlockTime: 6, + TxID: thor.Bytes32{0x07}, + TxIndex: 8, + LogIndex: 9, + TxOrigin: thor.Address{0x0A}, + ClauseIndex: 10, + Topics: [5]*thor.Bytes32{ + {0x0B}, + {0x0C}, + nil, + nil, + nil, + }, } - return repo.NewBestChain() + expectedTopics := []*thor.Bytes32{ + {0x0B}, + {0x0C}, + } + expectedData := hexutil.Encode(event.Data) + + result := convertEvent(event, true) + + assert.Equal(t, event.Address, result.Address) + assert.Equal(t, expectedData, result.Data) + assert.Equal(t, event.BlockID, result.Meta.BlockID) + assert.Equal(t, event.BlockNumber, result.Meta.BlockNumber) + assert.Equal(t, event.BlockTime, result.Meta.BlockTimestamp) + assert.Equal(t, event.TxID, result.Meta.TxID) + assert.Equal(t, event.TxIndex, *result.Meta.TxIndex) + 
assert.Equal(t, event.LogIndex, *result.Meta.LogIndex)
+	assert.Equal(t, event.TxOrigin, result.Meta.TxOrigin)
+	assert.Equal(t, event.ClauseIndex, result.Meta.ClauseIndex)
+	assert.Equal(t, expectedTopics, result.Topics)
 }
diff --git a/api/metrics_test.go b/api/metrics_test.go
index 9b83a08a6..c76533162 100644
--- a/api/metrics_test.go
+++ b/api/metrics_test.go
@@ -38,7 +38,7 @@ func TestMetricsMiddleware(t *testing.T) {
 	require.NoError(t, err)
 
 	// inject some invalid data to db
-	data := thorChain.Database().NewStore("chain.data")
+	data := thorChain.Database().NewStore("chain.hdr")
 	var blkID thor.Bytes32
 	rand.Read(blkID[:])
 	data.Put(blkID[:], []byte("invalid data"))
diff --git a/api/subscriptions/pending_tx_test.go b/api/subscriptions/pending_tx_test.go
index f7fdbf2bd..3aa332efe 100644
--- a/api/subscriptions/pending_tx_test.go
+++ b/api/subscriptions/pending_tx_test.go
@@ -139,10 +139,7 @@ func addNewBlock(repo *chain.Repository, stater *state.Stater, b0 *block.Block,
 	if _, err := stage.Commit(); err != nil {
 		t.Fatal(err)
 	}
-	if err := repo.AddBlock(blk, receipts, 0); err != nil {
-		t.Fatal(err)
-	}
-	if err := repo.SetBestBlockID(blk.Header().ID()); err != nil {
+	if err := repo.AddBlock(blk, receipts, 0, true); err != nil {
 		t.Fatal(err)
 	}
 }
diff --git a/api/subscriptions/subscriptions.go b/api/subscriptions/subscriptions.go
index 3d85f139b..afd454949 100644
--- a/api/subscriptions/subscriptions.go
+++ b/api/subscriptions/subscriptions.go
@@ -6,6 +6,7 @@
 package subscriptions
 
 import (
+	"fmt"
 	"net/http"
 	"sync"
 	"time"
@@ -183,6 +184,7 @@ func (s *Subscriptions) handlePendingTransactions(w http.ResponseWriter, req *ht
 	// since the conn is hijacked here, no error should be returned in lines below
 	if err != nil {
 		logger.Debug("upgrade to websocket", "err", err)
+		// websocket connections do not return errors to the wrapHandler
 		return nil
 	}
 	defer s.closeConn(conn, err)
@@ -200,8 +202,8 @@ func (s *Subscriptions) handlePendingTransactions(w http.ResponseWriter, req *ht
 	for {
 		select {
 		case tx := <-txCh:
-			err = conn.WriteJSON(&PendingTxIDMessage{ID: tx.ID()})
-			if err != nil {
+			if err = conn.WriteJSON(&PendingTxIDMessage{ID: tx.ID()}); err != nil {
+				// likely conn has failed
 				return nil
 			}
 		case <-s.done:
@@ -209,7 +211,10 @@ func (s *Subscriptions) handlePendingTransactions(w http.ResponseWriter, req *ht
 		case <-closed:
 			return nil
 		case <-pingTicker.C:
-			conn.WriteMessage(websocket.PingMessage, nil)
+			if err = conn.WriteMessage(websocket.PingMessage, nil); err != nil {
+				// likely conn has failed
+				return nil
+			}
 		}
 	}
 }
@@ -219,21 +224,29 @@ func (s *Subscriptions) setupConn(w http.ResponseWriter, req *http.Request) (*we
 	if err != nil {
 		return nil, nil, err
 	}
+	conn.SetReadLimit(100 * 1024) // 100 KB
 
 	closed := make(chan struct{})
 
 	// start read loop to handle close event
 	s.wg.Add(1)
 	go func() {
 		defer s.wg.Done()
-		conn.SetReadDeadline(time.Now().Add(pongWait))
+		// close the closed channel if not already closed
+		defer close(closed)
+
+		if err = conn.SetReadDeadline(time.Now().Add(pongWait)); err != nil {
+			logger.Debug("failed to set initial read deadline", "err", err)
+			return
+		}
 		conn.SetPongHandler(func(string) error {
-			conn.SetReadDeadline(time.Now().Add(pongWait))
+			if err = conn.SetReadDeadline(time.Now().Add(pongWait)); err != nil {
+				logger.Debug("failed to set pong read deadline", "err", err)
+			}
 			return nil
 		})
 		for {
 			if _, _, err := conn.ReadMessage(); err != nil {
 				logger.Debug("websocket read err", "err", err)
-				close(closed)
 				break
 			}
 		}
@@ -266,11 +279,11 @@ func (s *Subscriptions) pipe(conn *websocket.Conn, reader msgReader, closed chan
 	for {
 		msgs, hasMore, err := reader.Read()
 		if err != nil {
-			return err
+			return fmt.Errorf("unable to read subscription message: %w", err)
 		}
 		for _, msg := range msgs {
 			if err := conn.WriteJSON(msg); err != nil {
-				return err
+				return fmt.Errorf("unable to write subscription json: %w", err)
 			}
 		}
 		if hasMore {
@@ -280,7 +293,9 @@ func (s *Subscriptions) pipe(conn *websocket.Conn, reader msgReader, closed chan
 			case <-closed:
 				return nil
 			case <-pingTicker.C:
-				conn.WriteMessage(websocket.PingMessage, nil)
+				if err = conn.WriteMessage(websocket.PingMessage, nil); err != nil {
+					return fmt.Errorf("failed to write ping message: %w", err)
+				}
 			default:
 			}
 		} else {
@@ -291,7 +306,9 @@ func (s *Subscriptions) pipe(conn *websocket.Conn, reader msgReader, closed chan
 				return nil
 			case <-ticker.C():
 			case <-pingTicker.C:
-				conn.WriteMessage(websocket.PingMessage, nil)
+				if err = conn.WriteMessage(websocket.PingMessage, nil); err != nil {
+					return fmt.Errorf("failed to write ping message: %w", err)
+				}
 			}
 		}
 	}
@@ -347,6 +364,7 @@ func (s *Subscriptions) websocket(readerFunc func(http.ResponseWriter, *http.Req
 	// Call the provided reader function
 	reader, err := readerFunc(w, req)
 	if err != nil {
+		// not yet a websocket connection; likely a setup error in the original http request
 		return err
 	}
 
@@ -354,7 +372,8 @@ func (s *Subscriptions) websocket(readerFunc func(http.ResponseWriter, *http.Req
 	conn, closed, err := s.setupConn(w, req)
 	if err != nil {
 		logger.Debug("upgrade to websocket", "err", err)
-		return err
+		// websocket connections do not return errors to the wrapHandler
+		return nil
 	}
 	defer s.closeConn(conn, err)
 
@@ -362,8 +381,9 @@ func (s *Subscriptions) websocket(readerFunc func(http.ResponseWriter, *http.Req
 	err = s.pipe(conn, reader, closed)
 	if err != nil {
 		logger.Debug("error in websocket pipe", "err", err)
+		// websocket connections do not return errors to the wrapHandler
 	}
-	return err
+	return nil
 	}
}
diff --git a/api/transactions/transactions.go b/api/transactions/transactions.go
index 3cf2e4d65..e4846bbde 100644
--- a/api/transactions/transactions.go
+++ b/api/transactions/transactions.go
@@ -51,7 +51,7 @@ func (t *Transactions) getRawTransaction(txID thor.Bytes32, head thor.Bytes32, a
 		return nil, err
 	}
 
-	summary, err := t.repo.GetBlockSummary(meta.BlockID)
+	header, err := chain.GetBlockHeader(meta.BlockNum)
 	if err != nil {
 		return nil, err
 	}
@@ -62,9 +62,9 @@ func (t *Transactions) getRawTransaction(txID thor.Bytes32, head thor.Bytes32, a
 	return &RawTransaction{
 		RawTx: RawTx{hexutil.Encode(raw)},
 		Meta: &TxMeta{
-			BlockID:        summary.Header.ID(),
-			BlockNumber:    summary.Header.Number(),
-			BlockTimestamp: summary.Header.Timestamp(),
+			BlockID:        header.ID(),
+			BlockNumber:    header.Number(),
+			BlockTimestamp: header.Timestamp(),
 		},
 	}, nil
 }
@@ -84,11 +84,11 @@ func (t *Transactions) getTransactionByID(txID thor.Bytes32, head thor.Bytes32,
 		return nil, err
 	}
 
-	summary, err := t.repo.GetBlockSummary(meta.BlockID)
+	header, err := chain.GetBlockHeader(meta.BlockNum)
 	if err != nil {
 		return nil, err
 	}
-	return convertTransaction(tx, summary.Header), nil
+	return convertTransaction(tx, header), nil
 }
 
 // GetTransactionReceiptByID get tx's receipt
@@ -107,12 +107,12 @@ func (t *Transactions) getTransactionReceiptByID(txID thor.Bytes32, head thor.By
 		return nil, err
 	}
 
-	summary, err := t.repo.GetBlockSummary(meta.BlockID)
+	header, err := chain.GetBlockHeader(meta.BlockNum)
 	if err != nil {
 		return nil, err
 	}
 
-	return convertReceipt(receipt, summary.Header, tx)
+
return convertReceipt(receipt, header, tx) } func (t *Transactions) handleSendTransaction(w http.ResponseWriter, req *http.Request) error { var rawTx *RawTx diff --git a/api/transactions/transactions_benchmark_test.go b/api/transactions/transactions_benchmark_test.go index f6f1fccd6..be8080455 100644 --- a/api/transactions/transactions_benchmark_test.go +++ b/api/transactions/transactions_benchmark_test.go @@ -314,11 +314,7 @@ func packTxsIntoBlock(thorChain *testchain.Chain, proposerAccount *genesis.DevAc return nil, err } - if err := thorChain.Repo().AddBlock(b1, receipts, 0); err != nil { - return nil, err - } - - if err := thorChain.Repo().SetBestBlockID(b1.Header().ID()); err != nil { + if err := thorChain.Repo().AddBlock(b1, receipts, 0, true); err != nil { return nil, err } @@ -444,9 +440,7 @@ func openTempMainDB(dir string) (*muxdb.MuxDB, error) { opts := muxdb.Options{ TrieNodeCacheSizeMB: cacheMB, - TrieRootCacheCapacity: 256, TrieCachedNodeTTL: 30, // 5min - TrieLeafBankSlotCapacity: 256, TrieDedupedPartitionFactor: math.MaxUint32, TrieWillCleanHistory: true, OpenFilesCacheCapacity: fdCache, diff --git a/api/transactions/types.go b/api/transactions/types.go index 7c3a892ac..93cae1660 100644 --- a/api/transactions/types.go +++ b/api/transactions/types.go @@ -89,8 +89,9 @@ func convertTransaction(tx *tx.Transaction, header *block.Header) *Transaction { origin, _ := tx.Origin() delegator, _ := tx.Delegator() - cls := make(Clauses, len(tx.Clauses())) - for i, c := range tx.Clauses() { + txClauses := tx.Clauses() + cls := make(Clauses, len(txClauses)) + for i, c := range txClauses { cls[i] = convertClause(c) } br := tx.BlockRef() @@ -187,9 +188,10 @@ func convertReceipt(txReceipt *tx.Receipt, header *block.Header, tx *tx.Transact origin, }, } + txClauses := tx.Clauses() receipt.Outputs = make([]*Output, len(txReceipt.Outputs)) for i, output := range txReceipt.Outputs { - clause := tx.Clauses()[i] + clause := txClauses[i] var contractAddr *thor.Address if clause.To() == nil { cAddr := thor.CreateContractAddress(tx.ID(), uint32(i), 0) diff --git a/api/transfers/transfers.go b/api/transfers/transfers.go index 25d2e2599..a036f30bd 100644 --- a/api/transfers/transfers.go +++ b/api/transfers/transfers.go @@ -42,15 +42,18 @@ func (t *Transfers) filter(ctx context.Context, filter *TransferFilter) ([]*Filt transfers, err := t.db.FilterTransfers(ctx, &logdb.TransferFilter{ CriteriaSet: filter.CriteriaSet, Range: rng, - Options: filter.Options, - Order: filter.Order, + Options: &logdb.Options{ + Offset: filter.Options.Offset, + Limit: filter.Options.Limit, + }, + Order: filter.Order, }) if err != nil { return nil, err } tLogs := make([]*FilteredTransfer, len(transfers)) for i, trans := range transfers { - tLogs[i] = convertTransfer(trans) + tLogs[i] = convertTransfer(trans, filter.Options.IncludeIndexes) } return tLogs, nil } @@ -66,9 +69,10 @@ func (t *Transfers) handleFilterTransferLogs(w http.ResponseWriter, req *http.Re if filter.Options == nil { // if filter.Options is nil, set to the default limit +1 // to detect whether there are more logs than the default limit - filter.Options = &logdb.Options{ - Offset: 0, - Limit: t.limit + 1, + filter.Options = &events.Options{ + Offset: 0, + Limit: t.limit + 1, + IncludeIndexes: false, } } diff --git a/api/transfers/transfers_test.go b/api/transfers/transfers_test.go index 04a8c7b42..eb028414f 100644 --- a/api/transfers/transfers_test.go +++ b/api/transfers/transfers_test.go @@ -65,7 +65,7 @@ func TestOption(t *testing.T) { filter := 
transfers.TransferFilter{ CriteriaSet: make([]*logdb.TransferCriteria, 0), Range: nil, - Options: &logdb.Options{Limit: 6}, + Options: &events.Options{Limit: 6}, Order: logdb.DESC, } @@ -100,6 +100,57 @@ func TestOption(t *testing.T) { assert.Equal(t, "the number of filtered logs exceeds the maximum allowed value of 5, please use pagination", strings.Trim(string(res), "\n")) } +func TestOptionalData(t *testing.T) { + db := createDb(t) + initTransferServer(t, db, defaultLogLimit) + defer ts.Close() + insertBlocks(t, db, 5) + tclient = thorclient.New(ts.URL) + + testCases := []struct { + name string + includeIndexes bool + expected *uint32 + }{ + { + name: "do not include indexes", + includeIndexes: false, + expected: nil, + }, + { + name: "include indexes", + includeIndexes: true, + expected: new(uint32), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + filter := transfers.TransferFilter{ + CriteriaSet: make([]*logdb.TransferCriteria, 0), + Range: nil, + Options: &events.Options{Limit: 5, IncludeIndexes: tc.includeIndexes}, + Order: logdb.DESC, + } + + res, statusCode, err := tclient.RawHTTPClient().RawHTTPPost("/logs/transfers", filter) + assert.NoError(t, err) + assert.Equal(t, http.StatusOK, statusCode) + var tLogs []*transfers.FilteredTransfer + if err := json.Unmarshal(res, &tLogs); err != nil { + t.Fatal(err) + } + assert.Equal(t, http.StatusOK, statusCode) + assert.Equal(t, 5, len(tLogs)) + + for _, tLog := range tLogs { + assert.Equal(t, tc.expected, tLog.Meta.TxIndex) + assert.Equal(t, tc.expected, tLog.Meta.LogIndex) + } + }) + } +} + // Test functions func testTransferBadRequest(t *testing.T) { badBody := []byte{0x00, 0x01, 0x02} diff --git a/api/transfers/types.go b/api/transfers/types.go index 29ad9b328..1574acf5a 100644 --- a/api/transfers/types.go +++ b/api/transfers/types.go @@ -19,6 +19,8 @@ type LogMeta struct { TxID thor.Bytes32 `json:"txID"` TxOrigin thor.Address `json:"txOrigin"` ClauseIndex uint32 `json:"clauseIndex"` + TxIndex *uint32 `json:"txIndex,omitempty"` + LogIndex *uint32 `json:"logIndex,omitempty"` } type FilteredTransfer struct { @@ -28,9 +30,9 @@ type FilteredTransfer struct { Meta LogMeta `json:"meta"` } -func convertTransfer(transfer *logdb.Transfer) *FilteredTransfer { +func convertTransfer(transfer *logdb.Transfer, addIndexes bool) *FilteredTransfer { v := math.HexOrDecimal256(*transfer.Amount) - return &FilteredTransfer{ + ft := &FilteredTransfer{ Sender: transfer.Sender, Recipient: transfer.Recipient, Amount: &v, @@ -43,11 +45,18 @@ func convertTransfer(transfer *logdb.Transfer) *FilteredTransfer { ClauseIndex: transfer.ClauseIndex, }, } + + if addIndexes { + ft.Meta.TxIndex = &transfer.TxIndex + ft.Meta.LogIndex = &transfer.LogIndex + } + + return ft } type TransferFilter struct { CriteriaSet []*logdb.TransferCriteria Range *events.Range - Options *logdb.Options + Options *events.Options Order logdb.Order //default asc } diff --git a/api/utils/http.go b/api/utils/http.go index 6379b54f9..3556ae3be 100644 --- a/api/utils/http.go +++ b/api/utils/http.go @@ -11,8 +11,11 @@ import ( "net/http" "github.com/pkg/errors" + "github.com/vechain/thor/v2/log" ) +var logger = log.WithContext("pkg", "http-utils") + type httpError struct { cause error status int @@ -66,16 +69,20 @@ type HandlerFunc func(http.ResponseWriter, *http.Request) error func WrapHandlerFunc(f HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { err := f(w, r) - if err != nil { - if he, ok := err.(*httpError); ok { - if 
he.cause != nil { - http.Error(w, he.cause.Error(), he.status) - } else { - w.WriteHeader(he.status) - } + if err == nil { + return // No error, nothing to do + } + + // Otherwise, proceed with normal HTTP error handling + if he, ok := err.(*httpError); ok { + if he.cause != nil { + http.Error(w, he.cause.Error(), he.status) } else { - http.Error(w, err.Error(), http.StatusInternalServerError) + w.WriteHeader(he.status) } + } else { + logger.Debug("all errors should be wrapped in httpError", "err", err) + http.Error(w, err.Error(), http.StatusInternalServerError) } } } diff --git a/api/utils/revisions.go b/api/utils/revisions.go index de64473aa..11df0364f 100644 --- a/api/utils/revisions.go +++ b/api/utils/revisions.go @@ -136,7 +136,7 @@ func GetSummaryAndState(rev *Revision, repo *chain.Repository, bft bft.Committer mocked := builder.Build() // state is also reused from the parent block - st := stater.NewState(best.Header.StateRoot(), best.Header.Number(), best.Conflicts, best.SteadyNum) + st := stater.NewState(best.Root()) // rebuild the block summary with the next header (mocked) AND the best block status return &chain.BlockSummary{ @@ -144,7 +144,6 @@ func GetSummaryAndState(rev *Revision, repo *chain.Repository, bft bft.Committer Txs: best.Txs, Size: uint64(mocked.Size()), Conflicts: best.Conflicts, - SteadyNum: best.SteadyNum, }, st, nil } sum, err := GetSummary(rev, repo, bft) @@ -152,6 +151,6 @@ func GetSummaryAndState(rev *Revision, repo *chain.Repository, bft bft.Committer return nil, nil, err } - st := stater.NewState(sum.Header.StateRoot(), sum.Header.Number(), sum.Conflicts, sum.SteadyNum) + st := stater.NewState(sum.Root()) return sum, st, nil } diff --git a/bft/engine.go b/bft/engine.go index d4e893702..3e0c88059 100644 --- a/bft/engine.go +++ b/bft/engine.go @@ -392,7 +392,7 @@ func (engine *Engine) findCheckpointByQuality(target uint32, finalized, headID t } func (engine *Engine) getMaxBlockProposers(sum *chain.BlockSummary) (uint64, error) { - state := engine.stater.NewState(sum.Header.StateRoot(), sum.Header.Number(), sum.Conflicts, sum.SteadyNum) + state := engine.stater.NewState(sum.Root()) params, err := builtin.Params.Native(state).Get(thor.KeyMaxBlockProposers) if err != nil { return 0, err diff --git a/bft/engine_test.go b/bft/engine_test.go index 54e2e8bec..d36494587 100644 --- a/bft/engine_test.go +++ b/bft/engine_test.go @@ -113,7 +113,7 @@ func (test *TestBFT) reCreateEngine() error { return nil } -func (test *TestBFT) newBlock(parentSummary *chain.BlockSummary, master genesis.DevAccount, shouldVote bool) (*chain.BlockSummary, error) { +func (test *TestBFT) newBlock(parentSummary *chain.BlockSummary, master genesis.DevAccount, shouldVote bool, asBest bool) (*chain.BlockSummary, error) { packer := packer.New(test.repo, test.stater, master.Address, &thor.Address{}, test.fc) flow, err := packer.Mock(parentSummary, parentSummary.Header.Timestamp()+thor.BlockInterval, parentSummary.Header.GasLimit()) if err != nil { @@ -134,7 +134,7 @@ func (test *TestBFT) newBlock(parentSummary *chain.BlockSummary, master genesis. 
return nil, err } - if err = test.repo.AddBlock(b, nil, conflicts); err != nil { + if err = test.repo.AddBlock(b, nil, conflicts, asBest); err != nil { return nil, err } @@ -155,13 +155,13 @@ func (test *TestBFT) fastForward(cnt int) error { acc := devAccounts[(int(parent.Header.Number())+1)%devCnt] var err error - parent, err = test.newBlock(parent, acc, true) + parent, err = test.newBlock(parent, acc, true, true) if err != nil { return err } } - return test.repo.SetBestBlockID(parent.Header.ID()) + return nil } func (test *TestBFT) fastForwardWithMinority(cnt int) error { @@ -172,13 +172,13 @@ func (test *TestBFT) fastForwardWithMinority(cnt int) error { acc := devAccounts[(int(parent.Header.Number())+1)%(devCnt/3)] var err error - parent, err = test.newBlock(parent, acc, true) + parent, err = test.newBlock(parent, acc, true, true) if err != nil { return err } } - return test.repo.SetBestBlockID(parent.Header.ID()) + return nil } func (test *TestBFT) buildBranch(cnt int) (*chain.Chain, error) { @@ -189,7 +189,7 @@ func (test *TestBFT) buildBranch(cnt int) (*chain.Chain, error) { acc := devAccounts[(int(parent.Header.Number())+1+4)%devCnt] var err error - parent, err = test.newBlock(parent, acc, true) + parent, err = test.newBlock(parent, acc, true, false) if err != nil { return nil, err } @@ -197,14 +197,14 @@ func (test *TestBFT) buildBranch(cnt int) (*chain.Chain, error) { return test.repo.NewChain(parent.Header.ID()), nil } -func (test *TestBFT) pack(parentID thor.Bytes32, shouldVote bool, best bool) (*chain.BlockSummary, error) { +func (test *TestBFT) pack(parentID thor.Bytes32, shouldVote bool, asBest bool) (*chain.BlockSummary, error) { acc := devAccounts[len(devAccounts)-1] parent, err := test.repo.GetBlockSummary(parentID) if err != nil { return nil, err } - blk, err := test.newBlock(parent, acc, shouldVote) + blk, err := test.newBlock(parent, acc, shouldVote, asBest) if err != nil { return nil, err } @@ -215,12 +215,6 @@ func (test *TestBFT) pack(parentID thor.Bytes32, shouldVote bool, best bool) (*c } } - if best { - if err := test.repo.SetBestBlockID(blk.Header.ID()); err != nil { - return nil, err - } - } - return test.repo.GetBlockSummary(blk.Header.ID()) } @@ -255,7 +249,7 @@ func TestNewBlock(t *testing.T) { PrivateKey: priv, } - summary, err := testBFT.newBlock(testBFT.repo.BestBlockSummary(), master, true) + summary, err := testBFT.newBlock(testBFT.repo.BestBlockSummary(), master, true, false) if err != nil { t.Fatal(err) } diff --git a/builtin/authority/authority_test.go b/builtin/authority/authority_test.go index 7f07ae21d..b60b6fc88 100644 --- a/builtin/authority/authority_test.go +++ b/builtin/authority/authority_test.go @@ -13,6 +13,7 @@ import ( "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func M(a ...interface{}) []interface{} { @@ -20,8 +21,7 @@ func M(a ...interface{}) []interface{} { } func TestAuthority(t *testing.T) { - db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(muxdb.NewMem(), trie.Root{}) p1 := thor.BytesToAddress([]byte("p1")) p2 := thor.BytesToAddress([]byte("p2")) diff --git a/builtin/builtin.go b/builtin/builtin.go index e9d87a81e..19281cfab 100644 --- a/builtin/builtin.go +++ b/builtin/builtin.go @@ -28,6 +28,7 @@ var ( Extension = &extensionContract{ mustLoadContract("Extension"), mustLoadContract("ExtensionV2"), + mustLoadContract("ExtensionV3"), } Measure = mustLoadContract("Measure") ) @@ -41,6 +42,7 @@ type ( 
extensionContract struct { *contract V2 *contract + V3 *contract } ) diff --git a/builtin/energy/energy_test.go b/builtin/energy/energy_test.go index e9a2c2373..065b563e0 100644 --- a/builtin/energy/energy_test.go +++ b/builtin/energy/energy_test.go @@ -13,6 +13,7 @@ import ( "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func M(a ...interface{}) []interface{} { @@ -20,8 +21,7 @@ func M(a ...interface{}) []interface{} { } func TestEnergy(t *testing.T) { - db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(muxdb.NewMem(), trie.Root{}) acc := thor.BytesToAddress([]byte("a1")) @@ -45,8 +45,7 @@ func TestEnergy(t *testing.T) { } func TestInitialSupply(t *testing.T) { - db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(muxdb.NewMem(), trie.Root{}) eng := New(thor.BytesToAddress([]byte("eng")), st, 0) @@ -63,8 +62,7 @@ func TestInitialSupply(t *testing.T) { } func TestInitialSupplyError(t *testing.T) { - db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(muxdb.NewMem(), trie.Root{}) eng := New(thor.BytesToAddress([]byte("a1")), st, 0) @@ -77,8 +75,7 @@ func TestInitialSupplyError(t *testing.T) { } func TestTotalSupply(t *testing.T) { - db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(muxdb.NewMem(), trie.Root{}) eng := New(thor.BytesToAddress([]byte("eng")), st, 0) @@ -91,8 +88,7 @@ func TestTotalSupply(t *testing.T) { } func TestTokenTotalSupply(t *testing.T) { - db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(muxdb.NewMem(), trie.Root{}) eng := New(thor.BytesToAddress([]byte("eng")), st, 0) @@ -105,8 +101,7 @@ func TestTokenTotalSupply(t *testing.T) { } func TestTotalBurned(t *testing.T) { - db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(muxdb.NewMem(), trie.Root{}) eng := New(thor.BytesToAddress([]byte("eng")), st, 0) @@ -119,8 +114,7 @@ func TestTotalBurned(t *testing.T) { } func TestEnergyGrowth(t *testing.T) { - db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(muxdb.NewMem(), trie.Root{}) acc := thor.BytesToAddress([]byte("a1")) diff --git a/builtin/executor_test.go b/builtin/executor_test.go index 2053f15b4..41df7f7aa 100644 --- a/builtin/executor_test.go +++ b/builtin/executor_test.go @@ -19,6 +19,7 @@ import ( "github.com/vechain/thor/v2/runtime" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" "github.com/vechain/thor/v2/tx" "github.com/vechain/thor/v2/xenv" ) @@ -73,7 +74,7 @@ func initExectorTest() *ctest { }) repo, _ := chain.NewRepository(db, b0) - st := state.New(db, b0.Header().StateRoot(), 0, 0, 0) + st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()}) chain := repo.NewChain(b0.Header().ID()) rt := runtime.New(chain, st, &xenv.BlockContext{Time: uint64(time.Now().Unix())}, thor.NoFork) diff --git a/builtin/extension_native.go b/builtin/extension_native.go index 470a690e5..74e0abba7 100644 --- a/builtin/extension_native.go +++ b/builtin/extension_native.go @@ -134,9 +134,17 @@ func init() { output := env.TransactionContext().GasPayer return []interface{}{output} }}, + {"native_txClauseIndex", func(env *xenv.Environment) []interface{} { + output := env.ClauseIndex() + return []interface{}{output} + }}, + {"native_txClauseCount", func(env *xenv.Environment) []interface{} { 
+ count := env.TransactionContext().ClauseCount + return []interface{}{count} + }}, } - abi := Extension.V2.NativeABI() + abi := Extension.V3.NativeABI() for _, def := range defines { if method, found := abi.MethodByName(def.name); found { nativeMethods[methodKey{Extension.Address, method.ID()}] = &nativeMethod{ diff --git a/builtin/gen/bindata.go b/builtin/gen/bindata.go index c0724a53d..68175385b 100644 --- a/builtin/gen/bindata.go +++ b/builtin/gen/bindata.go @@ -18,6 +18,10 @@ // compiled/ExtensionV2.bin-runtime // compiled/ExtensionV2Native.abi // compiled/ExtensionV2Native.bin-runtime +// compiled/ExtensionV3.abi +// compiled/ExtensionV3.bin-runtime +// compiled/ExtensionV3Native.abi +// compiled/ExtensionV3Native.bin-runtime // compiled/Measure.abi // compiled/Measure.bin-runtime // compiled/Params.abi @@ -455,6 +459,86 @@ func compiledExtensionv2nativeBinRuntime() (*asset, error) { return a, nil } +var _compiledExtensionv3Abi = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\xd4\x41\x6b\x83\x30\x14\x07\xf0\xef\xf2\xce\x39\xb9\xad\x0c\x8f\xeb\xc6\xe8\x61\x50\xd6\xc2\x0e\xc5\xc3\x53\x9f\x23\x18\x93\x90\xbc\x74\x86\xd2\xef\x3e\x94\xae\x2d\x6c\x50\x47\xa1\x7a\x13\xcc\xe3\xfd\xf8\xeb\x3f\x9b\x1d\x14\x46\x7b\x46\xcd\x90\xb2\x0b\x24\x40\x6a\x1b\xd8\x43\xba\xc9\x04\x68\x6c\x08\x52\x60\xc3\xa8\x56\xc1\x5a\x15\x41\x80\x09\x7c\x38\xb1\xfb\x39\x00\x02\x38\xda\xee\x29\x48\xcd\xc9\xc3\x0c\xf6\x99\x00\x8b\x11\x73\x45\x90\x56\xa8\x3c\x09\xf0\x8c\x4c\x6f\x81\x31\x97\x4a\x72\x84\x14\xb6\x92\xbe\x4e\xb3\x55\xd0\x05\x4b\xa3\x61\x2f\x86\xb1\xda\xb9\xc2\xe0\x69\xa1\x4b\x6a\xc7\x87\x1d\x97\x96\xc8\x78\x1a\xce\x23\x93\xef\xd7\x1e\x5e\xe7\x0a\x6b\x4a\xf2\x0e\x73\xc1\xdc\x8f\xde\x25\xb7\x30\xeb\xd0\xfc\x9d\xd5\x11\x6d\x8a\x7a\x2d\x1b\x9a\x50\xce\x83\xcc\x2b\xf9\xa9\xc9\x5d\x52\x63\x59\x3a\xf2\x7e\x32\xea\x75\xdf\xb8\xc2\xb8\x41\x79\xcf\xee\x6f\xd4\xb7\x57\xf4\x4b\x8c\x53\x88\xf3\x5c\xf5\xd2\x5a\xe9\xb0\x9f\x19\xfd\xe7\x3c\x77\x2d\x9e\xc7\x2f\xf8\xef\xdb\x72\x6e\x82\xe6\x69\x05\xb5\x74\x66\x4b\xe5\x87\x71\xf5\xf8\xae\xff\xf5\x74\x6a\x9f\xf8\xa9\x43\xbd\x53\x35\x48\xf5\x78\x25\x2a\xfb\x0e\x00\x00\xff\xff\x11\xc6\x4d\x09\x3e\x08\x00\x00") + +func compiledExtensionv3AbiBytes() ([]byte, error) { + return bindataRead( + _compiledExtensionv3Abi, + "compiled/ExtensionV3.abi", + ) +} + +func compiledExtensionv3Abi() (*asset, error) { + bytes, err := compiledExtensionv3AbiBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "compiled/ExtensionV3.abi", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _compiledExtensionv3BinRuntime = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x97\x0f\x72\xe3\x3a\x08\xc6\xaf\xf4\x21\x04\x82\xe3\xe8\xef\xfd\x8f\xf0\x46\x96\xbb\x6d\xea\x4e\xba\xd1\xee\x26\xaf\xe9\x74\xda\x7c\xb6\x10\xfc\x40\x18\x2b\x0c\x8a\x08\x09\x0a\x44\x56\x82\x12\x50\x45\x92\x02\x60\x49\x15\x84\xcd\x8f\x03\x51\x79\x9c\x1f\x52\x83\x32\x19\x29\x5a\x6b\x14\x8f\x7d\xb2\xa4\xa9\x32\x47\x2a\x88\xa7\x3a\xe4\x54\x53\x88\x62\x14\x0f\x95\x02\x4e\xb5\x18\x7a\xcf\x61\xa9\xf9\xbc\x37\x62\xf8\xc8\xa3\x2f\xb5\xeb\xa9\xd2\x70\xa4\x40\x87\x1a\x84\x4f\x35\xd4\x48\x22\x65\xa9\xd9\x96\xaa\x90\x36\xc4\xeb\x52\xc7\x58\xaa\xe7\x2a\xdc\xca\xba\x97\xc3\xe9\x6f\x0d\x9a\xbb\xf4\x65\x97\xa5\x9d\x6a\x19\xda\x5a\x5d\x3e\xb0\x9d\x76\x9b\x84\xd4\x39\xae\x28\xb8\x9c\x3e\x74\x83\x88\xf1\x69\x61\x54\x49\x52\x26\x71\xc3\x68\x52\x38\x1a\x48\x26\x8b\xa6\x2b\x13\x4b\x97\x23\x3b\x6d\x28\x21\x4a\x13\x9d\x6b\x22\x84\x0c\x16\x8c\x66\x0e\x03\x40\x4e\x02\xc1\xdb\x15\x27\xb0\x63\xf0\x07\xab\x04\xba\x58\x25\x64\x25\x08\x78\xdf\x6a\xa8\x57\xab\x36\x95\x68\x60\x05\x1b\x81\x1c\x06\x03\x8b\xe3\xb4\x0a\x0b\xa0\x4f\xca\x51\x93\x34\x40\x53\x39\xf6\x8a\x38\xbd\x58\xfb\x3b\xa6\xad\x55\xb7\x06\x67\x0f\x7e\x68\xef\xde\x1a\x1b\x1f\xfe\x47\x4e\x73\x87\xe5\xfd\xdb\x8f\xd3\xb1\x22\xf8\xf9\x7d\x46\x9e\xc7\x6d\xe4\x33\x12\x72\xd2\xb7\xbf\x8f\x90\x28\x5f\xf0\x6d\xf8\x8e\xc4\x8a\xe2\x97\x47\x5a\xd3\x7e\x2e\x46\xb8\x78\x10\x88\x1e\xf4\x20\x39\x6e\x3d\x48\xbf\x4e\xf3\x77\x1f\xd2\x47\xee\x7d\x20\xb2\x20\xe3\x1a\x59\xea\x0f\x46\x66\xf1\x73\xb6\xd3\xd5\xab\xaf\xb4\x47\x3c\x2d\xf1\xea\x69\x69\x4a\x70\xf4\xff\x1d\x57\x46\xb9\x78\xcb\x47\x77\xf2\x12\xb7\xeb\x90\xf9\xda\xbf\x98\x67\xff\xca\x8a\xbf\x76\xde\x58\xfd\xba\x4b\x0a\x4a\x28\xd0\x7d\xdf\xfd\x9a\x3f\xf6\x99\xbf\x92\xeb\xbe\xd5\x72\xad\x5f\x6e\x8f\xd6\x6f\x95\xf0\xd7\xe8\x45\xd8\xc5\xa3\x48\xa4\x84\x46\xf4\xa9\x4e\x2f\x27\xe2\xdb\x1a\x74\xd2\xbd\x55\xdf\x47\x70\x4c\x28\x78\xe4\x3c\x28\x8b\x87\xc2\xe6\xf5\xb4\x46\x1f\xe7\x93\x3f\x99\x75\x70\xd6\x00\xe2\x7a\x6a\xfd\x62\xc6\x33\xa3\x07\xdd\x64\xe0\x42\xf2\x46\xbd\xf2\x2d\x75\xc9\x83\xde\xaf\xb6\x24\x89\xdb\xba\xce\x7d\xfd\x77\xde\x87\x77\x1a\x53\x0f\x30\xa2\x73\x55\x6f\x1f\x6d\xbe\x57\xd2\x7c\x62\x7e\x55\x49\xbe\x7e\x75\x8f\x65\xd6\x36\x72\xa5\xfc\x72\x96\xf2\xf9\xfc\xdf\xb0\x94\xd4\x36\x58\x8a\xf3\x63\x2c\x3f\x46\xff\x87\x5c\x53\x47\xce\xc1\x8c\xcf\xc8\xc3\xbf\x20\xbb\xa6\xac\x63\x56\x0a\x36\xe7\xd5\x20\xc1\xf8\x98\xa1\xe8\xf6\xec\xdd\xc6\x3c\xf3\x30\x3d\x03\xa4\xcc\xfb\x4f\x5e\xca\x3e\x27\xdb\x39\x6b\xcd\xf5\x16\x71\xda\x58\xe4\xd6\x44\x43\xc7\x73\x4f\x6e\xaa\x6f\x5d\x9f\xf3\xde\x7c\x43\x58\xb6\x54\x4f\x5b\x3c\xf7\xd6\x63\x9e\x3b\xf6\xe7\x63\x52\x05\x32\xf8\x8b\x1e\x51\x04\x1e\xde\x72\xfa\x1b\x75\xa3\x26\x77\xea\x46\xdd\x37\xea\x46\xf3\xd8\x38\x83\xeb\xdb\x66\xbd\x04\xce\xdd\xad\xfa\x3f\xaf\x97\x1b\xbb\x37\x6f\x76\x97\x5e\xfd\x1b\xf4\x53\x8c\x77\xe8\x27\xb1\x0d\xfa\x49\xfb\x83\xa7\xf6\x8b\x19\xef\x2f\x64\xa4\xa0\x69\x1c\xe1\x87\x65\xc4\xd0\xee\x64\xc4\x02\x6d\x64\xc4\x38\x3d\xfb\x3c\x58\xb7\x8a\xc1\xfd\xa7\xd1\xaf\xf5\x1e\xfd\x8e\x1d\xfa\x43\x9f\x4e\x5f\xad\x35\x14\x7d\xf9\x54\xe0\xe9\x5e\x7f\x71\xdb\xe9\x2f\xee\x0f\xf6\x97\x3f\x9b\x04\xaa\x17\x93\x00\x79\x39\xcb\x4c\xf9\x0e\xcb\x1c\xfa\x06\xcb\x7c\xdb\xff\x9f\x39\x61\xb5\x2c\x24\x3d\xbe\xfe\x2d\x20\x57\xbd\xc7\xb5\xe5\x1d\xae\x03\xcf\xac\xd1\x36\x52\x2f\x5e\xed\xe5\x2c\x8b\xde\xeb\x9f\xc5\x76\xfa\x67\xf1\x9d\xfe\xb9\xcd\x92\x6d\x70\x22\x2b\x2f\x67\x59\x29\xdc\x61\x59\x83\x6e\xb0\xac\x5c\x9f\xc9\x32\xa4\x98\x63\xaa\xe9\x87\x4d\x01\xb5\x8e\x7b\xe4\x3b\xef\x90\x1f\xfe\xec\x29\x80\x86\x07\x56\xa6\x97\x57\x72\x4b\xe9\x0e\xcf\x66\x6
5\x83\x67\xcb\xb4\x57\xc9\x40\x26\x15\x0d\x29\xa7\x9c\x02\x43\x2c\xa0\xfa\xd0\x5e\x06\x20\x23\x0f\x45\xec\x83\x85\x3d\x25\x1d\x5c\x9c\xa5\xb3\x57\x4d\xdd\x9b\xc6\x06\x4b\xc5\x87\x4b\x66\xca\x91\x45\x5a\x65\x2f\x40\xf0\xff\x02\x00\x00\xff\xff\x74\xf5\x1a\xf5\xc6\x1b\x00\x00") + +func compiledExtensionv3BinRuntimeBytes() ([]byte, error) { + return bindataRead( + _compiledExtensionv3BinRuntime, + "compiled/ExtensionV3.bin-runtime", + ) +} + +func compiledExtensionv3BinRuntime() (*asset, error) { + bytes, err := compiledExtensionv3BinRuntimeBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "compiled/ExtensionV3.bin-runtime", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _compiledExtensionv3nativeAbi = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x94\x41\x4b\xc3\x40\x10\x85\xff\xcb\x9c\xf7\x54\xb5\x48\x8e\x56\x91\x1e\x84\x62\x05\x0f\xa5\xc8\x24\x99\xca\xd2\xed\xec\xb2\x3b\x5b\xb3\x94\xfe\x77\x49\xa9\x16\x25\xda\x20\x98\xe4\x96\xc3\xbc\x99\x8f\x97\xb7\x6f\xb1\x83\xc2\x72\x10\x64\x81\x4c\x7c\x24\x05\x9a\x5d\x94\x00\xd9\x62\xa9\x80\x71\x43\x90\x01\xa3\xe8\x2d\xbd\x48\x75\x63\x6c\xb1\x7e\xa4\x15\x28\xb0\x51\x8e\x73\xbb\x8f\x31\x50\x20\xc9\xd5\x5f\x79\x12\x0a\xd7\xb0\x5f\x2a\x70\x98\x30\x37\x04\xd9\x0a\x4d\x20\x05\x41\x50\xe8\x21\x0a\xe6\xda\x68\x49\x90\xc1\x56\xd3\xdb\x49\xba\x8a\x5c\x88\xb6\x0c\x7b\xf5\x0b\xdb\xe7\x4d\x8e\x9b\x93\x36\x6a\x96\x8b\xd1\xe1\xec\x57\xf2\xbc\xe6\x7e\xd2\x1b\x3a\x07\x5e\x6f\x18\x5f\x0e\x0c\x7c\x7a\xdb\xca\xef\xe3\x82\xe1\x70\xcf\xf5\x2b\x93\x3f\xc7\x8e\x65\xe9\x29\x84\x7f\x64\x6f\x08\xf2\xcc\xdb\x2d\x95\xcf\xd6\xaf\xdb\x24\x62\x74\x35\xee\x14\xcf\x0a\x9a\x79\x74\xce\xa4\x21\xd2\x55\xf7\x18\x66\x98\x86\xf0\x67\xff\x54\x03\x07\x77\x0b\xeb\xfb\x2f\x83\x06\x6f\x27\x06\x63\xa0\x29\x97\x54\xb5\xc1\xeb\xe6\xcd\x97\x28\xf8\xad\x6c\x1a\xdd\xc5\x35\x8d\xf2\x3a\x8e\xbd\xd7\x55\x83\xb3\x77\x95\xd3\x1e\x0f\xd2\xbe\x8d\x6d\xa0\x1b\x42\xc7\xff\x18\xc7\x89\x8d\x2c\xdd\x54\xd1\xf2\x3d\x00\x00\xff\xff\x48\xb3\x02\xb9\x92\x08\x00\x00") + +func compiledExtensionv3nativeAbiBytes() ([]byte, error) { + return bindataRead( + _compiledExtensionv3nativeAbi, + "compiled/ExtensionV3Native.abi", + ) +} + +func compiledExtensionv3nativeAbi() (*asset, error) { + bytes, err := compiledExtensionv3nativeAbiBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "compiled/ExtensionV3Native.abi", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _compiledExtensionv3nativeBinRuntime = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x01\x00\x00\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00") + +func compiledExtensionv3nativeBinRuntimeBytes() ([]byte, error) { + return bindataRead( + _compiledExtensionv3nativeBinRuntime, + "compiled/ExtensionV3Native.bin-runtime", + ) +} + +func compiledExtensionv3nativeBinRuntime() (*asset, error) { + bytes, err := compiledExtensionv3nativeBinRuntimeBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "compiled/ExtensionV3Native.bin-runtime", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + var _compiledMeasureAbi = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\xcd\x31\x0a\xc2\x40\x10\x85\xe1\xbb\xbc\x7a\x4e\xb0\x77\xf0\x04\x21\xc5\x24\x4c\x60\x20\xce\x2e\xbb\x6f\x94\x45\xbc\xbb\x58\xd9\x58\x88\xf5\x0f\xff\xb7\x3c\xb0\xd7\x18\xd4\x20\x0a\x7b\x9a\xc0\xa3\x25\x07\xca\xb2\x0a\x42\xaf\x86\x02\x8f\xb0\x0e\x41\x4d\x7e\x5a\xd3\xa9\xdb\x69\x28\x87\x9e\xc3\x04\x83\x4a\xbb\x24\x75\xf3\xd3\x39\x51\xd0\xb2\x1b\x04\x9c\xed\x3d\x39\x32\x76\x7a\x0d\x3c\xe5\x27\xb4\x26\xff\x41\x6f\x6e\xf7\x6f\xe8\xfa\x0a\x00\x00\xff\xff\x0a\x34\xa0\xdd\xeb\x00\x00\x00") func compiledMeasureAbiBytes() ([]byte, error) { @@ -765,6 +849,10 @@ var _bindata = map[string]func() (*asset, error){ "compiled/ExtensionV2.bin-runtime": compiledExtensionv2BinRuntime, "compiled/ExtensionV2Native.abi": compiledExtensionv2nativeAbi, "compiled/ExtensionV2Native.bin-runtime": compiledExtensionv2nativeBinRuntime, + "compiled/ExtensionV3.abi": compiledExtensionv3Abi, + "compiled/ExtensionV3.bin-runtime": compiledExtensionv3BinRuntime, + "compiled/ExtensionV3Native.abi": compiledExtensionv3nativeAbi, + "compiled/ExtensionV3Native.bin-runtime": compiledExtensionv3nativeBinRuntime, "compiled/Measure.abi": compiledMeasureAbi, "compiled/Measure.bin-runtime": compiledMeasureBinRuntime, "compiled/Params.abi": compiledParamsAbi, @@ -841,6 +929,10 @@ var _bintree = &bintree{nil, map[string]*bintree{ "ExtensionV2.bin-runtime": &bintree{compiledExtensionv2BinRuntime, map[string]*bintree{}}, "ExtensionV2Native.abi": &bintree{compiledExtensionv2nativeAbi, map[string]*bintree{}}, "ExtensionV2Native.bin-runtime": &bintree{compiledExtensionv2nativeBinRuntime, map[string]*bintree{}}, + "ExtensionV3.abi": &bintree{compiledExtensionv3Abi, map[string]*bintree{}}, + "ExtensionV3.bin-runtime": &bintree{compiledExtensionv3BinRuntime, map[string]*bintree{}}, + "ExtensionV3Native.abi": &bintree{compiledExtensionv3nativeAbi, map[string]*bintree{}}, + "ExtensionV3Native.bin-runtime": &bintree{compiledExtensionv3nativeBinRuntime, map[string]*bintree{}}, "Measure.abi": &bintree{compiledMeasureAbi, map[string]*bintree{}}, "Measure.bin-runtime": &bintree{compiledMeasureBinRuntime, map[string]*bintree{}}, "Params.abi": &bintree{compiledParamsAbi, map[string]*bintree{}}, diff --git a/builtin/gen/extension-v3.sol b/builtin/gen/extension-v3.sol new file mode 100644 index 000000000..4dd21a904 --- /dev/null +++ b/builtin/gen/extension-v3.sol @@ -0,0 +1,32 @@ +// Copyright (c) 2018 The VeChainThor developers + +// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying +// file LICENSE or + +pragma solidity 0.4.24; + +import './extension-v2.sol'; + +/// @title ExtensionV3 extends EVM global functions. +contract ExtensionV3 is ExtensionV2 { + + /** + * @dev Get the index of the current clause in the transaction. + */ + function txClauseIndex() public view returns (uint256) { + return ExtensionV3Native(this).native_txClauseIndex(); + } + + /** + * @dev Get the total number of clauses in the transaction. 
+     */
+    function txClauseCount() public view returns (uint) {
+        return ExtensionV3Native(this).native_txClauseCount();
+    }
+}
+
+contract ExtensionV3Native is ExtensionV2Native {
+    function native_txClauseCount() public view returns (uint);
+
+    function native_txClauseIndex() public view returns (uint32);
+}
diff --git a/builtin/gen/gen.go b/builtin/gen/gen.go
index ce5fed8e1..aa8666008 100644
--- a/builtin/gen/gen.go
+++ b/builtin/gen/gen.go
@@ -6,6 +6,6 @@ package gen
 //go:generate rm -rf ./compiled/
-//go:generate docker run -v ./:/solidity ethereum/solc:0.4.24 --optimize-runs 200 --overwrite --bin-runtime --abi -o /solidity/compiled authority.sol energy.sol executor.sol extension.sol extension-v2.sol measure.sol params.sol prototype.sol
+//go:generate docker run -v ./:/solidity ethereum/solc:0.4.24 --optimize-runs 200 --overwrite --bin-runtime --abi -o /solidity/compiled authority.sol energy.sol executor.sol extension.sol extension-v2.sol extension-v3.sol measure.sol params.sol prototype.sol
 //go:generate go run github.com/go-bindata/go-bindata/go-bindata@v1.0.0 -nometadata -ignore=_ -pkg gen -o bindata.go compiled/
 //go:generate go fmt
diff --git a/builtin/native_calls_test.go b/builtin/native_calls_test.go
index 48d53d3f4..ca6af57ff 100644
--- a/builtin/native_calls_test.go
+++ b/builtin/native_calls_test.go
@@ -26,6 +26,7 @@ import (
     "github.com/vechain/thor/v2/runtime"
     "github.com/vechain/thor/v2/state"
     "github.com/vechain/thor/v2/thor"
+    "github.com/vechain/thor/v2/trie"
     "github.com/vechain/thor/v2/tx"
     "github.com/vechain/thor/v2/vm"
     "github.com/vechain/thor/v2/xenv"
@@ -121,7 +122,7 @@ func (c *ccase) Assert(t *testing.T) *ccase {
     assert.True(t, ok, "should have method")
     constant := method.Const()
-    stage, err := c.rt.State().Stage(0, 0)
+    stage, err := c.rt.State().Stage(trie.Version{})
     assert.Nil(t, err, "should stage state")
     stateRoot := stage.Hash()
@@ -140,7 +141,7 @@ func (c *ccase) Assert(t *testing.T) *ccase {
     vmout, _, err := exec()
     assert.Nil(t, err)
     if constant || vmout.VMErr != nil {
-        stage, err := c.rt.State().Stage(0, 0)
+        stage, err := c.rt.State().Stage(trie.Version{})
         assert.Nil(t, err, "should stage state")
         newStateRoot := stage.Hash()
         assert.Equal(t, stateRoot, newStateRoot)
@@ -195,7 +196,7 @@ func TestParamsNative(t *testing.T) {
         return nil
     })
     repo, _ := chain.NewRepository(db, b0)
-    st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+    st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
     chain := repo.NewChain(b0.Header().ID())
     rt := runtime.New(chain, st, &xenv.BlockContext{}, thor.NoFork)
@@ -263,7 +264,7 @@ func TestAuthorityNative(t *testing.T) {
         return nil
     })
     repo, _ := chain.NewRepository(db, b0)
-    st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+    st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
     chain := repo.NewChain(b0.Header().ID())
     rt := runtime.New(chain, st, &xenv.BlockContext{}, thor.NoFork)
@@ -369,7 +370,7 @@ func TestEnergyNative(t *testing.T) {
     })
     repo, _ := chain.NewRepository(db, b0)
-    st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+    st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
     chain := repo.NewChain(b0.Header().ID())
     st.SetEnergy(addr, eng, b0.Header().Timestamp())
@@ -495,7 +496,7 @@ func TestPrototypeNative(t *testing.T) {
     gene := genesis.NewDevnet()
     genesisBlock, _, _, _ := gene.Build(state.NewStater(db))
     repo, _ := chain.NewRepository(db, genesisBlock)
-    st := state.New(db, genesisBlock.Header().StateRoot(), 0, 0, 0)
+    st := state.New(db, trie.Root{Hash:
genesisBlock.Header().StateRoot()}) chain := repo.NewChain(genesisBlock.Header().ID()) st.SetStorage(thor.Address(acc1), key, value) @@ -768,14 +769,14 @@ func TestPrototypeNativeWithLongerBlockNumber(t *testing.T) { db := muxdb.NewMem() gene := genesis.NewDevnet() genesisBlock, _, _, _ := gene.Build(state.NewStater(db)) - st := state.New(db, genesisBlock.Header().StateRoot(), 0, 0, 0) + st := state.New(db, trie.Root{Hash: genesisBlock.Header().StateRoot()}) repo, _ := chain.NewRepository(db, genesisBlock) launchTime := genesisBlock.Header().Timestamp() for i := 1; i < 100; i++ { st.SetBalance(acc1, big.NewInt(int64(i))) st.SetEnergy(acc1, big.NewInt(int64(i)), launchTime+uint64(i)*10) - stage, _ := st.Stage(uint32(i), 0) + stage, _ := st.Stage(trie.Version{Major: uint32(i)}) stateRoot, _ := stage.Commit() b := new(block.Builder). ParentID(repo.BestBlockSummary().Header.ID()). @@ -784,11 +785,10 @@ func TestPrototypeNativeWithLongerBlockNumber(t *testing.T) { StateRoot(stateRoot). Build(). WithSignature(sig[:]) - repo.AddBlock(b, tx.Receipts{}, 0) - repo.SetBestBlockID(b.Header().ID()) + repo.AddBlock(b, tx.Receipts{}, 0, true) } - st = state.New(db, repo.BestBlockSummary().Header.StateRoot(), repo.BestBlockSummary().Header.Number(), 0, 0) + st = state.New(db, repo.BestBlockSummary().Root()) chain := repo.NewBestChain() rt := runtime.New(chain, st, &xenv.BlockContext{ @@ -838,14 +838,14 @@ func TestPrototypeNativeWithBlockNumber(t *testing.T) { db := muxdb.NewMem() gene := genesis.NewDevnet() genesisBlock, _, _, _ := gene.Build(state.NewStater(db)) - st := state.New(db, genesisBlock.Header().StateRoot(), 0, 0, 0) + st := state.New(db, trie.Root{Hash: genesisBlock.Header().StateRoot()}) repo, _ := chain.NewRepository(db, genesisBlock) launchTime := genesisBlock.Header().Timestamp() for i := 1; i < 100; i++ { st.SetBalance(acc1, big.NewInt(int64(i))) st.SetEnergy(acc1, big.NewInt(int64(i)), launchTime+uint64(i)*10) - stage, _ := st.Stage(uint32(i), 0) + stage, _ := st.Stage(trie.Version{Major: uint32(i)}) stateRoot, _ := stage.Commit() b := new(block.Builder). ParentID(repo.BestBlockSummary().Header.ID()). @@ -854,11 +854,10 @@ func TestPrototypeNativeWithBlockNumber(t *testing.T) { StateRoot(stateRoot). Build(). 
WithSignature(sig[:]) - repo.AddBlock(b, tx.Receipts{}, 0) - repo.SetBestBlockID(b.Header().ID()) + repo.AddBlock(b, tx.Receipts{}, 0, true) } - st = state.New(db, repo.BestBlockSummary().Header.StateRoot(), repo.BestBlockSummary().Header.Number(), 0, repo.BestBlockSummary().SteadyNum) + st = state.New(db, repo.BestBlockSummary().Root()) chain := repo.NewBestChain() rt := runtime.New(chain, st, &xenv.BlockContext{ @@ -898,7 +897,7 @@ func newBlock(parent *block.Block, score uint64, timestamp uint64, privateKey *e func TestExtensionNative(t *testing.T) { db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(db, trie.Root{}) gene := genesis.NewDevnet() genesisBlock, _, _, _ := gene.Build(state.NewStater(db)) repo, _ := chain.NewRepository(db, genesisBlock) @@ -920,9 +919,9 @@ func TestExtensionNative(t *testing.T) { gasPayer := thor.BytesToAddress([]byte("gasPayer")) - err := repo.AddBlock(b1, nil, 0) + err := repo.AddBlock(b1, nil, 0, false) assert.Equal(t, err, nil) - err = repo.AddBlock(b2, nil, 0) + err = repo.AddBlock(b2, nil, 0, false) assert.Equal(t, err, nil) assert.Equal(t, builtin.Extension.Address, builtin.Extension.Address) diff --git a/builtin/params/params_test.go b/builtin/params/params_test.go index 484442b14..277e99930 100644 --- a/builtin/params/params_test.go +++ b/builtin/params/params_test.go @@ -13,11 +13,11 @@ import ( "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func TestParamsGetSet(t *testing.T) { - db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(muxdb.NewMem(), trie.Root{}) setv := big.NewInt(10) key := thor.BytesToBytes32([]byte("key")) p := New(thor.BytesToAddress([]byte("par")), st) diff --git a/builtin/prototype/prototype_test.go b/builtin/prototype/prototype_test.go index 6cdf127af..d187cc16d 100644 --- a/builtin/prototype/prototype_test.go +++ b/builtin/prototype/prototype_test.go @@ -14,6 +14,7 @@ import ( "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func M(a ...interface{}) []interface{} { @@ -21,8 +22,7 @@ func M(a ...interface{}) []interface{} { } func TestPrototype(t *testing.T) { - db := muxdb.NewMem() - st := state.New(db, thor.Bytes32{}, 0, 0, 0) + st := state.New(muxdb.NewMem(), trie.Root{}) proto := prototype.New(thor.BytesToAddress([]byte("proto")), st) binding := proto.Bind(thor.BytesToAddress([]byte("binding"))) diff --git a/builtin/prototype_native.go b/builtin/prototype_native.go index 97e72fce1..5a039bd2e 100644 --- a/builtin/prototype_native.go +++ b/builtin/prototype_native.go @@ -94,7 +94,7 @@ func init() { } env.UseGas(thor.SloadGas) - state := env.State().Checkout(summary.Header.StateRoot(), summary.Header.Number(), summary.Conflicts, summary.SteadyNum) + state := env.State().Checkout(summary.Root()) env.UseGas(thor.GetBalanceGas) val, err := state.GetBalance(thor.Address(args.Self)) @@ -136,7 +136,7 @@ func init() { } env.UseGas(thor.SloadGas) - state := env.State().Checkout(summary.Header.StateRoot(), summary.Header.Number(), summary.Conflicts, summary.SteadyNum) + state := env.State().Checkout(summary.Root()) env.UseGas(thor.GetBalanceGas) val, err := state.GetEnergy(thor.Address(args.Self), summary.Header.Timestamp()) diff --git a/chain/block_reader_test.go b/chain/block_reader_test.go index 7d4c306e3..804c91ce6 100644 --- a/chain/block_reader_test.go +++ 
b/chain/block_reader_test.go
@@ -3,14 +3,13 @@
 // Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
 // file LICENSE or

-package chain_test
+package chain

 import (
     "testing"

     "github.com/stretchr/testify/assert"
     "github.com/vechain/thor/v2/block"
-    "github.com/vechain/thor/v2/chain"
 )

 func TestBlockReader(t *testing.T) {
@@ -18,22 +17,20 @@ func TestBlockReader(t *testing.T) {
     b0 := repo.GenesisBlock()

     b1 := newBlock(b0, 10)
-    repo.AddBlock(b1, nil, 0)
+    repo.AddBlock(b1, nil, 0, false)

     b2 := newBlock(b1, 20)
-    repo.AddBlock(b2, nil, 0)
+    repo.AddBlock(b2, nil, 0, false)

     b3 := newBlock(b2, 30)
-    repo.AddBlock(b3, nil, 0)
+    repo.AddBlock(b3, nil, 0, false)

     b4 := newBlock(b3, 40)
-    repo.AddBlock(b4, nil, 0)
-
-    repo.SetBestBlockID(b4.Header().ID())
+    repo.AddBlock(b4, nil, 0, true)

     br := repo.NewBlockReader(b2.Header().ID())

-    var blks []*chain.ExtendedBlock
+    var blks []*ExtendedBlock

     for {
         r, err := br.Read()
@@ -46,7 +43,7 @@ func TestBlockReader(t *testing.T) {
         blks = append(blks, r...)
     }

-    assert.Equal(t, []*chain.ExtendedBlock{
+    assert.Equal(t, []*ExtendedBlock{
         {block.Compose(b3.Header(), b3.Transactions()), false},
         {block.Compose(b4.Header(), b4.Transactions()), false}},
         blks)
@@ -57,25 +54,23 @@ func TestBlockReaderFork(t *testing.T) {
     b0 := repo.GenesisBlock()

     b1 := newBlock(b0, 10)
-    repo.AddBlock(b1, nil, 0)
+    repo.AddBlock(b1, nil, 0, false)

     b2 := newBlock(b1, 20)
-    repo.AddBlock(b2, nil, 0)
+    repo.AddBlock(b2, nil, 0, false)

     b2x := newBlock(b1, 20)
-    repo.AddBlock(b2x, nil, 1)
+    repo.AddBlock(b2x, nil, 1, false)

     b3 := newBlock(b2, 30)
-    repo.AddBlock(b3, nil, 0)
+    repo.AddBlock(b3, nil, 0, false)

     b4 := newBlock(b3, 40)
-    repo.AddBlock(b4, nil, 0)
-
-    repo.SetBestBlockID(b4.Header().ID())
+    repo.AddBlock(b4, nil, 0, true)

     br := repo.NewBlockReader(b2x.Header().ID())

-    var blks []*chain.ExtendedBlock
+    var blks []*ExtendedBlock

     for {
         r, err := br.Read()
@@ -89,7 +84,7 @@ func TestBlockReaderFork(t *testing.T) {
         blks = append(blks, r...)
     }

-    assert.Equal(t, []*chain.ExtendedBlock{
+    assert.Equal(t, []*ExtendedBlock{
         {block.Compose(b2x.Header(), b2x.Transactions()), true},
         {block.Compose(b2.Header(), b2.Transactions()), false},
         {block.Compose(b3.Header(), b3.Transactions()), false},
diff --git a/chain/chain.go b/chain/chain.go
index 0ee205402..c7e30d9e9 100644
--- a/chain/chain.go
+++ b/chain/chain.go
@@ -7,6 +7,7 @@ package chain

 import (
     "encoding/binary"
+    "fmt"
     "math"
     "sort"
@@ -34,8 +35,8 @@ type storageTxMeta struct {

 // TxMeta contains tx location and reversal state.
 type TxMeta struct {
-    // The block id this tx is involved.
-    BlockID thor.Bytes32
+    // The number and conflicts value of the block this tx is involved in.
+    BlockNum, BlockConflicts uint32

     // Index the position of the tx in block's txs.
     Index uint64 // rlp require uint64.
@@ -64,9 +65,9 @@ func newChain(repo *Repository, headID thor.Bytes32) *Chain {
         func() (*muxdb.Trie, error) {
             if indexTrie == nil && initErr == nil {
                 if summary, err := repo.GetBlockSummary(headID); err == nil {
-                    indexTrie = repo.db.NewNonCryptoTrie(IndexTrieName, trie.NonCryptoNodeHash, summary.Header.Number(), summary.Conflicts)
+                    indexTrie = repo.db.NewTrie(IndexTrieName, summary.IndexRoot())
                 } else {
-                    initErr = errors.Wrap(err, "lazy init chain")
+                    initErr = errors.Wrap(err, fmt.Sprintf("lazy init chain, head=%v", headID))
                 }
             }
             return indexTrie, initErr
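The hunk below rewrites GetTransactionMeta around the new tx-index key layout: a 32-byte tx id followed by the uvarint-encoded block number and conflicts value, replacing the old fixed txid-plus-blockID key. A minimal self-contained sketch of that layout, assuming only the standard encoding/binary package (helper names here are illustrative, not part of this change):

package sketch

import "encoding/binary"

// encodeTxIndexKey mirrors the tx-index key layout used in the hunk below:
// 32-byte tx id, then uvarint block number, then uvarint conflicts.
func encodeTxIndexKey(txid [32]byte, blockNum, conflicts uint32) []byte {
    key := append([]byte(nil), txid[:]...)
    key = binary.AppendUvarint(key, uint64(blockNum))
    key = binary.AppendUvarint(key, uint64(conflicts))
    return key
}

// decodeTxIndexKey reverses the layout, as done in the iterator loop below.
func decodeTxIndexKey(key []byte) (blockNum, conflicts uint64) {
    ver := key[32:] // skip the 32-byte tx id prefix
    blockNum, n := binary.Uvarint(ver)
    conflicts, _ = binary.Uvarint(ver[n:])
    return
}

@@ -106,35 +107,31 @@ func (c *Chain) GetBlockID(num uint32) (thor.Bytes32, error) {

 // GetTransactionMeta returns tx meta by given tx id.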
 func (c *Chain) GetTransactionMeta(id thor.Bytes32) (*TxMeta, error) {
-    // precheck. point access is faster than range access.
-    if has, err := c.repo.txIndexer.Has(id[:]); err != nil {
-        return nil, err
-    } else if !has {
-        return nil, errNotFound
-    }
-
     iter := c.repo.txIndexer.Iterate(kv.Range(*util.BytesPrefix(id[:])))
     defer iter.Release()
     for iter.Next() {
-        if len(iter.Key()) != 64 { // skip the pure txid key
+        ver := iter.Key()[32:]
+        blockNum, n := binary.Uvarint(ver)
+        conflicts, _ := binary.Uvarint(ver[n:])
+
+        if blockNum > uint64(block.Number(c.headID)) {
             continue
         }
-        blockID := thor.BytesToBytes32(iter.Key()[32:])
-
-        has, err := c.HasBlock(blockID)
+        s, err := c.GetBlockSummary(uint32(blockNum))
         if err != nil {
-            return nil, err
+            return nil, errors.Wrap(err, "block missing")
         }
-        if has {
+        if s.Conflicts == uint32(conflicts) {
             var sMeta storageTxMeta
             if err := rlp.DecodeBytes(iter.Value(), &sMeta); err != nil {
                 return nil, err
             }
             return &TxMeta{
-                BlockID:  blockID,
-                Index:    sMeta.Index,
-                Reverted: sMeta.Reverted,
+                BlockNum:       uint32(blockNum),
+                BlockConflicts: uint32(conflicts),
+                Index:          sMeta.Index,
+                Reverted:       sMeta.Reverted,
             }, nil
         }
     }
@@ -152,30 +149,55 @@ func (c *Chain) HasTransaction(txid thor.Bytes32, txBlockRef uint32) (bool, erro
     if txBlockRef > headNum {
         return false, nil
     }
-    // tx block ref too old, fallback to retrieve tx meta.
-    if headNum-txBlockRef > 100 {
-        if _, err := c.GetTransactionMeta(txid); err != nil {
-            if c.IsNotFound(err) {
-                return false, nil
+
+    // the ref block is recent; if the tx exists, it is within recent blocks.
+    if headNum-txBlockRef < 100 {
+        // iterate block summaries from head block to ref block,
+        // to match tx id.
+        for nextID := c.headID; block.Number(nextID) >= txBlockRef && block.Number(nextID) != math.MaxUint32; {
+            s, err := c.repo.GetBlockSummary(nextID)
+            if err != nil {
+                return false, err
             }
-            return false, err
+            for _, _txid := range s.Txs {
+                if _txid == txid {
+                    return true, nil
+                }
+            }
+            nextID = s.Header.ParentID()
         }
-        return true, nil
+        return false, nil
+    }
+
+    // tx block ref too old, fallback to check tx meta.
+    if has, err := c.repo.txIndexer.Has(txid[:txFilterKeyLen]); err != nil {
+        return false, err
+    } else if !has {
+        return false, nil
     }
-    // iterate block summaries from head block to ref block,
-    // to match tx id.
-    for nextID := c.headID; block.Number(nextID) >= txBlockRef && block.Number(nextID) != math.MaxUint32; {
-        s, err := c.repo.GetBlockSummary(nextID)
+    iter := c.repo.txIndexer.Iterate(kv.Range(*util.BytesPrefix(txid[:])))
+    defer iter.Release()
+    for iter.Next() {
+        ver := iter.Key()[32:]
+        blockNum, n := binary.Uvarint(ver)
+        conflicts, _ := binary.Uvarint(ver[n:])
+
+        if blockNum > uint64(block.Number(c.headID)) {
+            continue
+        }
+
+        s, err := c.GetBlockSummary(uint32(blockNum))
         if err != nil {
-            return false, err
+            return false, errors.Wrap(err, "block missing")
        }
-        for _, _txid := range s.Txs {
-            if _txid == txid {
-                return true, nil
-            }
+
+        if s.Conflicts == uint32(conflicts) {
+            return true, nil
         }
-        nextID = s.Header.ParentID()
+    }
+    if err := iter.Error(); err != nil {
+        return false, err
     }
     return false, nil
 }
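The rewritten lookup above accepts an indexed entry only when its stored conflicts value matches the block summary found on the current chain at the same height. A sketch of just that acceptance rule, with a simplified stand-in for the summary lookup (not the real chain.Chain API):

package sketch

// summaryLookup stands in for fetching the canonical block summary at a
// given height on the current chain (e.g. via Chain.GetBlockSummary).
type summaryLookup func(num uint32) (conflicts uint32, err error)

// onCurrentChain: an indexed (blockNum, conflicts) pair identifies one
// concrete block; it belongs to the current chain only when the canonical
// block at that height carries the same conflicts value.
func onCurrentChain(lookup summaryLookup, blockNum, conflicts uint32) (bool, error) {
    c, err := lookup(blockNum)
    if err != nil {
        return false, err
    }
    return c == conflicts, nil
}

@@ -190,7 +212,7 @@ func (c *Chain) GetBlockHeader(num uint32) (*block.Header, error) {
 }

 // GetBlockSummary returns block summary by given block number.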
-func (c Chain) GetBlockSummary(num uint32) (*BlockSummary, error) { +func (c *Chain) GetBlockSummary(num uint32) (*BlockSummary, error) { id, err := c.GetBlockID(num) if err != nil { return nil, err @@ -214,8 +236,7 @@ func (c *Chain) GetTransaction(id thor.Bytes32) (*tx.Transaction, *TxMeta, error return nil, nil, err } - key := makeTxKey(txMeta.BlockID, txInfix) - key.SetIndex(txMeta.Index) + key := appendTxKey(nil, txMeta.BlockNum, txMeta.BlockConflicts, txMeta.Index, txFlag) tx, err := c.repo.getTransaction(key) if err != nil { return nil, nil, err @@ -230,8 +251,7 @@ func (c *Chain) GetTransactionReceipt(txID thor.Bytes32) (*tx.Receipt, error) { return nil, err } - key := makeTxKey(txMeta.BlockID, receiptInfix) - key.SetIndex(txMeta.Index) + key := appendTxKey(nil, txMeta.BlockNum, txMeta.BlockConflicts, txMeta.Index, receiptFlag) receipt, err := c.repo.getReceipt(key) if err != nil { return nil, err @@ -352,22 +372,15 @@ func (r *Repository) NewChain(headID thor.Bytes32) *Chain { return newChain(r, headID) } -func (r *Repository) indexBlock(parentConflicts uint32, newBlockID thor.Bytes32, newConflicts uint32) error { - var ( - newNum = block.Number(newBlockID) - root thor.Bytes32 - ) - - if newNum != 0 { // not a genesis block - root = trie.NonCryptoNodeHash - } - - trie := r.db.NewNonCryptoTrie(IndexTrieName, root, newNum-1, parentConflicts) +func (r *Repository) indexBlock(parentRoot trie.Root, newBlockID thor.Bytes32, newConflicts uint32) error { + t := r.db.NewTrie(IndexTrieName, parentRoot) // map block number to block ID - if err := trie.Update(newBlockID[:4], newBlockID[:], nil); err != nil { + if err := t.Update(newBlockID[:4], newBlockID[:], nil); err != nil { return err } - - _, commit := trie.Stage(newNum, newConflicts) - return commit() + return t.Commit( + trie.Version{ + Major: block.Number(newBlockID), + Minor: newConflicts}, + true) } diff --git a/chain/chain_test.go b/chain/chain_test.go index d61b38c52..1b6a3f970 100644 --- a/chain/chain_test.go +++ b/chain/chain_test.go @@ -3,15 +3,16 @@ // Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying // file LICENSE or -package chain_test +package chain import ( "testing" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" "github.com/vechain/thor/v2/block" - "github.com/vechain/thor/v2/chain" + "github.com/vechain/thor/v2/test/datagen" "github.com/vechain/thor/v2/thor" "github.com/vechain/thor/v2/tx" ) @@ -29,18 +30,18 @@ func TestChain(t *testing.T) { _, repo := newTestRepo() b1 := newBlock(repo.GenesisBlock(), 10, tx1) - tx1Meta := &chain.TxMeta{BlockID: b1.Header().ID(), Index: 0, Reverted: false} + tx1Meta := &TxMeta{BlockNum: 1, Index: 0, Reverted: false} tx1Receipt := &tx.Receipt{} - repo.AddBlock(b1, tx.Receipts{tx1Receipt}, 0) + repo.AddBlock(b1, tx.Receipts{tx1Receipt}, 0, false) b2 := newBlock(b1, 20) - repo.AddBlock(b2, nil, 0) + repo.AddBlock(b2, nil, 0, false) b3 := newBlock(b2, 30) - repo.AddBlock(b3, nil, 0) + repo.AddBlock(b3, nil, 0, false) b3x := newBlock(b2, 30) - repo.AddBlock(b3x, nil, 1) + repo.AddBlock(b3x, nil, 1, false) c := repo.NewChain(b3.Header().ID()) @@ -48,13 +49,26 @@ func TestChain(t *testing.T) { assert.Equal(t, M(b3.Header().ID(), nil), M(c.GetBlockID(3))) assert.Equal(t, M(b3.Header(), nil), M(c.GetBlockHeader(3))) assert.Equal(t, M(block.Compose(b3.Header(), b3.Transactions()), nil), M(c.GetBlock(3))) + assert.Equal(t, repo.NewBestChain().GenesisID(), 
repo.GenesisBlock().Header().ID()) _, err := c.GetBlockID(4) assert.True(t, c.IsNotFound(err)) assert.Equal(t, M(tx1Meta, nil), M(c.GetTransactionMeta(tx1.ID()))) - assert.Equal(t, M(tx1, tx1Meta, nil), M(c.GetTransaction(tx1.ID()))) - assert.Equal(t, M(tx1Receipt, nil), M(c.GetTransactionReceipt(tx1.ID()))) + { + tx, meta, err := c.GetTransaction(tx1.ID()) + assert.Nil(t, err) + assert.Equal(t, tx1Meta, meta) + assert.Equal(t, tx1.ID(), tx.ID()) + } + { + r, err := c.GetTransactionReceipt(tx1.ID()) + assert.Nil(t, err) + got, _ := rlp.EncodeToBytes(r) + want, _ := rlp.EncodeToBytes(tx1Receipt) + assert.Equal(t, want, got) + } + _, err = c.GetTransactionMeta(thor.Bytes32{}) assert.True(t, c.IsNotFound(err)) @@ -84,3 +98,27 @@ func TestChain(t *testing.T) { _, err = dangleChain.Exclude(c1) assert.Error(t, err) } + +func TestHasTransaction(t *testing.T) { + _, repo := newTestRepo() + + parent := repo.GenesisBlock() + for i := 1; i <= 101; i++ { + b := newBlock(parent, uint64(i)*10) + asBest := i == 101 + repo.AddBlock(b, nil, 0, asBest) + parent = b + } + + has, err := repo.NewBestChain().HasTransaction(datagen.RandomHash(), 0) + assert.Nil(t, err) + assert.False(t, has) + + tx1 := newTx() + bx := newBlock(parent, 10020, tx1) + repo.AddBlock(bx, tx.Receipts{&tx.Receipt{}}, 0, true) + + has, err = repo.NewBestChain().HasTransaction(tx1.ID(), 0) + assert.Nil(t, err) + assert.True(t, has) +} diff --git a/chain/metric.go b/chain/metrics.go similarity index 100% rename from chain/metric.go rename to chain/metrics.go diff --git a/chain/persist.go b/chain/persist.go index fa1f97a9d..0a73b98ac 100644 --- a/chain/persist.go +++ b/chain/persist.go @@ -12,13 +12,16 @@ import ( "github.com/vechain/thor/v2/block" "github.com/vechain/thor/v2/kv" "github.com/vechain/thor/v2/thor" - "github.com/vechain/thor/v2/tx" + "github.com/vechain/thor/v2/trie" ) -const ( - txInfix = byte(0) - receiptInfix = byte(1) -) +// appendTxKey composes the key to access tx or receipt. +func appendTxKey(buf []byte, blockNum, blockConflicts uint32, index uint64, flag byte) []byte { + buf = binary.BigEndian.AppendUint32(buf, blockNum) + buf = binary.AppendUvarint(buf, uint64(blockConflicts)) + buf = append(buf, flag) + return binary.AppendUvarint(buf, index) +} // BlockSummary presents block summary. type BlockSummary struct { @@ -26,21 +29,29 @@ type BlockSummary struct { Txs []thor.Bytes32 Size uint64 Conflicts uint32 - SteadyNum uint32 } -// the key for tx/receipt. -// it consists of: ( block id | infix | index ) -type txKey [32 + 1 + 8]byte - -func makeTxKey(blockID thor.Bytes32, infix byte) (k txKey) { - copy(k[:], blockID[:]) - k[32] = infix - return +// Root returns state root for accessing state trie. +func (s *BlockSummary) Root() trie.Root { + return trie.Root{ + Hash: s.Header.StateRoot(), + Ver: trie.Version{ + Major: s.Header.Number(), + Minor: s.Conflicts, + }, + } } -func (k *txKey) SetIndex(i uint64) { - binary.BigEndian.PutUint64(k[33:], i) +// IndexRoot returns index root for accessing index trie. 
+func (s *BlockSummary) IndexRoot() trie.Root {
+    return trie.Root{
+        // the index trie skips hashing, so just provide a non-zero hash here
+        Hash: thor.BytesToBytes32([]byte{1}),
+        Ver: trie.Version{
+            Major: s.Header.Number(),
+            Minor: s.Conflicts,
+        },
+    }
 }

 func saveRLP(w kv.Putter, key []byte, val interface{}) error {
@@ -63,6 +74,9 @@ func saveBlockSummary(w kv.Putter, summary *BlockSummary) error {
     return saveRLP(w, summary.Header.ID().Bytes(), summary)
 }

+// indexChainHead puts a header into the store: it writes the new block id and deletes the
+// parent id, so only one head block id is kept per branch (fork). All possible fork heads
+// can therefore be found by iterating the head store.
 func indexChainHead(w kv.Putter, header *block.Header) error {
     if err := w.Delete(header.ParentID().Bytes()); err != nil {
         return err
@@ -78,27 +92,3 @@ func loadBlockSummary(r kv.Getter, id thor.Bytes32) (*BlockSummary, error) {
     }
     return &summary, nil
 }
-
-func saveTransaction(w kv.Putter, key txKey, tx *tx.Transaction) error {
-    return saveRLP(w, key[:], tx)
-}
-
-func loadTransaction(r kv.Getter, key txKey) (*tx.Transaction, error) {
-    var tx tx.Transaction
-    if err := loadRLP(r, key[:], &tx); err != nil {
-        return nil, err
-    }
-    return &tx, nil
-}
-
-func saveReceipt(w kv.Putter, key txKey, receipt *tx.Receipt) error {
-    return saveRLP(w, key[:], receipt)
-}
-
-func loadReceipt(r kv.Getter, key txKey) (*tx.Receipt, error) {
-    var receipt tx.Receipt
-    if err := loadRLP(r, key[:], &receipt); err != nil {
-        return nil, err
-    }
-    return &receipt, nil
-}
diff --git a/chain/repository.go b/chain/repository.go
index 44b8b7e6b..58ce6ae17 100644
--- a/chain/repository.go
+++ b/chain/repository.go
@@ -17,20 +17,25 @@ import (
     "github.com/vechain/thor/v2/kv"
     "github.com/vechain/thor/v2/muxdb"
     "github.com/vechain/thor/v2/thor"
+    "github.com/vechain/thor/v2/trie"
     "github.com/vechain/thor/v2/tx"
 )

 const (
-    dataStoreName    = "chain.data"
-    propStoreName    = "chain.props"
-    headStoreName    = "chain.heads"
-    txIndexStoreName = "chain.txi"
+    hdrStoreName     = "chain.hdr"   // for block headers
+    bodyStoreName    = "chain.body"  // for block bodies and receipts
+    propStoreName    = "chain.props" // for named properties such as the best block id
+    headStoreName    = "chain.heads" // for chain heads (including uncles)
+    txIndexStoreName = "chain.txi"   // for tx metadata
+
+    txFlag         = byte(0) // flag byte of the key for saving tx blob
+    receiptFlag    = byte(1) // flag byte of the key for saving receipt blob
+    txFilterKeyLen = 8
 )

 var (
-    errNotFound      = errors.New("not found")
-    bestBlockIDKey   = []byte("best-block-id")
-    steadyBlockIDKey = []byte("steady-block-id")
+    errNotFound    = errors.New("not found")
+    bestBlockIDKey = []byte("best-block-id")
 )

 // Repository stores block headers, txs and receipts.
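For reference, the new body store addresses tx and receipt blobs with appendTxKey (added to persist.go earlier in this diff). A small sketch of that key layout, assuming only encoding/binary; the fixed-width big-endian block-number prefix is what keeps blobs grouped and sorted by height in the underlying KV store:

package sketch

import "encoding/binary"

// appendTxKeySketch mirrors the appendTxKey layout: 4-byte big-endian block
// number, uvarint conflicts, one flag byte (0 = tx blob, 1 = receipt blob),
// then a uvarint index within the block.
func appendTxKeySketch(blockNum, blockConflicts uint32, index uint64, flag byte) []byte {
    key := binary.BigEndian.AppendUint32(nil, blockNum) // sortable prefix
    key = binary.AppendUvarint(key, uint64(blockConflicts))
    key = append(key, flag)
    return binary.AppendUvarint(key, index)
}

@@ -38,15 +43,16 @@ var (
 // It's thread-safe.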
type Repository struct { db *muxdb.MuxDB - data kv.Store - head kv.Store - props kv.Store + hdrStore kv.Store + bodyStore kv.Store + propStore kv.Store + headStore kv.Store txIndexer kv.Store - genesis *block.Block + genesis *block.Block + tag byte + bestSummary atomic.Value - steadyID atomic.Value - tag byte tick co.Signal caches struct { @@ -74,9 +80,10 @@ func NewRepository(db *muxdb.MuxDB, genesis *block.Block) (*Repository, error) { genesisID := genesis.Header().ID() repo := &Repository{ db: db, - data: db.NewStore(dataStoreName), - head: db.NewStore(headStoreName), - props: db.NewStore(propStoreName), + hdrStore: db.NewStore(hdrStoreName), + bodyStore: db.NewStore(bodyStoreName), + propStore: db.NewStore(propStoreName), + headStore: db.NewStore(headStoreName), txIndexer: db.NewStore(txIndexStoreName), genesis: genesis, tag: genesisID[31], @@ -86,17 +93,15 @@ func NewRepository(db *muxdb.MuxDB, genesis *block.Block) (*Repository, error) { repo.caches.txs = newCache(2048) repo.caches.receipts = newCache(2048) - if val, err := repo.props.Get(bestBlockIDKey); err != nil { - if !repo.props.IsNotFound(err) { + if val, err := repo.propStore.Get(bestBlockIDKey); err != nil { + if !repo.propStore.IsNotFound(err) { return nil, err } - if err := repo.indexBlock(0, genesis.Header().ID(), 0); err != nil { + if err := repo.indexBlock(trie.Root{}, genesis.Header().ID(), 0); err != nil { return nil, err } - if summary, err := repo.saveBlock(genesis, nil, 0, 0); err != nil { - return nil, err - } else if err := repo.setBestBlockSummary(summary); err != nil { + if _, err := repo.saveBlock(genesis, nil, 0, true); err != nil { return nil, err } } else { @@ -116,14 +121,6 @@ func NewRepository(db *muxdb.MuxDB, genesis *block.Block) (*Repository, error) { repo.bestSummary.Store(summary) } - if val, err := repo.props.Get(steadyBlockIDKey); err != nil { - if !repo.props.IsNotFound(err) { - return nil, err - } - repo.steadyID.Store(genesis.Header().ID()) - } else { - repo.steadyID.Store(thor.BytesToBytes32(val)) - } return repo, nil } @@ -142,115 +139,89 @@ func (r *Repository) BestBlockSummary() *BlockSummary { return r.bestSummary.Load().(*BlockSummary) } -// SetBestBlockID set the given block id as best block id. -func (r *Repository) SetBestBlockID(id thor.Bytes32) (err error) { - defer func() { - if err == nil { - r.tick.Broadcast() - } - }() - summary, err := r.GetBlockSummary(id) - if err != nil { - return err - } - return r.setBestBlockSummary(summary) -} - -func (r *Repository) setBestBlockSummary(summary *BlockSummary) error { - if err := r.props.Put(bestBlockIDKey, summary.Header.ID().Bytes()); err != nil { - return err - } - r.bestSummary.Store(summary) - return nil -} - -// SteadyBlockID return the head block id of the steady chain. -func (r *Repository) SteadyBlockID() thor.Bytes32 { - return r.steadyID.Load().(thor.Bytes32) -} - -// SetSteadyBlockID set the given block id as the head block id of the steady chain. -func (r *Repository) SetSteadyBlockID(id thor.Bytes32) error { - prev := r.steadyID.Load().(thor.Bytes32) - - if has, err := r.NewChain(id).HasBlock(prev); err != nil { - return err - } else if !has { - // the previous steady id is not on the chain of the new id. 
- return errors.New("invalid new steady block id") - } - if err := r.props.Put(steadyBlockIDKey, id[:]); err != nil { - return err - } - r.steadyID.Store(id) - return nil -} - -func (r *Repository) saveBlock(block *block.Block, receipts tx.Receipts, conflicts, steadyNum uint32) (*BlockSummary, error) { +func (r *Repository) saveBlock(block *block.Block, receipts tx.Receipts, conflicts uint32, asBest bool) (*BlockSummary, error) { var ( - header = block.Header() - id = header.ID() - txs = block.Transactions() - summary = BlockSummary{header, []thor.Bytes32{}, uint64(block.Size()), conflicts, steadyNum} - bulk = r.db.NewStore("").Bulk() - indexPutter = kv.Bucket(txIndexStoreName).NewPutter(bulk) - dataPutter = kv.Bucket(dataStoreName).NewPutter(bulk) - headPutter = kv.Bucket(headStoreName).NewPutter(bulk) + header = block.Header() + id = header.ID() + num = header.Number() + txs = block.Transactions() + txIDs = []thor.Bytes32{} + bulk = r.db.NewStore("").Bulk() + hdrPutter = kv.Bucket(hdrStoreName).NewPutter(bulk) + bodyPutter = kv.Bucket(bodyStoreName).NewPutter(bulk) + propPutter = kv.Bucket(propStoreName).NewPutter(bulk) + headPutter = kv.Bucket(headStoreName).NewPutter(bulk) + txIndexPutter = kv.Bucket(txIndexStoreName).NewPutter(bulk) + keyBuf []byte ) if len(txs) > 0 { - // index txs - buf := make([]byte, 64) - copy(buf[32:], id[:]) + // index and save txs for i, tx := range txs { txid := tx.ID() - summary.Txs = append(summary.Txs, txid) + txIDs = append(txIDs, txid) - // to accelerate point access - if err := indexPutter.Put(txid[:], nil); err != nil { + // write the filter key + if err := txIndexPutter.Put(txid[:txFilterKeyLen], nil); err != nil { return nil, err } + // write tx metadata + keyBuf = append(keyBuf[:0], txid[:]...) + keyBuf = binary.AppendUvarint(keyBuf, uint64(header.Number())) + keyBuf = binary.AppendUvarint(keyBuf, uint64(conflicts)) - copy(buf, txid[:]) - if err := saveRLP(indexPutter, buf, &storageTxMeta{ + if err := saveRLP(txIndexPutter, keyBuf, &storageTxMeta{ Index: uint64(i), Reverted: receipts[i].Reverted, }); err != nil { return nil, err } - } - // save tx & receipt data - key := makeTxKey(id, txInfix) - for i, tx := range txs { - key.SetIndex(uint64(i)) - if err := saveTransaction(dataPutter, key, tx); err != nil { + // write the tx blob + keyBuf = appendTxKey(keyBuf[:0], num, conflicts, uint64(i), txFlag) + if err := saveRLP(bodyPutter, keyBuf[:], tx); err != nil { return nil, err } - r.caches.txs.Add(key, tx) + r.caches.txs.Add(string(keyBuf), tx) } - key = makeTxKey(id, receiptInfix) + + // save receipts for i, receipt := range receipts { - key.SetIndex(uint64(i)) - if err := saveReceipt(dataPutter, key, receipt); err != nil { + keyBuf = appendTxKey(keyBuf[:0], num, conflicts, uint64(i), receiptFlag) + if err := saveRLP(bodyPutter, keyBuf, receipt); err != nil { return nil, err } - r.caches.receipts.Add(key, receipt) + r.caches.receipts.Add(string(keyBuf), receipt) } } if err := indexChainHead(headPutter, header); err != nil { return nil, err } - if err := saveBlockSummary(dataPutter, &summary); err != nil { + summary := BlockSummary{header, txIDs, uint64(block.Size()), conflicts} + if err := saveBlockSummary(hdrPutter, &summary); err != nil { + return nil, err + } + + if asBest { + if err := propPutter.Put(bestBlockIDKey, id[:]); err != nil { + return nil, err + } + } + + if err := bulk.Write(); err != nil { return nil, err } r.caches.summaries.Add(id, &summary) - return &summary, bulk.Write() + if asBest { + r.bestSummary.Store(&summary) + 
r.tick.Broadcast()
+    }
+    return &summary, nil
 }

 // AddBlock add a new block with its receipts into repository.
-func (r *Repository) AddBlock(newBlock *block.Block, receipts tx.Receipts, conflicts uint32) error {
+func (r *Repository) AddBlock(newBlock *block.Block, receipts tx.Receipts, conflicts uint32, asBest bool) error {
     parentSummary, err := r.GetBlockSummary(newBlock.Header().ParentID())
     if err != nil {
         if r.IsNotFound(err) {
@@ -258,21 +229,11 @@ func (r *Repository) AddBlock(newBlock *block.Block, receipts tx.Receipts, confl
         }
         return err
     }
-    if err := r.indexBlock(parentSummary.Conflicts, newBlock.Header().ID(), conflicts); err != nil {
+    if err := r.indexBlock(parentSummary.IndexRoot(), newBlock.Header().ID(), conflicts); err != nil {
         return err
     }
-    steadyNum := parentSummary.SteadyNum // initially inherits parent's steady num.
-    newSteadyID := r.steadyID.Load().(thor.Bytes32)
-    if newSteadyNum := block.Number(newSteadyID); steadyNum != newSteadyNum {
-        if has, err := r.NewChain(parentSummary.Header.ID()).HasBlock(newSteadyID); err != nil {
-            return err
-        } else if has {
-            // the chain of the new block contains the new steady id,
-            steadyNum = newSteadyNum
-        }
-    }
-    if _, err := r.saveBlock(newBlock, receipts, conflicts, steadyNum); err != nil {
+    if _, err := r.saveBlock(newBlock, receipts, conflicts, asBest); err != nil {
         return err
     }
     return nil
@@ -280,27 +241,28 @@ func (r *Repository) AddBlock(newBlock *block.Block, receipts tx.Receipts, confl

 // ScanConflicts returns the count of saved blocks with the given blockNum.
 func (r *Repository) ScanConflicts(blockNum uint32) (uint32, error) {
-    var prefix [4]byte
-    binary.BigEndian.PutUint32(prefix[:], blockNum)
+    prefix := binary.BigEndian.AppendUint32(nil, blockNum)

-    iter := r.data.Iterate(kv.Range(*util.BytesPrefix(prefix[:])))
+    iter := r.hdrStore.Iterate(kv.Range(*util.BytesPrefix(prefix)))
     defer iter.Release()

     count := uint32(0)
     for iter.Next() {
-        if len(iter.Key()) == 32 {
-            count++
-        }
+        count++
     }
     return count, iter.Error()
 }

 // ScanHeads returns all head blockIDs from the given blockNum(included) in descending order.
+// It returns the head block ids of all forks stored in the local database from the given
+// block number onward. The following example will return B' and C.
+// A -> B -> C
+//
+//  \ -> B'
 func (r *Repository) ScanHeads(from uint32) ([]thor.Bytes32, error) {
-    var start [4]byte
-    binary.BigEndian.PutUint32(start[:], from)
+    start := binary.BigEndian.AppendUint32(nil, from)

-    iter := r.head.Iterate(kv.Range{Start: start[:]})
+    iter := r.headStore.Iterate(kv.Range{Start: start})
     defer iter.Release()

     heads := make([]thor.Bytes32, 0, 16)
@@ -318,7 +280,7 @@ func (r *Repository) ScanHeads(from uint32) ([]thor.Bytes32, error) {

 // GetMaxBlockNum returns the max committed block number.
 func (r *Repository) GetMaxBlockNum() (uint32, error) {
-    iter := r.data.Iterate(kv.Range{})
+    iter := r.hdrStore.Iterate(kv.Range{})
     defer iter.Release()

     if iter.Last() {
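A possible usage sketch of ScanHeads as documented above, assuming the vechain/thor chain and block packages that appear elsewhere in this diff; the printing helper is illustrative only, not part of the codebase:

package sketch

import (
    "fmt"

    "github.com/vechain/thor/v2/block"
    "github.com/vechain/thor/v2/chain"
)

// printForkHeads lists every branch head known to the repository, newest
// first, matching the A -> B -> C / B' example in the comment above.
func printForkHeads(repo *chain.Repository) error {
    heads, err := repo.ScanHeads(0) // all branch heads from genesis upward
    if err != nil {
        return err
    }
    for _, id := range heads {
        fmt.Printf("head %x at block #%d\n", id, block.Number(id))
    }
    return nil
}

@@ -330,7 +292,7 @@ func (r *Repository) GetMaxBlockNum() (uint32, error) {

 // GetBlockSummary get block summary by block id.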
func (r *Repository) GetBlockSummary(id thor.Bytes32) (*BlockSummary, error) { blk, cached, err := r.caches.summaries.GetOrLoad(id, func() (interface{}, error) { - return loadBlockSummary(r.data, id) + return loadBlockSummary(r.hdrStore, id) }) if err != nil { return nil, err @@ -339,19 +301,18 @@ func (r *Repository) GetBlockSummary(id thor.Bytes32) (*BlockSummary, error) { if cached { if r.caches.stats.summaries.Hit()%2000 == 0 { _, hit, miss := r.caches.stats.summaries.Stats() - metricCacheHitMiss().SetWithLabel(hit, map[string]string{"type": "blocks", "event": "hit"}) + metricCacheHitMiss().SetWithLabel(hit, map[string]string{"type": "block-summary", "event": "hit"}) metricCacheHitMiss().SetWithLabel(miss, map[string]string{"type": "blocks", "event": "miss"}) } } else { r.caches.stats.summaries.Miss() } - return blk.(*BlockSummary), nil } -func (r *Repository) getTransaction(key txKey) (*tx.Transaction, error) { - trx, cached, err := r.caches.txs.GetOrLoad(key, func() (interface{}, error) { - return loadTransaction(r.data, key) +func (r *Repository) getTransaction(key []byte) (*tx.Transaction, error) { + trx, cached, err := r.caches.txs.GetOrLoad(string(key), func() (interface{}, error) { + return loadTransaction(r.bodyStore, key) }) if err != nil { return nil, err @@ -369,6 +330,14 @@ func (r *Repository) getTransaction(key txKey) (*tx.Transaction, error) { return trx.(*tx.Transaction), nil } +func loadTransaction(r kv.Getter, key []byte) (*tx.Transaction, error) { + var tx tx.Transaction + if err := loadRLP(r, key[:], &tx); err != nil { + return nil, err + } + return &tx, nil +} + // GetBlockTransactions get all transactions of the block for given block id. func (r *Repository) GetBlockTransactions(id thor.Bytes32) (tx.Transactions, error) { summary, err := r.GetBlockSummary(id) @@ -378,9 +347,9 @@ func (r *Repository) GetBlockTransactions(id thor.Bytes32) (tx.Transactions, err if n := len(summary.Txs); n > 0 { txs := make(tx.Transactions, n) - key := makeTxKey(id, txInfix) + var key []byte for i := range summary.Txs { - key.SetIndex(uint64(i)) + key := appendTxKey(key[:0], summary.Header.Number(), summary.Conflicts, uint64(i), txFlag) txs[i], err = r.getTransaction(key) if err != nil { return nil, err @@ -404,9 +373,9 @@ func (r *Repository) GetBlock(id thor.Bytes32) (*block.Block, error) { return block.Compose(summary.Header, txs), nil } -func (r *Repository) getReceipt(key txKey) (*tx.Receipt, error) { - receipt, cached, err := r.caches.receipts.GetOrLoad(key, func() (interface{}, error) { - return loadReceipt(r.data, key) +func (r *Repository) getReceipt(key []byte) (*tx.Receipt, error) { + receipt, cached, err := r.caches.receipts.GetOrLoad(string(key), func() (interface{}, error) { + return loadReceipt(r.bodyStore, key) }) if err != nil { return nil, err @@ -420,10 +389,17 @@ func (r *Repository) getReceipt(key txKey) (*tx.Receipt, error) { } else { r.caches.stats.receipts.Miss() } - return receipt.(*tx.Receipt), nil } +func loadReceipt(r kv.Getter, key []byte) (*tx.Receipt, error) { + var receipt tx.Receipt + if err := loadRLP(r, key[:], &receipt); err != nil { + return nil, err + } + return &receipt, nil +} + // GetBlockReceipts get all tx receipts of the block for given block id. 
func (r *Repository) GetBlockReceipts(id thor.Bytes32) (tx.Receipts, error) { summary, err := r.GetBlockSummary(id) @@ -433,9 +409,9 @@ func (r *Repository) GetBlockReceipts(id thor.Bytes32) (tx.Receipts, error) { if n := len(summary.Txs); n > 0 { receipts := make(tx.Receipts, n) - key := makeTxKey(id, receiptInfix) + var key []byte for i := range summary.Txs { - key.SetIndex(uint64(i)) + key := appendTxKey(key[:0], summary.Header.Number(), summary.Conflicts, uint64(i), receiptFlag) receipts[i], err = r.getReceipt(key) if err != nil { return nil, err diff --git a/chain/repository_test.go b/chain/repository_test.go index 1391acb8d..81bef17a5 100644 --- a/chain/repository_test.go +++ b/chain/repository_test.go @@ -3,7 +3,7 @@ // Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying // file LICENSE or -package chain_test +package chain import ( "testing" @@ -11,10 +11,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/assert" "github.com/vechain/thor/v2/block" - "github.com/vechain/thor/v2/chain" - "github.com/vechain/thor/v2/genesis" "github.com/vechain/thor/v2/muxdb" - "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" "github.com/vechain/thor/v2/tx" ) @@ -23,26 +20,19 @@ func M(args ...interface{}) []interface{} { return args } -func newTestRepo() (*muxdb.MuxDB, *chain.Repository) { +func newTestRepo() (*muxdb.MuxDB, *Repository) { db := muxdb.NewMem() - g := genesis.NewDevnet() - b0, _, _, _ := g.Build(state.NewStater(db)) + b0 := new(block.Builder). + ParentID(thor.Bytes32{0xff, 0xff, 0xff, 0xff}). + Build() - repo, err := chain.NewRepository(db, b0) + repo, err := NewRepository(db, b0) if err != nil { panic(err) } return db, repo } -func reopenRepo(db *muxdb.MuxDB, b0 *block.Block) *chain.Repository { - repo, err := chain.NewRepository(db, b0) - if err != nil { - panic(err) - } - return repo -} - func newBlock(parent *block.Block, ts uint64, txs ...*tx.Transaction) *block.Block { builder := new(block.Builder). ParentID(parent.Header().ID()). 
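As the test hunks around here show, the separate SetBestBlockID step is gone: callers pass asBest to AddBlock, which persists the block and promotes it to best in the same write batch. A sketch of the reworked flow under the APIs shown in this diff (AddBlock with the asBest flag, then opening state at BlockSummary.Root()); the helper name and the zero conflicts value are assumptions for illustration:

package sketch

import (
    "github.com/vechain/thor/v2/block"
    "github.com/vechain/thor/v2/chain"
    "github.com/vechain/thor/v2/muxdb"
    "github.com/vechain/thor/v2/state"
    "github.com/vechain/thor/v2/tx"
)

// commitBest saves a block as the new best block and opens state at it.
func commitBest(db *muxdb.MuxDB, repo *chain.Repository, b *block.Block, receipts tx.Receipts) (*state.State, error) {
    // asBest=true replaces the old AddBlock + SetBestBlockID pair.
    if err := repo.AddBlock(b, receipts, 0, true); err != nil { // conflicts assumed 0
        return nil, err
    }
    // BlockSummary.Root() bundles the state root with its trie version,
    // which is what state.New expects after this change.
    return state.New(db, repo.BestBlockSummary().Root()), nil
}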
@@ -58,12 +48,11 @@ func newBlock(parent *block.Block, ts uint64, txs ...*tx.Transaction) *block.Blo return b.WithSignature(sig) } -func TestRepository(t *testing.T) { - db := muxdb.NewMem() - g := genesis.NewDevnet() - b0, _, _, _ := g.Build(state.NewStater(db)) +func TestRepositoryFunc(t *testing.T) { + db, repo1 := newTestRepo() + b0 := repo1.GenesisBlock() - repo1, err := chain.NewRepository(db, b0) + repo1, err := NewRepository(db, b0) if err != nil { panic(err) } @@ -75,14 +64,15 @@ func TestRepository(t *testing.T) { receipt1 := &tx.Receipt{} b1 := newBlock(repo1.GenesisBlock(), 10, tx1) - assert.Nil(t, repo1.AddBlock(b1, tx.Receipts{receipt1}, 0)) - + assert.Nil(t, repo1.AddBlock(b1, tx.Receipts{receipt1}, 0, false)) // best block not set, so still 0 assert.Equal(t, uint32(0), repo1.BestBlockSummary().Header.Number()) - repo1.SetBestBlockID(b1.Header().ID()) - repo2, _ := chain.NewRepository(db, b0) - for _, repo := range []*chain.Repository{repo1, repo2} { + assert.Nil(t, repo1.AddBlock(b1, tx.Receipts{receipt1}, 0, true)) + assert.Equal(t, uint32(1), repo1.BestBlockSummary().Header.Number()) + + repo2, _ := NewRepository(db, b0) + for _, repo := range []*Repository{repo1, repo2} { assert.Equal(t, b1.Header().ID(), repo.BestBlockSummary().Header.ID()) s, err := repo.GetBlockSummary(b1.Header().ID()) assert.Nil(t, err) @@ -99,54 +89,32 @@ func TestRepository(t *testing.T) { } } +func TestAddBlock(t *testing.T) { + _, repo := newTestRepo() + + err := repo.AddBlock(new(block.Builder).Build(), nil, 0, false) + assert.Error(t, err, "parent missing") + + b1 := newBlock(repo.GenesisBlock(), 10) + assert.Nil(t, repo.AddBlock(b1, nil, 0, false)) +} + func TestConflicts(t *testing.T) { _, repo := newTestRepo() b0 := repo.GenesisBlock() b1 := newBlock(b0, 10) - repo.AddBlock(b1, nil, 0) + repo.AddBlock(b1, nil, 0, false) assert.Equal(t, []interface{}{uint32(1), nil}, M(repo.GetMaxBlockNum())) assert.Equal(t, []interface{}{uint32(1), nil}, M(repo.ScanConflicts(1))) b1x := newBlock(b0, 20) - repo.AddBlock(b1x, nil, 1) + repo.AddBlock(b1x, nil, 1, false) assert.Equal(t, []interface{}{uint32(1), nil}, M(repo.GetMaxBlockNum())) assert.Equal(t, []interface{}{uint32(2), nil}, M(repo.ScanConflicts(1))) } -func TestSteadyBlockID(t *testing.T) { - db, repo := newTestRepo() - b0 := repo.GenesisBlock() - - assert.Equal(t, b0.Header().ID(), repo.SteadyBlockID()) - - b1 := newBlock(b0, 10) - repo.AddBlock(b1, nil, 0) - - assert.Nil(t, repo.SetSteadyBlockID(b1.Header().ID())) - assert.Equal(t, b1.Header().ID(), repo.SteadyBlockID()) - - b2 := newBlock(b1, 10) - repo.AddBlock(b2, nil, 0) - - assert.Nil(t, repo.SetSteadyBlockID(b2.Header().ID())) - assert.Equal(t, b2.Header().ID(), repo.SteadyBlockID()) - - b2x := newBlock(b1, 10) - repo.AddBlock(b2x, nil, 1) - assert.Error(t, repo.SetSteadyBlockID(b2x.Header().ID())) - assert.Equal(t, b2.Header().ID(), repo.SteadyBlockID()) - - b3 := newBlock(b2, 10) - repo.AddBlock(b3, nil, 0) - assert.Nil(t, repo.SetSteadyBlockID(b3.Header().ID())) - assert.Equal(t, b3.Header().ID(), repo.SteadyBlockID()) - - repo = reopenRepo(db, b0) - assert.Equal(t, b3.Header().ID(), repo.SteadyBlockID()) -} - func TestScanHeads(t *testing.T) { _, repo := newTestRepo() @@ -156,14 +124,14 @@ func TestScanHeads(t *testing.T) { assert.Equal(t, []thor.Bytes32{repo.GenesisBlock().Header().ID()}, heads) b1 := newBlock(repo.GenesisBlock(), 10) - err = repo.AddBlock(b1, nil, 0) + err = repo.AddBlock(b1, nil, 0, false) assert.Nil(t, err) heads, err = repo.ScanHeads(0) assert.Nil(t, err) 
assert.Equal(t, []thor.Bytes32{b1.Header().ID()}, heads) b2 := newBlock(b1, 20) - err = repo.AddBlock(b2, nil, 0) + err = repo.AddBlock(b2, nil, 0, false) assert.Nil(t, err) heads, err = repo.ScanHeads(0) assert.Nil(t, err) @@ -174,7 +142,7 @@ func TestScanHeads(t *testing.T) { assert.Equal(t, 0, len(heads)) b2x := newBlock(b1, 20) - err = repo.AddBlock(b2x, nil, 0) + err = repo.AddBlock(b2x, nil, 0, false) assert.Nil(t, err) heads, err = repo.ScanHeads(0) assert.Nil(t, err) @@ -186,7 +154,7 @@ func TestScanHeads(t *testing.T) { } b3 := newBlock(b2, 30) - err = repo.AddBlock(b3, nil, 0) + err = repo.AddBlock(b3, nil, 0, false) assert.Nil(t, err) heads, err = repo.ScanHeads(0) assert.Nil(t, err) @@ -201,7 +169,7 @@ func TestScanHeads(t *testing.T) { assert.Equal(t, []thor.Bytes32{b3.Header().ID()}, heads) b3x := newBlock(b2, 30) - err = repo.AddBlock(b3x, nil, 0) + err = repo.AddBlock(b3x, nil, 0, false) assert.Nil(t, err) heads, err = repo.ScanHeads(0) assert.Nil(t, err) diff --git a/cmd/thor/VERSION b/cmd/thor/VERSION index 399088bf4..ccbccc3dc 100644 --- a/cmd/thor/VERSION +++ b/cmd/thor/VERSION @@ -1 +1 @@ -2.1.6 +2.2.0 diff --git a/cmd/thor/main.go b/cmd/thor/main.go index 2cb638f2a..9409d7660 100644 --- a/cmd/thor/main.go +++ b/cmd/thor/main.go @@ -22,7 +22,7 @@ import ( "github.com/vechain/thor/v2/api" "github.com/vechain/thor/v2/bft" "github.com/vechain/thor/v2/cmd/thor/node" - "github.com/vechain/thor/v2/cmd/thor/optimizer" + "github.com/vechain/thor/v2/cmd/thor/pruner" "github.com/vechain/thor/v2/cmd/thor/solo" "github.com/vechain/thor/v2/genesis" "github.com/vechain/thor/v2/log" @@ -170,8 +170,9 @@ func defaultAction(ctx *cli.Context) error { logLevel := initLogger(lvl, ctx.Bool(jsonLogsFlag.Name)) // enable metrics as soon as possible + enableMetrics := ctx.Bool(enableMetricsFlag.Name) metricsURL := "" - if ctx.Bool(enableMetricsFlag.Name) { + if enableMetrics { metrics.InitializePrometheusMetrics() url, closeFunc, err := api.StartMetricsServer(ctx.String(metricsAddrFlag.Name)) if err != nil { @@ -194,6 +195,9 @@ func defaultAction(ctx *cli.Context) error { if err != nil { return err } + if enableMetrics { + mainDB.EnableMetrics() + } defer func() { log.Info("closing main database..."); mainDB.Close() }() logDB, err := openLogDB(instanceDir) @@ -282,8 +286,10 @@ func defaultAction(ctx *cli.Context) error { } defer p2pCommunicator.Stop() - optimizer := optimizer.New(mainDB, repo, !ctx.Bool(disablePrunerFlag.Name)) - defer func() { log.Info("stopping optimizer..."); optimizer.Stop() }() + if !ctx.Bool(disablePrunerFlag.Name) { + pruner := pruner.New(mainDB, repo) + defer func() { log.Info("stopping pruner..."); pruner.Stop() }() + } return node.New( master, @@ -318,8 +324,9 @@ func soloAction(ctx *cli.Context) error { } // enable metrics as soon as possible + enableMetrics := ctx.Bool(enableMetricsFlag.Name) metricsURL := "" - if ctx.Bool(enableMetricsFlag.Name) { + if enableMetrics { metrics.InitializePrometheusMetrics() url, closeFunc, err := api.StartMetricsServer(ctx.String(metricsAddrFlag.Name)) if err != nil { @@ -337,7 +344,7 @@ func soloAction(ctx *cli.Context) error { flagGenesis := ctx.String(genesisFlag.Name) if flagGenesis == "" { gene = genesis.NewDevnet() - forkConfig = thor.ForkConfig{} // Devnet forks from the start + forkConfig = thor.SoloFork } else { gene, forkConfig, err = parseGenesisFile(flagGenesis) if err != nil { @@ -356,6 +363,9 @@ func soloAction(ctx *cli.Context) error { if mainDB, err = openMainDB(ctx, instanceDir); err != nil { return err } + if 
enableMetrics { + mainDB.EnableMetrics() + } defer func() { log.Info("closing main database..."); mainDB.Close() }() if logDB, err = openLogDB(instanceDir); err != nil { @@ -364,7 +374,7 @@ func soloAction(ctx *cli.Context) error { defer func() { log.Info("closing log database..."); logDB.Close() }() } else { instanceDir = "Memory" - mainDB = openMemMainDB() + mainDB = openMemMainDB() // Skip metrics of in-memory DB logDB = openMemLogDB() } @@ -443,8 +453,10 @@ func soloAction(ctx *cli.Context) error { printStartupMessage2(gene, apiURL, "", metricsURL, adminURL) - optimizer := optimizer.New(mainDB, repo, !ctx.Bool(disablePrunerFlag.Name)) - defer func() { log.Info("stopping optimizer..."); optimizer.Stop() }() + if !ctx.Bool(disablePrunerFlag.Name) { + pruner := pruner.New(mainDB, repo) + defer func() { log.Info("stopping pruner..."); pruner.Stop() }() + } return solo.New(repo, state.NewStater(mainDB), diff --git a/cmd/thor/node/metrics.go b/cmd/thor/node/metrics.go index a6d56f2dc..422aee961 100644 --- a/cmd/thor/node/metrics.go +++ b/cmd/thor/node/metrics.go @@ -15,5 +15,4 @@ var ( metricBlockProcessedGas = metrics.LazyLoadGaugeVec("block_processed_gas_gauge", []string{"type"}) metricBlockProcessedDuration = metrics.LazyLoadHistogram("block_processed_duration_ms", metrics.Bucket10s) metricChainForkCount = metrics.LazyLoadCounter("chain_fork_count") - metricChainForkSize = metrics.LazyLoadGauge("chain_fork_gauge") ) diff --git a/cmd/thor/node/node.go b/cmd/thor/node/node.go index d103f227a..c8d07d995 100644 --- a/cmd/thor/node/node.go +++ b/cmd/thor/node/node.go @@ -360,8 +360,16 @@ func (n *Node) processBlock(newBlock *block.Block, stats *blockStats) (bool, err return errors.Wrap(err, "commit state") } + // sync the log-writing task + if logEnabled { + if err := n.logWorker.Sync(); err != nil { + log.Warn("failed to write logs", "err", err) + n.logDBFailed = true + } + } + // add the new block into repository - if err := n.repo.AddBlock(newBlock, receipts, conflicts); err != nil { + if err := n.repo.AddBlock(newBlock, receipts, conflicts, becomeNewBest); err != nil { return errors.Wrap(err, "add block") } @@ -374,18 +382,7 @@ func (n *Node) processBlock(newBlock *block.Block, stats *blockStats) (bool, err realElapsed := mclock.Now() - startTime - // sync the log-writing task - if logEnabled { - if err := n.logWorker.Sync(); err != nil { - logger.Warn("failed to write logs", "err", err) - n.logDBFailed = true - } - } - if becomeNewBest { - if err := n.repo.SetBestBlockID(newBlock.Header().ID()); err != nil { - return err - } n.processFork(newBlock, oldBest.Header.ID()) } @@ -490,15 +487,13 @@ func (n *Node) processFork(newBlock *block.Block, oldBestBlockID thor.Bytes32) { return } - // Set the gauge metric to the size of the fork (0 if there are no forks) - metricChainForkSize().Set(int64(len(sideIDs))) + metricChainForkCount().Add(int64(len(sideIDs))) if len(sideIDs) == 0 { return } if n := len(sideIDs); n >= 2 { - metricChainForkCount().Add(1) logger.Warn(fmt.Sprintf( `â‘‚â‘‚â‘‚â‘‚â‘‚â‘‚â‘‚â‘‚ FORK HAPPENED â‘‚â‘‚â‘‚â‘‚â‘‚â‘‚â‘‚â‘‚ side-chain: %v %v`, diff --git a/cmd/thor/node/node_benchmark_test.go b/cmd/thor/node/node_benchmark_test.go index 0f6d1f1f0..bdcf26f0b 100644 --- a/cmd/thor/node/node_benchmark_test.go +++ b/cmd/thor/node/node_benchmark_test.go @@ -343,11 +343,7 @@ func packTxsIntoBlock(thorChain *testchain.Chain, proposerAccount *genesis.DevAc return nil, err } - if err := thorChain.Repo().AddBlock(b1, receipts, 0); err != nil { - return nil, err - } - - if err := 
thorChain.Repo().SetBestBlockID(b1.Header().ID()); err != nil { + if err := thorChain.Repo().AddBlock(b1, receipts, 0, true); err != nil { return nil, err } @@ -473,9 +469,7 @@ func openTempMainDB(dir string) (*muxdb.MuxDB, error) { opts := muxdb.Options{ TrieNodeCacheSizeMB: cacheMB, - TrieRootCacheCapacity: 256, TrieCachedNodeTTL: 30, // 5min - TrieLeafBankSlotCapacity: 256, TrieDedupedPartitionFactor: math.MaxUint32, TrieWillCleanHistory: true, OpenFilesCacheCapacity: fdCache, @@ -491,9 +485,9 @@ func openTempMainDB(dir string) (*muxdb.MuxDB, error) { debug.SetGCPercent(int(gogc)) if opts.TrieWillCleanHistory { - opts.TrieHistPartitionFactor = 1000 + opts.TrieHistPartitionFactor = 256 } else { - opts.TrieHistPartitionFactor = 500000 + opts.TrieHistPartitionFactor = 524288 } db, err := muxdb.Open(filepath.Join(dir, "maindb"), &opts) diff --git a/cmd/thor/node/packer_loop.go b/cmd/thor/node/packer_loop.go index 675ab041b..f7d43413f 100644 --- a/cmd/thor/node/packer_loop.go +++ b/cmd/thor/node/packer_loop.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" "github.com/pkg/errors" + "github.com/vechain/thor/v2/log" "github.com/vechain/thor/v2/packer" "github.com/vechain/thor/v2/thor" "github.com/vechain/thor/v2/tx" @@ -168,8 +169,16 @@ func (n *Node) pack(flow *packer.Flow) (err error) { return errors.Wrap(err, "commit state") } + // sync the log-writing task + if logEnabled { + if err := n.logWorker.Sync(); err != nil { + log.Warn("failed to write logs", "err", err) + n.logDBFailed = true + } + } + // add the new block into repository - if err := n.repo.AddBlock(newBlock, receipts, conflicts); err != nil { + if err := n.repo.AddBlock(newBlock, receipts, conflicts, true); err != nil { return errors.Wrap(err, "add block") } @@ -181,18 +190,6 @@ func (n *Node) pack(flow *packer.Flow) (err error) { } realElapsed := mclock.Now() - startTime - // sync the log-writing task - if logEnabled { - if err := n.logWorker.Sync(); err != nil { - logger.Warn("failed to write logs", "err", err) - n.logDBFailed = true - } - } - - if err := n.repo.SetBestBlockID(newBlock.Header().ID()); err != nil { - return err - } - n.processFork(newBlock, oldBest.Header.ID()) commitElapsed := mclock.Now() - startTime - execElapsed diff --git a/cmd/thor/optimizer/optimizer.go b/cmd/thor/optimizer/optimizer.go deleted file mode 100644 index b61e75813..000000000 --- a/cmd/thor/optimizer/optimizer.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright (c) 2019 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -package optimizer - -import ( - "context" - "fmt" - "math" - "time" - - "github.com/ethereum/go-ethereum/rlp" - "github.com/pkg/errors" - "github.com/vechain/thor/v2/block" - "github.com/vechain/thor/v2/chain" - "github.com/vechain/thor/v2/co" - "github.com/vechain/thor/v2/log" - "github.com/vechain/thor/v2/muxdb" - "github.com/vechain/thor/v2/state" - "github.com/vechain/thor/v2/thor" - "github.com/vechain/thor/v2/trie" -) - -var logger = log.WithContext("pkg", "optimizer") - -const ( - propsStoreName = "optimizer.props" - statusKey = "status" -) - -// Optimizer is a background task to optimize tries. -type Optimizer struct { - db *muxdb.MuxDB - repo *chain.Repository - ctx context.Context - cancel func() - goes co.Goes -} - -// New creates and starts the optimizer. 
-func New(db *muxdb.MuxDB, repo *chain.Repository, prune bool) *Optimizer { - ctx, cancel := context.WithCancel(context.Background()) - o := &Optimizer{ - db: db, - repo: repo, - ctx: ctx, - cancel: cancel, - } - o.goes.Go(func() { - if err := o.loop(prune); err != nil { - if err != context.Canceled && errors.Cause(err) != context.Canceled { - logger.Warn("optimizer interrupted", "error", err) - } - } - }) - return o -} - -// Stop stops the optimizer. -func (p *Optimizer) Stop() { - p.cancel() - p.goes.Wait() -} - -// loop is the main loop. -func (p *Optimizer) loop(prune bool) error { - logger.Info("optimizer started") - - const ( - period = 2000 // the period to update leafbank. - prunePeriod = 10000 // the period to prune tries. - pruneReserved = 70000 // must be > thor.MaxStateHistory - ) - - var ( - status status - lastLogTime = time.Now().UnixNano() - propsStore = p.db.NewStore(propsStoreName) - ) - if err := status.Load(propsStore); err != nil { - return errors.Wrap(err, "load status") - } - - for { - // select target - target := status.Base + period - - targetChain, err := p.awaitUntilSteady(target) - if err != nil { - return errors.Wrap(err, "awaitUntilSteady") - } - startTime := time.Now().UnixNano() - - // dump account/storage trie leaves into leafbank - if err := p.dumpStateLeaves(targetChain, status.Base, target); err != nil { - return errors.Wrap(err, "dump state trie leaves") - } - - // prune index/account/storage tries - if prune && target > pruneReserved { - if pruneTarget := target - pruneReserved; pruneTarget >= status.PruneBase+prunePeriod { - if err := p.pruneTries(targetChain, status.PruneBase, pruneTarget); err != nil { - return errors.Wrap(err, "prune tries") - } - status.PruneBase = pruneTarget - } - } - - if now := time.Now().UnixNano(); now-lastLogTime > int64(time.Second*20) { - lastLogTime = now - logger.Info("optimized tries", - "range", fmt.Sprintf("#%v+%v", status.Base, target-status.Base), - "et", time.Duration(now-startTime), - ) - } - status.Base = target - if err := status.Save(propsStore); err != nil { - return errors.Wrap(err, "save status") - } - } -} - -// newStorageTrieIfUpdated creates a storage trie object from the account leaf if the storage trie updated since base. -func (p *Optimizer) newStorageTrieIfUpdated(accLeaf *trie.Leaf, base uint32) *muxdb.Trie { - if len(accLeaf.Meta) == 0 { - return nil - } - - var ( - acc state.Account - meta state.AccountMetadata - ) - if err := rlp.DecodeBytes(accLeaf.Value, &acc); err != nil { - panic(errors.Wrap(err, "decode account")) - } - - if err := rlp.DecodeBytes(accLeaf.Meta, &meta); err != nil { - panic(errors.Wrap(err, "decode account metadata")) - } - - if meta.StorageCommitNum >= base { - return p.db.NewTrie( - state.StorageTrieName(meta.StorageID), - thor.BytesToBytes32(acc.StorageRoot), - meta.StorageCommitNum, - meta.StorageDistinctNum, - ) - } - return nil -} - -// dumpStateLeaves dumps account/storage trie leaves updated within [base, target) into leafbank. 
-func (p *Optimizer) dumpStateLeaves(targetChain *chain.Chain, base, target uint32) error { - h, err := targetChain.GetBlockSummary(target - 1) - if err != nil { - return err - } - accTrie := p.db.NewTrie(state.AccountTrieName, h.Header.StateRoot(), h.Header.Number(), h.Conflicts) - accTrie.SetNoFillCache(true) - - var sTries []*muxdb.Trie - if err := accTrie.DumpLeaves(p.ctx, base, h.Header.Number(), func(leaf *trie.Leaf) *trie.Leaf { - if sTrie := p.newStorageTrieIfUpdated(leaf, base); sTrie != nil { - sTries = append(sTries, sTrie) - } - return leaf - }); err != nil { - return err - } - for _, sTrie := range sTries { - sTrie.SetNoFillCache(true) - if err := sTrie.DumpLeaves(p.ctx, base, h.Header.Number(), func(leaf *trie.Leaf) *trie.Leaf { - return &trie.Leaf{Value: leaf.Value} // skip metadata to save space - }); err != nil { - return err - } - } - return nil -} - -// dumpTrieNodes dumps index/account/storage trie nodes committed within [base, target] into deduped space. -func (p *Optimizer) dumpTrieNodes(targetChain *chain.Chain, base, target uint32) error { - summary, err := targetChain.GetBlockSummary(target - 1) - if err != nil { - return err - } - - // dump index trie - indexTrie := p.db.NewNonCryptoTrie(chain.IndexTrieName, trie.NonCryptoNodeHash, summary.Header.Number(), summary.Conflicts) - indexTrie.SetNoFillCache(true) - - if err := indexTrie.DumpNodes(p.ctx, base, nil); err != nil { - return err - } - - // dump account trie - accTrie := p.db.NewTrie(state.AccountTrieName, summary.Header.StateRoot(), summary.Header.Number(), summary.Conflicts) - accTrie.SetNoFillCache(true) - - var sTries []*muxdb.Trie - if err := accTrie.DumpNodes(p.ctx, base, func(leaf *trie.Leaf) { - if sTrie := p.newStorageTrieIfUpdated(leaf, base); sTrie != nil { - sTries = append(sTries, sTrie) - } - }); err != nil { - return err - } - - // dump storage tries - for _, sTrie := range sTries { - sTrie.SetNoFillCache(true) - if err := sTrie.DumpNodes(p.ctx, base, nil); err != nil { - return err - } - } - return nil -} - -// pruneTries prunes index/account/storage tries in the range [base, target). -func (p *Optimizer) pruneTries(targetChain *chain.Chain, base, target uint32) error { - if err := p.dumpTrieNodes(targetChain, base, target); err != nil { - return errors.Wrap(err, "dump trie nodes") - } - - cleanBase := base - if base == 0 { - // keeps genesis state history like the previous version. - cleanBase = 1 - } - if err := p.db.CleanTrieHistory(p.ctx, cleanBase, target); err != nil { - return errors.Wrap(err, "clean trie history") - } - return nil -} - -// awaitUntilSteady waits until the target block number becomes almost final(steady), -// and returns the steady chain. 
-func (p *Optimizer) awaitUntilSteady(target uint32) (*chain.Chain, error) { - // the knowned steady id is newer than target - if steadyID := p.repo.SteadyBlockID(); block.Number(steadyID) >= target { - return p.repo.NewChain(steadyID), nil - } - - const windowSize = 100000 - - backoff := uint32(0) - for { - best := p.repo.BestBlockSummary() - bestNum := best.Header.Number() - if bestNum > target+backoff { - var meanScore float64 - if bestNum > windowSize { - baseNum := bestNum - windowSize - baseHeader, err := p.repo.NewChain(best.Header.ID()).GetBlockHeader(baseNum) - if err != nil { - return nil, err - } - meanScore = math.Round(float64(best.Header.TotalScore()-baseHeader.TotalScore()) / float64(windowSize)) - } else { - meanScore = math.Round(float64(best.Header.TotalScore()) / float64(bestNum)) - } - set := make(map[thor.Address]struct{}) - // reverse iterate the chain and collect signers. - for i, prev := 0, best.Header; i < int(meanScore*3) && prev.Number() >= target; i++ { - signer, _ := prev.Signer() - set[signer] = struct{}{} - if len(set) >= int(math.Round((meanScore+1)/2)) { - // got enough unique signers - steadyID := prev.ID() - if err := p.repo.SetSteadyBlockID(steadyID); err != nil { - return nil, err - } - return p.repo.NewChain(steadyID), nil - } - parent, err := p.repo.GetBlockSummary(prev.ParentID()) - if err != nil { - return nil, err - } - prev = parent.Header - } - backoff += uint32(meanScore) - } else { - select { - case <-p.ctx.Done(): - return nil, p.ctx.Err() - case <-time.After(time.Second): - } - } - } -} diff --git a/cmd/thor/pruner/pruner.go b/cmd/thor/pruner/pruner.go new file mode 100644 index 000000000..2fca9da92 --- /dev/null +++ b/cmd/thor/pruner/pruner.go @@ -0,0 +1,241 @@ +// Copyright (c) 2019 The VeChainThor developers + +// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying +// file LICENSE or + +package pruner + +import ( + "context" + "fmt" + "math" + "time" + + "github.com/ethereum/go-ethereum/rlp" + "github.com/pkg/errors" + "github.com/vechain/thor/v2/chain" + "github.com/vechain/thor/v2/co" + "github.com/vechain/thor/v2/log" + "github.com/vechain/thor/v2/muxdb" + "github.com/vechain/thor/v2/state" + "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" +) + +var logger = log.WithContext("pkg", "pruner") + +const ( + propsStoreName = "pruner.props" + statusKey = "status" +) + +// Pruner is a background task to prune tries. +type Pruner struct { + db *muxdb.MuxDB + repo *chain.Repository + ctx context.Context + cancel func() + goes co.Goes +} + +// New creates and starts the pruner. +func New(db *muxdb.MuxDB, repo *chain.Repository) *Pruner { + ctx, cancel := context.WithCancel(context.Background()) + o := &Pruner{ + db: db, + repo: repo, + ctx: ctx, + cancel: cancel, + } + o.goes.Go(func() { + if err := o.loop(); err != nil { + if err != context.Canceled && errors.Cause(err) != context.Canceled { + logger.Warn("pruner interrupted", "error", err) + } + } + }) + return o +} + +// Stop stops the pruner. +func (p *Pruner) Stop() { + p.cancel() + p.goes.Wait() +} + +// loop is the main loop. 
+func (p *Pruner) loop() error { + logger.Info("pruner started") + + var ( + status status + propsStore = p.db.NewStore(propsStoreName) + ) + if err := status.Load(propsStore); err != nil { + return errors.Wrap(err, "load status") + } + + for { + period := uint32(65536) + if int64(p.repo.BestBlockSummary().Header.Timestamp()) > time.Now().Unix()-10*24*3600 { + // use smaller period when nearly synced + period = 8192 + } + + // select target + target := status.Base + period + + targetChain, err := p.awaitUntilSteady(target + thor.MaxStateHistory) + if err != nil { + return errors.Wrap(err, "awaitUntilSteady") + } + startTime := time.Now().UnixNano() + + // prune index/account/storage tries + if err := p.pruneTries(targetChain, status.Base, target); err != nil { + return errors.Wrap(err, "prune tries") + } + + logger.Info("prune tries", + "range", fmt.Sprintf("#%v+%v", status.Base, target-status.Base), + "et", time.Duration(time.Now().UnixNano()-startTime), + ) + + status.Base = target + if err := status.Save(propsStore); err != nil { + return errors.Wrap(err, "save status") + } + } +} + +// newStorageTrieIfUpdated creates a storage trie object from the account leaf if the storage trie updated since base. +func (p *Pruner) newStorageTrieIfUpdated(accLeaf *trie.Leaf, base uint32) *muxdb.Trie { + if len(accLeaf.Meta) == 0 { + return nil + } + + var ( + acc state.Account + meta state.AccountMetadata + ) + if err := rlp.DecodeBytes(accLeaf.Value, &acc); err != nil { + panic(errors.Wrap(err, "decode account")) + } + + if err := rlp.DecodeBytes(accLeaf.Meta, &meta); err != nil { + panic(errors.Wrap(err, "decode account metadata")) + } + + if meta.StorageMajorVer >= base { + return p.db.NewTrie( + state.StorageTrieName(meta.StorageID), + trie.Root{ + Hash: thor.BytesToBytes32(acc.StorageRoot), + Ver: trie.Version{ + Major: meta.StorageMajorVer, + Minor: meta.StorageMinorVer, + }, + }) + } + return nil +} + +// checkpointTries transfers tries' standalone nodes, whose major version within [base, target). +func (p *Pruner) checkpointTries(targetChain *chain.Chain, base, target uint32) error { + summary, err := targetChain.GetBlockSummary(target - 1) + if err != nil { + return err + } + + // checkpoint index trie + indexTrie := p.db.NewTrie(chain.IndexTrieName, summary.IndexRoot()) + indexTrie.SetNoFillCache(true) + + if err := indexTrie.Checkpoint(p.ctx, base, nil); err != nil { + return err + } + + // checkpoint account trie + accTrie := p.db.NewTrie(state.AccountTrieName, summary.Root()) + accTrie.SetNoFillCache(true) + + var sTries []*muxdb.Trie + if err := accTrie.Checkpoint(p.ctx, base, func(leaf *trie.Leaf) { + if sTrie := p.newStorageTrieIfUpdated(leaf, base); sTrie != nil { + sTries = append(sTries, sTrie) + } + }); err != nil { + return err + } + + // checkpoint storage tries + for _, sTrie := range sTries { + sTrie.SetNoFillCache(true) + if err := sTrie.Checkpoint(p.ctx, base, nil); err != nil { + return err + } + } + return nil +} + +// pruneTries prunes index/account/storage tries in the range [base, target). +func (p *Pruner) pruneTries(targetChain *chain.Chain, base, target uint32) error { + if err := p.checkpointTries(targetChain, base, target); err != nil { + return errors.Wrap(err, "checkpoint tries") + } + + if err := p.db.DeleteTrieHistoryNodes(p.ctx, base, target); err != nil { + return errors.Wrap(err, "delete trie history") + } + return nil +} + +// awaitUntilSteady waits until the target block number becomes almost final(steady), +// and returns the steady chain. 
+// +// TODO: using finality flag +func (p *Pruner) awaitUntilSteady(target uint32) (*chain.Chain, error) { + const windowSize = 100000 + + backoff := uint32(0) + for { + best := p.repo.BestBlockSummary() + bestNum := best.Header.Number() + if bestNum > target+backoff { + var meanScore float64 + if bestNum > windowSize { + baseNum := bestNum - windowSize + baseHeader, err := p.repo.NewChain(best.Header.ID()).GetBlockHeader(baseNum) + if err != nil { + return nil, err + } + meanScore = math.Round(float64(best.Header.TotalScore()-baseHeader.TotalScore()) / float64(windowSize)) + } else { + meanScore = math.Round(float64(best.Header.TotalScore()) / float64(bestNum)) + } + set := make(map[thor.Address]struct{}) + // reverse iterate the chain and collect signers. + for i, prev := 0, best.Header; i < int(meanScore*3) && prev.Number() >= target; i++ { + signer, _ := prev.Signer() + set[signer] = struct{}{} + if len(set) >= int(math.Round((meanScore+1)/2)) { + // got enough unique signers + steadyID := prev.ID() + return p.repo.NewChain(steadyID), nil + } + parent, err := p.repo.GetBlockSummary(prev.ParentID()) + if err != nil { + return nil, err + } + prev = parent.Header + } + backoff += uint32(meanScore) + } else { + select { + case <-p.ctx.Done(): + return nil, p.ctx.Err() + case <-time.After(time.Second): + } + } + } +} diff --git a/cmd/thor/optimizer/optimizer_test.go b/cmd/thor/pruner/pruner_test.go similarity index 61% rename from cmd/thor/optimizer/optimizer_test.go rename to cmd/thor/pruner/pruner_test.go index af3f729c7..fb714fa80 100644 --- a/cmd/thor/optimizer/optimizer_test.go +++ b/cmd/thor/pruner/pruner_test.go @@ -3,7 +3,7 @@ // Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying // file LICENSE or -package optimizer +package pruner import ( "context" @@ -14,6 +14,7 @@ import ( "os" "path/filepath" "testing" + "time" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" @@ -28,14 +29,18 @@ import ( "github.com/vechain/thor/v2/tx" ) -func fastForwardTo(from uint32, to uint32, db *muxdb.MuxDB, steadyID thor.Bytes32) (thor.Bytes32, error) { - id := thor.Bytes32{} +func fastForwardTo(from uint32, to uint32, db *muxdb.MuxDB) (thor.Bytes32, error) { + var ( + parentID thor.Bytes32 + id thor.Bytes32 + ) + binary.BigEndian.PutUint32(parentID[:], to-1) binary.BigEndian.PutUint32(id[:], to) + blk := new(block.Builder).ParentID(parentID).Build() var summary = &chain.BlockSummary{ - Header: &block.Header{}, + Header: blk.Header(), Conflicts: 0, - SteadyNum: block.Number(steadyID), } data, err := rlp.EncodeToBytes(summary) @@ -43,33 +48,32 @@ func fastForwardTo(from uint32, to uint32, db *muxdb.MuxDB, steadyID thor.Bytes3 return thor.Bytes32{}, err } - store := db.NewStore("chain.data") + store := db.NewStore("chain.hdr") err = store.Put(id.Bytes(), data) if err != nil { return thor.Bytes32{}, err } - trie := db.NewNonCryptoTrie("i", trie.NonCryptoNodeHash, from, 0) - if err := trie.Update(id[:4], id[:], nil); err != nil { + indexTrie := db.NewTrie("i", trie.Root{ + Hash: thor.BytesToBytes32([]byte{1}), + Ver: trie.Version{ + Major: from, + Minor: 0, + }, + }) + if err := indexTrie.Update(id[:4], id[:], nil); err != nil { return thor.Bytes32{}, err } - if steadyID == (thor.Bytes32{}) { - if err := trie.Update(steadyID[:4], steadyID[:], nil); err != nil { - return thor.Bytes32{}, err - } - } - - _, commit := trie.Stage(to, 0) - err = commit() - if err != nil { + if err := indexTrie.Commit(trie.Version{Major: to, Minor: 0}, 
true); err != nil { return thor.Bytes32{}, err } return id, nil } func newBlock(parentID thor.Bytes32, score uint64, stateRoot thor.Bytes32, priv *ecdsa.PrivateKey) *block.Block { - blk := new(block.Builder).ParentID(parentID).TotalScore(score).StateRoot(stateRoot).Build() + now := uint64(time.Now().Unix()) + blk := new(block.Builder).ParentID(parentID).TotalScore(score).StateRoot(stateRoot).Timestamp(now - now%10 - 10).Build() if priv != nil { sig, _ := crypto.Sign(blk.Header().SigningHash().Bytes(), priv) @@ -79,18 +83,14 @@ func newBlock(parentID thor.Bytes32, score uint64, stateRoot thor.Bytes32, priv } func TestStatus(t *testing.T) { - db := muxdb.NewMem() - - store := db.NewStore("test") + store := muxdb.NewMem().NewStore("test") s := &status{} err := s.Load(store) assert.Nil(t, err, "load should not error") assert.Equal(t, uint32(0), s.Base) - assert.Equal(t, uint32(0), s.PruneBase) s.Base = 1 - s.PruneBase = 2 err = s.Save(store) assert.Nil(t, err, "save should not error") @@ -99,18 +99,17 @@ func TestStatus(t *testing.T) { err = s2.Load(store) assert.Nil(t, err, "load should not error") assert.Equal(t, uint32(1), s.Base) - assert.Equal(t, uint32(2), s.PruneBase) } -func TestNewOptimizer(t *testing.T) { +func TestNewPruner(t *testing.T) { db := muxdb.NewMem() stater := state.NewStater(db) gene := genesis.NewDevnet() b0, _, _, _ := gene.Build(stater) repo, _ := chain.NewRepository(db, b0) - op := New(db, repo, false) - op.Stop() + pr := New(db, repo) + pr.Stop() } func newTempFileDB() (*muxdb.MuxDB, func() error, error) { @@ -118,9 +117,7 @@ func newTempFileDB() (*muxdb.MuxDB, func() error, error) { opts := muxdb.Options{ TrieNodeCacheSizeMB: 128, - TrieRootCacheCapacity: 256, TrieCachedNodeTTL: 30, // 5min - TrieLeafBankSlotCapacity: 256, TrieDedupedPartitionFactor: math.MaxUint32, TrieWillCleanHistory: true, OpenFilesCacheCapacity: 512, @@ -134,7 +131,7 @@ func newTempFileDB() (*muxdb.MuxDB, func() error, error) { return nil, nil, err } - closeFunc := func() error { + close := func() error { err = db.Close() if err != nil { return err @@ -146,65 +143,7 @@ func newTempFileDB() (*muxdb.MuxDB, func() error, error) { return nil } - return db, closeFunc, nil -} - -func TestProcessDump(t *testing.T) { - db, closeDB, err := newTempFileDB() - assert.Nil(t, err) - stater := state.NewStater(db) - gene := genesis.NewDevnet() - b0, _, _, _ := gene.Build(stater) - repo, _ := chain.NewRepository(db, b0) - - devAccounts := genesis.DevAccounts() - - // fast forward to 1999 - parentID, err := fastForwardTo(0, 1999, db, repo.SteadyBlockID()) - assert.Nil(t, err) - - var parentScore uint64 = 1999 * 2 - // add new blocks with signature - for i := 0; i < 3; i++ { - blk := newBlock(parentID, parentScore+2, b0.Header().StateRoot(), devAccounts[i%2].PrivateKey) - err := repo.AddBlock(blk, tx.Receipts{}, 0) - assert.Nil(t, err) - - parentID = blk.Header().ID() - parentScore = blk.Header().TotalScore() - } - - repo.SetBestBlockID(parentID) - - op := New(db, repo, false) - op.Stop() - - var s status - assert.Nil(t, s.Load(op.db.NewStore(propsStoreName))) - assert.Equal(t, uint32(2000), s.Base) - - // fast forward to 3999 - parentID, err = fastForwardTo(block.Number(parentID), 3999, db, repo.SteadyBlockID()) - assert.Nil(t, err) - - // add new blocks with signature - for i := 0; i < 3; i++ { - blk := newBlock(parentID, parentScore+2, b0.Header().StateRoot(), devAccounts[i%2].PrivateKey) - err := repo.AddBlock(blk, tx.Receipts{}, 0) - assert.Nil(t, err) - - parentID = blk.Header().ID() - parentScore = 
blk.Header().TotalScore() - } - repo.SetBestBlockID(parentID) - - op = New(db, repo, true) - op.Stop() - - assert.Nil(t, s.Load(op.db.NewStore(propsStoreName))) - assert.Equal(t, uint32(4000), s.Base) - - closeDB() + return db, close, nil } func TestWaitUntil(t *testing.T) { @@ -216,7 +155,7 @@ func TestWaitUntil(t *testing.T) { devAccounts := genesis.DevAccounts() ctx, cancel := context.WithCancel(context.Background()) - op := &Optimizer{ + pruner := &Pruner{ repo: repo, db: db, ctx: ctx, @@ -224,18 +163,17 @@ func TestWaitUntil(t *testing.T) { } parentID := b0.Header().ID() - var parentScore uint64 + var parentScore uint64 = 0 for i := 0; i < 6; i++ { blk := newBlock(parentID, parentScore+2, b0.Header().StateRoot(), devAccounts[0].PrivateKey) - err := repo.AddBlock(blk, tx.Receipts{}, 0) + err := repo.AddBlock(blk, tx.Receipts{}, 0, true) assert.Nil(t, err) parentID = blk.Header().ID() parentScore = blk.Header().TotalScore() } - repo.SetBestBlockID(parentID) - parentID, err := fastForwardTo(block.Number(parentID), 100000-1, db, repo.SteadyBlockID()) + parentID, err := fastForwardTo(block.Number(parentID), 100000-1, db) assert.Nil(t, err) parentScore = (100000 - 1) * 2 @@ -243,13 +181,12 @@ func TestWaitUntil(t *testing.T) { signer := devAccounts[0].PrivateKey score := parentScore + 1 blk := newBlock(parentID, score, b0.Header().StateRoot(), signer) - err := repo.AddBlock(blk, tx.Receipts{}, 0) + err := repo.AddBlock(blk, tx.Receipts{}, 0, true) assert.Nil(t, err) parentID = blk.Header().ID() parentScore = blk.Header().TotalScore() } - repo.SetBestBlockID(parentID) go func() { cancel() @@ -258,7 +195,7 @@ func TestWaitUntil(t *testing.T) { // not enough signer, will wait for 1 sec // backoff will increase for more waiting // cancel here and restart a new test case - _, err = op.awaitUntilSteady(100000) + _, err = pruner.awaitUntilSteady(100000) assert.NotNil(t, err) for i := 0; i < 3; i++ { @@ -266,24 +203,23 @@ func TestWaitUntil(t *testing.T) { score := parentScore + 2 blk := newBlock(parentID, score, b0.Header().StateRoot(), signer) - err := repo.AddBlock(blk, tx.Receipts{}, 0) + err := repo.AddBlock(blk, tx.Receipts{}, 0, true) assert.Nil(t, err) parentID = blk.Header().ID() parentScore = blk.Header().TotalScore() } - repo.SetBestBlockID(parentID) ctx, cancel = context.WithCancel(context.Background()) - op.ctx = ctx - op.cancel = cancel + pruner.ctx = ctx + pruner.cancel = cancel - chain, err := op.awaitUntilSteady(100000) + chain, err := pruner.awaitUntilSteady(100000) assert.Nil(t, err) assert.True(t, block.Number(chain.HeadID()) >= 10000) } -func TestDumpAndPrune(t *testing.T) { +func TestPrune(t *testing.T) { db, closeDB, err := newTempFileDB() assert.Nil(t, err) @@ -294,7 +230,7 @@ func TestDumpAndPrune(t *testing.T) { devAccounts := genesis.DevAccounts() ctx, cancel := context.WithCancel(context.Background()) - op := &Optimizer{ + pruner := &Pruner{ repo: repo, db: db, ctx: ctx, @@ -311,31 +247,26 @@ func TestDumpAndPrune(t *testing.T) { for i := 0; i < 9; i++ { blk := newBlock(parentID, 10, b0.Header().StateRoot(), nil) - err := repo.AddBlock(blk, tx.Receipts{}, 0) + err := repo.AddBlock(blk, tx.Receipts{}, 0, false) assert.Nil(t, err) parentID = blk.Header().ID() } - st := stater.NewState(b0.Header().StateRoot(), b0.Header().Number(), 0, 0) + st := stater.NewState(trie.Root{Hash: b0.Header().StateRoot(), Ver: trie.Version{Major: 0, Minor: 0}}) st.SetBalance(acc1, big.NewInt(1e18)) st.SetCode(acc2, code) st.SetStorage(acc2, key, value) - stage, err := st.Stage(10, 0) + stage, 
err := st.Stage(trie.Version{Major: 10, Minor: 0}) assert.Nil(t, err) root, err := stage.Commit() assert.Nil(t, err) blk := newBlock(parentID, 10, root, devAccounts[0].PrivateKey) - err = repo.AddBlock(blk, tx.Receipts{}, 0) + err = repo.AddBlock(blk, tx.Receipts{}, 0, true) assert.Nil(t, err) parentID = blk.Header().ID() - repo.SetBestBlockID(parentID) - - err = op.dumpStateLeaves(repo.NewBestChain(), 0, block.Number(parentID)+1) - assert.Nil(t, err) - - err = op.pruneTries(repo.NewBestChain(), 0, block.Number(parentID)+1) + err = pruner.pruneTries(repo.NewBestChain(), 0, block.Number(parentID)+1) assert.Nil(t, err) closeDB() diff --git a/cmd/thor/optimizer/status.go b/cmd/thor/pruner/status.go similarity index 92% rename from cmd/thor/optimizer/status.go rename to cmd/thor/pruner/status.go index 8980a128e..202dfe98a 100644 --- a/cmd/thor/optimizer/status.go +++ b/cmd/thor/pruner/status.go @@ -3,7 +3,7 @@ // Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying // file LICENSE or -package optimizer +package pruner import ( "encoding/json" @@ -12,8 +12,7 @@ import ( ) type status struct { - Base uint32 - PruneBase uint32 + Base uint32 } func (s *status) Load(getter kv.Getter) error { diff --git a/cmd/thor/solo/solo.go b/cmd/thor/solo/solo.go index 638aa74ff..102320b5b 100644 --- a/cmd/thor/solo/solo.go +++ b/cmd/thor/solo/solo.go @@ -174,12 +174,6 @@ func (s *Solo) packing(pendingTxs tx.Transactions, onDemand bool) error { return errors.WithMessage(err, "commit state") } - // ignore fork when solo - if err := s.repo.AddBlock(b, receipts, 0); err != nil { - return errors.WithMessage(err, "commit block") - } - realElapsed := mclock.Now() - startTime - if !s.skipLogs { w := s.logDB.NewWriter() if err := w.Write(b, receipts); err != nil { @@ -191,9 +185,11 @@ func (s *Solo) packing(pendingTxs tx.Transactions, onDemand bool) error { } } - if err := s.repo.SetBestBlockID(b.Header().ID()); err != nil { - return errors.WithMessage(err, "set best block") + // ignore fork when solo + if err := s.repo.AddBlock(b, receipts, 0, true); err != nil { + return errors.WithMessage(err, "commit block") } + realElapsed := mclock.Now() - startTime commitElapsed := mclock.Now() - startTime - execElapsed @@ -216,7 +212,7 @@ func (s *Solo) packing(pendingTxs tx.Transactions, onDemand bool) error { // The init function initializes the chain parameters. 
func (s *Solo) init(ctx context.Context) error { best := s.repo.BestBlockSummary() - newState := s.stater.NewState(best.Header.StateRoot(), best.Header.Number(), best.Conflicts, best.SteadyNum) + newState := s.stater.NewState(best.Root()) currentBGP, err := builtin.Params.Native(newState).Get(thor.KeyBaseGasPrice) if err != nil { return errors.WithMessage(err, "failed to get the current base gas price") @@ -242,7 +238,6 @@ func (s *Solo) init(ctx context.Context) error { } if !s.onDemand { - // wait for the next block interval if not on-demand select { case <-ctx.Done(): return ctx.Err() diff --git a/cmd/thor/solo/solo_test.go b/cmd/thor/solo/solo_test.go index a4df3f35d..6fa2fde73 100644 --- a/cmd/thor/solo/solo_test.go +++ b/cmd/thor/solo/solo_test.go @@ -42,7 +42,7 @@ func TestInitSolo(t *testing.T) { // check the gas price best := solo.repo.BestBlockSummary() - newState := solo.stater.NewState(best.Header.StateRoot(), best.Header.Number(), best.Conflicts, best.SteadyNum) + newState := solo.stater.NewState(best.Root()) currentBGP, err := builtin.Params.Native(newState).Get(thor.KeyBaseGasPrice) assert.Nil(t, err) assert.Equal(t, baseGasPrice, currentBGP) diff --git a/cmd/thor/sync_logdb.go b/cmd/thor/sync_logdb.go index 9fccf3127..edfb78793 100644 --- a/cmd/thor/sync_logdb.go +++ b/cmd/thor/sync_logdb.go @@ -285,6 +285,8 @@ func verifyLogDBPerBlock( n := block.Header().Number() id := block.Header().ID() ts := block.Header().Timestamp() + evCount := 0 + trCount := 0 var expectedEvLogs []*logdb.Event var expectedTrLogs []*logdb.Transfer @@ -292,6 +294,8 @@ func verifyLogDBPerBlock( for txIndex, r := range receipts { tx := txs[txIndex] origin, _ := tx.Origin() + evCount = 0 + trCount = 0 for clauseIndex, output := range r.Outputs { for _, ev := range output.Events { @@ -301,7 +305,7 @@ func verifyLogDBPerBlock( } expectedEvLogs = append(expectedEvLogs, &logdb.Event{ BlockNumber: n, - Index: uint32(len(expectedEvLogs)), + LogIndex: uint32(evCount), BlockID: id, BlockTime: ts, TxID: tx.ID(), @@ -310,12 +314,14 @@ func verifyLogDBPerBlock( Address: ev.Address, Topics: convertTopics(ev.Topics), Data: data, + TxIndex: uint32(txIndex), }) + evCount++ } for _, tr := range output.Transfers { expectedTrLogs = append(expectedTrLogs, &logdb.Transfer{ BlockNumber: n, - Index: uint32(len(expectedTrLogs)), + LogIndex: uint32(trCount), BlockID: id, BlockTime: ts, TxID: tx.ID(), @@ -324,7 +330,9 @@ func verifyLogDBPerBlock( Sender: tr.Sender, Recipient: tr.Recipient, Amount: tr.Amount, + TxIndex: uint32(txIndex), }) + trCount++ } } } diff --git a/cmd/thor/utils.go b/cmd/thor/utils.go index d53935c10..561b1276a 100644 --- a/cmd/thor/utils.go +++ b/cmd/thor/utils.go @@ -315,7 +315,7 @@ func makeInstanceDir(ctx *cli.Context, gene *genesis.Genesis) (string, error) { suffix = "-full" } - instanceDir := filepath.Join(dataDir, fmt.Sprintf("instance-%x-v3", gene.ID().Bytes()[24:])+suffix) + instanceDir := filepath.Join(dataDir, fmt.Sprintf("instance-%x-v4", gene.ID().Bytes()[24:])+suffix) if err := os.MkdirAll(instanceDir, 0700); err != nil { return "", errors.Wrapf(err, "create instance dir [%v]", instanceDir) } @@ -331,9 +331,7 @@ func openMainDB(ctx *cli.Context, dir string) (*muxdb.MuxDB, error) { opts := muxdb.Options{ TrieNodeCacheSizeMB: cacheMB, - TrieRootCacheCapacity: 256, TrieCachedNodeTTL: 30, // 5min - TrieLeafBankSlotCapacity: 256, TrieDedupedPartitionFactor: math.MaxUint32, TrieWillCleanHistory: !ctx.Bool(disablePrunerFlag.Name), OpenFilesCacheCapacity: fdCache, @@ -350,9 +348,9 @@ func 
openMainDB(ctx *cli.Context, dir string) (*muxdb.MuxDB, error) { debug.SetGCPercent(int(gogc)) if opts.TrieWillCleanHistory { - opts.TrieHistPartitionFactor = 1000 + opts.TrieHistPartitionFactor = 256 } else { - opts.TrieHistPartitionFactor = 500000 + opts.TrieHistPartitionFactor = 524288 } path := filepath.Join(dir, "main.db") diff --git a/consensus/consensus.go b/consensus/consensus.go index 8d6f7a9c3..fd9a78e8a 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -48,7 +48,7 @@ func New(repo *chain.Repository, stater *state.Stater, forkConfig thor.ForkConfi // Process process a block. func (c *Consensus) Process(parentSummary *chain.BlockSummary, blk *block.Block, nowTimestamp uint64, blockConflicts uint32) (*state.Stage, tx.Receipts, error) { header := blk.Header() - state := c.stater.NewState(parentSummary.Header.StateRoot(), parentSummary.Header.Number(), parentSummary.Conflicts, parentSummary.SteadyNum) + state := c.stater.NewState(parentSummary.Root()) var features tx.Features if header.Number() >= c.forkConfig.VIP191 { @@ -79,7 +79,7 @@ func (c *Consensus) NewRuntimeForReplay(header *block.Header, skipPoA bool) (*ru } return nil, errors.New("parent block is missing") } - state := c.stater.NewState(parentSummary.Header.StateRoot(), parentSummary.Header.Number(), parentSummary.Conflicts, parentSummary.SteadyNum) + state := c.stater.NewState(parentSummary.Root()) if !skipPoA { if _, err := c.validateProposer(header, parentSummary.Header, state); err != nil { return nil, err diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go index 5bac09763..ecd810838 100644 --- a/consensus/consensus_test.go +++ b/consensus/consensus_test.go @@ -122,11 +122,7 @@ func newTestConsensus() (*testConsensus, error) { return nil, err } - if err := repo.AddBlock(b1, receipts, 0); err != nil { - return nil, err - } - - if err := repo.SetBestBlockID(b1.Header().ID()); err != nil { + if err := repo.AddBlock(b1, receipts, 0, true); err != nil { return nil, err } @@ -706,7 +702,7 @@ func TestValidateBlockBody(t *testing.T) { Expiration(100). Clause(tx.NewClause(&thor.Address{}).WithValue(big.NewInt(0)).WithData(nil)). Nonce(0). - ChainTag(30) + ChainTag(208) tx := txSign(txBuilder) diff --git a/consensus/validator.go b/consensus/validator.go index dc7ee85b3..4749fcff8 100644 --- a/consensus/validator.go +++ b/consensus/validator.go @@ -16,6 +16,7 @@ import ( "github.com/vechain/thor/v2/runtime" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" "github.com/vechain/thor/v2/tx" "github.com/vechain/thor/v2/xenv" ) @@ -341,7 +342,7 @@ func (c *Consensus) verifyBlock(blk *block.Block, state *state.State, blockConfl } } - stage, err := state.Stage(header.Number(), blockConflicts) + stage, err := state.Stage(trie.Version{Major: header.Number(), Minor: blockConflicts}) if err != nil { return nil, nil, err } diff --git a/docs/v2-2-0-migration-guide.md b/docs/v2-2-0-migration-guide.md new file mode 100644 index 000000000..3f2ecbed4 --- /dev/null +++ b/docs/v2-2-0-migration-guide.md @@ -0,0 +1,181 @@ +# MainDB v4 Migration Paths + +## Introduction + +The `v2.2.0` release introduces database and SQLite changes to improve performance and storage. This document outlines the possible +migration paths. + +**Note:** The examples below assume you are operating a node on mainnet. + +## Table of Contents + +- [Blue / Green Deployment](#blue--green-deployment) +- [Sync in Parallel](#sync-in-parallel) + - [1. 
Docker Migration](#1-docker-migration)
+  - [2. Manual Migration](#2-manual-migration)
+- [Install Latest Version](#install-latest-version)
+  - [Using Docker](#using-docker)
+  - [Install From Source](#install-from-source)
+
+## Blue / Green Deployment
+
+- For environments implementing a blue/green deployment strategy, starting a new node with the updated image and allowing it to
+  sync before switching traffic is a seamless approach. Once synced, traffic can be directed to the new node, and the
+  old node can be stopped.
+
+## Sync in Parallel
+
+- Syncing in parallel minimizes downtime but requires additional CPU, RAM and storage resources.
+
+### 1. Docker Migration
+
+This path applies to setups where Docker volumes are mapped to a location on the host machine.
+
+**Note**: The examples assume the default data directory within the container is used. If a custom directory is configured,
+adjustments to the examples are required.
+
+For an existing node with a host instance directory of `/path/to/thor`:
+
+```bash
+docker run -d \
+  -v /path/to/thor:/home/thor/.org.vechain.thor \
+  -p 8669:8669 \
+  -p 11235:11235 \
+  --name <container-name> \
+  vechain/thor:v2.1.4 --network main
+```
+
+Start a new container with `v2.2.0` without exposing the ports:
+
+```bash
+docker run -d \
+  -v /path/to/thor:/home/thor/.org.vechain.thor \
+  --name node-new \
+  vechain/thor:v2.2.0 --network main
+```
+
+- The `v2.1.4` node will continue to operate and write data to the directory `/path/to/thor/instance-39627e6be7ec1b4a-v3`, while the
+  `v2.2.0` node will write the new databases to `/path/to/thor/instance-39627e6be7ec1b4a-v4`.
+- Allow some time for the new node to sync.
+- You can inspect the logs using `docker logs --tail 25 node-new`.
+- After the new node is fully synced, stop both nodes and restart the original container with the updated image:
+
+```bash
+docker stop node-new
+docker rm node-new
+docker stop <container-name>
+docker rm <container-name>
+
+docker run -d \
+  -v /path/to/thor:/home/thor/.org.vechain.thor \
+  -p 8669:8669 \
+  -p 11235:11235 \
+  --name <container-name> \
+  vechain/thor:v2.2.0 --network main
+```
+
+- Confirm that the node is functioning as expected before cleaning up the old databases:
+
+```bash
+rm -rf /path/to/thor/instance-39627e6be7ec1b4a-v3
+```
+
+### 2. Manual Migration
+
+For nodes installed from source, follow the steps below:
+
+- Assuming the old node was started with:
+
+```bash
+/previous/executable/thor --network main
+```
+
+- Build the new `thor` binary as outlined in [Install From Source](#install-from-source).
+
+- Start the new node with different API, metrics, admin and P2P ports:
+
+```bash
+./bin/thor --network main \
+  --api-addr localhost:8668 \
+  --metrics-addr localhost:2102 \
+  --admin-addr localhost:2103 \
+  --p2p-port 11222
+```
+
+- The `v2.1.4` node will continue to operate and write data to the data directory under `/data/dir/instance-39627e6be7ec1b4a-v3`, while
+  the `v2.2.0` node writes to `/data/dir/instance-39627e6be7ec1b4a-v4`.
+- Allow the new node to sync before switching traffic.
+
+#### Stopping and Switching Nodes
+
+##### 1. Get the PID of the new node:
+
+```bash
+lsof -n -i:8668
+```
+
+##### 2. Stop the new node:
+
+```bash
+kill <pid>
+```
+
+##### 3. Get the PID of the old node:
+
+```bash
+lsof -n -i:8669
+```
+
+##### 4. Stop the old node:
+
+```bash
+kill <pid>
+```
+
+##### 5. Restart the original node command with the new binary:
+
+```bash
+/new/executable/thor --network main
+```
+
+##### 6. Remove the old databases:
+
+```bash
+rm -rf /data/dir/instance-39627e6be7ec1b4a-v3
+```
+
+## Install Latest Version
+
+### Using Docker
+
+```bash
+docker pull vechain/thor:v2.2.0
+```
+
+### Install From Source
+
+- Clone the repository and checkout the `v2.2.0` tag:
+
+```bash
+git clone https://github.com/vechain/thor.git --branch v2.2.0 --depth 1
+```
+
+- Build the `thor` binary:
+
+```bash
+cd thor
+make thor
+```
+
+- Verify the binary:
+
+```bash
+./bin/thor --version
+```
+
+- (Optional) Copy the binary to a location in your `$PATH`:
+
+```bash
+sudo cp ./bin/thor /usr/local/bin
+```
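As an aside to the guide above, the `instance-39627e6be7ec1b4a-{v3,v4}` directory names are not arbitrary: `makeInstanceDir` in `cmd/thor/utils.go` (updated earlier in this diff) hex-encodes the last 8 bytes of the genesis ID and appends the database layout version, which this release bumps from `v3` to `v4`. A minimal sketch of the derivation, assuming the byte suffix shown in the guide is the tail of the mainnet genesis ID:

```go
package main

import "fmt"

func main() {
	// Tail of the mainnet genesis ID (assumed from the guide's examples).
	var geneID [32]byte
	copy(geneID[24:], []byte{0x39, 0x62, 0x7e, 0x6b, 0xe7, 0xec, 0x1b, 0x4a})

	// Mirrors makeInstanceDir's naming scheme:
	// instance-<last 8 bytes of genesis ID as hex>-<layout version>.
	fmt.Printf("instance-%x-v4\n", geneID[24:]) // instance-39627e6be7ec1b4a-v4
}
```

Because the layout version is part of the directory name, a `v2.2.0` node never opens the old `-v3` databases, which is what makes running both versions against the same host data directory safe.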
diff --git a/genesis/builder.go b/genesis/builder.go
index ea12655c1..521bab752 100644
--- a/genesis/builder.go
+++ b/genesis/builder.go
@@ -7,6 +7,7 @@ package genesis
 
 import (
 	"math"
+	"math/big"
 
 	"github.com/pkg/errors"
 	"github.com/vechain/thor/v2/block"
@@ -14,6 +15,7 @@ import (
 	"github.com/vechain/thor/v2/runtime"
 	"github.com/vechain/thor/v2/state"
 	"github.com/vechain/thor/v2/thor"
+	"github.com/vechain/thor/v2/trie"
 	"github.com/vechain/thor/v2/tx"
 	"github.com/vechain/thor/v2/xenv"
 )
@@ -73,9 +75,7 @@ func (b *Builder) ForkConfig(fc thor.ForkConfig) *Builder {
 
 // ComputeID compute genesis ID.
 func (b *Builder) ComputeID() (thor.Bytes32, error) {
-	db := muxdb.NewMem()
-
-	blk, _, _, err := b.Build(state.NewStater(db))
+	blk, _, _, err := b.Build(state.NewStater(muxdb.NewMem()))
 	if err != nil {
 		return thor.Bytes32{}, err
 	}
@@ -84,7 +84,7 @@ func (b *Builder) ComputeID() (thor.Bytes32, error) {
 
 // Build build genesis block according to presets.
 func (b *Builder) Build(stater *state.Stater) (blk *block.Block, events tx.Events, transfers tx.Transfers, err error) {
-	state := stater.NewState(thor.Bytes32{}, 0, 0, 0)
+	state := stater.NewState(trie.Root{})
 
 	for _, proc := range b.stateProcs {
 		if err := proc(state); err != nil {
@@ -97,9 +97,12 @@ func (b *Builder) Build(stater *state.Stater) (blk *block.Block, events tx.Event
 		GasLimit: b.gasLimit,
 	}, b.forkConfig)
 
+	clauseCount := big.NewInt(int64(len(b.calls)))
+
 	for _, call := range b.calls {
 		exec, _ := rt.PrepareClause(call.clause, 0, math.MaxUint64, &xenv.TransactionContext{
-			Origin: call.caller,
+			ClauseCount: clauseCount,
+			Origin:      call.caller,
 		})
 		out, _, err := exec()
 		if err != nil {
@@ -112,7 +115,7 @@ func (b *Builder) Build(stater *state.Stater) (blk *block.Block, events tx.Event
 		transfers = append(transfers, out.Transfers...)
 	}
 
-	stage, err := state.Stage(0, 0)
+	stage, err := state.Stage(trie.Version{})
 	if err != nil {
 		return nil, nil, nil, errors.Wrap(err, "stage")
 	}
diff --git a/genesis/devnet.go b/genesis/devnet.go
index 5db8ad752..f3b9332df 100644
--- a/genesis/devnet.go
+++ b/genesis/devnet.go
@@ -58,6 +58,10 @@ func DevAccounts() []DevAccount {
 
 // NewDevnet create genesis for solo mode.
 func NewDevnet() *Genesis {
+	return NewDevnetWithConfig(thor.SoloFork)
+}
+
+func NewDevnetWithConfig(config thor.ForkConfig) *Genesis {
 	launchTime := uint64(1526400000) // 'Wed May 16 2018 00:00:00 GMT+0800 (CST)'
 	executor := DevAccounts()[0].Address
 
@@ -66,6 +70,7 @@ func NewDevnet() *Genesis {
 	builder := new(Builder).
 		GasLimit(thor.InitialGasLimit).
 		Timestamp(launchTime).
+		ForkConfig(config).
 		State(func(state *state.State) error {
 			// setup builtin contracts
 			if err := state.SetCode(builtin.Authority.Address, builtin.Authority.RuntimeBytecodes()); err != nil {
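The devnet change above is worth pausing on: `NewDevnet()` is now a thin wrapper over the new `NewDevnetWithConfig`, and solo mode picks up `thor.SoloFork` instead of the previous all-zero `thor.ForkConfig{}`. A short sketch of how a caller might use the new constructor; this is an illustration only, and the zeroed schedule shown is simply the pre-change behavior (every fork active from block 0), not the solo defaults:

```go
package main

import (
	"fmt"

	"github.com/vechain/thor/v2/genesis"
	"github.com/vechain/thor/v2/thor"
)

func main() {
	// Since this change, equivalent to genesis.NewDevnetWithConfig(thor.SoloFork).
	solo := genesis.NewDevnet()

	// The old behavior can still be requested explicitly: an all-zero
	// ForkConfig is what soloAction used to assign before this diff.
	legacy := genesis.NewDevnetWithConfig(thor.ForkConfig{})

	// Whether the two genesis IDs differ depends on how the fork schedule
	// feeds into genesis construction; TestNewDevnet_SoloConfig (below)
	// pins the solo ID so that any drift is caught.
	fmt.Println(solo.ID())
	fmt.Println(legacy.ID())
}
```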
diff --git a/genesis/devnet_test.go b/genesis/devnet_test.go
index ea0f2e675..f8eb843c9 100644
--- a/genesis/devnet_test.go
+++ b/genesis/devnet_test.go
@@ -35,3 +35,10 @@ func TestNewDevnet(t *testing.T) {
 	assert.NotEqual(t, thor.Bytes32{}, genesisObj.ID(), "Genesis ID should be valid")
 	assert.Equal(t, "devnet", genesisObj.Name(), "Genesis name should be 'devnet'")
 }
+
+func TestNewDevnet_SoloConfig(t *testing.T) {
+	id := genesis.NewDevnet().ID()
+
+	// Thor Solo Genesis ID should never change
+	assert.Equal(t, thor.MustParseBytes32("0x00000000c05a20fbca2bf6ae3affba6af4a74b800b585bf7a4988aba7aea69f6"), id)
+}
diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go
index e6c5c47ce..97b72295d 100644
--- a/genesis/genesis_test.go
+++ b/genesis/genesis_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/vechain/thor/v2/muxdb"
 	"github.com/vechain/thor/v2/state"
 	"github.com/vechain/thor/v2/thor"
+	"github.com/vechain/thor/v2/trie"
 )
 
 func TestTestnetGenesis(t *testing.T) {
@@ -22,13 +23,7 @@ func TestTestnetGenesis(t *testing.T) {
 	b0, _, _, err := gene.Build(state.NewStater(db))
 	assert.Nil(t, err)
 
-	id := gene.ID()
-	name := gene.Name()
-
-	assert.Equal(t, id, thor.MustParseBytes32("0x000000000b2bce3c70bc649a02749e8687721b09ed2e15997f466536b20bb127"))
-	assert.Equal(t, name, "testnet")
-
-	st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
+	st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
 
 	v, err := st.Exists(thor.MustParseAddress("0xe59D475Abe695c7f67a8a2321f33A856B0B4c71d"))
 	assert.Nil(t, err)
diff --git a/go.mod b/go.mod
index 9e8869618..3e79443aa 100644
--- a/go.mod
+++ b/go.mod
@@ -24,6 +24,7 @@ require (
 	github.com/prometheus/client_model v0.5.0
 	github.com/prometheus/common v0.45.0
 	github.com/qianbin/directcache v0.9.7
+	github.com/qianbin/drlp v0.0.0-20240102101024-e0e02518b5f9
 	github.com/stretchr/testify v1.8.4
 	github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a
 	github.com/vechain/go-ecvrf v0.0.0-20220525125849-96fa0442e765
@@ -64,4 +65,4 @@ require (
 
 replace github.com/syndtr/goleveldb => github.com/vechain/goleveldb v1.0.1-0.20220809091043-51eb019c8655
 
-replace github.com/ethereum/go-ethereum => github.com/vechain/go-ethereum v1.8.15-0.20241126085506-c74017ec91b2
+replace github.com/ethereum/go-ethereum => github.com/vechain/go-ethereum v1.8.15-0.20250203151135-b4d97bda6bc9
diff --git a/go.sum b/go.sum
index 668dc9758..3c2c995a0 100644
--- a/go.sum
+++ b/go.sum
@@ -141,6 +141,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
 github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
 github.com/qianbin/directcache v0.9.7 h1:DH6MdmU0fVjcKry57ju7U6akTFDBnLhHd0xOHZDq948=
 github.com/qianbin/directcache v0.9.7/go.mod h1:gZBpa9NqO1Qz7wZKO7t7atBA76bT8X0eM01PdveW4qc=
+github.com/qianbin/drlp v0.0.0-20240102101024-e0e02518b5f9 h1:phutO88A0XihNL/23gAzaih6cqQB25smZ0STd/lM0Ng=
+github.com/qianbin/drlp v0.0.0-20240102101024-e0e02518b5f9/go.mod h1:OnClEjurpFUtR3RUCauP9HxNNl8xjfGAOv0kWYTznOc=
 github.com/rjeczalik/notify v0.9.3 h1:6rJAzHTGKXGj76sbRgDiDcYj/HniypXmSJo1SWakZeY=
 github.com/rjeczalik/notify v0.9.3/go.mod h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc=
 github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
@@ -154,8 +156,8 @@ github.com/stretchr/testify v1.8.4
h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/vechain/go-ecvrf v0.0.0-20220525125849-96fa0442e765 h1:jvr+TSivjObZmOKVdqlgeLtRhaDG27gE39PMuE2IJ24= github.com/vechain/go-ecvrf v0.0.0-20220525125849-96fa0442e765/go.mod h1:cwnTMgAVzMb30xMKnGI1LdU1NjMiPllYb7i3ibj/fzE= -github.com/vechain/go-ethereum v1.8.15-0.20241126085506-c74017ec91b2 h1:ch3DqXvl1ApfJut768bf5Vlhqtw+bxAWTyPDYXQkQZk= -github.com/vechain/go-ethereum v1.8.15-0.20241126085506-c74017ec91b2/go.mod h1:yPUCNmntAh1PritrMfSi7noK+9vVPStZX3wgh3ieaY0= +github.com/vechain/go-ethereum v1.8.15-0.20250203151135-b4d97bda6bc9 h1:dkF3gD0LQPAD3ajR5XEtddDN0ffLZwflgRt6YKe5Deg= +github.com/vechain/go-ethereum v1.8.15-0.20250203151135-b4d97bda6bc9/go.mod h1:yPUCNmntAh1PritrMfSi7noK+9vVPStZX3wgh3ieaY0= github.com/vechain/goleveldb v1.0.1-0.20220809091043-51eb019c8655 h1:CbHcWpCi7wOYfpoErRABh3Slyq9vO0Ay/EHN5GuJSXQ= github.com/vechain/goleveldb v1.0.1-0.20220809091043-51eb019c8655/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/logdb/logdb.go b/logdb/logdb.go index bcd793e94..af043e9d2 100644 --- a/logdb/logdb.go +++ b/logdb/logdb.go @@ -9,7 +9,6 @@ import ( "context" "database/sql" "fmt" - "math" "math/big" sqlite3 "github.com/mattn/go-sqlite3" @@ -118,10 +117,18 @@ FROM (%v) e if filter.Range != nil { subQuery += " AND seq >= ?" - args = append(args, newSequence(filter.Range.From, 0)) + from, err := newSequence(filter.Range.From, 0, 0) + if err != nil { + return nil, err + } + args = append(args, from) if filter.Range.To >= filter.Range.From { subQuery += " AND seq <= ?" - args = append(args, newSequence(filter.Range.To, uint32(math.MaxInt32))) + to, err := newSequence(filter.Range.To, txIndexMask, logIndexMask) + if err != nil { + return nil, err + } + args = append(args, to) } } @@ -184,10 +191,18 @@ FROM (%v) t if filter.Range != nil { subQuery += " AND seq >= ?" - args = append(args, newSequence(filter.Range.From, 0)) + from, err := newSequence(filter.Range.From, 0, 0) + if err != nil { + return nil, err + } + args = append(args, from) if filter.Range.To >= filter.Range.From { subQuery += " AND seq <= ?" - args = append(args, newSequence(filter.Range.To, uint32(math.MaxInt32))) + to, err := newSequence(filter.Range.To, txIndexMask, logIndexMask) + if err != nil { + return nil, err + } + args = append(args, to) } } @@ -272,10 +287,11 @@ func (db *LogDB) queryEvents(ctx context.Context, query string, args ...interfac } event := &Event{ BlockNumber: seq.BlockNumber(), - Index: seq.Index(), + LogIndex: seq.LogIndex(), BlockID: thor.BytesToBytes32(blockID), BlockTime: blockTime, TxID: thor.BytesToBytes32(txID), + TxIndex: seq.TxIndex(), TxOrigin: thor.BytesToAddress(txOrigin), ClauseIndex: clauseIndex, Address: thor.BytesToAddress(address), @@ -334,10 +350,11 @@ func (db *LogDB) queryTransfers(ctx context.Context, query string, args ...inter } trans := &Transfer{ BlockNumber: seq.BlockNumber(), - Index: seq.Index(), + LogIndex: seq.LogIndex(), BlockID: thor.BytesToBytes32(blockID), BlockTime: blockTime, TxID: thor.BytesToBytes32(txID), + TxIndex: seq.TxIndex(), TxOrigin: thor.BytesToAddress(txOrigin), ClauseIndex: clauseIndex, Sender: thor.BytesToAddress(sender), @@ -376,7 +393,10 @@ func (db *LogDB) HasBlockID(id thor.Bytes32) (bool, error) { UNION SELECT * FROM (SELECT seq FROM event WHERE seq=? 
AND blockID=` + refIDQuery + ` LIMIT 1))` - seq := newSequence(block.Number(id), 0) + seq, err := newSequence(block.Number(id), 0, 0) + if err != nil { + return false, err + } row := db.stmtCache.MustPrepare(query).QueryRow(seq, id[:], seq, id[:]) var count int if err := row.Scan(&count); err != nil { @@ -398,11 +418,23 @@ func (db *LogDB) NewWriterSyncOff() *Writer { func topicValue(topics []thor.Bytes32, i int) []byte { if i < len(topics) { - return topics[i][:] + return removeLeadingZeros(topics[i][:]) } return nil } +func removeLeadingZeros(bytes []byte) []byte { + i := 0 + // increase i until it reaches the first non-zero byte + for ; i < len(bytes) && bytes[i] == 0; i++ { + } + // ensure at least 1 byte exists + if i == len(bytes) { + return []byte{0} + } + return bytes[i:] +} + // Writer is the transactional log writer. type Writer struct { conn *sql.Conn @@ -414,7 +446,11 @@ type Writer struct { // Truncate truncates the database by deleting logs after blockNum (included). func (w *Writer) Truncate(blockNum uint32) error { - seq := newSequence(blockNum, 0) + seq, err := newSequence(blockNum, 0, 0) + if err != nil { + return err + } + if err := w.exec("DELETE FROM event WHERE seq >= ?", seq); err != nil { return err } @@ -431,8 +467,6 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error { blockNum = b.Header().Number() blockTimestamp = b.Header().Timestamp() txs = b.Transactions() - eventCount, - transferCount uint32 isReceiptEmpty = func(r *tx.Receipt) bool { for _, o := range r.Outputs { if len(o.Events) > 0 || len(o.Transfers) > 0 { @@ -441,20 +475,24 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error { } return true } + blockIDInserted bool ) for i, r := range receipts { + eventCount, transferCount := uint32(0), uint32(0) + if isReceiptEmpty(r) { continue } - if eventCount == 0 && transferCount == 0 { + if !blockIDInserted { // block id is not yet inserted if err := w.exec( "INSERT OR IGNORE INTO ref(data) VALUES(?)", blockID[:]); err != nil { return err } + blockIDInserted = true } var ( @@ -466,6 +504,8 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error { txID = tx.ID() txOrigin, _ = tx.Origin() } + + txIndex := i if err := w.exec( "INSERT OR IGNORE INTO ref(data) VALUES(?),(?)", txID[:], txOrigin[:]); err != nil { @@ -481,7 +521,8 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error { topicValue(ev.Topics, 1), topicValue(ev.Topics, 2), topicValue(ev.Topics, 3), - topicValue(ev.Topics, 4)); err != nil { + topicValue(ev.Topics, 4), + ); err != nil { return err } @@ -502,9 +543,14 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error { eventData = ev.Data } + seq, err := newSequence(blockNum, uint32(txIndex), eventCount) + if err != nil { + return err + } + if err := w.exec( query, - newSequence(blockNum, eventCount), + seq, blockTimestamp, clauseIndex, eventData, @@ -537,9 +583,14 @@ func (w *Writer) Write(b *block.Block, receipts tx.Receipts) error { refIDQuery + "," + refIDQuery + ")" + seq, err := newSequence(blockNum, uint32(txIndex), transferCount) + if err != nil { + return err + } + if err := w.exec( query, - newSequence(blockNum, transferCount), + seq, blockTimestamp, clauseIndex, tr.Amount.Bytes(), diff --git a/logdb/logdb_bench_test.go b/logdb/logdb_bench_test.go index e421ffce3..9e667999b 100644 --- a/logdb/logdb_bench_test.go +++ b/logdb/logdb_bench_test.go @@ -3,7 +3,7 @@ // Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying // 
file LICENSE or -package logdb_test +package logdb import ( "context" @@ -15,7 +15,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/vechain/thor/v2/block" - "github.com/vechain/thor/v2/logdb" "github.com/vechain/thor/v2/thor" "github.com/vechain/thor/v2/tx" ) @@ -39,7 +38,7 @@ func init() { flag.StringVar(&dbPath, "dbPath", "", "Path to the database file") } -// TestLogDB_NewestBlockID performs a series of read/write benchmarks on the NewestBlockID functionality of the LogDB. +// TestLogDB_NewestBlockID performs a series of read/write benchmarks on the NewestBlockID functionality of LogDB. // It benchmarks the creating, writing, committing a new block, followed by fetching this new block as the NewestBlockID func BenchmarkFakeDB_NewestBlockID(t *testing.B) { db, err := createTempDB() @@ -155,7 +154,7 @@ func BenchmarkTestDB_HasBlockID(b *testing.B) { defer db.Close() // find the first 500k blocks with events - events, err := db.FilterEvents(context.Background(), &logdb.EventFilter{Options: &logdb.Options{Offset: 0, Limit: 500_000}}) + events, err := db.FilterEvents(context.Background(), &EventFilter{Options: &Options{Offset: 0, Limit: 500_000}}) require.NoError(b, err) require.GreaterOrEqual(b, len(events), 500_000, "there should be more than 500k events in the db") @@ -178,12 +177,12 @@ func BenchmarkTestDB_FilterEvents(b *testing.B) { vthoAddress := thor.MustParseAddress(VTHO_ADDRESS) topic := thor.MustParseBytes32(VTHO_TOPIC) - addressFilterCriteria := []*logdb.EventCriteria{ + addressFilterCriteria := []*EventCriteria{ { Address: &vthoAddress, }, } - topicFilterCriteria := []*logdb.EventCriteria{ + topicFilterCriteria := []*EventCriteria{ { Topics: [5]*thor.Bytes32{&topic, nil, nil, nil, nil}, }, @@ -191,14 +190,14 @@ func BenchmarkTestDB_FilterEvents(b *testing.B) { tests := []struct { name string - arg *logdb.EventFilter + arg *EventFilter }{ - {"AddressCriteriaFilter", &logdb.EventFilter{CriteriaSet: addressFilterCriteria, Options: &logdb.Options{Offset: 0, Limit: 500000}}}, - {"TopicCriteriaFilter", &logdb.EventFilter{CriteriaSet: topicFilterCriteria, Options: &logdb.Options{Offset: 0, Limit: 500000}}}, - {"EventLimit", &logdb.EventFilter{Order: logdb.ASC, Options: &logdb.Options{Offset: 0, Limit: 500000}}}, - {"EventLimitDesc", &logdb.EventFilter{Order: logdb.DESC, Options: &logdb.Options{Offset: 0, Limit: 500000}}}, - {"EventRange", &logdb.EventFilter{Range: &logdb.Range{From: 500000, To: 1_000_000}}}, - {"EventRangeDesc", &logdb.EventFilter{Range: &logdb.Range{From: 500000, To: 1_000_000}, Order: logdb.DESC}}, + {"AddressCriteriaFilter", &EventFilter{CriteriaSet: addressFilterCriteria, Options: &Options{Offset: 0, Limit: 500000}}}, + {"TopicCriteriaFilter", &EventFilter{CriteriaSet: topicFilterCriteria, Options: &Options{Offset: 0, Limit: 500000}}}, + {"EventLimit", &EventFilter{Order: ASC, Options: &Options{Offset: 0, Limit: 500000}}}, + {"EventLimitDesc", &EventFilter{Order: DESC, Options: &Options{Offset: 0, Limit: 500000}}}, + {"EventRange", &EventFilter{Range: &Range{From: 500000, To: 1_000_000}}}, + {"EventRangeDesc", &EventFilter{Range: &Range{From: 500000, To: 1_000_000}, Order: DESC}}, } for _, tt := range tests { @@ -222,7 +221,7 @@ func BenchmarkTestDB_FilterTransfers(b *testing.B) { defer db.Close() txOrigin := thor.MustParseAddress(TEST_ADDRESS) - transferCriteria := []*logdb.TransferCriteria{ + transferCriteria := []*TransferCriteria{ { TxOrigin: &txOrigin, Sender: nil, @@ -232,12 +231,12 @@ func 
BenchmarkTestDB_FilterTransfers(b *testing.B) { tests := []struct { name string - arg *logdb.TransferFilter + arg *TransferFilter }{ - {"TransferCriteria", &logdb.TransferFilter{CriteriaSet: transferCriteria, Options: &logdb.Options{Offset: 0, Limit: 500_000}}}, - {"TransferCriteriaDesc", &logdb.TransferFilter{Order: logdb.DESC, CriteriaSet: transferCriteria, Options: &logdb.Options{Offset: 0, Limit: 500_000}}}, - {"Ranged500K", &logdb.TransferFilter{Range: &logdb.Range{From: 500_000, To: 1_000_000}}}, - {"Ranged500KDesc", &logdb.TransferFilter{Range: &logdb.Range{From: 500_000, To: 1_000_000}, Order: logdb.DESC}}, + {"TransferCriteria", &TransferFilter{CriteriaSet: transferCriteria, Options: &Options{Offset: 0, Limit: 500_000}}}, + {"TransferCriteriaDesc", &TransferFilter{Order: DESC, CriteriaSet: transferCriteria, Options: &Options{Offset: 0, Limit: 500_000}}}, + {"Ranged500K", &TransferFilter{Range: &Range{From: 500_000, To: 1_000_000}}}, + {"Ranged500KDesc", &TransferFilter{Range: &Range{From: 500_000, To: 1_000_000}, Order: DESC}}, } for _, tt := range tests { @@ -253,7 +252,7 @@ func BenchmarkTestDB_FilterTransfers(b *testing.B) { } } -func createTempDB() (*logdb.LogDB, error) { +func createTempDB() (*LogDB, error) { dir, err := os.MkdirTemp("", "tempdir-") if err != nil { return nil, fmt.Errorf("failed to create temp directory: %w", err) @@ -268,7 +267,7 @@ func createTempDB() (*logdb.LogDB, error) { return nil, fmt.Errorf("failed to close temp file: %w", err) } - db, err := logdb.New(tmpFile.Name()) + db, err := New(tmpFile.Name()) if err != nil { return nil, fmt.Errorf("unable to load logdb: %w", err) } @@ -276,10 +275,10 @@ func createTempDB() (*logdb.LogDB, error) { return db, nil } -func loadDBFromDisk(b *testing.B) (*logdb.LogDB, error) { +func loadDBFromDisk(b *testing.B) (*LogDB, error) { if dbPath == "" { b.Fatal("Please provide a dbPath") } - return logdb.New(dbPath) + return New(dbPath) } diff --git a/logdb/logdb_test.go b/logdb/logdb_test.go index 7ffdd59b1..454d3a1e8 100644 --- a/logdb/logdb_test.go +++ b/logdb/logdb_test.go @@ -3,7 +3,7 @@ // Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying // file LICENSE or -package logdb_test +package logdb import ( "context" @@ -11,10 +11,10 @@ import ( "math/big" "testing" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/assert" "github.com/vechain/thor/v2/block" - logdb "github.com/vechain/thor/v2/logdb" "github.com/vechain/thor/v2/thor" "github.com/vechain/thor/v2/tx" ) @@ -84,9 +84,9 @@ func newTransferOnlyReceipt() *tx.Receipt { } } -type eventLogs []*logdb.Event +type eventLogs []*Event -func (logs eventLogs) Filter(f func(ev *logdb.Event) bool) (ret eventLogs) { +func (logs eventLogs) Filter(f func(ev *Event) bool) (ret eventLogs) { for _, ev := range logs { if f(ev) { ret = append(ret, ev) @@ -102,9 +102,9 @@ func (logs eventLogs) Reverse() (ret eventLogs) { return } -type transferLogs []*logdb.Transfer +type transferLogs []*Transfer -func (logs transferLogs) Filter(f func(tr *logdb.Transfer) bool) (ret transferLogs) { +func (logs transferLogs) Filter(f func(tr *Transfer) bool) (ret transferLogs) { for _, tr := range logs { if f(tr) { ret = append(ret, tr) @@ -121,7 +121,7 @@ func (logs transferLogs) Reverse() (ret transferLogs) { } func TestEvents(t *testing.T) { - db, err := logdb.NewMem() + db, err := NewMem() if err != nil { t.Fatal(err) } @@ -144,9 +144,10 @@ func TestEvents(t *testing.T) { tx := 
b.Transactions()[j] receipt := receipts[j] origin, _ := tx.Origin() - allEvents = append(allEvents, &logdb.Event{ + allEvents = append(allEvents, &Event{ BlockNumber: b.Header().Number(), - Index: uint32(j), + LogIndex: uint32(0), + TxIndex: uint32(j), BlockID: b.Header().ID(), BlockTime: b.Header().Timestamp(), TxID: tx.ID(), @@ -157,9 +158,10 @@ func TestEvents(t *testing.T) { Data: receipt.Outputs[0].Events[0].Data, }) - allTransfers = append(allTransfers, &logdb.Transfer{ + allTransfers = append(allTransfers, &Transfer{ BlockNumber: b.Header().Number(), - Index: uint32(j), + LogIndex: uint32(0), + TxIndex: uint32(j), BlockID: b.Header().ID(), BlockTime: b.Header().Timestamp(), TxID: tx.ID(), @@ -184,21 +186,21 @@ func TestEvents(t *testing.T) { { tests := []struct { name string - arg *logdb.EventFilter + arg *EventFilter want eventLogs }{ - {"query all events", &logdb.EventFilter{}, allEvents}, + {"query all events", &EventFilter{}, allEvents}, {"query all events with nil option", nil, allEvents}, - {"query all events asc", &logdb.EventFilter{Order: logdb.ASC}, allEvents}, - {"query all events desc", &logdb.EventFilter{Order: logdb.DESC}, allEvents.Reverse()}, - {"query all events limit offset", &logdb.EventFilter{Options: &logdb.Options{Offset: 1, Limit: 10}}, allEvents[1:11]}, - {"query all events range", &logdb.EventFilter{Range: &logdb.Range{From: 10, To: 20}}, allEvents.Filter(func(ev *logdb.Event) bool { return ev.BlockNumber >= 10 && ev.BlockNumber <= 20 })}, - {"query events with range and desc", &logdb.EventFilter{Range: &logdb.Range{From: 10, To: 20}, Order: logdb.DESC}, allEvents.Filter(func(ev *logdb.Event) bool { return ev.BlockNumber >= 10 && ev.BlockNumber <= 20 }).Reverse()}, - {"query events with limit with desc", &logdb.EventFilter{Order: logdb.DESC, Options: &logdb.Options{Limit: 10}}, allEvents.Reverse()[0:10]}, - {"query all events with criteria", &logdb.EventFilter{CriteriaSet: []*logdb.EventCriteria{{Address: &allEvents[1].Address}}}, allEvents.Filter(func(ev *logdb.Event) bool { + {"query all events asc", &EventFilter{Order: ASC}, allEvents}, + {"query all events desc", &EventFilter{Order: DESC}, allEvents.Reverse()}, + {"query all events limit offset", &EventFilter{Options: &Options{Offset: 1, Limit: 10}}, allEvents[1:11]}, + {"query all events range", &EventFilter{Range: &Range{From: 10, To: 20}}, allEvents.Filter(func(ev *Event) bool { return ev.BlockNumber >= 10 && ev.BlockNumber <= 20 })}, + {"query events with range and desc", &EventFilter{Range: &Range{From: 10, To: 20}, Order: DESC}, allEvents.Filter(func(ev *Event) bool { return ev.BlockNumber >= 10 && ev.BlockNumber <= 20 }).Reverse()}, + {"query events with limit with desc", &EventFilter{Order: DESC, Options: &Options{Limit: 10}}, allEvents.Reverse()[0:10]}, + {"query all events with criteria", &EventFilter{CriteriaSet: []*EventCriteria{{Address: &allEvents[1].Address}}}, allEvents.Filter(func(ev *Event) bool { return ev.Address == allEvents[1].Address })}, - {"query all events with multi-criteria", &logdb.EventFilter{CriteriaSet: []*logdb.EventCriteria{{Address: &allEvents[1].Address}, {Topics: [5]*thor.Bytes32{allEvents[2].Topics[0]}}, {Topics: [5]*thor.Bytes32{allEvents[3].Topics[0]}}}}, allEvents.Filter(func(ev *logdb.Event) bool { + {"query all events with multi-criteria", &EventFilter{CriteriaSet: []*EventCriteria{{Address: &allEvents[1].Address}, {Topics: [5]*thor.Bytes32{allEvents[2].Topics[0]}}, {Topics: [5]*thor.Bytes32{allEvents[3].Topics[0]}}}}, allEvents.Filter(func(ev *Event) bool { 
return ev.Address == allEvents[1].Address || *ev.Topics[0] == *allEvents[2].Topics[0] || *ev.Topics[0] == *allEvents[3].Topics[0] })}, } @@ -215,21 +217,21 @@ func TestEvents(t *testing.T) { { tests := []struct { name string - arg *logdb.TransferFilter + arg *TransferFilter want transferLogs }{ - {"query all transfers", &logdb.TransferFilter{}, allTransfers}, + {"query all transfers", &TransferFilter{}, allTransfers}, {"query all transfers with nil option", nil, allTransfers}, - {"query all transfers asc", &logdb.TransferFilter{Order: logdb.ASC}, allTransfers}, - {"query all transfers desc", &logdb.TransferFilter{Order: logdb.DESC}, allTransfers.Reverse()}, - {"query all transfers limit offset", &logdb.TransferFilter{Options: &logdb.Options{Offset: 1, Limit: 10}}, allTransfers[1:11]}, - {"query all transfers range", &logdb.TransferFilter{Range: &logdb.Range{From: 10, To: 20}}, allTransfers.Filter(func(tr *logdb.Transfer) bool { return tr.BlockNumber >= 10 && tr.BlockNumber <= 20 })}, - {"query transfers with range and desc", &logdb.TransferFilter{Range: &logdb.Range{From: 10, To: 20}, Order: logdb.DESC}, allTransfers.Filter(func(tr *logdb.Transfer) bool { return tr.BlockNumber >= 10 && tr.BlockNumber <= 20 }).Reverse()}, - {"query transfers with limit with desc", &logdb.TransferFilter{Order: logdb.DESC, Options: &logdb.Options{Limit: 10}}, allTransfers.Reverse()[0:10]}, - {"query all transfers with criteria", &logdb.TransferFilter{CriteriaSet: []*logdb.TransferCriteria{{Sender: &allTransfers[1].Sender}}}, allTransfers.Filter(func(tr *logdb.Transfer) bool { + {"query all transfers asc", &TransferFilter{Order: ASC}, allTransfers}, + {"query all transfers desc", &TransferFilter{Order: DESC}, allTransfers.Reverse()}, + {"query all transfers limit offset", &TransferFilter{Options: &Options{Offset: 1, Limit: 10}}, allTransfers[1:11]}, + {"query all transfers range", &TransferFilter{Range: &Range{From: 10, To: 20}}, allTransfers.Filter(func(tr *Transfer) bool { return tr.BlockNumber >= 10 && tr.BlockNumber <= 20 })}, + {"query transfers with range and desc", &TransferFilter{Range: &Range{From: 10, To: 20}, Order: DESC}, allTransfers.Filter(func(tr *Transfer) bool { return tr.BlockNumber >= 10 && tr.BlockNumber <= 20 }).Reverse()}, + {"query transfers with limit with desc", &TransferFilter{Order: DESC, Options: &Options{Limit: 10}}, allTransfers.Reverse()[0:10]}, + {"query all transfers with criteria", &TransferFilter{CriteriaSet: []*TransferCriteria{{Sender: &allTransfers[1].Sender}}}, allTransfers.Filter(func(tr *Transfer) bool { return tr.Sender == allTransfers[1].Sender })}, - {"query all transfers with multi-criteria", &logdb.TransferFilter{CriteriaSet: []*logdb.TransferCriteria{{Sender: &allTransfers[1].Sender}, {Recipient: &allTransfers[2].Recipient}}}, allTransfers.Filter(func(tr *logdb.Transfer) bool { + {"query all transfers with multi-criteria", &TransferFilter{CriteriaSet: []*TransferCriteria{{Sender: &allTransfers[1].Sender}, {Recipient: &allTransfers[2].Recipient}}}, allTransfers.Filter(func(tr *Transfer) bool { return tr.Sender == allTransfers[1].Sender || tr.Recipient == allTransfers[2].Recipient })}, } @@ -244,10 +246,10 @@ func TestEvents(t *testing.T) { } } -// TestLogDB_NewestBlockID performs a series of read/write tests on the NewestBlockID functionality of the LogDB. +// TestLogDB_NewestBlockID performs a series of read/write tests on the NewestBlockID functionality of LogDB. // It validates the correctness of the NewestBlockID method under various scenarios. 
func TestLogDB_NewestBlockID(t *testing.T) { - db, err := logdb.NewMem() + db, err := NewMem() if err != nil { t.Fatal(err) } @@ -368,9 +370,9 @@ func TestLogDB_NewestBlockID(t *testing.T) { } } -// TestLogDB_HasBlockID performs a series of tests on the HasBlockID functionality of the LogDB. +// TestLogDB_HasBlockID performs a series of tests on the HasBlockID functionality of LogDB. func TestLogDB_HasBlockID(t *testing.T) { - db, err := logdb.NewMem() + db, err := NewMem() if err != nil { t.Fatal(err) } @@ -431,3 +433,34 @@ func TestLogDB_HasBlockID(t *testing.T) { } assert.True(t, has) } + +func TestRemoveLeadingZeros(t *testing.T) { + tests := []struct { + name string + input []byte + expected []byte + }{ + { + "should remove leading zeros", + common.Hex2Bytes("0000000000000000000000006d95e6dca01d109882fe1726a2fb9865fa41e7aa"), + common.Hex2Bytes("6d95e6dca01d109882fe1726a2fb9865fa41e7aa"), + }, + { + "should not remove any bytes", + common.Hex2Bytes("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), + common.Hex2Bytes("ddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"), + }, + { + "should have at least 1 byte", + common.Hex2Bytes("00000000000000000"), + []byte{0}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := removeLeadingZeros(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/logdb/sequence.go b/logdb/sequence.go index 52909ffe4..04606890a 100644 --- a/logdb/sequence.go +++ b/logdb/sequence.go @@ -5,21 +5,50 @@ package logdb -import "math" +import "errors" type sequence int64 -func newSequence(blockNum uint32, index uint32) sequence { - if (index & math.MaxInt32) != index { - panic("index too large") +// Adjust these constants based on the bit allocation requirements. +// The 64th bit is the sign bit, so we have 63 bits to use. +const ( + blockNumBits = 28 + txIndexBits = 15 + logIndexBits = 20 + // Max = 2^28 - 1 = 268,435,455 (unsigned int 28) + blockNumMask = (1 << blockNumBits) - 1 + // Max = 2^15 - 1 = 32,767 + txIndexMask = (1 << txIndexBits) - 1 + // Max = 2^20 - 1 = 1,048,575 + logIndexMask = (1 << logIndexBits) - 1 + + MaxBlockNumber = blockNumMask +) + +func newSequence(blockNum uint32, txIndex uint32, logIndex uint32) (sequence, error) { + if blockNum > blockNumMask { + return 0, errors.New("block number out of range: uint28") + } + if txIndex > txIndexMask { + return 0, errors.New("tx index out of range: uint15") + } + if logIndex > logIndexMask { + return 0, errors.New("log index out of range: uint20") } - return (sequence(blockNum) << 31) | sequence(index) + + return (sequence(blockNum) << (txIndexBits + logIndexBits)) | + (sequence(txIndex) << logIndexBits) | + sequence(logIndex), nil } func (s sequence) BlockNumber() uint32 { - return uint32(s >> 31) + return uint32(s>>(txIndexBits+logIndexBits)) & blockNumMask +} + +func (s sequence) TxIndex() uint32 { + return uint32((s >> logIndexBits) & txIndexMask) } -func (s sequence) Index() uint32 { - return uint32(s & math.MaxInt32) +func (s sequence) LogIndex() uint32 { + return uint32(s & logIndexMask) } diff --git a/logdb/sequence_test.go b/logdb/sequence_test.go index 9fa19fff0..4b4d4e421 100644 --- a/logdb/sequence_test.go +++ b/logdb/sequence_test.go @@ -6,41 +6,86 @@ package logdb import ( - "math" + "math/rand/v2" "testing" + + "github.com/stretchr/testify/assert" ) func TestSequence(t *testing.T) { type args struct { blockNum uint32 - index uint32 + txIndex uint32 + logIndex uint32 } tests := []struct { name string args args - 
want args }{ - {"regular", args{1, 2}, args{1, 2}}, - {"max bn", args{math.MaxUint32, 1}, args{math.MaxUint32, 1}}, - {"max index", args{5, math.MaxInt32}, args{5, math.MaxInt32}}, - {"both max", args{math.MaxUint32, math.MaxInt32}, args{math.MaxUint32, math.MaxInt32}}, + {"regular", args{1, 2, 3}}, + {"max bn", args{blockNumMask, 1, 2}}, + {"max tx index", args{5, txIndexMask, 4}}, + {"max log index", args{5, 4, logIndexMask}}, + {"close to max", args{blockNumMask - 5, txIndexMask - 5, logIndexMask - 5}}, + {"both max", args{blockNumMask, txIndexMask, logIndexMask}}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := newSequence(tt.args.blockNum, tt.args.index) - if bn := got.BlockNumber(); bn != tt.want.blockNum { - t.Errorf("seq.blockNum() = %v, want %v", bn, tt.want.blockNum) + got, err := newSequence(tt.args.blockNum, tt.args.txIndex, tt.args.logIndex) + if err != nil { + t.Error(err) + } + + assert.True(t, got > 0, "sequence should be positive") + if bn := got.BlockNumber(); bn != tt.args.blockNum { + t.Errorf("seq.blockNum() = %v, want %v", bn, tt.args.blockNum) + } + if ti := got.TxIndex(); ti != tt.args.txIndex { + t.Errorf("seq.txIndex() = %v, want %v", ti, tt.args.txIndex) } - if i := got.Index(); i != tt.want.index { - t.Errorf("seq.index() = %v, want %v", i, tt.want.index) + if i := got.LogIndex(); i != tt.args.logIndex { + t.Errorf("seq.index() = %v, want %v", i, tt.args.logIndex) } }) } +} + +// In case someone messes up the bit allocation +func TestSequenceValue(t *testing.T) { + //#nosec G404 + for i := 0; i < 2; i++ { + blk := rand.Uint32N(blockNumMask) + txIndex := rand.Uint32N(txIndexMask) + logIndex := rand.Uint32N(logIndexMask) + + seq, err := newSequence(blk, txIndex, logIndex) + assert.Nil(t, err) + assert.True(t, seq > 0, "sequence should be positive") + + a := rand.Uint32N(blockNumMask) + b := rand.Uint32N(txIndexMask) + c := rand.Uint32N(logIndexMask) + + seq1, err := newSequence(a, b, c) + assert.Nil(t, err) + assert.True(t, seq1 > 0, "sequence should be positive") + + expected := func() bool { + if blk != a { + return blk > a + } + if txIndex != b { + return txIndex > b + } + if logIndex != c { + return logIndex > c + } + return false + }() + assert.Equal(t, expected, seq > seq1) + } +} - defer func() { - if e := recover(); e == nil { - t.Errorf("newSequence should panic on 2nd arg > math.MaxInt32") - } - }() - newSequence(1, math.MaxInt32+1) +func TestBitDistribution(t *testing.T) { + assert.Less(t, blockNumBits+txIndexBits+logIndexBits, 64, "total bits in sequence should be less than 64") } diff --git a/logdb/types.go b/logdb/types.go index e4ebb1be4..8e772cc0c 100644 --- a/logdb/types.go +++ b/logdb/types.go @@ -15,10 +15,11 @@ import ( // Event represents tx.Event that can be stored in db. type Event struct { BlockNumber uint32 - Index uint32 + LogIndex uint32 BlockID thor.Bytes32 BlockTime uint64 TxID thor.Bytes32 + TxIndex uint32 TxOrigin thor.Address //contract caller ClauseIndex uint32 Address thor.Address // always a contract address @@ -29,10 +30,11 @@ type Event struct { // Transfer represents tx.Transfer that can be stored in db. 
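Because the block number occupies the most significant bits of the packed value, sequences compare in (block number, tx index, log index) order, which is exactly what TestSequenceValue above asserts. A minimal standalone sketch (not part of the patch; values are illustrative) showing how a sequence packs and unpacks:

package main

import "fmt"

const (
	blockNumBits = 28
	txIndexBits  = 15
	logIndexBits = 20
)

// pack mirrors logdb.newSequence, minus the range checks.
func pack(blockNum, txIndex, logIndex uint32) int64 {
	return int64(blockNum)<<(txIndexBits+logIndexBits) |
		int64(txIndex)<<logIndexBits |
		int64(logIndex)
}

func main() {
	seq := pack(3, 2, 1)
	fmt.Println(seq)                                                  // 103081312257 == 3<<35 | 2<<20 | 1
	fmt.Println(uint32(seq >> (txIndexBits + logIndexBits)))          // 3 (block number)
	fmt.Println(uint32(seq>>logIndexBits) & ((1 << txIndexBits) - 1)) // 2 (tx index)
	fmt.Println(uint32(seq) & ((1 << logIndexBits) - 1))              // 1 (log index)
}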
type Transfer struct { BlockNumber uint32 - Index uint32 + LogIndex uint32 BlockID thor.Bytes32 BlockTime uint64 TxID thor.Bytes32 + TxIndex uint32 TxOrigin thor.Address ClauseIndex uint32 Sender thor.Address @@ -71,7 +73,7 @@ func (c *EventCriteria) toWhereCondition() (cond string, args []interface{}) { for i, topic := range c.Topics { if topic != nil { cond += fmt.Sprintf(" AND topic%v = ", i) + refIDQuery - args = append(args, topic.Bytes()) + args = append(args, removeLeadingZeros(topic.Bytes())) } } return diff --git a/lowrlp/encoder.go b/lowrlp/encoder.go deleted file mode 100644 index 9f5bab37b..000000000 --- a/lowrlp/encoder.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright (c) 2021 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -// Package lowrlp provides methods to perform low-level rlp encoding. -// Codes are mostly copied from github.com/ethereum/go-ethereum/rlp. -package lowrlp - -import ( - "io" -) - -// Encoder is the low-level rlp encoder. -type Encoder struct { - str []byte // string data, contains everything except list headers - lheads []listhead // all list headers - lhsize int // sum of sizes of all encoded list headers - sizebuf [9]byte // auxiliary buffer for uint encoding -} - -// Reset reset the encoder state. -func (w *Encoder) Reset() { - w.lhsize = 0 - w.str = w.str[:0] - w.lheads = w.lheads[:0] -} - -// EncodeString encodes the string value. -func (w *Encoder) EncodeString(b []byte) { - if len(b) == 1 && b[0] <= 0x7F { - // fits single byte, no string header - w.str = append(w.str, b[0]) - } else { - w.encodeStringHeader(len(b)) - w.str = append(w.str, b...) - } -} - -// EncodeUint encodes the uint value. -func (w *Encoder) EncodeUint(i uint64) { - if i == 0 { - w.str = append(w.str, 0x80) - } else if i < 128 { - // fits single byte - w.str = append(w.str, byte(i)) - } else { - s := putint(w.sizebuf[1:], i) - w.sizebuf[0] = 0x80 + byte(s) - w.str = append(w.str, w.sizebuf[:s+1]...) - } -} - -// EncodeRaw encodes raw value. -func (w *Encoder) EncodeRaw(r []byte) { - w.str = append(w.str, r...) -} - -// EncodeEmptyString encodes an empty string. -// It's equivalent to w.EncodeString(nil), but more efficient. -func (w *Encoder) EncodeEmptyString() { - w.str = append(w.str, 0x80) -} - -// EncodeEmptyList encodes an empty list. -// It's equivalent to w.ListEnd(w.List()), but more efficient. -func (w *Encoder) EncodeEmptyList() { - w.str = append(w.str, 0xC0) -} - -// List starts to encode list elements. -// It returns the offset which is passed to ListEnd when list ended. -func (w *Encoder) List() int { - w.lheads = append(w.lheads, listhead{offset: len(w.str), size: w.lhsize}) - return len(w.lheads) - 1 -} - -// ListEnd ends the list. offset is the return value of the corresponded List call. -func (w *Encoder) ListEnd(index int) { - lh := &w.lheads[index] - lh.size = w.size() - lh.offset - lh.size - if lh.size < 56 { - w.lhsize++ // length encoded into kind tag - } else { - w.lhsize += 1 + intsize(uint64(lh.size)) - } -} - -// ToBytes outputs the encode result to byte slice. 
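The removeLeadingZeros helper called in toWhereCondition above is defined outside this hunk; a minimal implementation consistent with TestRemoveLeadingZeros earlier in the diff would be:

func removeLeadingZeros(b []byte) []byte {
	i := 0
	// keep at least one byte, so an all-zero input yields []byte{0}
	for i < len(b)-1 && b[i] == 0 {
		i++
	}
	return b[i:]
}

Trimming the zero padding here presumably gives query arguments the same canonical form as the stored topic values, so a 32-byte, left-padded topic (e.g. an address) still matches.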
-func (w *Encoder) ToBytes() []byte { - out := make([]byte, w.size()) - strpos := 0 - pos := 0 - for _, head := range w.lheads { - // write string data before header - n := copy(out[pos:], w.str[strpos:head.offset]) - pos += n - strpos += n - // write the header - enc := head.encode(out[pos:]) - pos += len(enc) - } - // copy string data after the last list header - copy(out[pos:], w.str[strpos:]) - return out -} - -// ToWriter outputs the encode result to io.Writer. -func (w *Encoder) ToWriter(out io.Writer) (err error) { - strpos := 0 - for _, head := range w.lheads { - // write string data before header - if head.offset-strpos > 0 { - n, err := out.Write(w.str[strpos:head.offset]) - strpos += n - if err != nil { - return err - } - } - // write the header - enc := head.encode(w.sizebuf[:]) - if _, err = out.Write(enc); err != nil { - return err - } - } - if strpos < len(w.str) { - // write string data after the last list header - _, err = out.Write(w.str[strpos:]) - } - return err -} - -func (w *Encoder) encodeStringHeader(size int) { - if size < 56 { - w.str = append(w.str, 0x80+byte(size)) - } else { - sizesize := putint(w.sizebuf[1:], uint64(size)) - w.sizebuf[0] = 0xB7 + byte(sizesize) - w.str = append(w.str, w.sizebuf[:sizesize+1]...) - } -} - -func (w *Encoder) size() int { - return len(w.str) + w.lhsize -} - -type listhead struct { - offset int // index of this header in string data - size int // total size of encoded data (including list headers) -} - -// encode writes head to the given buffer, which must be at least -// 9 bytes long. It returns the encoded bytes. -func (head *listhead) encode(buf []byte) []byte { - return buf[:puthead(buf, 0xC0, 0xF7, uint64(head.size))] -} - -// intsize computes the minimum number of bytes required to store i. -func intsize(i uint64) (size int) { - for size = 1; ; size++ { - if i >>= 8; i == 0 { - return size - } - } -} - -// puthead writes a list or string header to buf. -// buf must be at least 9 bytes long. -func puthead(buf []byte, smalltag, largetag byte, size uint64) int { - if size < 56 { - buf[0] = smalltag + byte(size) - return 1 - } - sizesize := putint(buf[1:], size) - buf[0] = largetag + byte(sizesize) - return sizesize + 1 -} - -// putint writes i to the beginning of b in big endian byte -// order, using the least number of bytes needed to represent i. 
-func putint(b []byte, i uint64) (size int) { - switch { - case i < (1 << 8): - b[0] = byte(i) - return 1 - case i < (1 << 16): - b[0] = byte(i >> 8) - b[1] = byte(i) - return 2 - case i < (1 << 24): - b[0] = byte(i >> 16) - b[1] = byte(i >> 8) - b[2] = byte(i) - return 3 - case i < (1 << 32): - b[0] = byte(i >> 24) - b[1] = byte(i >> 16) - b[2] = byte(i >> 8) - b[3] = byte(i) - return 4 - case i < (1 << 40): - b[0] = byte(i >> 32) - b[1] = byte(i >> 24) - b[2] = byte(i >> 16) - b[3] = byte(i >> 8) - b[4] = byte(i) - return 5 - case i < (1 << 48): - b[0] = byte(i >> 40) - b[1] = byte(i >> 32) - b[2] = byte(i >> 24) - b[3] = byte(i >> 16) - b[4] = byte(i >> 8) - b[5] = byte(i) - return 6 - case i < (1 << 56): - b[0] = byte(i >> 48) - b[1] = byte(i >> 40) - b[2] = byte(i >> 32) - b[3] = byte(i >> 24) - b[4] = byte(i >> 16) - b[5] = byte(i >> 8) - b[6] = byte(i) - return 7 - default: - b[0] = byte(i >> 56) - b[1] = byte(i >> 48) - b[2] = byte(i >> 40) - b[3] = byte(i >> 32) - b[4] = byte(i >> 24) - b[5] = byte(i >> 16) - b[6] = byte(i >> 8) - b[7] = byte(i) - return 8 - } -} diff --git a/metrics/telemetry.go b/metrics/telemetry.go index 1d1ee96f2..4e17a332d 100644 --- a/metrics/telemetry.go +++ b/metrics/telemetry.go @@ -30,6 +30,11 @@ func HTTPHandler() http.Handler { return metrics.GetOrCreateHandler() } +func NoOp() bool { + _, isNoOp := metrics.(*noopMetrics) + return isNoOp +} + // Define standard buckets for histograms var ( Bucket10s = []int64{0, 500, 1000, 2000, 3000, 4000, 5000, 7500, 10_000} diff --git a/muxdb/backend.go b/muxdb/backend.go new file mode 100644 index 000000000..4d63a2d6e --- /dev/null +++ b/muxdb/backend.go @@ -0,0 +1,94 @@ +// Copyright (c) 2024 The VeChainThor developers + +// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying +// file LICENSE or + +package muxdb + +import ( + "context" + "encoding/binary" + "math" + + "github.com/vechain/thor/v2/kv" + "github.com/vechain/thor/v2/trie" +) + +// backend is the backend of the trie. +type backend struct { + Store kv.Store + Cache Cache + HistPtnFactor, DedupedPtnFactor uint32 + CachedNodeTTL uint16 +} + +// AppendHistNodeKey composes hist node key and appends to buf. +func (b *backend) AppendHistNodeKey(buf []byte, name string, path []byte, ver trie.Version) []byte { + // encoding node keys in this way has the following benefits: + // 1. nodes are stored in order of partition id, which is friendly to LSM DB. + // 2. adjacent versions of a node are stored together, + // so that node data is well compressed (ref https://gist.github.com/qianbin/bffcd248b7312c35d7d526a974018b1b ) + buf = append(buf, trieHistSpace) // space + if b.HistPtnFactor != math.MaxUint32 { // partition id + buf = binary.BigEndian.AppendUint32(buf, ver.Major/b.HistPtnFactor) + } + buf = append(buf, name...) // trie name + buf = appendNodePath(buf, path) // path + + // major ver + mod := ver.Major % b.HistPtnFactor + // more compact encoding + switch { + case b.HistPtnFactor > (1 << 24): + buf = binary.BigEndian.AppendUint32(buf, mod) + case b.HistPtnFactor > (1 << 16): + buf = append(buf, byte(mod>>16), byte(mod>>8), byte(mod)) + case b.HistPtnFactor > (1 << 8): + buf = append(buf, byte(mod>>8), byte(mod)) + case b.HistPtnFactor > 1: + buf = append(buf, byte(mod)) + } + + if ver.Minor != 0 { // minor ver + buf = binary.AppendUvarint(buf, uint64(ver.Minor)) + } + return buf +} + +// AppendDedupedNodeKey composes deduped node key and appends to buf. 
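To make the hist node key layout concrete, here is a small, self-contained sketch (factor and version values are assumed, not taken from the patch) of the partition-id plus compact-mod encoding that AppendHistNodeKey performs; the path portion is handled by appendNodePath, defined just below:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeVersion mirrors the partition and compact-mod encoding of
// AppendHistNodeKey, for an assumed HistPtnFactor of 1000.
func encodeVersion(factor, major uint32) []byte {
	var buf []byte
	buf = binary.BigEndian.AppendUint32(buf, major/factor) // partition id
	mod := major % factor
	switch {
	case factor > 1<<24:
		buf = binary.BigEndian.AppendUint32(buf, mod)
	case factor > 1<<16:
		buf = append(buf, byte(mod>>16), byte(mod>>8), byte(mod))
	case factor > 1<<8:
		buf = append(buf, byte(mod>>8), byte(mod))
	case factor > 1:
		buf = append(buf, byte(mod))
	}
	return buf
}

func main() {
	// major version 123456 -> partition 123, mod 456; a factor of 1000
	// needs only two bytes for the mod, not a full uint32.
	fmt.Printf("% x\n", encodeVersion(1000, 123456)) // 00 00 00 7b 01 c8
}

Keeping adjacent major versions of a node inside one partition, with the mod encoded as compactly as the factor allows, is what lets neighboring keys share long prefixes and compress well in the underlying LSM store.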
+func (b *backend) AppendDedupedNodeKey(buf []byte, name string, path []byte, ver trie.Version) []byte { + buf = append(buf, trieDedupedSpace) // space + if b.DedupedPtnFactor != math.MaxUint32 { // partition id + buf = binary.BigEndian.AppendUint32(buf, ver.Major/b.DedupedPtnFactor) + } + buf = append(buf, name...) // trie name + buf = appendNodePath(buf, path) // path + return buf +} + +// DeleteHistoryNodes deletes trie history nodes within partitions of [startMajorVer, limitMajorVer). +func (b *backend) DeleteHistoryNodes(ctx context.Context, startMajorVer, limitMajorVer uint32) error { + startPtn := startMajorVer / b.HistPtnFactor + limitPtn := limitMajorVer / b.HistPtnFactor + + return b.Store.DeleteRange(ctx, kv.Range{ + Start: binary.BigEndian.AppendUint32([]byte{trieHistSpace}, startPtn), + Limit: binary.BigEndian.AppendUint32([]byte{trieHistSpace}, limitPtn), + }) +} + +// appendNodePath encodes the node path and appends to buf. +func appendNodePath(buf, path []byte) []byte { + switch len(path) { + case 0: + return append(buf, 0, 0) + case 1: + return append(buf, path[0], 1) + case 2: + return append(buf, path[0], (path[1]<<4)|2) + default: + // has more + buf = append(buf, path[0]|0x10, (path[1]<<4)|2) + return appendNodePath(buf, path[2:]) + } +} diff --git a/muxdb/cache.go b/muxdb/cache.go new file mode 100644 index 000000000..23a0b08d3 --- /dev/null +++ b/muxdb/cache.go @@ -0,0 +1,230 @@ +// Copyright (c) 2021 The VeChainThor developers + +// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying +// file LICENSE or + +package muxdb + +import ( + "bytes" + "encoding/binary" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/qianbin/directcache" + "github.com/vechain/thor/v2/trie" ) + +type Cache interface { + AddNodeBlob(keyBuf *[]byte, name string, path []byte, ver trie.Version, blob []byte, isCommitting bool) + GetNodeBlob(keyBuf *[]byte, name string, path []byte, ver trie.Version, peek bool) []byte + AddRootNode(name string, n trie.Node) + GetRootNode(name string, ver trie.Version) trie.Node +} + +// cache is the cache layer for trie. +type cache struct { + queriedNodes *directcache.Cache // caches recently queried node blobs. + committedNodes *directcache.Cache // caches newly committed node blobs. + roots struct { // caches root nodes. + m map[string]trie.Node + lock sync.RWMutex + maxMajor uint32 + ttl uint32 + } + + nodeStats cacheStats + rootStats cacheStats + lastLogTime atomic.Int64 +} + +// newCache creates a cache object with the given cache size. +func newCache(sizeMB int, rootTTL uint32) Cache { + sizeBytes := sizeMB * 1024 * 1024 + cache := &cache{ + queriedNodes: directcache.New(sizeBytes / 4), + committedNodes: directcache.New(sizeBytes - sizeBytes/4), + } + cache.lastLogTime.Store(time.Now().UnixNano()) + cache.roots.m = make(map[string]trie.Node) + cache.roots.ttl = rootTTL + return cache +} + +func (c *cache) log() { + now := time.Now().UnixNano() + last := c.lastLogTime.Swap(now) + + if now-last > int64(time.Second*20) { + shouldNode, hitNode, missNode := c.nodeStats.Stats() + shouldRoot, hitRoot, missRoot := c.rootStats.Stats() + + // log the two categories together only if one of the hit rates has + // changed compared to the last run, to avoid too many logs. 
+ if shouldNode || shouldRoot { + logStats("node cache stats", hitNode, missNode) + logStats("root cache stats", hitRoot, missRoot) + } + + // metrics will be reported every 20 seconds + metricCacheHitMiss().SetWithLabel(hitRoot, map[string]string{"type": "root", "event": "hit"}) + metricCacheHitMiss().SetWithLabel(missRoot, map[string]string{"type": "root", "event": "miss"}) + metricCacheHitMiss().SetWithLabel(hitNode, map[string]string{"type": "node", "event": "hit"}) + metricCacheHitMiss().SetWithLabel(missNode, map[string]string{"type": "node", "event": "miss"}) + } else { + c.lastLogTime.CompareAndSwap(now, last) + } +} + +// AddNodeBlob adds encoded node blob into the cache. +func (c *cache) AddNodeBlob(keyBuf *[]byte, name string, path []byte, ver trie.Version, blob []byte, isCommitting bool) { + // the version part + v := binary.AppendUvarint((*keyBuf)[:0], uint64(ver.Major)) + v = binary.AppendUvarint(v, uint64(ver.Minor)) + // the full key + k := append(v, name...) + k = append(k, path...) + *keyBuf = k + + if isCommitting { + _ = c.committedNodes.AdvSet(k[len(v):], len(blob)+len(v), func(val []byte) { + copy(val, v) + copy(val[len(v):], blob) + }) + } else { + _ = c.queriedNodes.Set(k, blob) + } +} + +// GetNodeBlob returns the cached node blob. +func (c *cache) GetNodeBlob(keyBuf *[]byte, name string, path []byte, ver trie.Version, peek bool) []byte { + // the version part + v := binary.AppendUvarint((*keyBuf)[:0], uint64(ver.Major)) + v = binary.AppendUvarint(v, uint64(ver.Minor)) + // the full key + k := append(v, name...) + k = append(k, path...) + *keyBuf = k + + var blob []byte + // lookup from committing cache + if c.committedNodes.AdvGet(k[len(v):], func(val []byte) { + if bytes.Equal(k[:len(v)], val[:len(v)]) { + blob = append([]byte(nil), val[len(v):]...) + } + }, peek) && len(blob) > 0 { + if !peek { + c.nodeStats.Hit() + } + return blob + } + + // fallback to querying cache + if c.queriedNodes.AdvGet(k, func(val []byte) { + blob = append([]byte(nil), val...) + }, peek) && len(blob) > 0 { + if !peek { + c.nodeStats.Hit() + } + return blob + } + if !peek { + c.nodeStats.Miss() + } + return nil +} + +// AddRootNode adds the root node into the cache. +func (c *cache) AddRootNode(name string, n trie.Node) { + if n == nil { + return + } + c.roots.lock.Lock() + defer c.roots.lock.Unlock() + + major := n.Version().Major + if major > c.roots.maxMajor { + c.roots.maxMajor = major + // evict old root nodes + for k, r := range c.roots.m { + if major-r.Version().Major > c.roots.ttl { + delete(c.roots.m, k) + } + } + } + c.roots.m[name] = n +} + +// GetRootNode returns the cached root node. 
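A minimal usage sketch of the node-blob cache (assuming it sits in package muxdb next to the code above; sizes, trie name, and byte values are illustrative). The caller-owned key buffer is threaded through by pointer so repeated lookups reuse one allocation:

func sketchCacheUsage() {
	var keyBuf []byte
	c := newCache(16, 32) // 16 MB of blob cache; roots kept for 32 major versions
	ver := trie.Version{Major: 7}

	// committed entries are keyed by name+path and store the varint-encoded
	// version as a value prefix
	c.AddNodeBlob(&keyBuf, "acc", []byte{0x1, 0x2}, ver, []byte{0xde, 0xad}, true)

	// same name/path/version -> hit
	_ = c.GetNodeBlob(&keyBuf, "acc", []byte{0x1, 0x2}, ver, false) // [0xde 0xad]

	// same name/path but a newer version -> miss, because the stored
	// version prefix no longer matches the requested one
	_ = c.GetNodeBlob(&keyBuf, "acc", []byte{0x1, 0x2}, trie.Version{Major: 8}, false) // nil
}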
+func (c *cache) GetRootNode(name string, ver trie.Version) trie.Node { + c.roots.lock.RLock() + defer c.roots.lock.RUnlock() + + if r, has := c.roots.m[name]; has { + if r.Version() == ver { + if c.rootStats.Hit()%2000 == 0 { + c.log() + } + return r + } + } + c.rootStats.Miss() + return nil +} + +type cacheStats struct { + hit, miss atomic.Int64 + flag atomic.Int32 +} + +func (cs *cacheStats) Hit() int64 { return cs.hit.Add(1) } +func (cs *cacheStats) Miss() int64 { return cs.miss.Add(1) } + +func (cs *cacheStats) Stats() (bool, int64, int64) { + hit := cs.hit.Load() + miss := cs.miss.Load() + lookups := hit + miss + + hitRate := float64(0) + if lookups > 0 { + hitRate = float64(hit) / float64(lookups) + } + flag := int32(hitRate * 1000) + + return cs.flag.Swap(flag) != flag, hit, miss +} + +func logStats(msg string, hit, miss int64) { + lookups := hit + miss + var str string + if lookups > 0 { + str = fmt.Sprintf("%.3f", float64(hit)/float64(lookups)) + } else { + str = "n/a" + } + + logger.Info(msg, + "lookups", lookups, + "hitrate", str, + ) +} + +type dummyCache struct{} + +// AddNodeBlob is a no-op. +func (*dummyCache) AddNodeBlob(_ *[]byte, _ string, _ []byte, _ trie.Version, _ []byte, _ bool) {} + +// GetNodeBlob always returns nil. +func (*dummyCache) GetNodeBlob(_ *[]byte, _ string, _ []byte, _ trie.Version, _ bool) []byte { + return nil +} + +// AddRootNode is a no-op. +func (*dummyCache) AddRootNode(_ string, _ trie.Node) {} + +// GetRootNode always returns nil. +func (*dummyCache) GetRootNode(_ string, _ trie.Version) trie.Node { + return nil +} diff --git a/muxdb/cache_test.go b/muxdb/cache_test.go new file mode 100644 index 000000000..20635afc0 --- /dev/null +++ b/muxdb/cache_test.go @@ -0,0 +1,95 @@ +// Copyright (c) 2019 The VeChainThor developers + +// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying +// file LICENSE or + +package muxdb + +import ( + "bytes" + "crypto/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/vechain/thor/v2/trie" +) + +type mockedRootNode struct { + trie.Node + ver trie.Version +} + +func (m *mockedRootNode) Version() trie.Version { return m.ver } + +func TestCacheRootNode(t *testing.T) { + cache := newCache(0, 100) + + n1 := &mockedRootNode{ver: trie.Version{Major: 1, Minor: 1}} + cache.AddRootNode("", n1) + assert.Equal(t, n1, cache.GetRootNode("", n1.ver)) + + // minor ver not matched + assert.Equal(t, nil, cache.GetRootNode("", trie.Version{Major: 1})) +} + +func TestCacheNodeBlob(t *testing.T) { + var ( + cache = newCache(1, 0) + keyBuf []byte + blob = []byte{1, 1, 1} + ver = trie.Version{Major: 1, Minor: 1} + ) + + // add to committing cache + cache.AddNodeBlob(&keyBuf, "", nil, ver, blob, true) + assert.Equal(t, blob, cache.GetNodeBlob(&keyBuf, "", nil, ver, false)) + // minor ver not matched + assert.Nil(t, cache.GetNodeBlob(&keyBuf, "", nil, trie.Version{Major: 1}, false)) + + cache = newCache(1, 0) + + // add to querying cache + cache.AddNodeBlob(&keyBuf, "", nil, ver, blob, false) + assert.Equal(t, blob, cache.GetNodeBlob(&keyBuf, "", nil, ver, false)) + // minor ver not matched + assert.Nil(t, cache.GetNodeBlob(&keyBuf, "", nil, trie.Version{Major: 1}, false)) +} + +func Benchmark_cacheNodeBlob(b *testing.B) { + var ( + cache = newCache(100, 0) + keyBuf []byte + name = "n" + path = []byte{1, 1} + blob = make([]byte, 100) + ) + rand.Read(blob) + + for i := 0; i < b.N; i++ { + cache.AddNodeBlob(&keyBuf, name, path, trie.Version{}, blob, true) + got := 
cache.GetNodeBlob(&keyBuf, name, path, trie.Version{}, false) + if !bytes.Equal(got, blob) { + b.Fatalf("want %x, got %x", blob, got) + } + } +} + +func Benchmark_cacheRootNode(b *testing.B) { + var ( + cache = newCache(1, 0) + name = "n" + ) + + var tr trie.Trie + tr.Update([]byte{1}, []byte{2}, []byte{3}) + + rn := tr.RootNode() + + for i := 0; i < b.N; i++ { + cache.AddRootNode(name, rn) + got := cache.GetRootNode(name, trie.Version{}) + if got != rn { + b.Fatalf("want %v, got %v", rn, got) + } + } +} diff --git a/muxdb/internal/engine/engine.go b/muxdb/engine/engine.go similarity index 100% rename from muxdb/internal/engine/engine.go rename to muxdb/engine/engine.go diff --git a/muxdb/internal/engine/leveldb.go b/muxdb/engine/leveldb.go similarity index 82% rename from muxdb/internal/engine/leveldb.go rename to muxdb/engine/leveldb.go index b26d7acff..de317c857 100644 --- a/muxdb/internal/engine/leveldb.go +++ b/muxdb/engine/leveldb.go @@ -21,14 +21,14 @@ var ( scanOpt = opt.ReadOptions{DontFillCache: true} ) -type levelEngine struct { +type LevelEngine struct { db *leveldb.DB batchPool *sync.Pool } // NewLevelEngine creates leveldb instance which implements the Engine interface. func NewLevelEngine(db *leveldb.DB) Engine { - return &levelEngine{ + return &LevelEngine{ db, &sync.Pool{ New: func() interface{} { @@ -38,15 +38,15 @@ func NewLevelEngine(db *leveldb.DB) Engine { } } -func (ldb *levelEngine) Close() error { +func (ldb *LevelEngine) Close() error { return ldb.db.Close() } -func (ldb *levelEngine) IsNotFound(err error) bool { +func (ldb *LevelEngine) IsNotFound(err error) bool { return err == leveldb.ErrNotFound } -func (ldb *levelEngine) Get(key []byte) ([]byte, error) { +func (ldb *LevelEngine) Get(key []byte) ([]byte, error) { val, err := ldb.db.Get(key, &readOpt) // val will be []byte{} if error occurs, which is not expected if err != nil { @@ -55,19 +55,19 @@ func (ldb *levelEngine) Get(key []byte) ([]byte, error) { return val, nil } -func (ldb *levelEngine) Has(key []byte) (bool, error) { +func (ldb *LevelEngine) Has(key []byte) (bool, error) { return ldb.db.Has(key, &readOpt) } -func (ldb *levelEngine) Put(key, val []byte) error { +func (ldb *LevelEngine) Put(key, val []byte) error { return ldb.db.Put(key, val, &writeOpt) } -func (ldb *levelEngine) Delete(key []byte) error { +func (ldb *LevelEngine) Delete(key []byte) error { return ldb.db.Delete(key, &writeOpt) } -func (ldb *levelEngine) Snapshot() kv.Snapshot { +func (ldb *LevelEngine) Snapshot() kv.Snapshot { s, err := ldb.db.GetSnapshot() return &struct { kv.GetFunc @@ -100,7 +100,7 @@ func (ldb *levelEngine) Snapshot() kv.Snapshot { } } -func (ldb *levelEngine) Bulk() kv.Bulk { +func (ldb *LevelEngine) Bulk() kv.Bulk { const idealBatchSize = 128 * 1024 var batch *leveldb.Batch @@ -150,11 +150,11 @@ func (ldb *levelEngine) Bulk() kv.Bulk { } } -func (ldb *levelEngine) Iterate(r kv.Range) kv.Iterator { +func (ldb *LevelEngine) Iterate(r kv.Range) kv.Iterator { return ldb.db.NewIterator((*util.Range)(&r), &scanOpt) } -func (ldb *levelEngine) DeleteRange(ctx context.Context, r kv.Range) error { +func (ldb *LevelEngine) DeleteRange(ctx context.Context, r kv.Range) error { iter := ldb.Iterate(r) defer iter.Release() @@ -183,3 +183,7 @@ func (ldb *levelEngine) DeleteRange(ctx context.Context, r kv.Range) error { } return bulk.Write() } + +func (ldb *LevelEngine) Stats(s *leveldb.DBStats) error { + return ldb.db.Stats(s) +} diff --git a/muxdb/internal/trie/cache.go b/muxdb/internal/trie/cache.go deleted file mode 100644 
index 10e792b57..000000000 --- a/muxdb/internal/trie/cache.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) 2021 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -package trie - -import ( - "encoding/binary" - "fmt" - "sync/atomic" - "time" - - lru "github.com/hashicorp/golang-lru" - "github.com/qianbin/directcache" - "github.com/vechain/thor/v2/cache" - "github.com/vechain/thor/v2/trie" -) - -// Cache is the cache layer for trie. -type Cache struct { - // caches recently queried node blobs. Using full node key as key. - queriedNodes *directcache.Cache - // caches newly committed node blobs. Using node path as key. - committedNodes *directcache.Cache - // caches root nodes. - roots *lru.ARCCache - nodeStats cache.Stats - rootStats cache.Stats - lastLogTime int64 -} - -// NewCache creates a cache object with the given cache size. -func NewCache(sizeMB int, rootCap int) *Cache { - sizeBytes := sizeMB * 1024 * 1024 - var cache Cache - cache.queriedNodes = directcache.New(sizeBytes / 4) - cache.committedNodes = directcache.New(sizeBytes - sizeBytes/4) - cache.roots, _ = lru.NewARC(rootCap) - cache.lastLogTime = time.Now().UnixNano() - return &cache -} - -func (c *Cache) log() { - now := time.Now().UnixNano() - last := atomic.SwapInt64(&c.lastLogTime, now) - - if now-last > int64(time.Second*20) { - shouldNode, hitNode, missNode := c.nodeStats.Stats() - shouldRoot, hitRoot, missRoot := c.rootStats.Stats() - - // log two categories together only one of the hit rate has - // changed compared to the last run, to avoid too many logs. - if shouldNode || shouldRoot { - logStats("node cache stats", hitNode, missNode) - logStats("root cache stats", hitRoot, missRoot) - } - - // metrics will reported every 20 seconds - metricCacheHitMiss().SetWithLabel(hitNode, map[string]string{"type": "node", "event": "hit"}) - metricCacheHitMiss().SetWithLabel(missNode, map[string]string{"type": "node", "event": "miss"}) - metricCacheHitMiss().SetWithLabel(hitRoot, map[string]string{"type": "root", "event": "hit"}) - metricCacheHitMiss().SetWithLabel(missRoot, map[string]string{"type": "root", "event": "miss"}) - } else { - atomic.CompareAndSwapInt64(&c.lastLogTime, now, last) - } -} - -// AddNodeBlob adds node blob into the cache. -func (c *Cache) AddNodeBlob(name string, seq sequence, path []byte, blob []byte, isCommitting bool) { - if c == nil { - return - } - cNum, dNum := seq.CommitNum(), seq.DistinctNum() - k := bufferPool.Get().(*buffer) - defer bufferPool.Put(k) - - k.buf = append(k.buf[:0], name...) - k.buf = append(k.buf, path...) - k.buf = appendUint32(k.buf, dNum) - - if isCommitting { - // committing cache key: name + path + distinctNum - - // concat commit number with blob as cache value - _ = c.committedNodes.AdvSet(k.buf, 4+len(blob), func(val []byte) { - binary.BigEndian.PutUint32(val, cNum) - copy(val[4:], blob) - }) - } else { - // querying cache key: name + path + distinctNum + commitNum - k.buf = appendUint32(k.buf, cNum) - _ = c.queriedNodes.Set(k.buf, blob) - } -} - -// GetNodeBlob returns the cached node blob. -func (c *Cache) GetNodeBlob(name string, seq sequence, path []byte, peek bool, dst []byte) []byte { - if c == nil { - return nil - } - - cNum, dNum := seq.CommitNum(), seq.DistinctNum() - lookupQueried := c.queriedNodes.AdvGet - lookupCommitted := c.committedNodes.AdvGet - - k := bufferPool.Get().(*buffer) - defer bufferPool.Put(k) - - k.buf = append(k.buf[:0], name...) 
- k.buf = append(k.buf, path...) - k.buf = appendUint32(k.buf, dNum) - - // lookup from committing cache - var blob []byte - if lookupCommitted(k.buf, func(b []byte) { - if binary.BigEndian.Uint32(b) == cNum { - blob = append(dst, b[4:]...) - } - }, peek) && len(blob) > 0 { - if !peek { - c.nodeStats.Hit() - } - return blob - } - - // fallback to querying cache - k.buf = appendUint32(k.buf, cNum) - if lookupQueried(k.buf, func(b []byte) { - blob = append(dst, b...) - }, peek); len(blob) > 0 { - if !peek { - c.nodeStats.Hit() - } - return blob - } - if !peek { - c.nodeStats.Miss() - } - return nil -} - -// AddRootNode add the root node into the cache. -func (c *Cache) AddRootNode(name string, n trie.Node) bool { - if c == nil { - return false - } - if n.Dirty() { - return false - } - var sub *lru.Cache - if q, has := c.roots.Get(name); has { - sub = q.(*lru.Cache) - } else { - sub, _ = lru.New(4) - c.roots.Add(name, sub) - } - sub.Add(n.SeqNum(), n) - return true -} - -// GetRootNode returns the cached root node. -func (c *Cache) GetRootNode(name string, seq uint64, peek bool) (trie.Node, bool) { - if c == nil { - return trie.Node{}, false - } - - getByName := c.roots.Get - if peek { - getByName = c.roots.Peek - } - - if sub, has := getByName(name); has { - getByKey := sub.(*lru.Cache).Get - if peek { - getByKey = sub.(*lru.Cache).Peek - } - if cached, has := getByKey(seq); has { - if !peek { - if c.rootStats.Hit()%2000 == 0 { - c.log() - } - } - return cached.(trie.Node), true - } - } - if !peek { - c.rootStats.Miss() - } - return trie.Node{}, false -} - -func logStats(msg string, hit, miss int64) { - lookups := hit + miss - var str string - if lookups > 0 { - str = fmt.Sprintf("%.3f", float64(hit)/float64(lookups)) - } else { - str = "n/a" - } - - logger.Info(msg, - "lookups", lookups, - "hitrate", str, - ) -} diff --git a/muxdb/internal/trie/leaf_bank.go b/muxdb/internal/trie/leaf_bank.go deleted file mode 100644 index f088a1eb4..000000000 --- a/muxdb/internal/trie/leaf_bank.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright (c) 2021 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -package trie - -import ( - "encoding/binary" - "sync/atomic" - - "github.com/ethereum/go-ethereum/rlp" - lru "github.com/hashicorp/golang-lru" - "github.com/pkg/errors" - "github.com/vechain/thor/v2/kv" - "github.com/vechain/thor/v2/trie" -) - -const ( - entityPrefix = "e" - deletionJournalPrefix = "d" - - slotCacheSize = 64 -) - -// LeafRecord presents the queried leaf record. -type LeafRecord struct { - *trie.Leaf - CommitNum uint32 // which commit number the leaf was committed - SlotCommitNum uint32 // up to which commit number this leaf is valid -} - -// leafEntity is the entity stored in leaf bank. -type leafEntity struct { - *trie.Leaf `rlp:"nil"` - CommitNum uint32 -} - -var encodedEmptyLeafEntity, _ = rlp.EncodeToBytes(&leafEntity{}) - -// trieSlot holds the state of a trie slot. -type trieSlot struct { - getter kv.Getter - commitNum uint32 // the commit number of this slot - cache *lru.Cache -} - -func (s *trieSlot) getEntity(key []byte) (*leafEntity, error) { - data, err := s.getter.Get(key) - if err != nil { - if !s.getter.IsNotFound(err) { - return nil, errors.Wrap(err, "get entity from leafbank") - } - // never seen, which means it has been an empty leaf until slotCommitNum. 
- return nil, nil - } - - // entity found - var ent leafEntity - if err := rlp.DecodeBytes(data, &ent); err != nil { - return nil, errors.Wrap(err, "decode leaf entity") - } - - if ent.Leaf != nil && len(ent.Leaf.Meta) == 0 { - ent.Meta = nil // normalize - } - return &ent, nil -} - -func (s *trieSlot) getRecord(key []byte) (rec *LeafRecord, err error) { - slotCommitNum := atomic.LoadUint32(&s.commitNum) - if slotCommitNum == 0 { - // an empty slot always gives undetermined value. - return &LeafRecord{}, nil - } - - strKey := string(key) - if cached, ok := s.cache.Get(strKey); ok { - return cached.(*LeafRecord), nil - } - - defer func() { - if err == nil { - s.cache.Add(strKey, rec) - } - }() - - ent, err := s.getEntity(key) - if err != nil { - return nil, err - } - - if ent == nil { // never seen - return &LeafRecord{ - Leaf: &trie.Leaf{}, - CommitNum: 0, - SlotCommitNum: slotCommitNum, - }, nil - } - - if slotCommitNum < ent.CommitNum { - slotCommitNum = ent.CommitNum - } - - return &LeafRecord{ - Leaf: ent.Leaf, - CommitNum: ent.CommitNum, - SlotCommitNum: slotCommitNum, - }, nil -} - -// LeafBank records accumulated trie leaves to help accelerate trie leaf access -// according to VIP-212. -type LeafBank struct { - store kv.Store - space byte - slots *lru.ARCCache -} - -// NewLeafBank creates a new LeafBank instance. -// The slotCap indicates the capacity of cached per-trie slots. -func NewLeafBank(store kv.Store, space byte, slotCap int) *LeafBank { - b := &LeafBank{store: store, space: space} - b.slots, _ = lru.NewARC(slotCap) - return b -} - -func (b *LeafBank) slotBucket(name string) kv.Bucket { - return kv.Bucket(string(b.space) + entityPrefix + name) -} - -func (b *LeafBank) deletionJournalBucket(name string) kv.Bucket { - return kv.Bucket(string(b.space) + deletionJournalPrefix + name) -} - -// getSlot gets slot from slots cache or create a new one. -func (b *LeafBank) getSlot(name string) (*trieSlot, error) { - if cached, ok := b.slots.Get(name); ok { - return cached.(*trieSlot), nil - } - - slot := &trieSlot{getter: b.slotBucket(name).NewGetter(b.store)} - if data, err := slot.getter.Get(nil); err != nil { - if !slot.getter.IsNotFound(err) { - return nil, errors.Wrap(err, "get slot from leafbank") - } - } else { - slot.commitNum = binary.BigEndian.Uint32(data) - } - - slot.cache, _ = lru.New(slotCacheSize) - b.slots.Add(name, slot) - return slot, nil -} - -// Lookup lookups a leaf record by the given leafKey for the trie named by name. -// LeafRecord.Leaf might be nil if the leaf can't be determined. -func (b *LeafBank) Lookup(name string, leafKey []byte) (rec *LeafRecord, err error) { - slot, err := b.getSlot(name) - if err != nil { - return nil, err - } - return slot.getRecord(leafKey) -} - -// LogDeletions saves the journal of leaf-key deletions which issued by one trie-commit. -func (b *LeafBank) LogDeletions(putter kv.Putter, name string, keys []string, commitNum uint32) error { - if len(keys) == 0 { - return nil - } - - bkt := b.deletionJournalBucket(name) + kv.Bucket(appendUint32(nil, commitNum)) - putter = bkt.NewPutter(putter) - for _, k := range keys { - if err := putter.Put([]byte(k), nil); err != nil { - return err - } - } - return nil -} - -// NewUpdater creates a leaf-updater for a trie slot with the given name. -func (b *LeafBank) NewUpdater(name string, baseCommitNum, targetCommitNum uint32) (*LeafUpdater, error) { - slot, err := b.getSlot(name) - if err != nil { - return nil, err - } - - bulk := b.slotBucket(name). - NewStore(b.store). 
- Bulk() - bulk.EnableAutoFlush() - - // traverse the deletion-journal and write to the slot - iter := b.deletionJournalBucket(name). - NewStore(b.store). - Iterate(kv.Range{ - Start: appendUint32(nil, baseCommitNum), - Limit: appendUint32(nil, targetCommitNum+1), - }) - defer iter.Release() - for iter.Next() { - // skip commit number to get leaf key - leafKey := iter.Key()[4:] - // put empty value to mark the leaf to undetermined state - if err := bulk.Put(leafKey, encodedEmptyLeafEntity); err != nil { - return nil, err - } - } - if err := iter.Error(); err != nil { - return nil, err - } - - return &LeafUpdater{ - slot: slot, - bulk: bulk, - targetCommitNum: targetCommitNum, - }, nil -} - -// LeafUpdater helps to record trie leaves. -type LeafUpdater struct { - slot *trieSlot - bulk kv.Bulk - targetCommitNum uint32 -} - -// Update updates the leaf for the given key. -func (u *LeafUpdater) Update(leafKey []byte, leaf *trie.Leaf, leafCommitNum uint32) error { - ent := &leafEntity{ - Leaf: leaf, - CommitNum: leafCommitNum, - } - data, err := rlp.EncodeToBytes(ent) - if err != nil { - return err - } - - return u.bulk.Put(leafKey, data) -} - -// Commit commits updates into leafbank. -func (u *LeafUpdater) Commit() error { - // save slot commit number - if err := u.bulk.Put(nil, appendUint32(nil, u.targetCommitNum)); err != nil { - return err - } - if err := u.bulk.Write(); err != nil { - return err - } - atomic.StoreUint32(&u.slot.commitNum, u.targetCommitNum) - return nil -} diff --git a/muxdb/internal/trie/leaf_bank_test.go b/muxdb/internal/trie/leaf_bank_test.go deleted file mode 100644 index a3b8ebde6..000000000 --- a/muxdb/internal/trie/leaf_bank_test.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright (c) 2021 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -package trie - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/vechain/thor/v2/trie" -) - -func TestLeafbank(t *testing.T) { - engine := newEngine() - space := byte(2) - slotCap := 10 - lb := NewLeafBank(engine, space, slotCap) - name := "the trie" - - t.Run("empty state", func(t *testing.T) { - key := []byte("key") - rec, err := lb.Lookup(name, key) - assert.NoError(t, err) - assert.Equal(t, &LeafRecord{}, rec) - }) - - t.Run("update and lookup", func(t *testing.T) { - u, err := lb.NewUpdater(name, 0, 100) - assert.Nil(t, err) - for i := 0; i < 10; i++ { - if err := u.Update([]byte(strconv.Itoa(i)), &trie.Leaf{Value: []byte(strconv.Itoa(i))}, 10); err != nil { - t.Fatal(err) - } - } - if err := u.Commit(); err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - rec, err := lb.Lookup(name, []byte(strconv.Itoa(i))) - assert.NoError(t, err) - assert.Equal(t, &LeafRecord{ - Leaf: &trie.Leaf{Value: []byte(strconv.Itoa(i))}, - CommitNum: 10, - SlotCommitNum: 100, - }, rec) - } - }) - - t.Run("lookup never seen", func(t *testing.T) { - rec, err := lb.Lookup(name, []byte(strconv.Itoa(11))) - assert.NoError(t, err) - - assert.Equal(t, &LeafRecord{Leaf: &trie.Leaf{}, SlotCommitNum: 100}, rec) - }) - - t.Run("lookup deleted", func(t *testing.T) { - // mark - err := lb.LogDeletions(engine, name, []string{strconv.Itoa(1)}, 101) - assert.Nil(t, err) - - u, err := lb.NewUpdater(name, 100, 101) - assert.Nil(t, err) - - err = u.Commit() - assert.Nil(t, err) - - // recreate to drop cache - lb = NewLeafBank(engine, space, slotCap) - - rec, err := lb.Lookup(name, []byte(strconv.Itoa(1))) - 
assert.NoError(t, err) - assert.Equal(t, &LeafRecord{SlotCommitNum: 101}, rec) - }) -} diff --git a/muxdb/internal/trie/trie.go b/muxdb/internal/trie/trie.go deleted file mode 100644 index af58fc78f..000000000 --- a/muxdb/internal/trie/trie.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright (c) 2021 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -package trie - -import ( - "context" - - "github.com/pkg/errors" - "github.com/vechain/thor/v2/kv" - "github.com/vechain/thor/v2/log" - "github.com/vechain/thor/v2/thor" - "github.com/vechain/thor/v2/trie" -) - -var logger = log.WithContext("pkg", "muxdb.trie") - -// Backend is the backend of the trie. -type Backend struct { - Store kv.Store - Cache *Cache - LeafBank *LeafBank - HistSpace, - DedupedSpace byte - HistPtnFactor, - DedupedPtnFactor uint32 - CachedNodeTTL uint16 -} - -// sequence helps convert sequence number from/to commitNum & distinctNum. -type sequence uint64 - -func makeSequence(commitNum, distinctNum uint32) sequence { - return sequence(commitNum) | (sequence(distinctNum) << 32) -} - -func (s sequence) CommitNum() uint32 { return uint32(s) } -func (s sequence) DistinctNum() uint32 { return uint32(s >> 32) } - -// Trie is the managed trie. -type Trie struct { - back *Backend - name string - ext *trie.ExtendedTrie - - dirty bool - deletions []string - noFillCache bool - fastLeafGet func(nodeCommitNum uint32) (*trie.Leaf, error) -} - -// New creates a managed trie. -func New( - back *Backend, - name string, - root thor.Bytes32, - commitNum uint32, - distinctNum uint32, - nonCrypto bool, -) *Trie { - t := &Trie{ - back: back, - name: name, - } - - seq := makeSequence(commitNum, distinctNum) - if rootNode, ok := back.Cache.GetRootNode(name, uint64(seq), false); ok { - t.ext = trie.NewExtendedCached(rootNode, t.newDatabase(), nonCrypto) - } else { - t.ext = trie.NewExtended(root, uint64(seq), t.newDatabase(), nonCrypto) - } - t.ext.SetCacheTTL(t.back.CachedNodeTTL) - return t -} - -// Name returns the trie name. -func (t *Trie) Name() string { - return t.name -} - -func (t *Trie) makeHistNodeKey(dst []byte, seq sequence, path []byte) []byte { - commitNum, distinctNum := seq.CommitNum(), seq.DistinctNum() - dst = append(dst, t.back.HistSpace) // space - dst = appendUint32(dst, commitNum/t.back.HistPtnFactor) // partition id - dst = append(dst, t.name...) // trie name - dst = encodePath(dst, path) // path - dst = appendUint32(dst, commitNum%t.back.HistPtnFactor) // commit num mod - dst = appendUint32(dst, distinctNum) // distinct num - return dst -} - -func (t *Trie) makeDedupedNodeKey(dst []byte, seq sequence, path []byte) []byte { - commitNum := seq.CommitNum() - dst = append(dst, t.back.DedupedSpace) // space - dst = appendUint32(dst, commitNum/t.back.DedupedPtnFactor) // partition id - dst = append(dst, t.name...) // trie name - dst = encodePath(dst, path) // path - return dst -} - -// newDatabase creates a database instance for low-level trie construction. 
-func (t *Trie) newDatabase() trie.Database { - var ( - thisHash []byte - thisSeq sequence - thisPath []byte - keyBuf []byte - ) - - return &struct { - trie.DatabaseReaderTo - trie.DatabaseKeyEncoder - trie.DatabaseReader - trie.DatabaseWriter - }{ - databaseGetToFunc(func(_ []byte, dst []byte) (blob []byte, err error) { - // get from cache - if blob = t.back.Cache.GetNodeBlob(t.name, thisSeq, thisPath, t.noFillCache, dst); len(blob) > 0 { - return - } - defer func() { - if err == nil && !t.noFillCache { - t.back.Cache.AddNodeBlob(t.name, thisSeq, thisPath, blob, false) - } - }() - - // if cache missed, try fast leaf get - if t.fastLeafGet != nil { - if leaf, err := t.fastLeafGet(thisSeq.CommitNum()); err != nil { - return nil, err - } else if leaf != nil { - // good, leaf got. returns a special error to short-circuit further node lookups. - return nil, &leafAvailable{leaf} - } - } - - defer func() { - if err == nil && !t.ext.IsNonCrypto() { - // to ensure the node is correct, we need to verify the node hash. - // TODO: later can skip this step - if ok, err1 := trie.VerifyNodeHash(blob[len(dst):], thisHash); err1 != nil { - err = errors.Wrap(err1, "verify node hash") - } else if !ok { - err = errors.New("node hash checksum error") - } - } - }() - - // query in db - snapshot := t.back.Store.Snapshot() - defer snapshot.Release() - - // get from hist space first - keyBuf = t.makeHistNodeKey(keyBuf[:0], thisSeq, thisPath) - if val, err := snapshot.Get(keyBuf); err == nil { - // found - return append(dst, val...), nil - } else if !snapshot.IsNotFound(err) { - // error - if !snapshot.IsNotFound(err) { - return nil, err - } - } - - // then from deduped space - keyBuf = t.makeDedupedNodeKey(keyBuf[:0], thisSeq, thisPath) - if val, err := snapshot.Get(keyBuf); err == nil { - return append(dst, val...), nil - } - return nil, err - }), - databaseKeyEncodeFunc(func(hash []byte, seq uint64, path []byte) []byte { - thisHash = hash - thisSeq = sequence(seq) - thisPath = path - return nil - }), - nil, - nil, - } -} - -// Copy make a copy of this trie. -func (t *Trie) Copy() *Trie { - cpy := *t - cpy.ext = trie.NewExtendedCached(t.ext.RootNode(), cpy.newDatabase(), t.ext.IsNonCrypto()) - cpy.ext.SetCacheTTL(cpy.back.CachedNodeTTL) - cpy.fastLeafGet = nil - - if len(t.deletions) > 0 { - cpy.deletions = append([]string(nil), t.deletions...) - } else { - cpy.deletions = nil - } - return &cpy -} - -// Get returns the value for key stored in the trie. -// The value bytes must not be modified by the caller. -func (t *Trie) Get(key []byte) ([]byte, []byte, error) { - return t.ext.Get(key) -} - -// FastGet uses a fast way to query the value for key stored in the trie. -// See VIP-212 for detail. -func (t *Trie) FastGet(key []byte, steadyCommitNum uint32) ([]byte, []byte, error) { - if t.back.LeafBank == nil { - return t.ext.Get(key) - } - - // setup fast leaf getter - var leafRec *LeafRecord - t.fastLeafGet = func(nodeCommitNum uint32) (*trie.Leaf, error) { - // short circuit if the node is too new - if nodeCommitNum > steadyCommitNum { - return nil, nil - } - if leafRec == nil { - var err error - if leafRec, err = t.back.LeafBank.Lookup(t.name, key); err != nil { - return nil, err - } - } - - // can't be determined - if leafRec.Leaf == nil { - return nil, nil - } - - // if [nodeCN, steadyCN] and [leafCN, slotCN] have intersection, - // the leaf will be the correct one. 
- if nodeCommitNum <= leafRec.SlotCommitNum && leafRec.CommitNum <= steadyCommitNum { - return leafRec.Leaf, nil - } - return nil, nil - } - defer func() { t.fastLeafGet = nil }() - - val, meta, err := t.ext.Get(key) - if err != nil { - if miss, ok := err.(*trie.MissingNodeError); ok { - if la, ok := miss.Err.(*leafAvailable); ok { - return la.Value, la.Meta, nil - } - } - return nil, nil, err - } - return val, meta, nil -} - -// Update associates key with value in the trie. Subsequent calls to -// Get will return value. If value has length zero, any existing value -// is deleted from the trie and calls to Get will return nil. -// -// The value bytes must not be modified by the caller while they are -// stored in the trie. -func (t *Trie) Update(key, val, meta []byte) error { - t.dirty = true - if len(val) == 0 { // deletion - if t.back.LeafBank != nil { - t.deletions = append(t.deletions, string(key)) - } - } - return t.ext.Update(key, val, meta) -} - -// Stage processes trie updates and calculates the new root hash. -func (t *Trie) Stage(newCommitNum, newDistinctNum uint32) (root thor.Bytes32, commit func() error) { - var ( - thisPath []byte - bulk = t.back.Store.Bulk() - buf []byte - ) - - // make a copy of the original trie to perform commit. - // so later if real commit is discarded, the original trie will be in - // correct state. - extCpy := *t.ext - newSeq := makeSequence(newCommitNum, newDistinctNum) - - db := &struct { - trie.DatabaseWriter - trie.DatabaseKeyEncoder - }{ - kv.PutFunc(func(_, blob []byte) error { - buf = t.makeHistNodeKey(buf[:0], newSeq, thisPath) - if err := bulk.Put(buf, blob); err != nil { - return err - } - if !t.noFillCache { - t.back.Cache.AddNodeBlob(t.name, newSeq, thisPath, blob, true) - } - return nil - }), - databaseKeyEncodeFunc(func(_ []byte, _ uint64, path []byte) []byte { - thisPath = path - return nil - }), - } - - // commit the copied trie without flush to db - root, err := extCpy.CommitTo(db, uint64(newSeq)) - if err != nil { - return root, func() error { return err } - } - - commit = func() error { - if t.back.LeafBank != nil { - if err := t.back.LeafBank.LogDeletions(bulk, t.name, t.deletions, newCommitNum); err != nil { - return err - } - } - // real-commit, flush to db - if err := bulk.Write(); err != nil { - return err - } - - t.dirty = false - t.deletions = t.deletions[:0] - - // replace with the new root node after the copied trie committed - newRootNode := extCpy.RootNode() - t.ext.SetRootNode(newRootNode) - if !t.noFillCache { - t.back.Cache.AddRootNode(t.name, newRootNode) - } - return nil - } - return -} - -// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at -// the key after the given start key -func (t *Trie) NodeIterator(start []byte, baseCommitNum uint32) trie.NodeIterator { - return t.ext.NodeIterator(start, func(seq uint64) bool { - return sequence(seq).CommitNum() >= baseCommitNum - }) -} - -// SetNoFillCache enable or disable cache filling. -func (t *Trie) SetNoFillCache(b bool) { - t.noFillCache = b -} - -// DumpLeaves dumps leaves in the range of [baseCommitNum, targetCommitNum] into leaf bank. -// transform transforms leaves before passing into leaf bank. 
-func (t *Trie) DumpLeaves(ctx context.Context, baseCommitNum, targetCommitNum uint32, transform func(*trie.Leaf) *trie.Leaf) error { - if t.dirty { - return errors.New("dirty trie") - } - if t.back.LeafBank == nil { - return nil - } - - leafUpdater, err := t.back.LeafBank.NewUpdater(t.name, baseCommitNum, targetCommitNum) - if err != nil { - return err - } - var ( - checkContext = newContextChecker(ctx, 5000) - iter = t.NodeIterator(nil, baseCommitNum) - ) - - for iter.Next(true) { - if err := checkContext(); err != nil { - return err - } - - if leaf := iter.Leaf(); leaf != nil { - seq := sequence(iter.SeqNum()) - if err := leafUpdater.Update(iter.LeafKey(), transform(leaf), seq.CommitNum()); err != nil { - return err - } - } - } - if err := iter.Error(); err != nil { - return err - } - return leafUpdater.Commit() -} - -// DumpNodes dumps referenced nodes committed within [baseCommitNum, thisCommitNum], into the deduped space. -func (t *Trie) DumpNodes(ctx context.Context, baseCommitNum uint32, handleLeaf func(*trie.Leaf)) error { - if t.dirty { - return errors.New("dirty trie") - } - var ( - checkContext = newContextChecker(ctx, 5000) - bulk = t.back.Store.Bulk() - iter = t.NodeIterator(nil, baseCommitNum) - buf []byte - ) - bulk.EnableAutoFlush() - - for iter.Next(true) { - if err := checkContext(); err != nil { - return err - } - - if err := iter.Node(func(blob []byte) error { - buf = t.makeDedupedNodeKey(buf[:0], sequence(iter.SeqNum()), iter.Path()) - return bulk.Put(buf, blob) - }); err != nil { - return err - } - if handleLeaf != nil { - if leaf := iter.Leaf(); leaf != nil { - handleLeaf(leaf) - } - } - } - if err := iter.Error(); err != nil { - return err - } - return bulk.Write() -} - -// CleanHistory cleans history nodes within [startCommitNum, limitCommitNum). -func CleanHistory(ctx context.Context, back *Backend, startCommitNum, limitCommitNum uint32) error { - startPtn := startCommitNum / back.HistPtnFactor - limitPtn := limitCommitNum / back.HistPtnFactor - // preserve ptn 0 to make genesis state always visitable - if startPtn == 0 { - startPtn = 1 - } - - return back.Store.DeleteRange(ctx, kv.Range{ - Start: appendUint32([]byte{back.HistSpace}, startPtn), - Limit: appendUint32([]byte{back.HistSpace}, limitPtn), - }) -} - -// individual functions of trie database interface. -type ( - databaseKeyEncodeFunc func(hash []byte, seq uint64, path []byte) []byte - databaseGetToFunc func(key, dst []byte) ([]byte, error) -) - -func (f databaseKeyEncodeFunc) Encode(hash []byte, seq uint64, path []byte) []byte { - return f(hash, seq, path) -} - -func (f databaseGetToFunc) GetTo(key, dst []byte) ([]byte, error) { - return f(key, dst) -} - -// leafAvailable is a special error type to short circuit trie get method. 
-type leafAvailable struct { - *trie.Leaf -} - -func (*leafAvailable) Error() string { - return "leaf available" -} diff --git a/muxdb/internal/trie/trie_test.go b/muxdb/internal/trie/trie_test.go deleted file mode 100644 index d8ce9077c..000000000 --- a/muxdb/internal/trie/trie_test.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (c) 2021 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -package trie - -import ( - "context" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/vechain/thor/v2/muxdb/internal/engine" - "github.com/vechain/thor/v2/thor" - "github.com/vechain/thor/v2/trie" -) - -func newEngine() engine.Engine { - db, _ := leveldb.Open(storage.NewMemStorage(), nil) - return engine.NewLevelEngine(db) -} - -func newBackend() *Backend { - engine := newEngine() - return &Backend{ - Store: engine, - Cache: nil, - LeafBank: NewLeafBank(engine, 2, 100), - HistSpace: 0, - DedupedSpace: 1, - HistPtnFactor: 1, - DedupedPtnFactor: 1, - CachedNodeTTL: 100, - } -} - -func TestTrie(t *testing.T) { - name := "the trie" - - t.Run("basic", func(t *testing.T) { - back := newBackend() - tr := New(back, name, thor.Bytes32{}, 0, 0, false) - assert.Equal(t, name, tr.Name()) - - assert.False(t, tr.dirty) - - key := []byte("key") - val := []byte("value") - tr.Update(key, val, nil) - assert.True(t, tr.dirty) - - _val, _, _ := tr.Get(key) - assert.Equal(t, val, _val) - }) - - t.Run("hash root", func(t *testing.T) { - back := newBackend() - tr := New(back, name, thor.Bytes32{}, 0, 0, false) - - _tr := new(trie.Trie) - - for i := 0; i < 100; i++ { - for j := 0; j < 100; j++ { - key := []byte(strconv.Itoa(i) + "_" + strconv.Itoa(j)) - val := []byte("v" + strconv.Itoa(j) + "_" + strconv.Itoa(i)) - tr.Update(key, val, nil) - _tr.Update(key, val) - } - h, _ := tr.Stage(0, 0) - assert.Equal(t, _tr.Hash(), h) - } - }) - - t.Run("fast get", func(t *testing.T) { - back := newBackend() - tr := New(back, name, thor.Bytes32{}, 0, 0, false) - - var roots []thor.Bytes32 - for i := 0; i < 100; i++ { - for j := 0; j < 100; j++ { - key := []byte(strconv.Itoa(i) + "_" + strconv.Itoa(j)) - val := []byte("v" + strconv.Itoa(j) + "_" + strconv.Itoa(i)) - tr.Update(key, val, nil) - } - root, commit := tr.Stage(uint32(i), 0) - if err := commit(); err != nil { - t.Fatal(err) - } - - roots = append(roots, root) - } - - tr = New(back, name, roots[10], 10, 0, false) - - if err := tr.DumpLeaves(context.Background(), 0, 10, func(l *trie.Leaf) *trie.Leaf { - return &trie.Leaf{ - Value: l.Value, - Meta: []byte("from lb"), - } - }); err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - for j := 0; j < 100; j++ { - key := []byte(strconv.Itoa(i) + "_" + strconv.Itoa(j)) - val := []byte("v" + strconv.Itoa(j) + "_" + strconv.Itoa(i)) - - _val, _meta, err := tr.FastGet(key, 10) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, val, _val) - assert.Equal(t, []byte("from lb"), _meta) - } - } - }) -} diff --git a/muxdb/internal/trie/util.go b/muxdb/internal/trie/util.go deleted file mode 100644 index 6f4f344af..000000000 --- a/muxdb/internal/trie/util.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright (c) 2021 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -package trie - -import ( - "context" - "fmt" - 
"math" - "sync" -) - -// encodePath encodes the path into compact form. -func encodePath(dst []byte, path []byte) []byte { - d := len(path) - s := d / 4 - if s > math.MaxUint8 { - panic(fmt.Errorf("unexpected length of path: %v", d)) - } - // the prefix s is to split the trie into sub tries with depth 4. - dst = append(dst, byte(s)) - - // further on, a sub trie is divided to depth-2 sub tries. - for i := 0; ; i += 4 { - switch d - i { - case 0: - return append(dst, 0) - case 1: - return append(dst, (path[i]<<3)|1) - case 2: - t := (uint16(path[i]) << 4) | uint16(path[i+1]) - return appendUint16(dst, 0x8000|(t<<7)) - case 3: - t := (uint16(path[i]) << 8) | (uint16(path[i+1]) << 4) | uint16(path[i+2]) - return appendUint16(dst, 0x8000|(t<<3)|1) - default: - dst = append(dst, (path[i]<<4)|path[i+1], (path[i+2]<<4)|path[i+3]) - } - } -} - -func appendUint32(b []byte, v uint32) []byte { - return append(b, - byte(v>>24), - byte(v>>16), - byte(v>>8), - byte(v), - ) -} - -func appendUint16(b []byte, v uint16) []byte { - return append(b, - byte(v>>8), - byte(v), - ) -} - -// newContextChecker creates a debounced context checker. -func newContextChecker(ctx context.Context, debounce int) func() error { - count := 0 - return func() error { - count++ - if count > debounce { - count = 0 - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - } - return nil - } -} - -type buffer struct { - buf []byte -} - -var bufferPool = sync.Pool{ - New: func() interface{} { - return &buffer{} - }, -} diff --git a/muxdb/internal/trie/util_test.go b/muxdb/internal/trie/util_test.go deleted file mode 100644 index f6a8c14fb..000000000 --- a/muxdb/internal/trie/util_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright (c) 2021 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -package trie - -import ( - "reflect" - "testing" -) - -func Test_encodePath(t *testing.T) { - tests := []struct { - path []byte - want []byte - }{ - {[]byte{}, []byte{0, 0}}, - {[]byte{8}, []byte{0, (8 << 3) | 1}}, - {[]byte{8, 9}, []byte{0, 0x80 | (8 << 3) | (9 >> 1), 0x80}}, - {[]byte{8, 9, 0xa}, []byte{0, 0xc4, 0x80 | (0xa << 3) | 1}}, - {[]byte{8, 9, 0xa, 0xb}, []byte{1, 0x89, 0xab, 0}}, - {[]byte{8, 9, 0xa, 0xb, 0xc}, []byte{1, 0x89, 0xab, (0xc << 3) | 1}}, - {[]byte{8, 9, 0xa, 0xb, 0xc, 0xd}, []byte{1, 0x89, 0xab, 0x80 | (0xc << 3) | (0xd >> 1), 0x80}}, - {[]byte{8, 9, 0xa, 0xb, 0xc, 0xd, 0xe}, []byte{1, 0x89, 0xab, 0x80 | (0xc << 3) | (0xd >> 1), 0x80 | (0xe << 3) | 1}}, - } - for _, tt := range tests { - if got := encodePath(nil, tt.path); !reflect.DeepEqual(got, tt.want) { - t.Errorf("encodePath() = %v, want %v", got, tt.want) - } - } -} diff --git a/muxdb/metrics.go b/muxdb/metrics.go new file mode 100644 index 000000000..075677e27 --- /dev/null +++ b/muxdb/metrics.go @@ -0,0 +1,31 @@ +// Copyright (c) 2025 The VeChainThor developers + +// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying +// file LICENSE or + +// Package muxdb implements the storage layer for block-chain. +// It manages instance of merkle-patricia-trie, and general purpose named kv-store. 
+package muxdb + +import ( + "strconv" + + "github.com/syndtr/goleveldb/leveldb" + "github.com/vechain/thor/v2/metrics" +) + +var ( + metricCacheHitMiss = metrics.LazyLoadGaugeVec("cache_hit_miss_count", []string{"type", "event"}) + metricCompaction = metrics.LazyLoadGaugeVec("compaction_stats_gauge", []string{"level", "type"}) +) + +func registerCompactionMetrics(stats *leveldb.DBStats) { + for i := range stats.LevelDurations { + lvl := strconv.Itoa(i) + metricCompaction().SetWithLabel(int64(stats.LevelTablesCounts[i]), map[string]string{"level": lvl, "type": "tables"}) + metricCompaction().SetWithLabel(stats.LevelSizes[i], map[string]string{"level": lvl, "type": "size"}) + metricCompaction().SetWithLabel(int64(stats.LevelDurations[i].Seconds()), map[string]string{"level": lvl, "type": "time"}) + metricCompaction().SetWithLabel(stats.LevelRead[i], map[string]string{"level": lvl, "type": "read"}) + metricCompaction().SetWithLabel(stats.LevelWrite[i], map[string]string{"level": lvl, "type": "write"}) + } +} diff --git a/muxdb/muxdb.go b/muxdb/muxdb.go index 1b0da3971..8f9783d05 100644 --- a/muxdb/muxdb.go +++ b/muxdb/muxdb.go @@ -10,6 +10,7 @@ package muxdb import ( "context" "encoding/json" + "time" "github.com/syndtr/goleveldb/leveldb" dberrors "github.com/syndtr/goleveldb/leveldb/errors" @@ -17,16 +18,17 @@ import ( "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" "github.com/vechain/thor/v2/kv" - "github.com/vechain/thor/v2/muxdb/internal/engine" - "github.com/vechain/thor/v2/muxdb/internal/trie" - "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/log" + "github.com/vechain/thor/v2/muxdb/engine" + "github.com/vechain/thor/v2/trie" ) const ( - trieHistSpace = byte(0) // the key space for historical trie nodes. - trieDedupedSpace = byte(1) // the key space for deduped trie nodes. - trieLeafBankSpace = byte(2) // the key space for the trie leaf bank. - namedStoreSpace = byte(3) // the key space for named store. + trieHistSpace = byte(0) // the key space for historical trie nodes. + trieDedupedSpace = byte(1) // the key space for deduped trie nodes. + namedStoreSpace = byte(2) // the key space for named store. + + metricsSampleInterval = 10 * time.Second ) const ( @@ -34,19 +36,14 @@ const ( configKey = "config" ) -// Trie is the managed trie. -type Trie = trie.Trie +var logger = log.WithContext("pkg", "muxdb") // Options optional parameters for MuxDB. type Options struct { // TrieNodeCacheSizeMB is the size of the cache for trie node blobs. TrieNodeCacheSizeMB int - // TrieRootCacheCapacity is the capacity of the cache for trie root nodes. - TrieRootCacheCapacity int // TrieCachedNodeTTL defines the life time(times of commit) of cached trie nodes. TrieCachedNodeTTL uint16 - // TrieLeafBankSlotCapacity defines max count of cached slot for leaf bank. - TrieLeafBankSlotCapacity int // TrieHistPartitionFactor is the partition factor for historical trie nodes. TrieHistPartitionFactor uint32 // TrieDedupedPartitionFactor is the partition factor for deduped trie nodes. @@ -65,7 +62,9 @@ type Options struct { // MuxDB is the database to efficiently store state trie and block-chain data. type MuxDB struct { engine engine.Engine - trieBackend *trie.Backend + trieBackend *backend + + done chan struct{} } // Open opens or creates DB at the given path. 
@@ -109,27 +108,18 @@ func Open(path string, options *Options) (*MuxDB, error) { return nil, err } - trieCache := trie.NewCache( - options.TrieNodeCacheSizeMB, - options.TrieRootCacheCapacity) - - trieLeafBank := trie.NewLeafBank( - engine, - trieLeafBankSpace, - options.TrieLeafBankSlotCapacity) - return &MuxDB{ engine: engine, - trieBackend: &trie.Backend{ - Store: engine, - Cache: trieCache, - LeafBank: trieLeafBank, - HistSpace: trieHistSpace, - DedupedSpace: trieDedupedSpace, + trieBackend: &backend{ + Store: engine, + Cache: newCache( + options.TrieNodeCacheSizeMB, + uint32(options.TrieCachedNodeTTL)), HistPtnFactor: cfg.HistPtnFactor, DedupedPtnFactor: cfg.DedupedPtnFactor, CachedNodeTTL: options.TrieCachedNodeTTL, }, + done: make(chan struct{}), }, nil } @@ -141,57 +131,36 @@ func NewMem() *MuxDB { engine := engine.NewLevelEngine(ldb) return &MuxDB{ engine: engine, - trieBackend: &trie.Backend{ + trieBackend: &backend{ Store: engine, - Cache: nil, - LeafBank: nil, - HistSpace: trieHistSpace, - DedupedSpace: trieDedupedSpace, + Cache: &dummyCache{}, HistPtnFactor: 1, DedupedPtnFactor: 1, CachedNodeTTL: 32, }, + done: make(chan struct{}), } } // Close closes the DB. func (db *MuxDB) Close() error { + close(db.done) return db.engine.Close() } // NewTrie creates trie with existing root node. -// -// If root is zero or blake2b hash of an empty string, the trie is -// initially empty. -func (db *MuxDB) NewTrie(name string, root thor.Bytes32, commitNum, distinctNum uint32) *Trie { - return trie.New( - db.trieBackend, +// If root is zero value, the trie is initially empty. +func (db *MuxDB) NewTrie(name string, root trie.Root) *Trie { + return newTrie( name, - root, - commitNum, - distinctNum, - false, - ) -} - -// NewNonCryptoTrie creates non-crypto trie with existing root node. -// -// If root is zero or blake2b hash of an empty string, the trie is -// initially empty. -func (db *MuxDB) NewNonCryptoTrie(name string, root thor.Bytes32, commitNum, distinctNum uint32) *Trie { - return trie.New( db.trieBackend, - name, root, - commitNum, - distinctNum, - true, ) } -// CleanTrieHistory clean trie history within [startCommitNum, limitCommitNum). -func (db *MuxDB) CleanTrieHistory(ctx context.Context, startCommitNum, limitCommitNum uint32) error { - return trie.CleanHistory(ctx, db.trieBackend, startCommitNum, limitCommitNum) +// DeleteTrieHistoryNodes deletes trie history nodes within partitions of [startMajorVer, limitMajorVer). +func (db *MuxDB) DeleteTrieHistoryNodes(ctx context.Context, startMajorVer, limitMajorVer uint32) error { + return db.trieBackend.DeleteHistoryNodes(ctx, startMajorVer, limitMajorVer) } // NewStore creates named kv-store. 
@@ -204,6 +173,34 @@ func (db *MuxDB) IsNotFound(err error) bool {
 	return db.engine.IsNotFound(err)
 }
 
+func (db *MuxDB) EnableMetrics() {
+	go func() {
+		ticker := time.NewTicker(metricsSampleInterval)
+		defer ticker.Stop()
+
+		var (
+			stats leveldb.DBStats
+			err   error
+		)
+		for {
+			select {
+			case <-ticker.C:
+				// we only have one engine implementation for now; the type assertion is just for safety
+				lvl, ok := db.engine.(*engine.LevelEngine)
+				if ok {
+					err = lvl.Stats(&stats)
+					if err != nil {
+						logger.Warn("Failed to get LevelDB stats", "err", err)
+					}
+					registerCompactionMetrics(&stats)
+				}
+			case <-db.done:
+				return
+			}
+		}
+	}()
+}
+
 type config struct {
 	HistPtnFactor    uint32
 	DedupedPtnFactor uint32
diff --git a/muxdb/muxdb_test.go b/muxdb/muxdb_test.go
new file mode 100644
index 000000000..e0ecae072
--- /dev/null
+++ b/muxdb/muxdb_test.go
@@ -0,0 +1,144 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package muxdb
+
+import (
+	"context"
+	"math"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/vechain/thor/v2/trie"
+)
+
+func TestMuxdb(t *testing.T) {
+	var err error
+	db := NewMem()
+	db.Close()
+
+	dir := os.TempDir()
+
+	opts := Options{
+		TrieNodeCacheSizeMB:        128,
+		TrieCachedNodeTTL:          30, // 5min
+		TrieDedupedPartitionFactor: math.MaxUint32,
+		TrieWillCleanHistory:       true,
+		OpenFilesCacheCapacity:     512,
+		ReadCacheMB:                256, // rely on the OS page cache rather than a huge db read cache.
+		WriteBufferMB:              128,
+		TrieHistPartitionFactor:    1000,
+	}
+	path := filepath.Join(dir, "main.db")
+	db, err = Open(path, &opts)
+	assert.Nil(t, err)
+
+	err = db.Close()
+	assert.Nil(t, err)
+
+	os.RemoveAll(path)
+}
+
+func TestStore(t *testing.T) {
+	db := NewMem()
+
+	store := db.NewStore("test")
+	key := []byte("key")
+	val := []byte("val")
+
+	store.Put(key, val)
+	v, err := store.Get(key)
+	assert.Nil(t, err)
+	assert.Equal(t, val, v)
+
+	store.Delete(key)
+	_, err = store.Get(key)
+	assert.True(t, db.IsNotFound(err))
+
+	db.Close()
+}
+
+func TestMuxdbTrie(t *testing.T) {
+	var err error
+	db := NewMem()
+
+	tr := db.NewTrie("test", trie.Root{})
+	tr.SetNoFillCache(true)
+	key := []byte("key")
+	val1 := []byte("val")
+	val2 := []byte("val2")
+
+	ver1 := trie.Version{Major: 1, Minor: 0}
+	ver2 := trie.Version{Major: 100, Minor: 0}
+	ver3 := trie.Version{Major: 101, Minor: 0}
+
+	err = tr.Update(key, val1, nil)
+	assert.Nil(t, err)
+	err = tr.Commit(ver1, false)
+	assert.Nil(t, err)
+
+	root1 := tr.Hash()
+	tr1 := db.NewTrie("test", trie.Root{Hash: root1, Ver: ver1})
+	tr1.SetNoFillCache(true)
+	v, _, err := tr1.Get(key)
+	assert.Nil(t, err)
+	assert.Equal(t, val1, v)
+
+	tr1.Update(key, val2, nil)
+	err = tr1.Commit(ver2, false)
+	assert.Nil(t, err)
+	root2 := tr1.Hash()
+
+	tr2 := db.NewTrie("test", trie.Root{Hash: root2, Ver: ver2})
+	tr2.SetNoFillCache(true)
+	v, _, err = tr2.Get(key)
+	assert.Nil(t, err)
+	assert.Equal(t, val2, v)
+
+	err = tr2.Commit(ver3, false)
+	assert.Nil(t, err)
+	root3 := tr2.Hash()
+
+	// prune the trie: checkpoint, then delete history nodes within [0, ver3)
+	xtr := db.NewTrie("test", trie.Root{Hash: root2, Ver: ver2})
+	err = xtr.Checkpoint(context.Background(), 0, nil)
+	assert.Nil(t, err)
+	err = db.DeleteTrieHistoryNodes(context.Background(), 0, ver3.Major)
+	assert.Nil(t, err)
+
+	// after deleting history nodes, they should no longer be found in the hist space
+	path := []byte{}
+
+	histKey := xtr.back.AppendHistNodeKey(nil, "test", path, ver1)
+	_, err =
xtr.back.Store.Get(histKey)
+	assert.True(t, db.IsNotFound(err))
+
+	histKey = xtr.back.AppendHistNodeKey(nil, "test", path, ver2)
+	_, err = xtr.back.Store.Get(histKey)
+	assert.True(t, db.IsNotFound(err))
+
+	histKey = xtr.back.AppendHistNodeKey(nil, "test", path, ver3)
+	_, err = xtr.back.Store.Get(histKey)
+	assert.Nil(t, err)
+
+	dedupedKey := xtr.back.AppendDedupedNodeKey(nil, "test", path, ver2)
+	blob, err := xtr.back.Store.Get(dedupedKey)
+	assert.Nil(t, err)
+	assert.NotNil(t, blob)
+
+	tr4 := db.NewTrie("test", trie.Root{Hash: root2, Ver: ver2})
+	v, _, err = tr4.Get(key)
+	assert.Nil(t, err)
+	assert.Equal(t, val2, v)
+
+	tr5 := db.NewTrie("test", trie.Root{Hash: root3, Ver: ver3})
+	v, _, err = tr5.Get(key)
+	assert.Nil(t, err)
+	assert.Equal(t, val2, v)
+
+	db.Close()
+}
diff --git a/muxdb/trie.go b/muxdb/trie.go
new file mode 100644
index 000000000..f0da76e5e
--- /dev/null
+++ b/muxdb/trie.go
@@ -0,0 +1,227 @@
+// Copyright (c) 2021 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or
+
+package muxdb
+
+import (
+	"context"
+
+	"github.com/vechain/thor/v2/thor"
+	"github.com/vechain/thor/v2/trie"
+)
+
+// Trie is the managed trie.
+type Trie struct {
+	name        string
+	back        *backend
+	trie        *trie.Trie
+	noFillCache bool
+}
+
+// newTrie creates a managed trie.
+func newTrie(
+	name string,
+	back *backend,
+	root trie.Root,
+) *Trie {
+	t := &Trie{
+		name: name,
+		back: back,
+	}
+
+	if rn := back.Cache.GetRootNode(name, root.Ver); rn != nil {
+		t.trie = trie.FromRootNode(rn, t.newDatabaseReader())
+	} else {
+		t.trie = trie.New(root, t.newDatabaseReader())
+	}
+	t.trie.SetCacheTTL(back.CachedNodeTTL)
+	return t
+}
+
+// newDatabaseReader creates a database reader for low-level trie construction.
+func (t *Trie) newDatabaseReader() trie.DatabaseReader {
+	var keyBuf []byte
+
+	return &struct {
+		trie.DatabaseReader
+	}{
+		databaseGetFunc(func(path []byte, ver trie.Version) (blob []byte, err error) {
+			// get from cache
+			if blob = t.back.Cache.GetNodeBlob(&keyBuf, t.name, path, ver, t.noFillCache); len(blob) > 0 {
+				return
+			}
+			defer func() {
+				if err == nil && !t.noFillCache {
+					t.back.Cache.AddNodeBlob(&keyBuf, t.name, path, ver, blob, false)
+				}
+			}()
+
+			// query in db
+			snapshot := t.back.Store.Snapshot()
+			defer snapshot.Release()
+
+			// get from hist space first
+			keyBuf = t.back.AppendHistNodeKey(keyBuf[:0], t.name, path, ver)
+			if blob, err = snapshot.Get(keyBuf); err != nil {
+				if !snapshot.IsNotFound(err) {
+					return
+				}
+			} else {
+				// found in hist space
+				return
+			}
+
+			// then from deduped space
+			keyBuf = t.back.AppendDedupedNodeKey(keyBuf[:0], t.name, path, ver)
+			return snapshot.Get(keyBuf)
+		}),
+	}
+}
+
+// Copy makes a copy of this trie.
+func (t *Trie) Copy() *Trie {
+	cpy := *t
+	cpy.trie = trie.FromRootNode(t.trie.RootNode(), cpy.newDatabaseReader())
+	cpy.trie.SetCacheTTL(t.back.CachedNodeTTL)
+	return &cpy
+}
+
+// Get returns the value for key stored in the trie.
+// The value bytes must not be modified by the caller.
+func (t *Trie) Get(key []byte) ([]byte, []byte, error) {
+	return t.trie.Get(key)
+}
+
+// Update associates key with value in the trie. Subsequent calls to
+// Get will return value. If value has length zero, any existing value
+// is deleted from the trie and calls to Get will return nil.
+//
+// The value bytes must not be modified by the caller while they are
+// stored in the trie.
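+//
+// For example (illustrative only): Update(key, val, meta) stores val together
+// with its metadata blob, and a later Update(key, nil, nil) deletes the key,
+// after which Get(key) returns nil.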
+func (t *Trie) Update(key, val, meta []byte) error {
+	return t.trie.Update(key, val, meta)
+}
+
+// Hash returns the root hash of the trie.
+func (t *Trie) Hash() thor.Bytes32 {
+	return t.trie.Hash()
+}
+
+// Commit writes all nodes to the trie's database.
+//
+// Committing flushes nodes from memory.
+// Subsequent Get calls will load nodes from the database.
+// If skipHash is true, less disk space is taken up, but the crypto features of the merkle trie are lost.
+func (t *Trie) Commit(newVer trie.Version, skipHash bool) error {
+	var (
+		bulk   = t.back.Store.Bulk()
+		keyBuf []byte
+	)
+
+	db := &struct{ trie.DatabaseWriter }{
+		databasePutFunc(func(path []byte, ver trie.Version, blob []byte) error {
+			keyBuf = t.back.AppendHistNodeKey(keyBuf[:0], t.name, path, ver)
+			if err := bulk.Put(keyBuf, blob); err != nil {
+				return err
+			}
+			if !t.noFillCache {
+				t.back.Cache.AddNodeBlob(&keyBuf, t.name, path, ver, blob, true)
+			}
+			return nil
+		}),
+	}
+
+	if err := t.trie.Commit(db, newVer, skipHash); err != nil {
+		return err
+	}
+
+	if err := bulk.Write(); err != nil {
+		return err
+	}
+
+	if !t.noFillCache {
+		t.back.Cache.AddRootNode(t.name, t.trie.RootNode())
+	}
+	return nil
+}
+
+// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at
+// the key after the given start key.
+func (t *Trie) NodeIterator(start []byte, baseMajorVer uint32) trie.NodeIterator {
+	return t.trie.NodeIterator(start, trie.Version{Major: baseMajorVer})
+}
+
+// SetNoFillCache enables or disables cache filling.
+func (t *Trie) SetNoFillCache(b bool) {
+	t.noFillCache = b
+}
+
+// Checkpoint transfers standalone nodes, whose major versions are within [baseMajorVer, thisMajorVer], into the deduped space.
+func (t *Trie) Checkpoint(ctx context.Context, baseMajorVer uint32, handleLeaf func(*trie.Leaf)) error {
+	var (
+		checkContext = newContextChecker(ctx, 5000)
+		bulk         = t.back.Store.Bulk()
+		iter         = t.NodeIterator(nil, baseMajorVer)
+		keyBuf       []byte
+	)
+	bulk.EnableAutoFlush()
+
+	for iter.Next(true) {
+		if err := checkContext(); err != nil {
+			return err
+		}
+
+		blob, ver, err := iter.Blob()
+		if err != nil {
+			return err
+		}
+		if len(blob) > 0 {
+			keyBuf = t.back.AppendDedupedNodeKey(keyBuf[:0], t.name, iter.Path(), ver)
+			if err := bulk.Put(keyBuf, blob); err != nil {
+				return err
+			}
+		}
+		if handleLeaf != nil {
+			if leaf := iter.Leaf(); leaf != nil {
+				handleLeaf(leaf)
+			}
+		}
+	}
+	if err := iter.Error(); err != nil {
+		return err
+	}
+	return bulk.Write()
+}
+
+// individual functions of trie database interface.
+type (
+	databaseGetFunc func(path []byte, ver trie.Version) ([]byte, error)
+	databasePutFunc func(path []byte, ver trie.Version, value []byte) error
+)
+
+func (f databaseGetFunc) Get(path []byte, ver trie.Version) ([]byte, error) {
+	return f(path, ver)
+}
+
+func (f databasePutFunc) Put(path []byte, ver trie.Version, value []byte) error {
+	return f(path, ver, value)
+}
+
+// newContextChecker creates a debounced context checker.
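+// The returned func checks ctx.Done() only once per `debounce` calls, so hot
+// iteration loops pay almost nothing for cancellation support; Checkpoint
+// above, for instance, constructs it with a debounce of 5000.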
+func newContextChecker(ctx context.Context, debounce int) func() error { + count := 0 + return func() error { + count++ + if count > debounce { + count = 0 + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + } + return nil + } +} diff --git a/muxdb/trie_test.go b/muxdb/trie_test.go new file mode 100644 index 000000000..79bec7124 --- /dev/null +++ b/muxdb/trie_test.go @@ -0,0 +1,78 @@ +// Copyright (c) 2021 The VeChainThor developers + +// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying +// file LICENSE or + +package muxdb + +import ( + "encoding/binary" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/vechain/thor/v2/muxdb/engine" + "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" +) + +func newTestEngine() engine.Engine { + db, _ := leveldb.Open(storage.NewMemStorage(), nil) + return engine.NewLevelEngine(db) +} + +func newTestBackend() *backend { + engine := newTestEngine() + return &backend{ + Store: engine, + Cache: &dummyCache{}, + HistPtnFactor: 1, + DedupedPtnFactor: 1, + CachedNodeTTL: 100, + } +} + +func TestTrie(t *testing.T) { + var ( + name = "the trie" + back = newTestBackend() + round = uint32(200) + roots []trie.Root + ) + + for i := uint32(0); i < round; i++ { + var root trie.Root + if len(roots) > 0 { + root = roots[len(roots)-1] + } + + tr := newTrie(name, back, root) + key := thor.Blake2b(binary.BigEndian.AppendUint32(nil, i)).Bytes() + val := thor.Blake2b(key).Bytes() + meta := thor.Blake2b(val).Bytes() + err := tr.Update(key, val, meta) + assert.Nil(t, err) + + err = tr.Commit(trie.Version{Major: i}, false) + assert.Nil(t, err) + + roots = append(roots, trie.Root{ + Hash: tr.Hash(), + Ver: trie.Version{Major: i}, + }) + } + + for _i, root := range roots { + tr := newTrie(name, back, root) + for i := uint32(0); i <= uint32(_i); i++ { + key := thor.Blake2b(binary.BigEndian.AppendUint32(nil, i)).Bytes() + val := thor.Blake2b(key).Bytes() + meta := thor.Blake2b(val).Bytes() + _val, _meta, err := tr.Get(key) + assert.Nil(t, err) + assert.Equal(t, val, _val) + assert.Equal(t, meta, _meta) + } + } +} diff --git a/packer/flow.go b/packer/flow.go index 47bd97ae7..ed1530dca 100644 --- a/packer/flow.go +++ b/packer/flow.go @@ -14,6 +14,7 @@ import ( "github.com/vechain/thor/v2/runtime" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" "github.com/vechain/thor/v2/tx" "github.com/vechain/thor/v2/vrf" ) @@ -156,7 +157,7 @@ func (f *Flow) Pack(privateKey *ecdsa.PrivateKey, newBlockConflicts uint32, shou return nil, nil, nil, errors.New("private key mismatch") } - stage, err := f.runtime.State().Stage(f.Number(), newBlockConflicts) + stage, err := f.runtime.State().Stage(trie.Version{Major: f.Number(), Minor: newBlockConflicts}) if err != nil { return nil, nil, nil, err } diff --git a/packer/packer.go b/packer/packer.go index aa85ade87..212c81c42 100644 --- a/packer/packer.go +++ b/packer/packer.go @@ -50,7 +50,7 @@ func New( // Schedule schedule a packing flow to pack new block upon given parent and clock time. 
func (p *Packer) Schedule(parent *chain.BlockSummary, nowTimestamp uint64) (flow *Flow, err error) { - state := p.stater.NewState(parent.Header.StateRoot(), parent.Header.Number(), parent.Conflicts, parent.SteadyNum) + state := p.stater.NewState(parent.Root()) var features tx.Features if parent.Header.Number()+1 >= p.forkConfig.VIP191 { @@ -141,7 +141,7 @@ func (p *Packer) Schedule(parent *chain.BlockSummary, nowTimestamp uint64) (flow // It will skip the PoA verification and scheduling, and the block produced by // the returned flow is not in consensus. func (p *Packer) Mock(parent *chain.BlockSummary, targetTime uint64, gasLimit uint64) (*Flow, error) { - state := p.stater.NewState(parent.Header.StateRoot(), parent.Header.Number(), parent.Conflicts, parent.SteadyNum) + state := p.stater.NewState(parent.Root()) var features tx.Features if parent.Header.Number()+1 >= p.forkConfig.VIP191 { diff --git a/packer/packer_test.go b/packer/packer_test.go index da8078379..fbe95fe8c 100644 --- a/packer/packer_test.go +++ b/packer/packer_test.go @@ -22,6 +22,7 @@ import ( "github.com/vechain/thor/v2/packer" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" "github.com/vechain/thor/v2/tx" ) @@ -101,10 +102,9 @@ func TestP(t *testing.T) { _, _, err = consensus.New(repo, stater, thor.NoFork).Process(best, blk, uint64(time.Now().Unix()*2), 0) assert.Nil(t, err) - if err := repo.AddBlock(blk, receipts, 0); err != nil { + if err := repo.AddBlock(blk, receipts, 0, true); err != nil { t.Fatal(err) } - repo.SetBestBlockID(blk.Header().ID()) if time.Now().UnixNano() > start+1000*1000*1000*1 { break @@ -166,15 +166,15 @@ func TestForkVIP191(t *testing.T) { t.Fatal(err) } - if err := repo.AddBlock(blk, receipts, 0); err != nil { + if err := repo.AddBlock(blk, receipts, 0, false); err != nil { t.Fatal(err) } - headState := state.New(db, blk.Header().StateRoot(), blk.Header().Number(), 0, 0) + headState := state.New(db, trie.Root{Hash: blk.Header().StateRoot(), Ver: trie.Version{Major: blk.Header().Number()}}) assert.Equal(t, M(builtin.Extension.V2.RuntimeBytecodes(), nil), M(headState.GetCode(builtin.Extension.Address))) - geneState := state.New(db, b0.Header().StateRoot(), 0, 0, 0) + geneState := state.New(db, trie.Root{Hash: b0.Header().StateRoot()}) assert.Equal(t, M(builtin.Extension.RuntimeBytecodes(), nil), M(geneState.GetCode(builtin.Extension.Address))) } diff --git a/poa/candidates_test.go b/poa/candidates_test.go index 16e43fd4e..4b73f50d8 100644 --- a/poa/candidates_test.go +++ b/poa/candidates_test.go @@ -14,6 +14,7 @@ import ( "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func generateCandidateList(candidateCount int) []*authority.Candidate { @@ -103,8 +104,7 @@ func TestCopy(t *testing.T) { } func TestPick(t *testing.T) { - db := muxdb.NewMem() - state := state.New(db, thor.Bytes32{}, 0, 0, 0) + state := state.New(muxdb.NewMem(), trie.Root{}) candidateList := generateCandidateList(5) diff --git a/poa/seed_test.go b/poa/seed_test.go index ce16d13e8..88a41bff1 100644 --- a/poa/seed_test.go +++ b/poa/seed_test.go @@ -45,14 +45,12 @@ func TestSeeder_Generate(t *testing.T) { ParentID(parent.Header().ID()). 
Build().WithSignature(sig[:]) - if err := repo.AddBlock(b, nil, 0); err != nil { + asBest := i == int(epochInterval*3) + if err := repo.AddBlock(b, nil, 0, asBest); err != nil { t.Fatal(err) } parent = b } - if err := repo.SetBestBlockID(parent.Header().ID()); err != nil { - t.Fatal(err) - } b30ID, err := repo.NewBestChain().GetBlockID(epochInterval * 3) if err != nil { @@ -100,7 +98,7 @@ func TestSeeder_Generate(t *testing.T) { ParentID(parent.Header().ID()). Build().WithSignature(sig[:]) - if err := repo.AddBlock(b, nil, 0); err != nil { + if err := repo.AddBlock(b, nil, 0, false); err != nil { t.Fatal(err) } parent = b @@ -142,16 +140,13 @@ func TestSeeder_Generate(t *testing.T) { b = b.WithSignature(cs) - if err := repo.AddBlock(b, nil, 0); err != nil { + asBest := i == int(epochInterval*2) + if err := repo.AddBlock(b, nil, 0, asBest); err != nil { t.Fatal(err) } parent = b } - if err := repo.SetBestBlockID(parent.Header().ID()); err != nil { - t.Fatal(err) - } - chain := repo.NewBestChain() b40, err := chain.GetBlockHeader(40) if err != nil { diff --git a/runtime/native_return_gas_test.go b/runtime/native_return_gas_test.go index 707169e00..47dad803b 100644 --- a/runtime/native_return_gas_test.go +++ b/runtime/native_return_gas_test.go @@ -14,13 +14,13 @@ import ( "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" "github.com/vechain/thor/v2/tx" "github.com/vechain/thor/v2/xenv" ) func TestNativeCallReturnGas(t *testing.T) { - db := muxdb.NewMem() - state := state.New(db, thor.Bytes32{}, 0, 0, 0) + state := state.New(muxdb.NewMem(), trie.Root{}) state.SetCode(builtin.Measure.Address, builtin.Measure.RuntimeBytecodes()) inner, _ := builtin.Measure.ABI.MethodByName("inner") diff --git a/runtime/resolved_tx.go b/runtime/resolved_tx.go index b196f6a82..2d0b999c1 100644 --- a/runtime/resolved_tx.go +++ b/runtime/resolved_tx.go @@ -205,12 +205,13 @@ func (r *ResolvedTransaction) ToContext( return nil, err } return &xenv.TransactionContext{ - ID: r.tx.ID(), - Origin: r.Origin, - GasPayer: gasPayer, - GasPrice: gasPrice, - ProvedWork: provedWork, - BlockRef: r.tx.BlockRef(), - Expiration: r.tx.Expiration(), + ID: r.tx.ID(), + Origin: r.Origin, + GasPayer: gasPayer, + GasPrice: gasPrice, + ProvedWork: provedWork, + BlockRef: r.tx.BlockRef(), + Expiration: r.tx.Expiration(), + ClauseCount: big.NewInt(int64(len(r.tx.Clauses()))), }, nil } diff --git a/runtime/resolved_tx_test.go b/runtime/resolved_tx_test.go index 37a0e7425..5eae70486 100644 --- a/runtime/resolved_tx_test.go +++ b/runtime/resolved_tx_test.go @@ -72,7 +72,7 @@ func newTestResolvedTransaction(t *testing.T) (*testResolvedTransaction, error) func (tr *testResolvedTransaction) currentState() *state.State { h := tr.repo.BestBlockSummary() - return tr.stater.NewState(h.Header.StateRoot(), h.Header.Number(), 0, h.SteadyNum) + return tr.stater.NewState(h.Root()) } func (tr *testResolvedTransaction) TestResolveTransaction() { diff --git a/runtime/runtime.go b/runtime/runtime.go index cbf6d7de0..815a944c2 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -126,8 +126,12 @@ func New( } } - // VIP191 - if forkConfig.VIP191 == ctx.Number { + if forkConfig.GALACTICA == ctx.Number { + // upgrade extension contract to V3 + if err := state.SetCode(builtin.Extension.Address, builtin.Extension.V3.RuntimeBytecodes()); err != nil { + panic(err) + } + } else if forkConfig.VIP191 == ctx.Number { // upgrade extension contract to V2 if err := 
state.SetCode(builtin.Extension.Address, builtin.Extension.V2.RuntimeBytecodes()); err != nil { panic(err) @@ -236,7 +240,7 @@ func (rt *Runtime) newEVM(stateDB *statedb.StateDB, clauseIndex uint32, txCtx *x panic("serious bug: native call returned gas over consumed") } - ret, err := xenv.New(abi, rt.chain, rt.state, rt.ctx, txCtx, evm, contract).Call(run) + ret, err := xenv.New(abi, rt.chain, rt.state, rt.ctx, txCtx, evm, contract, clauseIndex).Call(run) return ret, err, true }, OnCreateContract: func(_ *vm.EVM, contractAddr, caller common.Address) { diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go index 38645620e..950154380 100644 --- a/runtime/runtime_test.go +++ b/runtime/runtime_test.go @@ -7,7 +7,6 @@ package runtime_test import ( "encoding/hex" - "fmt" "math" "math/big" "testing" @@ -23,8 +22,8 @@ import ( "github.com/vechain/thor/v2/runtime" "github.com/vechain/thor/v2/state" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" "github.com/vechain/thor/v2/tx" - "github.com/vechain/thor/v2/vm" "github.com/vechain/thor/v2/xenv" ) @@ -32,578 +31,314 @@ func M(a ...interface{}) []interface{} { return a } -func TestEVMFunction(t *testing.T) { - target := thor.BytesToAddress([]byte("acc01")) +func TestContractSuicide(t *testing.T) { + db := muxdb.NewMem() - type context struct { - chain *chain.Chain - state *state.State - method *abi.Method - } + g := genesis.NewDevnet() + stater := state.NewStater(db) + b0, _, _, err := g.Build(stater) + assert.Nil(t, err) - type testcase = struct { - name string - code string - abi string - methodName string - testFunc func(*context, *testing.T) - } + repo, _ := chain.NewRepository(db, b0) - baseTests := []testcase{ - { - name: "Contract Suicide", - code: "608060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063085da1b3146044575b600080fd5b348015604f57600080fd5b5060566058565b005b3373ffffffffffffffffffffffffffffffffffffffff16ff00a165627a7a723058204cb70b653a3d1821e00e6ade869638e80fa99719931c9fa045cec2189d94086f0029", - abi: `[{"constant":false,"inputs":[],"name":"testSuicide","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}]`, - methodName: "testSuicide", - testFunc: func(ctx *context, t *testing.T) { - // contract: - // - // pragma solidity ^0.4.18; - - // contract TestSuicide { - // function testSuicide() public { - // selfdestruct(msg.sender); - // } - // } - - // time := ctx.chain.GetBlockSummary() - // .Header().Timestamp() - - head, _ := ctx.chain.GetBlockSummary(0) - time := head.Header.Timestamp() - - ctx.state.SetEnergy(target, big.NewInt(100), time) - ctx.state.SetBalance(target, big.NewInt(200)) - - methodData, err := ctx.method.EncodeInput() - if err != nil { - t.Fatal(err) - } + // contract: + // + // pragma solidity ^0.4.18; - origin := genesis.DevAccounts()[0].Address - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{Time: time}, thor.NoFork). 
- PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{Origin: origin}) - out, _, err := exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - expectedTransfer := &tx.Transfer{ - Sender: target, - Recipient: origin, - Amount: big.NewInt(200), - } - assert.Equal(t, 1, len(out.Transfers)) - assert.Equal(t, expectedTransfer, out.Transfers[0]) - - event, _ := builtin.Energy.ABI.EventByName("Transfer") - expectedEvent := &tx.Event{ - Address: builtin.Energy.Address, - Topics: []thor.Bytes32{event.ID(), thor.BytesToBytes32(target.Bytes()), thor.BytesToBytes32(origin.Bytes())}, - Data: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, - } - assert.Equal(t, 1, len(out.Events)) - assert.Equal(t, expectedEvent, out.Events[0]) + // contract TestSuicide { + // function testSuicide() public { + // selfdestruct(msg.sender); + // } + // } + data, _ := hex.DecodeString("608060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063085da1b3146044575b600080fd5b348015604f57600080fd5b5060566058565b005b3373ffffffffffffffffffffffffffffffffffffffff16ff00a165627a7a723058204cb70b653a3d1821e00e6ade869638e80fa99719931c9fa045cec2189d94086f0029") + time := b0.Header().Timestamp() + addr := thor.BytesToAddress([]byte("acc01")) + state := stater.NewState(trie.Root{Hash: b0.Header().StateRoot()}) + state.SetCode(addr, data) + state.SetEnergy(addr, big.NewInt(100), time) + state.SetBalance(addr, big.NewInt(200)) + + abi, _ := abi.New([]byte(`[{ + "constant": false, + "inputs": [], + "name": "testSuicide", + "outputs": [], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + } + ]`)) + suicide, _ := abi.MethodByName("testSuicide") + methodData, err := suicide.EncodeInput() + if err != nil { + t.Fatal(err) + } - assert.Equal(t, M(big.NewInt(0), nil), M(ctx.state.GetBalance(target))) - assert.Equal(t, M(big.NewInt(0), nil), M(ctx.state.GetEnergy(target, time))) + origin := genesis.DevAccounts()[0].Address + exec, _ := runtime.New(repo.NewChain(b0.Header().ID()), state, &xenv.BlockContext{Time: time}, thor.NoFork). 
+ PrepareClause(tx.NewClause(&addr).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{Origin: origin}) + out, _, err := exec() + assert.Nil(t, err) + assert.Nil(t, out.VMErr) - bal, _ := new(big.Int).SetString("1000000000000000000000000000", 10) - assert.Equal(t, M(new(big.Int).Add(bal, big.NewInt(200)), nil), M(ctx.state.GetBalance(origin))) - assert.Equal(t, M(new(big.Int).Add(bal, big.NewInt(100)), nil), M(ctx.state.GetEnergy(origin, time))) - }, - }, - { - name: "ChainID", - code: "6080604052348015600f57600080fd5b506004361060285760003560e01c8063adc879e914602d575b600080fd5b60336047565b604051603e9190605c565b60405180910390f35b600046905090565b6056816075565b82525050565b6000602082019050606f6000830184604f565b92915050565b600081905091905056fea264697066735822122060b67d944ffa8f0c5ee69f2f47decc3dc175ea2e4341a4de3705d72b868ce2b864736f6c63430008010033", - abi: `[{"inputs":[],"name":"chainID","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]`, - methodName: "chainID", - testFunc: func(ctx *context, t *testing.T) { - // pragma solidity >=0.7.0 <0.9.0; - // contract TestChainID { - - // function chainID() public view returns (uint256) { - // return block.chainid; - // } - // } - - methodData, err := ctx.method.EncodeInput() - if err != nil { - t.Fatal(err) - } + expectedTransfer := &tx.Transfer{ + Sender: addr, + Recipient: origin, + Amount: big.NewInt(200), + } + assert.Equal(t, 1, len(out.Transfers)) + assert.Equal(t, expectedTransfer, out.Transfers[0]) + + event, _ := builtin.Energy.ABI.EventByName("Transfer") + expectedEvent := &tx.Event{ + Address: builtin.Energy.Address, + Topics: []thor.Bytes32{event.ID(), thor.BytesToBytes32(addr.Bytes()), thor.BytesToBytes32(origin.Bytes())}, + Data: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100}, + } + assert.Equal(t, 1, len(out.Events)) + assert.Equal(t, expectedEvent, out.Events[0]) - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, thor.ForkConfig{}). - PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - assert.Equal(t, ctx.chain.GenesisID(), thor.BytesToBytes32(out.Data)) - }, - }, { - name: "Self Balance", - code: "6080604052348015600f57600080fd5b506004361060285760003560e01c8063b0bed0ba14602d575b600080fd5b60336047565b604051603e9190605c565b60405180910390f35b600047905090565b6056816075565b82525050565b6000602082019050606f6000830184604f565b92915050565b600081905091905056fea2646970667358221220eeac1b7322c414db88987af09d3c8bdfde83bb378be9ac0e9ebe3fe34ecbcf2564736f6c63430008010033", - abi: `[{"inputs":[],"name":"selfBalance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]`, - methodName: "selfBalance", - testFunc: func(ctx *context, t *testing.T) { - // pragma solidity >=0.7.0 <0.9.0; - // contract TestSelfBalance { - - // function selfBalance() public view returns (uint256) { - // return address(this).balance; - // } - // } - - ctx.state.SetBalance(target, big.NewInt(100)) - - methodData, err := ctx.method.EncodeInput() - if err != nil { - t.Fatal(err) - } + assert.Equal(t, M(big.NewInt(0), nil), M(state.GetBalance(addr))) + assert.Equal(t, M(big.NewInt(0), nil), M(state.GetEnergy(addr, time))) - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, thor.ForkConfig{}). 
- PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - assert.True(t, new(big.Int).SetBytes(out.Data).Cmp(big.NewInt(100)) == 0) - }, - }, { - name: "Blake2F", - code: "608060405234801561001057600080fd5b50600436106100365760003560e01c806372de3cbd1461003b578063fc75ac471461006b575b600080fd5b61005560048036038101906100509190610894565b610089565b6040516100629190610a9b565b60405180910390f35b6100736102e5565b6040516100809190610a9b565b60405180910390f35b61009161063c565b61009961063c565b600087876000600281106100d6577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002015188600160028110610115577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002015188600060048110610154577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002015189600160048110610193577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518a6002600481106101d2577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518b600360048110610211577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518b600060028110610250577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518c60016002811061028f577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518c6040516020016102ae9a999897969594939291906109e7565b604051602081830303815290604052905060408260d5602084016009600019fa6102d757600080fd5b819250505095945050505050565b6102ed61063c565b6000600c90506102fb61063c565b7f48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa581600060028110610356577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250507fd182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b816001600281106103ba577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250506103cb61065e565b7f616263000000000000000000000000000000000000000000000000000000000081600060048110610426577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201818152505060008160016004811061046b577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250506000816002600481106104b0577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250506000816003600481106104f5577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002018181525050610506610680565b7f030000000000000000000000000000000000000000000000000000000000000081600060028110610561577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002019077ffffffffffffffffffffffffffffffffffffffffffffffff1916908177ffffffffffffffffffffffffffffffffffffffffffffffff1916815250506000816001600281106105de577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002019077ffffffffffffffffffffffffffffffffffffffffffffffff1916908177ffffffffffffffffffffffffffffffffffffffffffffffff1916815250506000600190506106328585858585610089565b9550505050505090565b6040518060400160405280600290602082028036833780820191505090505090
565b6040518060800160405280600490602082028036833780820191505090505090565b6040518060400160405280600290602082028036833780820191505090505090565b60006106b56106b084610adb565b610ab6565b905080828560208602820111156106cb57600080fd5b60005b858110156106fb57816106e18882610855565b8452602084019350602083019250506001810190506106ce565b5050509392505050565b600061071861071384610b01565b610ab6565b9050808285602086028201111561072e57600080fd5b60005b8581101561075e57816107448882610855565b845260208401935060208301925050600181019050610731565b5050509392505050565b600061077b61077684610b27565b610ab6565b9050808285602086028201111561079157600080fd5b60005b858110156107c157816107a7888261086a565b845260208401935060208301925050600181019050610794565b5050509392505050565b600082601f8301126107dc57600080fd5b60026107e98482856106a2565b91505092915050565b600082601f83011261080357600080fd5b6004610810848285610705565b91505092915050565b600082601f83011261082a57600080fd5b6002610837848285610768565b91505092915050565b60008135905061084f81610ca1565b92915050565b60008135905061086481610cb8565b92915050565b60008135905061087981610ccf565b92915050565b60008135905061088e81610ce6565b92915050565b600080600080600061014086880312156108ad57600080fd5b60006108bb8882890161087f565b95505060206108cc888289016107cb565b94505060606108dd888289016107f2565b93505060e06108ee88828901610819565b92505061012061090088828901610840565b9150509295509295909350565b60006109198383610993565b60208301905092915050565b61092e81610b57565b6109388184610b6f565b925061094382610b4d565b8060005b8381101561097457815161095b878261090d565b965061096683610b62565b925050600181019050610947565b505050505050565b61098d61098882610b7a565b610bfd565b82525050565b61099c81610b86565b82525050565b6109b36109ae82610b86565b610c0f565b82525050565b6109ca6109c582610b90565b610c19565b82525050565b6109e16109dc82610bbc565b610c23565b82525050565b60006109f3828d6109d0565b600482019150610a03828c6109a2565b602082019150610a13828b6109a2565b602082019150610a23828a6109a2565b602082019150610a3382896109a2565b602082019150610a4382886109a2565b602082019150610a5382876109a2565b602082019150610a6382866109b9565b600882019150610a7382856109b9565b600882019150610a83828461097c565b6001820191508190509b9a5050505050505050505050565b6000604082019050610ab06000830184610925565b92915050565b6000610ac0610ad1565b9050610acc8282610bcc565b919050565b6000604051905090565b600067ffffffffffffffff821115610af657610af5610c47565b5b602082029050919050565b600067ffffffffffffffff821115610b1c57610b1b610c47565b5b602082029050919050565b600067ffffffffffffffff821115610b4257610b41610c47565b5b602082029050919050565b6000819050919050565b600060029050919050565b6000602082019050919050565b600081905092915050565b60008115159050919050565b6000819050919050565b60007fffffffffffffffff00000000000000000000000000000000000000000000000082169050919050565b600063ffffffff82169050919050565b610bd582610c76565b810181811067ffffffffffffffff82111715610bf457610bf3610c47565b5b80604052505050565b6000610c0882610c35565b9050919050565b6000819050919050565b6000819050919050565b6000610c2e82610c87565b9050919050565b6000610c4082610c94565b9050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000601f19601f8301169050919050565b60008160e01b9050919050565b60008160f81b9050919050565b610caa81610b7a565b8114610cb557600080fd5b50565b610cc181610b86565b8114610ccc57600080fd5b50565b610cd881610b90565b8114610ce357600080fd5b50565b610cef81610bbc565b8114610cfa57600080fd5b5056fea2646970667358221220d54d4583b224c049d80665ae690afd0e7e998bf883c6b97472d292d1e2e5fa3e64736f6c63430008010033", - abi: 
`[{"inputs":[{"internalType":"uint32","name":"rounds","type":"uint32"},{"internalType":"bytes32[2]","name":"h","type":"bytes32[2]"},{"internalType":"bytes32[4]","name":"m","type":"bytes32[4]"},{"internalType":"bytes8[2]","name":"t","type":"bytes8[2]"},{"internalType":"bool","name":"f","type":"bool"}],"name":"F","outputs":[{"internalType":"bytes32[2]","name":"","type":"bytes32[2]"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"callF","outputs":[{"internalType":"bytes32[2]","name":"","type":"bytes32[2]"}],"stateMutability":"view","type":"function"}]`, - methodName: "callF", - testFunc: func(ctx *context, t *testing.T) { - // pragma solidity >=0.7.0 <0.9.0; - // contract TestBlake2 { - // function F(uint32 rounds, bytes32[2] memory h, bytes32[4] memory m, bytes8[2] memory t, bool f) public view returns (bytes32[2] memory) { - // bytes32[2] memory output; - - // bytes memory args = abi.encodePacked(rounds, h[0], h[1], m[0], m[1], m[2], m[3], t[0], t[1], f); - - // assembly { - // if iszero(staticcall(not(0), 0x09, add(args, 32), 0xd5, output, 0x40)) { - // revert(0, 0) - // } - // } - - // return output; - // } - - // function callF() public view returns (bytes32[2] memory) { - // uint32 rounds = 12; - - // bytes32[2] memory h; - // h[0] = hex"48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5"; - // h[1] = hex"d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b"; - - // bytes32[4] memory m; - // m[0] = hex"6162630000000000000000000000000000000000000000000000000000000000"; - // m[1] = hex"0000000000000000000000000000000000000000000000000000000000000000"; - // m[2] = hex"0000000000000000000000000000000000000000000000000000000000000000"; - // m[3] = hex"0000000000000000000000000000000000000000000000000000000000000000"; - - // bytes8[2] memory t; - // t[0] = hex"03000000"; - // t[1] = hex"00000000"; - - // bool f = true; - - // // Expected output: - // // ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d1 - // // 7d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923 - // return F(rounds, h, m, t, f); - // } - // } - - methodData, err := ctx.method.EncodeInput() - if err != nil { - t.Fatal(err) - } + bal, _ := new(big.Int).SetString("1000000000000000000000000000", 10) + assert.Equal(t, M(new(big.Int).Add(bal, big.NewInt(200)), nil), M(state.GetBalance(origin))) + assert.Equal(t, M(new(big.Int).Add(bal, big.NewInt(100)), nil), M(state.GetEnergy(origin, time))) +} - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, thor.ForkConfig{}). 
- PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) +func TestChainID(t *testing.T) { + db := muxdb.NewMem() - var hashes [2][32]uint8 - ctx.method.DecodeOutput(out.Data, &hashes) + g := genesis.NewDevnet() - assert.Equal(t, thor.MustParseBytes32("ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d1"), thor.Bytes32(hashes[0])) - assert.Equal(t, thor.MustParseBytes32("7d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923"), thor.Bytes32(hashes[1])) - }, - }, - } + stater := state.NewStater(db) + b0, _, _, err := g.Build(stater) + assert.Nil(t, err) - shanghaiTests := []testcase{ - { - name: "pre EVM SH deploy 0xEF started contract code", - code: "", - abi: "", - methodName: "", - testFunc: func(ctx *context, t *testing.T) { - code, _ := hex.DecodeString("60ef60005360016000f3") - - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, thor.NoFork). - PrepareClause(tx.NewClause(nil).WithData(code), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - }, - }, - { - name: "EVM SH deploy 0xEF started contract code", - code: "", - abi: "", - methodName: "", - testFunc: func(ctx *context, t *testing.T) { - failingCalldata := []string{"60ef60005360016000f3", "60ef60005360026000f3", "60ef60005360036000f3", "60ef60005360206000f3"} - for _, calldata := range failingCalldata { - code, _ := hex.DecodeString(calldata) - - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, thor.ForkConfig{}). - PrepareClause(tx.NewClause(nil).WithData(code), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - - assert.Nil(t, err) - assert.NotNil(t, out.VMErr) - assert.Equal(t, "invalid code: must not begin with 0xef", out.VMErr.Error()) - } - }, - }, - { - name: "EVM SH deploy None 0xEF started contract code", - code: "", - abi: "", - methodName: "", - testFunc: func(ctx *context, t *testing.T) { - code, _ := hex.DecodeString("60ee60005360016000f3") - - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, thor.ForkConfig{}). - PrepareClause(tx.NewClause(nil).WithData(code), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - }, - }, - { - name: "EVM SH PUSH0 gas cost", - code: "", - abi: "", - methodName: "", - testFunc: func(ctx *context, t *testing.T) { - // 0x5f is PUSH0 opCode - codeData := []byte{0x5f} - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, thor.ForkConfig{}). - PrepareClause(tx.NewClause(nil).WithData(codeData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - assert.Equal(t, uint64(2), math.MaxUint64-out.LeftOverGas) - }, - }, - { - name: "EVM SH BASEFEE output gas cost", - code: hex.EncodeToString([]byte{byte(vm.BASEFEE)}), - abi: "", - methodName: "", - testFunc: func(ctx *context, t *testing.T) { - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, thor.ForkConfig{}). 
- PrepareClause(tx.NewClause(&target), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - assert.True(t, new(big.Int).SetBytes(out.Data).Cmp(big.NewInt(0)) == 0) - assert.Equal(t, uint64(2), math.MaxUint64-out.LeftOverGas) - }, - }, - { - name: "EVM SH BASEFEE", - code: hex.EncodeToString([]byte{byte(vm.BASEFEE), byte(vm.PUSH1), 0x80, byte(vm.MSTORE), byte(vm.PUSH1), 0x20, byte(vm.PUSH1), 0x80, byte(vm.RETURN)}), - abi: "", - methodName: "", - testFunc: func(ctx *context, t *testing.T) { - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, thor.ForkConfig{}). - PrepareClause(tx.NewClause(&target), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - assert.True(t, new(big.Int).SetBytes(out.Data).Cmp(big.NewInt(0)) == 0) - }, - }, - { - name: "Precompile modexp", - code: "608060405234801561001057600080fd5b506004361061002b5760003560e01c8063577b75b914610030575b600080fd5b61004a60048036038101906100459190610287565b610060565b60405161005791906102e9565b60405180910390f35b6000806000600573ffffffffffffffffffffffffffffffffffffffff168460405161008b9190610375565b600060405180830381855afa9150503d80600081146100c6576040519150601f19603f3d011682016040523d82523d6000602084013e6100cb565b606091505b509150915081610110576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016101079061040f565b60405180910390fd5b80806020019051810190610124919061045b565b92505050919050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6101948261014b565b810181811067ffffffffffffffff821117156101b3576101b261015c565b5b80604052505050565b60006101c661012d565b90506101d2828261018b565b919050565b600067ffffffffffffffff8211156101f2576101f161015c565b5b6101fb8261014b565b9050602081019050919050565b82818337600083830152505050565b600061022a610225846101d7565b6101bc565b90508281526020810184848401111561024657610245610146565b5b610251848285610208565b509392505050565b600082601f83011261026e5761026d610141565b5b813561027e848260208601610217565b91505092915050565b60006020828403121561029d5761029c610137565b5b600082013567ffffffffffffffff8111156102bb576102ba61013c565b5b6102c784828501610259565b91505092915050565b6000819050919050565b6102e3816102d0565b82525050565b60006020820190506102fe60008301846102da565b92915050565b600081519050919050565b600081905092915050565b60005b8381101561033857808201518184015260208101905061031d565b60008484015250505050565b600061034f82610304565b610359818561030f565b935061036981856020860161031a565b80840191505092915050565b60006103818284610344565b915081905092915050565b600082825260208201905092915050565b7f4d6f64756c61724578706f6e656e74696174696f6e3a204661696c656420617460008201527f2063616c63756c6174696e672074686520726573756c74000000000000000000602082015250565b60006103f960378361038c565b91506104048261039d565b604082019050919050565b60006020820190508181036000830152610428816103ec565b9050919050565b610438816102d0565b811461044357600080fd5b50565b6000815190506104558161042f565b92915050565b60006020828403121561047157610470610137565b5b600061047f84828501610446565b9150509291505056fea2646970667358221220d362967bdc1ba52fd086eee3092f6a9a77f6993dd89c74e0718dfa791bc4794e64736f6c63430008180033", - abi: 
`[{"inputs":[{"internalType":"bytes","name":"input","type":"bytes"}],"name":"modExpBytes","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]`, - methodName: "modExpBytes", - testFunc: func(ctx *context, t *testing.T) { - // pragma solidity >=0.7.0 <0.9.0; - // contract Math{ - // function modExpBytes(bytes memory input) public view returns (uint256) { - // (bool success, bytes memory result) = (address(5).staticcall(input)); - // require(success, "ModularExponentiation: Failed at calculating the result"); - // return abi.decode(result, (uint256)); - // } - // } - - // test case picked from vm/testdata/precompiles - input := common.FromHex("000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8102df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f") - methodData, err := ctx.method.EncodeInput(input) - if err != nil { - t.Fatal(err) - } + repo, _ := chain.NewRepository(db, b0) - forkConfig := thor.NoFork - forkConfig.ETH_IST = 0 - - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, forkConfig). 
- PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - gasBefore := math.MaxUint64 - out.LeftOverGas - - forkConfig.GALACTICA = 0 - exec, _ = runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, forkConfig). - PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err = exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - gasAfter := math.MaxUint64 - out.LeftOverGas - assert.True(t, gasBefore > gasAfter) - // test case picked from vm/testdata/precompiles, 5580-1365 - assert.Zero(t, gasBefore-gasAfter-4215) - }, - }, - { - name: "Precompile bn256Add", - code: "608060405234801561001057600080fd5b506004361061002b5760003560e01c806383304e8a14610030575b600080fd5b61004a60048036038101906100459190610287565b610060565b60405161005791906102e9565b60405180910390f35b6000806000600673ffffffffffffffffffffffffffffffffffffffff168460405161008b9190610375565b600060405180830381855afa9150503d80600081146100c6576040519150601f19603f3d011682016040523d82523d6000602084013e6100cb565b606091505b509150915081610110576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016101079061040f565b60405180910390fd5b80806020019051810190610124919061045b565b92505050919050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6101948261014b565b810181811067ffffffffffffffff821117156101b3576101b261015c565b5b80604052505050565b60006101c661012d565b90506101d2828261018b565b919050565b600067ffffffffffffffff8211156101f2576101f161015c565b5b6101fb8261014b565b9050602081019050919050565b82818337600083830152505050565b600061022a610225846101d7565b6101bc565b90508281526020810184848401111561024657610245610146565b5b610251848285610208565b509392505050565b600082601f83011261026e5761026d610141565b5b813561027e848260208601610217565b91505092915050565b60006020828403121561029d5761029c610137565b5b600082013567ffffffffffffffff8111156102bb576102ba61013c565b5b6102c784828501610259565b91505092915050565b6000819050919050565b6102e3816102d0565b82525050565b60006020820190506102fe60008301846102da565b92915050565b600081519050919050565b600081905092915050565b60005b8381101561033857808201518184015260208101905061031d565b60008484015250505050565b600061034f82610304565b610359818561030f565b935061036981856020860161031a565b80840191505092915050565b60006103818284610344565b915081905092915050565b600082825260208201905092915050565b7f626e3235364164643a204661696c65642061742063616c63756c6174696e672060008201527f74686520726573756c7400000000000000000000000000000000000000000000602082015250565b60006103f9602a8361038c565b91506104048261039d565b604082019050919050565b60006020820190508181036000830152610428816103ec565b9050919050565b610438816102d0565b811461044357600080fd5b50565b6000815190506104558161042f565b92915050565b60006020828403121561047157610470610137565b5b600061047f84828501610446565b9150509291505056fea26469706673582212203fcccf78a18856182dd27cee0e3011ad18f00a9a29feb40bbcf800cdeb29a75a64736f6c63430008180033", - abi: `[{"inputs":[{"internalType":"bytes","name":"input","type":"bytes"}],"name":"bn256AddBytes","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"}]`, - methodName: "bn256AddBytes", - testFunc: func(ctx *context, t *testing.T) { - // pragma solidity >=0.7.0 <0.9.0; - // contract Math{ - // function 
bn256AddBytes(bytes memory input) public view returns (uint256) { - // (bool success, bytes memory result) = (address(6).staticcall(input)); - // require(success, "bn256Add: Failed at calculating the result"); - // return abi.decode(result, (uint256)); - // } - // } - // test case picked from vm/testdata/precompiles - input := common.FromHex("18b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f3726607c2b7f58a84bd6145f00c9c2bc0bb1a187f20ff2c92963a88019e7c6a014eed06614e20c147e940f2d70da3f74c9a17df361706a4485c742bd6788478fa17d7") - methodData, err := ctx.method.EncodeInput(input) - if err != nil { - t.Fatal(err) - } + // pragma solidity >=0.7.0 <0.9.0; + // contract TestChainID { - forkConfig := thor.NoFork - forkConfig.ETH_IST = 0 - - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, forkConfig). - PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - gasBefore := math.MaxUint64 - out.LeftOverGas - - forkConfig.GALACTICA = 0 - exec, _ = runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, forkConfig). - PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err = exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - gasAfter := math.MaxUint64 - out.LeftOverGas - assert.True(t, gasBefore > gasAfter) - // test case picked from vm/testdata/precompiles, 500-150 - assert.Zero(t, gasBefore-gasAfter-350) - }, - }, - { - name: "Precompile bn256ScalarMul", - code: "608060405234801561001057600080fd5b506004361061002b5760003560e01c8063701961aa14610030575b600080fd5b61004a60048036038101906100459190610274565b610060565b604051610057919061033c565b60405180910390f35b6060600080600773ffffffffffffffffffffffffffffffffffffffff168460405161008b919061039a565b600060405180830381855afa9150503d80600081146100c6576040519150601f19603f3d011682016040523d82523d6000602084013e6100cb565b606091505b509150915081610110576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161010790610434565b60405180910390fd5b8092505050919050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61018182610138565b810181811067ffffffffffffffff821117156101a05761019f610149565b5b80604052505050565b60006101b361011a565b90506101bf8282610178565b919050565b600067ffffffffffffffff8211156101df576101de610149565b5b6101e882610138565b9050602081019050919050565b82818337600083830152505050565b6000610217610212846101c4565b6101a9565b90508281526020810184848401111561023357610232610133565b5b61023e8482856101f5565b509392505050565b600082601f83011261025b5761025a61012e565b5b813561026b848260208601610204565b91505092915050565b60006020828403121561028a57610289610124565b5b600082013567ffffffffffffffff8111156102a8576102a7610129565b5b6102b484828501610246565b91505092915050565b600081519050919050565b600082825260208201905092915050565b60005b838110156102f75780820151818401526020810190506102dc565b60008484015250505050565b600061030e826102bd565b61031881856102c8565b93506103288185602086016102d9565b61033181610138565b840191505092915050565b600060208201905081810360008301526103568184610303565b905092915050565b600081905092915050565b6000610374826102bd565b61037e818561035e565b935061038e8185602086016102d9565b80840191505092915050565b60006103a68284610369565b915081905092915050565b600
082825260208201905092915050565b7f626e3235364164643a204661696c65642061742063616c63756c6174696e672060008201527f74686520726573756c7400000000000000000000000000000000000000000000602082015250565b600061041e602a836103b1565b9150610429826103c2565b604082019050919050565b6000602082019050818103600083015261044d81610411565b905091905056fea2646970667358221220469c90049edd24983926f66efd6ae8e5ca288ad0b2b206fde2e8a5fb6875a81764736f6c63430008180033", - abi: `[{"inputs":[{"internalType":"bytes","name":"input","type":"bytes"}],"name":"bn256ScalarMulBytes","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"}]`, - methodName: "bn256ScalarMulBytes", - testFunc: func(ctx *context, t *testing.T) { - // pragma solidity >=0.7.0 <0.9.0; - // contract Math{ - // function bn256ScalarMulBytes(bytes memory input) public view returns (bytes memory) { - // (bool success, bytes memory result) = (address(7).staticcall(input)); - // require(success, "bn256ScalarMul: Failed at calculating the result"); - // return result; - // } - // } - // test case picked from vm/testdata/precompiles - input := common.FromHex("2bd3e6d0f3b142924f5ca7b49ce5b9d54c4703d7ae5648e61d02268b1a0a9fb721611ce0a6af85915e2f1d70300909ce2e49dfad4a4619c8390cae66cefdb20400000000000000000000000000000000000000000000000011138ce750fa15c2") - methodData, err := ctx.method.EncodeInput(input) - if err != nil { - t.Fatal(err) + // function chainID() public view returns (uint256) { + // return block.chainid; + // } + // } + data, _ := hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c8063adc879e914602d575b600080fd5b60336047565b604051603e9190605c565b60405180910390f35b600046905090565b6056816075565b82525050565b6000602082019050606f6000830184604f565b92915050565b600081905091905056fea264697066735822122060b67d944ffa8f0c5ee69f2f47decc3dc175ea2e4341a4de3705d72b868ce2b864736f6c63430008010033") + addr := thor.BytesToAddress([]byte("acc01")) + state := stater.NewState(trie.Root{Hash: b0.Header().StateRoot()}) + state.SetCode(addr, data) + + abi, _ := abi.New([]byte(`[{ + "inputs": [], + "name": "chainID", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" } + ], + "stateMutability": "view", + "type": "function" + } + ]`)) + chainIDMethod, _ := abi.MethodByName("chainID") + methodData, err := chainIDMethod.EncodeInput() + if err != nil { + t.Fatal(err) + } - forkConfig := thor.NoFork - forkConfig.ETH_IST = 0 - - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, forkConfig). - PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - gasBefore := math.MaxUint64 - out.LeftOverGas - - forkConfig.GALACTICA = 0 - exec, _ = runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, forkConfig). 
- PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err = exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - gasAfter := math.MaxUint64 - out.LeftOverGas - assert.True(t, gasBefore > gasAfter) - // test case picked from vm/testdata/precompiles, 40000-6000 - assert.Zero(t, gasBefore-gasAfter-34000) - }, - }, - { - name: "Precompile bn256Pairing", - code: "608060405234801561001057600080fd5b506004361061002b5760003560e01c806382b72bfd14610030575b600080fd5b61004a60048036038101906100459190610287565b610060565b60405161005791906102e9565b60405180910390f35b6000806000600873ffffffffffffffffffffffffffffffffffffffff168460405161008b9190610375565b600060405180830381855afa9150503d80600081146100c6576040519150601f19603f3d011682016040523d82523d6000602084013e6100cb565b606091505b509150915081610110576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016101079061040f565b60405180910390fd5b80806020019051810190610124919061045b565b92505050919050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6101948261014b565b810181811067ffffffffffffffff821117156101b3576101b261015c565b5b80604052505050565b60006101c661012d565b90506101d2828261018b565b919050565b600067ffffffffffffffff8211156101f2576101f161015c565b5b6101fb8261014b565b9050602081019050919050565b82818337600083830152505050565b600061022a610225846101d7565b6101bc565b90508281526020810184848401111561024657610245610146565b5b610251848285610208565b509392505050565b600082601f83011261026e5761026d610141565b5b813561027e848260208601610217565b91505092915050565b60006020828403121561029d5761029c610137565b5b600082013567ffffffffffffffff8111156102bb576102ba61013c565b5b6102c784828501610259565b91505092915050565b6000819050919050565b6102e3816102d0565b82525050565b60006020820190506102fe60008301846102da565b92915050565b600081519050919050565b600081905092915050565b60005b8381101561033857808201518184015260208101905061031d565b60008484015250505050565b600061034f82610304565b610359818561030f565b935061036981856020860161031a565b80840191505092915050565b60006103818284610344565b915081905092915050565b600082825260208201905092915050565b7f626e32353650616972696e673a204661696c65642061742063616c63756c617460008201527f696e672074686520726573756c74000000000000000000000000000000000000602082015250565b60006103f9602e8361038c565b91506104048261039d565b604082019050919050565b60006020820190508181036000830152610428816103ec565b9050919050565b610438816102d0565b811461044357600080fd5b50565b6000815190506104558161042f565b92915050565b60006020828403121561047157610470610137565b5b600061047f84828501610446565b9150509291505056fea26469706673582212203f7c1d2240693474593d78d55b56a6b10de6fb80fd59956aea8108863cc779b064736f6c63430008180033", - abi: `[{"inputs":[{"internalType":"bytes","name":"input","type":"bytes"}],"name":"bn256PairingBytes","outputs":[{"internalType":"bytes","name":"","type":"bytes"}],"stateMutability":"view","type":"function"}]`, - methodName: "bn256PairingBytes", - testFunc: func(ctx *context, t *testing.T) { - // pragma solidity >=0.7.0 <0.9.0; - // contract Math{ - // contract Math{ - // function bn256PairingBytes(bytes memory input) public view returns (uint256) { - // (bool success, bytes memory result) = (address(8).staticcall(input)); - // require(success, "bn256Pairing: Failed at calculating the result"); - // return abi.decode(result, (uint256)); - // } - // } - // test case picked from 
vm/testdata/precompiles - input := common.FromHex("1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f593034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf704bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a416782bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c21800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa") - methodData, err := ctx.method.EncodeInput(input) - if err != nil { - t.Fatal(err) - } + exec, _ := runtime.New(repo.NewChain(b0.Header().ID()), state, &xenv.BlockContext{}, thor.ForkConfig{ETH_IST: 0}). + PrepareClause(tx.NewClause(&addr).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) + out, _, err := exec() + assert.Nil(t, err) + assert.Nil(t, out.VMErr) - forkConfig := thor.NoFork - forkConfig.ETH_IST = 0 - - exec, _ := runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, forkConfig). - PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - gasBefore := math.MaxUint64 - out.LeftOverGas - - forkConfig.GALACTICA = 0 - exec, _ = runtime.New(ctx.chain, ctx.state, &xenv.BlockContext{}, forkConfig). - PrepareClause(tx.NewClause(&target).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err = exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr) - - gasAfter := math.MaxUint64 - out.LeftOverGas - assert.True(t, gasBefore > gasAfter) - // test case picked from vm/testdata/precompiles, 260000-113000 - assert.Zero(t, gasBefore-gasAfter-147000) - }, - }, - } + assert.Equal(t, g.ID(), thor.BytesToBytes32(out.Data)) +} - var tests = []testcase{} +func TestSelfBalance(t *testing.T) { + db := muxdb.NewMem() - tests = append(tests, baseTests...) - tests = append(tests, shanghaiTests...) 
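Note on TestChainID: on VeChainThor the CHAINID opcode (live once ETH_IST is active, hence ForkConfig{ETH_IST: 0}) pushes the full 32-byte genesis block ID rather than a small network integer. A minimal sketch of the relationship the final assertion checks, reusing g from the setup above; the chain-tag line is an added assumption based on Thor's convention that the tag is the last byte of the genesis ID:

    id := g.ID()                            // 32-byte genesis block ID
    chainID := new(big.Int).SetBytes(id[:]) // the uint256 that block.chainid yields
    tag := id[31]                           // assumed: equals repo.ChainTag()
    _, _ = chainID, tag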
+ g := genesis.NewDevnet() - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - db := muxdb.NewMem() - g := genesis.NewDevnet() - stater := state.NewStater(db) - b0, _, _, _ := g.Build(stater) - repo, _ := chain.NewRepository(db, b0) + stater := state.NewStater(db) + b0, _, _, err := g.Build(stater) + assert.Nil(t, err) - ctx := &context{ - repo.NewChain(b0.Header().ID()), - stater.NewState(b0.Header().StateRoot(), 0, 0, 0), - nil, - } + repo, _ := chain.NewRepository(db, b0) - if len(tt.methodName) > 0 { - abi, _ := abi.New([]byte(tt.abi)) - method, _ := abi.MethodByName(tt.methodName) + // pragma solidity >=0.7.0 <0.9.0; + // contract TestSelfBalance { - ctx.method = method - } + // function selfBalance() public view returns (uint256) { + // return address(this).balance; + // } + // } - if len(tt.code) > 0 { - code, err := hex.DecodeString(tt.code) - if err != nil { - t.Fatal(err) + data, _ := hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c8063b0bed0ba14602d575b600080fd5b60336047565b604051603e9190605c565b60405180910390f35b600047905090565b6056816075565b82525050565b6000602082019050606f6000830184604f565b92915050565b600081905091905056fea2646970667358221220eeac1b7322c414db88987af09d3c8bdfde83bb378be9ac0e9ebe3fe34ecbcf2564736f6c63430008010033") + addr := thor.BytesToAddress([]byte("acc01")) + state := stater.NewState(trie.Root{Hash: b0.Header().StateRoot()}) + state.SetCode(addr, data) + state.SetBalance(addr, big.NewInt(100)) + + abi, _ := abi.New([]byte(`[{ + "inputs": [], + "name": "selfBalance", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" } - ctx.state.SetCode(target, code) - } - - tt.testFunc(ctx, t) - }) + ], + "stateMutability": "view", + "type": "function" + } + ]`)) + selfBalanceMethod, _ := abi.MethodByName("selfBalance") + methodData, err := selfBalanceMethod.EncodeInput() + if err != nil { + t.Fatal(err) } + + exec, _ := runtime.New(repo.NewChain(b0.Header().ID()), state, &xenv.BlockContext{}, thor.ForkConfig{ETH_IST: 0}). 
+ PrepareClause(tx.NewClause(&addr).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) + out, _, err := exec() + assert.Nil(t, err) + assert.Nil(t, out.VMErr) + + assert.True(t, new(big.Int).SetBytes(out.Data).Cmp(big.NewInt(100)) == 0) } -func TestPreForkOpCode(t *testing.T) { +func TestBlake2(t *testing.T) { db := muxdb.NewMem() + g := genesis.NewDevnet() + stater := state.NewStater(db) - b0, _, _, _ := g.Build(stater) + b0, _, _, err := g.Build(stater) + assert.Nil(t, err) + repo, _ := chain.NewRepository(db, b0) - chain := repo.NewChain(b0.Header().ID()) - state := stater.NewState(b0.Header().StateRoot(), 0, 0, 0) + // pragma solidity >=0.7.0 <0.9.0; + // contract TestBlake2 { + // function F(uint32 rounds, bytes32[2] memory h, bytes32[4] memory m, bytes8[2] memory t, bool f) public view returns (bytes32[2] memory) { + // bytes32[2] memory output; - tests := []struct { - name string - code []byte - op vm.OpCode - }{ - { - name: "BASEFEE", - code: []byte{byte(vm.BASEFEE)}, - op: vm.BASEFEE, + // bytes memory args = abi.encodePacked(rounds, h[0], h[1], m[0], m[1], m[2], m[3], t[0], t[1], f); + + // assembly { + // if iszero(staticcall(not(0), 0x09, add(args, 32), 0xd5, output, 0x40)) { + // revert(0, 0) + // } + // } + + // return output; + // } + + // function callF() public view returns (bytes32[2] memory) { + // uint32 rounds = 12; + + // bytes32[2] memory h; + // h[0] = hex"48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5"; + // h[1] = hex"d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b"; + + // bytes32[4] memory m; + // m[0] = hex"6162630000000000000000000000000000000000000000000000000000000000"; + // m[1] = hex"0000000000000000000000000000000000000000000000000000000000000000"; + // m[2] = hex"0000000000000000000000000000000000000000000000000000000000000000"; + // m[3] = hex"0000000000000000000000000000000000000000000000000000000000000000"; + + // bytes8[2] memory t; + // t[0] = hex"03000000"; + // t[1] = hex"00000000"; + + // bool f = true; + + // // Expected output: + // // ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d1 + // // 7d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923 + // return F(rounds, h, m, t, f); + // } + // } + data, _ := 
hex.DecodeString("608060405234801561001057600080fd5b50600436106100365760003560e01c806372de3cbd1461003b578063fc75ac471461006b575b600080fd5b61005560048036038101906100509190610894565b610089565b6040516100629190610a9b565b60405180910390f35b6100736102e5565b6040516100809190610a9b565b60405180910390f35b61009161063c565b61009961063c565b600087876000600281106100d6577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002015188600160028110610115577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002015188600060048110610154577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002015189600160048110610193577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518a6002600481106101d2577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518b600360048110610211577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518b600060028110610250577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518c60016002811061028f577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201518c6040516020016102ae9a999897969594939291906109e7565b604051602081830303815290604052905060408260d5602084016009600019fa6102d757600080fd5b819250505095945050505050565b6102ed61063c565b6000600c90506102fb61063c565b7f48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa581600060028110610356577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250507fd182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b816001600281106103ba577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250506103cb61065e565b7f616263000000000000000000000000000000000000000000000000000000000081600060048110610426577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60200201818152505060008160016004811061046b577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250506000816002600481106104b0577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020020181815250506000816003600481106104f5577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002018181525050610506610680565b7f030000000000000000000000000000000000000000000000000000000000000081600060028110610561577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002019077ffffffffffffffffffffffffffffffffffffffffffffffff1916908177ffffffffffffffffffffffffffffffffffffffffffffffff1916815250506000816001600281106105de577f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b602002019077ffffffffffffffffffffffffffffffffffffffffffffffff1916908177ffffffffffffffffffffffffffffffffffffffffffffffff1916815250506000600190506106328585858585610089565b9550505050505090565b6040518060400160405280600290602082028036833780820191505090505090565b6040518060800160405280600490602082028036833780820191505090505090565b6040518060400160405280600290602082028036833780820191505090505090565b60006106b56106b084610adb565b610ab6565b905080828560208602820111156106cb57600080fd5b60005b858110156106fb57816106e18882610855565b8452602084019
350602083019250506001810190506106ce565b5050509392505050565b600061071861071384610b01565b610ab6565b9050808285602086028201111561072e57600080fd5b60005b8581101561075e57816107448882610855565b845260208401935060208301925050600181019050610731565b5050509392505050565b600061077b61077684610b27565b610ab6565b9050808285602086028201111561079157600080fd5b60005b858110156107c157816107a7888261086a565b845260208401935060208301925050600181019050610794565b5050509392505050565b600082601f8301126107dc57600080fd5b60026107e98482856106a2565b91505092915050565b600082601f83011261080357600080fd5b6004610810848285610705565b91505092915050565b600082601f83011261082a57600080fd5b6002610837848285610768565b91505092915050565b60008135905061084f81610ca1565b92915050565b60008135905061086481610cb8565b92915050565b60008135905061087981610ccf565b92915050565b60008135905061088e81610ce6565b92915050565b600080600080600061014086880312156108ad57600080fd5b60006108bb8882890161087f565b95505060206108cc888289016107cb565b94505060606108dd888289016107f2565b93505060e06108ee88828901610819565b92505061012061090088828901610840565b9150509295509295909350565b60006109198383610993565b60208301905092915050565b61092e81610b57565b6109388184610b6f565b925061094382610b4d565b8060005b8381101561097457815161095b878261090d565b965061096683610b62565b925050600181019050610947565b505050505050565b61098d61098882610b7a565b610bfd565b82525050565b61099c81610b86565b82525050565b6109b36109ae82610b86565b610c0f565b82525050565b6109ca6109c582610b90565b610c19565b82525050565b6109e16109dc82610bbc565b610c23565b82525050565b60006109f3828d6109d0565b600482019150610a03828c6109a2565b602082019150610a13828b6109a2565b602082019150610a23828a6109a2565b602082019150610a3382896109a2565b602082019150610a4382886109a2565b602082019150610a5382876109a2565b602082019150610a6382866109b9565b600882019150610a7382856109b9565b600882019150610a83828461097c565b6001820191508190509b9a5050505050505050505050565b6000604082019050610ab06000830184610925565b92915050565b6000610ac0610ad1565b9050610acc8282610bcc565b919050565b6000604051905090565b600067ffffffffffffffff821115610af657610af5610c47565b5b602082029050919050565b600067ffffffffffffffff821115610b1c57610b1b610c47565b5b602082029050919050565b600067ffffffffffffffff821115610b4257610b41610c47565b5b602082029050919050565b6000819050919050565b600060029050919050565b6000602082019050919050565b600081905092915050565b60008115159050919050565b6000819050919050565b60007fffffffffffffffff00000000000000000000000000000000000000000000000082169050919050565b600063ffffffff82169050919050565b610bd582610c76565b810181811067ffffffffffffffff82111715610bf457610bf3610c47565b5b80604052505050565b6000610c0882610c35565b9050919050565b6000819050919050565b6000819050919050565b6000610c2e82610c87565b9050919050565b6000610c4082610c94565b9050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000601f19601f8301169050919050565b60008160e01b9050919050565b60008160f81b9050919050565b610caa81610b7a565b8114610cb557600080fd5b50565b610cc181610b86565b8114610ccc57600080fd5b50565b610cd881610b90565b8114610ce357600080fd5b50565b610cef81610bbc565b8114610cfa57600080fd5b5056fea2646970667358221220d54d4583b224c049d80665ae690afd0e7e998bf883c6b97472d292d1e2e5fa3e64736f6c63430008010033") + addr := thor.BytesToAddress([]byte("acc01")) + state := stater.NewState(trie.Root{Hash: b0.Header().StateRoot()}) + state.SetCode(addr, data) + + abi, _ := abi.New([]byte(`[{ + "inputs": [ + { + "internalType": "uint32", + "name": "rounds", + "type": "uint32" + }, + { + "internalType": "bytes32[2]", + "name": "h", + "type": 
"bytes32[2]" + }, + { + "internalType": "bytes32[4]", + "name": "m", + "type": "bytes32[4]" + }, + { + "internalType": "bytes8[2]", + "name": "t", + "type": "bytes8[2]" + }, + { + "internalType": "bool", + "name": "f", + "type": "bool" + } + ], + "name": "F", + "outputs": [ + { + "internalType": "bytes32[2]", + "name": "", + "type": "bytes32[2]" + } + ], + "stateMutability": "view", + "type": "function" }, { - name: "PUSH0", - code: []byte{byte(vm.PUSH0)}, - op: vm.PUSH0, - }, + "inputs": [], + "name": "callF", + "outputs": [ + { + "internalType": "bytes32[2]", + "name": "", + "type": "bytes32[2]" + } + ], + "stateMutability": "view", + "type": "function" + } + ]`)) + callFMethod, _ := abi.MethodByName("callF") + methodData, err := callFMethod.EncodeInput() + if err != nil { + t.Fatal(err) } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - exec, _ := runtime.New(chain, state, &xenv.BlockContext{}, thor.NoFork). - PrepareClause(tx.NewClause(nil).WithData(tt.code), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err := exec() - assert.Nil(t, err) - assert.NotNil(t, out.VMErr) - assert.Equal(t, fmt.Sprintf("invalid opcode 0x%x", int(tt.op)), out.VMErr.Error()) - - // this one applies a fork config that forks from the start - exec, _ = runtime.New(chain, state, &xenv.BlockContext{}, thor.ForkConfig{}). - PrepareClause(tx.NewClause(nil).WithData(tt.code), 0, math.MaxUint64, &xenv.TransactionContext{}) - out, _, err = exec() - assert.Nil(t, err) - assert.Nil(t, out.VMErr, "after fork should not return error") - }) - } + exec, _ := runtime.New(repo.NewChain(b0.Header().ID()), state, &xenv.BlockContext{}, thor.ForkConfig{ETH_IST: 0}). + PrepareClause(tx.NewClause(&addr).WithData(methodData), 0, math.MaxUint64, &xenv.TransactionContext{}) + out, _, err := exec() + assert.Nil(t, err) + assert.Nil(t, out.VMErr) + + var hashes [2][32]uint8 + callFMethod.DecodeOutput(out.Data, &hashes) + + assert.Equal(t, thor.MustParseBytes32("ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d1"), thor.Bytes32(hashes[0])) + assert.Equal(t, thor.MustParseBytes32("7d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923"), thor.Bytes32(hashes[1])) } func TestCall(t *testing.T) { @@ -615,7 +350,7 @@ func TestCall(t *testing.T) { repo, _ := chain.NewRepository(db, b0) - state := state.New(db, b0.Header().StateRoot(), 0, 0, 0) + state := state.New(db, trie.Root{Hash: b0.Header().StateRoot()}) rt := runtime.New(repo.NewChain(b0.Header().ID()), state, &xenv.BlockContext{}, thor.NoFork) @@ -704,7 +439,7 @@ func TestGetValues(t *testing.T) { repo, _ := chain.NewRepository(db, b0) - state := state.New(db, b0.Header().StateRoot(), 0, 0, 0) + state := state.New(db, trie.Root{Hash: b0.Header().StateRoot()}) rt := runtime.New(repo.NewChain(b0.Header().ID()), state, &xenv.BlockContext{}, thor.NoFork) runtimeChain := rt.Chain() @@ -727,7 +462,7 @@ func TestExecuteTransaction(t *testing.T) { repo, _ := chain.NewRepository(db, b0) - state := state.New(db, b0.Header().StateRoot(), 0, 0, 0) + state := state.New(db, trie.Root{Hash: b0.Header().StateRoot()}) originEnergy := new(big.Int) originEnergy.SetString("9000000000000000000000000000000000000", 10) @@ -755,7 +490,7 @@ func TestExecuteTransactionFailure(t *testing.T) { repo, _ := chain.NewRepository(db, b0) - state := state.New(db, b0.Header().StateRoot(), 0, 0, 0) + state := state.New(db, trie.Root{Hash: b0.Header().StateRoot()}) originEnergy := new(big.Int) originEnergy.SetString("9000000000000000000000000000000000000", 10) diff --git 
a/runtime/statedb/statedb_test.go b/runtime/statedb/statedb_test.go index dd5fc8b35..5f41a2c52 100644 --- a/runtime/statedb/statedb_test.go +++ b/runtime/statedb/statedb_test.go @@ -22,7 +22,7 @@ import ( "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/runtime/statedb" State "github.com/vechain/thor/v2/state" - "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func TestSnapshotRandom(t *testing.T) { @@ -185,7 +185,7 @@ func (test *snapshotTest) run() bool { // Run all actions and create snapshots. var ( db = muxdb.NewMem() - state = State.New(db, thor.Bytes32{}, 0, 0, 0) + state = State.New(db, trie.Root{}) stateDB = statedb.New(state) snapshotRevs = make([]int, len(test.snapshots)) sindex = 0 @@ -200,7 +200,7 @@ func (test *snapshotTest) run() bool { // Revert all snapshots in reverse order. Each revert must yield a state // that is equivalent to fresh state with all actions up the snapshot applied. for sindex--; sindex >= 0; sindex-- { - state := State.New(db, thor.Bytes32{}, 0, 0, 0) + state := State.New(db, trie.Root{}) checkStateDB := statedb.New(state) for _, action := range test.actions[:test.snapshots[sindex]] { action.fn(action, checkStateDB) diff --git a/state/account.go b/state/account.go index cccec7eb4..f3cf5dbda 100644 --- a/state/account.go +++ b/state/account.go @@ -16,9 +16,9 @@ import ( // AccountMetadata is the account metadata. type AccountMetadata struct { - StorageID []byte // the unique id of the storage trie. - StorageCommitNum uint32 // the commit number of the last storage update. - StorageDistinctNum uint32 // the distinct number of the last storage update. + StorageID []byte // the unique id of the storage trie. + StorageMajorVer uint32 // the major version of the last storage update. + StorageMinorVer uint32 // the minor version of the last storage update. } // Account is the Thor consensus representation of an account. @@ -69,11 +69,12 @@ func emptyAccount() *Account { return &a } +func secureKey(k []byte) []byte { return thor.Blake2b(k).Bytes() } + // loadAccount load an account object and its metadata by address in trie. // It returns empty account is no account found at the address. -func loadAccount(trie *muxdb.Trie, addr thor.Address, steadyBlockNum uint32) (*Account, *AccountMetadata, error) { - hashedKey := thor.Blake2b(addr[:]) - data, meta, err := trie.FastGet(hashedKey[:], steadyBlockNum) +func loadAccount(trie *muxdb.Trie, addr thor.Address) (*Account, *AccountMetadata, error) { + data, meta, err := trie.Get(secureKey(addr[:])) if err != nil { return nil, nil, err } @@ -98,9 +99,8 @@ func loadAccount(trie *muxdb.Trie, addr thor.Address, steadyBlockNum uint32) (*A // If the given account is empty, the value for given address is deleted. func saveAccount(trie *muxdb.Trie, addr thor.Address, a *Account, am *AccountMetadata) error { if a.IsEmpty() { - hashedKey := thor.Blake2b(addr[:]) // delete if account is empty - return trie.Update(hashedKey[:], nil, nil) + return trie.Update(secureKey(addr[:]), nil, nil) } data, err := rlp.EncodeToBytes(a) @@ -114,25 +114,20 @@ func saveAccount(trie *muxdb.Trie, addr thor.Address, a *Account, am *AccountMet return err } } - hashedKey := thor.Blake2b(addr[:]) - return trie.Update(hashedKey[:], data, mdata) + return trie.Update(secureKey(addr[:]), data, mdata) } // loadStorage load storage data for given key. 
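Note on the account.go hunks above (the loadStorage hunk continues below): every key is Blake2b-hashed before it touches the trie, now centralized in secureKey, and the steady-block FastGet path gives way to a plain Get on the versioned trie. A sketch that just recombines the calls shown above, with tr standing in for a *muxdb.Trie; illustrative only:

    k := secureKey(addr[:])      // 32-byte digest keeps the trie balanced for any input
    data, meta, err := tr.Get(k) // (value, metadata, error), as in loadAccount
    _ = tr.Update(k, data, meta) // writes use the same hashed key, as in saveAccount
    _ = err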
-func loadStorage(trie *muxdb.Trie, key thor.Bytes32, steadyBlockNum uint32) (rlp.RawValue, error) { - hashedKey := thor.Blake2b(key[:]) - v, _, err := trie.FastGet( - hashedKey[:], - steadyBlockNum) +func loadStorage(trie *muxdb.Trie, key thor.Bytes32) (rlp.RawValue, error) { + v, _, err := trie.Get(secureKey(key[:])) return v, err } // saveStorage save value for given key. // If the data is zero, the given key will be deleted. func saveStorage(trie *muxdb.Trie, key thor.Bytes32, data rlp.RawValue) error { - hashedKey := thor.Blake2b(key[:]) return trie.Update( - hashedKey[:], + secureKey(key[:]), data, bytes.TrimLeft(key[:], "\x00"), // key preimage as metadata ) diff --git a/state/account_test.go b/state/account_test.go index d95a281de..9bc84b517 100644 --- a/state/account_test.go +++ b/state/account_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func M(a ...interface{}) []interface{} { @@ -40,11 +41,11 @@ func TestAccount(t *testing.T) { func TestTrie(t *testing.T) { db := muxdb.NewMem() - trie := db.NewTrie("", thor.Bytes32{}, 0, 0) + tr := db.NewTrie("", trie.Root{}) addr := thor.BytesToAddress([]byte("account1")) assert.Equal(t, - M(loadAccount(trie, addr, 0)), + M(loadAccount(tr, addr)), M(emptyAccount(), &AccountMetadata{}, nil), "should load an empty account") @@ -57,40 +58,40 @@ func TestTrie(t *testing.T) { []byte("storage root"), } meta1 := AccountMetadata{ - StorageID: []byte("sid"), - StorageCommitNum: 1, - StorageDistinctNum: 2, + StorageID: []byte("sid"), + StorageMajorVer: 1, + StorageMinorVer: 2, } - saveAccount(trie, addr, &acc1, &meta1) + saveAccount(tr, addr, &acc1, &meta1) assert.Equal(t, - M(loadAccount(trie, addr, 0)), + M(loadAccount(tr, addr)), M(&acc1, &meta1, nil)) - saveAccount(trie, addr, emptyAccount(), &meta1) + saveAccount(tr, addr, emptyAccount(), &meta1) assert.Equal(t, - M(trie.Get(addr[:])), + M(tr.Get(addr[:])), M([]byte(nil), []byte(nil), nil), "empty account should be deleted") } func TestStorageTrie(t *testing.T) { db := muxdb.NewMem() - trie := db.NewTrie("", thor.Bytes32{}, 0, 0) + tr := db.NewTrie("", trie.Root{}) key := thor.BytesToBytes32([]byte("key")) assert.Equal(t, - M(loadStorage(trie, key, 0)), + M(loadStorage(tr, key)), M(rlp.RawValue(nil), nil)) value := rlp.RawValue("value") - saveStorage(trie, key, value) + saveStorage(tr, key, value) assert.Equal(t, - M(loadStorage(trie, key, 0)), + M(loadStorage(tr, key)), M(value, nil)) - saveStorage(trie, key, nil) + saveStorage(tr, key, nil) assert.Equal(t, - M(trie.Get(key[:])), + M(tr.Get(key[:])), M([]byte(nil), []byte(nil), nil), "empty storage value should be deleted") } diff --git a/state/cached_object.go b/state/cached_object.go index 75f34d19a..df9a2275a 100644 --- a/state/cached_object.go +++ b/state/cached_object.go @@ -10,6 +10,7 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) var codeCache, _ = lru.NewARC(512) @@ -43,16 +44,21 @@ func (co *cachedObject) getOrCreateStorageTrie() *muxdb.Trie { trie := co.db.NewTrie( StorageTrieName(co.meta.StorageID), - thor.BytesToBytes32(co.data.StorageRoot), - co.meta.StorageCommitNum, - co.meta.StorageDistinctNum) + trie.Root{ + Hash: thor.BytesToBytes32(co.data.StorageRoot), + Ver: trie.Version{ + Major: co.meta.StorageMajorVer, + Minor: co.meta.StorageMinorVer, + }, + }, + ) co.cache.storageTrie = trie return 
trie } // GetStorage returns storage value for given key. -func (co *cachedObject) GetStorage(key thor.Bytes32, steadyBlockNum uint32) (rlp.RawValue, error) { +func (co *cachedObject) GetStorage(key thor.Bytes32) (rlp.RawValue, error) { cache := &co.cache // retrieve from storage cache if cache.storage != nil { @@ -70,7 +76,7 @@ func (co *cachedObject) GetStorage(key thor.Bytes32, steadyBlockNum uint32) (rlp } // load from trie - v, err := loadStorage(trie, key, steadyBlockNum) + v, err := loadStorage(trie, key) if err != nil { return nil, err } diff --git a/state/cached_object_test.go b/state/cached_object_test.go index 5a5265217..1f06f0e98 100644 --- a/state/cached_object_test.go +++ b/state/cached_object_test.go @@ -14,13 +14,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func TestCachedObject(t *testing.T) { db := muxdb.NewMem() addr := thor.Address{} - stgTrie := db.NewTrie(StorageTrieName([]byte("sid")), thor.Bytes32{}, 0, 0) + stgTrie := db.NewTrie(StorageTrieName([]byte("sid")), trie.Root{}) storages := []struct { k thor.Bytes32 v rlp.RawValue @@ -35,9 +36,9 @@ func TestCachedObject(t *testing.T) { saveStorage(stgTrie, s.k, s.v) } - storageRoot, commit := stgTrie.Stage(0, 0) + storageRoot := stgTrie.Hash() - err := commit() + err := stgTrie.Commit(trie.Version{}, false) assert.Nil(t, err) code := make([]byte, 100) @@ -61,6 +62,6 @@ func TestCachedObject(t *testing.T) { for _, s := range storages { assert.Equal(t, M(s.v, nil), - M(obj.GetStorage(s.k, 0))) + M(obj.GetStorage(s.k))) } } diff --git a/muxdb/internal/trie/metrics.go b/state/metrics.go similarity index 57% rename from muxdb/internal/trie/metrics.go rename to state/metrics.go index 5266dfb3e..e1667668a 100644 --- a/muxdb/internal/trie/metrics.go +++ b/state/metrics.go @@ -3,10 +3,8 @@ // Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying // file LICENSE or -package trie +package state -import ( - "github.com/vechain/thor/v2/metrics" -) +import "github.com/vechain/thor/v2/metrics" -var metricCacheHitMiss = metrics.LazyLoadGaugeVec("cache_hit_miss_count", []string{"type", "event"}) +var metricAccountChanges = metrics.LazyLoadCounter("account_state_changes_count") diff --git a/state/stage.go b/state/stage.go index 5fca2859f..d21cacc1f 100644 --- a/state/stage.go +++ b/state/stage.go @@ -9,8 +9,8 @@ import "github.com/vechain/thor/v2/thor" // Stage abstracts changes on the main accounts trie. type Stage struct { - root thor.Bytes32 - commits []func() error + root thor.Bytes32 + commit func() error } // Hash computes hash of the main accounts trie. @@ -20,11 +20,9 @@ func (s *Stage) Hash() thor.Bytes32 { // Commit commits all changes into main accounts trie and storage tries. 
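Note: the hunks above and below migrate call sites from (root hash, blockNum, conflicts, steadyNum) tuples onto trie.Root/trie.Version, and split the old Stage(...) pair into separate Hash() and Commit(version, ...) calls. A compact round trip stitched from the calls these tests make; the trie name and the meaning of Commit's boolean flag are not specified in this diff, so treat both as assumptions:

    db := muxdb.NewMem()
    tr := db.NewTrie("demo", trie.Root{})        // fresh trie at the zero version
    _ = tr.Update([]byte{0x01}, []byte("v"), nil)
    root := tr.Hash()                            // root over pending changes
    _ = tr.Commit(trie.Version{Major: 1}, false) // persist at major version 1
    tr = db.NewTrie("demo", trie.Root{Hash: root, Ver: trie.Version{Major: 1}})
    v, _, _ := tr.Get([]byte{0x01})              // -> "v"
    _ = v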
func (s *Stage) Commit() (root thor.Bytes32, err error) { - for _, c := range s.commits { - if err = c(); err != nil { - err = &Error{err} - return - } + if err = s.commit(); err != nil { + err = &Error{err} + return } return s.root, nil } diff --git a/state/stage_test.go b/state/stage_test.go index f157591fa..2296f2604 100644 --- a/state/stage_test.go +++ b/state/stage_test.go @@ -13,11 +13,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func TestStage(t *testing.T) { db := muxdb.NewMem() - state := New(db, thor.Bytes32{}, 0, 0, 0) + state := New(db, trie.Root{}) addr := thor.BytesToAddress([]byte("acc1")) balance := big.NewInt(10) @@ -34,7 +35,7 @@ func TestStage(t *testing.T) { state.SetStorage(addr, k, v) } - stage, err := state.Stage(1, 0) + stage, err := state.Stage(trie.Version{Major: 1}) assert.Nil(t, err) hash := stage.Hash() @@ -44,7 +45,7 @@ func TestStage(t *testing.T) { assert.Equal(t, hash, root) - state = New(db, root, 1, 0, 0) + state = New(db, trie.Root{Hash: root, Ver: trie.Version{Major: 1}}) assert.Equal(t, M(balance, nil), M(state.GetBalance(addr))) assert.Equal(t, M(code, nil), M(state.GetCode(addr))) @@ -56,8 +57,7 @@ func TestStage(t *testing.T) { } func TestStageCommitError(t *testing.T) { - db := muxdb.NewMem() - state := New(db, thor.Bytes32{}, 0, 0, 0) + state := New(muxdb.NewMem(), trie.Root{}) // Set up the state with an account, balance, code, and storage. addr := thor.BytesToAddress([]byte("acc1")) @@ -76,7 +76,7 @@ func TestStageCommitError(t *testing.T) { } // Prepare the stage with the current state. - stage, err := state.Stage(1, 0) + stage, err := state.Stage(trie.Version{Major: 1}) assert.Nil(t, err, "Stage should not return an error") // Mock a commit function to simulate an error. @@ -85,7 +85,7 @@ func TestStageCommitError(t *testing.T) { } // Include the error-producing commit function in the stage's commits. - stage.commits = append(stage.commits, commitFuncWithError) + stage.commit = commitFuncWithError // Attempt to commit changes. _, err = stage.Commit() diff --git a/state/state.go b/state/state.go index 8fe7237c8..119546d3e 100644 --- a/state/state.go +++ b/state/state.go @@ -7,14 +7,15 @@ package state import ( "bytes" + "encoding/binary" "fmt" "math/big" "github.com/ethereum/go-ethereum/rlp" - "github.com/vechain/thor/v2/lowrlp" "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/stackedmap" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) const ( @@ -45,20 +46,18 @@ func (e *Error) Error() string { // State manages the world state. type State struct { - db *muxdb.MuxDB - trie *muxdb.Trie // the accounts trie reader - cache map[thor.Address]*cachedObject // cache of accounts trie - sm *stackedmap.StackedMap // keeps revisions of accounts state - steadyBlockNum uint32 + db *muxdb.MuxDB + trie *muxdb.Trie // the accounts trie reader + cache map[thor.Address]*cachedObject // cache of accounts trie + sm *stackedmap.StackedMap // keeps revisions of accounts state } // New create state object. 
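Note on the Stage changes above: the commits slice collapses into a single closure, so Commit stays a one-call operation that still wraps failures in *Error, and TestStageCommitError can now inject a failing closure directly. The lifecycle the tests exercise, assuming db and addr as in TestStage:

    st := New(db, trie.Root{})
    st.SetBalance(addr, big.NewInt(10))
    stage, _ := st.Stage(trie.Version{Major: 1}) // capture changes for block 1
    root, _ := stage.Commit()                    // root == stage.Hash()
    st = New(db, trie.Root{Hash: root, Ver: trie.Version{Major: 1}}) // reopen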
-func New(db *muxdb.MuxDB, root thor.Bytes32, blockNum, blockConflicts, steadyBlockNum uint32) *State { +func New(db *muxdb.MuxDB, root trie.Root) *State { state := State{ - db: db, - trie: db.NewTrie(AccountTrieName, root, blockNum, blockConflicts), - cache: make(map[thor.Address]*cachedObject), - steadyBlockNum: steadyBlockNum, + db: db, + trie: db.NewTrie(AccountTrieName, root), + cache: make(map[thor.Address]*cachedObject), } state.sm = stackedmap.New(func(key interface{}) (interface{}, bool, error) { @@ -68,8 +67,8 @@ func New(db *muxdb.MuxDB, root thor.Bytes32, blockNum, blockConflicts, steadyBlo } // Checkout checkouts to another state. -func (s *State) Checkout(root thor.Bytes32, blockNum, blockConflicts, steadyBlockNum uint32) *State { - return New(s.db, root, blockNum, blockConflicts, steadyBlockNum) +func (s *State) Checkout(root trie.Root) *State { + return New(s.db, root) } // cacheGetter implements stackedmap.MapGetter. @@ -102,7 +101,7 @@ func (s *State) cacheGetter(key interface{}) (value interface{}, exist bool, err if err != nil { return nil, false, err } - v, err := obj.GetStorage(k.key, s.steadyBlockNum) + v, err := obj.GetStorage(k.key) if err != nil { return nil, false, err } @@ -117,7 +116,7 @@ func (s *State) getCachedObject(addr thor.Address) (*cachedObject, error) { if co, ok := s.cache[addr]; ok { return co, nil } - a, am, err := loadAccount(s.trie, addr, s.steadyBlockNum) + a, am, err := loadAccount(s.trie, addr) if err != nil { return nil, err } @@ -359,28 +358,27 @@ func (s *State) RevertTo(revision int) { } // BuildStorageTrie build up storage trie for given address with cumulative changes. -func (s *State) BuildStorageTrie(addr thor.Address) (trie *muxdb.Trie, err error) { - acc, err := s.getAccount(addr) +func (s *State) BuildStorageTrie(addr thor.Address) (t *muxdb.Trie, err error) { + obj, err := s.getCachedObject(addr) if err != nil { return nil, &Error{err} } - if len(acc.StorageRoot) > 0 { - obj, err := s.getCachedObject(addr) - if err != nil { - return nil, &Error{err} - } - trie = s.db.NewTrie( + if len(obj.data.StorageRoot) > 0 { + t = s.db.NewTrie( StorageTrieName(obj.meta.StorageID), - thor.BytesToBytes32(acc.StorageRoot), - obj.meta.StorageCommitNum, - obj.meta.StorageDistinctNum) + trie.Root{ + Hash: thor.BytesToBytes32(obj.data.StorageRoot), + Ver: trie.Version{ + Major: obj.meta.StorageMajorVer, + Minor: obj.meta.StorageMinorVer, + }, + }, + ) } else { - trie = s.db.NewTrie( + t = s.db.NewTrie( "", - thor.Bytes32{}, - 0, - 0, + trie.Root{}, ) } @@ -391,8 +389,7 @@ func (s *State) BuildStorageTrie(addr thor.Address) (trie *muxdb.Trie, err error switch key := k.(type) { case storageKey: if key.barrier == barrier && key.addr == addr { - err = saveStorage(trie, key.key, v.(rlp.RawValue)) - if err != nil { + if err = saveStorage(t, key.key, v.(rlp.RawValue)); err != nil { return false } } @@ -402,11 +399,11 @@ func (s *State) BuildStorageTrie(addr thor.Address) (trie *muxdb.Trie, err error if err != nil { return nil, &Error{err} } - return trie, nil + return t, nil } // Stage makes a stage object to compute hash of trie or commit all changes. 
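Note: with steadyBlockNum gone, opening a State takes only a trie.Root. Reading the metadata renames (StorageCommitNum to StorageMajorVer, StorageDistinctNum to StorageMinorVer) together with Stage(newVer) below, Major appears to carry the block number and Minor the conflict counter. A sketch of opening state at a block under that assumption; header and conflicts are hypothetical locals:

    root := trie.Root{
        Hash: header.StateRoot(),
        Ver:  trie.Version{Major: header.Number(), Minor: conflicts},
    }
    st := stater.NewState(root)
    _ = st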
-func (s *State) Stage(newBlockNum, newBlockConflicts uint32) (*Stage, error) { +func (s *State) Stage(newVer trie.Version) (*Stage, error) { type changed struct { data Account meta AccountMetadata @@ -460,13 +457,12 @@ func (s *State) Stage(newBlockNum, newBlockConflicts uint32) (*Stage, error) { } c.storage[key.key] = v.(rlp.RawValue) if len(c.meta.StorageID) == 0 { - // generate storage id for the new storage trie. - var enc lowrlp.Encoder - enc.EncodeUint(uint64(newBlockNum)) - enc.EncodeUint(uint64(newBlockConflicts)) - enc.EncodeUint(storageTrieCreationCount) + id := binary.BigEndian.AppendUint32(nil, newVer.Major) + id = binary.AppendUvarint(id, uint64(newVer.Minor)) + id = binary.AppendUvarint(id, storageTrieCreationCount) + + c.meta.StorageID = id storageTrieCreationCount++ - c.meta.StorageID = enc.ToBytes() } case storageBarrierKey: if c, jerr = getChanged(thor.Address(key)); jerr != nil { @@ -484,7 +480,7 @@ func (s *State) Stage(newBlockNum, newBlockConflicts uint32) (*Stage, error) { } trieCpy := s.trie.Copy() - commits := make([]func() error, 0, len(changes)+2) + tries := make([]*muxdb.Trie, 0, len(changes)+2) for addr, c := range changes { // skip storage changes if account is empty @@ -496,44 +492,56 @@ func (s *State) Stage(newBlockNum, newBlockConflicts uint32) (*Stage, error) { } else { sTrie = s.db.NewTrie( StorageTrieName(c.meta.StorageID), - thor.BytesToBytes32(c.data.StorageRoot), - c.meta.StorageCommitNum, - c.meta.StorageDistinctNum) + trie.Root{ + Hash: thor.BytesToBytes32(c.data.StorageRoot), + Ver: trie.Version{ + Major: c.meta.StorageMajorVer, + Minor: c.meta.StorageMinorVer, + }, + }) } for k, v := range c.storage { if err := saveStorage(sTrie, k, v); err != nil { return nil, &Error{err} } } - sRoot, commit := sTrie.Stage(newBlockNum, newBlockConflicts) + sRoot := sTrie.Hash() c.data.StorageRoot = sRoot[:] - c.meta.StorageCommitNum = newBlockNum - c.meta.StorageDistinctNum = newBlockConflicts - commits = append(commits, commit) + c.meta.StorageMajorVer = newVer.Major + c.meta.StorageMinorVer = newVer.Minor + tries = append(tries, sTrie) } } if err := saveAccount(trieCpy, addr, &c.data, &c.meta); err != nil { return nil, &Error{err} } } - root, commitAcc := trieCpy.Stage(newBlockNum, newBlockConflicts) - commitCodes := func() error { - if len(codes) > 0 { - bulk := s.db.NewStore(codeStoreName).Bulk() - for hash, code := range codes { - if err := bulk.Put(hash[:], code); err != nil { + root := trieCpy.Hash() + tries = append(tries, trieCpy) + + return &Stage{ + root: root, + commit: func() error { + if len(codes) > 0 { + bulk := s.db.NewStore(codeStoreName).Bulk() + for hash, code := range codes { + if err := bulk.Put(hash[:], code); err != nil { + return err + } + } + if err := bulk.Write(); err != nil { return err } } - return bulk.Write() - } - return nil - } - commits = append(commits, commitAcc, commitCodes) - - return &Stage{ - root: root, - commits: commits, + for _, t := range tries { + if err := t.Commit(newVer, false); err != nil { + return err + } + } + // Just once for the account trie. 
+ metricAccountChanges().Add(int64(len(changes))) + return nil + }, }, nil } diff --git a/state/state_test.go b/state/state_test.go index 94cf3f979..966397600 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -13,12 +13,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/vechain/thor/v2/muxdb" "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func TestStateReadWrite(t *testing.T) { - db := muxdb.NewMem() - - state := New(db, thor.Bytes32{}, 0, 0, 0) + state := New(muxdb.NewMem(), trie.Root{}) addr := thor.BytesToAddress([]byte("account1")) storageKey := thor.BytesToBytes32([]byte("storageKey")) @@ -57,7 +56,7 @@ func TestStateRevert(t *testing.T) { db := muxdb.NewMem() - state := New(db, thor.Bytes32{}, 0, 0, 0) + state := New(db, trie.Root{}) addr := thor.BytesToAddress([]byte("account1")) storageKey := thor.BytesToBytes32([]byte("storageKey")) @@ -92,15 +91,14 @@ assert.Equal(t, M(false, nil), M(state.Exists(addr))) // - state = New(db, thor.Bytes32{}, 0, 0, 0) + state = New(db, trie.Root{}) assert.Equal(t, state.NewCheckpoint(), 1) state.RevertTo(0) assert.Equal(t, state.NewCheckpoint(), 0) } func TestEnergy(t *testing.T) { - db := muxdb.NewMem() - st := New(db, thor.Bytes32{}, 0, 0, 0) + st := New(muxdb.NewMem(), trie.Root{}) acc := thor.BytesToAddress([]byte("a1")) @@ -119,8 +117,7 @@ } func TestEncodeDecodeStorage(t *testing.T) { - db := muxdb.NewMem() - state := New(db, thor.Bytes32{}, 0, 0, 0) + state := New(muxdb.NewMem(), trie.Root{}) // Create an account and key addr := thor.BytesToAddress([]byte("account1")) @@ -153,8 +150,7 @@ } func TestBuildStorageTrie(t *testing.T) { - db := muxdb.NewMem() - state := New(db, thor.Bytes32{}, 0, 0, 0) + state := New(muxdb.NewMem(), trie.Root{}) // Create an account and set storage values addr := thor.BytesToAddress([]byte("account1")) @@ -174,8 +170,7 @@ } func TestStorage(t *testing.T) { - db := muxdb.NewMem() - st := New(db, thor.Bytes32{}, 0, 0, 0) + st := New(muxdb.NewMem(), trie.Root{}) addr := thor.BytesToAddress([]byte("addr")) key := thor.BytesToBytes32([]byte("key")) @@ -202,7 +197,7 @@ func TestStorageBarrier(t *testing.T) { db := muxdb.NewMem() - st := New(db, thor.Bytes32{}, 0, 0, 0) + st := New(db, trie.Root{}) addr := thor.BytesToAddress([]byte("addr")) key := thor.BytesToBytes32([]byte("key")) @@ -215,14 +210,14 @@ st.SetCode(addr, []byte("code")) - stage, err := st.Stage(0, 0) + stage, err := st.Stage(trie.Version{}) assert.Nil(t, err) root, err := stage.Commit() assert.Nil(t, err) - tr := db.NewTrie(AccountTrieName, root, 0, 0) - acc, _, err := loadAccount(tr, addr, 0) + tr := db.NewTrie(AccountTrieName, trie.Root{Hash: root}) + acc, _, err := loadAccount(tr, addr) assert.Nil(t, err) assert.Equal(t, 0, len(acc.StorageRoot), "should skip storage writes when account deleted then recreated") } diff --git a/state/stater.go b/state/stater.go index 6a6e476f3..a5be1df36 100644 --- a/state/stater.go +++ b/state/stater.go @@ -7,7 +7,7 @@ package state import ( "github.com/vechain/thor/v2/muxdb" - "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) // Stater is the state creator. @@ -21,6 +21,6 @@ } // NewState create a new state object.
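Note on the Stage rewrite above: new storage-trie IDs drop the lowrlp encoder for stdlib encoding/binary (a fixed 4-byte big-endian major version followed by two uvarints), and the account_state_changes_count metric is bumped once per committed stage. A mirror of the ID construction, illustrative only:

    func storageID(ver trie.Version, creationCount uint64) []byte {
        id := binary.BigEndian.AppendUint32(nil, ver.Major) // 4 bytes, big-endian
        id = binary.AppendUvarint(id, uint64(ver.Minor))    // uvarint minor version
        return binary.AppendUvarint(id, creationCount)      // uvarint per-stage counter
    }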
-func (s *Stater) NewState(root thor.Bytes32, blockNum, blockConflicts, steadyBlockNum uint32) *State { - return New(s.db, root, blockNum, blockConflicts, steadyBlockNum) +func (s *Stater) NewState(root trie.Root) *State { + return New(s.db, root) } diff --git a/state/stater_test.go b/state/stater_test.go index fb24f03ac..131b8d8f8 100644 --- a/state/stater_test.go +++ b/state/stater_test.go @@ -9,20 +9,17 @@ import ( "testing" "github.com/vechain/thor/v2/muxdb" - "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/trie" ) func TestStater(t *testing.T) { - db := muxdb.NewMem() - stater := NewStater(db) + stater := NewStater(muxdb.NewMem()) // Example State - root := thor.Bytes32{} - blockNum := uint32(1) - blockConflicts := uint32(0) - steadyBlockNum := uint32(1) + var root trie.Root + root.Ver.Major = 1 - state := stater.NewState(root, blockNum, blockConflicts, steadyBlockNum) + state := stater.NewState(root) if state == nil { t.Errorf("NewState returned nil") diff --git a/test/datagen/bytes.go b/test/datagen/bytes.go new file mode 100644 index 000000000..e01e2fece --- /dev/null +++ b/test/datagen/bytes.go @@ -0,0 +1,14 @@ +// Copyright (c) 2024 The VeChainThor developers + +// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying +// file LICENSE or + +package datagen + +import "crypto/rand" + +func RandBytes(n int) []byte { + bytes := make([]byte, n) + rand.Read(bytes) + return bytes +} diff --git a/test/datagen/numbers.go b/test/datagen/numbers.go index 9da0cdc00..b8745c978 100644 --- a/test/datagen/numbers.go +++ b/test/datagen/numbers.go @@ -13,6 +13,10 @@ func RandInt() int { return mathrand.Int() //#nosec G404 } +func RandUint64() uint64 { + return mathrand.Uint64() //#nosec G404 +} + func RandIntN(n int) int { return mathrand.N(n) //#nosec G404 } diff --git a/test/testchain/chain.go b/test/testchain/chain.go index af908594e..a92b3f0c4 100644 --- a/test/testchain/chain.go +++ b/test/testchain/chain.go @@ -8,9 +8,11 @@ package testchain import ( "errors" "fmt" + "math/rand/v2" "slices" "time" + "github.com/ethereum/go-ethereum/crypto" "github.com/vechain/thor/v2/bft" "github.com/vechain/thor/v2/block" "github.com/vechain/thor/v2/chain" @@ -63,6 +65,7 @@ func New( // NewIntegrationTestChain is a convenience function that creates a Chain for testing. // It uses an in-memory database, development network genesis, and a solo BFT engine. func NewIntegrationTestChain() (*Chain, error) { + forkConfig := thor.SoloFork // using SoloFork prevents tests depending on IDs of the genesis block from failing // Initialize the database db := muxdb.NewMem() @@ -70,7 +73,7 @@ func NewIntegrationTestChain() (*Chain, error) { stater := state.NewStater(db) // Initialize the genesis and retrieve the genesis block - gene := genesis.NewDevnet() + gene := genesis.NewDevnetWithConfig(forkConfig) geneBlk, _, _, err := gene.Build(stater) if err != nil { return nil, err @@ -88,11 +91,6 @@ func NewIntegrationTestChain() (*Chain, error) { return nil, err } - forkConfig := thor.NoFork - forkConfig.VIP191 = 1 - forkConfig.BLOCKLIST = 0 - forkConfig.VIP214 = 2 - return New( db, gene, @@ -101,7 +99,7 @@ func NewIntegrationTestChain() (*Chain, error) { stater, geneBlk, logDb, - thor.NoFork, + forkConfig, ), nil } @@ -131,6 +129,29 @@ func (c *Chain) MintTransactions(account genesis.DevAccount, transactions ...*tx return c.MintBlock(account, transactions...) } +// MintClauses creates a transaction with the provided clauses and adds it to the blockchain. 
+func (c *Chain) MintClauses(account genesis.DevAccount, clauses []*tx.Clause) error {
+	builder := new(tx.Builder).GasPriceCoef(255).
+		BlockRef(tx.NewBlockRef(c.Repo().BestBlockSummary().Header.Number())).
+		Expiration(1000).
+		ChainTag(c.Repo().ChainTag()).
+		Gas(10e6).
+		Nonce(rand.Uint64()) //#nosec G404
+
+	for _, clause := range clauses {
+		builder.Clause(clause)
+	}
+
+	tx := builder.Build()
+	signature, err := crypto.Sign(tx.SigningHash().Bytes(), account.PrivateKey)
+	if err != nil {
+		return fmt.Errorf("unable to sign tx: %w", err)
+	}
+	tx = tx.WithSignature(signature)
+
+	return c.MintBlock(account, tx)
+}
+
 // MintBlock creates and finalizes a new block with the given transactions.
 // It schedules a new block, adopts transactions, packs them into a block, and commits it to the chain.
 func (c *Chain) MintBlock(account genesis.DevAccount, transactions ...*tx.Transaction) error {
@@ -166,15 +187,18 @@ func (c *Chain) MintBlock(account genesis.DevAccount, transactions ...*tx.Transa
 	}
 
 	// Add the block to the repository.
-	if err := c.Repo().AddBlock(newBlk, receipts, 0); err != nil {
+	if err := c.Repo().AddBlock(newBlk, receipts, 0, true); err != nil {
 		return fmt.Errorf("unable to add tx to repo: %w", err)
 	}
 
-	// Set the new block as the best (latest) block in the repository.
-	if err := c.Repo().SetBestBlockID(newBlk.Header().ID()); err != nil {
-		return fmt.Errorf("unable to set best block: %w", err)
+	// Write the new block and receipts to the logdb.
+	w := c.LogDB().NewWriter()
+	if err := w.Write(newBlk, receipts); err != nil {
+		return err
+	}
+	if err := w.Commit(); err != nil {
+		return err
 	}
-
 	return nil
 }
diff --git a/thor/fork_config.go b/thor/fork_config.go
index 95ae288fe..6d5583a1d 100644
--- a/thor/fork_config.go
+++ b/thor/fork_config.go
@@ -53,6 +53,18 @@ var NoFork = ForkConfig{
 	GALACTICA: math.MaxUint32,
 }
 
+// SoloFork is used to retain the solo genesis ID.
+// Any forks that modify the chain state should be placed in block 1.
+var SoloFork = ForkConfig{
+	VIP191:    0,
+	ETH_CONST: 0,
+	BLOCKLIST: 0,
+	ETH_IST:   0,
+	VIP214:    0,
+	FINALITY:  0,
+	GALACTICA: 1,
+}
+
 // for well-known networks
 var forkConfigs = map[Bytes32]ForkConfig{
 	// mainnet
diff --git a/thor/params.go b/thor/params.go
index d65041e06..4ad295798 100644
--- a/thor/params.go
+++ b/thor/params.go
@@ -12,6 +12,11 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 )
 
+/*
+	NOTE: any changes to gas limit or block interval may affect how the txIndex and blockNumber are stored in logdb/sequence.go:
+	- an increase in gas limit may require more bits for txIndex;
+	- if block frequency is increased, blockNumber will increment faster, potentially exhausting the allocated bits sooner than expected.
+*/
 // Constants of block chain.
 const (
 	BlockInterval uint64 = 10 // time interval between two consecutive blocks.
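To make the NOTE in thor/params.go above concrete, here is a minimal sketch of the kind of bit-packing a log sequence key involves. The field widths below (28 bits for the block number, 15 for the transaction index, 21 for the log index) and the helper name makeSequence are illustrative assumptions, not the repository's actual allocation in logdb/sequence.go; the point is only that a higher gas limit (more transactions per block) or a shorter block interval (faster-growing block numbers) consumes a fixed 64-bit budget sooner.

package main

import "fmt"

// Illustrative widths only -- the real allocation lives in logdb/sequence.go.
const (
	blockNumBits = 28
	txIndexBits  = 15
	logIndexBits = 21 // 28 + 15 + 21 = 64
)

// makeSequence (hypothetical) packs (blockNum, txIndex, logIndex) into a single
// uint64 key, the scheme the NOTE warns about: widening any one field must
// shrink another, since all three share the same 64 bits.
func makeSequence(blockNum, txIndex, logIndex uint32) (uint64, error) {
	if blockNum >= 1<<blockNumBits || txIndex >= 1<<txIndexBits || logIndex >= 1<<logIndexBits {
		return 0, fmt.Errorf("field overflows its allocated bits")
	}
	return uint64(blockNum)<<(txIndexBits+logIndexBits) |
		uint64(txIndex)<<logIndexBits |
		uint64(logIndex), nil
}

func main() {
	seq, _ := makeSequence(12345, 7, 2)
	fmt.Printf("%064b\n", seq)
}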
diff --git a/thorclient/api_test.go b/thorclient/api_test.go index df72b1296..2ff52fb28 100644 --- a/thorclient/api_test.go +++ b/thorclient/api_test.go @@ -375,7 +375,7 @@ func testEventsEndpoint(t *testing.T, _ *testchain.Chain, ts *httptest.Server) { }, }, Range: nil, - Options: &logdb.Options{ + Options: &events.Options{ Offset: 0, Limit: 10, }, diff --git a/tracers/tracers_test.go b/tracers/tracers_test.go index bd44a85c0..ddd6db1f6 100644 --- a/tracers/tracers_test.go +++ b/tracers/tracers_test.go @@ -37,6 +37,7 @@ import ( "github.com/vechain/thor/v2/thor" "github.com/vechain/thor/v2/tracers" "github.com/vechain/thor/v2/tracers/logger" + "github.com/vechain/thor/v2/trie" "github.com/vechain/thor/v2/tx" "github.com/vechain/thor/v2/vm" "github.com/vechain/thor/v2/xenv" @@ -119,7 +120,7 @@ func RunTracerTest(t *testing.T, data *traceTest, tracerName string) json.RawMes } repo, _ := chain.NewRepository(db, gene) - st := state.New(db, gene.Header().StateRoot(), 0, 0, 0) + st := state.New(db, trie.Root{Hash: gene.Header().StateRoot()}) chain := repo.NewChain(gene.Header().ID()) for addr, account := range data.State { @@ -368,7 +369,7 @@ func TestInternals(t *testing.T) { } repo, _ := chain.NewRepository(db, gene) - st := state.New(db, gene.Header().StateRoot(), 0, 0, 0) + st := state.New(db, trie.Root{Hash: gene.Header().StateRoot()}) chain := repo.NewChain(gene.Header().ID()) st.SetCode(to, tc.code) diff --git a/trie/derive_root.go b/trie/derive_root.go index 9f03b1096..3eb9d15ea 100644 --- a/trie/derive_root.go +++ b/trie/derive_root.go @@ -5,9 +5,7 @@ package trie import ( - "bytes" - - "github.com/ethereum/go-ethereum/rlp" + "github.com/qianbin/drlp" "github.com/vechain/thor/v2/thor" ) @@ -19,12 +17,15 @@ type DerivableList interface { } func DeriveRoot(list DerivableList) thor.Bytes32 { - keybuf := new(bytes.Buffer) - trie := new(Trie) + var ( + trie Trie + key []byte + ) + for i := 0; i < list.Len(); i++ { - keybuf.Reset() - rlp.Encode(keybuf, uint(i)) - trie.Update(keybuf.Bytes(), list.GetRlp(i)) + key = drlp.AppendUint(key[:0], uint64(i)) + trie.Update(key, list.GetRlp(i), nil) } + return trie.Hash() } diff --git a/trie/derive_root_test.go b/trie/derive_root_test.go index 3ade78cc1..5e3a95e90 100644 --- a/trie/derive_root_test.go +++ b/trie/derive_root_test.go @@ -1,4 +1,4 @@ -// Copyright (c) 2024 The VeChainThor developers +// Copyright (c) 2023 The VeChainThor developers // Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying // file LICENSE or @@ -7,36 +7,23 @@ package trie import ( "testing" - - "github.com/stretchr/testify/assert" - "github.com/vechain/thor/v2/thor" ) -type MockDerivableList struct { - Elements [][]byte +type mockedDerivableList struct { + n int + content []byte } -func (m *MockDerivableList) Len() int { - return len(m.Elements) -} +func (l *mockedDerivableList) Len() int { return l.n } -func (m *MockDerivableList) GetRlp(i int) []byte { - if i >= len(m.Elements) { - return nil - } - return m.Elements[i] -} +func (l *mockedDerivableList) GetRlp(i int) []byte { return l.content } -func TestDeriveRoot(t *testing.T) { - mockList := &MockDerivableList{ - Elements: [][]byte{ - {1, 2, 3, 4}, - {1, 2, 3, 4, 5, 6}, - }, +func BenchmarkDeriveRoot(b *testing.B) { + list := mockedDerivableList{ + n: 100, + content: make([]byte, 32), + } + for i := 0; i < b.N; i++ { + DeriveRoot(&list) } - - root := DeriveRoot(mockList) - - assert.Equal(t, "0x154227caf1172839284ce29cd6eaaee115af0993d5a5a4a08d9bb19ed18edae7", root.String()) 
- assert.NotEqual(t, thor.Bytes32{}, root, "The root hash should not be empty") } diff --git a/trie/encoding.go b/trie/encoding.go index 1955a3e66..fa463414b 100644 --- a/trie/encoding.go +++ b/trie/encoding.go @@ -51,6 +51,35 @@ func hexToCompact(hex []byte) []byte { return buf } +func compactLen(hex []byte) int { + hexLen := len(hex) + if hasTerm(hex) { + hexLen-- + } + return hexLen/2 + 1 +} + +func appendHexToCompact(buf, hex []byte) []byte { + terminator := byte(0) + if hasTerm(hex) { + terminator = 1 + hex = hex[:len(hex)-1] + } + + b0 := terminator << 5 // the flag byte + if len(hex)&1 == 1 { + b0 |= 1 << 4 // odd flag + b0 |= hex[0] // first nibble is contained in the first byte + hex = hex[1:] + } + buf = append(buf, b0) + + for bi, ni := 0, 0; ni < len(hex); bi, ni = bi+1, ni+2 { + buf = append(buf, hex[ni]<<4|hex[ni+1]) + } + return buf +} + func compactToHex(compact []byte) []byte { if len(compact) == 0 { return compact diff --git a/trie/encoding_test.go b/trie/encoding_test.go index 97d8da136..dd019d44f 100644 --- a/trie/encoding_test.go +++ b/trie/encoding_test.go @@ -39,6 +39,12 @@ func TestHexCompact(t *testing.T) { if c := hexToCompact(test.hex); !bytes.Equal(c, test.compact) { t.Errorf("hexToCompact(%x) -> %x, want %x", test.hex, c, test.compact) } + if c := appendHexToCompact(nil, test.hex); !bytes.Equal(c, test.compact) { + t.Errorf("appendHexToCompact(%x) -> %x, want %x", test.hex, c, test.compact) + } + if l := compactLen(test.hex); l != len(test.compact) { + t.Errorf("compactLen(%x) -> %v, want %v", test.hex, l, len(test.compact)) + } if h := compactToHex(test.compact); !bytes.Equal(h, test.hex) { t.Errorf("compactToHex(%x) -> %x, want %x", test.compact, h, test.hex) } @@ -82,6 +88,14 @@ func BenchmarkHexToCompact(b *testing.B) { } } +func BenchmarkAppendHexToCompact(b *testing.B) { + testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/} + var buf []byte + for i := 0; i < b.N; i++ { + buf = appendHexToCompact(buf[:0], testBytes) + } +} + func BenchmarkCompactToHex(b *testing.B) { testBytes := []byte{0, 15, 1, 12, 11, 8, 16 /*term*/} for i := 0; i < b.N; i++ { diff --git a/trie/errors.go b/trie/errors.go index 92a84d0ef..9815e1f16 100644 --- a/trie/errors.go +++ b/trie/errors.go @@ -20,15 +20,15 @@ import ( "fmt" ) -// MissingNodeError is returned by the trie functions (TryGet, TryUpdate, TryDelete) +// MissingNodeError is returned by the trie functions (Get, Update) // in the case where a trie node is not present in the local database. It contains // information necessary for retrieving the missing node. 
type MissingNodeError struct { - NodeHash *hashNode // hash of the missing node - Path []byte // hex-encoded path to the missing node - Err error // the actual error + Ref refNode // the ref node of the missing node + Path []byte // hex-encoded path to the missing node + Err error // the actual error } func (err *MissingNodeError) Error() string { - return fmt.Sprintf("missing trie node %v (#%v path %x) reason: %v", err.NodeHash.Hash, err.NodeHash.seq, err.Path, err.Err) + return fmt.Sprintf("missing trie node (path %x hash %x #%v) reason: %v", err.Path, err.Ref.hash, err.Ref.ver, err.Err) } diff --git a/trie/extended.go b/trie/extended.go deleted file mode 100644 index 04b9de9ed..000000000 --- a/trie/extended.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright (c) 2021 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -package trie - -import "github.com/vechain/thor/v2/thor" - -// ExtendedTrie is an extended Merkle Patricia Trie which supports nodes sequence number -// and leaf metadata. -type ExtendedTrie struct { - trie Trie - nonCrypto bool -} - -// Node contains the internal node object. -type Node struct { - node node - cacheGen uint16 -} - -// Dirty returns if the node is dirty. -func (n Node) Dirty() bool { - if n.node != nil { - _, dirty, _ := n.node.cache() - return dirty - } - return true -} - -// Hash returns the hash of the node. It returns zero hash in case of embedded or not computed. -func (n Node) Hash() (hash thor.Bytes32) { - if n.node != nil { - if h, _, _ := n.node.cache(); h != nil { - return h.Hash - } - } - return -} - -// SeqNum returns the node's sequence number. 0 is returned if the node is dirty. -func (n Node) SeqNum() uint64 { - if n.node != nil { - return n.node.seqNum() - } - return 0 -} - -// NewExtended creates an extended trie. -func NewExtended(root thor.Bytes32, seq uint64, db Database, nonCrypto bool) *ExtendedTrie { - ext := &ExtendedTrie{trie: Trie{db: db}, nonCrypto: nonCrypto} - if (root != thor.Bytes32{}) && root != emptyRoot { - if db == nil { - panic("trie.NewExtended: cannot use existing root without a database") - } - ext.trie.root = &hashNode{Hash: root, seq: seq} - } - return ext -} - -// IsNonCrypto returns whether the trie is a non-crypto trie. -func (e *ExtendedTrie) IsNonCrypto() bool { - return e.nonCrypto -} - -// NewExtendedCached creates an extended trie with the given root node. -func NewExtendedCached(rootNode Node, db Database, nonCrypto bool) *ExtendedTrie { - return &ExtendedTrie{trie: Trie{root: rootNode.node, db: db, cacheGen: rootNode.cacheGen}, nonCrypto: nonCrypto} -} - -// SetCacheTTL sets life time of a cached node. -func (e *ExtendedTrie) SetCacheTTL(ttl uint16) { - e.trie.cacheTTL = ttl -} - -// CacheTTL returns the life time of a cached node. -func (e *ExtendedTrie) CacheTTL() uint16 { - return e.trie.cacheTTL -} - -// RootNode returns the current root node. -func (e *ExtendedTrie) RootNode() Node { - return Node{e.trie.root, e.trie.cacheGen} -} - -// SetRootNode replace the root node with the given one. -func (e *ExtendedTrie) SetRootNode(root Node) { - e.trie.root = root.node - e.trie.cacheGen = root.cacheGen -} - -// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at -// the key after the given start key. It filters out nodes satisfy the filter. 
-func (e *ExtendedTrie) NodeIterator(start []byte, filter func(seq uint64) bool) NodeIterator { - t := &e.trie - return newNodeIterator(t, start, filter, true, e.nonCrypto) -} - -// Get returns the value and metadata for key stored in the trie. -// The value and meta bytes must not be modified by the caller. -// If a node was not found in the database, a MissingNodeError is returned. -func (e *ExtendedTrie) Get(key []byte) (val, meta []byte, err error) { - t := &e.trie - - value, newroot, err := t.tryGet(t.root, keybytesToHex(key), 0) - if t.root != newroot { - t.root = newroot - } - if err != nil { - return nil, nil, err - } - - if value != nil { - return value.Value, value.meta, nil - } - return nil, nil, nil -} - -// Update associates key with value and metadata in the trie. Subsequent calls to -// Get will return value. If value has length zero, any existing value -// is deleted from the trie and calls to Get will return nil. -// -// The value and meta bytes must not be modified by the caller while they are -// stored in the trie. -// -// If a node was not found in the database, a MissingNodeError is returned. -func (e *ExtendedTrie) Update(key, value, meta []byte) error { - t := &e.trie - - k := keybytesToHex(key) - if len(value) != 0 { - _, n, err := t.insert(t.root, nil, k, &valueNode{Value: value, meta: meta}) - if err != nil { - return err - } - t.root = n - } else { - _, n, err := t.delete(t.root, nil, k) - if err != nil { - return err - } - t.root = n - } - return nil -} - -// Hash returns the root hash of the trie. It does not write to the -// database and can be used even if the trie doesn't have one. -func (e *ExtendedTrie) Hash() thor.Bytes32 { - t := &e.trie - return t.Hash() -} - -// Commit writes all nodes with the given sequence number to the trie's database. -// -// Committing flushes nodes from memory. -// Subsequent Get calls will load nodes from the database. -func (e *ExtendedTrie) Commit(seq uint64) (root thor.Bytes32, err error) { - t := &e.trie - if t.db == nil { - panic("Commit called on trie with nil database") - } - return e.CommitTo(t.db, seq) -} - -// CommitTo writes all nodes with the given sequence number to the given database. -// -// Committing flushes nodes from memory. Subsequent Get calls will -// load nodes from the trie's database. Calling code must ensure that -// the changes made to db are written back to the trie's attached -// database before using the trie. -func (e *ExtendedTrie) CommitTo(db DatabaseWriter, seq uint64) (root thor.Bytes32, err error) { - t := &e.trie - // ext trie always stores the root node even not changed. so here have to - // resolve it (since ext trie lazily resolve the root node when initializing). 
- if root, ok := t.root.(*hashNode); ok { - rootnode, err := t.resolveHash(root, nil) - if err != nil { - return thor.Bytes32{}, err - } - t.root = rootnode - } - hash, cached, err := e.hashRoot(db, seq) - if err != nil { - return thor.Bytes32{}, err - } - t.root = cached - t.cacheGen++ - return hash.(*hashNode).Hash, nil -} - -func (e *ExtendedTrie) hashRoot(db DatabaseWriter, seq uint64) (node, node, error) { - t := &e.trie - if t.root == nil { - return &hashNode{Hash: emptyRoot}, nil, nil - } - h := newHasherExtended(t.cacheGen, t.cacheTTL, seq, e.nonCrypto) - defer returnHasherToPool(h) - return h.hash(t.root, db, nil, true) -} diff --git a/trie/fast_node_encoder.go b/trie/fast_node_encoder.go deleted file mode 100644 index f67f5b52a..000000000 --- a/trie/fast_node_encoder.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) 2021 The VeChainThor developers - -// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying -// file LICENSE or - -package trie - -import ( - "github.com/vechain/thor/v2/lowrlp" -) - -// implements node.encode and node.encodeTrailing - -func (n *fullNode) encode(e *lowrlp.Encoder, nonCrypto bool) { - off := e.List() - for _, c := range n.Children { - if c != nil { - c.encode(e, nonCrypto) - } else { - e.EncodeEmptyString() - } - } - e.ListEnd(off) -} - -func (n *fullNode) encodeTrailing(e *lowrlp.Encoder) { - for _, c := range n.Children { - if c != nil { - c.encodeTrailing(e) - } - } -} - -func (n *shortNode) encode(e *lowrlp.Encoder, nonCrypto bool) { - off := e.List() - e.EncodeString(n.Key) - if n.Val != nil { - n.Val.encode(e, nonCrypto) - } else { - e.EncodeEmptyString() - } - e.ListEnd(off) -} - -func (n *shortNode) encodeTrailing(e *lowrlp.Encoder) { - if n.Val != nil { - n.Val.encodeTrailing(e) - } -} - -func (n *hashNode) encode(e *lowrlp.Encoder, nonCrypto bool) { - if nonCrypto { - e.EncodeString(nonCryptoNodeHashPlaceholder) - } else { - e.EncodeString(n.Hash[:]) - } -} - -func (n *hashNode) encodeTrailing(e *lowrlp.Encoder) { - e.EncodeUint(n.seq) -} - -func (n *valueNode) encode(e *lowrlp.Encoder, _ bool) { - e.EncodeString(n.Value) -} - -func (n *valueNode) encodeTrailing(e *lowrlp.Encoder) { - if len(n.Value) > 0 { - e.EncodeString(n.meta) - } -} diff --git a/trie/hasher.go b/trie/hasher.go index 1b1bb384f..66ee01256 100644 --- a/trie/hasher.go +++ b/trie/hasher.go @@ -17,226 +17,148 @@ package trie import ( + "fmt" "sync" - "github.com/ethereum/go-ethereum/common" - "github.com/vechain/thor/v2/lowrlp" "github.com/vechain/thor/v2/thor" ) type hasher struct { - enc lowrlp.Encoder - tmp sliceBuffer - cacheGen uint16 - cacheTTL uint16 - - extended bool - seq uint64 - nonCrypto bool -} - -type sliceBuffer []byte + buf []byte -func (b *sliceBuffer) Write(data []byte) (n int, err error) { - *b = append(*b, data...) - return len(data), nil -} - -func (b *sliceBuffer) Reset() { - *b = (*b)[:0] + // parameters for storing nodes + newVer Version + cacheTTL uint16 + skipHash bool } -// hashers live in a global pool. +// cache hashers var hasherPool = sync.Pool{ - New: func() interface{} { - return &hasher{ - tmp: make(sliceBuffer, 0, 700), // cap is as large as a full fullNode. 
- } + New: func() any { + return &hasher{} }, } -func newHasher(cacheGen, cacheTTL uint16) *hasher { - h := hasherPool.Get().(*hasher) - h.cacheGen = cacheGen - h.cacheTTL = cacheTTL - h.extended = false - h.seq = 0 - h.nonCrypto = false - return h -} - -func newHasherExtended(cacheGen, cacheTTL uint16, seq uint64, nonCrypto bool) *hasher { - h := hasherPool.Get().(*hasher) - h.cacheGen = cacheGen - h.cacheTTL = cacheTTL - h.extended = true - h.seq = seq - h.nonCrypto = nonCrypto - return h -} - -func returnHasherToPool(h *hasher) { - hasherPool.Put(h) -} - -// hash collapses a node down into a hash node, also returning a copy of the -// original node initialized with the computed hash to replace the original one. -func (h *hasher) hash(n node, db DatabaseWriter, path []byte, force bool) (node, node, error) { - // If we're not storing the node, just hashing, use available cached data - if hash, dirty, gen := n.cache(); hash != nil { - if db == nil { - return hash, n, nil +// hash computes and returns the hash of n. +// If force is true, the node is always hashed even smaller than 32 bytes. +func (h *hasher) hash(n node, force bool) []byte { + switch n := n.(type) { + case *fullNode: + // already hashed + if hash := n.flags.ref.hash; hash != nil { + return hash } - - if !dirty { - if !force { // non-root node - if h.cacheGen-gen > h.cacheTTL { // drop cached nodes exceeds life-time - return hash, hash, nil - } - return hash, n, nil + // hash all children + for i := 0; i < 16; i++ { + if cn := n.children[i]; cn != nil { + h.hash(cn, false) } + } - if !h.extended { - return hash, n, nil - } - // else for extended trie, always store root node regardless of dirty flag + h.buf = n.encodeConsensus(h.buf[:0]) + if len(h.buf) >= 32 || force { + n.flags.ref.hash = thor.Blake2b(h.buf).Bytes() + return n.flags.ref.hash } - } - // Trie not processed yet or needs storage, walk the children - collapsed, cached, err := h.hashChildren(n, db, path) - if err != nil { - return nil, n, err - } - hashed, err := h.store(collapsed, db, path, force) - if err != nil { - return nil, n, err - } - // Cache the hash of the node for later reuse and remove - // the dirty flag in commit mode. It's fine to assign these values directly - // without copying the node first because hashChildren copies it. - cachedHash, _ := hashed.(*hashNode) - switch cn := cached.(type) { + return nil case *shortNode: - cn.flags.hash = cachedHash - if db != nil { - cn.flags.dirty = false + // already hashed + if hash := n.flags.ref.hash; hash != nil { + return hash } - case *fullNode: - cn.flags.hash = cachedHash - if db != nil { - cn.flags.dirty = false + + // hash child node + h.hash(n.child, false) + + h.buf = n.encodeConsensus(h.buf[:0]) + if len(h.buf) >= 32 || force { + n.flags.ref.hash = thor.Blake2b(h.buf).Bytes() + return n.flags.ref.hash } + return nil + case *refNode: + return n.hash + case *valueNode: + return nil + default: + panic(fmt.Sprintf("hash %T: unexpected node: %v", n, n)) } - return hashed, cached, nil } -// hashChildren replaces the children of a node with their hashes if the encoded -// size of the child is larger than a hash, returning the collapsed node as well -// as a replacement for the original node with the child hashes cached in. 
-func (h *hasher) hashChildren(original node, db DatabaseWriter, path []byte) (node, node, error) { - var err error - - switch n := original.(type) { - case *shortNode: - // Hash the short node's child, caching the newly hashed subtree - collapsed, cached := n.copy(), n.copy() - collapsed.Key = hexToCompact(n.Key) - cached.Key = common.CopyBytes(n.Key) - - if _, ok := n.Val.(*valueNode); !ok { - collapsed.Val, cached.Val, err = h.hash(n.Val, db, append(path, n.Key...), false) - if err != nil { - return original, original, err - } - } - // no need when using frlp - // if collapsed.Val == nil { - // collapsed.Val = &valueNode{} // Ensure that nil children are encoded as empty strings. - // } - return collapsed, cached, nil +// store stores node n and all its dirty sub nodes. +// Root node is always stored regardless of its dirty flag. +func (h *hasher) store(n node, db DatabaseWriter, path []byte) (node, error) { + isRoot := len(path) == 0 + switch n := n.(type) { case *fullNode: - // Hash the full node's children, caching the newly hashed subtrees - collapsed, cached := n.copy(), n.copy() - + n = n.copy() for i := 0; i < 16; i++ { - if n.Children[i] != nil { - collapsed.Children[i], cached.Children[i], err = h.hash(n.Children[i], db, append(path, byte(i)), false) - if err != nil { - return original, original, err + cn := n.children[i] + switch cn := cn.(type) { + case *fullNode, *shortNode: + // store the child node if dirty + if ref, gen, dirty := cn.cache(); dirty { + nn, err := h.store(cn, db, append(path, byte(i))) + if err != nil { + return nil, err + } + n.children[i] = nn + } else { + // drop the cached node by replacing with its ref node when ttl reached + if n.flags.gen-gen > h.cacheTTL { + n.children[i] = &ref + } } } - // no need when using frlp - // else { - // collapsed.Children[i] = &valueNode{} // Ensure that nil children are encoded as empty strings. - // } } - // no need when using frlp - // if collapsed.Children[16] == nil { - // collapsed.Children[16] = &valueNode{} - // } - return collapsed, cached, nil - - default: - // Value and hash nodes don't have children so they're left as were - return n, original, nil - } -} -func (h *hasher) store(n node, db DatabaseWriter, path []byte, force bool) (node, error) { - // Don't store hashes or empty nodes. - if _, isHash := n.(*hashNode); n == nil || isHash { - return n, nil - } - // Generate the RLP encoding of the node - h.enc.Reset() - n.encode(&h.enc, h.nonCrypto) - h.tmp.Reset() - h.enc.ToWriter(&h.tmp) - - if h.nonCrypto { - // fullnode and shortnode with non-value child are forced - // just like normal trie. - switch n := n.(type) { - case *fullNode: - force = true - case *shortNode: - if _, ok := n.Val.(*valueNode); !ok { - force = true + // full node is stored in case of + // 1. it's the root node + // 2. it has hash value + // 3. hash is being skipped + if isRoot || n.flags.ref.hash != nil || h.skipHash { + h.buf = n.encode(h.buf[:0], h.skipHash) + if err := db.Put(path, h.newVer, h.buf); err != nil { + return nil, err } + n.flags.dirty = false + n.flags.ref.ver = h.newVer } - } - - if len(h.tmp) < 32 && !force { - return n, nil // Nodes smaller than 32 bytes are stored inside their parent - } - // Larger nodes are replaced by their hash and stored in the database. 
- hash, _, _ := n.cache() - if hash == nil { - hash = &hashNode{} - if h.nonCrypto { - hash.Hash = NonCryptoNodeHash - } else { - hash.Hash = thor.Blake2b(h.tmp) - } - } else { - cpy := *hash - hash = &cpy - } - if db != nil { - // extended - if h.extended { - h.enc.Reset() - n.encodeTrailing(&h.enc) - h.enc.ToWriter(&h.tmp) - hash.seq = h.seq + return n, nil + case *shortNode: + n = n.copy() + switch cn := n.child.(type) { + case *fullNode, *shortNode: + if ref, gen, dirty := cn.cache(); dirty { + // store the child node if dirty + nn, err := h.store(cn, db, append(path, n.key...)) + if err != nil { + return nil, err + } + n.child = nn + } else { + // drop the cached node by replacing with its ref node when ttl reached + if n.flags.gen-gen > h.cacheTTL { + n.child = &ref + } + } } - key := hash.Hash[:] - if ke, ok := db.(DatabaseKeyEncoder); ok { - key = ke.Encode(hash.Hash[:], h.seq, path) + // Here is the very significant improvement compared to maindb-v3. A short-node is embedded + // in its parent node whenever possible. Doing so can save about 30% storage space for a pruned trie. + // + // While for a hash-skipped trie, short-nodes are always stored as standalone nodes. + if isRoot || h.skipHash { + h.buf = n.encode(h.buf[:0], h.skipHash) + if err := db.Put(path, h.newVer, h.buf); err != nil { + return nil, err + } + n.flags.dirty = false + n.flags.ref.ver = h.newVer } - return hash, db.Put(key, h.tmp) + return n, nil + default: + panic(fmt.Sprintf("store %T: unexpected node: %v", n, n)) } - return hash, nil } diff --git a/trie/iterator.go b/trie/iterator.go index a27702f46..71f7f963a 100644 --- a/trie/iterator.go +++ b/trie/iterator.go @@ -18,10 +18,7 @@ package trie import ( "bytes" - "container/heap" "errors" - - "github.com/vechain/thor/v2/thor" ) // Iterator is a key-value trie iterator that traverses a Trie. @@ -60,12 +57,6 @@ func (it *Iterator) Next() bool { return false } -// Prove generates the Merkle proof for the leaf node the iterator is currently -// positioned on. -func (it *Iterator) Prove() [][]byte { - return it.nodeIt.LeafProof() -} - // Leaf presents the leaf node. type Leaf struct { Value []byte @@ -81,18 +72,9 @@ type NodeIterator interface { // Error returns the error status of the iterator. Error() error - // Hash returns the hash of the current node. - Hash() thor.Bytes32 - - // Node calls the handler with the blob of the current node if any. - Node(handler func(blob []byte) error) error - - // SeqNum returns the sequence number of the current node. - SeqNum() uint64 - - // Parent returns the hash of the parent of the current node. The hash may be the one - // grandparent if the immediate parent is an internal node with no hash. - Parent() thor.Bytes32 + // Blob returns the encoded blob and version num of the current node. + // If the current node is not stored as standalone node, the returned blob has zero length. + Blob() ([]byte, Version, error) // Path returns the hex-encoded path to the current node. // Callers must not retain references to the return value after calling Next. @@ -106,31 +88,23 @@ type NodeIterator interface { // positioned at a leaf. Callers must not retain references to the value after // calling Next. LeafKey() []byte - - // LeafProof returns the Merkle proof of the leaf. The method panics if the - // iterator is not positioned at a leaf. Callers must not retain references - // to the value after calling Next. 
- LeafProof() [][]byte } // nodeIteratorState represents the iteration state at one particular node of the // trie, which can be resumed at a later invocation. type nodeIteratorState struct { - hash thor.Bytes32 // Hash of the node being iterated (nil if not standalone) - node node // Trie node being iterated - parent thor.Bytes32 // Hash of the first full ancestor node (nil if current is the root) - index int // Child to be processed next - pathlen int // Length of the path to this node + node node // Trie node being iterated + index int // Child to be processed next + pathlen int // Length of the path to this node + blob []byte // Encoded blob of the node } type nodeIterator struct { - trie *Trie // Trie being iterated - stack []*nodeIteratorState // Hierarchy of trie nodes persisting the iteration state - path []byte // Path to the current node - err error // Failure set in case of an internal error in the iterator - filter func(seq uint64) bool // The filter to filter iterated nodes. - extended bool // If the trie is extended. - nonCrypto bool // If the trie is non-crypto. + trie *Trie // Trie being iterated + stack []*nodeIteratorState // Hierarchy of trie nodes persisting the iteration state + path []byte // Path to the current node + err error // Failure set in case of an internal error in the iterator + minVer Version // Skips nodes whose version lower than minVer } // errIteratorEnd is stored in nodeIterator.err when iteration is done. @@ -146,73 +120,43 @@ func (e seekError) Error() string { return "seek error: " + e.err.Error() } -func newNodeIterator(trie *Trie, start []byte, filter func(seq uint64) bool, extended, nonCrypto bool) NodeIterator { - if trie.Hash() == emptyState { - return new(nodeIterator) - } +func newNodeIterator(trie *Trie, start []byte, min Version) NodeIterator { it := &nodeIterator{ - trie: trie, - filter: filter, - extended: extended, - nonCrypto: nonCrypto, + trie: trie, + minVer: min, } it.err = it.seek(start) return it } -func (it *nodeIterator) Hash() thor.Bytes32 { +func (it *nodeIterator) Blob() (blob []byte, ver Version, err error) { if len(it.stack) == 0 { - return thor.Bytes32{} - } - return it.stack[len(it.stack)-1].hash -} - -func (it *nodeIterator) Node(handler func(blob []byte) error) error { - if len(it.stack) == 0 { - return nil + return nil, Version{}, nil } st := it.stack[len(it.stack)-1] - if st.hash.IsZero() { - return nil + ref, _, dirty := st.node.cache() + // dirty node has no blob + if dirty { + return } - h := newHasher(0, 0) - h.extended = it.extended - h.nonCrypto = it.nonCrypto - defer returnHasherToPool(h) - - collapsed, _, _ := h.hashChildren(st.node, nil, it.path) - - h.enc.Reset() - collapsed.encode(&h.enc, h.nonCrypto) - if it.extended { - collapsed.encodeTrailing(&h.enc) + if len(st.blob) > 0 { + blob, ver = st.blob, ref.ver + return } - h.tmp.Reset() - h.enc.ToWriter(&h.tmp) - return handler(h.tmp) -} - -func (it *nodeIterator) SeqNum() uint64 { - for i := len(it.stack) - 1; i >= 0; i-- { - if st := it.stack[i]; !st.hash.IsZero() { - return st.node.seqNum() - } - } - return 0 -} -func (it *nodeIterator) Parent() thor.Bytes32 { - if len(it.stack) == 0 { - return thor.Bytes32{} + // load from db + if blob, err = it.trie.db.Get(it.path, ref.ver); err != nil { + return } - return it.stack[len(it.stack)-1].parent + st.blob, ver = blob, ref.ver + return } func (it *nodeIterator) Leaf() *Leaf { if len(it.stack) > 0 { - if node, ok := it.stack[len(it.stack)-1].node.(*valueNode); ok { - return &Leaf{node.Value, node.meta} + if vn, 
ok := it.stack[len(it.stack)-1].node.(*valueNode); ok { + return &Leaf{Value: vn.val, Meta: vn.meta} } } return nil @@ -227,30 +171,6 @@ func (it *nodeIterator) LeafKey() []byte { panic("not at leaf") } -func (it *nodeIterator) LeafProof() [][]byte { - if len(it.stack) > 0 { - if _, ok := it.stack[len(it.stack)-1].node.(*valueNode); ok { - hasher := newHasher(0, 0) - defer returnHasherToPool(hasher) - - proofs := make([][]byte, 0, len(it.stack)) - - for i, item := range it.stack[:len(it.stack)-1] { - // Gather nodes that end up as hash nodes (or the root) - node, _, _ := hasher.hashChildren(item.node, nil, nil) - hashed, _ := hasher.store(node, nil, nil, false) - if _, ok := hashed.(*hashNode); ok || i == 0 { - hasher.enc.Reset() - node.encode(&hasher.enc, hasher.nonCrypto) - proofs = append(proofs, hasher.enc.ToBytes()) - } - } - return proofs - } - } - panic("not at leaf") -} - func (it *nodeIterator) Path() []byte { return it.path } @@ -309,19 +229,21 @@ func (it *nodeIterator) seek(prefix []byte) error { // peek creates the next state of the iterator. func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, error) { if len(it.stack) == 0 { - if n := it.trie.root; n != nil { - if !it.filter(n.seqNum()) { + n := it.trie.root + if n == nil { + return nil, nil, nil, errIteratorEnd + } + if ref, _, dirty := n.cache(); !dirty { + if ref.ver.Compare(it.minVer) < 0 { return nil, nil, nil, errIteratorEnd } } // Initialize the iterator if we've just started. - root := it.trie.Hash() state := &nodeIteratorState{node: it.trie.root, index: -1} - if root != emptyRoot { - state.hash = root + if err := state.resolve(it.trie, nil); err != nil { + return nil, nil, nil, err } - err := state.resolve(it.trie, nil) - return state, nil, nil, err + return state, nil, nil, nil } if !descend { // If we're skipping children, pop the current node first @@ -331,11 +253,7 @@ func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, er // Continue iteration to the next child for len(it.stack) > 0 { parent := it.stack[len(it.stack)-1] - ancestor := parent.hash - if (ancestor == thor.Bytes32{}) { - ancestor = parent.parent - } - state, path, ok := it.nextChild(parent, ancestor) + state, path, ok := it.nextChild(parent) if ok { if err := state.resolve(it.trie, path); err != nil { return parent, &parent.index, path, err @@ -349,41 +267,35 @@ func (it *nodeIterator) peek(descend bool) (*nodeIteratorState, *int, []byte, er } func (st *nodeIteratorState) resolve(tr *Trie, path []byte) error { - if hash, ok := st.node.(*hashNode); ok { - resolved, err := tr.resolveHash(hash, path) + if ref, ok := st.node.(*refNode); ok { + blob, err := tr.db.Get(path, ref.ver) if err != nil { - return err + return &MissingNodeError{Ref: *ref, Path: path, Err: err} } - st.node = resolved - st.hash = hash.Hash + st.blob = blob + st.node = mustDecodeNode(ref, blob, 0) } return nil } -func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor thor.Bytes32) (*nodeIteratorState, []byte, bool) { +func (it *nodeIterator) nextChild(parent *nodeIteratorState) (*nodeIteratorState, []byte, bool) { switch node := parent.node.(type) { case *fullNode: // Full node, move to the first non-nil child. 
- for i := parent.index + 1; i < len(node.Children); i++ { - child := node.Children[i] - if child != nil { - hash, _, _ := child.cache() - if _, ok := child.(*hashNode); ok || hash != nil { - if !it.filter(child.seqNum()) { + for i := parent.index + 1; i < len(node.children); i++ { + if child := node.children[i]; child != nil { + if ref, _, dirty := child.cache(); !dirty { + if ref.ver.Compare(it.minVer) < 0 { continue } } state := &nodeIteratorState{ node: child, - parent: ancestor, index: -1, pathlen: len(it.path), } - if hash != nil { - state.hash = hash.Hash - } parent.index = i - 1 return state, append(it.path, byte(i)), true } @@ -391,25 +303,18 @@ func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor thor.Bytes case *shortNode: // Short node, return the pointer singleton child if parent.index < 0 { - hash, _, _ := node.Val.cache() - - if _, ok := node.Val.(*hashNode); ok || hash != nil { - if !it.filter(node.Val.seqNum()) { + if ref, _, dirty := node.child.cache(); !dirty { + if ref.ver.Compare(it.minVer) < 0 { break } } state := &nodeIteratorState{ - node: node.Val, - parent: ancestor, + node: node.child, index: -1, pathlen: len(it.path), } - - if hash != nil { - state.hash = hash.Hash - } - return state, append(it.path, node.Key...), true + return state, append(it.path, node.key...), true } } return parent, it.path, false @@ -428,237 +333,3 @@ func (it *nodeIterator) pop() { it.path = it.path[:parent.pathlen] it.stack = it.stack[:len(it.stack)-1] } - -func compareNodes(a, b NodeIterator) int { - if cmp := bytes.Compare(a.Path(), b.Path()); cmp != 0 { - return cmp - } - - aLeaf := a.Leaf() - bLeaf := b.Leaf() - - if aLeaf != nil && bLeaf == nil { - return -1 - } else if bLeaf != nil && aLeaf == nil { - return 1 - } - if cmp := bytes.Compare(a.Hash().Bytes(), b.Hash().Bytes()); cmp != 0 { - return cmp - } - if aLeaf != nil && bLeaf != nil { - return bytes.Compare(aLeaf.Value, bLeaf.Value) - } - return 0 -} - -type differenceIterator struct { - a, b NodeIterator // Nodes returned are those in b - a. - eof bool // Indicates a has run out of elements - count int // Number of nodes scanned on either trie -} - -// NewDifferenceIterator constructs a NodeIterator that iterates over elements in b that -// are not in a. Returns the iterator, and a pointer to an integer recording the number -// of nodes seen. -func NewDifferenceIterator(a, b NodeIterator) (NodeIterator, *int) { - a.Next(true) - it := &differenceIterator{ - a: a, - b: b, - } - return it, &it.count -} - -func (it *differenceIterator) Hash() thor.Bytes32 { - return it.b.Hash() -} - -func (it *differenceIterator) Node(handler func(blob []byte) error) error { - return it.b.Node(handler) -} - -func (it *differenceIterator) SeqNum() uint64 { - return it.b.SeqNum() -} - -func (it *differenceIterator) Parent() thor.Bytes32 { - return it.b.Parent() -} - -func (it *differenceIterator) Leaf() *Leaf { - return it.b.Leaf() -} - -func (it *differenceIterator) LeafKey() []byte { - return it.b.LeafKey() -} - -func (it *differenceIterator) LeafProof() [][]byte { - return it.b.LeafProof() -} - -func (it *differenceIterator) Path() []byte { - return it.b.Path() -} - -func (it *differenceIterator) Next(bool) bool { - // Invariants: - // - We always advance at least one element in b. - // - At the start of this function, a's path is lexically greater than b's. 
- if !it.b.Next(true) { - return false - } - it.count++ - - if it.eof { - // a has reached eof, so we just return all elements from b - return true - } - - for { - switch compareNodes(it.a, it.b) { - case -1: - // b jumped past a; advance a - if !it.a.Next(true) { - it.eof = true - return true - } - it.count++ - case 1: - // b is before a - return true - case 0: - // a and b are identical; skip this whole subtree if the nodes have hashes - hasHash := it.a.Hash() == thor.Bytes32{} - if !it.b.Next(hasHash) { - return false - } - it.count++ - if !it.a.Next(hasHash) { - it.eof = true - return true - } - it.count++ - } - } -} - -func (it *differenceIterator) Error() error { - if err := it.a.Error(); err != nil { - return err - } - return it.b.Error() -} - -type nodeIteratorHeap []NodeIterator - -func (h nodeIteratorHeap) Len() int { return len(h) } -func (h nodeIteratorHeap) Less(i, j int) bool { return compareNodes(h[i], h[j]) < 0 } -func (h nodeIteratorHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -func (h *nodeIteratorHeap) Push(x interface{}) { *h = append(*h, x.(NodeIterator)) } -func (h *nodeIteratorHeap) Pop() interface{} { - n := len(*h) - x := (*h)[n-1] - *h = (*h)[0 : n-1] - return x -} - -type unionIterator struct { - items *nodeIteratorHeap // Nodes returned are the union of the ones in these iterators - count int // Number of nodes scanned across all tries -} - -// NewUnionIterator constructs a NodeIterator that iterates over elements in the union -// of the provided NodeIterators. Returns the iterator, and a pointer to an integer -// recording the number of nodes visited. -func NewUnionIterator(iters []NodeIterator) (NodeIterator, *int) { - h := make(nodeIteratorHeap, len(iters)) - copy(h, iters) - heap.Init(&h) - - ui := &unionIterator{items: &h} - return ui, &ui.count -} - -func (it *unionIterator) Hash() thor.Bytes32 { - return (*it.items)[0].Hash() -} - -func (it *unionIterator) Node(handler func(blob []byte) error) error { - return (*it.items)[0].Node(handler) -} - -func (it *unionIterator) SeqNum() uint64 { - return (*it.items)[0].SeqNum() -} - -func (it *unionIterator) Parent() thor.Bytes32 { - return (*it.items)[0].Parent() -} - -func (it *unionIterator) Leaf() *Leaf { - return (*it.items)[0].Leaf() -} - -func (it *unionIterator) LeafKey() []byte { - return (*it.items)[0].LeafKey() -} - -func (it *unionIterator) LeafProof() [][]byte { - return (*it.items)[0].LeafProof() -} - -func (it *unionIterator) Path() []byte { - return (*it.items)[0].Path() -} - -// Next returns the next node in the union of tries being iterated over. -// -// It does this by maintaining a heap of iterators, sorted by the iteration -// order of their next elements, with one entry for each source trie. Each -// time Next() is called, it takes the least element from the heap to return, -// advancing any other iterators that also point to that same element. These -// iterators are called with descend=false, since we know that any nodes under -// these nodes will also be duplicates, found in the currently selected iterator. -// Whenever an iterator is advanced, it is pushed back into the heap if it still -// has elements remaining. -// -// In the case that descend=false - eg, we're asked to ignore all subnodes of the -// current node - we also advance any iterators in the heap that have the current -// path as a prefix. 
-func (it *unionIterator) Next(descend bool) bool { - if len(*it.items) == 0 { - return false - } - - // Get the next key from the union - least := heap.Pop(it.items).(NodeIterator) - - // Skip over other nodes as long as they're identical, or, if we're not descending, as - // long as they have the same prefix as the current node. - for len(*it.items) > 0 && ((!descend && bytes.HasPrefix((*it.items)[0].Path(), least.Path())) || compareNodes(least, (*it.items)[0]) == 0) { - skipped := heap.Pop(it.items).(NodeIterator) - // Skip the whole subtree if the nodes have hashes; otherwise just skip this node - if skipped.Next(skipped.Hash() == thor.Bytes32{}) { - it.count++ - // If there are more elements, push the iterator back on the heap - heap.Push(it.items, skipped) - } - } - - if least.Next(descend) { - it.count++ - heap.Push(it.items, least) - } - - return len(*it.items) > 0 -} - -func (it *unionIterator) Error() error { - for i := 0; i < len(*it.items); i++ { - if err := (*it.items)[i].Error(); err != nil { - return err - } - } - return nil -} diff --git a/trie/iterator_test.go b/trie/iterator_test.go index bddc99287..5da338417 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -18,22 +18,19 @@ package trie import ( "bytes" - "encoding/hex" "fmt" "math/rand/v2" "testing" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" "github.com/stretchr/testify/assert" - "github.com/vechain/thor/v2/thor" ) // makeTestTrie create a sample test trie to test node-wise reconstruction. -func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) { +func makeTestTrie() (*memdb, *Trie, map[string][]byte) { // Create an empty trie - db := ethdb.NewMemDatabase() - trie, _ := New(thor.Bytes32{}, db) + db := newMemDatabase() + trie := New(Root{}, db) // Fill it with some arbitrary data content := make(map[string][]byte) @@ -41,27 +38,28 @@ func makeTestTrie() (ethdb.Database, *Trie, map[string][]byte) { // Map the same data under multiple keys key, val := common.LeftPadBytes([]byte{1, i}, 32), []byte{i} content[string(key)] = val - trie.Update(key, val) + trie.Update(key, val, nil) key, val = common.LeftPadBytes([]byte{2, i}, 32), []byte{i} content[string(key)] = val - trie.Update(key, val) + trie.Update(key, val, nil) // Add some other data to inflate the trie for j := byte(3); j < 13; j++ { key, val = common.LeftPadBytes([]byte{j, i}, 32), []byte{j, i} content[string(key)] = val - trie.Update(key, val) + trie.Update(key, val, nil) } } - trie.Commit() + + trie.Commit(db, Version{Major: 1}, false) // Return the generated trie return db, trie, content } func TestIterator(t *testing.T) { - trie := newEmpty() + trie := new(Trie) vals := []struct{ k, v string }{ {"do", "verb"}, {"ether", "wookiedoo"}, @@ -74,12 +72,13 @@ func TestIterator(t *testing.T) { all := make(map[string]string) for _, val := range vals { all[val.k] = val.v - trie.Update([]byte(val.k), []byte(val.v)) + trie.Update([]byte(val.k), []byte(val.v), nil) } - trie.Commit() + db := newMemDatabase() + trie.Commit(db, Version{}, false) found := make(map[string]string) - it := NewIterator(trie.NodeIterator(nil)) + it := NewIterator(trie.NodeIterator(nil, Version{})) for it.Next() { found[string(it.Key)] = string(it.Value) } @@ -97,19 +96,19 @@ type kv struct { } func TestIteratorLargeData(t *testing.T) { - trie := newEmpty() + trie := new(Trie) vals := make(map[string]*kv) for i := byte(0); i < 255; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} value2 := 
&kv{common.LeftPadBytes([]byte{10, i}, 32), []byte{i}, false} - trie.Update(value.k, value.v) - trie.Update(value2.k, value2.v) + trie.Update(value.k, value.v, nil) + trie.Update(value2.k, value2.v, nil) vals[string(value.k)] = value vals[string(value2.k)] = value2 } - it := NewIterator(trie.NodeIterator(nil)) + it := NewIterator(trie.NodeIterator(nil, Version{})) for it.Next() { vals[string(it.Key)].t = true } @@ -134,21 +133,22 @@ func TestNodeIteratorCoverage(t *testing.T) { // Create some arbitrary test trie to iterate db, trie, _ := makeTestTrie() - // Gather all the node hashes found by the iterator - hashes := make(map[thor.Bytes32]struct{}) - for it := trie.NodeIterator(nil); it.Next(true); { - if it.Hash() != (thor.Bytes32{}) { - hashes[it.Hash()] = struct{}{} + // Gather all the node storage key found by the iterator + keys := make(map[string]struct{}) + for it := trie.NodeIterator(nil, Version{}); it.Next(true); { + blob, ver, _ := it.Blob() + if len(blob) > 0 { + keys[string(makeKey(it.Path(), ver))] = struct{}{} } } // Cross check the hashes and the database itself - for hash := range hashes { - if _, err := db.Get(hash.Bytes()); err != nil { - t.Errorf("failed to retrieve reported node %x: %v", hash, err) + for key := range keys { + if _, err := db.db.Get([]byte(key)); err != nil { + t.Errorf("failed to retrieve reported node %x: %v", key, err) } } - for _, key := range db.(*ethdb.MemDatabase).Keys() { - if _, ok := hashes[thor.BytesToBytes32(key)]; !ok { + for _, key := range db.db.Keys() { + if _, ok := keys[string(key)]; !ok { t.Errorf("state entry not reported %x", key) } } @@ -180,25 +180,25 @@ var testdata2 = []kvs{ } func TestIteratorSeek(t *testing.T) { - trie := newEmpty() + trie := new(Trie) for _, val := range testdata1 { - trie.Update([]byte(val.k), []byte(val.v)) + trie.Update([]byte(val.k), []byte(val.v), nil) } // Seek to the middle. - it := NewIterator(trie.NodeIterator([]byte("fab"))) + it := NewIterator(trie.NodeIterator([]byte("fab"), Version{})) if err := checkIteratorOrder(testdata1[4:], it); err != nil { t.Fatal(err) } // Seek to a non-existent key. - it = NewIterator(trie.NodeIterator([]byte("barc"))) + it = NewIterator(trie.NodeIterator([]byte("barc"), Version{})) if err := checkIteratorOrder(testdata1[1:], it); err != nil { t.Fatal(err) } // Seek beyond the end. 
- it = NewIterator(trie.NodeIterator([]byte("z"))) + it = NewIterator(trie.NodeIterator([]byte("z"), Version{})) if err := checkIteratorOrder(nil, it); err != nil { t.Fatal(err) } @@ -220,136 +220,55 @@ func checkIteratorOrder(want []kvs, it *Iterator) error { return nil } -func TestDifferenceIterator(t *testing.T) { - triea := newEmpty() - for _, val := range testdata1 { - triea.Update([]byte(val.k), []byte(val.v)) - } - triea.Commit() - - trieb := newEmpty() - for _, val := range testdata2 { - trieb.Update([]byte(val.k), []byte(val.v)) - } - trieb.Commit() - - found := make(map[string]string) - di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil)) - it := NewIterator(di) - for it.Next() { - found[string(it.Key)] = string(it.Value) - } - - all := []struct{ k, v string }{ - {"aardvark", "c"}, - {"barb", "bd"}, - {"bars", "be"}, - {"jars", "d"}, - } - for _, item := range all { - if found[item.k] != item.v { - t.Errorf("iterator value mismatch for %s: got %v want %v", item.k, found[item.k], item.v) - } - } - if len(found) != len(all) { - t.Errorf("iterator count mismatch: got %d values, want %d", len(found), len(all)) - } -} - -func TestUnionIterator(t *testing.T) { - triea := newEmpty() - for _, val := range testdata1 { - triea.Update([]byte(val.k), []byte(val.v)) - } - triea.Commit() - - trieb := newEmpty() - for _, val := range testdata2 { - trieb.Update([]byte(val.k), []byte(val.v)) - } - trieb.Commit() - - di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)}) - it := NewIterator(di) - - all := []struct{ k, v string }{ - {"aardvark", "c"}, - {"barb", "ba"}, - {"barb", "bd"}, - {"bard", "bc"}, - {"bars", "bb"}, - {"bars", "be"}, - {"bar", "b"}, - {"fab", "z"}, - {"food", "ab"}, - {"foos", "aa"}, - {"foo", "a"}, - {"jars", "d"}, - } - - for i, kv := range all { - if !it.Next() { - t.Errorf("Iterator ends prematurely at element %d", i) - } - if kv.k != string(it.Key) { - t.Errorf("iterator value mismatch for element %d: got key %s want %s", i, it.Key, kv.k) - } - if kv.v != string(it.Value) { - t.Errorf("iterator value mismatch for element %d: got value %s want %s", i, it.Value, kv.v) - } - } - if it.Next() { - t.Errorf("Iterator returned extra values.") - } -} - func TestIteratorNoDups(t *testing.T) { var tr Trie for _, val := range testdata1 { - tr.Update([]byte(val.k), []byte(val.v)) + tr.Update([]byte(val.k), []byte(val.v), nil) } - checkIteratorNoDups(t, tr.NodeIterator(nil), nil) + checkIteratorNoDups(t, tr.NodeIterator(nil, Version{}), nil) } // This test checks that nodeIterator.Next can be retried after inserting missing trie nodes. func TestIteratorContinueAfterError(t *testing.T) { - db := ethdb.NewMemDatabase() - tr, _ := New(thor.Bytes32{}, db) + db := newMemDatabase() + ver := Version{} + tr := New(Root{}, db) for _, val := range testdata1 { - tr.Update([]byte(val.k), []byte(val.v)) + tr.Update([]byte(val.k), []byte(val.v), nil) } - tr.Commit() - wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil) - keys := db.Keys() + ver.Major++ + tr.Commit(db, ver, false) + wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil, Version{}), nil) + keys := db.db.Keys() t.Log("node count", wantNodeCount) for i := 0; i < 20; i++ { // Create trie that will load all nodes from DB. - tr, _ := New(tr.Hash(), db) + tr := New(Root{tr.Hash(), ver}, db) // Remove a random node from the database. It can't be the root node // because that one is already loaded. 
var rkey []byte for { //#nosec G404 - if rkey = keys[rand.N(len(keys))]; !bytes.Equal(rkey, tr.Hash().Bytes()) { + if rkey = keys[rand.N(len(keys))]; !bytes.Equal(rkey, makeKey(nil, ver)) { break } } - rval, _ := db.Get(rkey) - db.Delete(rkey) + rval, _ := db.db.Get(rkey) + db.db.Delete(rkey) // Iterate until the error is hit. seen := make(map[string]bool) - it := tr.NodeIterator(nil) + it := tr.NodeIterator(nil, Version{}) checkIteratorNoDups(t, it, seen) missing, ok := it.Error().(*MissingNodeError) - if !ok || !bytes.Equal(missing.NodeHash.Hash[:], rkey) { + if !ok || !bytes.Equal(makeKey(missing.Path, ver), rkey) { t.Fatal("didn't hit missing node, got", it.Error()) } // Add the node back and continue iteration. - db.Put(rkey, rval) + db.db.Put(rkey, rval) checkIteratorNoDups(t, it, seen) if it.Error() != nil { t.Fatal("unexpected error", it.Error()) @@ -360,41 +279,6 @@ func TestIteratorContinueAfterError(t *testing.T) { } } -// Similar to the test above, this one checks that failure to create nodeIterator at a -// certain key prefix behaves correctly when Next is called. The expectation is that Next -// should retry seeking before returning true for the first time. -func TestIteratorContinueAfterSeekError(t *testing.T) { - // Commit test trie to db, then remove the node containing "bars". - db := ethdb.NewMemDatabase() - ctr, _ := New(thor.Bytes32{}, db) - for _, val := range testdata1 { - ctr.Update([]byte(val.k), []byte(val.v)) - } - root, _ := ctr.Commit() - barNodeHash, _ := hex.DecodeString("d32fb77ad25227d60b76d53a512d28137304c9c03556db08a1709563c7ae9c9f") - barNode, _ := db.Get(barNodeHash[:]) - db.Delete(barNodeHash[:]) - - // Create a new iterator that seeks to "bars". Seeking can't proceed because - // the node is missing. - tr, _ := New(root, db) - it := tr.NodeIterator([]byte("bars")) - missing, ok := it.Error().(*MissingNodeError) - if !ok { - t.Fatal("want MissingNodeError, got", it.Error()) - } else if !bytes.Equal(missing.NodeHash.Hash[:], barNodeHash) { - t.Fatal("wrong node missing") - } - - // Reinsert the missing node. - db.Put(barNodeHash[:], barNode[:]) - - // Check that iteration produces the right set of values. 
-	if err := checkIteratorOrder(testdata1[2:], NewIterator(it)); err != nil {
-		t.Fatal(err)
-	}
-}
-
 func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) int {
 	if seen == nil {
 		seen = make(map[string]bool)
@@ -409,33 +293,36 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in
 }
 
 func TestIteratorNodeFilter(t *testing.T) {
-	db := ethdb.NewMemDatabase()
-	tr := NewExtended(thor.Bytes32{}, 0, db, false)
+	db := newMemDatabase()
+	ver := Version{}
+	tr := New(Root{}, db)
 	for _, val := range testdata1 {
 		tr.Update([]byte(val.k), []byte(val.v), nil)
 	}
-	root1, _ := tr.Commit(1)
-	_ = root1
+	ver.Major++
+	tr.Commit(db, ver, false)
 	for _, val := range testdata2 {
 		tr.Update([]byte(val.k), []byte(val.v), nil)
 	}
-	root2, _ := tr.Commit(2)
+	ver.Major++
+	tr.Commit(db, ver, false)
+	root2 := tr.Hash()
 
-	tr = NewExtended(root2, 2, db, false)
+	tr = New(Root{root2, Version{Major: 2}}, db)
 
-	it := tr.NodeIterator(nil, func(seq uint64) bool { return seq >= 1 })
+	it := tr.NodeIterator(nil, Version{Major: 1})
 	for it.Next(true) {
-		if h := it.Hash(); !h.IsZero() {
-			assert.True(t, it.SeqNum() >= 1)
+		if blob, ver, _ := it.Blob(); len(blob) > 0 {
+			assert.True(t, ver.Major >= 1)
 		}
 	}
 
-	it = tr.NodeIterator(nil, func(seq uint64) bool { return seq >= 2 })
+	it = tr.NodeIterator(nil, Version{Major: 2})
 	for it.Next(true) {
-		if h := it.Hash(); !h.IsZero() {
-			assert.True(t, it.SeqNum() >= 2)
+		if blob, ver, _ := it.Blob(); len(blob) > 0 {
+			assert.True(t, ver.Major >= 2)
 		}
 	}
 }
diff --git a/trie/node.go b/trie/node.go
index 77108aac3..eb295e8a0 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -17,108 +17,91 @@ package trie
 
 import (
-	"bytes"
-	"errors"
+	"encoding/binary"
 	"fmt"
 	"io"
 	"strings"
 
-	"github.com/ethereum/go-ethereum/rlp"
-	"github.com/vechain/thor/v2/lowrlp"
-	"github.com/vechain/thor/v2/thor"
+	"github.com/qianbin/drlp"
 )
 
-var NonCryptoNodeHash = thor.BytesToBytes32(bytes.Repeat([]byte{0xff}, 32))
-var nonCryptoNodeHashPlaceholder = []byte{0}
-
 var indices = []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "[17]"}
 
+// node kinds (lower 3 bits of node tag)
+const (
+	kindEmpty byte = iota
+	kindFull
+	kindShort
+	kindRef
+	kindValue
+)
+
+// node attributes (higher 5 bits of node tag)
+const (
+	attrHasHash    = byte(1 << iota) // indicates a ref node has the hash field
+	attrHasMajor                     // indicates a ref node has the ver.Major field
+	attrHasMinor                     // indicates a ref node has the ver.Minor field
+	attrHasMeta                      // indicates a value node has the meta field
+	attrHasManyRef                   // indicates a full node contains many ref nodes
+)
+
 type node interface {
+	Version() Version
 	fstring(string) string
-	cache() (*hashNode, bool, uint16)
-	seqNum() uint64
-	encode(e *lowrlp.Encoder, nonCrypto bool)
-	encodeTrailing(*lowrlp.Encoder)
+	cache() (ref refNode, gen uint16, dirty bool)
+	encodeConsensus(buf []byte) []byte // encode the node for computing MPT root
+	encode(buf []byte, skipHash bool) []byte
 }
 
 type (
 	fullNode struct {
-		Children [17]node // Actual trie node data to encode/decode (needs custom encoder)
+		children [17]node
 		flags    nodeFlag
 	}
 	shortNode struct {
-		Key []byte
-		Val node
+		key   []byte
+		child node
 		flags nodeFlag
 	}
-	hashNode struct {
-		Hash thor.Bytes32
-		seq  uint64 // the sequence number
+	refNode struct {
+		hash []byte
+		ver  Version
 	}
 	valueNode struct {
-		Value []byte
-		meta  []byte // metadata of the value
+		val  []byte
+		meta []byte // metadata of the value
 	}
 )
 
-// EncodeRLP encodes a full node into the consensus RLP format.
-func (n *fullNode) EncodeRLP(w io.Writer) error {
-	return rlp.Encode(w, n.Children)
-}
+func (n *fullNode) Version() Version  { return n.flags.ref.ver }
+func (n *shortNode) Version() Version { return n.flags.ref.ver }
+func (n *refNode) Version() Version   { return n.ver }
+func (n *valueNode) Version() Version { return Version{} }
 
-// EncodeRLP encodes a hash node into the consensus RLP format.
-func (n *hashNode) EncodeRLP(w io.Writer) error {
-	return rlp.Encode(w, n.Hash)
-}
-
-// EncodeRLP encodes a value node into the consensus RLP format.
-func (n *valueNode) EncodeRLP(w io.Writer) error {
-	return rlp.Encode(w, n.Value)
-}
-
-func (n *fullNode) copy() *fullNode   { cpy := *n; return &cpy }
-func (n *shortNode) copy() *shortNode { cpy := *n; return &cpy }
+func (n *fullNode) copy() *fullNode   { copy := *n; return &copy }
+func (n *shortNode) copy() *shortNode { copy := *n; return &copy }
 
 // nodeFlag contains caching-related metadata about a node.
 type nodeFlag struct {
-	hash  *hashNode // cached hash of the node (may be nil)
-	dirty bool      // whether the node has changes that must be written to the database
-	gen   uint16    // cache generation counter
-}
-
-func (n *fullNode) cache() (*hashNode, bool, uint16) { return n.flags.hash, n.flags.dirty, n.flags.gen }
-func (n *shortNode) cache() (*hashNode, bool, uint16) {
-	return n.flags.hash, n.flags.dirty, n.flags.gen
-}
-func (n *hashNode) cache() (*hashNode, bool, uint16)  { return nil, true, 0 }
-func (n *valueNode) cache() (*hashNode, bool, uint16) { return nil, true, 0 }
-
-func (n *fullNode) seqNum() uint64 {
-	if n.flags.hash != nil {
-		return n.flags.hash.seq
-	}
-	return 0
+	ref   refNode // cached ref of the node
+	gen   uint16  // cache generation counter
+	dirty bool    // whether the node has changes that must be written to the database
 }
 
-func (n *shortNode) seqNum() uint64 {
-	if n.flags.hash != nil {
-		return n.flags.hash.seq
-	}
-	return 0
-}
-
-func (n *hashNode) seqNum() uint64  { return n.seq }
-func (n *valueNode) seqNum() uint64 { return 0 }
+func (n *fullNode) cache() (refNode, uint16, bool)  { return n.flags.ref, n.flags.gen, n.flags.dirty }
+func (n *shortNode) cache() (refNode, uint16, bool) { return n.flags.ref, n.flags.gen, n.flags.dirty }
+func (n *refNode) cache() (refNode, uint16, bool)   { return *n, 0, false }
+func (n *valueNode) cache() (refNode, uint16, bool) { return refNode{}, 0, true }
 
 // Pretty printing.
 func (n *fullNode) String() string  { return n.fstring("") }
 func (n *shortNode) String() string { return n.fstring("") }
-func (n *hashNode) String() string  { return n.fstring("") }
+func (n *refNode) String() string   { return n.fstring("") }
 func (n *valueNode) String() string { return n.fstring("") }
 
 func (n *fullNode) fstring(ind string) string {
 	resp := fmt.Sprintf("[\n%s ", ind)
-	for i, node := range n.Children {
+	for i, node := range n.children {
 		if node == nil {
 			resp += fmt.Sprintf("%s: ", indices[i])
 		} else {
@@ -128,194 +111,165 @@ func (n *fullNode) fstring(ind string) string {
 	return resp + fmt.Sprintf("\n%s] ", ind)
 }
 
 func (n *shortNode) fstring(ind string) string {
-	return fmt.Sprintf("{%x: %v} ", n.Key, n.Val.fstring(ind+" "))
-}
-func (n *hashNode) fstring(_ string) string {
-	return fmt.Sprintf("<%v> ", n.Hash)
-}
-func (n *valueNode) fstring(_ string) string {
-	return fmt.Sprintf("%x ", n.Value)
-}
-
-// trailing is the splitted rlp list of extra data of the trie node.
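The tag byte used throughout this hunk carries both pieces of bookkeeping: the node kind sits in its lower 3 bits and the attribute flags in the upper 5, and the attributes in turn announce which optional fields were physically written, so a ref node committed with skipHash and a zero version costs a single byte on disk. A standalone sketch of composing and splitting such a tag follows; the constants mirror the ones introduced above, while the buffer layout and the main function are illustrative only, not part of the patch.

package main

import (
	"encoding/binary"
	"fmt"
)

// node kinds (lower 3 bits of node tag), copied from the hunk above
const (
	kindEmpty byte = iota
	kindFull
	kindShort
	kindRef
	kindValue
)

// node attributes (upper 5 bits), copied from the hunk above
const (
	attrHasHash  = byte(1 << iota) // a 32-byte hash follows
	attrHasMajor                   // a big-endian uint32 ver.Major follows
	attrHasMinor                   // a big-endian uint32 ver.Minor follows
)

func main() {
	// encode a ref node at Version{Major: 7} with its hash omitted
	// (the skipHash case): only the Major field is written after the tag
	attrs := attrHasMajor
	buf := []byte{kindRef | attrs<<3}
	buf = binary.BigEndian.AppendUint32(buf, 7)

	// decode: split the tag the way decodeNode does...
	tag := buf[0]
	kind, gotAttrs := tag&0x7, tag>>3
	rest := buf[1:]

	// ...then consume only the fields the attrs announce, as decodeRef does
	var major, minor uint32
	if gotAttrs&attrHasHash != 0 {
		rest = rest[32:] // hash not present in this example
	}
	if gotAttrs&attrHasMajor != 0 {
		major, rest = binary.BigEndian.Uint32(rest), rest[4:]
	}
	if gotAttrs&attrHasMinor != 0 {
		minor, rest = binary.BigEndian.Uint32(rest), rest[4:]
	}
	fmt.Println(kind == kindRef, major, minor, len(rest)) // true 7 0 0
}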
-type trailing []byte - -func (t *trailing) next() ([]byte, error) { - if t == nil { - return nil, nil - } - if len(*t) == 0 { - return nil, io.EOF - } - - content, rest, err := rlp.SplitString(*t) - if err != nil { - return nil, err - } - - *t = rest - return content, nil + return fmt.Sprintf("{%x: %v} ", n.key, n.child.fstring(ind+" ")) } - -// NextSeq decodes the current list element to seq number and move to the next one. -// It returns io.EOF if reaches end. -func (t *trailing) NextSeq() (seq uint64, err error) { - content, err := t.next() - if err != nil { - return 0, err - } - if len(content) > 8 { - return 0, errors.New("encoded seq too long") - } - - for _, b := range content { - seq <<= 8 - seq |= uint64(b) - } - return +func (n *refNode) fstring(ind string) string { + return fmt.Sprintf("<%x> #%v", n.hash, n.ver) } - -// NextMeta returns the current list element as leaf metadata and move to the next one. -// It returns io.EOF if reaches end. -func (t *trailing) NextMeta() ([]byte, error) { - return t.next() +func (n *valueNode) fstring(ind string) string { + return fmt.Sprintf("%x - %x", n.val, n.meta) } -func mustDecodeNode(hash *hashNode, buf []byte, cacheGen uint16) node { - _, _, rest, err := rlp.Split(buf) - if err != nil { - panic(fmt.Sprintf("node %v: %v", hash.Hash, err)) - } - trailing := (*trailing)(&rest) - if len(rest) == 0 { - trailing = nil - } - buf = buf[:len(buf)-len(rest)] - n, err := decodeNode(hash, buf, trailing, cacheGen) +func mustDecodeNode(ref *refNode, buf []byte, cacheGen uint16) node { + n, _, err := decodeNode(ref, buf, cacheGen) if err != nil { - panic(fmt.Sprintf("node %v: %v", hash.Hash, err)) - } - if trailing != nil && len(*trailing) != 0 { - panic(fmt.Sprintf("node %v: trailing buffer not fully consumed", hash.Hash)) + panic(fmt.Sprintf("node %v: %v", ref, err)) } return n } -// decodeNode parses the RLP encoding of a trie node. -func decodeNode(hash *hashNode, buf []byte, trailing *trailing, cacheGen uint16) (node, error) { +// decodeNode parses a trie node in storage. 
+func decodeNode(ref *refNode, buf []byte, cacheGen uint16) (node, []byte, error) { if len(buf) == 0 { - return nil, io.ErrUnexpectedEOF - } - elems, _, err := rlp.SplitList(buf) - if err != nil { - return nil, fmt.Errorf("decode error: %v", err) - } - switch c, _ := rlp.CountValues(elems); c { - case 2: - n, err := decodeShort(hash, buf, elems, trailing, cacheGen) - return n, wrapError(err, "short") - case 17: - n, err := decodeFull(hash, buf, elems, trailing, cacheGen) - return n, wrapError(err, "full") - default: - return nil, fmt.Errorf("invalid number of list elements: %v", c) - } -} - -func decodeShort(hash *hashNode, buf, elems []byte, trailing *trailing, cacheGen uint16) (*shortNode, error) { - kbuf, rest, err := rlp.SplitString(elems) - if err != nil { - return nil, err - } - flag := nodeFlag{hash: hash, gen: cacheGen} - key := compactToHex(kbuf) - if hasTerm(key) { - // value node - val, _, err := rlp.SplitString(rest) + return nil, nil, io.ErrUnexpectedEOF + } + tag := buf[0] + buf = buf[1:] + kind, attrs := tag&0x7, tag>>3 + switch kind { + case kindEmpty: + return nil, buf, nil + case kindFull: + n, rest, err := decodeFull(ref, buf, cacheGen, attrs) if err != nil { - return nil, fmt.Errorf("invalid value node: %v", err) + return nil, nil, wrapError(err, "full") } - meta, err := trailing.NextMeta() + return n, rest, nil + case kindShort: + n, rest, err := decodeShort(ref, buf, cacheGen, attrs) if err != nil { - return nil, fmt.Errorf("invalid value meta: %v", err) + return nil, nil, wrapError(err, "short") } - - vn := &valueNode{Value: append([]byte(nil), val...)} - if len(meta) > 0 { - vn.meta = append([]byte(nil), meta...) + return n, rest, nil + case kindRef: + n, rest, err := decodeRef(&refNode{}, buf, attrs) + if err != nil { + return nil, nil, wrapError(err, "ref") + } + return n, rest, nil + case kindValue: + n, rest, err := decodeValue(buf, attrs) + if err != nil { + return nil, nil, wrapError(err, "value") } - return &shortNode{key, vn, flag}, nil + return n, rest, nil + default: + return nil, nil, fmt.Errorf("invalid node kind %v", kind) } +} - r, _, err := decodeRef(rest, trailing, cacheGen) - if err != nil { - return nil, wrapError(err, "val") +func decodeFull(ref *refNode, buf []byte, cacheGen uint16, attrs byte) (*fullNode, []byte, error) { + var ( + n = fullNode{flags: nodeFlag{gen: cacheGen}} + err error + refs []refNode // prealloced ref nodes + ) + if ref != nil { + n.flags.ref = *ref + } else { + n.flags.dirty = true + } + + // prealloc an array of refNode, to reduce alloc count + if (attrs & attrHasManyRef) != 0 { + refs = make([]refNode, 16) + } + + for i := range n.children { + if tag := buf[0]; tag&0x7 == kindRef { + var ref *refNode + if len(refs) > 0 { + ref = &refs[0] + refs = refs[1:] + } else { + ref = &refNode{} + } + if n.children[i], buf, err = decodeRef(ref, buf[1:], tag>>3); err != nil { + return nil, nil, wrapError(err, fmt.Sprintf("[%d]", i)) + } + } else { + if n.children[i], buf, err = decodeNode(nil, buf, cacheGen); err != nil { + return nil, nil, wrapError(err, fmt.Sprintf("[%d]", i)) + } + } } - return &shortNode{key, r, flag}, nil + return &n, buf, nil } -func decodeFull(hash *hashNode, _, elems []byte, trailing *trailing, cacheGen uint16) (*fullNode, error) { - n := &fullNode{flags: nodeFlag{hash: hash, gen: cacheGen}} - for i := 0; i < 16; i++ { - cld, rest, err := decodeRef(elems, trailing, cacheGen) - if err != nil { - return n, wrapError(err, fmt.Sprintf("[%d]", i)) - } - n.Children[i], elems = cld, rest +func decodeShort(ref 
*refNode, buf []byte, cacheGen uint16, attrs byte) (*shortNode, []byte, error) { + var ( + n = shortNode{flags: nodeFlag{gen: cacheGen}} + err error + compactKey []byte + ) + if ref != nil { + n.flags.ref = *ref + } else { + n.flags.dirty = true } - val, _, err := rlp.SplitString(elems) - if err != nil { - return n, err + + // decode key + if compactKey, buf, err = vp.SplitString(buf); err != nil { + return nil, nil, err } - if len(val) > 0 { - meta, err := trailing.NextMeta() - if err != nil { - return nil, fmt.Errorf("invalid value meta: %v", err) - } + n.key = compactToHex(compactKey) - vn := &valueNode{Value: append([]byte(nil), val...)} - if len(meta) > 0 { - vn.meta = append([]byte(nil), meta...) - } - n.Children[16] = vn + if hasTerm(n.key) { + // decode value + n.child, buf, err = decodeValue(buf, attrs) + } else { + // decode child node + n.child, buf, err = decodeNode(nil, buf, cacheGen) } - return n, nil + if err != nil { + return nil, nil, err + } + return &n, buf, nil } -const hashLen = len(thor.Bytes32{}) - -func decodeRef(buf []byte, trailing *trailing, cacheGen uint16) (node, []byte, error) { - kind, val, rest, err := rlp.Split(buf) - if err != nil { - return nil, buf, err +func decodeValue(buf []byte, attrs byte) (*valueNode, []byte, error) { + var ( + n valueNode + err error + ) + // decode val + if n.val, buf, err = vp.SplitString(buf); err != nil { + return nil, nil, err } - if kind == rlp.List { - // 'embedded' node reference. The encoding must be smaller - // than a hash in order to be valid. - if size := len(buf) - len(rest); size > hashLen { - err := fmt.Errorf("oversized embedded node (size is %d bytes, want size < %d)", size, hashLen) - return nil, buf, err + + // decode meta + if (attrs & attrHasMeta) != 0 { + if n.meta, buf, err = vp.SplitString(buf); err != nil { + return nil, nil, err } - n, err := decodeNode(nil, buf, trailing, cacheGen) - return n, rest, err - } - // string kind - valLen := len(val) - if valLen == 0 { - // empty node - return nil, rest, nil } - seq, err := trailing.NextSeq() - if err != nil { - return nil, nil, fmt.Errorf("invalid seq number: %v", err) + return &n, buf, nil +} + +func decodeRef(n *refNode, buf []byte, attrs byte) (*refNode, []byte, error) { + // decode hash + if (attrs & attrHasHash) != 0 { + n.hash, buf = buf[:32], buf[32:] } - if valLen == 32 { - return &hashNode{Hash: thor.BytesToBytes32(val), seq: seq}, rest, nil + + // decode version + if (attrs & attrHasMajor) != 0 { + n.ver.Major, buf = binary.BigEndian.Uint32(buf), buf[4:] } - if valLen == 1 && val[0] == nonCryptoNodeHashPlaceholder[0] { - return &hashNode{Hash: NonCryptoNodeHash, seq: seq}, rest, nil + if (attrs & attrHasMinor) != 0 { + n.ver.Minor, buf = binary.BigEndian.Uint32(buf), buf[4:] } - return nil, nil, fmt.Errorf("invalid RLP string size %d (want 0, 1 or 32)", len(val)) + return n, buf, nil } // wraps a decoding error with information about the path to the @@ -340,15 +294,160 @@ func (err *decodeError) Error() string { return fmt.Sprintf("%v (decode path: %s)", err.what, strings.Join(err.stack, "<-")) } -// VerifyNodeHash verifies the hash of the node blob (trailing excluded). 
-func VerifyNodeHash(blob, expectedHash []byte) (bool, error) { - // strip the trailing - _, _, trailing, err := rlp.Split(blob) - if err != nil { - return false, err +func (n *fullNode) encode(buf []byte, skipHash bool) []byte { + var ( + tagPos = len(buf) + nRefNode = 0 + ) + // encode tag + buf = append(buf, kindFull) + + // encode children + for _, cn := range n.children { + switch cn := cn.(type) { + case *refNode: + buf = cn.encode(buf, skipHash) + nRefNode++ + case nil: + buf = append(buf, kindEmpty) + default: + if ref, _, dirty := cn.cache(); dirty { + buf = cn.encode(buf, skipHash) + } else { + buf = ref.encode(buf, skipHash) + } + } + } + if nRefNode > 4 { + buf[tagPos] |= (attrHasManyRef << 3) } + return buf +} + +func (n *shortNode) encode(buf []byte, skipHash bool) []byte { + var ( + attrs byte + tagPos = len(buf) + ) + // encode tag + buf = append(buf, kindShort) + + // encode key + buf = vp.AppendUint32(buf, uint32(compactLen(n.key))) + buf = appendHexToCompact(buf, n.key) + + if hasTerm(n.key) { + vn := n.child.(*valueNode) + // encode value + buf = vp.AppendString(buf, vn.val) + // encode meta + if len(vn.meta) > 0 { + attrs |= attrHasMeta + buf = vp.AppendString(buf, vn.meta) + } + buf[tagPos] |= (attrs << 3) + } else { + // encode child node + if ref, _, dirty := n.child.cache(); dirty { + buf = n.child.encode(buf, skipHash) + } else { + buf = ref.encode(buf, skipHash) + } + } + return buf +} + +func (n *valueNode) encode(buf []byte, skipHash bool) []byte { + var ( + attrs byte + tagPos = len(buf) + ) + // encode tag + buf = append(buf, kindValue) + + // encode value + buf = vp.AppendString(buf, n.val) + + // encode meta + if len(n.meta) > 0 { + attrs |= attrHasMeta + buf = vp.AppendString(buf, n.meta) + } + buf[tagPos] |= (attrs << 3) + return buf +} + +func (n *refNode) encode(buf []byte, skipHash bool) []byte { + var ( + attrs byte + tagPos = len(buf) + ) + // encode tag + buf = append(buf, kindRef) + // encode hash + if !skipHash { + attrs |= attrHasHash + buf = append(buf, n.hash...) + } + // encode version + if n.ver.Major != 0 { + attrs |= attrHasMajor + buf = binary.BigEndian.AppendUint32(buf, n.ver.Major) + } + if n.ver.Minor != 0 { + attrs |= attrHasMinor + buf = binary.BigEndian.AppendUint32(buf, n.ver.Minor) + } + buf[tagPos] |= (attrs << 3) + return buf +} + +//// encodeConsensus + +func (n *fullNode) encodeConsensus(buf []byte) []byte { + offset := len(buf) + + for _, cn := range n.children { + switch cn := cn.(type) { + case *refNode: + buf = cn.encodeConsensus(buf) + case nil: + buf = drlp.AppendString(buf, nil) + default: + if ref, _, _ := cn.cache(); ref.hash != nil { + buf = drlp.AppendString(buf, ref.hash) + } else { + buf = cn.encodeConsensus(buf) + } + } + } + return drlp.EndList(buf, offset) +} + +func (n *shortNode) encodeConsensus(buf []byte) []byte { + offset := len(buf) + + const maxHeaderSize = 5 + // reserve space for rlp string header + buf = append(buf, make([]byte, maxHeaderSize)...) 
+ // compact the key just after reserved space + buf = appendHexToCompact(buf, n.key) + // encode the compact key in the right place + buf = drlp.AppendString(buf[:offset], buf[offset+maxHeaderSize:]) + + if ref, _, _ := n.child.cache(); ref.hash != nil { + buf = drlp.AppendString(buf, ref.hash) + } else { + buf = n.child.encodeConsensus(buf) + } + + return drlp.EndList(buf, offset) +} + +func (n *valueNode) encodeConsensus(buf []byte) []byte { + return drlp.AppendString(buf, n.val) +} - node := blob[:len(blob)-len(trailing)] - have := thor.Blake2b(node) - return bytes.Equal(expectedHash, have.Bytes()), nil +func (n *refNode) encodeConsensus(buf []byte) []byte { + return drlp.AppendString(buf, n.hash) } diff --git a/trie/node_test.go b/trie/node_test.go index 9f42b969b..a9853c7d2 100644 --- a/trie/node_test.go +++ b/trie/node_test.go @@ -17,75 +17,226 @@ package trie import ( + "io" "testing" "github.com/ethereum/go-ethereum/rlp" - "github.com/vechain/thor/v2/thor" + "github.com/stretchr/testify/assert" + "github.com/vechain/thor/v2/test/datagen" ) -// func TestCanUnload(t *testing.T) { -// tests := []struct { -// flag nodeFlag -// cachegen, cachelimit uint16 -// want bool -// }{ -// { -// flag: nodeFlag{dirty: true, gen: 0}, -// want: false, -// }, -// { -// flag: nodeFlag{dirty: false, gen: 0}, -// cachegen: 0, cachelimit: 0, -// want: true, -// }, -// { -// flag: nodeFlag{dirty: false, gen: 65534}, -// cachegen: 65535, cachelimit: 1, -// want: true, -// }, -// { -// flag: nodeFlag{dirty: false, gen: 65534}, -// cachegen: 0, cachelimit: 1, -// want: true, -// }, -// { -// flag: nodeFlag{dirty: false, gen: 1}, -// cachegen: 65535, cachelimit: 1, -// want: true, -// }, -// } - -// for _, test := range tests { -// if got := test.flag.canUnload(test.cachegen, test.cachelimit); got != test.want { -// t.Errorf("%+v\n got %t, want %t", test, got, test.want) -// } -// } -// } +func benchmarkEncodeFullNode(b *testing.B, consensus, skipHash bool) { + var ( + f = fullNode{} + buf []byte + ) + for i := 0; i < 16; i++ { + f.children[i] = &refNode{hash: datagen.RandomHash().Bytes()} + } + for i := 0; i < b.N; i++ { + if consensus { + buf = f.encodeConsensus(buf[:0]) + } else { + buf = f.encode(buf[:0], skipHash) + } + } +} +func benchmarkEncodeShortNode(b *testing.B, consensus bool) { + var ( + s = shortNode{ + key: []byte{0x1, 0x2, 0x10}, + child: &valueNode{val: datagen.RandBytes(32)}, + } + buf []byte + ) + + for i := 0; i < b.N; i++ { + if consensus { + buf = s.encodeConsensus(buf[:0]) + } else { + buf = s.encode(buf[:0], false) + } + } +} func BenchmarkEncodeFullNode(b *testing.B) { - var buf sliceBuffer - f := &fullNode{} - for i := 0; i < len(f.Children); i++ { - f.Children[i] = &hashNode{Hash: thor.BytesToBytes32(randBytes(32))} + benchmarkEncodeFullNode(b, false, false) +} + +func BenchmarkEncodeFullNodeSkipHash(b *testing.B) { + benchmarkEncodeFullNode(b, false, true) +} + +func BenchmarkEncodeFullNodeConsensus(b *testing.B) { + benchmarkEncodeFullNode(b, true, false) +} + +func BenchmarkEncodeShortNode(b *testing.B) { + benchmarkEncodeShortNode(b, false) +} + +func BenchmarkEncodeShortNodeConsensus(b *testing.B) { + benchmarkEncodeShortNode(b, true) +} + +func benchmarkDecodeFullNode(b *testing.B, skipHash bool) { + f := fullNode{} + for i := 0; i < 16; i++ { + f.children[i] = &refNode{hash: datagen.RandomHash().Bytes()} } + enc := f.encode(nil, skipHash) for i := 0; i < b.N; i++ { - buf.Reset() - rlp.Encode(&buf, f) + mustDecodeNode(nil, enc, 0) } } -func BenchmarkFastEncodeFullNode(b 
*testing.B) { - f := &fullNode{} - for i := 0; i < len(f.Children); i++ { - f.Children[i] = &hashNode{Hash: thor.BytesToBytes32(randBytes(32))} - } +func BenchmarkDecodeFullNode(b *testing.B) { + benchmarkDecodeFullNode(b, false) +} + +func BenchmarkDecodeFullNodeSkipHash(b *testing.B) { + benchmarkDecodeFullNode(b, true) +} - h := newHasher(0, 0) +func BenchmarkDecodeShortNode(b *testing.B) { + s := shortNode{ + key: []byte{0x1, 0x2, 0x10}, + child: &valueNode{val: datagen.RandBytes(32)}, + } + enc := s.encode(nil, false) for i := 0; i < b.N; i++ { - h.enc.Reset() - f.encode(&h.enc, false) - h.tmp.Reset() - h.enc.ToWriter(&h.tmp) + mustDecodeNode(nil, enc, 0) + } +} + +type fNode struct { + Children [17]interface{} +} + +func (f *fNode) EncodeRLP(w io.Writer) error { + return rlp.Encode(w, f.Children) +} + +type sNode struct { + Key []byte + Val interface{} +} +type vNode []byte +type hNode []byte + +func TestRefNodeEncodeConsensus(t *testing.T) { + for i := 0; i < 10; i++ { + randHash := datagen.RandomHash() + + h := hNode(randHash.Bytes()) + ref := &refNode{hash: randHash.Bytes()} + + expected, err := rlp.EncodeToBytes(h) + assert.Nil(t, err) + actual := ref.encodeConsensus(nil) + + assert.Equal(t, expected, actual) + } +} + +func TestValueNodeEncodeConsensus(t *testing.T) { + for i := 0; i < 10; i++ { + randValue := datagen.RandBytes(datagen.RandIntN(30)) + + v := vNode(randValue) + value := &valueNode{val: randValue} + + expected, err := rlp.EncodeToBytes(v) + assert.Nil(t, err) + actual := value.encodeConsensus(nil) + + assert.Equal(t, expected, actual) + } +} + +func TestShortNodeEncodeConsensus(t *testing.T) { + for i := 0; i < 10; i++ { + randKey := datagen.RandBytes(datagen.RandIntN(32)) + randValue := datagen.RandBytes(datagen.RandIntN(30)) + + randKey = append(randKey, 16) + s := &sNode{Key: hexToCompact(randKey), Val: vNode(randValue)} + short := &shortNode{key: randKey, child: &valueNode{val: randValue}} + + expected, err := rlp.EncodeToBytes(s) + assert.Nil(t, err) + actual := short.encodeConsensus(nil) + + assert.Equal(t, expected, actual) + } + + for i := 0; i < 10; i++ { + randKey := datagen.RandBytes(datagen.RandIntN(32)) + randHash := datagen.RandomHash() + + s := &sNode{Key: hexToCompact(randKey), Val: hNode(randHash.Bytes())} + short := &shortNode{key: randKey, child: &refNode{hash: randHash.Bytes()}} + + expected, err := rlp.EncodeToBytes(s) + assert.Nil(t, err) + actual := short.encodeConsensus(nil) + + assert.Equal(t, expected, actual) + } +} + +func TestFullNodeEncodeConsensus(t *testing.T) { + for i := 0; i < 10; i++ { + randValue := datagen.RandBytes(datagen.RandIntN(30)) + + var ( + f fNode + full fullNode + ) + + for i := 0; i < 16; i++ { + if datagen.RandIntN(2) == 1 { + randHash := datagen.RandomHash() + + f.Children[i] = hNode(randHash.Bytes()) + full.children[i] = &refNode{hash: randHash.Bytes()} + } else { + f.Children[i] = vNode(nil) + } + } + f.Children[16] = vNode(randValue) + full.children[16] = &valueNode{val: randValue} + + expected, err := rlp.EncodeToBytes(&f) + assert.Nil(t, err) + actual := full.encodeConsensus(nil) + + assert.Equal(t, expected, actual) + } + + for i := 0; i < 10; i++ { + var ( + f fNode + full fullNode + ) + + for i := 0; i < 16; i++ { + if datagen.RandIntN(2) == 1 { + randHash := datagen.RandomHash() + + f.Children[i] = hNode(randHash.Bytes()) + full.children[i] = &refNode{hash: randHash.Bytes()} + } else { + f.Children[i] = vNode(nil) + } + } + f.Children[16] = vNode(nil) + + expected, err := rlp.EncodeToBytes(&f) + 
assert.Nil(t, err) + actual := full.encodeConsensus(nil) + + assert.Equal(t, expected, actual) } } diff --git a/trie/proof.go b/trie/proof.go deleted file mode 100644 index 735bddeb7..000000000 --- a/trie/proof.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package trie - -import ( - "bytes" - "fmt" - - "github.com/vechain/thor/v2/thor" -) - -// Prove constructs a merkle proof for key. The result contains all -// encoded nodes on the path to the value at key. The value itself is -// also included in the last node and can be retrieved by verifying -// the proof. -// -// If the trie does not contain a value for key, the returned proof -// contains all nodes of the longest existing prefix of the key -// (at least the root node), ending with the node that proves the -// absence of the key. -func (t *Trie) Prove(key []byte, fromLevel uint, proofDb DatabaseWriter) error { - // Collect all nodes on the path to key. - key = keybytesToHex(key) - nodes := []node{} - tn := t.root - for len(key) > 0 && tn != nil { - switch n := tn.(type) { - case *shortNode: - if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) { - // The trie doesn't contain the key. - tn = nil - } else { - tn = n.Val - key = key[len(n.Key):] - } - nodes = append(nodes, n) - case *fullNode: - tn = n.Children[key[0]] - key = key[1:] - nodes = append(nodes, n) - case *hashNode: - var err error - tn, err = t.resolveHash(n, nil) - if err != nil { - logger.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - return err - } - default: - panic(fmt.Sprintf("%T: invalid node: %v", tn, tn)) - } - } - hasher := newHasher(0, 0) - for i, n := range nodes { - // Don't bother checking for errors here since hasher panics - // if encoding doesn't work and we're not writing to any database. - n, _, _ = hasher.hashChildren(n, nil, nil) - hn, _ := hasher.store(n, nil, nil, false) - if hash, ok := hn.(*hashNode); ok || i == 0 { - // If the node's database encoding is a hash (or is the - // root node), it becomes a proof element. - if fromLevel > 0 { - fromLevel-- - } else { - hasher.enc.Reset() - n.encode(&hasher.enc, hasher.nonCrypto) - hasher.tmp.Reset() - hasher.enc.ToWriter(&hasher.tmp) - if ok { - proofDb.Put(hash.Hash[:], hasher.tmp) - } else { - proofDb.Put(thor.Blake2b(hasher.tmp).Bytes(), hasher.tmp) - } - } - } - } - return nil -} - -// VerifyProof checks merkle proofs. The given proof must contain the -// value for key in a trie with the given root hash. VerifyProof -// returns an error if the proof contains invalid trie nodes or the -// wrong value. 
-func VerifyProof(rootHash thor.Bytes32, key []byte, proofDb DatabaseReader) (value []byte, err error, nodes int) { - key = keybytesToHex(key) - wantHash := rootHash - for i := 0; ; i++ { - buf, _ := proofDb.Get(wantHash[:]) - if buf == nil { - return nil, fmt.Errorf("proof node %d (hash %064x) missing", i, wantHash[:]), i - } - n, err := decodeNode(&hashNode{Hash: wantHash}, buf, nil, 0) - if err != nil { - return nil, fmt.Errorf("bad proof node %d: %v", i, err), i - } - keyrest, cld := get(n, key) - switch cld := cld.(type) { - case nil: - // The trie doesn't contain the key. - return nil, nil, i - case *hashNode: - key = keyrest - wantHash = cld.Hash - case *valueNode: - return cld.Value, nil, i + 1 - } - } -} - -func get(tn node, key []byte) ([]byte, node) { - for { - switch n := tn.(type) { - case *shortNode: - if len(key) < len(n.Key) || !bytes.Equal(n.Key, key[:len(n.Key)]) { - return nil, nil - } - tn = n.Val - key = key[len(n.Key):] - case *fullNode: - tn = n.Children[key[0]] - key = key[1:] - case *hashNode: - return key, n - case nil: - return key, nil - case *valueNode: - return nil, n - default: - panic(fmt.Sprintf("%T: invalid node: %v", tn, tn)) - } - } -} diff --git a/trie/proof_test.go b/trie/proof_test.go deleted file mode 100644 index 40b972bf8..000000000 --- a/trie/proof_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -// #nosec G404 -package trie - -import ( - "bytes" - crand "crypto/rand" - mrand "math/rand/v2" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/vechain/thor/v2/thor" -) - -func TestProof(t *testing.T) { - trie, vals := randomTrie(500) - root := trie.Hash() - for _, kv := range vals { - proofs := ethdb.NewMemDatabase() - if trie.Prove(kv.k, 0, proofs) != nil { - t.Fatalf("missing key %x while constructing proof", kv.k) - } - val, err, _ := VerifyProof(root, kv.k, proofs) - if err != nil { - t.Fatalf("VerifyProof error for key %x: %v\nraw proof: %v", kv.k, err, proofs) - } - if !bytes.Equal(val, kv.v) { - t.Fatalf("VerifyProof returned wrong value for key %x: got %x, want %x", kv.k, val, kv.v) - } - } -} - -func TestOneElementProof(t *testing.T) { - trie := new(Trie) - updateString(trie, "k", "v") - proofs := ethdb.NewMemDatabase() - trie.Prove([]byte("k"), 0, proofs) - if len(proofs.Keys()) != 1 { - t.Error("proof should have one element") - } - val, err, _ := VerifyProof(trie.Hash(), []byte("k"), proofs) - if err != nil { - t.Fatalf("VerifyProof error: %v\nproof hashes: %v", err, proofs.Keys()) - } - if !bytes.Equal(val, []byte("v")) { - t.Fatalf("VerifyProof returned wrong value: got %x, want 'k'", val) - } -} - -func TestVerifyBadProof(t *testing.T) { - trie, vals := randomTrie(800) - root := trie.Hash() - for _, kv := range vals { - proofs := ethdb.NewMemDatabase() - trie.Prove(kv.k, 0, proofs) - if len(proofs.Keys()) == 0 { - t.Fatal("zero length proof") - } - keys := proofs.Keys() - key := keys[mrand.N(len(keys))] - node, _ := proofs.Get(key) - proofs.Delete(key) - mutateByte(node) - proofs.Put(thor.Blake2b(node).Bytes(), node) - if _, err, _ := VerifyProof(root, kv.k, proofs); err == nil { - t.Fatalf("expected proof to fail for key %x", kv.k) - } - } -} - -// mutateByte changes one byte in b. 
-func mutateByte(b []byte) { - for r := mrand.N(len(b)); ; { - new := byte(mrand.N(255)) - if new != b[r] { - b[r] = new - break - } - } -} - -func BenchmarkProve(b *testing.B) { - trie, vals := randomTrie(100) - var keys []string - for k := range vals { - keys = append(keys, k) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - kv := vals[keys[i%len(keys)]] - proofs := ethdb.NewMemDatabase() - if trie.Prove(kv.k, 0, proofs); len(proofs.Keys()) == 0 { - b.Fatalf("zero length proof for %x", kv.k) - } - } -} - -func BenchmarkVerifyProof(b *testing.B) { - trie, vals := randomTrie(100) - root := trie.Hash() - var keys []string - var proofs []*ethdb.MemDatabase - for k := range vals { - keys = append(keys, k) - proof := ethdb.NewMemDatabase() - trie.Prove([]byte(k), 0, proof) - proofs = append(proofs, proof) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - im := i % len(keys) - if _, err, _ := VerifyProof(root, []byte(keys[im]), proofs[im]); err != nil { - b.Fatalf("key %x: %v", keys[im], err) - } - } -} - -func randomTrie(n int) (*Trie, map[string]*kv) { - trie := new(Trie) - vals := make(map[string]*kv) - for i := byte(0); i < 100; i++ { - value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} - value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false} - trie.Update(value.k, value.v) - trie.Update(value2.k, value2.v) - vals[string(value.k)] = value - vals[string(value2.k)] = value2 - } - for i := 0; i < n; i++ { - value := &kv{randBytes(32), randBytes(20), false} - trie.Update(value.k, value.v) - vals[string(value.k)] = value - } - return trie, vals -} - -func randBytes(n int) []byte { - r := make([]byte, n) - crand.Read(r) - return r -} diff --git a/trie/trie.go b/trie/trie.go index 62308aa5b..bb75f1ee0 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -22,51 +22,63 @@ import ( "fmt" "github.com/ethereum/go-ethereum/rlp" - "github.com/vechain/thor/v2/log" "github.com/vechain/thor/v2/thor" ) var ( // This is the known root hash of an empty trie. emptyRoot = thor.Blake2b(rlp.EmptyString) - // This is the known hash of an empty state trie entry. - emptyState = thor.Blake2b(nil) - - logger = log.WithContext("pkg", "trie") ) -// Database must be implemented by backing stores for the trie. -type Database interface { - DatabaseReader - DatabaseWriter +// Version is the version number of a standalone trie node. +type Version struct { + Major, + Minor uint32 +} + +// String pretty prints version. +func (v Version) String() string { + return fmt.Sprintf("%v.%v", v.Major, v.Minor) +} + +// Compare compares with b. +// The result will be 0 if a == b, -1 if a < b, and +1 if a > b. +func (a Version) Compare(b Version) int { + if a.Major > b.Major { + return 1 + } + if a.Major < b.Major { + return -1 + } + if a.Minor > b.Minor { + return 1 + } + if a.Minor < b.Minor { + return -1 + } + return 0 +} + +// Root wraps hash and version of the root node. +type Root struct { + Hash thor.Bytes32 + Ver Version } +// Node is the alias of inner node type. +type Node = node + // DatabaseReader wraps the Get method of a backing store for the trie. type DatabaseReader interface { - Get(key []byte) (value []byte, err error) + Get(path []byte, ver Version) (value []byte, err error) } // DatabaseWriter wraps the Put method of a backing store for the trie. type DatabaseWriter interface { - // Put stores the mapping key->value in the database. + // Put stores the mapping (path, ver)->value in the database. 
// Implementations must not hold onto the value bytes, the trie // will reuse the slice across calls to Put. - Put(key, value []byte) error -} - -// DatabaseReaderTo wraps the GetTo method of backing store for the trie. -// The purpose of this interface is to reuse read buffer and avoid allocs. -// If the database implements this interface, DatabaseReader.Get will not be called when resolving nodes. -type DatabaseReaderTo interface { - // GetTo gets value for the given key and append to dst. - GetTo(key, dst []byte) (value []byte, err error) -} - -// DatabaseKeyEncoder defines the method how to produce database key. -// If the database implements this interface, everytime before save the node, Encode is called and its -// return-value will be the saving key instead of node hash. -type DatabaseKeyEncoder interface { - Encode(hash []byte, seq uint64, path []byte) []byte + Put(path []byte, ver Version, value []byte) error } // Trie is a Merkle Patricia Trie. @@ -76,103 +88,117 @@ type DatabaseKeyEncoder interface { // Trie is not safe for concurrent use. type Trie struct { root node - db Database + db DatabaseReader cacheGen uint16 // cache generation counter for next committed nodes cacheTTL uint16 // the life time of cached nodes } +// SetCacheTTL sets the number of 'cache generations' to keep. +// A cache generation is increased by a call to Commit. +func (t *Trie) SetCacheTTL(ttl uint16) { + t.cacheTTL = ttl +} + // newFlag returns the cache flag value for a newly created node. func (t *Trie) newFlag() nodeFlag { return nodeFlag{dirty: true, gen: t.cacheGen} } +// RootNode returns the root node. +func (t *Trie) RootNode() Node { + return t.root +} + // New creates a trie with an existing root node from db. // -// If root is the zero hash or the blake2b hash of an empty string, the -// trie is initially empty and does not require a database. Otherwise, -// New will panic if db is nil and returns a MissingNodeError if root does -// not exist in the database. Accessing the trie loads nodes from db on demand. -func New(root thor.Bytes32, db Database) (*Trie, error) { - trie := &Trie{db: db} - if (root != thor.Bytes32{}) && root != emptyRoot { - if db == nil { - panic("trie.New: cannot use existing root without a database") - } - rootnode, err := trie.resolveHash(&hashNode{Hash: root}, nil) - if err != nil { - return nil, err - } - trie.root = rootnode +// If root hash is zero or the hash of an empty string, the trie is initially empty . +// Accessing the trie loads nodes from db on demand. +func New(root Root, db DatabaseReader) *Trie { + if root.Hash == emptyRoot || root.Hash.IsZero() { + return &Trie{db: db} } - return trie, nil -} -// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at -// the key after the given start key. -func (t *Trie) NodeIterator(start []byte) NodeIterator { - return newNodeIterator(t, start, func(seq uint64) bool { return true }, false, false) + return &Trie{ + root: &refNode{root.Hash.Bytes(), root.Ver}, + db: db, + } } -// Get returns the value for key stored in the trie. -// The value bytes must not be modified by the caller. -func (t *Trie) Get(key []byte) []byte { - res, err := t.TryGet(key) - if err != nil { - logger.Error(fmt.Sprintf("Unhandled trie error: %v", err)) +// FromRootNode creates a trie from a live root node. 
+func FromRootNode(rootNode Node, db DatabaseReader) *Trie { + if rootNode != nil { + _, gen, _ := rootNode.cache() + return &Trie{ + root: rootNode, + db: db, + cacheGen: gen + 1, // cacheGen is always one bigger than gen of root node + } } - return res + // allows nil root node + return &Trie{db: db} +} + +// NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at +// the key after the given start key. Nodes with version smaller than minVer are filtered out. +func (t *Trie) NodeIterator(start []byte, minVer Version) NodeIterator { + return newNodeIterator(t, start, minVer) } -// TryGet returns the value for key stored in the trie. -// The value bytes must not be modified by the caller. +// Get returns the value with meta for key stored in the trie. +// The value and meta bytes must not be modified by the caller. // If a node was not found in the database, a MissingNodeError is returned. -func (t *Trie) TryGet(key []byte) ([]byte, error) { - value, newroot, err := t.tryGet(t.root, keybytesToHex(key), 0) - if t.root != newroot { - t.root = newroot - } +func (t *Trie) Get(key []byte) ([]byte, []byte, error) { + value, newRoot, _, err := t.tryGet(t.root, keybytesToHex(key), 0) if err != nil { - return nil, err + return nil, nil, err } + t.root = newRoot if value != nil { - return value.Value, nil + return value.val, value.meta, nil } - return nil, nil + return nil, nil, nil } -func (t *Trie) tryGet(origNode node, key []byte, pos int) (value *valueNode, newnode node, err error) { +func (t *Trie) tryGet(origNode node, key []byte, pos int) (value *valueNode, newnode node, didResolve bool, err error) { switch n := (origNode).(type) { case nil: - return nil, nil, nil + return nil, nil, false, nil case *valueNode: - return n, n, nil + return n, n, false, nil case *shortNode: - if len(key)-pos < len(n.Key) || !bytes.Equal(n.Key, key[pos:pos+len(n.Key)]) { + if len(key)-pos < len(n.key) || !bytes.Equal(n.key, key[pos:pos+len(n.key)]) { // key not found in trie - return nil, n, nil + return nil, n, false, nil + } + if value, newnode, didResolve, err = t.tryGet(n.child, key, pos+len(n.key)); err != nil { + return } - value, newnode, err = t.tryGet(n.Val, key, pos+len(n.Key)) - if newnode != nil && newnode != n.Val { + if didResolve { n = n.copy() - n.Val = newnode + n.child = newnode + n.flags.gen = t.cacheGen } - return value, n, err + return value, n, didResolve, nil case *fullNode: - child := n.Children[key[pos]] - value, newnode, err = t.tryGet(child, key, pos+1) - if newnode != nil && newnode != child { + if value, newnode, didResolve, err = t.tryGet(n.children[key[pos]], key, pos+1); err != nil { + return + } + if didResolve { n = n.copy() - n.Children[key[pos]] = newnode + n.flags.gen = t.cacheGen + n.children[key[pos]] = newnode } - return value, n, err - case *hashNode: - child, err := t.resolveHash(n, key[:pos]) - if err != nil { - return nil, n, err + return value, n, didResolve, nil + case *refNode: + var child node + if child, err = t.resolveRef(n, key[:pos]); err != nil { + return + } + if value, newnode, _, err = t.tryGet(child, key, pos); err != nil { + return } - value, newnode, err := t.tryGet(child, key, pos) - return value, newnode, err + return value, newnode, true, nil default: panic(fmt.Sprintf("%T: invalid node: %v", origNode, origNode)) } @@ -184,24 +210,12 @@ func (t *Trie) tryGet(origNode node, key []byte, pos int) (value *valueNode, new // // The value bytes must not be modified by the caller while they are // stored in the trie. 
-func (t *Trie) Update(key, value []byte) { - if err := t.TryUpdate(key, value); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - } -} - -// TryUpdate associates key with value in the trie. Subsequent calls to -// Get will return value. If value has length zero, any existing value -// is deleted from the trie and calls to Get will return nil. -// -// The value bytes must not be modified by the caller while they are -// stored in the trie. // // If a node was not found in the database, a MissingNodeError is returned. -func (t *Trie) TryUpdate(key, value []byte) error { +func (t *Trie) Update(key, value, meta []byte) error { k := keybytesToHex(key) if len(value) != 0 { - _, n, err := t.insert(t.root, nil, k, &valueNode{Value: value}) + _, n, err := t.insert(t.root, nil, k, &valueNode{value, meta}) if err != nil { return err } @@ -219,32 +233,32 @@ func (t *Trie) TryUpdate(key, value []byte) error { func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error) { if len(key) == 0 { if v, ok := n.(*valueNode); ok { - _v := value.(*valueNode) + newVal := value.(*valueNode) // dirty when value or meta is not equal - return !bytes.Equal(v.Value, _v.Value) || !bytes.Equal(v.meta, _v.meta), value, nil + return !bytes.Equal(v.val, newVal.val) || !bytes.Equal(v.meta, newVal.meta), value, nil } return true, value, nil } switch n := n.(type) { case *shortNode: - matchlen := prefixLen(key, n.Key) + matchlen := prefixLen(key, n.key) // If the whole key matches, keep this short node as is // and only update the value. - if matchlen == len(n.Key) { - dirty, nn, err := t.insert(n.Val, append(prefix, key[:matchlen]...), key[matchlen:], value) + if matchlen == len(n.key) { + dirty, nn, err := t.insert(n.child, append(prefix, key[:matchlen]...), key[matchlen:], value) if !dirty || err != nil { return false, n, err } - return true, &shortNode{n.Key, nn, t.newFlag()}, nil + return true, &shortNode{n.key, nn, t.newFlag()}, nil } // Otherwise branch out at the index where they differ. branch := &fullNode{flags: t.newFlag()} var err error - _, branch.Children[n.Key[matchlen]], err = t.insert(nil, append(prefix, n.Key[:matchlen+1]...), n.Key[matchlen+1:], n.Val) + _, branch.children[n.key[matchlen]], err = t.insert(nil, append(prefix, n.key[:matchlen+1]...), n.key[matchlen+1:], n.child) if err != nil { return false, nil, err } - _, branch.Children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value) + _, branch.children[key[matchlen]], err = t.insert(nil, append(prefix, key[:matchlen+1]...), key[matchlen+1:], value) if err != nil { return false, nil, err } @@ -256,23 +270,23 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil case *fullNode: - dirty, nn, err := t.insert(n.Children[key[0]], append(prefix, key[0]), key[1:], value) + dirty, nn, err := t.insert(n.children[key[0]], append(prefix, key[0]), key[1:], value) if !dirty || err != nil { return false, n, err } n = n.copy() n.flags = t.newFlag() - n.Children[key[0]] = nn + n.children[key[0]] = nn return true, n, nil case nil: return true, &shortNode{key, value, t.newFlag()}, nil - case *hashNode: + case *refNode: // We've hit a part of the trie that isn't loaded yet. Load // the node and insert into it. This leaves all child nodes on // the path to the value in the trie. 
- rn, err := t.resolveHash(n, prefix) + rn, err := t.resolveRef(n, prefix) if err != nil { return false, nil, err } @@ -287,33 +301,14 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error } } -// Delete removes any existing value for key from the trie. -func (t *Trie) Delete(key []byte) { - if err := t.TryDelete(key); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - } -} - -// TryDelete removes any existing value for key from the trie. -// If a node was not found in the database, a MissingNodeError is returned. -func (t *Trie) TryDelete(key []byte) error { - k := keybytesToHex(key) - _, n, err := t.delete(t.root, nil, k) - if err != nil { - return err - } - t.root = n - return nil -} - // delete returns the new root of the trie with key deleted. // It reduces the trie to minimal form by simplifying // nodes on the way up after deleting recursively. func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { switch n := n.(type) { case *shortNode: - matchlen := prefixLen(key, n.Key) - if matchlen < len(n.Key) { + matchlen := prefixLen(key, n.key) + if matchlen < len(n.key) { return false, n, nil // don't replace n on mismatch } if matchlen == len(key) { @@ -323,7 +318,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { // from the subtrie. Child can never be nil here since the // subtrie must contain at least two other values with keys // longer than n.Key. - dirty, child, err := t.delete(n.Val, append(prefix, key[:len(n.Key)]...), key[len(n.Key):]) + dirty, child, err := t.delete(n.child, append(prefix, key[:len(n.key)]...), key[len(n.key):]) if !dirty || err != nil { return false, n, err } @@ -335,19 +330,19 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { // always creates a new slice) instead of append to // avoid modifying n.Key since it might be shared with // other nodes. - return true, &shortNode{concat(n.Key, child.Key...), child.Val, t.newFlag()}, nil + return true, &shortNode{concat(n.key, child.key...), child.child, t.newFlag()}, nil default: - return true, &shortNode{n.Key, child, t.newFlag()}, nil + return true, &shortNode{n.key, child, t.newFlag()}, nil } case *fullNode: - dirty, nn, err := t.delete(n.Children[key[0]], append(prefix, key[0]), key[1:]) + dirty, nn, err := t.delete(n.children[key[0]], append(prefix, key[0]), key[1:]) if !dirty || err != nil { return false, n, err } n = n.copy() n.flags = t.newFlag() - n.Children[key[0]] = nn + n.children[key[0]] = nn // Check how many non-nil entries are left after deleting and // reduce the full node to a short node if only one entry is @@ -359,7 +354,7 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { // value that is left in n or -2 if n contains at least two // values. pos := -1 - for i, cld := range n.Children { + for i, cld := range n.children { if cld != nil { if pos == -1 { pos = i @@ -377,18 +372,18 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { // shortNode{..., shortNode{...}}. Since the entry // might not be loaded yet, resolve it just for this // check. - cnode, err := t.resolve(n.Children[pos], append(prefix, byte(pos))) + cnode, err := t.resolve(n.children[pos], append(prefix, byte(pos))) if err != nil { return false, nil, err } if cnode, ok := cnode.(*shortNode); ok { - k := append([]byte{byte(pos)}, cnode.Key...) - return true, &shortNode{k, cnode.Val, t.newFlag()}, nil + k := append([]byte{byte(pos)}, cnode.key...) 
+ return true, &shortNode{k, cnode.child, t.newFlag()}, nil } } // Otherwise, n is replaced by a one-nibble short node // containing the child. - return true, &shortNode{[]byte{byte(pos)}, n.Children[pos], t.newFlag()}, nil + return true, &shortNode{[]byte{byte(pos)}, n.children[pos], t.newFlag()}, nil } // n still contains at least two values and cannot be reduced. return true, n, nil @@ -399,11 +394,11 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { case nil: return false, nil, nil - case *hashNode: + case *refNode: // We've hit a part of the trie that isn't loaded yet. Load // the node and delete from it. This leaves all child nodes on // the path to the value in the trie. - rn, err := t.resolveHash(n, prefix) + rn, err := t.resolveRef(n, prefix) if err != nil { return false, nil, err } @@ -426,84 +421,67 @@ func concat(s1 []byte, s2 ...byte) []byte { } func (t *Trie) resolve(n node, prefix []byte) (node, error) { - if n, ok := n.(*hashNode); ok { - node, err := t.resolveHash(n, prefix) + if ref, ok := n.(*refNode); ok { + node, err := t.resolveRef(ref, prefix) return node, err } return n, nil } -func (t *Trie) resolveHash(n *hashNode, prefix []byte) (node node, err error) { - key := n.Hash[:] - if ke, ok := t.db.(DatabaseKeyEncoder); ok { - key = ke.Encode(n.Hash[:], n.seq, prefix) - } - - var blob []byte - if r, ok := t.db.(DatabaseReaderTo); ok { - h := newHasher(0, 0) - defer returnHasherToPool(h) - if blob, err = r.GetTo(key, h.tmp[:0]); err != nil { - return nil, &MissingNodeError{NodeHash: n, Path: prefix, Err: err} - } - h.tmp = blob - } else { - if blob, err = t.db.Get(key); err != nil { - return nil, &MissingNodeError{NodeHash: n, Path: prefix, Err: err} - } - } - if len(blob) == 0 { - return nil, &MissingNodeError{NodeHash: n, Path: prefix} +func (t *Trie) resolveRef(ref *refNode, prefix []byte) (node, error) { + blob, err := t.db.Get(prefix, ref.ver) + if err != nil { + return nil, &MissingNodeError{Ref: *ref, Path: prefix, Err: err} } - return mustDecodeNode(n, blob, t.cacheGen), nil + return mustDecodeNode(ref, blob, t.cacheGen), nil } -// Root returns the root hash of the trie. -// Deprecated: use Hash instead. -func (t *Trie) Root() []byte { return t.Hash().Bytes() } - // Hash returns the root hash of the trie. It does not write to the // database and can be used even if the trie doesn't have one. func (t *Trie) Hash() thor.Bytes32 { - hash, cached, _ := t.hashRoot(nil) - t.root = cached - return hash.(*hashNode).Hash + if t.root == nil { + return emptyRoot + } + + h := hasherPool.Get().(*hasher) + defer hasherPool.Put(h) + + hash := h.hash(t.root, true) + return thor.BytesToBytes32(hash) } // Commit writes all nodes to the trie's database. -// Nodes are stored with their blake2b hash as the key. // // Committing flushes nodes from memory. // Subsequent Get calls will load nodes from the database. -func (t *Trie) Commit() (root thor.Bytes32, err error) { - if t.db == nil { - panic("Commit called on trie with nil database") +// If skipHash is true, less disk space is taken up but crypto features of merkle trie lost. +func (t *Trie) Commit(db DatabaseWriter, newVer Version, skipHash bool) error { + if t.root == nil { + return nil } - return t.CommitTo(t.db) -} -// CommitTo writes all nodes to the given database. -// Nodes are stored with their blake2b hash as the key. -// -// Committing flushes nodes from memory. Subsequent Get calls will -// load nodes from the trie's database. 
Calling code must ensure that -// the changes made to db are written back to the trie's attached -// database before using the trie. -func (t *Trie) CommitTo(db DatabaseWriter) (root thor.Bytes32, err error) { - hash, cached, err := t.hashRoot(db) + // the root node might be refNode, resolve it before later process. + resolved, err := t.resolve(t.root, nil) if err != nil { - return (thor.Bytes32{}), err + return err } - t.root = cached - t.cacheGen++ - return hash.(*hashNode).Hash, nil -} -func (t *Trie) hashRoot(db DatabaseWriter) (node, node, error) { - if t.root == nil { - return &hashNode{Hash: emptyRoot}, nil, nil + h := hasherPool.Get().(*hasher) + defer hasherPool.Put(h) + if !skipHash { + // hash the resolved root node before storing + h.hash(resolved, true) + } + + h.newVer = newVer + h.cacheTTL = t.cacheTTL + h.skipHash = skipHash + + rn, err := h.store(resolved, db, nil) + if err != nil { + return err } - h := newHasher(t.cacheGen, t.cacheTTL) - defer returnHasherToPool(h) - return h.hash(t.root, db, nil, true) + t.root = rn + t.cacheGen++ + return nil } diff --git a/trie/trie_test.go b/trie/trie_test.go index 78c1ce7ce..bc7c284a2 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -22,13 +22,13 @@ import ( "fmt" "math/big" "math/rand" - "os" "reflect" "testing" "testing/quick" "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" @@ -40,19 +40,34 @@ func init() { spew.Config.DisableMethods = false } -// Used for testing -func newEmpty() *Trie { - db := ethdb.NewMemDatabase() - trie, _ := New(thor.Bytes32{}, db) - return trie +func makeKey(path []byte, ver Version) []byte { + key := binary.AppendUvarint([]byte(nil), uint64(ver.Major)) + key = binary.AppendUvarint(key, uint64(ver.Minor)) + return append(key, path...) 
+}
+
+type memdb struct {
+    db *ethdb.MemDatabase
+}
+
+func (m *memdb) Get(path []byte, ver Version) ([]byte, error) {
+    return m.db.Get(makeKey(path, ver))
+}
+
+func (m *memdb) Put(path []byte, ver Version, value []byte) error {
+    return m.db.Put(makeKey(path, ver), value)
+}
+
+func newMemDatabase() *memdb {
+    return &memdb{ethdb.NewMemDatabase()}
 }
 
 func TestEmptyTrie(t *testing.T) {
     var trie Trie
     res := trie.Hash()
-    exp := emptyRoot
-    if res != exp {
-        t.Errorf("expected %x got %x", exp, res)
+
+    if res != emptyRoot {
+        t.Errorf("expected %x got %x", emptyRoot, res)
     }
 }
 
@@ -60,125 +75,129 @@ func TestNull(t *testing.T) {
     var trie Trie
     key := make([]byte, 32)
     value := []byte("test")
-    trie.Update(key, value)
-    if !bytes.Equal(trie.Get(key), value) {
+    trie.Update(key, value, nil)
+    gotVal, _, _ := trie.Get(key)
+    if !bytes.Equal(gotVal, value) {
         t.Fatal("wrong value")
     }
 }
 
 func TestMissingRoot(t *testing.T) {
-    db := ethdb.NewMemDatabase()
-    root := thor.Bytes32{1, 2, 3, 4, 5}
-    trie, err := New(root, db)
-    if trie != nil {
-        t.Error("New returned non-nil trie for invalid root")
-    }
+    db := newMemDatabase()
+    hash := thor.Bytes32{1, 2, 3, 4, 5}
+    trie := New(Root{Hash: hash}, db)
+
+    // will resolve node
+    err := trie.Commit(db, Version{}, false)
     if _, ok := err.(*MissingNodeError); !ok {
         t.Errorf("New returned wrong error: %v", err)
     }
 }
 
 func TestMissingNode(t *testing.T) {
-    db := ethdb.NewMemDatabase()
-    trie, _ := New(thor.Bytes32{}, db)
+    db := newMemDatabase()
+
+    root := Root{}
+    trie := New(root, db)
     updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer")
+    updateString(trie, "120100", "qwerqwerqwerqwerqwerqwerqwerqwer")
     updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf")
-    root, _ := trie.Commit()
+    root.Ver.Major++
+    trie.Commit(db, root.Ver, false)
+    root.Hash = trie.Hash()
 
-    trie, _ = New(root, db)
-    _, err := trie.TryGet([]byte("120000"))
+    trie = New(root, db)
+    _, _, err := trie.Get([]byte("120000"))
     if err != nil {
         t.Errorf("Unexpected error: %v", err)
     }
 
-    trie, _ = New(root, db)
-    _, err = trie.TryGet([]byte("120099"))
+    trie = New(root, db)
+    _, _, err = trie.Get([]byte("120099"))
     if err != nil {
         t.Errorf("Unexpected error: %v", err)
     }
 
-    trie, _ = New(root, db)
-    _, err = trie.TryGet([]byte("123456"))
+    trie = New(root, db)
+    _, _, err = trie.Get([]byte("123456"))
    if err != nil {
         t.Errorf("Unexpected error: %v", err)
     }
 
-    trie, _ = New(root, db)
-    err = trie.TryUpdate([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"))
+    trie = New(root, db)
+    err = trie.Update([]byte("120099"), []byte("zxcvzxcvzxcvzxcvzxcvzxcvzxcvzxcv"), nil)
     if err != nil {
         t.Errorf("Unexpected error: %v", err)
     }
 
-    trie, _ = New(root, db)
-    err = trie.TryDelete([]byte("123456"))
+    trie = New(root, db)
+    err = trie.Update([]byte("123456"), nil, nil)
     if err != nil {
         t.Errorf("Unexpected error: %v", err)
     }
 
-    db.Delete(common.FromHex("f4c6f22acf81fd2d993636c74c17d58ad0344b55343f5121bf16fb5f5ec1fc6f"))
+    db.db.Delete(makeKey([]byte{3, 1, 3, 2, 3, 0, 3}, root.Ver))
 
-    trie, _ = New(root, db)
-    _, err = trie.TryGet([]byte("120000"))
+    trie = New(root, db)
+    _, _, err = trie.Get([]byte("120000"))
     if _, ok := err.(*MissingNodeError); !ok {
         t.Errorf("Wrong error: %v", err)
     }
 
-    trie, _ = New(root, db)
-    _, err = trie.TryGet([]byte("120099"))
+    trie = New(root, db)
+    _, _, err = trie.Get([]byte("120099"))
     if _, ok := err.(*MissingNodeError); !ok {
         t.Errorf("Wrong error: %v", err)
     }
 
-    trie, _ = New(root, db)
-    _, err = trie.TryGet([]byte("123456"))
+    trie = New(root, db)
+    _, _, err = trie.Get([]byte("123456"))
     if err != nil {
         t.Errorf("Unexpected error: %v", err)
     }
 
-    trie, _ = New(root, db)
-    err = trie.TryUpdate([]byte("120099"), []byte("zxcv"))
-    if _, ok := err.(*MissingNodeError); !ok {
-        t.Errorf("Wrong error: %v", err)
-    }
-
-    trie, _ = New(root, db)
-    err = trie.TryDelete([]byte("123456"))
+    trie = New(root, db)
+    err = trie.Update([]byte("120099"), []byte("zxcv"), nil)
     if _, ok := err.(*MissingNodeError); !ok {
         t.Errorf("Wrong error: %v", err)
     }
 }
 
 func TestInsert(t *testing.T) {
-    trie := newEmpty()
+    trie := new(Trie)
     updateString(trie, "doe", "reindeer")
     updateString(trie, "dog", "puppy")
     updateString(trie, "dogglesworth", "cat")
 
     exp, _ := thor.ParseBytes32("6ca394ff9b13d6690a51dea30b1b5c43108e52944d30b9095227c49bae03ff8b")
-    root := trie.Hash()
-    if root != exp {
-        t.Errorf("exp %v got %v", exp, root)
+    hash := trie.Hash()
+    if hash != exp {
+        t.Errorf("exp %v got %v", exp, hash)
     }
 
-    trie = newEmpty()
+    trie = new(Trie)
     updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
 
     exp, _ = thor.ParseBytes32("e9d7f23f40cd82fe35f5a7a6778c3503f775f3623ba7a71fb335f0eee29dac8a")
-    root, err := trie.Commit()
+    db := newMemDatabase()
+
+    err := trie.Commit(db, Version{}, false)
+    hash = trie.Hash()
     if err != nil {
         t.Fatalf("commit error: %v", err)
     }
-    if root != exp {
-        t.Errorf("exp %v got %v", exp, root)
+    if hash != exp {
+        t.Errorf("exp %v got %v", exp, hash)
     }
 }
 
 func TestGet(t *testing.T) {
-    trie := newEmpty()
+    trie := new(Trie)
     updateString(trie, "doe", "reindeer")
     updateString(trie, "dog", "puppy")
     updateString(trie, "dogglesworth", "cat")
+    db := newMemDatabase()
 
     for i := 0; i < 2; i++ {
         res := getString(trie, "dog")
@@ -194,12 +213,12 @@ func TestGet(t *testing.T) {
         if i == 1 {
             return
         }
-        trie.Commit()
+        trie.Commit(db, Version{Major: uint32(i)}, false)
     }
 }
 
 func TestDelete(t *testing.T) {
-    trie := newEmpty()
+    trie := new(Trie)
     vals := []struct{ k, v string }{
         {"do", "verb"},
         {"ether", "wookiedoo"},
@@ -226,7 +245,7 @@
 }
 
 func TestEmptyValues(t *testing.T) {
-    trie := newEmpty()
+    trie := new(Trie)
 
     vals := []struct{ k, v string }{
         {"do", "verb"},
@@ -250,7 +269,8 @@
 }
 
 func TestReplication(t *testing.T) {
-    trie := newEmpty()
+    db := newMemDatabase()
+    trie := new(Trie)
     vals := []struct{ k, v string }{
         {"do", "verb"},
         {"ether", "wookiedoo"},
@@ -263,27 +283,27 @@
     for _, val := range vals {
         updateString(trie, val.k, val.v)
     }
-    exp, err := trie.Commit()
-    if err != nil {
+    ver := Version{}
+    if err := trie.Commit(db, ver, false); err != nil {
         t.Fatalf("commit error: %v", err)
     }
+    exp := trie.Hash()
 
     // create a new trie on top of the database and check that lookups work.
-    trie2, err := New(exp, trie.db)
-    if err != nil {
-        t.Fatalf("can't recreate trie at %x: %v", exp, err)
-    }
+    trie2 := New(Root{exp, ver}, db)
+
     for _, kv := range vals {
         if string(getString(trie2, kv.k)) != kv.v {
             t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v)
         }
     }
-    hash, err := trie2.Commit()
-    if err != nil {
+    ver.Major++
+    if err := trie2.Commit(db, ver, false); err != nil {
         t.Fatalf("commit error: %v", err)
     }
-    if hash != exp {
-        t.Errorf("root failure. expected %x got %x", exp, hash)
+    got := trie2.Hash()
+    if got != exp {
+        t.Errorf("root failure. expected %x got %x", exp, got)
     }
 
     // perform some insertions on the new trie.
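Before the remaining hunks, a round-trip sketch of the reworked trie API may help: a trie is now opened at a Root (a hash plus a Version), Update takes an optional metadata slice, Get returns (value, meta, error), and Commit persists nodes under an explicit version instead of returning a root hash. The example below is not part of the diff; it uses only names visible in these test changes (Root, Version, New, Update, Get, Commit, Hash) plus the newMemDatabase helper added above, and the Example function name itself is illustrative.

package trie

import "fmt"

// ExampleVersionedTrie round-trips one value through the versioned API.
func ExampleVersionedTrie() {
    db := newMemDatabase() // in-memory node store from the test helpers above

    tr := New(Root{}, db)                          // an empty Root{} opens a fresh trie
    tr.Update([]byte("name"), []byte("thor"), nil) // third argument is optional metadata

    ver := Version{Major: 1}
    if err := tr.Commit(db, ver, false); err != nil { // false: do not skip hashing
        panic(err)
    }
    root := Root{Hash: tr.Hash(), Ver: ver} // Commit no longer returns the root hash

    tr = New(root, db) // reopen the trie at the committed root
    val, _, err := tr.Get([]byte("name"))
    fmt.Println(string(val), err)
    // Output: thor <nil>
}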
@@ -307,42 +327,12 @@ func TestReplication(t *testing.T) {
 }
 
 func TestLargeValue(t *testing.T) {
-    trie := newEmpty()
-    trie.Update([]byte("key1"), []byte{99, 99, 99, 99})
-    trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32))
+    trie := new(Trie)
+    trie.Update([]byte("key1"), []byte{99, 99, 99, 99}, nil)
+    trie.Update([]byte("key2"), bytes.Repeat([]byte{1}, 32), nil)
     trie.Hash()
 }
 
-// TestCacheUnload checks that decoded nodes are unloaded after a
-// certain number of commit operations.
-// func TestCacheUnload(t *testing.T) {
-// 	// Create test trie with two branches.
-// 	trie := newEmpty()
-// 	key1 := "---------------------------------"
-// 	key2 := "---some other branch"
-// 	updateString(trie, key1, "this is the branch of key1.")
-// 	updateString(trie, key2, "this is the branch of key2.")
-// 	root, _ := trie.Commit()
-
-// 	// Commit the trie repeatedly and access key1.
-// 	// The branch containing it is loaded from DB exactly two times:
-// 	// in the 0th and 6th iteration.
-// 	db := &countingDB{Database: trie.db, gets: make(map[string]int)}
-// 	trie, _ = New(root, db)
-// 	trie.SetCacheLimit(5)
-// 	for i := 0; i < 12; i++ {
-// 		getString(trie, key1)
-// 		trie.Commit()
-// 	}
-
-// 	// Check that it got loaded two times.
-// 	for dbkey, count := range db.gets {
-// 		if count != 2 {
-// 			t.Errorf("db key %x loaded %d times, want %d times", []byte(dbkey), count, 2)
-// 		}
-// 	}
-// }
-
 // randTest performs random trie operations.
 // Instances of this test are created by Generate.
 type randTest []randTestStep
@@ -397,45 +387,44 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
 }
 
 func runRandTest(rt randTest) bool {
-    db := ethdb.NewMemDatabase()
-    tr, _ := New(thor.Bytes32{}, db)
+    db := newMemDatabase()
+    root := Root{}
+    tr := New(root, db)
     values := make(map[string]string) // tracks content of the trie
 
     for i, step := range rt {
         switch step.op {
         case opUpdate:
-            tr.Update(step.key, step.value)
+            tr.Update(step.key, step.value, nil)
             values[string(step.key)] = string(step.value)
         case opDelete:
-            tr.Delete(step.key)
+            tr.Update(step.key, nil, nil)
             delete(values, string(step.key))
         case opGet:
-            v := tr.Get(step.key)
+            v, _, _ := tr.Get(step.key)
             want := values[string(step.key)]
             if string(v) != want {
                 rt[i].err = fmt.Errorf("mismatch for key 0x%x, got 0x%x want 0x%x", step.key, v, want)
             }
         case opCommit:
-            _, rt[i].err = tr.Commit()
+            root.Ver.Major++
+            rt[i].err = tr.Commit(db, root.Ver, false)
         case opHash:
             tr.Hash()
         case opReset:
-            hash, err := tr.Commit()
-            if err != nil {
-                rt[i].err = err
-                return false
-            }
-            newtr, err := New(hash, db)
-            if err != nil {
+            root.Ver.Major++
+            if err := tr.Commit(db, root.Ver, false); err != nil {
                 rt[i].err = err
                 return false
             }
+            root.Hash = tr.Hash()
+            newtr := New(root, db)
             tr = newtr
         case opItercheckhash:
-            checktr, _ := New(thor.Bytes32{}, nil)
-            it := NewIterator(tr.NodeIterator(nil))
+            checktr := new(Trie)
+            it := NewIterator(tr.NodeIterator(nil, Version{}))
             for it.Next() {
-                checktr.Update(it.Key, it.Value)
+                checktr.Update(it.Key, it.Value, nil)
             }
             if tr.Hash() != checktr.Hash() {
                 rt[i].err = fmt.Errorf("hash mismatch in opItercheckhash")
@@ -451,40 +440,6 @@ func runRandTest(rt randTest) bool {
     return true
 }
 
-// func checkCacheInvariant(n, parent node, parentCachegen uint16, parentDirty bool, depth int) error {
-// 	var children []node
-// 	var flag nodeFlag
-// 	switch n := n.(type) {
-// 	case *shortNode:
-// 		flag = n.flags
-// 		children = []node{n.Val}
-// 	case *fullNode:
-// 		flag = n.flags
-// 		children = n.Children[:]
-// 	default:
-// 		return nil
-// 	}
-
-// 	errorf := func(format string, args ...interface{}) error {
-// 		msg := fmt.Sprintf(format, args...)
-// 		msg += fmt.Sprintf("\nat depth %d node %s", depth, spew.Sdump(n))
-// 		msg += fmt.Sprintf("parent: %s", spew.Sdump(parent))
-// 		return errors.New(msg)
-// 	}
-// 	if flag.gen > parentCachegen {
-// 		return errorf("cache invariant violation: %d > %d\n", flag.gen, parentCachegen)
-// 	}
-// 	if depth > 0 && !parentDirty && flag.dirty {
-// 		return errorf("cache invariant violation: %d > %d\n", flag.gen, parentCachegen)
-// 	}
-// 	for _, child := range children {
-// 		if err := checkCacheInvariant(child, n, flag.gen, flag.dirty, depth+1); err != nil {
-// 			return err
-// 		}
-// 	}
-// 	return nil
-// }
-
 func TestRandom(t *testing.T) {
     if err := quick.Check(runRandTest, nil); err != nil {
         if cerr, ok := err.(*quick.CheckError); ok {
@@ -503,18 +458,20 @@
 const benchElemCount = 20000
 
 func benchGet(b *testing.B, commit bool) {
     trie := new(Trie)
+    db := newMemDatabase()
+    root := Root{}
     if commit {
-        _, tmpdb := tempDB()
-        trie, _ = New(thor.Bytes32{}, tmpdb)
+        trie = New(root, db)
     }
     k := make([]byte, 32)
     for i := 0; i < benchElemCount; i++ {
         binary.LittleEndian.PutUint64(k, uint64(i))
-        trie.Update(k, k)
+        trie.Update(k, k, nil)
     }
     binary.LittleEndian.PutUint64(k, benchElemCount/2)
     if commit {
-        trie.Commit()
+        root.Ver.Major++
+        trie.Commit(db, root.Ver, false)
     }
 
     b.ResetTimer()
@@ -522,20 +479,14 @@ func benchGet(b *testing.B, commit bool) {
     for i := 0; i < b.N; i++ {
         trie.Get(k)
     }
     b.StopTimer()
-
-    if commit {
-        ldb := trie.db.(*ethdb.LDBDatabase)
-        ldb.Close()
-        os.RemoveAll(ldb.Path())
-    }
 }
 
 func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
-    trie := newEmpty()
+    trie := new(Trie)
     k := make([]byte, 32)
     for i := 0; i < b.N; i++ {
         e.PutUint64(k, uint64(i))
-        trie.Update(k, k)
+        trie.Update(k, k, nil)
     }
     return trie
 }
 
@@ -561,47 +512,44 @@ func BenchmarkHash(b *testing.B) {
             nonce   = uint64(random.Int63())
             balance = new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil))
             root    = emptyRoot
-            code    = thor.Keccak256(nil)
+            code    = crypto.Keccak256(nil)
         )
         accounts[i], _ = rlp.EncodeToBytes([]interface{}{nonce, balance, root, code})
     }
 
     // Insert the accounts into the trie and hash it
-    trie := newEmpty()
+    trie := new(Trie)
     for i := 0; i < len(addresses); i++ {
-        trie.Update(thor.Blake2b(addresses[i][:]).Bytes(), accounts[i])
+        trie.Update(thor.Blake2b(addresses[i][:]).Bytes(), accounts[i], nil)
     }
     b.ResetTimer()
    b.ReportAllocs()
     trie.Hash()
 }
 
-func tempDB() (string, Database) {
-    dir, err := os.MkdirTemp("", "trie-bench")
-    if err != nil {
-        panic(fmt.Sprintf("can't create temporary directory: %v", err))
-    }
-    db, err := ethdb.NewLDBDatabase(dir, 256, 0)
+func getString(trie *Trie, k string) []byte {
+    val, _, err := trie.Get([]byte(k))
     if err != nil {
-        panic(fmt.Sprintf("can't create temporary database: %v", err))
+        panic(err)
     }
-    return dir, db
-}
-
-func getString(trie *Trie, k string) []byte {
-    return trie.Get([]byte(k))
+    return val
 }
 
 func updateString(trie *Trie, k, v string) {
-    trie.Update([]byte(k), []byte(v))
+    if err := trie.Update([]byte(k), []byte(v), nil); err != nil {
+        panic(err)
+    }
 }
 
 func deleteString(trie *Trie, k string) {
-    trie.Delete([]byte(k))
+    if err := trie.Update([]byte(k), nil, nil); err != nil {
+        panic(err)
+    }
 }
 
 func TestExtended(t *testing.T) {
-    db := ethdb.NewMemDatabase()
-    tr := NewExtended(thor.Bytes32{}, 0, db, false)
+    db := newMemDatabase()
+    ver := Version{}
+    tr := New(Root{}, db)
 
     vals1 := []struct{ k, v string }{
         {"do", "verb"},
@@ -634,20 +582,24 @@ func TestExtended(t *testing.T) {
         tr.Update([]byte(v.k), []byte(v.v), thor.Blake2b([]byte(v.v)).Bytes())
     }
 
-    root1, err := tr.Commit(1)
+    ver.Major++
+    err := tr.Commit(db, ver, false)
     if err != nil {
         t.Errorf("commit failed %v", err)
     }
+    root1 := tr.Hash()
 
     for _, v := range vals2 {
         tr.Update([]byte(v.k), []byte(v.v), thor.Blake2b([]byte(v.v)).Bytes())
     }
 
-    root2, err := tr.Commit(2)
+    ver.Major++
+    err = tr.Commit(db, ver, false)
     if err != nil {
         t.Errorf("commit failed %v", err)
     }
+    root2 := tr.Hash()
 
-    tr1 := NewExtended(root1, 1, db, false)
+    tr1 := New(Root{root1, Version{Major: 1}}, db)
     for _, v := range vals1 {
         val, meta, _ := tr1.Get([]byte(v.k))
         if string(val) != v.v {
@@ -658,7 +610,7 @@ func TestExtended(t *testing.T) {
         }
     }
 
-    tr2 := NewExtended(root2, 2, db, false)
+    tr2 := New(Root{root2, Version{Major: 2}}, db)
     for _, v := range append(vals1, vals2...) {
         val, meta, _ := tr2.Get([]byte(v.k))
         if string(val) != v.v {
@@ -670,30 +622,20 @@ func TestExtended(t *testing.T) {
         }
     }
 }
 
-type kedb struct {
-    *ethdb.MemDatabase
-}
-
-func (db *kedb) Encode(_ []byte, seq uint64, path []byte) []byte {
-    var k [8]byte
-    binary.BigEndian.PutUint64(k[:], seq)
-    return append(k[:], path...)
-}
-
-func TestNonCryptoExtended(t *testing.T) {
-    db := &kedb{ethdb.NewMemDatabase()}
-
-    tr := NewExtended(thor.Bytes32{}, 0, db, true)
-    var root thor.Bytes32
+func TestCommitSkipHash(t *testing.T) {
+    db := newMemDatabase()
+    ver := Version{}
+    tr := New(Root{}, db)
     n := uint32(100)
     for i := uint32(0); i < n; i++ {
         var k [4]byte
         binary.BigEndian.PutUint32(k[:], i)
         tr.Update(k[:], thor.Blake2b(k[:]).Bytes(), nil)
-        root, _ = tr.Commit(uint64(i))
+        ver.Major++
+        tr.Commit(db, ver, true)
     }
 
-    tr = NewExtended(root, uint64(n-1), db, true)
+    tr = New(Root{thor.BytesToBytes32([]byte{1}), ver}, db)
     for i := uint32(0); i < n; i++ {
         var k [4]byte
         binary.BigEndian.PutUint32(k[:], i)
@@ -703,9 +645,9 @@
     }
 }
 
-func TestExtendedCached(t *testing.T) {
-    db := ethdb.NewMemDatabase()
-    tr := NewExtended(thor.Bytes32{}, 0, db, false)
+func TestFromRootNode(t *testing.T) {
+    db := newMemDatabase()
+    tr := New(Root{}, db)
 
     vals := []struct{ k, v string }{
         {"do", "verb"},
@@ -719,7 +661,7 @@ func TestExtendedCached(t *testing.T) {
         tr.Update([]byte(val.k), []byte(val.v), nil)
     }
 
-    tr = NewExtendedCached(tr.RootNode(), db, false)
+    tr = FromRootNode(tr.RootNode(), db)
 
     for _, val := range vals {
         v, _, _ := tr.Get([]byte(val.k))
diff --git a/trie/vp.go b/trie/vp.go
new file mode 100644
index 000000000..5444b9531
--- /dev/null
+++ b/trie/vp.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or <https://www.gnu.org/licenses/lgpl-3.0.html>
+
+package trie
+
+import (
+    "encoding/binary"
+    "errors"
+    "math"
+)
+
+type vpScope struct{}
+
+// vp implements varint-prefix coding.
+//
+// It's much simpler and a bit faster than RLP.
+// Trie nodes stored in the database are encoded using vp.
+var vp vpScope
+
+// AppendUint32 appends vp-encoded i to buf and returns the extended buffer.
+func (vpScope) AppendUint32(buf []byte, i uint32) []byte {
+    return binary.AppendUvarint(buf, uint64(i))
+}
+
+// AppendString appends vp-encoded str to buf and returns the extended buffer.
+func (vpScope) AppendString(buf, str []byte) []byte {
+    buf = binary.AppendUvarint(buf, uint64(len(str)))
+    return append(buf, str...)
+}
+
+// SplitString extracts a string and returns the remaining bytes.
+// It returns an error if the length prefix is invalid, and panics if buf is truncated.
+func (vpScope) SplitString(buf []byte) (str []byte, rest []byte, err error) {
+    i, n := binary.Uvarint(buf)
+    if n <= 0 {
+        return nil, nil, errors.New("invalid uvarint prefix")
+    }
+    buf = buf[n:]
+    return buf[:i], buf[i:], nil
+}
+
+// SplitUint32 extracts a uint32 and returns the remaining bytes.
+// It returns an error if the prefix is invalid or the value overflows uint32.
+func (vpScope) SplitUint32(buf []byte) (i uint32, rest []byte, err error) {
+    i64, n := binary.Uvarint(buf)
+    if n <= 0 || i64 > math.MaxUint32 {
+        return 0, nil, errors.New("invalid uvarint prefix")
+    }
+    return uint32(i64), buf[n:], nil
+}
diff --git a/trie/vp_test.go b/trie/vp_test.go
new file mode 100644
index 000000000..cd066bacc
--- /dev/null
+++ b/trie/vp_test.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2024 The VeChainThor developers
+
+// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
+// file LICENSE or <https://www.gnu.org/licenses/lgpl-3.0.html>
+
+package trie
+
+import (
+    "bytes"
+    "testing"
+)
+
+func TestAppendString(t *testing.T) {
+    var buf []byte
+    want := []byte("vechain")
+    buf = vp.AppendString(buf, want)
+    got, buf, err := vp.SplitString(buf)
+    if err != nil {
+        t.Error("should not error")
+    }
+
+    if !bytes.Equal(got, want) {
+        t.Errorf("want %v got %v", want, got)
+    }
+
+    if len(buf) != 0 {
+        t.Error("rest buf should be empty")
+    }
+}
+
+func TestAppendUint32(t *testing.T) {
+    var buf []byte
+    const want = 1234567
+    buf = vp.AppendUint32(buf, want)
+    got, buf, err := vp.SplitUint32(buf)
+    if err != nil {
+        t.Error("should not error")
+    }
+    if got != want {
+        t.Errorf("want %v got %v", want, got)
+    }
+
+    if len(buf) != 0 {
+        t.Error("rest buf should be empty")
+    }
+}
diff --git a/txpool/tx_object_map_test.go b/txpool/tx_object_map_test.go
index 9a0b38629..084bd8a78 100644
--- a/txpool/tx_object_map_test.go
+++ b/txpool/tx_object_map_test.go
@@ -19,8 +19,7 @@ import (
 )
 
 func TestGetByID(t *testing.T) {
-    db := muxdb.NewMem()
-    repo := newChainRepo(db)
+    repo := newChainRepo(muxdb.NewMem())
 
     // Creating transactions
     tx1 := newTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), genesis.DevAccounts()[0])
@@ -49,8 +48,7 @@
 }
 
 func TestFill(t *testing.T) {
-    db := muxdb.NewMem()
-    repo := newChainRepo(db)
+    repo := newChainRepo(muxdb.NewMem())
 
     // Creating transactions
     tx1 := newTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), genesis.DevAccounts()[0])
@@ -83,8 +81,7 @@
 }
 
 func TestTxObjMap(t *testing.T) {
-    db := muxdb.NewMem()
-    repo := newChainRepo(db)
+    repo := newChainRepo(muxdb.NewMem())
 
     tx1 := newTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), genesis.DevAccounts()[0])
     tx2 := newTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), genesis.DevAccounts()[0])
@@ -120,8 +117,7 @@
 }
 
 func TestLimitByDelegator(t *testing.T) {
-    db := muxdb.NewMem()
-    repo := newChainRepo(db)
+    repo := newChainRepo(muxdb.NewMem())
 
     tx1 := newTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), genesis.DevAccounts()[0])
     tx2 := newDelegatedTx(repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, genesis.DevAccounts()[0], genesis.DevAccounts()[1])
@@ -158,7 +154,7 @@ func TestPendingCost(t *testing.T) {
     chain := repo.NewBestChain()
     best := repo.BestBlockSummary()
 
-    state := stater.NewState(best.Header.StateRoot(), best.Header.Number(), best.Conflicts, best.SteadyNum)
+    state := stater.NewState(best.Root())
 
     var err error
     txObj1.executable, err = txObj1.Executable(chain, state, best.Header)
diff --git a/txpool/tx_object_test.go b/txpool/tx_object_test.go
index 8358f1a6d..764b22b07 100644
--- a/txpool/tx_object_test.go
+++ b/txpool/tx_object_test.go
@@ -18,6 +18,7 @@ import (
     "github.com/vechain/thor/v2/muxdb"
     "github.com/vechain/thor/v2/state"
     "github.com/vechain/thor/v2/thor"
+    "github.com/vechain/thor/v2/trie"
     "github.com/vechain/thor/v2/tx"
 )
 
@@ -78,8 +79,8 @@ func SetupTest() (genesis.DevAccount, *chain.Repository, *block.Block, *state.St
     repo := newChainRepo(db)
     b0 := repo.GenesisBlock()
     b1 := new(block.Builder).ParentID(b0.Header().ID()).GasLimit(10000000).TotalScore(100).Build()
-    repo.AddBlock(b1, nil, 0)
-    st := state.New(db, repo.GenesisBlock().Header().StateRoot(), 0, 0, 0)
+    repo.AddBlock(b1, nil, 0, false)
+    st := state.New(db, trie.Root{Hash: repo.GenesisBlock().Header().StateRoot()})
 
     return acc, repo, b1, st
 }
@@ -137,7 +138,14 @@ func TestResolve(t *testing.T) {
 }
 
 func TestExecutable(t *testing.T) {
-    acc, repo, b1, st := SetupTest()
+    acc := genesis.DevAccounts()[0]
+
+    db := muxdb.NewMem()
+    repo := newChainRepo(db)
+    b0 := repo.GenesisBlock()
+    b1 := new(block.Builder).ParentID(b0.Header().ID()).GasLimit(10000000).TotalScore(100).Build()
+    repo.AddBlock(b1, nil, 0, false)
+    st := state.New(db, trie.Root{Hash: repo.GenesisBlock().Header().StateRoot()})
 
     tests := []struct {
         tx          *tx.Transaction
diff --git a/txpool/tx_pool.go b/txpool/tx_pool.go
index 928751676..752258d27 100644
--- a/txpool/tx_pool.go
+++ b/txpool/tx_pool.go
@@ -245,7 +245,7 @@ func (p *TxPool) add(newTx *tx.Transaction, rejectNonExecutable bool, localSubmi
         }
     }
 
-    state := p.stater.NewState(headSummary.Header.StateRoot(), headSummary.Header.Number(), headSummary.Conflicts, headSummary.SteadyNum)
+    state := p.stater.NewState(headSummary.Root())
     executable, err := txObj.Executable(p.repo.NewChain(headSummary.Header.ID()), state, headSummary.Header)
     if err != nil {
         return txRejectedError{err.Error()}
@@ -255,6 +255,12 @@ func (p *TxPool) add(newTx *tx.Transaction, rejectNonExecutable bool, localSubmi
         return txRejectedError{"tx is not executable"}
     }
 
+    if !executable {
+        if p.all.Len()-len(p.Executables()) >= p.options.Limit*2/10 {
+            return txRejectedError{"non executable pool is full"}
+        }
+    }
+
     txObj.executable = executable
     if err := p.all.Add(txObj, p.options.LimitPerAccount, func(payer thor.Address, needs *big.Int) error {
         // check payer's balance
@@ -391,7 +397,7 @@ func (p *TxPool) wash(headSummary *chain.BlockSummary) (executables tx.Transacti
 
     // recreate state every time to avoid high RAM usage when the pool at hight water-mark.
     newState := func() *state.State {
-        return p.stater.NewState(headSummary.Header.StateRoot(), headSummary.Header.Number(), headSummary.Conflicts, headSummary.SteadyNum)
+        return p.stater.NewState(headSummary.Root())
     }
     baseGasPrice, err := builtin.Params.Native(newState()).Get(thor.KeyBaseGasPrice)
     if err != nil {
@@ -468,6 +474,12 @@ func (p *TxPool) wash(headSummary *chain.BlockSummary) (executables tx.Transacti
             toRemove = append(toRemove, txObj)
             logger.Debug("non-executable tx washed out due to pool limit", "id", txObj.ID())
         }
+    } else if len(nonExecutableObjs) > limit*2/10 {
+        // nonExecutableObjs over the non-executable quota (20% of the pool limit)
+        for _, txObj := range nonExecutableObjs[limit*2/10:] {
+            toRemove = append(toRemove, txObj)
+            logger.Debug("non-executable tx washed out due to non-executable limit", "id", txObj.ID())
+        }
     }
 
     // Concatenate executables.
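The two tx_pool.go hunks above act as a pair: add now rejects an incoming non-executable transaction once such transactions would occupy more than Limit*2/10 (20%) of the pool, and wash evicts any overflow beyond the same quota on the next head update. A minimal sketch of the shared check follows; the function name and parameters are illustrative stand-ins (for p.all.Len(), len(p.Executables()) and p.options.Limit), not code from the diff.

package txpool

// nonExecutableQuotaFull reports whether non-executable transactions
// already fill their reserved share of the pool: at most limit*2/10
// (i.e. 20% of the configured pool limit) slots.
func nonExecutableQuotaFull(poolLen, executableLen, limit int) bool {
    nonExecutable := poolLen - executableLen // txs held back by dependencies or future block refs
    return nonExecutable >= limit*2/10
}

With limit = 5 the quota is a single slot, which is exactly what TestExecutableAndNonExecutableLimits below exercises: the first non-executable (dependent) transaction is accepted, and the second is rejected with "non executable pool is full".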
diff --git a/txpool/tx_pool_test.go b/txpool/tx_pool_test.go
index 73fe7db0a..916885b34 100644
--- a/txpool/tx_pool_test.go
+++ b/txpool/tx_pool_test.go
@@ -26,6 +26,7 @@ import (
     "github.com/vechain/thor/v2/muxdb"
     "github.com/vechain/thor/v2/state"
     "github.com/vechain/thor/v2/thor"
+    "github.com/vechain/thor/v2/trie"
     "github.com/vechain/thor/v2/tx"
     Tx "github.com/vechain/thor/v2/tx"
 )
@@ -46,6 +47,10 @@ func newPool(limit int, limitPerAccount int) *TxPool {
 }
 
 func newPoolWithParams(limit int, limitPerAccount int, BlocklistCacheFilePath string, BlocklistFetchURL string, timestamp uint64) *TxPool {
+    return newPoolWithMaxLifetime(limit, limitPerAccount, BlocklistCacheFilePath, BlocklistFetchURL, timestamp, time.Hour)
+}
+
+func newPoolWithMaxLifetime(limit int, limitPerAccount int, BlocklistCacheFilePath string, BlocklistFetchURL string, timestamp uint64, maxLifetime time.Duration) *TxPool {
     db := muxdb.NewMem()
     gene := new(genesis.Builder).
         GasLimit(thor.InitialGasLimit).
@@ -63,7 +68,7 @@ func newPoolWithParams(limit int, limitPerAccount int, BlocklistCacheFilePath st
     return New(repo, state.NewStater(db), Options{
         Limit:                  limit,
         LimitPerAccount:        limitPerAccount,
-        MaxLifetime:            time.Hour,
+        MaxLifetime:            maxLifetime,
         BlocklistCacheFilePath: BlocklistCacheFilePath,
         BlocklistFetchURL:      BlocklistFetchURL,
     })
@@ -215,8 +220,8 @@ func TestSubscribeNewTx(t *testing.T) {
     pool := newPool(LIMIT, LIMIT_PER_ACCOUNT)
     defer pool.Close()
 
-    st := pool.stater.NewState(pool.repo.GenesisBlock().Header().StateRoot(), 0, 0, 0)
-    stage, _ := st.Stage(1, 0)
+    st := pool.stater.NewState(trie.Root{Hash: pool.repo.GenesisBlock().Header().StateRoot()})
+    stage, _ := st.Stage(trie.Version{Major: 1})
     root1, _ := stage.Commit()
 
     var sig [65]byte
@@ -229,10 +234,9 @@ func TestSubscribeNewTx(t *testing.T) {
         GasLimit(10000000).
         StateRoot(root1).
         Build().WithSignature(sig[:])
-    if err := pool.repo.AddBlock(b1, nil, 0); err != nil {
+    if err := pool.repo.AddBlock(b1, nil, 0, true); err != nil {
         t.Fatal(err)
     }
-    pool.repo.SetBestBlockID(b1.Header().ID())
 
     txCh := make(chan *TxEvent)
@@ -261,8 +265,8 @@ func TestWashTxs(t *testing.T) {
     assert.Nil(t, err)
     assert.Equal(t, Tx.Transactions{tx1}, txs)
 
-    st := pool.stater.NewState(pool.repo.GenesisBlock().Header().StateRoot(), 0, 0, 0)
-    stage, _ := st.Stage(1, 0)
+    st := pool.stater.NewState(trie.Root{Hash: pool.repo.GenesisBlock().Header().StateRoot()})
+    stage, _ := st.Stage(trie.Version{Major: 1})
     root1, _ := stage.Commit()
     b1 := new(block.Builder).
         ParentID(pool.repo.GenesisBlock().Header().ID()).
@@ -271,7 +275,7 @@ func TestWashTxs(t *testing.T) {
         GasLimit(10000000).
         StateRoot(root1).
         Build()
-    pool.repo.AddBlock(b1, nil, 0)
+    pool.repo.AddBlock(b1, nil, 0, false)
 
     txs, _, err = pool.wash(pool.repo.BestBlockSummary())
     assert.Nil(t, err)
@@ -324,8 +328,8 @@ func TestFillPool(t *testing.T) {
 
 func TestAdd(t *testing.T) {
     pool := newPool(LIMIT, LIMIT_PER_ACCOUNT)
     defer pool.Close()
-    st := pool.stater.NewState(pool.repo.GenesisBlock().Header().StateRoot(), 0, 0, 0)
-    stage, _ := st.Stage(1, 0)
+    st := pool.stater.NewState(trie.Root{Hash: pool.repo.GenesisBlock().Header().StateRoot()})
+    stage, _ := st.Stage(trie.Version{Major: 1})
     root1, _ := stage.Commit()
 
     var sig [65]byte
@@ -337,8 +341,7 @@ func TestAdd(t *testing.T) {
         GasLimit(10000000).
         StateRoot(root1).
         Build().WithSignature(sig[:])
-    pool.repo.AddBlock(b1, nil, 0)
-    pool.repo.SetBestBlockID(b1.Header().ID())
+    pool.repo.AddBlock(b1, nil, 0, true)
 
     acc := devAccounts[0]
     dupTx := newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), acc)
@@ -436,6 +439,97 @@ func TestPoolLimit(t *testing.T) {
     assert.Equal(t, "tx rejected: account quota exceeded", err.Error())
 }
 
+func TestExecutableAndNonExecutableLimits(t *testing.T) {
+    // executable pool limit
+    pool := newPoolWithParams(10, 2, "", "", uint64(time.Now().Unix()))
+    defer pool.Close()
+
+    // Create a slice of transactions to be added to the pool.
+    txs := make(Tx.Transactions, 0, 12)
+    for i := 0; i < 12; i++ {
+        tx := newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), devAccounts[i%len(devAccounts)])
+        pool.add(tx, false, false)
+        txs = append(txs, tx)
+    }
+    pool.executables.Store(txs)
+
+    trx1 := newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), devAccounts[1])
+
+    err := pool.add(trx1, false, false)
+    assert.Equal(t, "tx rejected: pool is full", err.Error())
+
+    trx2 := newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, &thor.Bytes32{1}, tx.Features(0), devAccounts[1])
+
+    err = pool.add(trx2, false, false)
+    assert.Equal(t, "tx rejected: pool is full", err.Error())
+
+    // non-executable pool limit
+    pool = newPoolWithParams(5, 2, "", "", uint64(time.Now().Unix()))
+    defer pool.Close()
+
+    trx1 = newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, &thor.Bytes32{1}, tx.Features(0), devAccounts[0])
+
+    err = pool.add(trx1, false, false)
+    assert.Nil(t, err)
+
+    // a second dependent tx fails
+    trx2 = newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, &thor.Bytes32{1}, tx.Features(0), devAccounts[2])
+
+    err = pool.add(trx2, false, false)
+
+    assert.Equal(t, "tx rejected: non executable pool is full", err.Error())
+
+    // a tx with a higher block ref fails
+    trx2 = newTx(pool.repo.ChainTag(), nil, 21000, tx.NewBlockRef(tx.BlockRef{}.Number()+2), 100, nil, tx.Features(0), devAccounts[2])
+
+    err = pool.add(trx2, false, false)
+
+    assert.Equal(t, "tx rejected: non executable pool is full", err.Error())
+}
+
+func TestNonExecutables(t *testing.T) {
+    pool := newPoolWithParams(100, 100, "", "", uint64(time.Now().Unix()))
+
+    // fill the pool with 90 executable txs
+    for i := 0; i < 90; i++ {
+        assert.NoError(t, pool.AddLocal(newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), devAccounts[i%len(devAccounts)])))
+    }
+
+    executables, _, _ := pool.wash(pool.repo.BestBlockSummary())
+    pool.executables.Store(executables)
+
+    // add 1 non-executable
+    assert.NoError(t, pool.AddLocal(newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, &thor.Bytes32{1}, tx.Features(0), devAccounts[2])))
+}
+
+func TestExpiredTxs(t *testing.T) {
+    pool := newPoolWithMaxLifetime(100, 100, "", "", uint64(time.Now().Unix()), 3*time.Second)
+
+    // fill the pool with 90 executable txs
+    for i := 0; i < 90; i++ {
+        assert.NoError(t, pool.Add(newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, nil, tx.Features(0), devAccounts[i%len(devAccounts)])))
+    }
+
+    executables, _, _ := pool.wash(pool.repo.BestBlockSummary())
+    pool.executables.Store(executables)
+
+    // add 1 non-executable
+    assert.NoError(t, pool.Add(newTx(pool.repo.ChainTag(), nil, 21000, tx.BlockRef{}, 100, &thor.Bytes32{1}, tx.Features(0), devAccounts[2])))
+
+    executables, washed, err := pool.wash(pool.repo.BestBlockSummary())
+    assert.Nil(t, err)
+    assert.Equal(t, 90, len(executables))
+    assert.Equal(t, 0, washed)
+    assert.Equal(t, 91, pool.all.Len())
+
+    time.Sleep(3 * time.Second)
+    executables, washed, err = pool.wash(pool.repo.BestBlockSummary())
+    assert.Nil(t, err)
+    assert.Equal(t, 0, len(executables))
+    assert.Equal(t, 91, washed)
+    assert.Equal(t, 0, pool.all.Len())
+}
+
 func TestBlocked(t *testing.T) {
     acc := devAccounts[len(devAccounts)-1]
@@ -614,8 +708,8 @@ func TestAddOverPendingCost(t *testing.T) {
     b0, _, _, err := builder.Build(state.NewStater(db))
     assert.Nil(t, err)
 
-    st := state.New(db, b0.Header().StateRoot(), 0, 0, 0)
-    stage, err := st.Stage(1, 0)
+    st := state.New(db, trie.Root{Hash: b0.Header().StateRoot()})
+    stage, err := st.Stage(trie.Version{Major: 1})
     assert.Nil(t, err)
     root, err := stage.Commit()
     assert.Nil(t, err)
@@ -631,8 +725,7 @@ func TestAddOverPendingCost(t *testing.T) {
         TransactionFeatures(feat).Build()
 
     repo, _ := chain.NewRepository(db, b0)
-    repo.AddBlock(b1, tx.Receipts{}, 0)
-    repo.SetBestBlockID(b1.Header().ID())
+    repo.AddBlock(b1, tx.Receipts{}, 0, true)
     pool := New(repo, state.NewStater(db), Options{
         Limit:           LIMIT,
         LimitPerAccount: LIMIT,
diff --git a/vm/evm.go b/vm/evm.go
index fcba8ed32..79953b153 100644
--- a/vm/evm.go
+++ b/vm/evm.go
@@ -75,6 +75,8 @@ type Context struct {
     Transfer TransferFunc
     // GetHash returns the hash corresponding to n
     GetHash GetHashFunc
+    // ClauseIndex is the index of the clause in the transaction.
+    ClauseIndex uint64
 
     NewContractAddress    NewContractAddressFunc
     InterceptContractCall InterceptContractCallFunc
diff --git a/xenv/env.go b/xenv/env.go
index d801dc38a..c4375d992 100644
--- a/xenv/env.go
+++ b/xenv/env.go
@@ -32,24 +32,26 @@ type BlockContext struct {
 
 // TransactionContext transaction context.
 type TransactionContext struct {
-    ID         thor.Bytes32
-    Origin     thor.Address
-    GasPayer   thor.Address
-    GasPrice   *big.Int
-    ProvedWork *big.Int
-    BlockRef   tx.BlockRef
-    Expiration uint32
+    ID          thor.Bytes32
+    Origin      thor.Address
+    GasPayer    thor.Address
+    GasPrice    *big.Int
+    ProvedWork  *big.Int
+    ClauseCount *big.Int
+    BlockRef    tx.BlockRef
+    Expiration  uint32
 }
 
 // Environment an env to execute native method.
 type Environment struct {
-    abi      *abi.Method
-    chain    *chain.Chain
-    state    *state.State
-    blockCtx *BlockContext
-    txCtx    *TransactionContext
-    evm      *vm.EVM
-    contract *vm.Contract
+    abi         *abi.Method
+    chain       *chain.Chain
+    state       *state.State
+    blockCtx    *BlockContext
+    txCtx       *TransactionContext
+    evm         *vm.EVM
+    contract    *vm.Contract
+    clauseIndex uint32
 }
 
 // New create a new env.
@@ -61,15 +63,17 @@ func New(
     txCtx *TransactionContext,
     evm *vm.EVM,
     contract *vm.Contract,
+    clauseIndex uint32,
 ) *Environment {
     return &Environment{
-        abi:      abi,
-        chain:    chain,
-        state:    state,
-        blockCtx: blockCtx,
-        txCtx:    txCtx,
-        evm:      evm,
-        contract: contract,
+        abi:         abi,
+        chain:       chain,
+        state:       state,
+        blockCtx:    blockCtx,
+        txCtx:       txCtx,
+        evm:         evm,
+        contract:    contract,
+        clauseIndex: clauseIndex,
     }
 }
 
@@ -79,6 +83,7 @@ func (env *Environment) TransactionContext() *TransactionContext { return env.tx
 func (env *Environment) BlockContext() *BlockContext { return env.blockCtx }
 func (env *Environment) Caller() thor.Address        { return thor.Address(env.contract.Caller()) }
 func (env *Environment) To() thor.Address            { return thor.Address(env.contract.Address()) }
+func (env *Environment) ClauseIndex() uint32         { return env.clauseIndex }
 
 func (env *Environment) UseGas(gas uint64) {
     if !env.contract.UseGas(gas) {