diff --git a/.github/workflows/lint-go.yaml b/.github/workflows/lint-go.yaml index 42a40e41c..d14521319 100644 --- a/.github/workflows/lint-go.yaml +++ b/.github/workflows/lint-go.yaml @@ -20,7 +20,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.55.2 + version: v1.59.1 # use the default if on main branch, otherwise use the pull request config args: --timeout=30m --config=.golangci.yml only-new-issues: true diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 7ee0ca0b6..760824037 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -13,13 +13,6 @@ jobs: matrix: go-version: [1.22.x] os: [ubuntu-latest, macos-latest, windows-latest] - include: - - go-version: 1.19.x - os: ubuntu-latest - - go-version: 1.20.x - os: ubuntu-latest - - go-version: 1.21.x - os: ubuntu-latest runs-on: ${{ matrix.os }} steps: - name: Checkout code diff --git a/Dockerfile b/Dockerfile index 2b0062da0..e6239998c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build thor in a stock Go builder container -FROM golang:1.21.9-alpine3.18 as builder +FROM golang:1.22-alpine3.20 as builder RUN apk add --no-cache make gcc musl-dev linux-headers git WORKDIR /go/thor @@ -7,7 +7,7 @@ COPY . 
/go/thor RUN make all # Pull thor into a second stage deploy alpine container -FROM alpine:latest +FROM alpine:3.20 RUN apk add --no-cache ca-certificates RUN apk upgrade libssl3 libcrypto3 diff --git a/go.mod b/go.mod index 51afb43eb..50ea1816d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/vechain/thor/v2 -go 1.19 +go 1.22 require ( github.com/beevik/ntp v0.2.0 @@ -16,7 +16,7 @@ require ( github.com/holiman/uint256 v1.2.0 github.com/inconshreveable/log15 v0.0.0-20171019012758-0decfc6c20d9 github.com/mattn/go-isatty v0.0.3 - github.com/mattn/go-sqlite3 v1.14.9 + github.com/mattn/go-sqlite3 v1.14.22 github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c github.com/pkg/errors v0.8.0 diff --git a/go.sum b/go.sum index ef3863f69..82fffa5f5 100644 --- a/go.sum +++ b/go.sum @@ -92,6 +92,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -102,8 +103,8 @@ github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA= 
-github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a h1:8TGB3DFRNl06DB1Q6zBX+I7FDoCUZY2fmMS9WGUIIpw= github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= @@ -142,6 +143,7 @@ github.com/rjeczalik/notify v0.9.3 h1:6rJAzHTGKXGj76sbRgDiDcYj/HniypXmSJo1SWakZe github.com/rjeczalik/notify v0.9.3/go.mod h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= diff --git a/logdb/logdb.go b/logdb/logdb.go index 5d0bf744b..bcd793e94 100644 --- a/logdb/logdb.go +++ b/logdb/logdb.go @@ -139,21 +139,29 @@ FROM (%v) e subQuery += ")" } - if filter.Order == DESC { - subQuery += " ORDER BY seq DESC " - } else { - subQuery += " ORDER BY seq ASC " - } - // if there is limit option, set order inside subquery if filter.Options != nil { + if filter.Order == DESC { + subQuery += " ORDER BY seq DESC " + } else { + subQuery += " ORDER BY seq ASC " + } subQuery += " LIMIT ?, ?" 
args = append(args, filter.Options.Offset, filter.Options.Limit) } subQuery = "SELECT e.* FROM (" + subQuery + ") s LEFT JOIN event e ON s.seq = e.seq" - return db.queryEvents(ctx, fmt.Sprintf(query, subQuery), args...) + eventQuery := fmt.Sprintf(query, subQuery) + // if there is no limit option, set order outside + if filter.Options == nil { + if filter.Order == DESC { + eventQuery += " ORDER BY seq DESC " + } else { + eventQuery += " ORDER BY seq ASC " + } + } + return db.queryEvents(ctx, eventQuery, args...) } func (db *LogDB) FilterTransfers(ctx context.Context, filter *TransferFilter) ([]*Transfer, error) { @@ -196,21 +204,28 @@ FROM (%v) t subQuery += ")" } - if filter.Order == DESC { - subQuery += " ORDER BY seq DESC " - } else { - subQuery += " ORDER BY seq ASC " - } - // if there is limit option, set order inside subquery if filter.Options != nil { + if filter.Order == DESC { + subQuery += " ORDER BY seq DESC" + } else { + subQuery += " ORDER BY seq ASC" + } subQuery += " LIMIT ?, ?" args = append(args, filter.Options.Offset, filter.Options.Limit) } subQuery = "SELECT e.* FROM (" + subQuery + ") s LEFT JOIN transfer e ON s.seq = e.seq" - - return db.queryTransfers(ctx, fmt.Sprintf(query, subQuery), args...) + transferQuery := fmt.Sprintf(query, subQuery) + // if there is no limit option, set order outside + if filter.Options == nil { + if filter.Order == DESC { + transferQuery += " ORDER BY seq DESC " + } else { + transferQuery += " ORDER BY seq ASC " + } + } + return db.queryTransfers(ctx, transferQuery, args...) 
} func (db *LogDB) queryEvents(ctx context.Context, query string, args ...interface{}) ([]*Event, error) { diff --git a/logdb/logdb_bench_test.go b/logdb/logdb_bench_test.go new file mode 100644 index 000000000..c1dd2c6e7 --- /dev/null +++ b/logdb/logdb_bench_test.go @@ -0,0 +1,285 @@ +// Copyright (c) 2024 The VeChainThor developers + +// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying +// file LICENSE or <https://www.gnu.org/licenses/lgpl-3.0.html> + +package logdb_test + +import ( + "context" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vechain/thor/v2/block" + "github.com/vechain/thor/v2/logdb" + "github.com/vechain/thor/v2/thor" + "github.com/vechain/thor/v2/tx" +) + +const ( + VTHO_ADDRESS = "0x0000000000000000000000000000456E65726779" + VTHO_TOPIC = "0xDDF252AD1BE2C89B69C2B068FC378DAA952BA7F163C4A11628F55A4DF523B3EF" + TEST_ADDRESS = "0x7567D83B7B8D80ADDCB281A71D54FC7B3364FFED" +) + +var dbPath string + +// Command used to benchmark +// +// go test -bench="^Benchmark" -benchmem -count=5 github.com/vechain/thor/v2/logdb -dbPath |tee -a master.txt +// go test -bench="^Benchmark" -benchmem -count=5 github.com/vechain/thor/v2/logdb -dbPath |tee -a pr.txt +// benchstat master.txt pr.txt +// + +func init() { + flag.StringVar(&dbPath, "dbPath", "", "Path to the database file") +} + +// BenchmarkFakeDB_NewestBlockID performs a series of read/write benchmarks on the NewestBlockID functionality of the LogDB. +// It benchmarks creating, writing, and committing a new block, followed by fetching this new block as the NewestBlockID +func BenchmarkFakeDB_NewestBlockID(t *testing.B) { + db, err := createTempDB() + require.NoError(t, err) + defer db.Close() + + b := new(block.Builder). + ParentID(new(block.Builder).Build().Header().ID()). + Transaction(newTx()). 
+ Build() + receipts := tx.Receipts{newReceipt()} + + w := db.NewWriter() + require.NoError(t, w.Write(b, receipts)) + require.NoError(t, w.Commit()) + + tests := []struct { + name string + prepare func() (thor.Bytes32, error) + }{ + { + "newest block id", + func() (thor.Bytes32, error) { + b = new(block.Builder). + ParentID(b.Header().ID()). + Build() + receipts := tx.Receipts{newReceipt()} + + require.NoError(t, w.Write(b, receipts)) + require.NoError(t, w.Commit()) + + return b.Header().ID(), nil + }, + }, + } + + t.ResetTimer() + for _, tt := range tests { + t.Run(tt.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + want, err := tt.prepare() + require.NoError(t, err) + + got, err := db.NewestBlockID() + if err != nil { + b.Fatal(err) + } + assert.Equal(b, want, got) + } + }) + } +} + +// BenchmarkFakeDB_WriteBlocks creates a temporary database, performs some write + commit benchmarks and then deletes the db +func BenchmarkFakeDB_WriteBlocks(t *testing.B) { + db, err := createTempDB() + require.NoError(t, err) + defer db.Close() + + blk := new(block.Builder).Build() + w := db.NewWriter() + writeCount := 10_000 + + tests := []struct { + name string + writeFunc func(b *testing.B) + }{ + { + "repeated writes", + func(b *testing.B) { + for i := 0; i < writeCount; i++ { + blk = new(block.Builder). + ParentID(blk.Header().ID()). + Transaction(newTx()). + Build() + receipts := tx.Receipts{newReceipt(), newReceipt()} + require.NoError(t, w.Write(blk, receipts)) + require.NoError(t, w.Commit()) + } + }, + }, + { + "batched writes", + func(b *testing.B) { + for i := 0; i < writeCount; i++ { + blk = new(block.Builder). + ParentID(blk.Header().ID()). + Transaction(newTx()). 
+ Build() + receipts := tx.Receipts{newReceipt(), newReceipt()} + require.NoError(t, w.Write(blk, receipts)) + } + require.NoError(t, w.Commit()) + }, + }, + } + + t.ResetTimer() + for _, tt := range tests { + t.Run(tt.name, func(b *testing.B) { + for i := 0; i < t.N; i++ { + tt.writeFunc(b) + } + }) + } +} + +// BenchmarkTestDB_HasBlockID opens a log.db file and measures the performance of the HasBlockID functionality of LogDB. +// It uses unbounded event filtering to check for blocks existence using the HasBlockID +func BenchmarkTestDB_HasBlockID(b *testing.B) { + db, err := loadDBFromDisk(b) + require.NoError(b, err) + defer db.Close() + + // find the first 500k blocks with events + events, err := db.FilterEvents(context.Background(), &logdb.EventFilter{Options: &logdb.Options{Offset: 0, Limit: 500_000}}) + require.NoError(b, err) + require.GreaterOrEqual(b, len(events), 500_000, "there should be more than 500k events in the db") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, event := range events { + has, err := db.HasBlockID(event.BlockID) + require.NoError(b, err) + require.True(b, has) + } + } +} + +// BenchmarkTestDB_FilterEvents opens a log.db file and measures the performance of the Event filtering functionality of LogDB. 
+func BenchmarkTestDB_FilterEvents(b *testing.B) { + db, err := loadDBFromDisk(b) + require.NoError(b, err) + defer db.Close() + + vthoAddress := thor.MustParseAddress(VTHO_ADDRESS) + topic := thor.MustParseBytes32(VTHO_TOPIC) + + addressFilterCriteria := []*logdb.EventCriteria{ + { + Address: &vthoAddress, + }, + } + topicFilterCriteria := []*logdb.EventCriteria{ + { + Topics: [5]*thor.Bytes32{&topic, nil, nil, nil, nil}, + }, + } + + tests := []struct { + name string + arg *logdb.EventFilter + }{ + {"AddressCriteriaFilter", &logdb.EventFilter{CriteriaSet: addressFilterCriteria, Options: &logdb.Options{Offset: 0, Limit: 500000}}}, + {"TopicCriteriaFilter", &logdb.EventFilter{CriteriaSet: topicFilterCriteria, Options: &logdb.Options{Offset: 0, Limit: 500000}}}, + {"EventLimit", &logdb.EventFilter{Order: logdb.ASC, Options: &logdb.Options{Offset: 0, Limit: 500000}}}, + {"EventLimitDesc", &logdb.EventFilter{Order: logdb.DESC, Options: &logdb.Options{Offset: 0, Limit: 500000}}}, + {"EventRange", &logdb.EventFilter{Range: &logdb.Range{From: 500000, To: 1_000_000}}}, + {"EventRangeDesc", &logdb.EventFilter{Range: &logdb.Range{From: 500000, To: 1_000_000}, Order: logdb.DESC}}, + } + + for _, tt := range tests { + b.Run(tt.name, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err = db.FilterEvents(context.Background(), tt.arg) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +// BenchmarkTestDB_FilterTransfers opens a log.db file and measures the performance of the Transfer filtering functionality of LogDB. 
+// Running: go test -bench=BenchmarkTestDB_FilterTransfers -benchmem github.com/vechain/thor/v2/logdb -dbPath /path/to/log.db +func BenchmarkTestDB_FilterTransfers(b *testing.B) { + db, err := loadDBFromDisk(b) + require.NoError(b, err) + defer db.Close() + + txOrigin := thor.MustParseAddress(TEST_ADDRESS) + transferCriteria := []*logdb.TransferCriteria{ + { + TxOrigin: &txOrigin, + Sender: nil, + Recipient: nil, + }, + } + + tests := []struct { + name string + arg *logdb.TransferFilter + }{ + {"TransferCriteria", &logdb.TransferFilter{CriteriaSet: transferCriteria, Options: &logdb.Options{Offset: 0, Limit: 500_000}}}, + {"TransferCriteriaDesc", &logdb.TransferFilter{Order: logdb.DESC, CriteriaSet: transferCriteria, Options: &logdb.Options{Offset: 0, Limit: 500_000}}}, + {"Ranged500K", &logdb.TransferFilter{Range: &logdb.Range{From: 500_000, To: 1_000_000}}}, + {"Ranged500KDesc", &logdb.TransferFilter{Range: &logdb.Range{From: 500_000, To: 1_000_000}, Order: logdb.DESC}}, + } + + for _, tt := range tests { + b.Run(tt.name, func(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err = db.FilterTransfers(context.Background(), tt.arg) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +func createTempDB() (*logdb.LogDB, error) { + dir, err := os.MkdirTemp("", "tempdir-") + if err != nil { + return nil, fmt.Errorf("failed to create temp directory: %w", err) + } + + tmpFile, err := os.CreateTemp(dir, "temp-*.db") + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %w", err) + } + + if err := tmpFile.Close(); err != nil { + return nil, fmt.Errorf("failed to close temp file: %w", err) + } + + db, err := logdb.New(tmpFile.Name()) + if err != nil { + return nil, fmt.Errorf("unable to load logdb: %w", err) + } + + return db, nil +} + +func loadDBFromDisk(b *testing.B) (*logdb.LogDB, error) { + if dbPath == "" { + b.Fatal("Please provide a dbPath") + } + + return logdb.New(dbPath) +} diff --git a/logdb/logdb_test.go 
b/logdb/logdb_test.go index 684c245cc..7ffdd59b1 100644 --- a/logdb/logdb_test.go +++ b/logdb/logdb_test.go @@ -244,6 +244,8 @@ func TestEvents(t *testing.T) { } } +// TestLogDB_NewestBlockID performs a series of read/write tests on the NewestBlockID functionality of the LogDB. +// It validates the correctness of the NewestBlockID method under various scenarios. func TestLogDB_NewestBlockID(t *testing.T) { db, err := logdb.NewMem() if err != nil { @@ -366,6 +368,7 @@ func TestLogDB_NewestBlockID(t *testing.T) { } } +// TestLogDB_HasBlockID performs a series of tests on the HasBlockID functionality of the LogDB. func TestLogDB_HasBlockID(t *testing.T) { db, err := logdb.NewMem() if err != nil {